code stringlengths 101 5.91M |
|---|
class Parameter(nn.Module):
    """A module that wraps a single learnable tensor.

    Calling the instance (or its ``forward``) yields the wrapped
    ``nn.Parameter``; ``data()`` returns the raw underlying tensor.
    """

    _parameter: nn.Parameter

    def __init__(self, data: torch.Tensor):
        super().__init__()
        self._parameter = nn.Parameter(data)

    def forward(self) -> torch.Tensor:
        # The module "computes" nothing — it simply exposes the parameter.
        return self._parameter

    def __call__(self) -> torch.Tensor:
        # Route through nn.Module.__call__ so hooks still fire.
        return super().__call__()

    def data(self) -> torch.Tensor:
        # Raw tensor storage, outside autograd bookkeeping.
        return self._parameter.data
class MacroBlock():
    """A named group of sub-blocks whose entries are exposed under a
    common ``<prefix>_`` namespace.

    Sub-blocks are supplied via :meth:`declare`; cross-block wiring pairs
    via :meth:`connect`.  :meth:`register` assigns index offsets to each
    sub-block and materializes the declared connections.
    """

    def __init__(self, prefix):
        self.prefix = prefix
        self.blocks = []
        self.combinations = []
        self.connections = []

    def declare(self, blocks):
        # Sub-blocks whose names this macro exposes (with prefix applied).
        self.blocks = blocks

    def connect(self, combinations):
        # Pairs of ((connector, indices), (connector, indices)) to wire up
        # when the macro is registered.
        self.combinations = combinations

    def __iter__(self):
        # Lazily yield every sub-block name, prefixed.
        return ((self.prefix + '_') + name for name in chain(*self.blocks))

    def __getitem__(self, i):
        # i-th name across all sub-blocks, prefixed.
        name = next(islice(chain(*self.blocks), i, i + 1))
        return (self.prefix + '_') + name

    def __len__(self):
        return sum(len(block) for block in self.blocks)

    def resolve(self, localname):
        """Map a local (unprefixed) name to its global, prefixed form."""
        return (self.prefix + '_') + localname

    def register(self, start, context):
        """Register all sub-blocks starting at index ``start`` and build the
        declared connections through a prefixed view of ``context``."""
        context = PrefixedDictProxy(context, self.prefix + '_')
        offset = 0
        for block in self.blocks:
            block.register(start + offset, context)
            offset += len(block)
        for (con1, indices1), (con2, indices2) in self.combinations:
            # Pair up indices positionally; extra indices on the longer side
            # are ignored.
            for idx in range(min(len(indices1), len(indices2))):
                self.connections.append(
                    context[con1](indices1[idx]) + context[con2](indices2[idx]))

    def implement(self, equations):
        """Append each sub-block's equations (when supported) plus this
        macro's own connection equations to ``equations``."""
        for block in self.blocks:
            if hasattr(block, 'implement'):
                block.implement(equations)
        equations += self.connections
def masked_accuracy(preds, labels, mask):
    """Accuracy restricted to the masked entries.

    The mask is renormalized to have mean 1 so the final reduce_mean
    weights only the selected entries.
    """
    hits = tf.cast(tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)), tf.float32)
    weights = tf.cast(mask, dtype=tf.float32)
    weights /= tf.reduce_mean(weights)
    return tf.reduce_mean(hits * weights)
class GraphProfiler():
    """Profiles per-function and whole-graph execution time of an NNabla graph.

    `run()` fills `self.result` with:
    - 'forward' / 'backward': lists of per-function `ProfileStat`s
    - 'forward_all' / 'backward_all' (and 'training' when a solver is set):
      whole-graph mean times plus their measured run counts.
    """

    def __init__(self, graph, device_id, ext_name, solver=None, n_run=100,
                 max_measure_execution_time=1, time_scale='m', backward_accum=False):
        self.graph = graph
        self.solver = solver
        self.n_run = n_run
        self.device_id = str(device_id)
        self.ext_name = ext_name
        self.ext_module = import_extension_module(self.ext_name)
        self.max_measure_execution_time = max_measure_execution_time
        self.time_scale = time_scale
        self.result = dict()
        # Reverse map (Variable -> parameter name) used to attribute a
        # function's timing to its parameter scope.
        self.name2val = {v: k for (k, v) in nn.get_parameters().items()}
        self.backward_accum = backward_accum
        if self.n_run < 1:
            # Message now matches the `< 1` check (was: 'bigger than 1').
            raise AssertionError('n_run must be at least 1')

    def _measure_execution_time(self, execution, *execution_args):
        """Run `execution` up to `n_run` times (bounded by
        `max_measure_execution_time` seconds) and return
        (formatted mean time string, number of measured runs)."""
        result = 0.0
        measured_count = 0
        # Warm-up run, excluded from the measurement.
        execution(*execution_args)
        self.ext_module.synchronize(device_id=self.device_id)
        start_0 = time.time()
        for i in range(self.n_run):
            start = time.time()
            execution(*execution_args)
            # Synchronize so asynchronous device work is included in timing.
            self.ext_module.synchronize(device_id=self.device_id)
            stop = time.time()
            result += (stop - start)
            measured_count += 1
            if (stop - start_0) > self.max_measure_execution_time:
                break
        mean_time = result / measured_count
        mean_time = convert_time_scale(mean_time, format=self.time_scale)
        # Fixed: the format string has one placeholder; the stray second
        # argument (self.time_scale) was silently ignored.
        mean_time = '{:.8}'.format(mean_time)
        return (mean_time, measured_count)

    def _time_profiling(self, f, target_process):
        """Profile a single graph function `f` for 'forward' or 'backward'
        and append a `ProfileStat` to `self.result[target_process]`."""
        _zero_variables(f.inputs)
        _zero_variables(f.outputs)
        # Fixed: compare strings with `==`, not identity (`is`), which only
        # worked by CPython interning and raises SyntaxWarning on 3.8+.
        if target_process == 'forward':
            (mean_time, measured_count) = self._measure_execution_time(
                f.forward, f.inputs, f.outputs)
        elif target_process == 'backward':
            accum = [self.backward_accum] * len(f.inputs)
            (mean_time, measured_count) = self._measure_execution_time(
                f.backward, f.inputs, f.outputs, accum)
        else:
            raise NotImplementedError('target process must be [forward, backward]')
        _zero_variables(f.inputs)
        _zero_variables(f.outputs)
        parameter_scope = None
        if len(f.inputs) > 1:
            # Second input of a parametric function is its weight variable.
            if f.inputs[1] in self.name2val:
                parameter_scope = os.path.dirname(self.name2val[f.inputs[1]])
        inputs_shape = [x.shape for x in f.inputs]
        function_name = f.name
        args_info = f.info.args.items()
        self.result[target_process].append(ProfileStat(
            parameter_scope=parameter_scope, inputs_shape=inputs_shape,
            args_info=args_info, function_name=function_name,
            mean_time=mean_time, n_run=measured_count))

    def time_profiling_forward(self):
        """Collect per-function forward timings into result['forward']."""
        self.result['forward'] = list()
        func = partial(self._time_profiling, target_process='forward')
        self.graph.visit(func)

    def time_profiling_backward(self):
        """Collect per-function backward timings into result['backward']."""
        self.result['backward'] = list()
        func = partial(self._time_profiling, target_process='backward')
        self.graph.visit(func)

    def training_function(self):
        """One full training iteration: forward, zero-grad, backward, update."""
        self.graph.forward(clear_no_need_grad=True)
        self.solver.zero_grad()
        self.graph.backward(clear_buffer=True)
        self.solver.update()

    def time_profiling_whole_graph(self):
        """Measure whole-graph forward/backward (and training if a solver
        was provided)."""
        (self.result['forward_all'],
         self.result['n_run_forward_all']) = self._measure_execution_time(self.graph.forward)
        (self.result['backward_all'],
         self.result['n_run_backward_all']) = self._measure_execution_time(self.graph.backward)
        if self.solver is not None:
            (self.result['training'],
             self.result['n_run_training']) = self._measure_execution_time(self.training_function)

    def run(self):
        """Run all profiling passes."""
        self.time_profiling_forward()
        self.time_profiling_backward()
        self.time_profiling_whole_graph()

    def get_result(self):
        return self.result

    def print_result(self):
        """Pretty-print per-function and whole-graph timings."""
        print('time scale: {}'.format(self.time_scale))
        print('forward')
        for x in self.result['forward']:
            print(x)
        print()
        print('backward')
        for x in self.result['backward']:
            print(x)
        print()
        print('all forward: {}, all backward: {}'.format(
            self.result['forward_all'], self.result['backward_all']))
class ArrayDiffStats():
    """Statistics of two arrays and of their elementwise difference,
    rendered as a three-section text report."""

    def __init__(self, a, b):
        self.astat = ArrayStats(a)
        self.bstat = ArrayStats(b)
        self.diffstat = ArrayStats(a - b)

    def __str__(self):
        sections = [
            '',
            '[diff]', str(self.diffstat),
            '[left]', str(self.astat),
            '[right]', str(self.bstat),
        ]
        return '\n'.join(sections)
def is_abstract_token(token):
    """Truthy when `token` is an abstract placeholder: either one or more
    `UPPER_` groups followed by digits (e.g. 'VAR_12'), or a digit followed
    only by zeros (e.g. '10', '200')."""
    placeholder = re.search(r'^([A-Z]+_)+\d+$', token)
    round_number = re.search(r'^\d0*$', token)
    return placeholder or round_number
def test_tf_3d_correct_shape():
    """The 3D positional encoding must preserve the input tensor's shape."""
    channels = 170
    enc = TFPositionalEncoding3D(channels)
    sample = tf.zeros((1, 4, 1, 1024, channels))
    assert enc(sample).shape == (1, 4, 1, 1024, channels)
class MaxPooling3D(_Pooling3D):
    """Max pooling over 3D (spatio-temporal / volumetric) inputs.

    Thin specialization of `_Pooling3D` that selects the 'max' pool mode.
    Note: a stray bare expression statement (`_pooling3d_support`) was
    removed from the class body — it had no effect (or would raise
    NameError if the name was undefined).
    """

    def __init__(self, pool_size=(2, 2, 2), strides=None, padding='valid',
                 data_format=None, **kwargs):
        super(MaxPooling3D, self).__init__(pool_size, strides, padding,
                                           data_format, **kwargs)

    def _pooling_function(self, inputs, pool_size, strides, padding, data_format):
        # Delegate to the backend's 3D pooling with max mode.
        return K.pool3d(inputs, pool_size, strides, padding, data_format,
                        pool_mode='max')
def weights_init(init_type='gaussian'):
    """Return an initializer to pass to `module.apply(...)`.

    The returned function initializes the weights of Conv*/Linear modules
    according to `init_type` ('gaussian', 'xavier', 'kaiming', 'orthogonal',
    or 'default' to leave weights untouched) and zeroes their biases.

    Raises:
        AssertionError: for an unsupported `init_type` (raised explicitly
            instead of `assert 0`, which would be stripped under `-O`).
    """
    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.startswith('Conv') or classname.startswith('Linear')) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                init.normal_(m.weight.data, 0.0, 0.02)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=math.sqrt(2))
            elif init_type == 'default':
                # Keep the module's own default initialization.
                pass
            else:
                raise AssertionError('Unsupported initialization: {}'.format(init_type))
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    return init_fun
class TsEnum(EnumBuilder, TsBase):
    """Enum builder specialized for TypeScript code generation."""

    def __init__(self, package, enum, args):
        super().__init__(package, enum, args)
        # Re-wrap the inherited storage type as a TS type reference.
        self.storage_type = TsTypeRef(self.storage_type)
def spc2npow(spectrogram):
    """Per-frame normalized power (dB) of `spectrogram`.

    Each row (frame) is reduced to a power value via `_spvec2pow`, then
    normalized by the mean power over all frames and converted to dB.
    """
    frame_pow = np.apply_along_axis(_spvec2pow, 1, spectrogram)
    mean_pow = np.mean(frame_pow)
    return 10.0 * np.log10(frame_pow / mean_pow)
def process_sample(aud_path, lable, utt_id, sp, tgt_dict):
    """Build the manifest entry for one utterance.

    Returns {utt_id: {'input': ..., 'output': ...}} where 'input' carries the
    audio path and duration (ms) and 'output' carries the transcript, its
    sentencepiece tokenization, and the dictionary token ids.

    (Locals renamed: `input`/`output` shadowed builtins; unused `ei` dropped.)
    """
    si, _ = torchaudio.info(aud_path)
    input_info = {
        # samples / channels / sample-rate = seconds; divide by the
        # MILLISECONDS_TO_SECONDS factor to express it in ms.
        'length_ms': int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS),
        'path': aud_path,
    }
    token = ' '.join(sp.EncodeAsPieces(lable))
    ids = tgt_dict.encode_line(token, append_eos=False)
    output_info = {
        'text': lable,
        'token': token,
        'tokenid': ', '.join(map(str, [t.tolist() for t in ids])),
    }
    return {utt_id: {'input': input_info, 'output': output_info}}
def test_rint_big_int():
    """np.rint must not alter a large integer that is exactly representable
    as a float64 (value restored — it was missing after `val =`)."""
    # 4607998452777386496 == 2**9 * odd, 53 significant bits -> exact in float64.
    val = 4607998452777386496
    # float conversion round-trips exactly ...
    assert_equal(val, int(float(val)))
    # ... and rint is the identity on an already-integral value.
    assert_equal(val, np.rint(val))
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register ns3::ObjectBase constructors and methods on the pybindgen
    class wrapper `cls`.

    NOTE(review): generated-style binding code — each add_method call mirrors
    one member of the C++ ObjectBase API; keep signatures in sync with ns-3.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    # Attribute access; the FailSafe variants report failure via bool
    # instead of aborting.
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    # Trace source (dis)connection, with and without caller context.
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    # Protected construction hooks.
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return
(DipoleSource)
class DipoleSourceImpl(SimSourceImpl):
    """Injects a dipole current source into an FDFD simulation."""

    def __init__(self, source: DipoleSource) -> None:
        self._src = source
        # Current distribution; built lazily on the first before_sim call.
        self._J = None

    def before_sim(self, sim: FdfdSimProp) -> None:
        """Add the (cached) dipole current to the simulation's source term."""
        if self._J is None:
            self._J = fdfd_solvers.dipole.build_dipole_source(
                omega=2 * np.pi / sim.wlen,
                dxes=sim.dxes,
                eps=sim.eps,
                position=sim.grid.pos2ind(self._src.position, which_shifts=None),
                axis=self._src.axis,
                power=self._src.power,
                phase=np.exp(1j * self._src.phase),
            )
        sim.source += self._J
def test_Tuple_append():
    """Appending to both fields of a Tuple builder yields one valid record."""
    def fill(builder):
        # Field 0: a single float value.
        field0 = builder.content(0)
        field0.append(1.1)
        # Field 1: a list of three ints.
        field1 = builder.content(1)
        lst = field1.begin_list()
        for value in (1, 2, 3):
            lst.append(value)
        field1.end_list()

    builder = lb.Tuple([lb.Numpy(np.float64), lb.ListOffset(np.int64, lb.Numpy(np.int32))])
    fill(builder)
    layout = builder.snapshot()
    assert ak.to_list(layout) == [(1.1, [1, 2, 3])]
    error = ''
    assert builder.is_valid(error), error
    assert len(builder) == 1
    # Clearing drops the record.
    builder.clear()
    assert len(builder) == 0
@pytest.mark.skip('skipping')
def test_cylinder():
    """LaTeX rendering of div(grad(u)) in cylindrical coordinates for scalar
    and vector trial functions.  (Restored the garbled `.skip('skipping')`
    line to a proper `@pytest.mark.skip` decorator.)"""
    T = get_function_space('cylinder')
    u = TrialFunction(T)
    du = div(grad(u))
    assert (du.tolatex() == '\\frac{\\partial^2 u}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u}{\\partial y^2 }+\\frac{\\partial^2 u}{\\partial z^2 }')
    V = VectorSpace(T)
    u = TrialFunction(V)
    du = div(grad(u))
    assert (du.tolatex() == '\\left( \\frac{\\partial^2 u^{x}}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u^{x}}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{x}}{\\partial y^2 }- \\frac{2}{x}\\frac{\\partial u^{y}}{\\partial y }- \\frac{1}{x^{2}}u^{x}+\\frac{\\partial^2 u^{x}}{\\partial z^2 }\\right) \\mathbf{b}_{x} \\\\+\\left( \\frac{\\partial^2 u^{y}}{\\partial x^2 }+\\frac{3}{x}\\frac{\\partial u^{y}}{\\partial x }+\\frac{2}{x^{3}}\\frac{\\partial u^{x}}{\\partial y }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{y}}{\\partial y^2 }+\\frac{\\partial^2 u^{y}}{\\partial z^2 }\\right) \\mathbf{b}_{y} \\\\+\\left( \\frac{\\partial^2 u^{z}}{\\partial x^2 }+\\frac{1}{x}\\frac{\\partial u^{z}}{\\partial x }+\\frac{1}{x^{2}}\\frac{\\partial^2 u^{z}}{\\partial y^2 }+\\frac{\\partial^2 u^{z}}{\\partial z^2 }\\right) \\mathbf{b}_{z} \\\\')
def _setup_wrapper(with_cuda):
here = os.path.abspath(os.path.dirname(__file__))
lib_dir = os.path.join(here, '..', '..', 'lib')
include_dirs = [os.path.join(lib_dir, 'include'), os.path.join(lib_dir, 'include', 'TH')]
wrapper_source = '#include <TH/TH.h>\n'
if with_cuda:
import torch.cuda
wrapper_source += '#include <THC/THC.h>\n'
if (os.sys.platform == 'win32'):
cuda_include_dirs = glob.glob((os.getenv('CUDA_PATH', '') + '/include'))
cuda_include_dirs += glob.glob((os.getenv('NVTOOLSEXT_PATH', '') + '/include'))
else:
cuda_include_dirs = glob.glob('/usr/local/cuda/include')
cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')
include_dirs.append(os.path.join(lib_dir, 'include', 'THC'))
include_dirs.extend(cuda_include_dirs)
return (wrapper_source, include_dirs) |
def _get_dataset_url(name):
    """Resolve the download URL for dataset `name`.

    A user-extended URL file takes precedence; otherwise the URL is composed
    from the base dataset URL and the registered filename.

    Raises:
        KeyError: when `name` is unknown.
    """
    extend_url = _read_extend_url_file(FASTNLP_EXTEND_DATASET_URL, name)
    if extend_url:
        return extend_url
    filename = DATASET_DIR.get(name)
    if not filename:
        raise KeyError(f'There is no {name}.')
    return _get_base_url('dataset') + filename
class TFElectraForSequenceClassification:
    """Placeholder class used when TensorFlow is not installed.

    Every entry point delegates to `requires_tf`, which raises an
    informative import error.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class Partition7(nn.Module):
    """Final pipeline stage of a partitioned T5ForConditionalGeneration.

    Owns decoder blocks 10-23, the decoder's final layer norm and dropout,
    and the LM head, and computes the cross-entropy loss.  The original
    generated code unrolled the 14 identical decoder-block calls; this
    version rolls them into a loop with an identical execution trace.
    """

    LAYER_SCOPES = [
        f'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[{i}]'
        for i in range(10, 24)
    ] + [
        'T5ForConditionalGeneration/T5Stack[decoder]/T5LayerNorm[final_layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]',
        'T5ForConditionalGeneration/Linear[lm_head]',
    ]
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:7'):
        super().__init__()
        # Register the owned sub-modules in execution order as l_0..l_16.
        for idx, scope in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[scope])
        buf_count = param_count = 0
        for scope in self.TENSORS:
            tensor = tensors[scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{param_count}', tensor)
                param_count += 1
            else:
                self.register_buffer(f'b_{buf_count}', tensor)
                buf_count += 1
        self.device = torch.device(device)
        self.input_structure = [1, 1, 1, 1, 1, 1, 1]
        # Mapping from local module names back to original model scopes.
        self.lookup = {f'l_{i}': f'decoder.block.{i + 10}' for i in range(14)}
        self.lookup.update({
            'l_14': 'decoder.final_layer_norm',
            'l_15': 'decoder.dropout',
            'l_16': 'lm_head',
        })
        self.to(self.device)

    def forward(self, *args):
        labels, x0, x1, x2, x3, x4, x5 = unflatten(args, self.input_structure)
        # Thread hidden state and (self/cross) position biases through the
        # 14 decoder blocks; each block returns
        # (hidden, ..., position_bias, encoder_decoder_position_bias).
        hidden, pos_bias, enc_dec_bias = x3, x4, x5
        for idx in range(14):
            block = getattr(self, f'l_{idx}')
            out = block(hidden, attention_mask=x1, position_bias=pos_bias,
                        encoder_attention_mask=x2,
                        encoder_decoder_position_bias=enc_dec_bias,
                        layer_head_mask=None, encoder_layer_head_mask=None,
                        past_key_value=None, output_attentions=False,
                        use_cache=False, encoder_hidden_states=x0)
            hidden = out[:2][0]
            pos_bias = out[2]
            enc_dec_bias = out[3]
        hidden = self.l_14(hidden)  # final layer norm
        hidden = self.l_15(hidden)  # dropout
        # Rescale before the LM head (tied-embedding scaling factor).
        hidden = hidden * 0.03125
        logits = self.l_16(hidden)
        vocab_size = logits.size(-1)
        flat_logits = logits.view(-1, vocab_size)
        flat_labels = labels.view(-1)
        loss = torch.nn.functional.cross_entropy(
            flat_logits, flat_labels, weight=None, size_average=None,
            ignore_index=-100, reduce=None, reduction='mean')
        return (loss,)

    # The following delegate to partition-aware module-level helpers.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
@pytest.mark.parametrize('ctx', ctx_list)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('window_size, stride, fft_size', [(16, 8, 16), (16, 4, 16), (16, 8, 32)])
@pytest.mark.parametrize('window_type', ['hanning', 'hamming', 'rectangular'])
@pytest.mark.parametrize('center', [True, False])
@pytest.mark.parametrize('pad_mode', ['reflect', 'constant'])
@pytest.mark.parametrize('as_stft_backward', [False, True])
def test_istft_forward_backward(ctx, seed, window_size, stride, fft_size, window_type, center, pad_mode, as_stft_backward):
    """Forward/backward test of ISTFT against the reference implementation.

    (Restored the garbled `.parametrize(...)` lines to proper
    `@pytest.mark.parametrize` decorators.)
    """
    backend = ctx.backend[0].split(':')[0]
    if backend == 'cuda':
        pytest.skip('CUDA Convolution N-D is only supported in CUDNN extension')
    if not as_stft_backward:
        if pad_mode != 'constant':
            pytest.skip('`pad_mode != "constant"` is only for `as_stft_backward == True`')
    func_name = 'ISTFTCuda' if backend == 'cudnn' else 'ISTFT'
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # ISTFT inputs are produced by a reference STFT of random audio.
    x_shape = create_stft_input_shape(window_size)
    stft_input = rng.randn(*x_shape).astype(np.float32)
    (y_r, y_i) = ref_stft(stft_input, window_size, stride, fft_size, window_type, center, pad_mode, False)
    istft_inputs = [y_r, y_i]
    if not as_stft_backward:
        # NOLA-violating configurations cannot be inverted; check the error path.
        length = x_shape[1]
        if is_nola_violation(window_type, window_size, stride, fft_size, length, center):
            check_nola_violation(y_r, y_i, window_size, stride, fft_size, window_type, center, pad_mode, as_stft_backward)
            return
    function_tester(rng, F.istft, ref_istft, istft_inputs, func_args=[window_size, stride, fft_size, window_type, center, pad_mode, as_stft_backward], ctx=ctx, func_name=func_name, atol_f=1e-05, atol_b=0.03, dstep=0.01)
def verify_ninja_availability():
    """Raise RuntimeError unless the ninja build tool is available."""
    if is_ninja_available():
        return
    raise RuntimeError('Ninja is required to load C++ extensions')
class Supervision_Train(pl.LightningModule):
    """Supervised semantic-segmentation trainer with manual optimization.

    Wraps `config.net`, steps the optimizer every `config.accumulate_n`
    batches, and tracks train/val metrics with `Evaluator`.

    Fix: both epoch-end hooks contained five byte-identical `elif` dataset
    branches; they are collapsed into the `_mean_scores` helper.
    """

    # Datasets whose last class (clutter/ignore) is excluded from mIoU/F1.
    _DROP_LAST_CLASS_DATASETS = ('vaihingen', 'potsdam', 'whu', 'mass', 'inria')

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.net = config.net
        # Optimizer stepping is driven manually in training_step.
        self.automatic_optimization = False
        self.loss = config.loss
        self.metrics_train = Evaluator(num_class=config.num_classes)
        self.metrics_val = Evaluator(num_class=config.num_classes)

    def forward(self, x):
        """Run the wrapped segmentation network."""
        seg_pre = self.net(x)
        return seg_pre

    def _mean_scores(self, metrics):
        """Return (mIoU, F1); drops the last class for datasets that reserve it."""
        iou = metrics.Intersection_over_Union()
        f1 = metrics.F1()
        if any(name in self.config.log_name for name in self._DROP_LAST_CLASS_DATASETS):
            return np.nanmean(iou[:-1]), np.nanmean(f1[:-1])
        return np.nanmean(iou), np.nanmean(f1)

    def training_step(self, batch, batch_idx):
        img, mask = batch['img'], batch['gt_semantic_seg']
        prediction = self.net(img)
        loss = self.loss(prediction, mask)
        # With aux loss the network returns a tuple; the main head is element 0.
        if self.config.use_aux_loss:
            pre_mask = nn.Softmax(dim=1)(prediction[0])
        else:
            pre_mask = nn.Softmax(dim=1)(prediction)
        pre_mask = pre_mask.argmax(dim=1)
        for i in range(mask.shape[0]):
            self.metrics_train.add_batch(mask[i].cpu().numpy(), pre_mask[i].cpu().numpy())
        opt = self.optimizers(use_pl_optimizer=False)
        self.manual_backward(loss)
        # Gradient accumulation: step every `accumulate_n` batches.
        if (batch_idx + 1) % self.config.accumulate_n == 0:
            opt.step()
            opt.zero_grad()
        sch = self.lr_schedulers()
        # Scheduler steps once per epoch, on the last batch.
        if self.trainer.is_last_batch and ((self.trainer.current_epoch + 1) % 1 == 0):
            sch.step()
        return {'loss': loss}

    def training_epoch_end(self, outputs):
        mIoU, F1 = self._mean_scores(self.metrics_train)
        OA = np.nanmean(self.metrics_train.OA())
        iou_per_class = self.metrics_train.Intersection_over_Union()
        eval_value = {'mIoU': np.round(mIoU, 6), 'F1': np.round(F1, 6), 'OA': np.round(OA, 6)}
        print(' ')
        print('train:', eval_value)
        iou_value = {}
        for class_name, iou in zip(self.config.classes, iou_per_class):
            iou_value[class_name] = np.round(iou, 6)
        print(iou_value)
        print('')
        print(' ')
        self.metrics_train.reset()
        loss = torch.stack([x['loss'] for x in outputs]).mean()
        log_dict = {'train_loss': loss, 'train_mIoU': mIoU, 'train_F1': F1, 'train_OA': OA}
        self.log_dict(log_dict, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        img, mask = batch['img'], batch['gt_semantic_seg']
        prediction = self.forward(img)
        pre_mask = nn.Softmax(dim=1)(prediction)
        pre_mask = pre_mask.argmax(dim=1)
        for i in range(mask.shape[0]):
            self.metrics_val.add_batch(mask[i].cpu().numpy(), pre_mask[i].cpu().numpy())
        loss_val = self.loss(prediction, mask)
        return {'loss_val': loss_val}

    def validation_epoch_end(self, outputs):
        mIoU, F1 = self._mean_scores(self.metrics_val)
        OA = np.nanmean(self.metrics_val.OA())
        iou_per_class = self.metrics_val.Intersection_over_Union()
        eval_value = {'mIoU': np.around(mIoU, 6), 'F1': np.around(F1, 6), 'OA': np.around(OA, 6)}
        print(' ')
        print('val:', eval_value)
        iou_value = {}
        for class_name, iou in zip(self.config.classes, iou_per_class):
            iou_value[class_name] = np.around(iou, 6)
        print(iou_value)
        print('')
        self.metrics_val.reset()
        loss = torch.stack([x['loss_val'] for x in outputs]).mean()
        log_dict = {'val_loss': loss, 'val_mIoU': mIoU, 'val_F1': F1, 'val_OA': OA}
        self.log_dict(log_dict, prog_bar=True)

    def configure_optimizers(self):
        optimizer = self.config.optimizer
        lr_scheduler = self.config.lr_scheduler
        return [optimizer], [lr_scheduler]

    def train_dataloader(self):
        return self.config.train_loader

    def val_dataloader(self):
        return self.config.val_loader
def _read_arraydesc(f):
    """Read an array descriptor record from an IDL SAVE-format stream `f`.

    Returns a dict with keys arrstart/nbytes/nelements/ndims/nmax/dims.
    Two layouts are handled, selected by the leading ARRSTART long:
    8 (standard) and 18 (experimental 64-bit variant).

    Raises:
        Exception: on an unknown ARRSTART value, or when the zero marker
            before a 64-bit dimension entry is not zero.
    """
    arraydesc = {'arrstart': _read_long(f)}
    if (arraydesc['arrstart'] == 8):
        # Standard descriptor: skip 4 bytes, then sizes as longs.
        _skip_bytes(f, 4)
        arraydesc['nbytes'] = _read_long(f)
        arraydesc['nelements'] = _read_long(f)
        arraydesc['ndims'] = _read_long(f)
        # 8 bytes skipped here (unused in this reader).
        _skip_bytes(f, 8)
        arraydesc['nmax'] = _read_long(f)
        # nmax dimension slots follow directly.
        arraydesc['dims'] = [_read_long(f) for _ in range(arraydesc['nmax'])]
    elif (arraydesc['arrstart'] == 18):
        warnings.warn('Using experimental 64-bit array read', stacklevel=3)
        _skip_bytes(f, 8)
        # Sizes are unsigned 64-bit in this layout.
        arraydesc['nbytes'] = _read_uint64(f)
        arraydesc['nelements'] = _read_uint64(f)
        arraydesc['ndims'] = _read_long(f)
        _skip_bytes(f, 8)
        # Fixed slot count: presumably always 8 in this layout — the code
        # hard-codes it rather than reading it from the stream.
        arraydesc['nmax'] = 8
        arraydesc['dims'] = []
        for d in range(arraydesc['nmax']):
            # Each dimension is stored as (zero marker, value).
            v = _read_long(f)
            if (v != 0):
                raise Exception('Expected a zero in ARRAY_DESC')
            arraydesc['dims'].append(_read_long(f))
    else:
        raise Exception(('Unknown ARRSTART: %i' % arraydesc['arrstart']))
    return arraydesc
class TracingAdapter(nn.Module):
    """Adapter that flattens a model's structured inputs/outputs into tuples
    of tensors so the model can be traced.

    NOTE(review): schemas record how to rebuild the original nested
    structure from a flat tensor tuple; `outputs_schema` is captured
    lazily on the first forward pass.
    """

    # Flat tuple of the tensors found in `inputs` (set in __init__).
    flattened_inputs: Tuple[torch.Tensor] = None
    # Rebuilds the original input structure from a flat tuple; None when
    # non-tensor inputs were dropped (allow_non_tensor=True).
    inputs_schema: Schema = None
    # Captured during the first forward(); rebuilds structured outputs.
    outputs_schema: Schema = None

    def __init__(self, model: nn.Module, inputs, inference_func: Optional[Callable]=None, allow_non_tensor: bool=False):
        """Wrap `model` for tracing.

        Args:
            model: the module to trace; DDP/DataParallel wrappers are unwrapped.
            inputs: example input(s); a non-tuple value is wrapped in a tuple.
            inference_func: how to invoke the model (defaults to `model(*inputs)`).
            allow_non_tensor: drop non-tensor inputs/outputs instead of
                raising; disables schema-based generalization to new inputs.

        Raises:
            ValueError: when inputs contain non-tensors and
                `allow_non_tensor` is False.
        """
        super().__init__()
        if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
            model = model.module
        self.model = model
        if (not isinstance(inputs, tuple)):
            inputs = (inputs,)
        self.inputs = inputs
        self.allow_non_tensor = allow_non_tensor
        if (inference_func is None):
            inference_func = (lambda model, *inputs: model(*inputs))
        self.inference_func = inference_func
        (self.flattened_inputs, self.inputs_schema) = flatten_to_tuple(inputs)
        if all((isinstance(x, torch.Tensor) for x in self.flattened_inputs)):
            return
        if self.allow_non_tensor:
            # Drop non-tensors; the schema can no longer rebuild the original
            # structure, so it is invalidated.
            self.flattened_inputs = tuple([x for x in self.flattened_inputs if isinstance(x, torch.Tensor)])
            self.inputs_schema = None
        else:
            for input in self.flattened_inputs:
                if (not isinstance(input, torch.Tensor)):
                    raise ValueError(f'Inputs for tracing must only contain tensors. Got a {type(input)} instead.')

    def forward(self, *args: torch.Tensor):
        """Run inference on flattened tensor args and return flattened outputs."""
        with torch.no_grad(), patch_builtin_len():
            if (self.inputs_schema is not None):
                inputs_orig_format = self.inputs_schema(args)
            else:
                # Without a schema only the exact captured inputs are valid.
                if (args != self.flattened_inputs):
                    raise ValueError('TracingAdapter does not contain valid inputs_schema. So it cannot generalize to other inputs and must be traced with `.flattened_inputs`.')
                inputs_orig_format = self.inputs
            outputs = self.inference_func(self.model, *inputs_orig_format)
            (flattened_outputs, schema) = flatten_to_tuple(outputs)
            flattened_output_tensors = tuple([x for x in flattened_outputs if isinstance(x, torch.Tensor)])
            if (len(flattened_output_tensors) < len(flattened_outputs)):
                if self.allow_non_tensor:
                    flattened_outputs = flattened_output_tensors
                    self.outputs_schema = None
                else:
                    raise ValueError('Model cannot be traced because some model outputs cannot flatten to tensors.')
            elif (self.outputs_schema is None):
                # First call: remember the output structure.
                self.outputs_schema = schema
            else:
                assert (self.outputs_schema == schema), 'Model should always return outputs with the same structure so it can be traced!'
            return flattened_outputs

    def _create_wrapper(self, traced_model):
        """Return a callable that feeds flattened args to `traced_model` and
        rebuilds structured outputs via `outputs_schema`."""
        def forward(*args):
            (flattened_inputs, _) = flatten_to_tuple(args)
            flattened_outputs = traced_model(*flattened_inputs)
            return self.outputs_schema(flattened_outputs)
        return forward
def main(argv):
    """Build a hashval -> contig-id mapping from a contigs database and
    pickle it to disk.  Returns 0 on success."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('contigs_db')
    parser.add_argument('picklefile')
    parser.add_argument('-k', '--ksize', type=int, default=31)
    parser.add_argument('--scaled', type=int, default=10000)
    args = parser.parse_args(argv)

    # Template sketch: cleared copies are made per record.
    template_mh = sourmash.MinHash(0, args.ksize, scaled=args.scaled)
    hashval_to_contig_id = {}
    notify(f'reading contigs from {args.contigs_db}')
    db = sqlite3.connect(args.contigs_db)
    for record in search_utils.contigs_iter_sqlite(db):
        cid = int(record.name)
        record_mh = template_mh.copy_and_clear()
        record_mh.add_sequence(record.sequence, force=True)
        for hashval in record_mh.hashes:
            hashval_to_contig_id[hashval] = cid
    notify('saving {} hashval -> cdbg_id mappings to {}',
           len(hashval_to_contig_id), args.picklefile)
    with open(args.picklefile, 'wb') as dumpfp:
        dump(hashval_to_contig_id, dumpfp)
    return 0
class MLP(nn.Module):
    """Multi-layer perceptron.

    Each hidden layer is Linear -> activation -> Dropout; the output layer
    is Linear -> last_layer_activation.  `activation` and
    `last_layer_activation` are module classes (e.g. nn.ReLU).
    """

    def __init__(self, input_size, output_size, layer_sizes, activation,
                 last_layer_activation, dropout_prob):
        super().__init__()
        widths = [input_size] + list(layer_sizes)
        modules = []
        for prev_width, width in zip(widths, widths[1:]):
            modules += [
                nn.Linear(prev_width, width),
                activation(),
                nn.Dropout(p=dropout_prob),
            ]
        modules += [nn.Linear(widths[-1], output_size), last_layer_activation()]
        self.layers = torch.nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)
class CrfRnn(nn.Module):
    """CRF-as-RNN layer: mean-field CRF inference unrolled for a fixed
    number of iterations, applied on top of per-pixel logits.
    """

    def __init__(self, num_labels, num_iterations=5, crf_init_params=None):
        super(CrfRnn, self).__init__()
        if crf_init_params is None:
            crf_init_params = DenseCRFParams()
        self.params = crf_init_params
        self.num_iterations = num_iterations
        self._softmax = torch.nn.Softmax(dim=0)
        self.num_labels = num_labels
        # Learnable per-label kernel weights, initialised as scaled identity matrices.
        self.spatial_ker_weights = nn.Parameter(
            crf_init_params.spatial_ker_weight * torch.eye(num_labels, dtype=torch.float32))
        self.bilateral_ker_weights = nn.Parameter(
            crf_init_params.bilateral_ker_weight * torch.eye(num_labels, dtype=torch.float32))
        # Label-compatibility transform, initialised to the identity.
        self.compatibility_matrix = nn.Parameter(torch.eye(num_labels, dtype=torch.float32))

    def forward(self, image, logits):
        """Refine *logits* given *image*; both are batch-size-1 tensors.

        Raises ValueError for any batch size other than 1.
        """
        if logits.shape[0] != 1:
            raise ValueError('Only batch size 1 is currently supported!')
        image = image[0]
        logits = logits[0]
        spatial_filter = SpatialFilter(image, gamma=self.params.gamma)
        bilateral_filter = BilateralFilter(image, alpha=self.params.alpha, beta=self.params.beta)
        _, height, width = image.shape
        refined = logits
        for _ in range(self.num_iterations):
            # Normalisation step of mean-field inference.
            q_values = self._softmax(refined)
            # Message passing through the two Gaussian kernels.
            spatial_msg = torch.mm(
                self.spatial_ker_weights,
                spatial_filter.apply(q_values).view(self.num_labels, -1))
            bilateral_msg = torch.mm(
                self.bilateral_ker_weights,
                bilateral_filter.apply(q_values).view(self.num_labels, -1))
            # Compatibility transform, then add the unary potentials back in.
            combined = torch.mm(self.compatibility_matrix, spatial_msg + bilateral_msg)
            refined = combined.view(self.num_labels, height, width) + logits
        return torch.unsqueeze(refined, 0)
def dictClean_Pickle(b, out_path='/data-local/taejin/feat_dir/Fisher/fisher_trans_dict.pickle'):
    """Convert the raw transcript dict in ``b[0]`` and pickle the result.

    Each value is passed through ``modules.dictConvert`` and the converted
    mapping is written to *out_path* with the highest pickle protocol.
    ``out_path`` defaults to the original hard-coded location, so existing
    callers are unaffected; new callers can redirect the output.
    """
    # fix: output path generalized to a parameter instead of a hard-coded literal
    trans_dict = {key: modules.dictConvert(val) for key, val in b[0].items()}
    with open(out_path, 'wb') as handle:
        pickle.dump(trans_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # Incremental IDNA decoder: converts ACE (ASCII-compatible encoding)
    # input back to a Unicode domain name, label by label.
    # NOTE(review): Python-2 vintage code — relies on the `unicode` builtin
    # and u'' literals; will not run unmodified on Python 3.
    def _buffer_decode(self, data, errors, final):
        """Decode *data* into a Unicode domain name.

        Returns ``(decoded_text, consumed_size)`` as required by the codecs
        incremental-decoder API.  Only 'strict' error handling is supported;
        anything else raises IDNAError.
        """
        if (errors != 'strict'):
            raise IDNAError('Unsupported error handling "{0}"'.format(errors))
        if (not data):
            return (u'', 0)
        if isinstance(data, unicode):
            # Unicode input: split on any recognised dot character
            # (ASCII full stop plus ideographic/fullwidth variants).
            labels = _unicode_dots_re.split(data)
        else:
            # Byte input must be pure ASCII; this decode raises on non-ASCII.
            data = str(data)
            unicode(data, 'ascii')
            labels = data.split('.')
        trailing_dot = u''
        if labels:
            if (not labels[(- 1)]):
                # Input ended with a dot: remember it, drop the empty label.
                trailing_dot = u'.'
                del labels[(- 1)]
            elif (not final):
                # Not the final chunk: the last label may be incomplete,
                # so hold it back for the next call.
                del labels[(- 1)]
                if labels:
                    trailing_dot = u'.'
        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                # Account for the dot separating this label from the previous one.
                size += 1
            size += len(label)
        # `size` is how much of the *input* was consumed (labels + separators).
        result = (u'.'.join(result) + trailing_dot)
        size += len(trailing_dot)
        return (result, size)
def make_non_contiguous(tensor):
    """Return a tensor equal to *tensor* but backed by non-contiguous storage.

    A strictly larger buffer (two randomly enlarged dimensions plus one extra
    trailing dimension) is allocated, a randomly offset view matching the
    original shape is carved out of it, and the original values are copied in.
    Tensors with at most one element are simply cloned.
    """
    if tensor.numel() <= 1:
        return tensor.clone()
    padded_shape = list(tensor.size())
    # Enlarge two randomly chosen dimensions so narrowing is possible later.
    for _ in range(2):
        axis = random.randint(0, len(padded_shape) - 1)
        padded_shape[axis] += random.randint(4, 15)
    # Allocate with one extra trailing dim, then select a slice of it —
    # this guarantees the resulting view is not contiguous.
    buf = tensor.new(torch.Size(padded_shape + [random.randint(2, 3)]))
    buf = buf.select(len(buf.size()) - 1, random.randint(0, 1))
    # Narrow every oversized dimension back down, at a random offset.
    for axis in range(len(padded_shape)):
        if buf.size(axis) != tensor.size(axis):
            start = random.randint(1, buf.size(axis) - tensor.size(axis))
            buf = buf.narrow(axis, start, tensor.size(axis))
    buf.copy_(tensor)
    return buf
class ReciprocalMappingExample(props.HasModel):
(sigma, sigmaMap, sigmaDeriv) = props.Invertible('Electrical conductivity (S/m)')
(rho, rhoMap, rhoDeriv) = props.Invertible('Electrical resistivity (Ohm m)')
props.Reciprocal(sigma, rho)
def __init__(self, sigma=None, sigmaMap=None, rho=None, rhoMap=None, **kwargs):
super().__init__(**kwargs)
self.sigma = sigma
self.rho = rho
self.sigmaMap = sigmaMap
self.rhoMap = rhoMap |
def grid_points(left, top, round_left=None, round_top=None):
    """Map pixel coordinates (left, top) to output-grid cell indices (i, j).

    round_left / round_top may be None, 'up' or 'down': they optionally nudge
    the snapped cell by one in the direction opposite the snapping error,
    clamped to the grid bounds.  Out-of-bounds input is tolerated (only logged).
    """
    # fix: renamed from `round`, which shadowed the builtin
    def _snap(point, diff, direction, minimum, maximum):
        # Nudge `point` one cell against the sign of `diff`, staying in bounds.
        assert direction in (None, 'up', 'down')
        if diff > 0 and direction == 'down':
            return max(point - 1, minimum)
        if diff < 0 and direction == 'up':
            return min(point + 1, maximum)
        return point
    top -= ROW_OFFSET
    if not (0 <= left <= IMAGE_COLS) or not (0 <= top <= IMAGE_ROWS):
        # fix: logging.warn is a deprecated alias of logging.warning
        logging.warning('({}, {}) is out of bounds'.format(left, top))
    output_cols = float(OUTPUT_COLS)
    output_rows = float(OUTPUT_ROWS)
    image_cols = float(IMAGE_COLS)
    image_rows = float(IMAGE_ROWS)
    # Truncate into the grid, clamping to the last row/column.
    i = int(min(top / (image_rows / output_rows), output_rows - 1))
    j = int(min(left / (image_cols / output_cols), output_cols - 1))
    top += ROW_OFFSET
    grid_left, grid_top = pixel_coordinates(i, j)
    i = _snap(i, grid_top - top, round_top, 0, int(output_rows) - 1)
    j = _snap(j, grid_left - left, round_left, 0, int(output_cols) - 1)
    return (i, j)
# fix: these registration calls were plain statements, so the decorators
# returned by replaces_method() were discarded and the method was never
# registered — restore the @-decorator form.
@_repository.replaces_method('Array', 'requires_grad_')
@_repository.replaces_method('Scalar', 'requires_grad_')
def requires_grad_(pv: newast.ProgramVisitor, sdfg: SDFG, state: SDFGState, self: str):
    """Mark the array named *self* in *sdfg* as a trainable parameter.

    Raises DaceSyntaxError if no array with that name exists in the SDFG.
    """
    if self not in sdfg.arrays:
        raise common.DaceSyntaxError(pv, None, 'Array {} is not defined'.format(self))
    ParameterArray.make_parameter(sdfg, self)
class Resnet50_128(nn.Module):
    """ResNet-50 backbone with a 128-d feature-embedding head (VGGFace2-style).

    The layer graph is written out explicitly (machine-generated naming scheme)
    so attribute names line up with pretrained checkpoints.  ``forward``
    returns ``(features, features_preflatten)`` where ``features`` is the
    flattened (N, 128) embedding.

    Fix: the residual joins previously used the deprecated positional-alpha
    overload ``torch.add(a, 1, b)``, which newer PyTorch rejects; since the
    alpha was 1, plain ``a + b`` is exactly equivalent.
    """
    def __init__(self):
        super(Resnet50_128, self).__init__()
        # Per-channel preprocessing statistics expected by the pretrained weights.
        self.meta = {'mean': [131.0912, 103.8827, 91.4953], 'std': [1, 1, 1], 'imageSize': [224, 224, 3]}
        # Stem
        self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=[7, 7], stride=(2, 2), padding=(3, 3), bias=False)
        self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv1_relu_7x7_s2 = nn.ReLU(inplace=True)
        self.pool1_3x3_s2 = nn.MaxPool2d(kernel_size=[3, 3], stride=[2, 2], padding=(0, 0), dilation=1, ceil_mode=True)
        # Stage 2: three bottleneck blocks, 64 -> 256 channels
        self.conv2_1_1x1_reduce = nn.Conv2d(64, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_1_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_1_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv2_1_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv2_1_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_1_3x3_relu = nn.ReLU(inplace=True)
        self.conv2_1_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_1_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_1_1x1_proj = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_1_1x1_proj_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_1_relu = nn.ReLU(inplace=True)
        self.conv2_2_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_2_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_2_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv2_2_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv2_2_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_2_3x3_relu = nn.ReLU(inplace=True)
        self.conv2_2_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_2_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_2_relu = nn.ReLU(inplace=True)
        self.conv2_3_1x1_reduce = nn.Conv2d(256, 64, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_3_1x1_reduce_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_3_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv2_3_3x3 = nn.Conv2d(64, 64, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv2_3_3x3_bn = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_3_3x3_relu = nn.ReLU(inplace=True)
        self.conv2_3_1x1_increase = nn.Conv2d(64, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv2_3_1x1_increase_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv2_3_relu = nn.ReLU(inplace=True)
        # Stage 3: four bottleneck blocks, 128 -> 512 channels (downsamples once)
        self.conv3_1_1x1_reduce = nn.Conv2d(256, 128, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv3_1_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_1_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv3_1_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv3_1_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_1_3x3_relu = nn.ReLU(inplace=True)
        self.conv3_1_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_1_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_1_1x1_proj = nn.Conv2d(256, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv3_1_1x1_proj_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_1_relu = nn.ReLU(inplace=True)
        self.conv3_2_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_2_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_2_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv3_2_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv3_2_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_2_3x3_relu = nn.ReLU(inplace=True)
        self.conv3_2_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_2_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_2_relu = nn.ReLU(inplace=True)
        self.conv3_3_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_3_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_3_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv3_3_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv3_3_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_3_3x3_relu = nn.ReLU(inplace=True)
        self.conv3_3_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_3_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_3_relu = nn.ReLU(inplace=True)
        self.conv3_4_1x1_reduce = nn.Conv2d(512, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_4_1x1_reduce_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_4_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv3_4_3x3 = nn.Conv2d(128, 128, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv3_4_3x3_bn = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_4_3x3_relu = nn.ReLU(inplace=True)
        self.conv3_4_1x1_increase = nn.Conv2d(128, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv3_4_1x1_increase_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv3_4_relu = nn.ReLU(inplace=True)
        # Stage 4: six bottleneck blocks, 256 -> 1024 channels (downsamples once)
        self.conv4_1_1x1_reduce = nn.Conv2d(512, 256, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv4_1_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_1_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_1_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_1_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_1_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_1_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_1_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_1_1x1_proj = nn.Conv2d(512, 1024, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv4_1_1x1_proj_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_1_relu = nn.ReLU(inplace=True)
        self.conv4_2_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_2_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_2_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_2_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_2_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_2_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_2_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_2_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_2_relu = nn.ReLU(inplace=True)
        self.conv4_3_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_3_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_3_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_3_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_3_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_3_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_3_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_3_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_3_relu = nn.ReLU(inplace=True)
        self.conv4_4_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_4_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_4_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_4_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_4_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_4_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_4_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_4_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_4_relu = nn.ReLU(inplace=True)
        self.conv4_5_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_5_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_5_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_5_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_5_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_5_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_5_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_5_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_5_relu = nn.ReLU(inplace=True)
        self.conv4_6_1x1_reduce = nn.Conv2d(1024, 256, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_6_1x1_reduce_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_6_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv4_6_3x3 = nn.Conv2d(256, 256, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv4_6_3x3_bn = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_6_3x3_relu = nn.ReLU(inplace=True)
        self.conv4_6_1x1_increase = nn.Conv2d(256, 1024, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv4_6_1x1_increase_bn = nn.BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv4_6_relu = nn.ReLU(inplace=True)
        # Stage 5: three bottleneck blocks, 512 -> 2048 channels (downsamples once)
        self.conv5_1_1x1_reduce = nn.Conv2d(1024, 512, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv5_1_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_1_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv5_1_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv5_1_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_1_3x3_relu = nn.ReLU(inplace=True)
        self.conv5_1_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv5_1_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_1_1x1_proj = nn.Conv2d(1024, 2048, kernel_size=[1, 1], stride=(2, 2), bias=False)
        self.conv5_1_1x1_proj_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_1_relu = nn.ReLU(inplace=True)
        self.conv5_2_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv5_2_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_2_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv5_2_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv5_2_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_2_3x3_relu = nn.ReLU(inplace=True)
        self.conv5_2_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv5_2_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_2_relu = nn.ReLU(inplace=True)
        self.conv5_3_1x1_reduce = nn.Conv2d(2048, 512, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv5_3_1x1_reduce_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_3_1x1_reduce_relu = nn.ReLU(inplace=True)
        self.conv5_3_3x3 = nn.Conv2d(512, 512, kernel_size=[3, 3], stride=(1, 1), padding=(1, 1), bias=False)
        self.conv5_3_3x3_bn = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_3_3x3_relu = nn.ReLU(inplace=True)
        self.conv5_3_1x1_increase = nn.Conv2d(512, 2048, kernel_size=[1, 1], stride=(1, 1), bias=False)
        self.conv5_3_1x1_increase_bn = nn.BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        self.conv5_3_relu = nn.ReLU(inplace=True)
        # Head: global average pool then 1x1 conv to a 128-d embedding
        self.pool5_7x7_s1 = nn.AvgPool2d(kernel_size=[7, 7], stride=[1, 1], padding=0)
        self.feat_extract = nn.Conv2d(2048, 128, kernel_size=[1, 1], stride=(1, 1), bias=False)
    def forward(self, data):
        """Run the backbone on *data* (N, 3, H, W); returns (features, features_preflatten)."""
        conv1_7x7_s2 = self.conv1_7x7_s2(data)
        conv1_7x7_s2_bn = self.conv1_7x7_s2_bn(conv1_7x7_s2)
        conv1_7x7_s2_bnxx = self.conv1_relu_7x7_s2(conv1_7x7_s2_bn)
        pool1_3x3_s2 = self.pool1_3x3_s2(conv1_7x7_s2_bnxx)
        conv2_1_1x1_reduce = self.conv2_1_1x1_reduce(pool1_3x3_s2)
        conv2_1_1x1_reduce_bn = self.conv2_1_1x1_reduce_bn(conv2_1_1x1_reduce)
        conv2_1_1x1_reduce_bnxx = self.conv2_1_1x1_reduce_relu(conv2_1_1x1_reduce_bn)
        conv2_1_3x3 = self.conv2_1_3x3(conv2_1_1x1_reduce_bnxx)
        conv2_1_3x3_bn = self.conv2_1_3x3_bn(conv2_1_3x3)
        conv2_1_3x3_bnxx = self.conv2_1_3x3_relu(conv2_1_3x3_bn)
        conv2_1_1x1_increase = self.conv2_1_1x1_increase(conv2_1_3x3_bnxx)
        conv2_1_1x1_increase_bn = self.conv2_1_1x1_increase_bn(conv2_1_1x1_increase)
        conv2_1_1x1_proj = self.conv2_1_1x1_proj(pool1_3x3_s2)
        conv2_1_1x1_proj_bn = self.conv2_1_1x1_proj_bn(conv2_1_1x1_proj)
        # fix: was torch.add(a, 1, b) — deprecated positional-alpha overload
        conv2_1 = conv2_1_1x1_proj_bn + conv2_1_1x1_increase_bn
        conv2_1x = self.conv2_1_relu(conv2_1)
        conv2_2_1x1_reduce = self.conv2_2_1x1_reduce(conv2_1x)
        conv2_2_1x1_reduce_bn = self.conv2_2_1x1_reduce_bn(conv2_2_1x1_reduce)
        conv2_2_1x1_reduce_bnxx = self.conv2_2_1x1_reduce_relu(conv2_2_1x1_reduce_bn)
        conv2_2_3x3 = self.conv2_2_3x3(conv2_2_1x1_reduce_bnxx)
        conv2_2_3x3_bn = self.conv2_2_3x3_bn(conv2_2_3x3)
        conv2_2_3x3_bnxx = self.conv2_2_3x3_relu(conv2_2_3x3_bn)
        conv2_2_1x1_increase = self.conv2_2_1x1_increase(conv2_2_3x3_bnxx)
        conv2_2_1x1_increase_bn = self.conv2_2_1x1_increase_bn(conv2_2_1x1_increase)
        conv2_2 = conv2_1x + conv2_2_1x1_increase_bn
        conv2_2x = self.conv2_2_relu(conv2_2)
        conv2_3_1x1_reduce = self.conv2_3_1x1_reduce(conv2_2x)
        conv2_3_1x1_reduce_bn = self.conv2_3_1x1_reduce_bn(conv2_3_1x1_reduce)
        conv2_3_1x1_reduce_bnxx = self.conv2_3_1x1_reduce_relu(conv2_3_1x1_reduce_bn)
        conv2_3_3x3 = self.conv2_3_3x3(conv2_3_1x1_reduce_bnxx)
        conv2_3_3x3_bn = self.conv2_3_3x3_bn(conv2_3_3x3)
        conv2_3_3x3_bnxx = self.conv2_3_3x3_relu(conv2_3_3x3_bn)
        conv2_3_1x1_increase = self.conv2_3_1x1_increase(conv2_3_3x3_bnxx)
        conv2_3_1x1_increase_bn = self.conv2_3_1x1_increase_bn(conv2_3_1x1_increase)
        conv2_3 = conv2_2x + conv2_3_1x1_increase_bn
        conv2_3x = self.conv2_3_relu(conv2_3)
        conv3_1_1x1_reduce = self.conv3_1_1x1_reduce(conv2_3x)
        conv3_1_1x1_reduce_bn = self.conv3_1_1x1_reduce_bn(conv3_1_1x1_reduce)
        conv3_1_1x1_reduce_bnxx = self.conv3_1_1x1_reduce_relu(conv3_1_1x1_reduce_bn)
        conv3_1_3x3 = self.conv3_1_3x3(conv3_1_1x1_reduce_bnxx)
        conv3_1_3x3_bn = self.conv3_1_3x3_bn(conv3_1_3x3)
        conv3_1_3x3_bnxx = self.conv3_1_3x3_relu(conv3_1_3x3_bn)
        conv3_1_1x1_increase = self.conv3_1_1x1_increase(conv3_1_3x3_bnxx)
        conv3_1_1x1_increase_bn = self.conv3_1_1x1_increase_bn(conv3_1_1x1_increase)
        conv3_1_1x1_proj = self.conv3_1_1x1_proj(conv2_3x)
        conv3_1_1x1_proj_bn = self.conv3_1_1x1_proj_bn(conv3_1_1x1_proj)
        conv3_1 = conv3_1_1x1_proj_bn + conv3_1_1x1_increase_bn
        conv3_1x = self.conv3_1_relu(conv3_1)
        conv3_2_1x1_reduce = self.conv3_2_1x1_reduce(conv3_1x)
        conv3_2_1x1_reduce_bn = self.conv3_2_1x1_reduce_bn(conv3_2_1x1_reduce)
        conv3_2_1x1_reduce_bnxx = self.conv3_2_1x1_reduce_relu(conv3_2_1x1_reduce_bn)
        conv3_2_3x3 = self.conv3_2_3x3(conv3_2_1x1_reduce_bnxx)
        conv3_2_3x3_bn = self.conv3_2_3x3_bn(conv3_2_3x3)
        conv3_2_3x3_bnxx = self.conv3_2_3x3_relu(conv3_2_3x3_bn)
        conv3_2_1x1_increase = self.conv3_2_1x1_increase(conv3_2_3x3_bnxx)
        conv3_2_1x1_increase_bn = self.conv3_2_1x1_increase_bn(conv3_2_1x1_increase)
        conv3_2 = conv3_1x + conv3_2_1x1_increase_bn
        conv3_2x = self.conv3_2_relu(conv3_2)
        conv3_3_1x1_reduce = self.conv3_3_1x1_reduce(conv3_2x)
        conv3_3_1x1_reduce_bn = self.conv3_3_1x1_reduce_bn(conv3_3_1x1_reduce)
        conv3_3_1x1_reduce_bnxx = self.conv3_3_1x1_reduce_relu(conv3_3_1x1_reduce_bn)
        conv3_3_3x3 = self.conv3_3_3x3(conv3_3_1x1_reduce_bnxx)
        conv3_3_3x3_bn = self.conv3_3_3x3_bn(conv3_3_3x3)
        conv3_3_3x3_bnxx = self.conv3_3_3x3_relu(conv3_3_3x3_bn)
        conv3_3_1x1_increase = self.conv3_3_1x1_increase(conv3_3_3x3_bnxx)
        conv3_3_1x1_increase_bn = self.conv3_3_1x1_increase_bn(conv3_3_1x1_increase)
        conv3_3 = conv3_2x + conv3_3_1x1_increase_bn
        conv3_3x = self.conv3_3_relu(conv3_3)
        conv3_4_1x1_reduce = self.conv3_4_1x1_reduce(conv3_3x)
        conv3_4_1x1_reduce_bn = self.conv3_4_1x1_reduce_bn(conv3_4_1x1_reduce)
        conv3_4_1x1_reduce_bnxx = self.conv3_4_1x1_reduce_relu(conv3_4_1x1_reduce_bn)
        conv3_4_3x3 = self.conv3_4_3x3(conv3_4_1x1_reduce_bnxx)
        conv3_4_3x3_bn = self.conv3_4_3x3_bn(conv3_4_3x3)
        conv3_4_3x3_bnxx = self.conv3_4_3x3_relu(conv3_4_3x3_bn)
        conv3_4_1x1_increase = self.conv3_4_1x1_increase(conv3_4_3x3_bnxx)
        conv3_4_1x1_increase_bn = self.conv3_4_1x1_increase_bn(conv3_4_1x1_increase)
        conv3_4 = conv3_3x + conv3_4_1x1_increase_bn
        conv3_4x = self.conv3_4_relu(conv3_4)
        conv4_1_1x1_reduce = self.conv4_1_1x1_reduce(conv3_4x)
        conv4_1_1x1_reduce_bn = self.conv4_1_1x1_reduce_bn(conv4_1_1x1_reduce)
        conv4_1_1x1_reduce_bnxx = self.conv4_1_1x1_reduce_relu(conv4_1_1x1_reduce_bn)
        conv4_1_3x3 = self.conv4_1_3x3(conv4_1_1x1_reduce_bnxx)
        conv4_1_3x3_bn = self.conv4_1_3x3_bn(conv4_1_3x3)
        conv4_1_3x3_bnxx = self.conv4_1_3x3_relu(conv4_1_3x3_bn)
        conv4_1_1x1_increase = self.conv4_1_1x1_increase(conv4_1_3x3_bnxx)
        conv4_1_1x1_increase_bn = self.conv4_1_1x1_increase_bn(conv4_1_1x1_increase)
        conv4_1_1x1_proj = self.conv4_1_1x1_proj(conv3_4x)
        conv4_1_1x1_proj_bn = self.conv4_1_1x1_proj_bn(conv4_1_1x1_proj)
        conv4_1 = conv4_1_1x1_proj_bn + conv4_1_1x1_increase_bn
        conv4_1x = self.conv4_1_relu(conv4_1)
        conv4_2_1x1_reduce = self.conv4_2_1x1_reduce(conv4_1x)
        conv4_2_1x1_reduce_bn = self.conv4_2_1x1_reduce_bn(conv4_2_1x1_reduce)
        conv4_2_1x1_reduce_bnxx = self.conv4_2_1x1_reduce_relu(conv4_2_1x1_reduce_bn)
        conv4_2_3x3 = self.conv4_2_3x3(conv4_2_1x1_reduce_bnxx)
        conv4_2_3x3_bn = self.conv4_2_3x3_bn(conv4_2_3x3)
        conv4_2_3x3_bnxx = self.conv4_2_3x3_relu(conv4_2_3x3_bn)
        conv4_2_1x1_increase = self.conv4_2_1x1_increase(conv4_2_3x3_bnxx)
        conv4_2_1x1_increase_bn = self.conv4_2_1x1_increase_bn(conv4_2_1x1_increase)
        conv4_2 = conv4_1x + conv4_2_1x1_increase_bn
        conv4_2x = self.conv4_2_relu(conv4_2)
        conv4_3_1x1_reduce = self.conv4_3_1x1_reduce(conv4_2x)
        conv4_3_1x1_reduce_bn = self.conv4_3_1x1_reduce_bn(conv4_3_1x1_reduce)
        conv4_3_1x1_reduce_bnxx = self.conv4_3_1x1_reduce_relu(conv4_3_1x1_reduce_bn)
        conv4_3_3x3 = self.conv4_3_3x3(conv4_3_1x1_reduce_bnxx)
        conv4_3_3x3_bn = self.conv4_3_3x3_bn(conv4_3_3x3)
        conv4_3_3x3_bnxx = self.conv4_3_3x3_relu(conv4_3_3x3_bn)
        conv4_3_1x1_increase = self.conv4_3_1x1_increase(conv4_3_3x3_bnxx)
        conv4_3_1x1_increase_bn = self.conv4_3_1x1_increase_bn(conv4_3_1x1_increase)
        conv4_3 = conv4_2x + conv4_3_1x1_increase_bn
        conv4_3x = self.conv4_3_relu(conv4_3)
        conv4_4_1x1_reduce = self.conv4_4_1x1_reduce(conv4_3x)
        conv4_4_1x1_reduce_bn = self.conv4_4_1x1_reduce_bn(conv4_4_1x1_reduce)
        conv4_4_1x1_reduce_bnxx = self.conv4_4_1x1_reduce_relu(conv4_4_1x1_reduce_bn)
        conv4_4_3x3 = self.conv4_4_3x3(conv4_4_1x1_reduce_bnxx)
        conv4_4_3x3_bn = self.conv4_4_3x3_bn(conv4_4_3x3)
        conv4_4_3x3_bnxx = self.conv4_4_3x3_relu(conv4_4_3x3_bn)
        conv4_4_1x1_increase = self.conv4_4_1x1_increase(conv4_4_3x3_bnxx)
        conv4_4_1x1_increase_bn = self.conv4_4_1x1_increase_bn(conv4_4_1x1_increase)
        conv4_4 = conv4_3x + conv4_4_1x1_increase_bn
        conv4_4x = self.conv4_4_relu(conv4_4)
        conv4_5_1x1_reduce = self.conv4_5_1x1_reduce(conv4_4x)
        conv4_5_1x1_reduce_bn = self.conv4_5_1x1_reduce_bn(conv4_5_1x1_reduce)
        conv4_5_1x1_reduce_bnxx = self.conv4_5_1x1_reduce_relu(conv4_5_1x1_reduce_bn)
        conv4_5_3x3 = self.conv4_5_3x3(conv4_5_1x1_reduce_bnxx)
        conv4_5_3x3_bn = self.conv4_5_3x3_bn(conv4_5_3x3)
        conv4_5_3x3_bnxx = self.conv4_5_3x3_relu(conv4_5_3x3_bn)
        conv4_5_1x1_increase = self.conv4_5_1x1_increase(conv4_5_3x3_bnxx)
        conv4_5_1x1_increase_bn = self.conv4_5_1x1_increase_bn(conv4_5_1x1_increase)
        conv4_5 = conv4_4x + conv4_5_1x1_increase_bn
        conv4_5x = self.conv4_5_relu(conv4_5)
        conv4_6_1x1_reduce = self.conv4_6_1x1_reduce(conv4_5x)
        conv4_6_1x1_reduce_bn = self.conv4_6_1x1_reduce_bn(conv4_6_1x1_reduce)
        conv4_6_1x1_reduce_bnxx = self.conv4_6_1x1_reduce_relu(conv4_6_1x1_reduce_bn)
        conv4_6_3x3 = self.conv4_6_3x3(conv4_6_1x1_reduce_bnxx)
        conv4_6_3x3_bn = self.conv4_6_3x3_bn(conv4_6_3x3)
        conv4_6_3x3_bnxx = self.conv4_6_3x3_relu(conv4_6_3x3_bn)
        conv4_6_1x1_increase = self.conv4_6_1x1_increase(conv4_6_3x3_bnxx)
        conv4_6_1x1_increase_bn = self.conv4_6_1x1_increase_bn(conv4_6_1x1_increase)
        conv4_6 = conv4_5x + conv4_6_1x1_increase_bn
        conv4_6x = self.conv4_6_relu(conv4_6)
        conv5_1_1x1_reduce = self.conv5_1_1x1_reduce(conv4_6x)
        conv5_1_1x1_reduce_bn = self.conv5_1_1x1_reduce_bn(conv5_1_1x1_reduce)
        conv5_1_1x1_reduce_bnxx = self.conv5_1_1x1_reduce_relu(conv5_1_1x1_reduce_bn)
        conv5_1_3x3 = self.conv5_1_3x3(conv5_1_1x1_reduce_bnxx)
        conv5_1_3x3_bn = self.conv5_1_3x3_bn(conv5_1_3x3)
        conv5_1_3x3_bnxx = self.conv5_1_3x3_relu(conv5_1_3x3_bn)
        conv5_1_1x1_increase = self.conv5_1_1x1_increase(conv5_1_3x3_bnxx)
        conv5_1_1x1_increase_bn = self.conv5_1_1x1_increase_bn(conv5_1_1x1_increase)
        conv5_1_1x1_proj = self.conv5_1_1x1_proj(conv4_6x)
        conv5_1_1x1_proj_bn = self.conv5_1_1x1_proj_bn(conv5_1_1x1_proj)
        conv5_1 = conv5_1_1x1_proj_bn + conv5_1_1x1_increase_bn
        conv5_1x = self.conv5_1_relu(conv5_1)
        conv5_2_1x1_reduce = self.conv5_2_1x1_reduce(conv5_1x)
        conv5_2_1x1_reduce_bn = self.conv5_2_1x1_reduce_bn(conv5_2_1x1_reduce)
        conv5_2_1x1_reduce_bnxx = self.conv5_2_1x1_reduce_relu(conv5_2_1x1_reduce_bn)
        conv5_2_3x3 = self.conv5_2_3x3(conv5_2_1x1_reduce_bnxx)
        conv5_2_3x3_bn = self.conv5_2_3x3_bn(conv5_2_3x3)
        conv5_2_3x3_bnxx = self.conv5_2_3x3_relu(conv5_2_3x3_bn)
        conv5_2_1x1_increase = self.conv5_2_1x1_increase(conv5_2_3x3_bnxx)
        conv5_2_1x1_increase_bn = self.conv5_2_1x1_increase_bn(conv5_2_1x1_increase)
        conv5_2 = conv5_1x + conv5_2_1x1_increase_bn
        conv5_2x = self.conv5_2_relu(conv5_2)
        conv5_3_1x1_reduce = self.conv5_3_1x1_reduce(conv5_2x)
        conv5_3_1x1_reduce_bn = self.conv5_3_1x1_reduce_bn(conv5_3_1x1_reduce)
        conv5_3_1x1_reduce_bnxx = self.conv5_3_1x1_reduce_relu(conv5_3_1x1_reduce_bn)
        conv5_3_3x3 = self.conv5_3_3x3(conv5_3_1x1_reduce_bnxx)
        conv5_3_3x3_bn = self.conv5_3_3x3_bn(conv5_3_3x3)
        conv5_3_3x3_bnxx = self.conv5_3_3x3_relu(conv5_3_3x3_bn)
        conv5_3_1x1_increase = self.conv5_3_1x1_increase(conv5_3_3x3_bnxx)
        conv5_3_1x1_increase_bn = self.conv5_3_1x1_increase_bn(conv5_3_1x1_increase)
        conv5_3 = conv5_2x + conv5_3_1x1_increase_bn
        conv5_3x = self.conv5_3_relu(conv5_3)
        pool5_7x7_s1 = self.pool5_7x7_s1(conv5_3x)
        feat_extract_preflatten = self.feat_extract(pool5_7x7_s1)
        feat_extract = feat_extract_preflatten.view(feat_extract_preflatten.size(0), -1)
        return (feat_extract, feat_extract_preflatten)
def network(frame1, frame2, frame3, is_training, reuse=False, scope='netflow'):
    """Build a TF1 graph that predicts a residual correction for the middle frame.

    Each single-channel frame goes through parallel 3x3 / 5x5 / 7x7 conv
    branches with PReLU activations; the per-frame branch outputs are
    concatenated, then fused by a densely connected stack of 3x3 convs
    (batch norm + PReLU) ending in a single-channel residual that is added
    to ``frame2``.

    Args:
        frame1, frame2, frame3: NHWC tensors with one channel each
            (the ``[k, k, 1, 32]`` weight shapes fix the input depth to 1).
        is_training: bool flag/tensor forwarded to batch normalization.
        reuse: whether to reuse variables of an existing ``scope``.
        scope: variable-scope name.

    Returns:
        Tensor ``c6 + frame2`` — the residual-corrected middle frame.
    """
    with tf.variable_scope(scope, reuse=reuse):
        # Per-frame branch filters: c{k}_{i}_w is a k x k conv for frame i,
        # mapping 1 input channel to 32 maps (Xavier init, zero bias).
        c3_1_w = tf.get_variable('c3_1_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_1_b = tf.get_variable('c3_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_2_w = tf.get_variable('c3_2_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_2_b = tf.get_variable('c3_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_3_w = tf.get_variable('c3_3_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_3_b = tf.get_variable('c3_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_1_w = tf.get_variable('c5_1_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_1_b = tf.get_variable('c5_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_2_w = tf.get_variable('c5_2_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_2_b = tf.get_variable('c5_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_3_w = tf.get_variable('c5_3_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_3_b = tf.get_variable('c5_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_1_w = tf.get_variable('c7_1_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_1_b = tf.get_variable('c7_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_2_w = tf.get_variable('c7_2_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_2_b = tf.get_variable('c7_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_3_w = tf.get_variable('c7_3_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_3_b = tf.get_variable('c7_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        # Fusion-stack filters; c1 sees 3 frames x 3 branches x 32 maps, and the
        # later convs see the growing dense concatenations built below.
        c1_w = tf.get_variable('c1_w', shape=[3, 3, ((32 * 3) * 3), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c1_b = tf.get_variable('c1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c2_w = tf.get_variable('c2_w', shape=[3, 3, 32, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c2_b = tf.get_variable('c2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_w = tf.get_variable('c3_w', shape=[3, 3, (32 * 2), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_b = tf.get_variable('c3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c4_w = tf.get_variable('c4_w', shape=[3, 3, (32 * 3), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c4_b = tf.get_variable('c4_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_w = tf.get_variable('c5_w', shape=[3, 3, (32 * 4), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_b = tf.get_variable('c5_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c6_w = tf.get_variable('c6_w', shape=[3, 3, 32, 1], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c6_b = tf.get_variable('c6_b', shape=[1], initializer=tf.constant_initializer(0.0))
        # Frame-1 branch: parallel 3/5/7 convs, PReLU, channel concat.
        c3_1 = tf.nn.conv2d(frame1, c3_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_1 = tf.nn.bias_add(c3_1, c3_1_b)
        c3_1 = tflearn.activations.prelu(c3_1)
        c5_1 = tf.nn.conv2d(frame1, c5_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_1 = tf.nn.bias_add(c5_1, c5_1_b)
        c5_1 = tflearn.activations.prelu(c5_1)
        c7_1 = tf.nn.conv2d(frame1, c7_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_1 = tf.nn.bias_add(c7_1, c7_1_b)
        c7_1 = tflearn.activations.prelu(c7_1)
        cc_1 = tf.concat([c3_1, c5_1, c7_1], 3)
        # Frame-2 branch (same structure, separate weights).
        c3_2 = tf.nn.conv2d(frame2, c3_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_2 = tf.nn.bias_add(c3_2, c3_2_b)
        c3_2 = tflearn.activations.prelu(c3_2)
        c5_2 = tf.nn.conv2d(frame2, c5_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_2 = tf.nn.bias_add(c5_2, c5_2_b)
        c5_2 = tflearn.activations.prelu(c5_2)
        c7_2 = tf.nn.conv2d(frame2, c7_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_2 = tf.nn.bias_add(c7_2, c7_2_b)
        c7_2 = tflearn.activations.prelu(c7_2)
        cc_2 = tf.concat([c3_2, c5_2, c7_2], 3)
        # Frame-3 branch.
        c3_3 = tf.nn.conv2d(frame3, c3_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_3 = tf.nn.bias_add(c3_3, c3_3_b)
        c3_3 = tflearn.activations.prelu(c3_3)
        c5_3 = tf.nn.conv2d(frame3, c5_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_3 = tf.nn.bias_add(c5_3, c5_3_b)
        c5_3 = tflearn.activations.prelu(c5_3)
        c7_3 = tf.nn.conv2d(frame3, c7_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_3 = tf.nn.bias_add(c7_3, c7_3_b)
        c7_3 = tflearn.activations.prelu(c7_3)
        cc_3 = tf.concat([c3_3, c5_3, c7_3], 3)
        c_concat = tf.concat([cc_1, cc_2, cc_3], 3)
        # Dense fusion stack: each conv input is the concat of all previous outputs.
        c1 = tf.nn.conv2d(c_concat, c1_w, strides=[1, 1, 1, 1], padding='SAME')
        c1 = tf.nn.bias_add(c1, c1_b)
        c1 = tf.layers.batch_normalization(c1, training=is_training)
        c1 = tflearn.activations.prelu(c1)
        c2 = tf.nn.conv2d(c1, c2_w, strides=[1, 1, 1, 1], padding='SAME')
        c2 = tf.nn.bias_add(c2, c2_b)
        c2 = tf.layers.batch_normalization(c2, training=is_training)
        c2 = tflearn.activations.prelu(c2)
        cc2 = tf.concat([c1, c2], 3)
        c3 = tf.nn.conv2d(cc2, c3_w, strides=[1, 1, 1, 1], padding='SAME')
        c3 = tf.nn.bias_add(c3, c3_b)
        c3 = tf.layers.batch_normalization(c3, training=is_training)
        c3 = tflearn.activations.prelu(c3)
        cc3 = tf.concat([c1, c2, c3], 3)
        c4 = tf.nn.conv2d(cc3, c4_w, strides=[1, 1, 1, 1], padding='SAME')
        c4 = tf.nn.bias_add(c4, c4_b)
        c4 = tf.layers.batch_normalization(c4, training=is_training)
        c4 = tflearn.activations.prelu(c4)
        cc4 = tf.concat([c1, c2, c3, c4], 3)
        c5 = tf.nn.conv2d(cc4, c5_w, strides=[1, 1, 1, 1], padding='SAME')
        c5 = tf.nn.bias_add(c5, c5_b)
        c5 = tf.layers.batch_normalization(c5, training=is_training)
        c5 = tflearn.activations.prelu(c5)
        # Project down to a 1-channel residual.
        c6 = tf.nn.conv2d(c5, c6_w, strides=[1, 1, 1, 1], padding='SAME')
        c6 = tf.nn.bias_add(c6, c6_b)
        c6 = tf.layers.batch_normalization(c6, training=is_training)
        c6 = tflearn.activations.prelu(c6)
        # Residual connection onto the middle frame.
        output = tf.add(c6, frame2)
        return output
def main():
    """CLI entry point: evaluate a DocNLI-pretrained RoBERTa classifier on the SciTail test split.

    Parses command-line options, configures device/seed, loads the pretrained
    checkpoint ``DocNLI.pretrained.RoBERTA.model.pt``, featurizes the test
    examples and runs ``evaluation`` over them.

    NOTE(review): ``pretrain_model_dir``, ``RteProcessor``, ``logger``,
    ``convert_examples_to_features`` and ``evaluation`` come from module scope
    and are not visible here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--data_label', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--train_batch_size', default=16, type=int, help='Total batch size for training.')
    parser.add_argument('--eval_batch_size', default=64, type=int, help='Total batch size for eval.')
    parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
    parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    # Only the RTE-style processor is wired up; --task_name selects it.
    processors = {'rte': RteProcessor}
    output_modes = {'rte': 'classification'}
    # Device selection: single-process (possibly multi-GPU) vs. distributed NCCL.
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # Effective per-step batch size shrinks when gradients are accumulated.
    args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
    # Seed all RNGs before any model/data construction for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    task_name = args.task_name.lower()
    if (task_name not in processors):
        raise ValueError(('Task not found: %s' % task_name))
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    # Despite the RTE processor, the data actually loaded is the SciTail test split.
    test_examples = processor.load_scitail('test')
    label_list = ['entailment', 'not_entailment']
    num_labels = len(label_list)
    print('num_labels:', num_labels, ' test size:', len(test_examples))
    model = RobertaForSequenceClassification(num_labels)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    # Load the fixed pretrained checkpoint from the working directory.
    model.load_state_dict(torch.load('DocNLI.pretrained.RoBERTA.model.pt', map_location=device))
    model.to(device)
    # RoBERTa-style featurization: CLS first, extra SEP, right padding.
    test_features = convert_examples_to_features(test_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
    test_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
    test_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
    test_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
    test_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    test_data = TensorDataset(test_all_input_ids, test_all_input_mask, test_all_segment_ids, test_all_label_ids)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
    model.eval()
    final_test_performance = evaluation(test_dataloader, device, model)
    print('final_test_performance:', final_test_performance)
def test_initialize_rdn_1():
    """A freshly constructed BoostedRDNClassifier exposes its default settings."""
    classifier = BoostedRDNClassifier()
    assert classifier.target == 'None'
    assert classifier.n_estimators == 10
class BTSNmat(SpectralMatrix):
    """Spectral matrix coupling a T test basis with an SN trial basis.

    Stored as a mapping from diagonal offset to diagonal values.
    """

    def assemble(self, method):
        """Return the nonzero diagonals ``{offset: values}`` of the matrix."""
        test_base = self.testfunction[0]
        trial_base = self.trialfunction[0]
        assert isinstance(test_base, T)
        assert isinstance(trial_base, SN)
        h = get_norm_sq(test_base, trial_base, method)
        M = test_base.N
        N = trial_base.N - 2
        # Second stencil diagonal scales the lower (-2) band.
        alpha = trial_base.stencil_matrix()[2]
        lower_band = h[2:dmax(M, N, -2) + 2] * alpha
        return {0: h[:-2], -2: lower_band}
def _strip_string(string: str) -> str:
    """Normalize a LaTeX answer string for equality comparison.

    Strips linebreaks and LaTeX spacing/sizing commands, degree/currency/
    percent markers and right-hand units, pads bare leading decimal points,
    drops short left-hand sides of "x=..." answers, and canonicalizes
    sqrt/frac forms so that superficially different answers compare equal.

    Fix: the ``'\\%'`` replacement was previously performed twice in a row;
    the redundant duplicate call has been removed (no behavior change —
    ``str.replace`` is idempotent for this pattern).
    """
    string = string.replace('\n', '')
    string = string.replace('\\!', '')       # remove \! thin-space commands
    string = string.replace('\\\\', '\\')    # collapse doubled backslashes
    string = string.replace('tfrac', 'frac')
    string = string.replace('dfrac', 'frac')
    string = string.replace('\\left', '')
    string = string.replace('\\right', '')
    string = string.replace('^{\\circ}', '')  # degree markers
    string = string.replace('^\\circ', '')
    string = string.replace('\\$', '')        # dollar signs
    string = _remove_right_units(string)
    string = string.replace('\\%', '')        # percent signs
    string = string.replace(' .', ' 0.')      # " .5" -> " 0.5"
    string = string.replace('{.', '{0.')
    if (len(string) == 0):
        return string
    if (string[0] == '.'):
        string = ('0' + string)
    # For answers like "x=5", keep only the right-hand side when the LHS is short.
    if (len(string.split('=')) == 2):
        if (len(string.split('=')[0]) <= 2):
            string = string.split('=')[1]
    string = _fix_sqrt(string)
    string = string.replace(' ', '')
    string = _fix_fracs(string)
    if (string == '0.5'):
        string = '\\frac{1}{2}'
    string = _fix_a_slash_b(string)
    return string
def get_2lvl_model(**kwargs):
    """Assemble the two-level PaSST model: augmenting mel frontend + stfthop100 backbone."""
    mel_frontend = AugmentMelSTFT(
        n_mels=128, sr=32000, win_length=800, hopsize=100, n_fft=1024,
        freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
        fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop100', input_tdim=3200)
    # timestamp embedding size is 1295 * 2
    return PasstBasicWrapper(mel=mel_frontend, net=backbone,
                             timestamp_embedding_size=2590, **kwargs)
from dataclasses import dataclass  # needed for the decorator restored below; harmless if already imported


@dataclass(frozen=True)  # fix: decorator was garbled to a bare `(frozen=True)` expression
class PartialTrajectory:
    """Immutable fixed-length trajectory slice used for sequence models.

    Holds aligned per-step arrays (observations, actions, rewards,
    returns-to-go, terminals, timesteps, masks) plus the slice length.
    """

    observations: ObservationSequence
    actions: NDArray
    rewards: Float32NDArray
    returns_to_go: Float32NDArray
    terminals: Float32NDArray
    timesteps: Int32NDArray
    masks: Float32NDArray
    length: int

    def observation_signature(self) -> Signature:
        """Return dtype/shape signature of the observations (wrapped in lists for tuple obs)."""
        shape = get_shape_from_observation_sequence(self.observations)
        dtype = get_dtype_from_observation_sequence(self.observations)
        if isinstance(self.observations, np.ndarray):
            shape = [shape]
            dtype = [dtype]
        return Signature(dtype=dtype, shape=shape)

    def action_signature(self) -> Signature:
        """Return dtype/shape signature of a single action."""
        return Signature(dtype=[self.actions.dtype], shape=[self.actions.shape[1:]])

    def reward_signature(self) -> Signature:
        """Return dtype/shape signature of a single reward."""
        return Signature(dtype=[self.rewards.dtype], shape=[self.rewards.shape[1:]])

    def get_transition_count(self) -> int:
        """Number of usable transitions: the last step counts only if terminal."""
        return (self.length if bool(self.terminals[(- 1)]) else (self.length - 1))

    def get_as_transition(self, index: int) -> Transition:
        """Materialize step *index* as a Transition (zero next-obs at a terminal end)."""
        assert (index < self.get_transition_count())
        observation = retrieve_observation(self.observations, index)
        terminal = float(self.terminals[index])
        if (terminal and (index == (self.length - 1))):
            next_observation = create_zero_observation(observation)
        else:
            next_observation = retrieve_observation(self.observations, (index + 1))
        return Transition(observation=observation, action=self.actions[index], reward=self.rewards[index], next_observation=next_observation, return_to_go=self.returns_to_go[index], terminal=terminal, interval=1)

    def __len__(self) -> int:
        return self.length
class Feat2Net(nn.Module):
    """Thin wrapper around the 2-D V2V backbone producing `hyp.feat2_dim` feature maps."""

    def __init__(self, in_dim=1):
        super().__init__()
        # Backbone lives on GPU; printed once at construction for inspection.
        self.net = archs.v2v2d.V2VModel(in_dim, hyp.feat2_dim).cuda()
        print(self.net)

    def forward(self, feat, summ_writer=None, comp_mask=None):
        """Run the backbone; optionally log input/output feature summaries."""
        total_loss = torch.tensor(0.0).cuda()  # kept for interface symmetry; not returned
        B, C, H, W = list(feat.shape)  # implicitly asserts a 4-D input
        if summ_writer is not None:
            summ_writer.summ_feat('feat2/feat_input', feat, pca=False)
        out = self.net(feat)
        if summ_writer is not None:
            summ_writer.summ_feat('feat2/feat_output', out)
        return out
def _parse_comma_separated_option(arguments: list[str], option: str) -> list[str]:
index = arguments.index(option)
if (',' not in arguments[(index + 1)]):
return arguments
variables = arguments[(index + 1)].split(',')
return ((arguments[:(index + 1)] + variables) + arguments[(index + 2):]) |
def start_training():
    """Parse options, build train/eval moment-retrieval datasets, and launch training.

    Returns:
        Tuple of (best-checkpoint path, eval split name, eval path, debug flag)
        so callers can run inference on the best model afterwards.
    """
    logger.info('Setup config, data and model...')
    opt = BaseOptions().parse()
    set_seed(opt.seed)
    if opt.debug:
        # Deterministic cudnn for reproducible debugging runs.
        cudnn.benchmark = False
        cudnn.deterministic = True
    dataset_config = dict(dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, v_feat_dim=opt.v_feat_dim, q_feat_dim=opt.t_feat_dim, q_feat_type='last_hidden_state', max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=(not opt.no_norm_vfeat), normalize_t=(not opt.no_norm_tfeat), clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, use_cache=opt.use_cache, add_easy_negative=opt.add_easy_negative, easy_negative_only=opt.easy_negative_only)
    dataset_config['data_path'] = opt.train_path
    train_dataset = DatasetMR(**dataset_config)
    if (opt.eval_path is not None):
        # Reuse the train config but disable text dropout for evaluation and
        # point q_feat_dir at the plain text features (strip asr/cap suffixes).
        dataset_config['data_path'] = opt.eval_path
        dataset_config['txt_drop_ratio'] = 0
        dataset_config['q_feat_dir'] = opt.t_feat_dir.replace('txt_clip_asr', 'txt_clip').replace('txt_clip_cap', 'txt_clip')
        eval_dataset = DatasetMR(**dataset_config)
    else:
        eval_dataset = None
    if (opt.lr_warmup > 0):
        # lr_warmup > 1 is an absolute step count; <= 1 is a fraction of total epochs.
        total_steps = opt.n_epoch
        warmup_steps = (opt.lr_warmup if (opt.lr_warmup > 1) else int((opt.lr_warmup * total_steps)))
        opt.lr_warmup = [warmup_steps, total_steps]
    (model, criterion, optimizer, lr_scheduler) = setup_model(opt)
    logger.info(f'Model {model}')
    count_parameters(model)
    logger.info('Start Training...')
    train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
    return (opt.ckpt_filepath.replace('.ckpt', '_best.ckpt'), opt.eval_split_name, opt.eval_path, opt.debug)
def reshape_nd(arr, ndim, dim):
    """Reshape 1-D *arr* into *ndim* axes with its data placed along axis *dim*.

    All other axes get size 1 (broadcast-ready). Raises ValueError when
    *arr* is not one-dimensional.
    """
    if arr.ndim != 1:
        raise ValueError('arr must be a 1D array')
    target_shape = [1] * ndim
    target_shape[dim] = -1  # let numpy infer the data axis length
    return np.reshape(arr, target_shape)
def test_vectorizer_min_df():
    """min_df moves rare character terms from vocabulary_ into stop_words_."""
    corpus = ['abc', 'dea', 'eat']
    vectorizer = CountVectorizer(analyzer='char', min_df=1)

    # min_df=1: every character survives, nothing is pruned.
    vectorizer.fit(corpus)
    assert 'a' in vectorizer.vocabulary_.keys()
    assert len(vectorizer.vocabulary_.keys()) == 6
    assert len(vectorizer.stop_words_) == 0

    # min_df=2 (absolute count): characters seen once become stop words.
    vectorizer.min_df = 2
    vectorizer.fit(corpus)
    assert 'c' not in vectorizer.vocabulary_.keys()
    assert len(vectorizer.vocabulary_.keys()) == 2
    assert 'c' in vectorizer.stop_words_
    assert len(vectorizer.stop_words_) == 4

    # min_df=0.8 (document fraction): only terms in >= 80% of docs remain.
    vectorizer.min_df = 0.8
    vectorizer.fit(corpus)
    assert 'c' not in vectorizer.vocabulary_.keys()
    assert len(vectorizer.vocabulary_.keys()) == 1
    assert 'c' in vectorizer.stop_words_
    assert len(vectorizer.stop_words_) == 5
import pytest  # needed for the decorator restored below; harmless if already imported


@pytest.mark.parametrize('device', ['cpu', 'cuda'])  # fix: decorator line was garbled to `.parametrize(...)`, a syntax error
def test_compatibility(device, m=4, M=5, L=16, B=2):
    """CepstrumToAutocorrelation must match the c2acr CLI and be differentiable.

    m/M are input/output orders, L the FFT length, B the batch size.
    """
    c2acr = diffsptk.CepstrumToAutocorrelation(M, L)
    U.check_compatibility(device, c2acr, [], f'nrand -l {(B * (m + 1))}', f'c2acr -m {m} -M {M} -l {L}', [], dx=(m + 1), dy=(M + 1))
    U.check_differentiable(device, c2acr, [B, (m + 1)])
class qConformalNoisyExpectedHypervolumeImprovement(qConformalExpectedHypervolumeImprovement, qNoisyExpectedHypervolumeImprovement):
    """Conformalized qNEHVI: noisy expected hypervolume improvement evaluated
    under a conformal prediction grid instead of the raw posterior."""

    def __init__(self, alpha, temp, grid_res, max_grid_refinements, ratio_estimator, optimistic=False, grid_sampler=None, randomized=False, *args, **kwargs):
        # Initialize both parents explicitly: the qNEHVI machinery (without
        # root caching) and the conformal-grid configuration.
        qNoisyExpectedHypervolumeImprovement.__init__(self, *args, cache_root=False, **kwargs)
        ConformalAcquisition.__init__(self, alpha, temp, grid_res, max_grid_refinements, ratio_estimator, optimistic, grid_sampler, randomized)

    def _nonconformal_fwd(self, X, conditioned_model):
        """Standard (non-conformal) qNEHVI value of X under *conditioned_model*."""
        X_full = torch.cat([match_batch_shape(self.X_baseline, X), X], dim=(- 2))
        posterior = conditioned_model.posterior(X_full)
        event_shape_lag = (1 if is_fully_bayesian(self.model) else 2)
        n_w = (posterior.event_shape[(X_full.dim() - event_shape_lag)] // X_full.shape[(- 2)])
        q_in = (X.shape[(- 2)] * n_w)
        self._set_sampler(q_in=q_in, posterior=posterior)
        samples = self._get_f_X_samples(posterior=posterior, q_in=q_in)
        return (self._compute_qehvi(samples=samples, X=X) + self._prev_nehvi)

    # NOTE(review): decorators reconstructed — the source had them garbled to
    # bare `_pending_points` / `_batch_mode_transform()` expressions. These are
    # the standard BoTorch acquisition decorators; confirm against upstream.
    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X):
        values = ConformalAcquisition._conformal_fwd(self, X)
        return values
class FunctionEvent(FormattedTimesMixin):
    """Profiler record for one CPU-side function call and its launched kernels."""

    def __init__(self, id, name, thread, cpu_start, cpu_end):
        self.id = id
        self.name = name
        self.cpu_interval = Interval(cpu_start, cpu_end)
        self.thread = thread
        self.kernels = []
        self.count = 1

    def append_kernel(self, name, device, start, end):
        """Attach a device kernel launched by this function call."""
        kernel = Kernel(name, device, Interval(start, end))
        self.kernels.append(kernel)

    def cuda_time_total(self):
        """Total elapsed microseconds summed over all attached kernels."""
        return sum(k.interval.elapsed_us() for k in self.kernels)

    def cpu_time_total(self):
        """Elapsed microseconds of the CPU-side interval."""
        return self.cpu_interval.elapsed_us()

    def key(self):
        """Grouping key: events are aggregated by name."""
        return self.name

    def __repr__(self):
        return (f'<FunctionEvent id={self.id} cpu_time={self.cpu_time_str}'
                f' cuda_time={self.cuda_time_str} name={self.name} thread={self.thread}>')
def test_empty_arrays_cartesian():
    """cartesian of two empty arrays must raise; concatenate must succeed.

    Fix: the original read ``two = one = ak.Array([])``, which silently
    clobbered the ``one`` created on the previous line (dead assignment) and
    aliased both names to the same object. The two arrays are now built
    independently, as the test intends.
    """
    one = ak.Array([])
    two = ak.Array([])
    with pytest.raises(ValueError) as err:
        to_list(ak.operations.cartesian([one, two]))
    assert isinstance(err.value, ValueError)
    # Concatenation of empty arrays along axis 0 should not raise.
    to_list(ak.operations.concatenate([one, two], axis=0))
def print_net(model, namescope='gpu_0'):
    """Log every op of a caffe2 model as `input -> output` rows with blob shapes.

    Args:
        model: caffe2 model whose `net` proto is walked op by op.
        namescope: only ops whose first output starts with this prefix are
            printed; pass None to print everything.
    """
    logger.info('Printing model: {}'.format(model.net.Name()))
    op_list = model.net.Proto().op
    for op in op_list:
        input_name = op.input  # repeated proto field: list of input blob names
        output_name = str(op.output[0])
        op_type = op.type
        op_name = op.name
        if ((namescope is None) or output_name.startswith(namescope)):
            # Skip gradient and momentum ('__m') blobs — only forward activations.
            if ((output_name.find('grad') >= 0) or (output_name.find('__m') >= 0)):
                continue
            try:
                output_shape = workspace.FetchBlob(output_name).shape
            except BaseException:
                # Blob may not exist in the workspace yet (e.g. before a run).
                output_shape = '<unknown>'
            first_blob = True
            # Empty op names contribute nothing; otherwise append ':name'.
            op_label = (op_type + (op_name if (op_name == '') else (':' + op_name)))
            suffix = ' ------- (op: {})'.format(op_label)
            for j in range(len(input_name)):
                if (input_name[j] in model.params):
                    continue  # parameters are not interesting as data inputs
                input_blob = workspace.FetchBlob(input_name[j])
                if isinstance(input_blob, np.ndarray):
                    input_shape = input_blob.shape
                    logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(c2_utils.UnscopeName(str(input_name[j])), '{}'.format(input_shape), c2_utils.UnscopeName(str(output_name)), '{}'.format(output_shape), suffix))
                    # Only the first printed input row carries the op label;
                    # subsequent rows get a continuation marker.
                    if first_blob:
                        first_blob = False
                        suffix = ' ------|'
    logger.info('End of model: {}'.format(model.net.Name()))
def keyword_filter(caption) -> bool:
    """Return True when *caption* contains any keyword from the module-level KEYWORDS map."""
    return any(kw in caption for kws in KEYWORDS.values() for kw in kws)
def train_epoch(data_loader, model, optimizer, lr_scheduler, evaluator, logger, **kwargs):
    """Run one training epoch, logging per-batch loss/accuracy/ANLS metrics.

    Performs one optimizer step per batch (no gradient accumulation); when the
    model exposes a retrieval loss (`outputs.ret_loss`) it is added to the
    answer loss and logged separately, along with retrieval precision when
    page-index labels are available.
    """
    model.model.train()
    for (batch_idx, batch) in enumerate(tqdm(data_loader)):
        gt_answers = batch['answers']
        (outputs, pred_answers, pred_answer_page, answer_conf) = model.forward(batch, return_pred_answer=True)
        # Total loss optionally includes the retrieval head's loss.
        loss = ((outputs.loss + outputs.ret_loss) if hasattr(outputs, 'ret_loss') else outputs.loss)
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        metric = evaluator.get_metrics(gt_answers, pred_answers)
        batch_acc = np.mean(metric['accuracy'])
        batch_anls = np.mean(metric['anls'])
        # Note: logged 'Batch loss' is the answer loss only, not loss + ret_loss.
        log_dict = {'Train/Batch loss': outputs.loss.item(), 'Train/Batch Accuracy': batch_acc, 'Train/Batch ANLS': batch_anls, 'lr': optimizer.param_groups[0]['lr']}
        if hasattr(outputs, 'ret_loss'):
            log_dict['Train/Batch retrieval loss'] = outputs.ret_loss.item()
        # Retrieval precision requires every sample in the batch to have a page label.
        if (('answer_page_idx' in batch) and (None not in batch['answer_page_idx'])):
            ret_metric = evaluator.get_retrieval_metric(batch.get('answer_page_idx', None), pred_answer_page)
            batch_ret_prec = np.mean(ret_metric)
            log_dict['Train/Batch Ret. Prec.'] = batch_ret_prec
        logger.logger.log(log_dict, step=((logger.current_epoch * logger.len_dataset) + batch_idx))
def agg_runs(dir, metric_best='auto'):
    """Aggregate per-seed training stats under *dir* into `dir/agg/{split}`.

    For every seed subdirectory: picks the best epoch on the 'val' split by
    *metric_best* (higher is better; 'auto' prefers 'auc', else 'accuracy'),
    then collects per-epoch stats and best-epoch stats for each split across
    seeds. Writes aggregated `stats.json` / `best.json` (and optional
    TensorBoard summaries) under `dir/agg`.
    """
    results = {'train': None, 'val': None, 'test': None}       # per-epoch stats across seeds
    results_best = {'train': None, 'val': None, 'test': None}  # best-epoch stats across seeds
    for seed in os.listdir(dir):
        if is_seed(seed):
            dir_seed = os.path.join(dir, seed)
            # Determine the best epoch from the validation split only.
            split = 'val'
            if (split in os.listdir(dir_seed)):
                dir_split = os.path.join(dir_seed, split)
                fname_stats = os.path.join(dir_split, 'stats.json')
                stats_list = json_to_dict_list(fname_stats)
                if (metric_best == 'auto'):
                    metric = ('auc' if ('auc' in stats_list[0]) else 'accuracy')
                else:
                    metric = metric_best
                # argmax => the chosen metric is assumed higher-is-better.
                performance_np = np.array([stats[metric] for stats in stats_list])
                best_epoch = stats_list[performance_np.argmax()]['epoch']
                print(best_epoch)
            for split in os.listdir(dir_seed):
                if is_split(split):
                    dir_split = os.path.join(dir_seed, split)
                    fname_stats = os.path.join(dir_split, 'stats.json')
                    stats_list = json_to_dict_list(fname_stats)
                    # Stats of this split at the val-selected best epoch.
                    stats_best = [stats for stats in stats_list if (stats['epoch'] == best_epoch)][0]
                    print(stats_best)
                    # Wrap each epoch's dict in a list so per-epoch lists can be joined across seeds.
                    stats_list = [[stats] for stats in stats_list]
                    if (results[split] is None):
                        results[split] = stats_list
                    else:
                        results[split] = join_list(results[split], stats_list)
                    if (results_best[split] is None):
                        results_best[split] = [stats_best]
                    else:
                        results_best[split] += [stats_best]
    # Drop splits that never appeared.
    results = {k: v for (k, v) in results.items() if (v is not None)}
    results_best = {k: v for (k, v) in results_best.items() if (v is not None)}
    # Average stats across seeds, per epoch and for the best epoch.
    for key in results:
        for i in range(len(results[key])):
            results[key][i] = agg_dict_list(results[key][i])
    for key in results_best:
        results_best[key] = agg_dict_list(results_best[key])
    for (key, value) in results.items():
        dir_out = os.path.join(dir, 'agg', key)
        makedirs_rm_exist(dir_out)
        fname = os.path.join(dir_out, 'stats.json')
        dict_list_to_json(value, fname)
        if cfg.tensorboard_agg:
            writer = SummaryWriter(dir_out)
            dict_list_to_tb(value, writer)
            writer.close()
    for (key, value) in results_best.items():
        dir_out = os.path.join(dir, 'agg', key)
        fname = os.path.join(dir_out, 'best.json')
        dict_to_json(value, fname)
    logging.info('Results aggregated across runs saved in {}'.format(os.path.join(dir, 'agg')))
def register_Ns3SimpleRefCount__Ns3Dot11sIeBeaconTimingUnit_Ns3Empty_Ns3DefaultDeleter__lt__ns3Dot11sIeBeaconTimingUnit__gt___methods(root_module, cls):
    """Register constructors for SimpleRefCount<dot11s::IeBeaconTimingUnit, ...>.

    PyBindGen-generated binding helper: adds the default constructor and the
    copy constructor to the wrapped class. Kept byte-identical to the
    generator's output.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter< ns3::dot11s::IeBeaconTimingUnit > > const &', 'o')])
    return
import pytest  # needed for the decorator restored below; harmless if already imported


@pytest.mark.parametrize('separate_eval', [True, False])  # fix: decorator line was garbled to `.parametrize(...)`, a syntax error
def test_concat_ade(separate_eval):
    """ConcatDataset over two ADE20K copies supports per-index and bulk format_results."""
    test_dataset = ADE20KDataset(pipeline=[], img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'))
    assert (len(test_dataset) == 5)
    concat_dataset = ConcatDataset([test_dataset, test_dataset], separate_eval=separate_eval)
    assert (len(concat_dataset) == 10)
    # Random 2x2 label maps with values in [0, 7) as pseudo predictions.
    pseudo_results = []
    for _ in range(len(concat_dataset)):
        (h, w) = (2, 2)
        pseudo_results.append(np.random.randint(low=0, high=7, size=(h, w)))
    # Per-index formatting.
    file_paths = []
    for i in range(len(pseudo_results)):
        file_paths.extend(concat_dataset.format_results([pseudo_results[i]], '.format_ade', indices=[i]))
    assert (len(file_paths) == len(concat_dataset))
    # ADE20K writes labels shifted by +1 (0 is reserved).
    temp = np.array(Image.open(file_paths[0]))
    assert np.allclose(temp, (pseudo_results[0] + 1))
    shutil.rmtree('.format_ade')
    # Bulk formatting of all results at once.
    file_paths = concat_dataset.format_results(pseudo_results, '.format_ade')
    assert (len(file_paths) == len(concat_dataset))
    temp = np.array(Image.open(file_paths[0]))
    assert np.allclose(temp, (pseudo_results[0] + 1))
    shutil.rmtree('.format_ade')
@register_model_architecture('ab_transformer_model', 'ab_transformer')  # fix: decorator was garbled to a bare `_model_architecture(...)` call
def ab_transformer_model(args):
    """Architecture preset 'ab_transformer': big (1024-dim, 12-layer, 16-head)
    encoder/decoder defaults, then delegates remaining defaults to
    ``base_architecture``. Only fills attributes not already set on *args*."""
    args.encoder_layers = getattr(args, 'encoder_layers', 12)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    base_architecture(args)
def _get_shared_name_to_stage_ops(ops):
    """Group the stage ops in *ops* by their 'shared_name' attribute.

    Returns a dict mapping shared_name -> list of ops, preserving the
    order in which the ops appear in *ops*.
    """
    grouped = {}
    for op in ops:
        if op.type not in STAGE_OP_TYPES:
            continue
        grouped.setdefault(op.get_attr('shared_name'), []).append(op)
    return grouped
def downsample_conv(in_chs, out_chs, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
    """Build a conv+BN downsample shortcut (no activation).

    Collapses to a 1x1 conv when neither stride nor dilation is in play;
    dilation is only honored for kernels larger than 1x1.
    """
    norm_layer = norm_layer or nn.BatchNorm2d
    if stride == 1 and dilation == 1:
        kernel_size = 1
    if kernel_size > 1:
        effective_dilation = first_dilation or dilation
    else:
        effective_dilation = 1
    return ConvBnAct(in_chs, out_chs, kernel_size, stride=stride,
                     dilation=effective_dilation, norm_layer=norm_layer, act_layer=None)
def set_vars_to_moving_average(moving_averager):
    """Return a grouped op assigning each MOVING_AVERAGE_VARIABLES variable its averaged value."""
    variables = tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES)
    assign_ops = [tf.assign(var, moving_averager.average(var)) for var in variables]
    return tf.group(*assign_ops)
def test_consistency():
    """spence on real inputs must agree with the complex code path to 1e-14."""
    grid = np.logspace(-30, 300, 200)
    # Column 0: complex-valued inputs; column 1: reference real-path outputs.
    table = np.vstack((grid + 0j, spence(grid))).T
    FuncData(spence, table, 0, 1, rtol=1e-14).check()
def p1_fit_plots():
    """Render each p1 metric curve on its own matplotlib figure."""
    for metric in ('test_acc', 'train_acc', 'train_loss', 'test_loss'):
        plt.figure()
        p1(metric)
def resnet18(pretrained=False, encoder=False, **kwargs):
    """Construct a ResNet-18.

    Args:
        pretrained: load ImageNet weights from the model zoo.
        encoder: build the encoder variant instead of the full classifier.
    """
    layers = [2, 2, 2, 2]
    model_cls = ResNet_Encoder if encoder else ResNet
    model = model_cls(BasicBlock, layers, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model
def load_checkpoint(fpath):
    """Load a torch checkpoint from *fpath* onto the CPU.

    Raises ValueError when *fpath* is not an existing file.
    """
    if not os.path.isfile(fpath):
        raise ValueError("=> No checkpoint found at '{}'".format(fpath))
    checkpoint = torch.load(fpath, map_location=torch.device('cpu'))
    print("=> Loaded checkpoint '{}'".format(fpath))
    return checkpoint
def update_function_order_in_functsions_yaml():
    """Assign and validate function IDs in build-tools' functions.yaml.

    Walks every category/function, derives each function's packed-argument
    signature, records existing `function_ids`, allocates fresh IDs
    (continuing after the current maximum) for any signature that lacks one,
    and aborts with a report when two entries share the same ID. The updated
    mapping is written back to functions.yaml.
    """
    d = utils.load_yaml_ordered(open(join(here, 'functions.yaml'), 'r'))
    order_info_by_id = {}          # func_id -> full name (for duplicate detection)
    order_info = OrderedDict()     # func_name -> {full_name: func_id}
    duplicated = {}                # func_id -> [full names sharing it]
    missing = {}                   # cat -> func -> [signatures lacking an id]
    for (cat_name, cat_info) in d.items():
        for (func_name, func_info) in d[cat_name].items():
            order_info[func_name] = OrderedDict()
            default_full_name = func_name
            default_arg = ''
            # Build the signature string from each argument's pack format.
            if ('arguments' in func_info):
                for (arg, arg_info) in func_info['arguments'].items():
                    default_arg += type_to_pack_format(arg_info['type'])
            if (default_arg == ''):
                default_arg = 'Empty'
            else:
                default_full_name = ((func_name + '_') + default_arg)
            if (('function_ids' in func_info) and (func_info['function_ids'] is not None)):
                for (func_arg, func_id) in func_info['function_ids'].items():
                    full_name = func_name
                    if (func_arg != 'Empty'):
                        full_name = ((func_name + '_') + func_arg)
                    # Track duplicate IDs so they can be reported at the end.
                    if (func_id in order_info_by_id):
                        if (func_id not in duplicated):
                            duplicated[func_id] = [order_info_by_id[func_id]]
                        duplicated[func_id].append(full_name)
                    order_info_by_id[func_id] = full_name
                    order_info[func_name][full_name] = func_id
                # Existing ids present, but not for the current default signature.
                if (default_full_name not in order_info[func_name]):
                    if (cat_name not in missing):
                        missing[cat_name] = {}
                    if (func_name not in missing[cat_name]):
                        missing[cat_name][func_name] = []
                    missing[cat_name][func_name].append(default_arg)
            else:
                # No function_ids at all for this function.
                if (cat_name not in missing):
                    missing[cat_name] = {}
                if (func_name not in missing[cat_name]):
                    missing[cat_name][func_name] = []
                missing[cat_name][func_name].append(default_arg)
            if ('c_runtime' not in func_info):
                func_info['c_runtime'] = 'not support'
    # Next free id = current maximum + 1 (assumes at least one existing id).
    current_id = (sorted(order_info_by_id.keys()).pop() + 1)
    if missing:
        # Allocate fresh ids for every missing signature and mirror them
        # into the API-level yaml.
        with open_api_level_yaml() as api_level_yaml:
            for cat_name in missing:
                for func_name in missing[cat_name]:
                    for arg in missing[cat_name][func_name]:
                        if (('function_ids' not in d[cat_name][func_name]) or (d[cat_name][func_name]['function_ids'] is None)):
                            d[cat_name][func_name]['function_ids'] = OrderedDict()
                        api_level_yaml.append_new_id(((func_name + '_') + arg), current_id)
                        d[cat_name][func_name]['function_ids'][arg] = current_id
                        current_id += 1
    if len(duplicated):
        # Duplicate ids cannot be fixed automatically: report and abort.
        print('')
        print(' Errors in functions.yaml(START)')
        for (func_id, functions) in duplicated.items():
            if (len(functions) > 1):
                print('ID {} duplicated between {}.'.format(func_id, functions))
                print('Correct ID in "build-tools/code_generator/functions.yaml" manually.')
        print(' Errors in functions.yaml(END)')
        print('')
        import sys
        sys.exit((- 1))
    utils.dump_yaml(d, open(join(here, 'functions.yaml'), 'w'), default_flow_style=False, width=80)
def build_spk_hashtable(base_folder_dm, sample_rate):
    """Index WSJ0 wav files by speaker id.

    Recursively scans ``base_folder_dm`` for ``.wav`` files and groups them
    by speaker id — taken as the first three characters of the file stem
    (WSJ0 naming convention, per the slicing below).

    Args:
        base_folder_dm: root folder containing the wav files.
        sample_rate: expected sample rate; every file is checked against it.

    Returns:
        Tuple ``(spk_hashtable, spk_weights)``: a dict mapping speaker id to
        its list of utterance paths, and a list of per-speaker utterance
        counts (in dict insertion order, matching the dict's keys).
    """
    wsj0_utterances = glob.glob(os.path.join(base_folder_dm, '**/*.wav'), recursive=True)
    spk_hashtable = {}
    for utt in wsj0_utterances:
        spk_id = Path(utt).stem[:3]
        # NOTE: assert is stripped under `python -O`; kept for behavioral parity.
        assert torchaudio.info(utt).sample_rate == sample_rate
        # setdefault replaces the original if/else insert-or-append branches.
        spk_hashtable.setdefault(spk_id, []).append(utt)
    spk_weights = [len(utts) for utts in spk_hashtable.values()]
    return (spk_hashtable, spk_weights)
def main():
    """Run CenterNet (Hourglass) object detection on a video file or webcam.

    Reads frames from ``--video`` ('webcam' selects device 0), runs the
    detector, draws every box whose score is >= 0.3, and writes the
    annotated frames as an XVID ``.avi`` into ``--output``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--video', default='webcam', type=str)
    parser.add_argument('--output', default='output', type=str)
    parser.add_argument('--inres', default='512,512', type=str)
    parser.add_argument('--outres', default='1080,1920', type=str)
    parser.add_argument('--max-frames', default=1000000, type=int)
    parser.add_argument('--fps', default=25.0, type=float)
    args, _ = parser.parse_known_args()
    args.inres = tuple(int(x) for x in args.inres.split(','))
    args.outres = tuple(int(x) for x in args.outres.split(','))
    os.makedirs(args.output, exist_ok=True)
    kwargs = {'num_stacks': 2, 'cnv_dim': 256, 'weights': 'ctdet_coco', 'inres': args.inres}
    # COCO detection heads: 80-class heatmap, center offset, box width/height.
    heads = {'hm': 80, 'reg': 2, 'wh': 2}
    model = HourglassNetwork(heads=heads, **kwargs)
    model = CtDetDecode(model)
    drawer = COCODrawer()
    letterbox_transformer = LetterboxTransformer(args.inres[0], args.inres[1])
    cap = cv2.VideoCapture(0 if args.video == 'webcam' else args.video)
    out_fn = os.path.join(args.output, 'ctdet.' + os.path.basename(args.video)).replace('.mp4', '.avi')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # VideoWriter expects (width, height); outres is (height, width).
    out = cv2.VideoWriter(out_fn, fourcc, args.fps, args.outres[::-1])
    k = 0
    tic = time.time()
    while cap.isOpened():
        if k > args.max_frames:
            print('Bye')
            break
        if k > 0 and k % 100 == 0:
            toc = time.time()
            print('[%05d]: %.3f seconds / 100 iterations' % (k, toc - tic))
            tic = toc
        k += 1
        ret, img = cap.read()
        if not ret:
            print('Done')
            break
        pimg = letterbox_transformer(img)
        pimg = normalize_image(pimg)
        pimg = np.expand_dims(pimg, 0)
        detections = model.predict(pimg)[0]
        for d in detections:
            x1, y1, x2, y2, score, cl = d
            # assumes detections are sorted by descending score — TODO confirm
            if score < 0.3:
                break
            x1, y1, x2, y2 = letterbox_transformer.correct_box(x1, y1, x2, y2)
            img = drawer.draw_box(img, x1, y1, x2, y2, cl)
        out.write(img)
    # Fix: release the capture and the writer so the output file is finalized.
    cap.release()
    out.release()
    print('Video saved to: %s' % out_fn)
def is_parallel(model):
    """Return True when *model* is wrapped in DataParallel or DistributedDataParallel."""
    parallel_wrappers = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return isinstance(model, parallel_wrappers)
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Return the decoded ``PATH_INFO`` from a WSGI environ.

    The raw WSGI bytes are decoded with *charset*/*errors*; a missing
    ``PATH_INFO`` key is treated as an empty path.
    """
    raw_path = environ.get('PATH_INFO', '')
    path_bytes = wsgi_get_bytes(raw_path)
    return to_unicode(path_bytes, charset, errors, allow_none_charset=True)
def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None:
    """Run ``ninja -v`` in *build_directory*, raising on failure.

    :param build_directory: directory containing the generated build.ninja.
    :param verbose: stream ninja's output to stdout instead of capturing it.
    :param error_prefix: prefix for the RuntimeError message on failure.
    :raises RuntimeError: when ninja exits nonzero; includes ninja's captured
        output when available, chained from the CalledProcessError.
    """
    command = ['ninja', '-v']
    num_workers = _get_num_workers(verbose)
    if num_workers is not None:
        command.extend(['-j', str(num_workers)])
    env = os.environ.copy()
    if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env:
        # Not inside a VS dev prompt: synthesize the MSVC environment for the
        # current platform, letting existing env vars fill any gaps.
        from distutils.util import get_platform
        from distutils._msvccompiler import _get_vc_env
        plat_name = get_platform()
        plat_spec = PLAT_TO_VCVARS[plat_name]
        vc_env = {k.upper(): v for k, v in _get_vc_env(plat_spec).items()}
        for k, v in env.items():
            uk = k.upper()
            if uk not in vc_env:
                vc_env[uk] = v
        env = vc_env
    try:
        # Flush so ninja's output is not interleaved out of order with ours.
        sys.stdout.flush()
        sys.stderr.flush()
        # stdout=1 (the stdout fileno) keeps warnings visible in verbose mode;
        # otherwise capture output so it can be attached to the error message.
        # (Dead Python<3.5 fallback removed — this file already requires 3.6+.)
        subprocess.run(command, stdout=(1 if verbose else subprocess.PIPE), stderr=subprocess.STDOUT, cwd=build_directory, check=True, env=env)
    except subprocess.CalledProcessError as e:
        # ``e`` already is the exception — no need to re-fetch via sys.exc_info().
        message = error_prefix
        if e.output:
            message += ': {}'.format(e.output.decode())
        raise RuntimeError(message) from e
def vsd(R_est, t_est, R_gt, t_gt, model, depth_test, K, delta, tau, cost_type='tlinear'):
    """Visible Surface Discrepancy between an estimated and a GT 6D pose.

    Renders the object's depth under both poses, estimates visibility masks
    against the test depth image, and averages a per-pixel matching cost over
    the union of the two visible surfaces.

    :param R_est, t_est: estimated rotation/translation.
    :param R_gt, t_gt: ground-truth rotation/translation.
    :param model: object model passed to the renderer.
    :param depth_test: test depth image; its shape fixes the render size.
    :param K: camera intrinsics matrix.
    :param delta: tolerance used in the visibility estimation.
    :param tau: misalignment tolerance of the pixel cost.
    :param cost_type: 'step' or 'tlinear' (truncated linear).
    :return: VSD error in [0, 1]; 1.0 when the union of visible masks is empty.
    :raises ValueError: for an unknown ``cost_type``.
    """
    im_size = (depth_test.shape[1], depth_test.shape[0])
    depth_est = renderer.render(model, im_size, K, R_est, t_est, clip_near=100, clip_far=10000, mode='depth')
    depth_gt = renderer.render(model, im_size, K, R_gt, t_gt, clip_near=100, clip_far=10000, mode='depth')
    # Convert depth images to distance images for the visibility tests.
    dist_test = misc.depth_im_to_dist_im(depth_test, K)
    dist_gt = misc.depth_im_to_dist_im(depth_gt, K)
    dist_est = misc.depth_im_to_dist_im(depth_est, K)
    visib_gt = visibility.estimate_visib_mask_gt(dist_test, dist_gt, delta)
    visib_est = visibility.estimate_visib_mask_est(dist_test, dist_est, visib_gt, delta)
    visib_inter = np.logical_and(visib_gt, visib_est)
    visib_union = np.logical_or(visib_gt, visib_est)
    costs = np.abs(dist_gt[visib_inter] - dist_est[visib_inter])
    if cost_type == 'step':
        costs = (costs >= tau)
    elif cost_type == 'tlinear':
        # Truncated linear cost: linear in the misalignment, clipped at 1.
        costs *= (1.0 / tau)
        costs[costs > 1.0] = 1.0
    else:
        # Fix: raise instead of print + exit(-1) so callers can handle it.
        raise ValueError('Unknown pixel matching cost: {}'.format(cost_type))
    visib_union_count = visib_union.sum()
    visib_comp_count = visib_union_count - visib_inter.sum()
    if visib_union_count > 0:
        # Pixels visible in only one surface contribute the maximal cost (1).
        e = (costs.sum() + visib_comp_count) / float(visib_union_count)
    else:
        e = 1.0
    return e
class PoseFeature(nn.Module):
    """Fuse image features with encoded coordinate maps.

    The input feature map is projected to ``hidden_dim`` channels, the
    coordinate map is encoded by ``CoordEncoder``, and the two are
    concatenated channel-wise and fused by a second conv layer.
    """

    def __init__(self, cfg, input_dim, hidden_dim=128, num_layers=4):
        super(PoseFeature, self).__init__()
        self.CoordEncoder = CoordEncoder(cfg, hidden_dim, num_layers)
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_dim, hidden_dim, kernel_size=(3, 3), padding=(1, 1)),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=(3, 3), padding=(1, 1)),
            nn.ReLU(),
        )

    def forward(self, feat, coord):
        """Return the fused feature map for image features and coordinates."""
        feat_emb = self.conv1(feat)
        coord_emb = self.CoordEncoder(coord)
        fused = torch.cat([feat_emb, coord_emb], dim=1)
        return self.conv2(fused)
class PythonCodeExecutor(object):
    """Execute Python code inside a gdb inferior.

    Allocates strings and Python objects in the inferior's memory and drives
    the inferior's C API (malloc/free, PyString/PyUnicode, PyRun_String)
    through ``gdb.parse_and_eval`` expressions.
    """
    # CPython compile start tokens (values from Python's compile.h).
    Py_single_input = 256
    Py_file_input = 257
    Py_eval_input = 258
    def malloc(self, size):
        """malloc ``size`` bytes in the inferior; raise gdb.GdbError on failure."""
        chunk = gdb.parse_and_eval(('(void *) malloc((size_t) %d)' % size))
        pointer = pointervalue(chunk)
        if (pointer == 0):
            raise gdb.GdbError('No memory could be allocated in the inferior.')
        return pointer
    def alloc_string(self, string):
        """Copy ``string`` into freshly malloc'ed inferior memory; return the address."""
        pointer = self.malloc(len(string))
        get_selected_inferior().write_memory(pointer, string)
        return pointer
    def alloc_pystring(self, string):
        """Create a Python string object in the inferior from ``string``.

        Tries the Python 2 ``PyString_FromStringAndSize`` symbol first and
        falls back to the ``PyUnicode*_FromStringAndSize`` variant when it is
        absent. The temporary C buffer is always freed.

        :raises gdb.GdbError: if the string object could not be created.
        """
        stringp = self.alloc_string(string)
        PyString_FromStringAndSize = 'PyString_FromStringAndSize'
        try:
            gdb.parse_and_eval(PyString_FromStringAndSize)
        except RuntimeError:
            # Symbol not found: Python 3 inferior — use the PyUnicode variant.
            PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' % (get_inferior_unicode_postfix(),))
        try:
            result = gdb.parse_and_eval(('(PyObject *) %s((char *) %d, (size_t) %d)' % (PyString_FromStringAndSize, stringp, len(string))))
        finally:
            self.free(stringp)
        pointer = pointervalue(result)
        if (pointer == 0):
            raise gdb.GdbError('Unable to allocate Python string in the inferior.')
        return pointer
    def free(self, pointer):
        """free() inferior memory previously obtained via :meth:`malloc`."""
        gdb.parse_and_eval(('free((void *) %d)' % pointer))
    def incref(self, pointer):
        """Increment the refcount of the inferior PyObject at ``pointer``."""
        gdb.parse_and_eval(('Py_IncRef((PyObject *) %d)' % pointer))
    def xdecref(self, pointer):
        """Decrement the refcount (Py_DecRef tolerates NULL)."""
        gdb.parse_and_eval(('Py_DecRef((PyObject *) %d)' % pointer))
    def evalcode(self, code, input_type, global_dict=None, local_dict=None):
        """Run ``code`` in the inferior via PyRun_String and return the result.

        :param code: source text; must not contain NUL bytes.
        :param input_type: one of the ``Py_*_input`` start tokens above.
        :param global_dict: inferior PyObject for the globals namespace.
        :param local_dict: inferior PyObject for the locals namespace.
        :raises gdb.GdbError: on NUL bytes or missing namespaces.
        """
        if ('\x00' in code):
            raise gdb.GdbError('String contains NUL byte.')
        # NUL-terminate for the C string API.
        code += '\x00'
        pointer = self.alloc_string(code)
        globalsp = pointervalue(global_dict)
        localsp = pointervalue(local_dict)
        if ((globalsp == 0) or (localsp == 0)):
            raise gdb.GdbError('Unable to obtain or create locals or globals.')
        code = ('\n PyRun_String(\n (char *) %(code)d,\n (int) %(start)d,\n (PyObject *) %(globals)s,\n (PyObject *) %(locals)d)\n ' % dict(code=pointer, start=input_type, globals=globalsp, locals=localsp))
        with FetchAndRestoreError():
            try:
                pyobject_return_value = gdb.parse_and_eval(code)
            finally:
                # Free the source buffer even if evaluation fails.
                self.free(pointer)
        return pyobject_return_value
def _search_src_or_doc(what, string, extra1='', extra2='', extra3='', extra4='', extra5='', **kwargs):
    """Search the Sage source tree or the built HTML docs for a regexp.

    :param what: 'src' searches source files; anything else searches the docs.
    :param string: main regular expression to match.
    :param extra1..extra5: additional patterns that must also match.
    Keyword options: ``interact`` (page the results instead of returning
    them), ``path_re`` (regexp filtering file paths), ``module`` (subtree
    to search), ``whole_word``, ``ignore_case``, ``multiline`` (match the
    whole file content instead of line by line).
    :return: the matches as a single string when ``interact`` is false.
    """
    interact = kwargs.get('interact', True)
    path_re = kwargs.get('path_re', '')
    module = kwargs.get('module', 'sage')
    whole_word = kwargs.get('whole_word', False)
    ignore_case = kwargs.get('ignore_case', True)
    multiline = kwargs.get('multiline', False)
    if (what == 'src'):
        # Searching source: resolve the module path under SAGE_SRC/sage.
        base_path = SAGE_SRC
        if (module.find('sage') == 0):
            module = module[4:].lstrip('.')
            base_path = os.path.join(base_path, 'sage')
        module = module.replace('.', os.sep)
        exts = ['py', 'pyx', 'pxd']
        title = 'Source Code'
    else:
        # Searching the built HTML documentation.
        module = ''
        exts = ['html']
        title = 'Documentation'
        base_path = os.path.join(SAGE_DOC, 'html')
        if (not os.path.exists(base_path)):
            print('Warning: the Sage documentation is not available')
    # Length of the prefix to strip from reported filenames.
    strip = len(base_path)
    results = []
    regexp = string
    extra_regexps = extras = [extra1, extra2, extra3, extra4, extra5]
    if whole_word:
        regexp = (('\\b' + regexp) + '\\b')
        extra_regexps = [('\\b%s\\b' % e) for e in extra_regexps]
    if ignore_case:
        flags = re.IGNORECASE
    else:
        flags = 0
    for (dirpath, dirs, files) in os.walk(os.path.join(base_path, module)):
        try:
            # Skip static assets of the HTML docs.
            dirs.remove('_static')
        except ValueError:
            pass
        for f in files:
            # Only visible files with one of the expected extensions.
            if ((not f.startswith('.')) and re.search((('\\.(' + '|'.join(exts)) + ')$'), f)):
                filename = os.path.join(dirpath, f)
                if re.search(path_re, filename):
                    if multiline:
                        # Whole-file matching: report just the filename.
                        with open(filename) as fobj:
                            line = fobj.read()
                        if re.search(regexp, line, flags):
                            match_list = line
                        else:
                            match_list = None
                        for extra in extra_regexps:
                            if (extra and match_list):
                                if (not re.search(extra, match_list)):
                                    match_list = None
                        if match_list:
                            results.append((filename[strip:].lstrip('/') + '\n'))
                    else:
                        # Line-by-line matching: report filename:lineno:line.
                        with open(filename) as fobj:
                            match_list = [(lineno, line) for (lineno, line) in enumerate(fobj) if re.search(regexp, line, flags)]
                        for extra in extra_regexps:
                            if extra:
                                match_list = [s for s in match_list if re.search(extra, s[1], (re.MULTILINE | flags))]
                        for (num, line) in match_list:
                            results.append('{}:{}:{}'.format(filename[strip:].lstrip('/'), (num + 1), line))
    text_results = ''.join(results).rstrip()
    if (not interact):
        return text_results
    html_results = format_search_as_html(title, results, ([string] + extras))
    from IPython.core.page import page
    if (not isinstance(text_results, str)):
        text_results = text_results.decode('utf-8', 'replace')
    page({'text/plain': text_results})
class StreamElementsSpeech(VoiceBase):
    """Text-to-speech backend that fetches audio over HTTP and plays it.

    NOTE(review): this block appears mangled in extraction — ``_setup`` has
    no body and the ``tts_url`` f-string below is truncated (the request URL
    literal is missing). Restore both from version control before use.
    """
    def _setup(self) -> None:
    def _speech(self, text: str, voice: str, _: int=0) -> bool:
        """Fetch TTS audio for ``text``/``voice``, play it, and return success."""
        # NOTE(review): truncated f-string — the endpoint URL was lost.
        tts_url = f'
        response = requests.get(tts_url)
        if (response.status_code == 200):
            # Write to a temp mp3, play it, then clean up.
            with open('speech.mp3', 'wb') as f:
                f.write(response.content)
            playsound('speech.mp3')
            os.remove('speech.mp3')
            return True
        else:
            logging.error('Request failed with status code: %s, response content: %s', response.status_code, response.content)
            return False
def code_to_sequence(code, code_dict, collapse_code):
    """Map a sequence of codes to their ids via ``code_dict``.

    Codes absent from ``code_dict`` are dropped. With ``collapse_code``,
    consecutive repeats of the same code are collapsed to one id; without
    it, every in-vocabulary code is kept and a warning is printed when more
    than 5% of the input codes are out of vocabulary.

    :param code: iterable of codes.
    :param code_dict: mapping from code to id.
    :param collapse_code: collapse consecutive duplicates when True.
    :return: list of ids.
    """
    if collapse_code:
        prev_c = None
        sequence = []
        for c in code:
            if c in code_dict and c != prev_c:
                sequence.append(code_dict[c])
                prev_c = c
    else:
        sequence = [code_dict[c] for c in code if c in code_dict]
        if len(sequence) < 0.95 * len(code):
            # Fix: '%%' in a non-%-formatted string printed a literal '%%'.
            print('WARNING : over 5% codes are OOV')
    return sequence
class GoldenRatio(Constant):
    """The golden ratio ``phi = (1 + sqrt(5)) / 2``.

    Provides conversions to other computer algebra systems and coercions
    into float, real double, MPFR real, algebraic and sympy representations.
    """
    def __init__(self, name='golden_ratio'):
        """Register the constant with per-system conversion strings."""
        conversions = dict(mathematica='(1+Sqrt[5])/2', gp='(1+sqrt(5))/2', maple='(1+sqrt(5))/2', maxima='(1+sqrt(5))/2', pari='(1+sqrt(5))/2', octave='(1+sqrt(5))/2', kash='(1+Sqrt(5))/2', giac='(1+sqrt(5))/2')
        Constant.__init__(self, name, conversions=conversions, latex='\\phi', domain='positive')
    def minpoly(self, bits=None, degree=None, epsilon=0):
        """Return the minimal polynomial ``x^2 - x - 1`` over QQ."""
        from sage.rings.rational_field import QQ
        x = QQ['x'].gen(0)
        return (x ** 2) - x - 1
    def __float__(self):
        """Return phi as a float; ``0.5 + sqrt(1.25) == (1 + sqrt(5)) / 2``."""
        return 0.5 + math.sqrt(1.25)
    def _real_double_(self, R):
        """Return phi in the real double field ``R``.

        Fix: previously returned ``R('1.')`` (i.e. 1.0), which is not the
        golden ratio; use the same closed form as ``__float__``.
        """
        return R(0.5 + math.sqrt(1.25))
    def _mpfr_(self, R):
        """Return phi computed at the precision of the real field ``R``."""
        return (R(1) + R(5).sqrt()) / R(2)
    def _algebraic_(self, field):
        """Return phi as an exact element of the given algebraic field."""
        import sage.rings.qqbar
        return field(sage.rings.qqbar.get_AA_golden_ratio())
    def _sympy_(self):
        """Return sympy's GoldenRatio constant."""
        import sympy
        return sympy.GoldenRatio
def annotate_heatmap(im, data=None, valfmt='{x:.2f}', textcolors=('black', 'white'), threshold=None, **textkw):
    """Write the value of every heatmap cell on top of the image.

    :param im: the AxesImage to annotate (its array is used when *data* is
        not a list/ndarray).
    :param data: optional data to annotate instead of the image's own array.
    :param valfmt: format for the values — a format string or a Formatter.
    :param textcolors: (low, high) colors chosen per cell by comparing the
        normalized value against *threshold*.
    :param threshold: data-units threshold for the color switch; defaults to
        the middle of the normalized range.
    :param textkw: extra keyword arguments forwarded to ``text``.
    :return: list of the created text artists.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold into the image's color scale.
    threshold = im.norm(threshold) if threshold is not None else im.norm(data.max()) / 2.0
    kw = dict(horizontalalignment='center', verticalalignment='center')
    kw.update(textkw)
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    texts = []
    for row in range(data.shape[0]):
        for col in range(data.shape[1]):
            value = data[(row, col)]
            # Dark text on light cells, light text on dark cells.
            use_high = im.norm(value) > threshold
            kw.update(color=textcolors[int(use_high)])
            texts.append(im.axes.text(col, row, valfmt(value, None), **kw))
    return texts
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """Output container for seq2seq (encoder-decoder) question-answering models.

    Bundles span start/end logits with the usual encoder/decoder states.
    NOTE(review): ModelOutput subclasses are normally @dataclass-decorated —
    confirm the decorator was not lost in extraction.
    """
    # Optional loss tensor (present when labels were provided — verify at caller).
    loss: Optional[torch.FloatTensor] = None
    # Per-token scores for the answer-span start and end positions.
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    # Cached decoder key/value states.
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Decoder-side hidden states and attentions (one entry per layer, when requested).
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder-side outputs.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
class SamPreTrainedModel(metaclass=DummyObject):
    """Placeholder exposed when the 'torch' backend is unavailable.

    Any attempt to construct it is routed through ``requires_backends``,
    which reports the missing backend.
    """
    # Backends the real implementation requires.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class NewbobAbs(LearningRateControl):
    """Newbob learning-rate control based on the absolute error difference
    between the two most recent evaluated epochs."""
    # NOTE(review): takes ``cls`` and calls super(NewbobAbs, cls) — presumably
    # @classmethod upstream; confirm the decorator was not lost in extraction.
    def load_initial_kwargs_from_config(cls, config):
        """Extend the base kwargs with ``error_threshold`` from the config."""
        kwargs = super(NewbobAbs, cls).load_initial_kwargs_from_config(config)
        kwargs.update({'error_threshold': config.float('newbob_error_threshold', (- 0.01))})
        return kwargs
    def __init__(self, error_threshold, **kwargs):
        """:param float error_threshold: error-difference threshold above which
        the learning rate is decayed (see calc_learning_rate_for_epoch)."""
        super(NewbobAbs, self).__init__(**kwargs)
        self.error_threshold = error_threshold
    def calc_learning_rate_for_epoch(self, epoch):
        """Return the learning rate to use for ``epoch``.

        Falls back to the default rate when there is no usable history;
        otherwise compares the errors of the last two epochs (same error key
        required) and decays when the difference exceeds ``error_threshold``,
        else grows — via ``calc_learning_rate_decay_or_grow``.
        """
        last_epoch = self.get_last_epoch(epoch)
        if (last_epoch is None):
            # No history yet.
            return self.default_learning_rate
        learning_rate = self.epoch_data[last_epoch].learning_rate
        if (learning_rate is None):
            return self.default_learning_rate
        last2_epoch = self.get_last_epoch(last_epoch)
        if (last2_epoch is None):
            # Only one evaluated epoch: keep the current rate.
            return learning_rate
        (old_key, old_error) = self.get_epoch_error_key_value(last2_epoch)
        (new_key, new_error) = self.get_epoch_error_key_value(last_epoch)
        if ((old_error is None) or (new_error is None)):
            return learning_rate
        if (old_key != new_key):
            # Errors are not comparable across different keys.
            return learning_rate
        error_diff = (new_error - old_error)
        learning_rate = self.calc_learning_rate_decay_or_grow(learning_rate, decay=(error_diff > self.error_threshold))
        return learning_rate
class MultiPeriodDiscriminator(torch.nn.Module):
    """Bundle of period discriminators, one per reshape in ``h.mpd_reshapes``."""

    def __init__(self, h):
        super(MultiPeriodDiscriminator, self).__init__()
        self.mpd_reshapes = h.mpd_reshapes
        print('mpd_reshapes: {}'.format(self.mpd_reshapes))
        self.discriminators = nn.ModuleList(
            DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm)
            for rs in self.mpd_reshapes
        )

    def forward(self, y, y_hat, flg_train=False):
        """Run every sub-discriminator on the real and generated signals.

        Returns (real outputs, generated outputs, real feature maps,
        generated feature maps), each a list with one entry per
        sub-discriminator.
        """
        real_outs, gen_outs = [], []
        real_fmaps, gen_fmaps = [], []
        for disc in self.discriminators:
            out_real, fmap_real = disc(y, flg_train=flg_train)
            out_gen, fmap_gen = disc(y_hat, flg_train=flg_train)
            real_outs.append(out_real)
            real_fmaps.append(fmap_real)
            gen_outs.append(out_gen)
            gen_fmaps.append(fmap_gen)
        return (real_outs, gen_outs, real_fmaps, gen_fmaps)
def low_weight_bases(N, p, m, NN, weightbound):
    """Return q-expansion bases of modular forms of low even weight.

    For each even weight k = 2, 4, ..., weightbound, computes a basis of
    ModularForms(N, k) over Z/(p^m) to precision NN.

    :return: list of bases, one list of q-expansions per weight.
    """
    base_ring = Zmod(p ** m)
    return [
        list(ModularForms(N, k, base_ring=base_ring).q_expansion_basis(prec=NN))
        for k in range(2, weightbound + 2, 2)
    ]
class CMRC2018Loss(LossBase):
    """Span-extraction loss for CMRC2018: cross-entropy over start and end
    positions, with out-of-context positions masked to -inf."""

    def __init__(self, target_start=None, target_end=None, context_len=None, pred_start=None, pred_end=None, reduction='mean'):
        super().__init__()
        assert reduction in ('mean', 'sum')
        self._init_param_map(
            target_start=target_start,
            target_end=target_end,
            context_len=context_len,
            pred_start=pred_start,
            pred_end=pred_end,
        )
        self.reduction = reduction

    def get_loss(self, target_start, target_end, context_len, pred_start, pred_end):
        """Return the (averaged) start+end cross-entropy loss.

        Positions beyond ``context_len`` are masked with -inf before the
        softmax inside cross_entropy, so they receive zero probability.
        """
        batch_size, max_len = pred_end.size()
        outside_context = seq_len_to_mask(context_len, max_len).eq(False)
        masked_start = pred_start.masked_fill(outside_context, float('-inf'))
        masked_end = pred_end.masked_fill(outside_context, float('-inf'))
        total = (
            F.cross_entropy(masked_start, target_start, reduction='sum')
            + F.cross_entropy(masked_end, target_end, reduction='sum')
        )
        if self.reduction == 'mean':
            total = total / batch_size
        # Average over the two objectives (start and end).
        return total / 2
class ListOffsetArray(ListOffsetMeta[Content], Content):
def __init__(self, offsets, content, *, parameters=None):
if ((not isinstance(offsets, Index)) and (offsets.dtype in (np.dtype(np.int32), np.dtype(np.uint32), np.dtype(np.int64)))):
raise TypeError("{} 'offsets' must be an Index with dtype in (int32, uint32, int64), not {}".format(type(self).__name__, repr(offsets)))
if (not isinstance(content, Content)):
raise TypeError("{} 'content' must be a Content subtype, not {}".format(type(self).__name__, repr(content)))
if (content.backend.index_nplike.known_data and (offsets.length is not unknown_length) and (offsets.length == 0)):
raise ValueError(f'{type(self).__name__} len(offsets) ({offsets.length}) must be >= 1')
if ((parameters is not None) and (parameters.get('__array__') == 'string')):
if ((not content.is_numpy) or (not (content.parameter('__array__') == 'char'))):
raise ValueError("{} is a string, so its 'content' must be uint8 NumpyArray of char, not {}".format(type(self).__name__, repr(content)))
if ((parameters is not None) and (parameters.get('__array__') == 'bytestring')):
if ((not content.is_numpy) or (not (content.parameter('__array__') == 'byte'))):
raise ValueError("{} is a bytestring, so its 'content' must be uint8 NumpyArray of byte, not {}".format(type(self).__name__, repr(content)))
assert (offsets.nplike is content.backend.index_nplike)
self._offsets = offsets
self._content = content
self._init(parameters, content.backend)
    def offsets(self) -> Index:
        """The offsets Index delimiting each sublist in the content."""
        # NOTE(review): accessed as an attribute elsewhere in this class —
        # presumably @property upstream; the decorator may have been stripped.
        return self._offsets
    form_cls: Final = ListOffsetForm
    def copy(self, offsets=UNSET, content=UNSET, *, parameters=UNSET):
        """Return a copy, replacing only the fields explicitly passed."""
        return ListOffsetArray((self._offsets if (offsets is UNSET) else offsets), (self._content if (content is UNSET) else content), parameters=(self._parameters if (parameters is UNSET) else parameters))
    def __copy__(self):
        """Shallow copy: shares offsets, content and parameters."""
        return self.copy()
    def __deepcopy__(self, memo):
        """Deep copy of offsets, content and parameters."""
        return self.copy(offsets=copy.deepcopy(self._offsets, memo), content=copy.deepcopy(self._content, memo), parameters=copy.deepcopy(self._parameters, memo))
    def simplified(cls, offsets, content, *, parameters=None):
        """Construct directly; this layout needs no simplification."""
        # NOTE(review): takes ``cls`` — presumably @classmethod upstream.
        return cls(offsets, content, parameters=parameters)
    def starts(self) -> Index:
        """Start offset of each sublist (all offsets except the last)."""
        return self._offsets[:(- 1)]
    def stops(self):
        """Stop offset of each sublist (all offsets except the first)."""
        return self._offsets[1:]
    def _form_with_key(self, getkey: Callable[([Content], (str | None))]) -> ListOffsetForm:
        """Build this array's Form, using ``getkey`` to assign form keys."""
        form_key = getkey(self)
        return self.form_cls(self._offsets.form, self._content._form_with_key(getkey), parameters=self._parameters, form_key=form_key)
    def _to_buffers(self, form: Form, getkey: Callable[([Content, Form, str], str)], container: MutableMapping[(str, ArrayLike)], backend: Backend, byteorder: str):
        """Serialize the offsets buffer into ``container`` and recurse into content."""
        assert isinstance(form, self.form_cls)
        key = getkey(self, form, 'offsets')
        # Offsets are stored byteorder-adjusted under the generated key.
        container[key] = ak._util.native_to_byteorder(self._offsets.raw(backend.index_nplike), byteorder)
        self._content._to_buffers(form.content, getkey, container, backend, byteorder)
    def _to_typetracer(self, forget_length: bool) -> Self:
        """Return a data-less (typetracer) version, optionally dropping lengths."""
        offsets = self._offsets.to_nplike(TypeTracer.instance())
        return ListOffsetArray((offsets.forget_length() if forget_length else offsets), self._content._to_typetracer(forget_length), parameters=self._parameters)
    def _touch_data(self, recursive: bool):
        """Mark data as touched (typetracer bookkeeping); optionally recurse."""
        self._offsets._touch_data()
        if recursive:
            self._content._touch_data(recursive)
    def _touch_shape(self, recursive: bool):
        """Mark shape as touched (typetracer bookkeeping); optionally recurse."""
        self._offsets._touch_shape()
        if recursive:
            self._content._touch_shape(recursive)
    def length(self) -> ShapeItem:
        """Number of sublists: one fewer than the number of offsets."""
        # NOTE(review): used as ``self.length`` elsewhere — presumably @property.
        return (self._offsets.length - 1)
    def __repr__(self):
        return self._repr('', '', '')
    def _repr(self, indent, pre, post):
        """Render an XML-like debug representation at the given indent."""
        out = [indent, pre, '<ListOffsetArray len=']
        out.append(repr(str(self.length)))
        out.append('>')
        out.extend(self._repr_extra((indent + ' ')))
        out.append('\n')
        out.append(self._offsets._repr((indent + ' '), '<offsets>', '</offsets>\n'))
        out.append(self._content._repr((indent + ' '), '<content>', '</content>\n'))
        out.append((indent + '</ListOffsetArray>'))
        out.append(post)
        return ''.join(out)
    def to_ListOffsetArray64(self, start_at_zero: bool=False) -> ListOffsetArray:
        """Return an equivalent array with 64-bit offsets.

        When ``start_at_zero`` and the first offset is nonzero (or unknown),
        the offsets are shifted down and the content trimmed so that
        ``offsets[0] == 0``.
        """
        known_starts_at_zero = (self._backend.index_nplike.known_data and (self._offsets[0] == 0))
        if (start_at_zero and (not known_starts_at_zero)):
            offsets = Index64((self._offsets.data - self._offsets[0]), nplike=self._backend.index_nplike)
            return ListOffsetArray(offsets, self._content[self._offsets[0]:], parameters=self._parameters)
        else:
            return ListOffsetArray(self._offsets.to64(), self._content, parameters=self._parameters)
    def to_RegularArray(self):
        """Convert to a RegularArray; the kernel verifies all lists share one size."""
        (start, stop) = (self._offsets[0], self._offsets[self._backend.index_nplike.shape_item_as_index((self._offsets.length - 1))])
        content = self._content._getitem_range(start, stop)
        # The kernel writes the common list size into this one-element buffer.
        _size = Index64.empty(1, self._backend.index_nplike)
        assert ((_size.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_toRegularArray', _size.dtype.type, self._offsets.dtype.type)](_size.data, self._offsets.data, self._offsets.length))
        size = self._backend.index_nplike.index_as_shape_item(_size[0])
        length = (self._offsets.length - 1)
        return ak.contents.RegularArray(content, size, length, parameters=self._parameters)
    def _getitem_nothing(self):
        """Return a zero-length slice of the content."""
        return self._content._getitem_range(0, 0)
    def _getitem_at(self, where: IndexType):
        """Return sublist ``where`` as a range of the content."""
        if ((not is_unknown_scalar(where)) and (where < 0)):
            # Normalize negative indices against the array length.
            length_index = self._backend.index_nplike.shape_item_as_index(self.length)
            where += length_index
        if (not (is_unknown_scalar(where) or (self.length is unknown_length) or (0 <= where < self.length))):
            raise ak._errors.index_error(self, where)
        (start, stop) = (self._offsets[where], self._offsets[(where + 1)])
        return self._content._getitem_range(start, stop)
    def _getitem_range(self, start: IndexType, stop: IndexType) -> Content:
        """Return the slice [start:stop] of sublists, sharing the content."""
        if (not self._backend.nplike.known_data):
            self._touch_shape(recursive=False)
            return self
        # There is one more offset than lists: take offsets[start:stop+1].
        offsets = self._offsets[start:(stop + 1)]
        if ((offsets.length is not unknown_length) and (offsets.length == 0)):
            # An empty selection still needs a single (zero) offset.
            offsets = Index(self._backend.index_nplike.zeros(1, dtype=self._offsets.dtype), nplike=self._backend.index_nplike)
        return ListOffsetArray(offsets, self._content, parameters=self._parameters)
    def _getitem_field(self, where: (str | SupportsIndex), only_fields: tuple[(str, ...)]=()) -> Content:
        """Project a single record field through every sublist."""
        return ListOffsetArray(self._offsets, self._content._getitem_field(where, only_fields), parameters=None)
    def _getitem_fields(self, where: list[(str | SupportsIndex)], only_fields: tuple[(str, ...)]=()) -> Content:
        """Project several record fields through every sublist."""
        return ListOffsetArray(self._offsets, self._content._getitem_fields(where, only_fields), parameters=None)
    def _carry(self, carry: Index, allow_lazy: bool) -> Content:
        """Reorder/duplicate sublists by ``carry``; result is a ListArray view."""
        assert isinstance(carry, ak.index.Index)
        try:
            nextstarts = self.starts[carry.data]
            nextstops = self.stops[carry.data]
        except IndexError as err:
            raise ak._errors.index_error(self, carry.data, str(err)) from err
        return ak.contents.ListArray(nextstarts, nextstops, self._content, parameters=self._parameters)
    def _compact_offsets64(self, start_at_zero: bool) -> Index64:
        """Return the offsets, shifted to start at zero when requested and needed."""
        if ((not start_at_zero) or (self._backend.index_nplike.known_data and (self._offsets[0] == 0))):
            return self._offsets
        else:
            return Index64((self._offsets.data - self._offsets[0]), nplike=self._backend.index_nplike)
    def _broadcast_tooffsets64(self, offsets: Index) -> ListOffsetArray:
        """Rebuild this array on the given zero-based ``offsets``, validating shape."""
        self._touch_data(recursive=False)
        offsets._touch_data()
        index_nplike = self._backend.index_nplike
        assert (offsets.nplike is index_nplike)
        if ((offsets.length is not unknown_length) and (offsets.length == 0)):
            raise AssertionError('broadcast_tooffsets64 can only be used with non-empty offsets')
        elif (index_nplike.known_data and (offsets[0] != 0)):
            raise AssertionError(f'broadcast_tooffsets64 can only be used with offsets that start at 0, not {offsets[0]}')
        elif ((offsets.length is not unknown_length) and (self._offsets.length is not unknown_length) and (offsets.length != self._offsets.length)):
            raise AssertionError('cannot broadcast RegularArray of length {} to length {}'.format(self.length, (offsets.length - 1)))
        this_start = self._offsets[0]
        this_zero_offsets = self._offsets.data
        if (index_nplike.known_data and (this_start == 0)):
            next_content = self._content
        else:
            # Shift our offsets to zero-based and trim the content to match.
            this_zero_offsets = (this_zero_offsets - this_start)
            next_content = self._content[this_start:]
        if (index_nplike.known_data and (not index_nplike.array_equal(this_zero_offsets, offsets.data))):
            raise ValueError('cannot broadcast nested list')
        return ListOffsetArray(offsets, next_content[:offsets[(- 1)]], parameters=self._parameters)
    def _getitem_next_jagged(self, slicestarts: Index, slicestops: Index, slicecontent: Content, tail) -> Content:
        """Delegate jagged slicing to the equivalent ListArray view."""
        out = ak.contents.ListArray(self.starts, self.stops, self._content, parameters=self._parameters)
        return out._getitem_next_jagged(slicestarts, slicestops, slicecontent, tail)
    def _getitem_next(self, head: (SliceItem | tuple), tail: tuple[(SliceItem, ...)], advanced: (Index | None)) -> Content:
        """Apply slice item ``head`` to this array, then recurse with ``tail``.

        ``advanced`` carries the NumPy-style advanced-indexing state (or
        None). The implementation dispatches on the type of ``head``.
        """
        if (head is NO_HEAD):
            return self
        elif is_integer_like(head):
            # Integer head: pick element ``head`` from every sublist.
            assert (advanced is None)
            lenstarts = (self._offsets.length - 1)
            (starts, stops) = (self.starts, self.stops)
            (nexthead, nexttail) = ak._slicing.head_tail(tail)
            nextcarry = Index64.empty(lenstarts, self._backend.index_nplike)
            assert ((nextcarry.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike) and (stops.nplike is self._backend.index_nplike))
            self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_at', nextcarry.dtype.type, starts.dtype.type, stops.dtype.type)](nextcarry.data, starts.data, stops.data, lenstarts, head), slicer=head)
            nextcontent = self._content._carry(nextcarry, True)
            return nextcontent._getitem_next(nexthead, nexttail, advanced)
        elif isinstance(head, slice):
            # Slice head: apply the slice within every sublist via kernels.
            (nexthead, nexttail) = ak._slicing.head_tail(tail)
            lenstarts = (self._offsets.length - 1)
            (start, stop, step) = (head.start, head.stop, head.step)
            step = (1 if (step is None) else step)
            start = (ak._util.kSliceNone if (start is None) else start)
            stop = (ak._util.kSliceNone if (stop is None) else stop)
            # First kernel pass: count how many elements the slice keeps.
            carrylength = Index64.empty(1, self._backend.index_nplike)
            assert ((carrylength.nplike is self._backend.index_nplike) and (self.starts.nplike is self._backend.index_nplike) and (self.stops.nplike is self._backend.index_nplike))
            self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_range_carrylength', carrylength.dtype.type, self.starts.dtype.type, self.stops.dtype.type)](carrylength.data, self.starts.data, self.stops.data, lenstarts, start, stop, step), slicer=head)
            # Allocate nextoffsets with the same dtype family as the starts.
            # NOTE(review): no else-branch — an unexpected dtype would leave
            # ``nextoffsets`` unbound (NameError); confirm against upstream.
            if (self._starts.dtype == 'int64'):
                nextoffsets = Index64.empty((lenstarts + 1), nplike=self._backend.index_nplike)
            elif (self._starts.dtype == 'int32'):
                nextoffsets = ak.index.Index32.empty((lenstarts + 1), nplike=self._backend.index_nplike)
            elif (self._starts.dtype == 'uint32'):
                nextoffsets = ak.index.IndexU32.empty((lenstarts + 1), nplike=self._backend.index_nplike)
            nextcarry = Index64.empty(carrylength[0], self._backend.index_nplike)
            assert ((nextoffsets.nplike is self._backend.index_nplike) and (nextcarry.nplike is self._backend.index_nplike) and (self.starts.nplike is self._backend.index_nplike) and (self.stops.nplike is self._backend.index_nplike))
            self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_range', nextoffsets.dtype.type, nextcarry.dtype.type, self.starts.dtype.type, self.stops.dtype.type)](nextoffsets.data, nextcarry.data, self.starts.data, self.stops.data, lenstarts, start, stop, step), slicer=head)
            nextcontent = self._content._carry(nextcarry, True)
            if ((advanced is None) or ((advanced.length is not unknown_length) and (advanced.length == 0))):
                return ak.contents.ListOffsetArray(nextoffsets, nextcontent._getitem_next(nexthead, nexttail, advanced), parameters=self._parameters)
            else:
                # Spread the advanced index across the new sublist lengths.
                total = Index64.empty(1, self._backend.index_nplike)
                assert ((total.nplike is self._backend.index_nplike) and (nextoffsets.nplike is self._backend.index_nplike))
                self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_range_counts', total.dtype.type, nextoffsets.dtype.type)](total.data, nextoffsets.data, lenstarts), slicer=head)
                nextadvanced = Index64.empty(total[0], self._backend.index_nplike)
                assert ((nextadvanced.nplike is self._backend.index_nplike) and (advanced.nplike is self._backend.index_nplike) and (nextoffsets.nplike is self._backend.index_nplike))
                self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_range_spreadadvanced', nextadvanced.dtype.type, advanced.dtype.type, nextoffsets.dtype.type)](nextadvanced.data, advanced.data, nextoffsets.data, lenstarts), slicer=head)
                return ak.contents.ListOffsetArray(nextoffsets, nextcontent._getitem_next(nexthead, nexttail, nextadvanced), parameters=self._parameters)
        elif isinstance(head, str):
            return self._getitem_next_field(head, tail, advanced)
        elif isinstance(head, list):
            return self._getitem_next_fields(head, tail, advanced)
        elif (head is np.newaxis):
            return self._getitem_next_newaxis(tail, advanced)
        elif (head is Ellipsis):
            return self._getitem_next_ellipsis(tail, advanced)
        elif isinstance(head, Index64):
            # Integer-array head (advanced indexing).
            (nexthead, nexttail) = ak._slicing.head_tail(tail)
            flathead = self._backend.index_nplike.reshape(self._backend.index_nplike.asarray(head.data), ((- 1),))
            lenstarts = self.starts.length
            regular_flathead = Index64(flathead)
            if ((advanced is None) or ((advanced.length is not unknown_length) and (advanced.length == 0))):
                nextcarry = Index64.empty((lenstarts * flathead.length), self._backend.index_nplike)
                nextadvanced = Index64.empty((lenstarts * flathead.length), self._backend.index_nplike)
                assert ((nextcarry.nplike is self._backend.index_nplike) and (nextadvanced.nplike is self._backend.index_nplike) and (regular_flathead.nplike is self._backend.index_nplike))
                self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_array', nextcarry.dtype.type, nextadvanced.dtype.type, regular_flathead.dtype.type)](nextcarry.data, nextadvanced.data, self.starts.data, self.stops.data, regular_flathead.data, lenstarts, regular_flathead.length, self._content.length), slicer=head)
                nextcontent = self._content._carry(nextcarry, True)
                out = nextcontent._getitem_next(nexthead, nexttail, nextadvanced)
                if (advanced is None):
                    # NOTE(review): ``metadata.get`` is called with three
                    # arguments — the closing parenthesis is presumably
                    # misplaced (``self.length`` looks like it should be an
                    # argument of getitem_next_array_wrap); confirm upstream.
                    return ak._slicing.getitem_next_array_wrap(out, head.metadata.get('shape', (head.length,), self.length))
                else:
                    return out
            else:
                nextcarry = Index64.empty(self.length, self._backend.index_nplike)
                nextadvanced = Index64.empty(self.length, self._backend.index_nplike)
                assert ((nextcarry.nplike is self._backend.index_nplike) and (nextadvanced.nplike is self._backend.index_nplike) and (self.starts.nplike is self._backend.index_nplike) and (self.stops.nplike is self._backend.index_nplike) and (regular_flathead.nplike is self._backend.index_nplike) and (advanced.nplike is self._backend.index_nplike))
                self._maybe_index_error(self._backend[('awkward_ListArray_getitem_next_array_advanced', nextcarry.dtype.type, nextadvanced.dtype.type, self.starts.dtype.type, self.stops.dtype.type, regular_flathead.dtype.type, advanced.dtype.type)](nextcarry.data, nextadvanced.data, self.starts.data, self.stops.data, regular_flathead.data, advanced.data, lenstarts, regular_flathead.length, self._content.length), slicer=head)
                nextcontent = self._content._carry(nextcarry, True)
                return nextcontent._getitem_next(nexthead, nexttail, nextadvanced)
        elif isinstance(head, ak.contents.ListOffsetArray):
            # Jagged slice: delegate to the ListArray view of this array.
            listarray = ak.contents.ListArray(self.starts, self.stops, self._content, parameters=self._parameters)
            return listarray._getitem_next(head, tail, advanced)
        elif isinstance(head, ak.contents.IndexedOptionArray):
            return self._getitem_next_missing(head, tail, advanced)
        else:
            raise AssertionError(repr(head))
def _offsets_and_flattened(self, axis: int, depth: int) -> tuple[(Index, Content)]:
    """Return ``(offsets, flattened)`` for flattening this array at ``axis``.

    The returned ``offsets`` index is empty (length 0) when the flattening
    occurred deeper than this node; otherwise it describes the flattened
    lists directly.

    Raises
    ------
    AxisError
        If ``axis`` resolves to 0 (there is nothing outer to flatten into).
    ValueError
        If this node carries the string/bytestring parameter (strings are
        atomic and cannot be flattened into characters).
    """
    posaxis = maybe_posaxis(self, axis, depth)
    if (posaxis is not None) and ((posaxis + 1) == depth):
        raise AxisError('axis=0 not allowed for flatten')
    elif (posaxis is not None) and ((posaxis + 1) == (depth + 1)):
        if (self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring'):
            # FIX: message previously ended with a stray '/' instead of '.'.
            raise ValueError('array of strings cannot be directly flattened. To flatten this array, drop the `"__array__"="string"` parameter using `ak.enforce_type`, `ak.with_parameter`, or `ak.without_parameters`.')
        # Compact to zero-based int64 offsets, then trim the content to the
        # region the offsets actually cover.
        listoffsetarray = self.to_ListOffsetArray64(True)
        stop = listoffsetarray.offsets[(- 1)]
        content = listoffsetarray.content._getitem_range(0, stop)
        return (listoffsetarray.offsets, content)
    else:
        (inneroffsets, flattened) = self._content._offsets_and_flattened(axis, (depth + 1))
        # An empty offsets index signals "flattened below this node".
        offsets = Index64.zeros(0, nplike=self._backend.index_nplike, dtype=np.int64)
        if (inneroffsets.length is not unknown_length) and (inneroffsets.length == 0):
            return (offsets, ListOffsetArray(self._offsets, flattened, parameters=self._parameters))
        elif (self._offsets.length is not unknown_length) and (self._offsets.length == 1):
            # A single offset means zero sublists; just carry the inner start.
            tooffsets = Index64([inneroffsets[0]])
            return (offsets, ListOffsetArray(tooffsets, flattened, parameters=self._parameters))
        else:
            # Compose this node's offsets with the inner offsets via kernel.
            tooffsets = Index64.empty(self._offsets.length, self._backend.index_nplike, dtype=np.int64)
            assert ((tooffsets.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike) and (inneroffsets.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_flatten_offsets', tooffsets.dtype.type, self._offsets.dtype.type, inneroffsets.dtype.type)](tooffsets.data, self._offsets.data, self._offsets.length, inneroffsets.data, inneroffsets.length))
            return (offsets, ListOffsetArray(tooffsets, flattened, parameters=self._parameters))
def _mergeable_next(self, other: Content, mergebool: bool) -> bool:
    """Return True if ``other`` can be merged (concatenated) with this array."""
    # Identity-like and union contents always merge.
    if other.is_identity_like or other.is_union:
        return True
    # Peel indexed/option wrappers and retry against the wrapped content.
    if other.is_indexed or other.is_option:
        return self._mergeable_next(other.content, mergebool)
    # Mismatched type parameters block the merge outright.
    if not type_parameters_equal(self._parameters, other._parameters):
        return False
    list_like = (ak.contents.RegularArray, ak.contents.ListArray, ak.contents.ListOffsetArray)
    if isinstance(other, list_like):
        # Lists merge when their contents do.
        return self._content._mergeable_next(other.content, mergebool)
    # A multidimensional NumpyArray is compared as a regular list of primitives.
    if isinstance(other, ak.contents.NumpyArray) and len(other.shape) > 1:
        return self._mergeable_next(other._to_regular_primitive(), mergebool)
    return False
def _mergemany(self, others: Sequence[Content]) -> Content:
    """Concatenate ``others`` after this array, delegating to ListArray.

    The result is converted back to a 64-bit ListOffsetArray when every
    other array is a ListOffsetArray with matching offsets dtype.
    """
    if not others:
        return self
    # ListArray implements the actual merge; view this array as one.
    as_listarray = ak.contents.ListArray(self.starts, self.stops, self._content, parameters=self._parameters)
    merged = as_listarray._mergemany(others)
    all_same_offsets = all(
        isinstance(x, ListOffsetArray) and (x._offsets.dtype == self._offsets.dtype)
        for x in others
    )
    return merged.to_ListOffsetArray64(False) if all_same_offsets else merged
def _fill_none(self, value: Content) -> Content:
    """Replace missing values in the content with ``value``; offsets are unchanged."""
    filled_content = self._content._fill_none(value)
    return ListOffsetArray(self._offsets, filled_content, parameters=self._parameters)
def _local_index(self, axis, depth):
    """Return an array of intra-list positions (0, 1, 2, ...) at ``axis``.

    ``maybe_posaxis`` resolves ``axis`` relative to ``depth`` into an
    absolute position (or None for branching/unknown cases).
    """
    index_nplike = self._backend.index_nplike
    posaxis = maybe_posaxis(self, axis, depth)
    if ((posaxis is not None) and ((posaxis + 1) == depth)):
        # axis points at this node itself: generic axis-0 helper.
        return self._local_index_axis0()
    elif ((posaxis is not None) and ((posaxis + 1) == (depth + 1))):
        # axis points at the sublists: emit one 0..len(sublist)-1 ramp each.
        offsets = self._compact_offsets64(True)
        if self._backend.nplike.known_data:
            # Total inner length is the last offset value.
            innerlength = index_nplike.index_as_shape_item(offsets[(index_nplike.shape_item_as_index(offsets.length) - 1)])
        else:
            # Typetracer path: lengths unknown; record that data was touched.
            self._touch_data(recursive=False)
            innerlength = unknown_length
        localindex = Index64.empty(innerlength, index_nplike)
        assert ((localindex.nplike is index_nplike) and (offsets.nplike is index_nplike))
        # Kernel fills localindex with a per-list counting sequence.
        self._backend.maybe_kernel_error(self._backend[('awkward_ListArray_localindex', localindex.dtype.type, offsets.dtype.type)](localindex.data, offsets.data, (offsets.length - 1)))
        return ak.contents.ListOffsetArray(offsets, ak.contents.NumpyArray(localindex.data))
    else:
        # axis is deeper: recurse into the content, keeping these offsets.
        return ak.contents.ListOffsetArray(self._offsets, self._content._local_index(axis, (depth + 1)))
def _numbers_to_type(self, name, including_unknown):
    """Convert numeric content to the named type; the list structure is preserved."""
    converted = self._content._numbers_to_type(name, including_unknown)
    return ak.contents.ListOffsetArray(self._offsets, converted, parameters=self._parameters)
def _is_unique(self, negaxis, starts, parents, outlength):
    """Return True if values are unique along the axis described by ``negaxis``.

    Strings/bytestrings are compared as whole values and only support
    axis=-1 (``negaxis == depth``); other cases delegate to the content.
    """
    # An empty array (no sublists) is trivially unique.
    if ((self._offsets.length - 1) == 0):
        return True
    (branch, depth) = self.branch_depth
    if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
        if (branch or ((negaxis is not None) and (negaxis != depth))):
            raise ValueError('array with strings can only be checked on uniqueness with axis=-1')
        if isinstance(self._content, ak.contents.NumpyArray):
            # Deduplicate whole strings; unique iff no length change.
            (out, outoffsets) = self._content._as_unique_strings(self._offsets)
            out2 = ak.contents.ListOffsetArray(outoffsets, out, parameters=self._parameters)
            return (out2.length == self.length)
    if (negaxis is None):
        # Global (axis=None) uniqueness check: structure is irrelevant.
        return self._content._is_unique(negaxis, starts, parents, outlength)
    if ((not branch) and (negaxis == depth)):
        # The reduction axis is this list dimension: step inward.
        return self._content._is_unique((negaxis - 1), starts, parents, outlength)
    else:
        # Deeper axis: compute per-element parent list ids, then recurse.
        nextlen = self._backend.index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
        nextparents = Index64.empty(nextlen, self._backend.index_nplike)
        assert ((nextparents.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_nextparents_64', nextparents.dtype.type, self._offsets.dtype.type)](nextparents.data, self._offsets.data, (self._offsets.length - 1)))
        starts = self._offsets[:(- 1)]
        return self._content._is_unique(negaxis, starts, nextparents, outlength)
def _unique(self, negaxis, starts, parents, outlength):
    """Return unique values along the reduction axis described by ``negaxis``.

    Mirrors ``_sort_next``/``_argsort_next``: strings are deduplicated as
    atomic values, the ``negaxis == depth`` case rearranges the content via
    ``_rearrange_prepare_next``, and deeper axes recurse into the content.
    """
    # An empty array (no sublists) is already unique.
    if ((self._offsets.length - 1) == 0):
        return self
    (branch, depth) = self.branch_depth
    if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
        if (branch or (negaxis != depth)):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if isinstance(self._content, ak.contents.NumpyArray):
            # Deduplicate whole strings in one pass.
            (out, nextoffsets) = self._content._as_unique_strings(self._offsets)
            return ak.contents.ListOffsetArray(nextoffsets, out, parameters=self._parameters)
    if ((not branch) and (negaxis == depth)):
        if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if (self._backend.nplike.known_data and parents.nplike.known_data):
            assert ((self._offsets.length - 1) == parents.length)
            (distincts, maxcount, maxnextparents, nextcarry, nextparents, nextstarts) = self._rearrange_prepare_next(outlength, parents)
            nextcontent = self._content._carry(nextcarry, False)
            # FIX: maxnextparents is already a scalar shape item (see
            # _rearrange_prepare_next), so it must not be subscripted with
            # [0]; this now matches _sort_next and _reduce_next.
            outcontent = nextcontent._unique((negaxis - 1), nextstarts, nextparents, (maxnextparents + 1))
            # Map the rearranged result back into local list order.
            outcarry = Index64.empty(nextcarry.length, self._backend.index_nplike)
            assert ((outcarry.nplike is self._backend.index_nplike) and (nextcarry.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_local_preparenext_64', outcarry.dtype.type, nextcarry.dtype.type)](outcarry.data, nextcarry.data, nextcarry.length))
            return ak.contents.ListOffsetArray(outcontent._compact_offsets64(True), outcontent._content._carry(outcarry, False), parameters=self._parameters)
    else:
        # Deeper axis: compute per-element parent ids and recurse on trimmed content.
        nextlen = self._backend.index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
        nextparents = Index64.empty(nextlen, self._backend.index_nplike)
        assert ((nextparents.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_nextparents_64', nextparents.dtype.type, self._offsets.dtype.type)](nextparents.data, self._offsets.data, (self._offsets.length - 1)))
        trimmed = self._content[self._offsets[0]:self._offsets[(- 1)]]
        outcontent = trimmed._unique(negaxis, self._offsets[:(- 1)], nextparents, (self._offsets.length - 1))
        if ((negaxis is None) or (negaxis == (depth - 1))):
            return outcontent
        outoffsets = self._compact_offsets64(True)
        return ak.contents.ListOffsetArray(outoffsets, outcontent, parameters=self._parameters)
def _argsort_next(self, negaxis, starts, shifts, parents, outlength, ascending, stable):
    """Return an index array that would sort this array along ``negaxis``.

    Strings are argsorted as whole values via a dedicated kernel; the
    ``negaxis == depth`` case rearranges the content and recurses one
    level deeper; other axes recurse on the trimmed content.
    """
    (branch, depth) = self.branch_depth
    if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
        if (branch or (negaxis != depth)):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if isinstance(self._content, ak.contents.NumpyArray):
            # One carry slot per string (per sublist).
            nextcarry = Index64.empty((self._offsets.length - 1), self._backend.index_nplike)
            (self_starts, self_stops) = (self._offsets[:(- 1)], self._offsets[1:])
            assert ((nextcarry.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike) and (self._content.backend is self._backend) and (self_starts.nplike is self._backend.index_nplike) and (self_stops.nplike is self._backend.index_nplike))
            # Final True selects the argsort (index-returning) variant.
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_argsort_strings', nextcarry.dtype.type, parents.dtype.type, self._content.dtype.type, self_starts.dtype.type, self_stops.dtype.type)](nextcarry.data, parents.data, parents.length, self._content._data, self_starts.data, self_stops.data, stable, ascending, True))
            return ak.contents.NumpyArray(nextcarry.data, parameters=None, backend=self._backend)
    if ((not branch) and (negaxis == depth)):
        if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if (self._backend.nplike.known_data and parents.nplike.known_data):
            assert ((self._offsets.length - 1) == parents.length)
            (distincts, maxcount, maxnextparents, nextcarry, nextparents, nextstarts) = self._rearrange_prepare_next(outlength, parents)
            # Shifts account for missing values so argsort indices stay
            # relative to the original (unmasked) positions.
            nummissing = Index64.empty(maxcount, self._backend.index_nplike)
            missing = Index64.empty(self._offsets[(- 1)], self._backend.index_nplike)
            nextshifts = Index64.empty(nextcarry.length, self._backend.index_nplike)
            assert ((nummissing.nplike is self._backend.index_nplike) and (missing.nplike is self._backend.index_nplike) and (nextshifts.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike) and (starts.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike) and (nextcarry.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_nextshifts_64', nummissing.dtype.type, missing.dtype.type, nextshifts.dtype.type, self._offsets.dtype.type, starts.dtype.type, parents.dtype.type, nextcarry.dtype.type)](nummissing.data, missing.data, nextshifts.data, self._offsets.data, (self._offsets.length - 1), starts.data, parents.data, maxcount, nextcarry.length, nextcarry.data))
            nextcontent = self._content._carry(nextcarry, False)
            outcontent = nextcontent._argsort_next((negaxis - 1), nextstarts, nextshifts, nextparents, nextstarts.length, ascending, stable)
            # Map the rearranged result back into local list order.
            outcarry = Index64.empty(nextcarry.length, self._backend.index_nplike)
            assert ((outcarry.nplike is self._backend.index_nplike) and (nextcarry.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_local_preparenext_64', outcarry.dtype.type, nextcarry.dtype.type)](outcarry.data, nextcarry.data, nextcarry.length))
            out_offsets = self._compact_offsets64(True)
            out = outcontent._carry(outcarry, False)
            return ak.contents.ListOffsetArray(out_offsets, out, parameters=self._parameters)
    else:
        # Deeper axis: compute per-element parent ids and recurse on trimmed content.
        nextlen = self._backend.index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
        nextparents = Index64.empty(nextlen, self._backend.index_nplike)
        assert ((nextparents.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_nextparents_64', nextparents.dtype.type, self._offsets.dtype.type)](nextparents.data, self._offsets.data, (self._offsets.length - 1)))
        trimmed = self._content[self._offsets[0]:self._offsets[(- 1)]]
        outcontent = trimmed._argsort_next(negaxis, self._offsets[:(- 1)], shifts, nextparents, (self._offsets.length - 1), ascending, stable)
        outoffsets = self._compact_offsets64(True)
        return ak.contents.ListOffsetArray(outoffsets, outcontent, parameters=self._parameters)
def _sort_next(self, negaxis, starts, parents, outlength, ascending, stable):
    """Return this array sorted along the axis described by ``negaxis``.

    Structure parallels ``_argsort_next``, but values (not indices) are
    returned; the string kernel is called with its final flag False to
    select the sorting (carry-returning) variant.
    """
    (branch, depth) = self.branch_depth
    index_nplike = self._backend.index_nplike
    if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
        if (branch or (negaxis != depth)):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if isinstance(self._content, ak.contents.NumpyArray):
            # One carry slot per string (per sublist).
            nextcarry = Index64.empty((self._offsets.length - 1), index_nplike)
            (starts, stops) = (self._offsets[:(- 1)], self._offsets[1:])
            assert ((nextcarry.nplike is index_nplike) and (parents.nplike is index_nplike) and (self._content.backend is self._backend) and (starts.nplike is index_nplike) and (stops.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_argsort_strings', nextcarry.dtype.type, parents.dtype.type, self._content.dtype.type, starts.dtype.type, stops.dtype.type)](nextcarry.data, parents.data, parents.length, self._content._data, starts.data, stops.data, stable, ascending, False))
            # Apply the permutation to this array itself.
            return self._carry(nextcarry, False)
    if ((not branch) and (negaxis == depth)):
        if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
            raise AxisError('array with strings can only be sorted with axis=-1')
        if (self._backend.nplike.known_data and parents.nplike.known_data):
            assert ((self._offsets.length - 1) == parents.length)
            (distincts, maxcount, maxnextparents, nextcarry, nextparents, nextstarts) = self._rearrange_prepare_next(outlength, parents)
            nextcontent = self._content._carry(nextcarry, False)
            outcontent = nextcontent._sort_next((negaxis - 1), nextstarts, nextparents, (maxnextparents + 1), ascending, stable)
            # Map the rearranged result back into local list order.
            outcarry = Index64.empty(nextcarry.length, index_nplike)
            assert ((outcarry.nplike is index_nplike) and (nextcarry.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_local_preparenext_64', outcarry.dtype.type, nextcarry.dtype.type)](outcarry.data, nextcarry.data, nextcarry.length))
            return ak.contents.ListOffsetArray(self._compact_offsets64(True), outcontent._carry(outcarry, False), parameters=self._parameters)
    else:
        # Deeper axis: compute per-element parent ids and recurse on trimmed content.
        nextlen = index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
        nextparents = Index64.empty(nextlen, index_nplike)
        lenstarts = (self._offsets.length - 1)
        assert ((nextparents.nplike is index_nplike) and (self._offsets.nplike is index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_nextparents_64', nextparents.dtype.type, self._offsets.dtype.type)](nextparents.data, self._offsets.data, lenstarts))
        trimmed = self._content[self._offsets[0]:self._offsets[(- 1)]]
        outcontent = trimmed._sort_next(negaxis, self._offsets[:(- 1)], nextparents, lenstarts, ascending, stable)
        outoffsets = self._compact_offsets64(True)
        return ak.contents.ListOffsetArray(outoffsets, outcontent, parameters=self._parameters)
def _combinations(self, n, replacement, recordlookup, parameters, axis, depth):
    """Build n-tuples of elements drawn from each sublist (ak.combinations).

    ``replacement`` controls whether an element may pair with itself;
    ``recordlookup`` supplies optional field names for the output records.
    """
    index_nplike = self._backend.index_nplike
    posaxis = maybe_posaxis(self, axis, depth)
    if ((posaxis is not None) and ((posaxis + 1) == depth)):
        # axis points at this node itself: generic axis-0 helper.
        return self._combinations_axis0(n, replacement, recordlookup, parameters)
    elif ((posaxis is not None) and ((posaxis + 1) == (depth + 1))):
        if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
            raise ValueError('ak.combinations does not compute combinations of the characters of a string; please split it into lists')
        starts = self.starts
        stops = self.stops
        # First kernel pass: total number of combinations and per-list offsets.
        _totallen = Index64.empty(1, index_nplike, dtype=np.int64)
        offsets = Index64.empty((self.length + 1), index_nplike, dtype=np.int64)
        assert ((offsets.nplike is index_nplike) and (starts.nplike is index_nplike) and (stops.nplike is index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListArray_combinations_length', _totallen.data.dtype.type, offsets.data.dtype.type, starts.data.dtype.type, stops.data.dtype.type)](_totallen.data, offsets.data, n, replacement, starts.data, stops.data, self.length))
        totallen = self._backend.index_nplike.index_as_shape_item(_totallen[0])
        # tocarryraw holds raw pointers to the n carry buffers for the kernel.
        tocarryraw = ak.index.Index.empty(n, dtype=np.intp, nplike=index_nplike)
        tocarry = []
        for i in range(n):
            ptr = Index64.empty(totallen, nplike=index_nplike, dtype=np.int64)
            tocarry.append(ptr)
            if self._backend.nplike.known_data:
                tocarryraw[i] = ptr.ptr
        # Scratch cursors used by the combinations kernel.
        toindex = Index64.empty(n, index_nplike, dtype=np.int64)
        fromindex = Index64.empty(n, index_nplike, dtype=np.int64)
        assert ((toindex.nplike is index_nplike) and (fromindex.nplike is index_nplike) and (starts.nplike is index_nplike) and (stops.nplike is index_nplike))
        # Second kernel pass: fill the n carry buffers with element indices.
        self._backend.maybe_kernel_error(self._backend[('awkward_ListArray_combinations', np.int64, toindex.data.dtype.type, fromindex.data.dtype.type, starts.data.dtype.type, stops.data.dtype.type)](tocarryraw.data, toindex.data, fromindex.data, n, replacement, starts.data, stops.data, self.length))
        # Each carry buffer selects one tuple slot's elements from the content.
        contents = []
        for ptr in tocarry:
            contents.append(self._content._carry(ptr, True))
        recordarray = ak.contents.RecordArray(contents, recordlookup, parameters=parameters, backend=self._backend)
        return ak.contents.ListOffsetArray(offsets, recordarray, parameters=self._parameters)
    else:
        # axis is deeper: compact, then recurse into the content.
        compact = self.to_ListOffsetArray64(True)
        next = compact._content._combinations(n, replacement, recordlookup, parameters, axis, (depth + 1))
        return ak.contents.ListOffsetArray(compact.offsets, next, parameters=self._parameters)
def _reduce_next(self, reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior):
    """Apply ``reducer`` along the axis described by ``negaxis``.

    Requires compact int64 offsets starting at 0; otherwise normalizes
    first and retries. The ``negaxis == depth`` branch reduces across
    sublists (non-local); the other branch reduces within the content.
    """
    index_nplike = self._backend.index_nplike
    # Normalize to zero-based int64 offsets before reducing.
    if ((self._offsets.dtype != np.dtype(np.int64)) or (self._offsets.nplike.known_data and (self._offsets[0] != 0))):
        next = self.to_ListOffsetArray64(True)
        return next._reduce_next(reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior)
    (branch, depth) = self.branch_depth
    globalstarts_length = (self._offsets.length - 1)
    if ((not branch) and (negaxis == depth)):
        (distincts, maxcount, maxnextparents, nextcarry, nextparents, nextstarts) = self._rearrange_prepare_next(outlength, parents)
        # Output list boundaries, derived from the distinct parent slots.
        outstarts = Index64.empty(outlength, index_nplike)
        outstops = Index64.empty(outlength, index_nplike)
        assert ((outstarts.nplike is index_nplike) and (outstops.nplike is index_nplike) and (distincts.nplike is index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_outstartsstops_64', outstarts.dtype.type, outstops.dtype.type, distincts.dtype.type)](outstarts.data, outstops.data, distincts.data, distincts.length, outlength))
        if reducer.needs_position:
            # Positional reducers (argmin/argmax) need shifts to express
            # results relative to original (unmasked) positions.
            nextshifts = Index64.empty(nextcarry.length, index_nplike)
            nummissing = Index64.empty(maxcount, index_nplike)
            missing = Index64.empty(index_nplike.index_as_shape_item(self._offsets[(- 1)]), index_nplike)
            assert ((nummissing.nplike is index_nplike) and (missing.nplike is index_nplike) and (nextshifts.nplike is index_nplike) and (self._offsets.nplike is index_nplike) and (starts.nplike is index_nplike) and (parents.nplike is index_nplike) and (nextcarry.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_nextshifts_64', nummissing.dtype.type, missing.dtype.type, nextshifts.dtype.type, self._offsets.dtype.type, starts.dtype.type, parents.dtype.type, nextcarry.dtype.type)](nummissing.data, missing.data, nextshifts.data, self._offsets.data, globalstarts_length, starts.data, parents.data, maxcount, nextcarry.length, nextcarry.data))
        else:
            nextshifts = None
        nextcontent = self._content._carry(nextcarry, False)
        outcontent = nextcontent._reduce_next(reducer, (negaxis - 1), nextstarts, nextshifts, nextparents, (maxnextparents + 1), mask, False, behavior)
        out = ak.contents.ListArray(outstarts, outstops, outcontent, parameters=None)
        if keepdims:
            # Wrap in a length-1 regular dimension to keep the reduced axis.
            out = ak.contents.RegularArray(out, 1, self.length, parameters=None)
        return out
    else:
        # Deeper axis: compute per-element parent ids and recurse on trimmed content.
        nextlen = index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
        nextparents = Index64.empty(nextlen, index_nplike)
        assert ((nextparents.nplike is index_nplike) and (self._offsets.nplike is index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_nextparents_64', nextparents.dtype.type, self._offsets.dtype.type)](nextparents.data, self._offsets.data, globalstarts_length))
        trimmed = self._content[self.offsets[0]:self.offsets[(- 1)]]
        nextstarts = self.offsets[:(- 1)]
        outcontent = trimmed._reduce_next(reducer, negaxis, nextstarts, shifts, nextparents, globalstarts_length, mask, keepdims, behavior)
        # Rebuild this node's list structure from the parents mapping.
        outoffsets = Index64.empty((outlength + 1), index_nplike)
        assert ((outoffsets.nplike is index_nplike) and (parents.nplike is index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_local_outoffsets_64', outoffsets.dtype.type, parents.dtype.type)](outoffsets.data, parents.data, parents.length, outlength))
        if (keepdims and (depth == (negaxis + 1))):
            # keepdims at this depth must have produced a regular wrapper.
            assert outcontent.is_regular
        elif (depth >= (negaxis + 2)):
            assert (outcontent.is_list or outcontent.is_regular)
            outcontent = outcontent.to_ListOffsetArray64(False)
        return ak.contents.ListOffsetArray(outoffsets, outcontent, parameters=None)
def _rearrange_prepare_next(self, outlength, parents):
    """Prepare buffers for non-local (across-sublist) rearrangement.

    Returns ``(distincts, maxcount, maxnextparents, nextcarry, nextparents,
    nextstarts)`` where ``maxcount`` and ``maxnextparents`` are scalar shape
    items (not indexable arrays), used by _unique/_sort_next/_argsort_next/
    _reduce_next.
    """
    index_nplike = self._backend.index_nplike
    nextlen = index_nplike.index_as_shape_item((self._offsets[(- 1)] - self._offsets[0]))
    lenstarts = (self._offsets.length - 1)
    # Kernel 1: longest sublist length and a working copy of the offsets.
    _maxcount = Index64.empty(1, index_nplike)
    offsetscopy = Index64.empty(self.offsets.length, index_nplike)
    assert ((_maxcount.nplike is index_nplike) and (offsetscopy.nplike is index_nplike) and (self._offsets.nplike is index_nplike))
    self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_maxcount_offsetscopy_64', _maxcount.dtype.type, offsetscopy.dtype.type, self._offsets.dtype.type)](_maxcount.data, offsetscopy.data, self._offsets.data, lenstarts))
    maxcount = index_nplike.index_as_shape_item(_maxcount[0])
    # Kernel 2: transposed carry/parents plus the distinct parent slots.
    nextcarry = Index64.empty(nextlen, nplike=index_nplike)
    nextparents = Index64.empty(nextlen, nplike=index_nplike)
    _maxnextparents = Index64.empty(1, index_nplike)
    if ((maxcount is unknown_length) or (outlength is unknown_length)):
        # Typetracer path: sizes are unknown at trace time.
        distincts = Index64.empty(unknown_length, index_nplike)
    else:
        distincts = Index64.empty((outlength * maxcount), index_nplike)
    assert ((_maxnextparents.nplike is index_nplike) and (distincts.nplike is index_nplike) and (self._offsets.nplike is index_nplike) and (offsetscopy.nplike is index_nplike) and (parents.nplike is index_nplike))
    self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_preparenext_64', nextcarry.dtype.type, nextparents.dtype.type, _maxnextparents.dtype.type, distincts.dtype.type, self._offsets.dtype.type, offsetscopy.dtype.type, parents.dtype.type)](nextcarry.data, nextparents.data, nextlen, _maxnextparents.data, distincts.data, distincts.length, offsetscopy.data, self._offsets.data, lenstarts, parents.data, maxcount))
    maxnextparents = index_nplike.index_as_shape_item(_maxnextparents[0])
    # Kernel 3: start position of each next-parent group.
    nextstarts = Index64.empty((maxnextparents + 1), index_nplike)
    assert ((nextstarts.nplike is index_nplike) and (nextparents.nplike is index_nplike))
    self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_reduce_nonlocal_nextstarts_64', nextstarts.dtype.type, nextparents.dtype.type)](nextstarts.data, nextparents.data, nextlen))
    return (distincts, maxcount, maxnextparents, nextcarry, nextparents, nextstarts)
def _validity_error(self, path):
    """Return a description of the first validity problem found, or ''-like
    recursion into the content if this node is valid.

    ``path`` is the dotted location of this node in the overall layout,
    used to make the message actionable.
    """
    # A ListOffsetArray needs at least one offset (the initial 0).
    if (self.offsets.length < 1):
        return f'at {path} ({type(self)!r}): len(offsets) < 1'
    assert ((self.starts.nplike is self._backend.index_nplike) and (self.stops.nplike is self._backend.index_nplike))
    # Kernel checks starts <= stops and bounds against the content length.
    error = self._backend[('awkward_ListArray_validity', self.starts.dtype.type, self.stops.dtype.type)](self.starts.data, self.stops.data, self.starts.length, self._content.length)
    if (error.str is not None):
        if (error.filename is None):
            filename = ''
        else:
            # surrogateescape keeps undecodable bytes from raising here.
            filename = (' (in compiled code: ' + error.filename.decode(errors='surrogateescape').lstrip('\n').lstrip('('))
        message = error.str.decode(errors='surrogateescape')
        return f'at {path} ("{type(self)}"): {message} at i={error.id}(unknown)'
    else:
        # This node is fine; keep validating deeper.
        return self._content._validity_error((path + '.content'))
def _nbytes_part(self):
    """Bytes held by this node: its offsets buffer plus its content's total."""
    offsets_bytes = self.offsets._nbytes_part()
    content_bytes = self.content._nbytes_part()
    return offsets_bytes + content_bytes
def _pad_none(self, target, axis, depth, clip):
    """Pad sublists with None to at least (``clip=False``) or exactly
    (``clip=True``) ``target`` elements at the resolved axis.
    """
    posaxis = maybe_posaxis(self, axis, depth)
    index_nplike = self._backend.index_nplike
    if ((posaxis is not None) and ((posaxis + 1) == depth)):
        # axis points at this node itself: generic axis-0 helper.
        return self._pad_none_axis0(target, clip)
    if ((posaxis is not None) and ((posaxis + 1) == (depth + 1))):
        if (not clip):
            # Grow-only padding: compute new offsets and total length first.
            _tolength = Index64.empty(1, index_nplike)
            offsets_ = Index64.empty(self._offsets.length, index_nplike)
            assert ((offsets_.nplike is index_nplike) and (self._offsets.nplike is index_nplike) and (_tolength.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_rpad_length_axis1', offsets_.dtype.type, self._offsets.dtype.type, _tolength.dtype.type)](offsets_.data, self._offsets.data, (self._offsets.length - 1), target, _tolength.data))
            tolength = index_nplike.index_as_shape_item(_tolength[0])
            # outindex maps padded slots to content positions; -1 marks None.
            outindex = Index64.empty(tolength, index_nplike)
            assert ((outindex.nplike is index_nplike) and (self._offsets.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_rpad_axis1', outindex.dtype.type, self._offsets.dtype.type)](outindex.data, self._offsets.data, (self._offsets.length - 1), target))
            next = ak.contents.IndexedOptionArray.simplified(outindex, self._content, parameters=self._parameters)
            return ak.contents.ListOffsetArray(offsets_, next, parameters=self._parameters)
        else:
            # Clipping: every sublist becomes exactly `target` long, so the
            # result is a RegularArray.
            starts_ = Index64.empty((self._offsets.length - 1), index_nplike)
            stops_ = Index64.empty((self._offsets.length - 1), index_nplike)
            assert ((starts_.nplike is index_nplike) and (stops_.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_index_rpad_and_clip_axis1', starts_.dtype.type, stops_.dtype.type)](starts_.data, stops_.data, target, starts_.length))
            outindex = Index64.empty((target * (self._offsets.length - 1)), index_nplike)
            assert ((outindex.nplike is index_nplike) and (self._offsets.nplike is index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_rpad_and_clip_axis1', outindex.dtype.type, self._offsets.dtype.type)](outindex.data, self._offsets.data, (self._offsets.length - 1), target))
            next = ak.contents.IndexedOptionArray.simplified(outindex, self._content, parameters=self._parameters)
            return ak.contents.RegularArray(next, target, self.length, parameters=self._parameters)
    else:
        # axis is deeper: recurse into the content, keeping these offsets.
        return ak.contents.ListOffsetArray(self._offsets, self._content._pad_none(target, axis, (depth + 1), clip), parameters=self._parameters)
def _to_arrow(self, pyarrow: Any, mask_node: (Content | None), validbytes: (Content | None), length: int, options: ToArrowOptions):
    """Convert this node to a pyarrow Array (list, string, or binary type).

    ``validbytes`` is the Arrow validity bitmap source; ``options`` control
    32-bit offset downsizing and extension-type wrapping.
    """
    is_string = (self.parameter('__array__') == 'string')
    is_bytestring = (self.parameter('__array__') == 'bytestring')
    # Pick the 64->32-bit offset downsizing option matching the node kind.
    if is_string:
        downsize = options['string_to32']
    elif is_bytestring:
        downsize = options['bytestring_to32']
    else:
        downsize = options['list_to32']
    npoffsets = self._offsets.raw(numpy)
    akcontent = self._content[npoffsets[0]:npoffsets[length]]
    # Trim surplus offsets and rebase them to start at zero, as Arrow expects.
    if (len(npoffsets) > (length + 1)):
        npoffsets = npoffsets[:(length + 1)]
    if (npoffsets[0] != 0):
        npoffsets = (npoffsets - npoffsets[0])
    if (validbytes is not None):
        # Arrow requires masked list slots to be empty; if any masked slot is
        # non-empty, rebuild with zero-length lists at those slots and retry.
        nonzeros = (npoffsets[1:] != npoffsets[:(- 1)])
        maskedbytes = (validbytes == 0)
        if numpy.any((maskedbytes & nonzeros)):
            new_starts = numpy.asarray(npoffsets[:(- 1)], copy=True)
            new_stops = numpy.asarray(npoffsets[1:], copy=True)
            new_starts[maskedbytes] = 0
            new_stops[maskedbytes] = 0
            next = ak.contents.ListArray(ak.index.Index(new_starts), ak.index.Index(new_stops), self._content, parameters=self._parameters)
            return next.to_ListOffsetArray64(True)._to_arrow(pyarrow, mask_node, validbytes, length, options)
    # Downsize 64-bit offsets to 32-bit when requested and safe; uint32
    # offsets must become a signed type either way.
    if issubclass(npoffsets.dtype.type, np.int64):
        if (downsize and (npoffsets[(- 1)] < np.iinfo(np.int32).max)):
            npoffsets = numpy.astype(npoffsets, np.int32)
    if issubclass(npoffsets.dtype.type, np.uint32):
        if (npoffsets[(- 1)] < np.iinfo(np.int32).max):
            npoffsets = numpy.astype(npoffsets, np.int32)
        else:
            npoffsets = numpy.astype(npoffsets, np.int64)
    if (is_string or is_bytestring):
        assert isinstance(akcontent, ak.contents.NumpyArray)
        # 32-bit offsets select string/binary; 64-bit select the large_ types.
        if issubclass(npoffsets.dtype.type, np.int32):
            if is_string:
                string_type = pyarrow.string()
            else:
                string_type = pyarrow.binary()
        elif is_string:
            string_type = pyarrow.large_string()
        else:
            string_type = pyarrow.large_binary()
        return pyarrow.Array.from_buffers(ak._connect.pyarrow.to_awkwardarrow_type(string_type, options['extensionarray'], options['record_is_scalar'], mask_node, self), length, [ak._connect.pyarrow.to_validbits(validbytes), pyarrow.py_buffer(npoffsets), pyarrow.py_buffer(akcontent._raw(numpy))])
    else:
        # Generic list: convert the child first, then wrap in (large_)list.
        paarray = akcontent._to_arrow(pyarrow, None, None, akcontent.length, options)
        content_type = pyarrow.list_(paarray.type).value_field.with_nullable(akcontent.is_option)
        if issubclass(npoffsets.dtype.type, np.int32):
            list_type = pyarrow.list_(content_type)
        else:
            list_type = pyarrow.large_list(content_type)
        return pyarrow.Array.from_buffers(ak._connect.pyarrow.to_awkwardarrow_type(list_type, options['extensionarray'], options['record_is_scalar'], mask_node, self), length, [ak._connect.pyarrow.to_validbits(validbytes), pyarrow.py_buffer(npoffsets)], children=[paarray], null_count=ak._connect.pyarrow.to_null_count(validbytes, options['count_nulls']))
def _to_backend_array(self, allow_missing, backend):
    """Convert to a rectangular backend array.

    Strings become fixed-width 'U' arrays (UTF-8 decoded to UTF-32 and
    padded), bytestrings become fixed-width 'S' arrays (zero-padded), and
    any other list content goes through the RegularArray conversion.
    """
    array_param = self.parameter('__array__')
    if (array_param == 'string'):
        # Kernel 1: longest string length in code points.
        _max_code_points = backend.index_nplike.empty(1, dtype=np.int64)
        backend[('awkward_NumpyArray_prepare_utf8_to_utf32_padded', self._content.dtype.type, self._offsets.dtype.type, _max_code_points.dtype.type)](self._content.data, self._offsets.data, self._offsets.length, _max_code_points)
        max_code_points = backend.index_nplike.index_as_shape_item(_max_code_points[0])
        if (max_code_points is not unknown_length):
            # Zero-width dtypes are invalid; pad to at least one code point.
            max_code_points = max(1, max_code_points)
        total_code_points = (max_code_points * self.length)
        # Kernel 2: decode UTF-8 into a padded UTF-32 buffer.
        buffer = backend.nplike.empty(total_code_points, dtype=np.uint32)
        self.backend[('awkward_NumpyArray_utf8_to_utf32_padded', self._content.dtype.type, self._offsets.dtype.type, buffer.dtype.type)](self._content.data, self._offsets.data, self._offsets.length, max_code_points, buffer)
        # Reinterpret as fixed-width unicode strings.
        return buffer.view(np.dtype(('U', max_code_points)))
    elif (array_param == 'bytestring'):
        if ((self.starts.length is not unknown_length) and (self.starts.length == 0)):
            max_count = 0
        else:
            # Longest bytestring, from starts/stops differences.
            max_count = backend.index_nplike.index_as_shape_item(backend.index_nplike.max((self.stops.data - self.starts.data)))
        if (max_count is not unknown_length):
            # Zero-width dtypes are invalid; pad to at least one byte.
            max_count = max(1, max_count)
        buffer = backend.nplike.empty((max_count * self.length), dtype=np.uint8)
        # Zero-pad each bytestring to the fixed width.
        self.backend[('awkward_NumpyArray_pad_zero_to_length', self._content.dtype.type, self._offsets.dtype.type, buffer.dtype.type)](self._content.data, self._offsets.data, self._offsets.length, max_count, buffer)
        # Reinterpret as fixed-width bytestrings.
        return buffer.view(np.dtype(('S', max_count)))
    else:
        return self.to_RegularArray()._to_backend_array(allow_missing, backend)
def _remove_structure(self, backend: Backend, options: RemoveStructureOptions) -> list[Content]:
    """Strip away the list structure, returning the leaf contents.

    Strings/bytestrings act as scalars and are returned whole; otherwise the
    content (trimmed to the referenced region) is flattened recursively.
    """
    if ((self.parameter('__array__') == 'string') or (self.parameter('__array__') == 'bytestring')):
        # String arrays are leaves for structure removal.
        return [self]
    else:
        # Trim content to the region the offsets actually reference.
        content = self._content[self._offsets[0]:self._offsets[(- 1)]]
        contents = content._remove_structure(backend, options)
        if options['keepdims']:
            if options['list_to_regular']:
                # Keep a dimension by wrapping each leaf in a regular layout.
                return [ak.contents.RegularArray(c, size=c.length, zeros_length=1, parameters=self._parameters) for c in contents]
            else:
                # Keep a dimension via a single-list offsets buffer [0, len(c)].
                return [ListOffsetArray(Index64(backend.index_nplike.asarray([0, backend.index_nplike.shape_item_as_index(c.length)])), c, parameters=self._parameters) for c in contents]
        else:
            return contents
def _drop_none(self) -> Content:
    """Remove missing values from option-type content, adjusting offsets."""
    if self._content.is_option:
        # Positions of the None entries that will disappear from the content.
        (_, _, none_indexes) = self._content._nextcarry_outindex()
        new_content = self._content._drop_none()
        return self._rebuild_without_nones(none_indexes, new_content)
    else:
        # Content cannot hold missing values; nothing to drop.
        return self
def _rebuild_without_nones(self, none_indexes, new_content):
    """Recompute offsets after the entries at *none_indexes* were removed.

    The kernel shifts each offset down by the number of dropped entries that
    precede it, so list boundaries stay aligned with *new_content*.
    """
    # NOTE(review): buffer is created with `nplike` but asserted against
    # `index_nplike` below — presumably the same object here; confirm.
    new_offsets = Index64.empty(self._offsets.length, self._backend.nplike)
    assert ((new_offsets.nplike is self._backend.index_nplike) and (self._offsets.nplike is self._backend.index_nplike) and (none_indexes.nplike is self._backend.index_nplike))
    self._backend.maybe_kernel_error(self._backend[('awkward_ListOffsetArray_drop_none_indexes', new_offsets.dtype.type, none_indexes.dtype.type, self._offsets.dtype.type)](new_offsets.data, none_indexes.data, self._offsets.data, self._offsets.length, none_indexes.length))
    return ak.contents.ListOffsetArray(new_offsets, new_content)
def _recursively_apply(self, action: ImplementsApplyAction, depth: int, depth_context: (Mapping[(str, Any)] | None), lateral_context: (Mapping[(str, Any)] | None), options: ApplyActionOptions) -> (Content | None):
    """Apply *action* at each level of the layout tree.

    *action* may return a Content (which replaces this node) or None (which
    recurses via *continuation*). ``options['return_array']`` selects whether
    recursion rebuilds a new layout or runs only for side effects.
    """
    if self._backend.nplike.known_data:
        # Rebase offsets to start at zero and trim the content so recursion
        # only sees referenced data.
        offsetsmin = self._offsets[0]
        offsets = ak.index.Index((self._offsets.data - offsetsmin), nplike=self._backend.index_nplike)
        content = self._content[offsetsmin:self._offsets[(- 1)]]
    else:
        # Typetracer path: record the touch, keep placeholder buffers.
        self._touch_data(recursive=False)
        (offsets, content) = (self._offsets, self._content)
    if options['return_array']:
        def continuation():
            # Rebuild this node around the recursively transformed content.
            return ListOffsetArray(offsets, content._recursively_apply(action, (depth + 1), copy.copy(depth_context), lateral_context, options), parameters=(self._parameters if options['keep_parameters'] else None))
    else:
        def continuation():
            # Side-effect-only traversal; no layout is constructed.
            content._recursively_apply(action, (depth + 1), copy.copy(depth_context), lateral_context, options)
    result = action(self, depth=depth, depth_context=depth_context, lateral_context=lateral_context, continuation=continuation, backend=self._backend, options=options)
    if isinstance(result, Content):
        return result
    elif (result is None):
        return continuation()
    else:
        # Actions must return Content or None; anything else is a bug.
        raise AssertionError(result)
def to_packed(self) -> Self:
    """Return an equivalent array with minimal, contiguous buffers.

    Normalizes to 64-bit offsets starting at zero, then trims the content to
    exactly the region the offsets reference and packs it recursively.
    """
    # Renamed from `next`, which shadowed the builtin of the same name.
    # start_at_zero=True guarantees offsets[0] == 0, so the slice below keeps
    # exactly the referenced elements.
    next_layout = self.to_ListOffsetArray64(True)
    packed_content = next_layout._content[:next_layout._offsets[-1]].to_packed()
    return ListOffsetArray(next_layout._offsets, packed_content, parameters=next_layout._parameters)
def _to_list(self, behavior, json_conversions):
    """Convert to a Python list (str/bytes items for string-typed arrays)."""
    if (not self._backend.nplike.known_data):
        raise TypeError('cannot convert typetracer arrays to Python lists')
    (starts, stops) = (self.starts, self.stops)
    starts_data = starts.raw(numpy)
    stops_data = stops.raw(numpy)[:len(starts_data)]
    # Find the span of content actually referenced by non-empty lists.
    nonempty = (starts_data != stops_data)
    if (numpy.count_nonzero(nonempty) == 0):
        (mini, maxi) = (0, 0)
    else:
        mini = self._backend.index_nplike.min(starts_data)
        maxi = self._backend.index_nplike.max(stops_data)
    # Rebase indices onto the trimmed content region.
    starts_data = (starts_data - mini)
    stops_data = (stops_data - mini)
    nextcontent = self._content._getitem_range(mini, maxi)
    if (self.parameter('__array__') == 'bytestring'):
        # Each list is one bytestring; JSON conversions may post-process it.
        convert_bytes = (None if (json_conversions is None) else json_conversions['convert_bytes'])
        data = nextcontent.data
        out = ([None] * starts.length)
        if (convert_bytes is None):
            for i in range(starts.length):
                out[i] = ak._util.tobytes(data[starts_data[i]:stops_data[i]])
        else:
            for i in range(starts.length):
                out[i] = convert_bytes(ak._util.tobytes(data[starts_data[i]:stops_data[i]]))
        return out
    elif (self.parameter('__array__') == 'string'):
        data = nextcontent.data
        out = ([None] * starts.length)
        for i in range(starts.length):
            # surrogateescape keeps invalid UTF-8 bytes round-trippable.
            out[i] = ak._util.tobytes(data[starts_data[i]:stops_data[i]]).decode(errors='surrogateescape')
        return out
    else:
        # Behavior-defined custom conversion takes precedence, if present.
        out = self._to_list_custom(behavior, json_conversions)
        if (out is not None):
            return out
        content = nextcontent._to_list(behavior, json_conversions)
        out = ([None] * starts.length)
        for i in range(starts.length):
            out[i] = content[starts_data[i]:stops_data[i]]
        return out
def _to_backend(self, backend: Backend) -> Self:
    """Move this array's buffers onto *backend*, keeping parameters intact."""
    moved_offsets = self._offsets.to_nplike(backend.index_nplike)
    moved_content = self._content.to_backend(backend)
    return ListOffsetArray(moved_offsets, moved_content, parameters=self._parameters)
def _awkward_strings_to_nonfinite(self, nonfinit_dict):
    """Map special strings (e.g. 'nan', 'inf') to their float values.

    Returns a NumpyArray when every entry maps, a UnionArray of floats and
    the untouched strings when only some do, and implicitly None when no
    entry maps or this is not a string array.
    """
    if (self.parameter('__array__') == 'string'):
        strings = self.to_list()
        if any(((item in nonfinit_dict) for item in strings)):
            numbers = self._backend.index_nplike.empty(self.starts.length, dtype=np.float64)
            has_another_string = False
            for (i, val) in enumerate(strings):
                if (val in nonfinit_dict):
                    numbers[i] = nonfinit_dict[val]
                else:
                    # Assigning None into a float64 slot stores NaN, which the
                    # isnan pass below uses to mark unmapped positions.
                    numbers[i] = None
                    has_another_string = True
            content = ak.contents.NumpyArray(numbers)
            if has_another_string:
                # Union tag 0 = mapped float, tag 1 (set where isnan) = keep
                # the original string content.
                union_tags = ak.index.Index8.zeros(content.length, nplike=self._backend.index_nplike)
                content.backend.nplike.isnan(content._data, union_tags._data)
                union_index = Index64(self._backend.index_nplike.arange(content.length, dtype=np.int64), nplike=self._backend.index_nplike)
                return ak.contents.UnionArray(tags=union_tags, index=union_index, contents=[content, self.to_ListOffsetArray64(True)])
            return content
def _is_equal_to(self, other: Self, index_dtype: bool, numpyarray: bool, all_parameters: bool) -> bool:
    """Structural equality: generic node checks, then offsets, then content."""
    if not self._is_equal_to_generic(other, all_parameters):
        return False
    if not self._offsets.is_equal_to(other.offsets, index_dtype, numpyarray):
        return False
    return self._content._is_equal_to(other.content, index_dtype, numpyarray, all_parameters)
def train(model, optimizer, loader, device):
model.train()
total_loss = 0
for data in loader:
optimizer.zero_grad()
x = data.x.to(device)
out = model(x)
loss = F.l1_loss(out, x, reduction='mean')
loss.backward()
total_loss += loss.item()
optimizer.step()
return (total_loss / len(loader)) |
def p2(csv='4partitions.csv', out_file_name='output.png', dataset='cifar100', epoch=200, ylim=(80, 83)):
    """Plot per-algorithm test accuracy for one dataset/epoch as a bar chart.

    Generalized: the previously hard-coded CSV path, output file, dataset
    filter, epoch filter, and y-limits are now parameters whose defaults
    reproduce the original behavior exactly.

    Args:
        csv: input CSV with columns dataset, epoch, test_acc, alg, model.
        out_file_name: figure filename, written under the current directory.
        dataset: value to filter the `dataset` column on.
        epoch: value to filter the `epoch` column on.
        ylim: (bottom, top) y-axis limits.
    """
    out_path = os.path.join('.', out_file_name)
    df = pd.read_csv(csv).query(f"dataset == '{dataset}'").query(f'epoch == {epoch}')
    ax = sns.barplot(x='epoch', y='test_acc', hue='alg', data=df)
    # The plot title is the (single) model name; mixed models would be a
    # data error, so fail loudly.
    models = pd.unique(df.model)
    assert (len(models) == 1)
    ax.set_ylim(*ylim)
    ax.set_title(models[0])
    fig = ax.get_figure()
    fig.savefig(out_path)
    print(f'saving file to {out_path}')
class ComplexConv2d(nn.Module):
    """2-D convolution over complex inputs stored on a trailing real/imag axis.

    Input shape ``(..., H, W, 2)``: index 0 of the last axis is the real part,
    index 1 the imaginary part. Output keeps the same layout. Two real-valued
    convolutions implement one complex convolution:
    ``(a + ib) * (re + i*im) = (a*re - b*im) + i(a*im + b*re)``.
    """

    def __init__(self, in_channel, out_channel, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, **kwargs):
        super().__init__()
        conv_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, **kwargs)
        # Separate kernels for the real and imaginary components of the filter.
        self.conv_re = nn.Conv2d(in_channel, out_channel, kernel_size, **conv_kwargs)
        self.conv_im = nn.Conv2d(in_channel, out_channel, kernel_size, **conv_kwargs)

    def forward(self, x):
        real_in, imag_in = x[..., 0], x[..., 1]
        real_out = self.conv_re(real_in) - self.conv_im(imag_in)
        imag_out = self.conv_re(imag_in) + self.conv_im(real_in)
        return torch.stack((real_out, imag_out), dim=-1)
# BUGFIX: the source shows a bare `(for_each_device=True)` where a decorator
# belongs, which is a syntax error. `for_each_device` is the keyword of
# cupy.memoize, so that decorator is restored — confirm against upstream.
@cupy.memoize(for_each_device=True)
def cupy_launch(strFunction, strKernel):
    """Compile *strKernel* and return its *strFunction* entry point.

    Memoized per CUDA device so each kernel is compiled at most once.
    """
    return cupy.RawKernel(strKernel, strFunction)
# BUGFIX: the source shows a bare argument tuple where a decorator belongs
# (a syntax error). Restoring the conventional Flask route decorator —
# confirm the application object's actual name in this module.
@app.route('/dialog_status/<cid>', methods=['GET'])
def showtree(cid):
    """Render the task tree for dialog session *cid*.

    Returns a JSON message when the session is unknown, an error dict when
    fetching the tree raises, and the rendered tree template otherwise.
    """
    try:
        ctx = dmgr.get_ctx(cid)
        if not ctx:
            return json.dumps({'Info': 'session not initialized.'})
        tree_data = ctx.tree_manager.task_tree.tree_show()
    except Exception:
        # Best-effort error reporting; printException formats the traceback.
        msg = printException()
        return {'status': 'error', 'msg': msg}
    return render_template('tree.html', tree=tree_data)
def mobius_matvec(m, x, *, c=1.0):
    """Apply the Möbius matrix-vector product of *m* and *x*.

    The curvature *c* is coerced to x's dtype/device before delegating to
    the private implementation.
    """
    curvature = torch.as_tensor(c).type_as(x)
    return _mobius_matvec(m, x, curvature)
# BUGFIX: the source shows bare `.parametrize(...)` lines (a syntax error);
# restoring the standard pytest.mark decorators they abbreviate.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('shape', shapes)
def test_relu_forward_backward(seed, ctx, func_name, shape):
    """Check F.relu forward/backward against the reference implementation."""
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    # Keep inputs away from the non-differentiable point at 0.
    inputs = [cap_ignore_region((rng.randn(*shape).astype(np.float32) * 2), ((- 0.001), 0.001))]
    function_tester(rng, F.relu, ref_relu, inputs, ctx=ctx, func_name=func_name)
def convert_dense(vars, source_name, target_name):
    """Translate a TF dense layer's weight/bias into a PyTorch state-dict fragment.

    TF stores dense weights as (in, out) while PyTorch Linear expects
    (out, in), hence the transpose on the weight.
    """
    weight = vars[source_name + '/weight'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    arrays = {'weight': weight.transpose((1, 0)), 'bias': bias}
    return {f'{target_name}.{name}': torch.from_numpy(array) for name, array in arrays.items()}
def power_of_two_quantization(activation_n_bits: int, quantization_params: dict) -> Callable:
    """Build a power-of-two activation quantizer from precomputed parameters.

    Validates the threshold/sign parameters (logging errors through the
    project Logger), derives the min/max range, and returns a function that
    quantizes a tensor to *activation_n_bits* bits.
    """
    threshold = quantization_params.get(THRESHOLD)
    signed = quantization_params.get(SIGNED)
    if threshold is None:
        Logger.error('Activation threshold is None')
    if signed is None:
        Logger.error('activation_is_signed is None')
    if not threshold_is_power_of_two(threshold, per_channel=False):
        Logger.error('Activation threshold is not power of two')
    min_value, max_value = quantizer_min_max_calculator(threshold, activation_n_bits, signed)

    def _quantize(x):
        return q(x, min_value, max_value, activation_n_bits)

    return _quantize
def post_tokenization_processing(document_state: DocumentState, subword_tokenizer, max_segment_len=4096):
    """Segment the document, tensorize each segment with CLS/SEP ids, finalize.

    Mutates *document_state* in place (segment lengths, indices, tensors)
    and returns whatever its `finalize()` produces.
    """
    split_into_segments(document_state, max_segment_len, document_state.sentence_end, document_state.token_end)
    document_state.sent_len_list = [len(segment) for segment in document_state.segments]
    document_state.segments_indices = document_state.segments
    cls_id = subword_tokenizer.cls_token_id
    sep_id = subword_tokenizer.sep_token_id
    # Each segment becomes a (1, len+2) tensor: [CLS] + ids + [SEP].
    document_state.tensorized_sent = [
        torch.tensor([cls_id] + segment + [sep_id]).unsqueeze(0)
        for segment in document_state.segments
    ]
    return document_state.finalize()
class SLSQP(WrappedOptimizerBase):
    """SciPy SLSQP wrapper conforming to the WrappedOptimizerBase interface.

    Args:
        options: scipy.optimize options dict; an optional 'tol' key is
            extracted (default 1e-6) and passed to `minimize` separately.
        callback: per-iteration callback forwarded to scipy via set_callback.
    """

    def __init__(self, options: dict = None, callback=default_callback):
        super().__init__()
        # BUGFIX: copy before popping 'tol' so the caller's dict is not
        # mutated (the original popped from the shared argument in place).
        options = {} if options is None else dict(options)
        self.tol = options.pop('tol', 1e-06)
        self.options = options
        self.set_callback(callback)

    def minimize(self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None) -> OptimizerResult:
        """Minimize *fun* from *x0* with SLSQP; return x, nit, fun in an OptimizerResult."""
        scipy_result = minimize(fun, jac=grad, x0=x0, method='SLSQP', options=self.options, bounds=bounds, tol=self.tol, callback=self.callback)
        result = OptimizerResult()
        result.x = scipy_result.x
        result.nit = scipy_result.nit
        result.fun = scipy_result.fun
        return result
# BUGFIX: the source shows a bare `_zero_only` expression where a decorator
# belongs; restoring the conventional rank-zero-only decorator — confirm
# which rank-zero helper this project imports.
@rank_zero_only
def dump_yaml(cfg, yaml_dict, time_tag):
    """Merge public attributes of *cfg* into yaml_dict['distiller'] and dump it.

    Writes <base_dir>results/pretrain/<output_dir>/<time_tag>.yaml and
    returns the merged dict.
    """
    distiller = dict()
    # Collect non-callable, non-dunder attributes of the config object.
    for attr in dir(cfg):
        if (not callable(getattr(cfg, attr))) and (not attr.startswith('_')):
            distiller[attr] = getattr(cfg, attr)
    dump_dict = yaml_dict
    for key in distiller:
        # These fields may be enums/classes; store their string form so the
        # YAML stays readable and round-trippable.
        if key in ['activation_fn', 'extractor_mode', 'layer_type']:
            dump_dict['distiller'][key] = str(distiller[key])
        else:
            dump_dict['distiller'][key] = distiller[key]
    # NOTE(review): assumes base_dir ends with a path separator — confirm.
    dump_dir = dump_dict['train']['base_dir'] + 'results/pretrain/' + dump_dict['train']['output_dir']
    os.makedirs(dump_dir, exist_ok=True)
    with open(os.path.join(dump_dir, time_tag + '.yaml'), 'w') as f:
        yaml.dump(dump_dict, f, sort_keys=False)
    return dump_dict
def convert_example_to_features(example, max_seq_length, tokenizer):
    """Build masked-LM and column-label InputFeatures for one example.

    tokens_a is the first segment (presumably the question) and tokens_b the
    second (presumably a serialized table whose columns are separated by
    SEP_TOKEN); raw_label lists the indices of positive columns.
    """
    tokens_a = example.tokens_a
    tokens_b = example.tokens_b
    raw_label = example.raw_label
    if SEP_TOKEN not in tokens_b:
        # BUGFIX: the original passed tokens_b as a lazy-format argument with
        # no %-placeholder in the message, which breaks logging's formatting.
        logger.info('\n** ** * tokens_b: %s', tokens_b)
    # Reserve 3 slots for [CLS] and the two [SEP] tokens added below.
    _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - 3))
    # Drop labels that refer to columns removed by truncation.
    col_count = tokens_b.count(SEP_TOKEN)
    raw_label = [i for i in raw_label if (i < (col_count + 1))]
    # NOTE(review): this scans for the literal '[SEP]' while the count above
    # uses SEP_TOKEN — presumably SEP_TOKEN == '[SEP]'; confirm.
    sep_indices = [i for (i, x) in enumerate(tokens_b) if (x == '[SEP]')]
    sep_col_ones = [ind for (i, ind) in enumerate(sep_indices) if ((i + 1) in raw_label)]
    # Per-token column labels: 1 at a positive column's SEP, 0 at other SEPs,
    # -1 (ignored) everywhere else.
    col_label = [(1 if (i in sep_col_ones) else (0 if ((i in sep_indices) and (i not in sep_col_ones)) else (- 1))) for (i, _) in enumerate(tokens_b)]
    # The inserted front element labels column 0 on the [SEP] after tokens_a.
    if (0 in raw_label):
        col_label.insert(0, 1)
    else:
        col_label.insert(0, 0)
    # Align labels with the final layout: [CLS] + tokens_a + [SEP]+tokens_b + [SEP].
    col_label_ids = ((([(- 1)] + ([(- 1)] * len(tokens_a))) + col_label) + [(- 1)])
    # BERT-style random masking is applied to the second segment only.
    (tokens_b, t2_label) = random_word(tokens_b, tokenizer)
    lm_label_ids = (((([(- 1)] + ([(- 1)] * len(tokens_a))) + [(- 1)]) + t2_label) + [(- 1)])
    tokens = []
    segment_ids = []
    tokens.append('[CLS]')
    segment_ids.append(0)
    for token in tokens_a:
        tokens.append(token)
        segment_ids.append(0)
    tokens.append('[SEP]')
    # NOTE(review): this [SEP] gets segment id 1 (BERT convention would be 0)
    # — confirm this is intentional.
    segment_ids.append(1)
    assert (len(tokens_b) > 0)
    for token in tokens_b:
        tokens.append(token)
        segment_ids.append(1)
    tokens.append('[SEP]')
    segment_ids.append(1)
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # 1 for real tokens, 0 for padding.
    input_mask = ([1] * len(input_ids))
    # Zero-pad all sequences (labels with -1) up to the fixed length.
    while (len(input_ids) < max_seq_length):
        input_ids.append(0)
        input_mask.append(0)
        segment_ids.append(0)
        col_label_ids.append((- 1))
        lm_label_ids.append((- 1))
    assert (len(input_ids) == max_seq_length)
    assert (len(input_mask) == max_seq_length)
    assert (len(segment_ids) == max_seq_length)
    assert (len(lm_label_ids) == max_seq_length)
    assert (len(col_label_ids) == max_seq_length)
    # Log the first few examples for manual inspection.
    if (example.guid < 10):
        logger.info('*** Example ***')
        logger.info(('guid: %s' % example.guid))
        logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
        logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
        logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
        logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
        logger.info(('LM label: %s ' % lm_label_ids))
        logger.info(('col labels: %s ' % col_label_ids))
    features = InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, lm_label_ids=lm_label_ids, col_label_ids=col_label_ids)
    return features
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.