code stringlengths 101 5.91M |
|---|
class DistanceMetric(object):
    """Train/transform wrapper around a learnable distance metric.

    The 'euclidean' algorithm is parameter-free and skips training; any
    other algorithm is fit (via the object returned by ``get_metric``) on
    features extracted from a model.
    """

    def __init__(self, algorithm='euclidean', *args, **kwargs):
        super(DistanceMetric, self).__init__()
        self.algorithm = algorithm
        self.metric = get_metric(algorithm, *args, **kwargs)

    def train(self, model, data_loader):
        """Fit the metric on features extracted from ``data_loader``.

        No-op for the parameter-free 'euclidean' metric.
        """
        if self.algorithm == 'euclidean':
            return
        features, labels = extract_features(model, data_loader)
        # BUG FIX: in Python 3, dict ``.values()`` returns a view that
        # torch.stack / torch.Tensor cannot consume directly — materialize
        # the views as lists first.
        features = torch.stack(list(features.values())).numpy()
        labels = torch.Tensor(list(labels.values())).numpy()
        self.metric.fit(features, labels)

    def transform(self, X):
        """Project ``X`` with the fitted metric; tensor in -> tensor out."""
        if torch.is_tensor(X):
            X = X.numpy()
            X = self.metric.transform(X)
            X = torch.from_numpy(X)
        else:
            X = self.metric.transform(X)
        return X
def sql_functions_d_example(spark):
    """Run small demos of the pyspark.sql functions starting with 'd'.

    Each section builds a tiny throwaway DataFrame, applies one function,
    ``show``s the result and prints a completion marker.  The exact
    show/print sequence is the whole point of this demo function.
    """
    # date_add: date plus n days.
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(date_add(df.dt, 1).alias('next_day')).show()
    print('date_add API finished')
    # date_format: render a date with a format pattern.
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(date_format('dt', 'MM/dd/yyy').alias('date')).show()
    print('date_format API finished')
    # date_sub: date minus n days.
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(date_sub(df.dt, 1).alias('prev_date')).show()
    print('date_sub API finished')
    # date_trunc: truncate a timestamp to a unit (year, then month).
    df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
    df.select(date_trunc('year', df.t).alias('year')).show()
    df.select(date_trunc('mon', df.t).alias('month')).show()
    print('date_trunc API finished')
    # datediff: whole days between two dates.
    df = spark.createDataFrame([('2015-04-08', '2015-05-10')], ['d1', 'd2'])
    df.select(datediff(df.d2, df.d1).alias('diff')).show()
    print('datediff API finished')
    # dayofmonth / dayofweek / dayofyear: calendar components of a date.
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(dayofmonth('dt').alias('day')).show()
    print('dayofmonth API finished')
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(dayofweek('dt').alias('day')).show()
    print('dayofweek API finished')
    df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    df.select(dayofyear('dt').alias('day')).show()
    print('dayofyear API finished')
    # encode/decode: string <-> binary round-trip through a charset.
    df = spark.createDataFrame([(1, 'a'), (2, 'b'), (3, 'c')], ['n1', 's1'])
    df.withColumn('encode', encode(df.s1, 'utf-8')).withColumn('decode', decode('encode', 'utf-8')).show()
    print('decode API finished')
    # degrees: radians -> degrees.
    import math
    df = spark.createDataFrame([(math.pi,), ((math.pi / 6),)], ['radians'])
    df.select(degrees(df.radians)).show()
    print('degrees API finished')
    # dense_rank vs rank over an ordered window (dense_rank leaves no gaps
    # after ties; rank does).
    from pyspark.sql import Window
    window = Window.orderBy('score')
    df = spark.createDataFrame([('Bob', 90), ('Alice', 95), ('Coris', 90), ('David', 89)], ['name', 'score'])
    df.withColumn('dense_rank', dense_rank().over(window)).show()
    df.withColumn('rank', rank().over(window)).show()
    print('dense_rank API finished')
    # desc and its null-placement variants on a column with NULLs.
    df = spark.createDataFrame([(1, None), (10, 12), (8, 3), (None, 9), (9, 6)], ['n1', 'n2'])
    df.sort(df.n1.desc()).show()
    print('desc API finished')
    df = spark.createDataFrame([(1, None), (10, 12), (8, 3), (None, 9), (9, 6)], ['n1', 'n2'])
    df.sort(df.n1.desc_nulls_first()).show()
    print('desc_nulls_first API finished')
    df = spark.createDataFrame([(1, None), (10, 12), (8, 3), (None, 9), (9, 6)], ['n1', 'n2'])
    df.sort(df.n1.desc_nulls_last()).show()
    print('desc_nulls_last API finished')
    print('Finish running function_d API')
class MLPBoston():
    """Configuration holder for an MLP on the Boston housing data (13 inputs).

    NOTE(review): ``base.log_noise = ...`` mutates the *class* attribute of
    ``MLPBase`` itself at import time, so every other user of MLPBase sees
    this parameter — confirm that sharing is intended.
    """
    # Backbone class plus a learnable log-noise parameter, initialised to log(7).
    base = MLPBase
    base.log_noise = nn.Parameter(torch.log((torch.ones(1) * 7)))
    # Constructor arguments for ``base``.
    args = list()
    kwargs = {'in_dim': 13, 'layers': 1, 'hidden': 50}
    # Input transforms for train/test (tensor conversion only).
    transform_train = transforms.ToTensor()
    transform_test = transforms.ToTensor()
class NCEDataTest(TestCase):
    """Unit tests for NCEData batching: counts, determinism, shapes, workers."""

    def setUp(self):
        self.dataset = load_dataset('example.csv')

    def test_num_examples_for_different_batch_sizes(self):
        """The total number of examples must not depend on the batch size."""
        len_1 = self._num_examples_with_batch_size(1)
        for batch_size in range(2, 100):
            len_x = self._num_examples_with_batch_size(batch_size)
            self.assertEqual(len_x, len_1)

    def _num_examples_with_batch_size(self, batch_size):
        """Count examples produced by one full pass at ``batch_size``."""
        nce_data = NCEData(self.dataset, batch_size=batch_size, context_size=2, num_noise_words=3, max_size=1, num_workers=1)
        num_batches = len(nce_data)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        total = 0
        for _ in range(num_batches):
            batch = next(nce_generator)
            total += len(batch)
        nce_data.stop()
        return total

    def test_multiple_iterations(self):
        """Two consecutive passes must yield identical target ids."""
        nce_data = NCEData(self.dataset, batch_size=16, context_size=3, num_noise_words=3, max_size=1, num_workers=1)
        num_batches = len(nce_data)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        iter0_targets = []
        for _ in range(num_batches):
            batch = next(nce_generator)
            iter0_targets.append([x[0] for x in batch.target_noise_ids])
        iter1_targets = []
        for _ in range(num_batches):
            batch = next(nce_generator)
            iter1_targets.append([x[0] for x in batch.target_noise_ids])
        for (ts0, ts1) in zip(iter0_targets, iter1_targets):
            # BUG FIX: this previously zipped ts0 with *itself*, so the test
            # compared iteration 0 against iteration 0 and could never fail.
            for (t0, t1) in zip(ts0, ts1):
                self.assertEqual(t0, t1)
        nce_data.stop()

    def test_different_batch_sizes(self):
        """Target sequence must be identical regardless of batch size."""
        nce_data = NCEData(self.dataset, batch_size=16, context_size=1, num_noise_words=3, max_size=1, num_workers=1)
        num_batches = len(nce_data)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        targets0 = []
        for _ in range(num_batches):
            batch = next(nce_generator)
            for ts in batch.target_noise_ids:
                targets0.append(ts[0])
        nce_data.stop()
        nce_data = NCEData(self.dataset, batch_size=19, context_size=1, num_noise_words=3, max_size=1, num_workers=1)
        num_batches = len(nce_data)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        targets1 = []
        for _ in range(num_batches):
            batch = next(nce_generator)
            for ts in batch.target_noise_ids:
                targets1.append(ts[0])
        nce_data.stop()
        for (t0, t1) in zip(targets0, targets1):
            self.assertEqual(t0, t1)

    def test_tensor_sizes(self):
        """Batch tensors must match batch_size / context / noise dimensions."""
        nce_data = NCEData(self.dataset, batch_size=32, context_size=5, num_noise_words=3, max_size=1, num_workers=1)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        batch = next(nce_generator)
        nce_data.stop()
        self.assertEqual(batch.context_ids.size()[0], 32)
        self.assertEqual(batch.context_ids.size()[1], 10)
        self.assertEqual(batch.doc_ids.size()[0], 32)
        self.assertEqual(batch.target_noise_ids.size()[0], 32)
        self.assertEqual(batch.target_noise_ids.size()[1], 4)

    def test_parallel(self):
        """Serial and parallel workers must advance the state identically."""
        nce_data = NCEData(self.dataset, batch_size=32, context_size=5, num_noise_words=1, max_size=3, num_workers=1)
        nce_data.start()
        time.sleep(1)
        nce_data.stop()
        state_serial = nce_data._generator._state
        nce_data = NCEData(self.dataset, batch_size=32, context_size=5, num_noise_words=1, max_size=2, num_workers=2)
        nce_data.start()
        time.sleep(1)
        nce_data.stop()
        state_parallel = nce_data._generator._state
        self.assertEqual(state_parallel._doc_id.value, state_serial._doc_id.value)
        self.assertEqual(state_parallel._in_doc_pos.value, state_serial._in_doc_pos.value)

    def test_no_context(self):
        """context_size=0 must produce batches with no context ids."""
        nce_data = NCEData(self.dataset, batch_size=16, context_size=0, num_noise_words=3, max_size=1, num_workers=1)
        nce_data.start()
        nce_generator = nce_data.get_generator()
        batch = next(nce_generator)
        nce_data.stop()
        self.assertEqual(batch.context_ids, None)
def _get_module_flops(module):
s = module.__flops__
for child in module.children():
s += _get_module_flops(child)
return s |
class State():
    """Per-episode solver state container (JAX arrays and scalars).

    NOTE(review): fields are bare annotations with no decorator visible —
    this class presumably relies on a dataclass-style decorator (e.g.
    chex.dataclass) defined elsewhere; confirm upstream.  Field comments
    below are inferred from the names only.
    """
    problem: Array                # problem instance data
    position: jnp.int32           # current position (index), per the name
    capacity: jnp.float32         # remaining capacity, per the name
    visited_mask: Array           # mask over already-visited entries
    order: Array                  # visit order recorded so far
    num_total_visits: jnp.int32   # running count of visits
def get_argument():
    """Build and parse the CLI arguments for the quantization experiments.

    Returns the parsed ``argparse.Namespace``.
    """
    arg_parser = argparse.ArgumentParser()
    # Boolean feature switches (all default to False).
    for flag in ('--quantize', '--equalize', '--distill_range', '--correction',
                 '--relu', '--clip_weight', '--resnet'):
        arg_parser.add_argument(flag, action='store_true')
    # Quantization bit widths.
    arg_parser.add_argument('--bits_weight', type=int, default=8)
    arg_parser.add_argument('--bits_activation', type=int, default=8)
    arg_parser.add_argument('--bits_bias', type=int, default=32)
    # Distillation batching.
    arg_parser.add_argument('--dis_batch_size', type=int, default=64)
    arg_parser.add_argument('--dis_num_batch', type=int, default=8)
    # Tool / data / model-artifact paths.
    arg_parser.add_argument('--ncnn_build', type=str, default='/home/jakc4103/Documents/ncnn/build')
    arg_parser.add_argument('--image_path', type=str, default='/home/jakc4103/workspace/DFQ/cali_images/')
    arg_parser.add_argument('--param', type=str, default='modeling/ncnn/model_int8.param', help='filename of .param')
    arg_parser.add_argument('--bin', type=str, default='modeling/ncnn/model_int8.bin', help='filename of .bin')
    arg_parser.add_argument('--table', type=str, default='modeling/ncnn/model_int8.table', help='filename of .table')
    return arg_parser.parse_args()
class HeadSelectionTransformerDecoder(TransformerDecoder):
    """TransformerDecoder whose layers select attention heads per task.

    Optional ``AttnHeadSelector`` instances gate the self-attention and the
    encoder-attention heads; both are shared by all decoder layers and are
    conditioned on the task ids set via :meth:`set_task_ids`.
    """

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, output_projection=None):
        # Cached before super().__init__ runs — presumably because the base
        # constructor builds components that read these; confirm against the
        # base class.
        self.num_tasks = args.decoder_tasks
        self.num_layers = args.decoder_layers
        self.total_num_heads = args.total_decoder_attention_heads
        self.num_heads = args.decoder_attention_heads
        self.select_strategy = args.attn_head_select_strategy
        super().__init__(args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn, output_projection=output_projection)
        self.self_attn_head_selector = None
        self.enc_attn_head_selector = None
        if (safe_hasattr(args, 'decoder_self_attn_head_select') and args.decoder_self_attn_head_select):
            self.self_attn_head_selector = AttnHeadSelector(self.num_tasks, self.num_layers, self.total_num_heads, self.num_heads, self.select_strategy)
        if (safe_hasattr(args, 'dec_enc_attn_head_select') and args.dec_enc_attn_head_select):
            self.enc_attn_head_selector = AttnHeadSelector(self.num_tasks, self.num_layers, self.total_num_heads, self.num_heads, self.select_strategy)
        self.task_ids = None
        # Rebuild the layer stack so every layer is wired to the (possibly
        # None) selectors created above, replacing the stack the base class
        # constructed.
        self.layers = nn.ModuleList([self.build_head_selection_decoder_layer(args, no_encoder_attn, idx) for idx in range(args.decoder_layers)])

    def set_task_ids(self, task_ids):
        # Task id(s) for the current batch; consumed by forward().
        self.task_ids = task_ids

    def build_head_selection_decoder_layer(self, args, no_encoder_attn=False, layer_idx=None):
        """Build one decoder layer sharing the decoder-level head selectors."""
        return HeadSelectionTransformerDecoderLayer(args, layer_idx, self.self_attn_head_selector, self.enc_attn_head_selector, no_encoder_attn=no_encoder_attn)

    def forward(self, prev_output_tokens, encoder_out: Optional[Dict[(str, List[Tensor])]]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, features_only: bool=False, full_context_alignment: bool=False, alignment_layer: Optional[int]=None, alignment_heads: Optional[int]=None, src_lengths: Optional[Any]=None, return_all_hiddens: bool=False):
        """Run head selection for the stored task ids, then the base forward."""
        if (self.self_attn_head_selector is not None):
            self.self_attn_head_selector.head_select(self.task_ids)
        if (self.enc_attn_head_selector is not None):
            self.enc_attn_head_selector.head_select(self.task_ids)
        return super().forward(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, features_only=features_only, full_context_alignment=full_context_alignment, alignment_layer=alignment_layer, alignment_heads=alignment_heads, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens)
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
    """Build a tf.data input pipeline and return one (images, labels) pair.

    Training mode additionally shuffles across the full training set.
    """
    ds = record_dataset(get_filenames(is_training, data_dir))
    if is_training:
        # Shuffle with a buffer covering the whole training set.
        ds = ds.shuffle(buffer_size=NUM_IMAGES['train'])
    ds = ds.map(parse_record)
    ds = ds.map(lambda image, label: (preprocess_image(image, is_training), label),
                num_parallel_calls=4)
    # Pipeline tail: prefetch, repeat for the epoch count, then batch.
    ds = ds.prefetch(2 * batch_size).repeat(num_epochs).batch(batch_size)
    images, labels = ds.make_one_shot_iterator().get_next()
    return (images, labels)
class JaccardLoss(nn.Module):
    """Soft Jaccard-style loss.

    Per sample: 1 - (p·t) / (|p|^2 + |t|^2 - p·t + 1), summed over the three
    trailing dims, averaged over the batch and clamped to [0, 1].
    """

    def __init__(self):
        super().__init__()

    def forward(self, pred, target):
        pred = pred.squeeze(dim=1)
        smooth = 1
        # Reduce over everything except the batch dimension.
        dims = (1, 2, 3)
        intersection = (pred * target).sum(dim=dims)
        union = pred.pow(2).sum(dim=dims) + target.pow(2).sum(dim=dims) - intersection
        score = intersection / (union + smooth)
        return torch.clamp((1 - score).mean(), 0, 1)
def _make_pretrained_swin2t16_256(pretrained, hooks=None):
    """Create a SwinV2-Tiny window-16 256px backbone wrapped for feature hooks.

    Args:
        pretrained: forwarded to ``timm.create_model``.
        hooks: per-stage hook indices; defaults to [1, 1, 5, 1] when omitted.
    """
    model = timm.create_model('swinv2_tiny_window16_256', pretrained=pretrained)
    # FIX: compare against None with ``is`` (identity), not ``==``.
    hooks = [1, 1, 5, 1] if hooks is None else hooks
    return _make_swin_backbone(model, hooks=hooks, patch_grid=[64, 64])
class GradedSpikes(torch.nn.Module):
    """Scales inputs element-wise by a learnable per-unit weight column.

    Weights start at ``constant_factor`` when it is truthy; otherwise they
    are drawn uniformly from [0.5, 1.5).
    """

    def __init__(self, size, constant_factor):
        super().__init__()
        self.size = size
        if constant_factor:
            initial = torch.ones(size=[size, 1]) * constant_factor
        else:
            initial = torch.rand(size=[size, 1]) + 0.5
        self.weights = torch.nn.Parameter(initial)

    def forward(self, x):
        # Broadcasting element-wise product with the learned weights.
        return x * self.weights
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Move ``net`` to GPU(s) when ids are given, then initialise its weights.

    Note: the ``gpu_ids=[]`` mutable default is kept for interface
    compatibility; it is never mutated here.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        # Wrap for multi-GPU execution across the given device ids.
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
# NOTE(review): the line below was a decorator in the original source whose
# callable was lost during extraction (it reads like
# ``@deprecated(version='2.3.0', reason='Please use spark engine and ray engine.')``).
# It is kept as a comment because the bare keyword tuple is a syntax error:
# (version='2.3.0', reason='Please use spark engine and ray engine.')
class TorchModel(Layer):
    """BigDL Layer wrapper around a pickled PyTorch module.

    Holds both the JVM-side handle (``value``) and the serialized module
    bytes so the model can be converted back to PyTorch.
    """

    def __init__(self, jvalue, module_bytes, bigdl_type='float'):
        self.value = jvalue                # JVM-side model handle
        self.module_bytes = module_bytes   # pickled PyTorch module bytes
        self.bigdl_type = bigdl_type

    # FIX: these factories take no ``self``/``cls`` — they were clearly meant
    # to be static (instance calls would previously have raised TypeError).
    @staticmethod
    def from_value(model_value):
        """Rebuild a TorchModel from an existing JVM model handle."""
        model_bytes = callZooFunc('float', 'getTorchModelBytes', model_value)
        net = TorchModel(model_value, model_bytes)
        return net

    @staticmethod
    def from_pytorch(model):
        """Create a TorchModel from a PyTorch module (or a factory returning one)."""
        weights = []
        import types
        # ``model`` may be a function/class factory; instantiate it to read weights.
        if (isinstance(model, types.FunctionType) or isinstance(model, type)):
            for param in trainable_param(model()):
                weights.append(param.view(-1))
        else:
            for param in trainable_param(model):
                weights.append(param.view(-1))
        flatten_weight = torch.nn.utils.parameters_to_vector(weights).data.numpy()
        bys = io.BytesIO()
        torch.save(model, bys, pickle_module=zoo_pickle_module)
        weights = JTensor.from_ndarray(flatten_weight)
        jvalue = callZooFunc('float', 'createTorchModel', bys.getvalue(), weights)
        net = TorchModel(jvalue, bys.getvalue())
        return net

    def to_pytorch(self):
        """Deserialize back into a PyTorch module carrying the current weights."""
        new_weight = self.get_weights()
        invalidInputError((len(new_weight) == 1), "TorchModel's weights should be one tensor")
        m = torch.load(io.BytesIO(self.module_bytes), pickle_module=zoo_pickle_module)
        import types
        if (isinstance(m, types.FunctionType) or isinstance(m, type)):
            m = m()
        w = torch.Tensor(new_weight[0])
        torch.nn.utils.vector_to_parameters(w, trainable_param(m))
        # Restore non-trainable buffers (e.g. BatchNorm running statistics),
        # relying on named_buffers() ordering matching the extra params.
        new_extra_params = callZooFunc(self.bigdl_type, 'getModuleExtraParameters', self.value)
        if (len(new_extra_params) != 0):
            idx = 0
            for named_buffer in m.named_buffers():
                named_buffer[1].copy_(torch.reshape(torch.Tensor(new_extra_params[idx].to_ndarray()), named_buffer[1].size()))
                idx += 1
        return m

    def saveModel(self, path, over_write=False):
        """Persist the JVM-side model to ``path``."""
        from bigdl.dllib.utils.common import callBigDlFunc
        callBigDlFunc(self.bigdl_type, 'modelSave', self.value, path, over_write)

    @staticmethod
    def loadModel(path, bigdl_type='float'):
        """Load a previously saved BigDL model from ``path``."""
        from bigdl.dllib.utils.common import callBigDlFunc
        jmodel = callBigDlFunc(bigdl_type, 'loadBigDL', path)
        return Layer.of(jmodel)
def get_end_to_end_prefix_allowed_tokens_fn_hf(model, sentences: List[str], start_mention_token='{', end_mention_token='}', start_entity_token='[', end_entity_token=']', mention_trie: Trie=None, candidates_trie: Trie=None, mention_to_candidates_dict: Dict[(str, List[str])]=None):
    """HF-tokenizer adapter for the generic end-to-end prefix-allowed-tokens factory."""
    def encode_fn(text):
        return model.tokenizer.encode(text)

    def decode_fn(token_ids):
        return model.tokenizer.decode(torch.tensor(token_ids))

    vocabulary_length = len(model.tokenizer) - 1
    return _get_end_to_end_prefix_allowed_tokens_fn(
        encode_fn,
        decode_fn,
        model.tokenizer.bos_token_id,
        model.tokenizer.pad_token_id,
        model.tokenizer.eos_token_id,
        vocabulary_length,
        sentences,
        start_mention_token,
        end_mention_token,
        start_entity_token,
        end_entity_token,
        mention_trie,
        candidates_trie,
        mention_to_candidates_dict,
    )
# NOTE(review): the original had a bare ``_tokenizers`` expression here —
# almost certainly a decorator (e.g. ``@require_tokenizers``) whose '@' was
# stripped during extraction.  As a bare name it would raise NameError at
# import time, so it is kept only as this comment.
class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite bindings for GPTNeoXJapaneseTokenizer.

    NOTE(review): several string literals below (vocab entries, sample
    texts) appear to have lost their non-ASCII characters during extraction;
    they are reproduced verbatim rather than guessed at.
    """

    tokenizer_class = GPTNeoXJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        """Write a minimal vocab file and emoji map into the temp dir."""
        super().setUp()
        vocab_tokens = ['', '', '', '', ',', '', '', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|startoftext|>', '<|endoftext|>']
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        """Instantiate the tokenizer from the temp dir with special tokens set."""
        kwargs.update(self.special_tokens_map)
        return GPTNeoXJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = ' \n'
        output_text = ' \n'
        return (input_text, output_text)

    def get_clean_sequence(self, tokenizer):
        """Return a (text, ids) pair that round-trips through the tokenizer."""
        (input_text, output_text) = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return (text, ids)

    # Base-suite tests that do not apply to this tokenizer are disabled below.
    def test_pretokenized_inputs(self):
        pass

    def test_maximum_encoding_length_pair_input(self):
        pass

    def test_maximum_encoding_length_single_input(self):
        pass

    def test_full_tokenizer(self):
        """tokenize / convert_tokens_to_ids must match the expected sequences,
        including unknown-token handling."""
        tokenizer = self.get_tokenizer()
        input_text = '\u3000'
        expected_token = ['', '', '', '', '', '<SP>', '', '', '', '', '']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        input_tokens = (tokens + [tokenizer.unk_token])
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must be plain concatenation here."""
        tokenizer = self.tokenizer_class.from_pretrained('abeja/gpt-neox-japanese-2.7b')
        ids_1 = tokenizer.encode('', add_special_tokens=False)
        ids_2 = tokenizer.encode('', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2)
        assert (encoded_sentence == ids_1)
        assert (encoded_pair == (ids_1 + ids_2))

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
def crossentropy_with_threshold(labels, logits, weights, threshold):
    """Weighted cross-entropy averaged only over confident examples.

    An example contributes only when the entropy of its softmax prediction
    is at most ``threshold``; weights are renormalised to sum to one first.
    """
    probabilities = tf.math.softmax(logits)
    # Normalise the per-example weights.
    weights = (weights / tf.reduce_sum(weights))
    # Predictive entropy via logits: logsumexp - sum(p * logits).
    entropies = ((- tf.reduce_sum((probabilities * logits), axis=1)) + tf.reduce_logsumexp(logits, axis=1))
    # Cross-entropy against the label distribution, same logsumexp trick.
    costs = ((- tf.reduce_sum((labels * logits), axis=1)) + tf.reduce_logsumexp(logits, axis=1))
    # 1.0 for confident examples, 0.0 otherwise.
    masks = tf.where((entropies <= threshold), tf.ones_like(entropies), tf.zeros_like(entropies))
    masked_weights = weights * masks
    return tf.math.divide(tf.reduce_sum(masked_weights * costs), tf.reduce_sum(masked_weights))
class StanleyController(object):
    """Stanley lateral steering controller plus a P-controller for speed.

    Tracks a reference path given as (x_ref, y_ref, yaw_ref) lists with a
    scalar reference speed.  ``solve`` returns (acceleration, steering,
    target index); ``step`` integrates a kinematic bicycle model.
    """

    def __init__(self, control_params: StanleyParams=StanleyParams(), vehicle_body: VehicleBody=VehicleBody(), vehicle_config: VehicleConfig=VehicleConfig()):
        super().__init__()
        self.k = control_params.k                    # cross-track error gain
        self.Kp = control_params.Kp                  # speed P gain
        self.Kp_braking = control_params.Kp_braking  # speed P gain while braking
        self.dt = control_params.dt                  # integration step
        self.L = vehicle_body.wb                     # wheelbase
        self.max_steer = vehicle_config.delta_max    # steering limit (symmetric)
        self.x_ref = []
        self.y_ref = []
        self.yaw_ref = []
        self.v_ref = 0.0
        # NOTE(review): stays None until set_target_idx is called;
        # stanley_control compares against it, so set it before solving.
        self.target_idx = None

    def set_ref_pose(self, x_ref: List[float], y_ref: List[float], yaw_ref: List[float]):
        """Install the reference path (positions and headings)."""
        self.x_ref = x_ref
        self.y_ref = y_ref
        self.yaw_ref = yaw_ref

    def set_ref_v(self, v_ref: float):
        """Install the reference speed."""
        self.v_ref = v_ref

    def set_target_idx(self, target_idx: int):
        """Seed the path index the controller may not move backwards from."""
        self.target_idx = target_idx

    def calc_target_index(self, state: VehicleState):
        """Return (closest path index to the front axle, signed front-axle error)."""
        # Front-axle position projected ahead of the reference point by L.
        fx = (state.x.x + (self.L * np.cos(state.e.psi)))
        fy = (state.x.y + (self.L * np.sin(state.e.psi)))
        dx = [(fx - icx) for icx in self.x_ref]
        dy = [(fy - icy) for icy in self.y_ref]
        d = np.hypot(dx, dy)
        target_idx = np.argmin(d)
        # Project the error onto the axis perpendicular to the heading to get
        # a signed cross-track error.
        front_axle_vec = [(- np.cos((state.e.psi + (np.pi / 2)))), (- np.sin((state.e.psi + (np.pi / 2))))]
        error_front_axle = np.dot([dx[target_idx], dy[target_idx]], front_axle_vec)
        return (target_idx, error_front_axle)

    def pid_control(self, target, current, braking=False):
        """Proportional speed control; a separate gain applies while braking."""
        if (not braking):
            return (self.Kp * (target - current))
        else:
            return (self.Kp_braking * (target - current))

    def stanley_control(self, state: VehicleState):
        """Stanley law: heading error plus arctan of scaled cross-track error."""
        (current_target_idx, error_front_axle) = self.calc_target_index(state)
        # Never move backwards along the path.
        if (self.target_idx >= current_target_idx):
            current_target_idx = self.target_idx
        theta_e = normalize_angle((self.yaw_ref[current_target_idx] - state.e.psi))
        theta_d = np.arctan2((self.k * error_front_axle), state.v.v)
        delta = (theta_e + theta_d)
        return (delta, current_target_idx)

    def solve(self, state: VehicleState, braking=False):
        """Compute (acceleration, steering angle, target index) for ``state``."""
        a = self.pid_control(self.v_ref, state.v.v, braking)
        (d, current_target_idx) = self.stanley_control(state)
        return (a, d, current_target_idx)

    def step(self, state: VehicleState, acceleration: float, delta: float):
        """Advance ``state`` in place one dt with a kinematic bicycle model.

        The commanded (unclipped) steering is stored in the control inputs;
        the clipped value drives the kinematics.
        """
        state.u.u_a = acceleration
        state.u.u_steer = delta
        delta = np.clip(delta, (- self.max_steer), self.max_steer)
        state.x.x += ((state.v.v * np.cos(state.e.psi)) * self.dt)
        state.x.y += ((state.v.v * np.sin(state.e.psi)) * self.dt)
        state.e.psi += (((state.v.v / self.L) * np.tan(delta)) * self.dt)
        state.e.psi = normalize_angle(state.e.psi)
        state.v.v += (acceleration * self.dt)
class k8sClient(object):
    """Namespace-scoped convenience wrapper around the Kubernetes client.

    NOTE(review): the bare ``_k8s_request`` lines in the original were almost
    certainly ``@_k8s_request`` decorators whose '@' was stripped during
    extraction; they are restored as decorators here.  Likewise
    ``singleton_instance`` and ``create_owner_reference`` take ``cls`` but had
    no ``@classmethod`` — without it ``k8sClient.singleton_instance(ns)``
    binds ``ns`` to ``cls`` and fails, so the decorator is restored.
    """

    _instance_lock = threading.Lock()

    def __init__(self, namespace):
        # Prefer in-cluster credentials when running inside a pod.
        try:
            if os.getenv('KUBERNETES_SERVICE_HOST'):
                config.load_incluster_config()
                logger.info('Load the incluster config.')
            else:
                config.load_kube_config()
                logger.info('Load the kube config file.')
        except Exception as ex:
            logger.error('Failed to load configuration for Kubernetes:\n%s', ex)
        self.client = client.CoreV1Api()
        self.api_instance = client.CustomObjectsApi()
        self.api_client = client.ApiClient()
        self._namespace = namespace

    @_k8s_request
    def list_namespaced_pod(self, label_selector):
        """List pods in this namespace matching ``label_selector``."""
        pod_list = self.client.list_namespaced_pod(self._namespace, label_selector=label_selector)
        return pod_list

    @_k8s_request
    def create_custom_resource(self, group, version, plural, body):
        self.api_instance.create_namespaced_custom_object(group=group, version=version, namespace=self._namespace, plural=plural, body=body)

    @_k8s_request
    def patch_custom_resource(self, group, version, plural, name, body):
        self.api_instance.patch_namespaced_custom_object(group=group, version=version, namespace=self._namespace, plural=plural, name=name, body=body)

    def delete_custom_resource(self, group, version, plural, name):
        """Delete a custom resource; an already-absent resource is not an error."""
        try:
            self.api_instance.delete_namespaced_custom_object(group=group, version=version, namespace=self._namespace, plural=plural, name=name)
        except client.rest.ApiException as e:
            if (e.reason != k8sAPIExceptionReason.NOT_FOUND):
                logger.error('Fail to delete %s', name)

    @_k8s_request
    def get_custom_resource(self, name, group, version, plural):
        crd_object = self.api_instance.get_namespaced_custom_object(namespace=self._namespace, name=name, group=group, version=version, plural=plural)
        return crd_object

    @_k8s_request
    def get_configmap(self, name):
        configmap = self.client.read_namespaced_config_map(namespace=self._namespace, name=name)
        return configmap

    def create_pod(self, pod):
        """Create ``pod``; returns True on success, False on API error."""
        try:
            self.client.create_namespaced_pod(self._namespace, pod)
            return True
        except client.rest.ApiException as e:
            logger.warning('Failed to create %s pod: %s\n', pod.metadata.name, e)
            return False

    @_k8s_request
    def get_pod(self, name):
        return self.client.read_namespaced_pod(namespace=self._namespace, name=name)

    def delete_pod(self, name):
        """Delete a pod; NOT_FOUND also counts as success."""
        try:
            self.client.delete_namespaced_pod(name, self._namespace, body=client.V1DeleteOptions())
            return True
        except client.ApiException as e:
            if (e.reason == k8sAPIExceptionReason.NOT_FOUND):
                return True
            logger.warning(('Exception when removing pod %s: %s\n' % (name, e)))
            return False

    @_k8s_request
    def patch_labels_to_pod(self, name, labels: Dict[(str, str)]):
        body = {'metadata': {'labels': labels}}
        return self.client.patch_namespaced_pod(name=name, namespace=self._namespace, body=body)

    @_k8s_request
    def patch_annotations_to_pod(self, name, annotations: Dict[(str, str)]):
        body = {'metadata': {'annotations': annotations}}
        return self.client.patch_namespaced_pod(name=name, namespace=self._namespace, body=body)

    def create_service(self, service: client.V1Service):
        """Create a service; returns True on success, False on API error."""
        try:
            self.client.create_namespaced_service(self._namespace, service)
            return True
        except client.rest.ApiException as e:
            logger.warning(('Failed to create %s service: %s\n' % (service.metadata.name, e)))
            return False

    def patch_service(self, name, service: client.V1Service):
        """Patch a service; returns True on success, False on API error."""
        try:
            self.client.patch_namespaced_service(name, self._namespace, service)
            return True
        except client.rest.ApiException as e:
            logger.warning(('Failed to patch %s service: %s\n' % (name, e)))
            return False

    @_k8s_request
    def get_service(self, name):
        return self.client.read_namespaced_service(name=name, namespace=self._namespace)

    def create_pvc(self, pvc):
        """Create a persistent volume claim; returns True/False like create_pod."""
        try:
            self.client.create_namespaced_persistent_volume_claim(self._namespace, pvc)
            return True
        except client.rest.ApiException as e:
            logger.warning(('Failed to create %s persistent volume claim: %s\n' % (pvc.metadata.name, e)))
            return False

    @classmethod
    def singleton_instance(cls, *args, **kwargs):
        """Return the shared client, creating it with double-checked locking."""
        if (not hasattr(k8sClient, '_instance')):
            with k8sClient._instance_lock:
                if (not hasattr(k8sClient, '_instance')):
                    k8sClient._instance = k8sClient(*args, **kwargs)
        return k8sClient._instance

    @classmethod
    def create_owner_reference(cls, api_version, kind, name, uid):
        """Build a V1OwnerReference enabling cascading (blocking) deletion."""
        owner_ref = client.V1OwnerReference(api_version=api_version, block_owner_deletion=True, kind=kind, name=name, uid=uid)
        return owner_ref
def is_alphabet(uchar):
    """Return True iff ``uchar`` sorts inside the ASCII letter ranges A-Z or a-z."""
    return ('A' <= uchar <= 'Z') or ('a' <= uchar <= 'z')
def render_pep440_pre(pieces):
    """Render version pieces as TAG[.postN.devDISTANCE] (PEP 440 style).

    With no closest tag the fallback is ``0.post0.devDISTANCE``; with a tag
    and zero distance the tag is returned unchanged.
    """
    tag = pieces['closest-tag']
    if not tag:
        return '0.post0.dev%d' % pieces['distance']
    distance = pieces['distance']
    if not distance:
        return tag
    tag_version, post_version = pep440_split_post(tag)
    if post_version is None:
        # Tag has no .postN part yet: start a fresh post-release.
        return tag_version + ('.post0.dev%d' % distance)
    # Bump the existing post number and attach the dev distance.
    return tag_version + ('.post%d.dev%d' % (post_version + 1, distance))
def read(in_files, l_files, input_file):
    """Recursively collect ``.text`` files (and their ``.label`` twins).

    Directories are walked depth-first; files not ending in '.text' are
    ignored.  Returns the (extended in place) ``(in_files, l_files)`` lists.
    """
    if os.path.isdir(input_file):
        for entry in os.listdir(input_file):
            in_files, l_files = read(in_files, l_files, input_file + '/' + entry)
    elif input_file.endswith('.text'):
        in_files.append(input_file)
        # The label file sits next to the text file with a swapped extension.
        l_files.append(input_file.replace('.text', '.label'))
    return (in_files, l_files)
def mobilenetv3_small_minimal_100(pretrained=False, **kwargs):
    """MobileNetV3-Small-Minimal at width multiplier 1.0."""
    return _gen_mobilenet_v3('mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs)
class BatchResolver(Resolver):
    """Resolver that delivers queued messages to agents in per-receiver batches.

    Messages pushed during one round are queued and handled in the next
    round; resolution runs until the queue drains or ``round_limit`` rounds
    have elapsed (in which case leftover messages raise).
    """

    def __init__(self, enable_tracking: bool=False, round_limit: Optional[int]=None, shuffle_batches: bool=False) -> None:
        super().__init__(enable_tracking)
        self.round_limit = round_limit          # None means run until drained
        self.shuffle_batches = shuffle_batches  # randomize batch order per receiver
        # Pending messages keyed by receiving agent id.
        self.messages: DefaultDict[(AgentID, List[Message])] = defaultdict(list)

    def reset(self) -> None:
        """Drop all queued messages."""
        self.messages.clear()

    def handle_push(self, message: Message) -> None:
        """Queue ``message`` for delivery in the next resolution round."""
        self.messages[message.receiver_id].append(message)

    def resolve(self, network: 'Network', contexts: Mapping[(AgentID, Context)]) -> None:
        """Deliver batches round by round until drained or the limit is hit.

        Raises RuntimeError when the limit is reached with messages pending.
        """
        iterator = (itertools.count() if (self.round_limit is None) else range(self.round_limit))
        for i in iterator:
            if (len(self.messages) == 0):
                break
            logger.log_resolver_round(i, self.round_limit)
            # Swap in a fresh queue so responses produced this round are
            # delivered in the *next* round, not re-entered into this one.
            processing_messages = self.messages
            self.messages = defaultdict(list)
            for (receiver_id, messages) in processing_messages.items():
                # Skip receivers without a context this round.
                if (receiver_id not in contexts):
                    continue
                # Only deliver along edges that still exist in the network.
                msgs = [m for m in messages if network.has_edge(m.sender_id, m.receiver_id)]
                if self.shuffle_batches:
                    np.random.shuffle(msgs)
                ctx = contexts[receiver_id]
                responses = ctx.agent.handle_batch(ctx, msgs)
                if (responses is not None):
                    for (sub_receiver_id, sub_payload) in responses:
                        network.send(receiver_id, sub_receiver_id, sub_payload)
        if (len(self.messages) > 0):
            raise RuntimeError(f'{len(self.messages)} message(s) still in queue after BatchResolver round limit reached.')
def instanciate_transformation(cmd_line):
    """Build ``tvf.Compose([...])`` from a comma-separated transform spec string.

    Non-string inputs are assumed to already be transform objects and are
    returned unchanged.  On an eval failure the reason is printed and the
    function implicitly returns None — callers must cope with that.

    SECURITY NOTE(review): ``eval`` executes arbitrary code contained in
    ``cmd_line``; only ever pass trusted, locally-authored specs here.
    """
    if (not isinstance(cmd_line, str)):
        return cmd_line
    cmd_line = ('tvf.Compose([%s])' % cmd_line)
    try:
        return eval(cmd_line)
    except Exception as e:
        print(('Cannot interpret this transform list: %s\nReason: %s' % (cmd_line, e)))
class Blip2ForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder used when the real implementation is unavailable.

    NOTE(review): standard dummy-object pattern — ``requires_backends``
    presumably raises a helpful error when the 'torch' backend is missing;
    confirm against the utils module that defines it.
    """
    # Backends this placeholder guards on.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_lr_and_max_steps(examples_per_epoch, batch_size, num_gpus, lr_decay_factor, epochs_per_decay, initial_lr, global_step, staircase, max_epochs):
    """Build the learning-rate schedule tensor and the total step count.

    ``lr_decay_factor`` selects the policy: a float yields exponential
    decay, a list yields a piecewise-constant schedule; any other type
    raises ValueError.
    """
    num_batches_per_epoch = examples_per_epoch / batch_size / num_gpus
    if isinstance(lr_decay_factor, float):
        # Exponential decay every ``epochs_per_decay`` epochs.
        decay_steps = int(num_batches_per_epoch * epochs_per_decay)
        lr = tf.train.exponential_decay(initial_lr, global_step, decay_steps, lr_decay_factor, staircase=staircase)
    elif isinstance(lr_decay_factor, list):
        # Piecewise-constant: drop the rate at each listed epoch boundary.
        boundaries = [num_batches_per_epoch * epoch for epoch in epochs_per_decay]
        vals = [initial_lr * decay for decay in lr_decay_factor]
        lr = tf.train.piecewise_constant(global_step, boundaries, vals)
    else:
        raise ValueError('unknown lr policy')
    # Identical in both branches, so computed once here.
    max_steps = int(max_epochs * num_batches_per_epoch)
    return (lr, max_steps)
def discriminator_gradient_penalty(d_result_real, reals, r1_gamma=10.0):
    """R1 regularizer: batch mean of gamma/2 * ||grad_x D(x)||^2 on real samples."""
    grads = torch.autograd.grad(d_result_real.sum(), reals,
                                create_graph=True, retain_graph=True)[0]
    # Squared gradient norm per sample (sum over all non-batch dims).
    per_sample_penalty = grads.pow(2.0).sum(dim=[1, 2, 3])
    return (per_sample_penalty * (r1_gamma * 0.5)).mean()
class LlavaMetaModel():
    """Mixin adding a vision tower + projector to a language-model backbone.

    NOTE(review): calls ``super().__init__(config)`` despite subclassing only
    object here — this mixin is clearly meant to sit in a cooperative MRO
    next to a config-taking model class; confirm the composing class.
    """

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)
        # Only pre-wired configs get the tower at construction time;
        # otherwise initialize_vision_modules must be called later.
        if hasattr(config, 'mm_vision_tower'):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(config)

    def get_vision_tower(self):
        """Return the vision tower, unwrapping the FSDP single-element list."""
        vision_tower = getattr(self, 'vision_tower', None)
        if (type(vision_tower) is list):
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        """Build the vision tower/projector from ``model_args`` and record the
        multimodal settings on ``self.config``; optionally loads pretrained
        projector weights.
        """
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
        self.config.mm_vision_tower = vision_tower
        vision_tower = build_vision_tower(model_args)
        # Under FSDP the tower is kept in a list (keeps it out of the
        # module tree wrapped by FSDP, presumably — confirm upstream).
        if ((fsdp is not None) and (len(fsdp) > 0)):
            self.vision_tower = [vision_tower]
        else:
            self.vision_tower = vision_tower
        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_hidden_size = vision_tower.hidden_size
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature
        self.mm_projector = build_vision_projector(self.config)
        if (pretrain_mm_mlp_adapter is not None):
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')

            def get_w(weights, keyword):
                # Strip the "<keyword>." prefix from matching state-dict keys.
                return {k.split((keyword + '.'))[1]: v for (k, v) in weights.items() if (keyword in k)}
            self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class NormalizedDegree(object):
    """Graph transform that sets ``data.x`` to the standardized node degree."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, data):
        # Degree of each source node, z-normalized with the stored statistics.
        normalized = (degree(data.edge_index[0], dtype=torch.float) - self.mean) / self.std
        data.x = normalized.view(-1, 1)
        return data
def save_segmask_as_nifi_volume(seg_mask: np.ndarray, aff_func, path: str):
    """Write ``seg_mask`` to ``path`` as a NIfTI volume with affine ``aff_func``."""
    nib.Nifti1Image(seg_mask, aff_func).to_filename(path)
def fft2c(x):
    """Centered, orthonormal 2-D FFT over the last two axes."""
    last_two = (-2, -1)
    shifted = ifftshift(x, axes=last_two)
    return fftshift(fft2(shifted, norm='ortho'), axes=last_two)
def calculate_number_of_labels_distribution(data_dir, filtered_by=None):
    """Histogram of answer-set sizes as an iterable of (size, count) pairs."""
    answer_sets = get_all_answers(data_dir, filtered_by=filtered_by).values()
    return Counter(len(answer_set) for answer_set in answer_sets).items()
def tgasPath(dr=1, old=False):
    """Paths of the 16 TGAS source FITS files under the Gaia data directory.

    ``old`` selects the pre-gdr1 directory layout; ``dr`` is kept for
    interface compatibility and is not consulted here.
    """
    if old:
        subdirs = ('Gaia', 'tgas_source', 'fits')
    else:
        subdirs = ('Gaia', 'gdr1', 'tgas_source', 'fits')
    return [os.path.join(_GAIA_TOOLS_DATA, *subdirs, 'TgasSource_000-000-%03i.fits' % ii)
            for ii in range(16)]
def parse_url(url):
    """Map a source image URL to its local resized-image path.

    The 5th ``/``-separated segment is treated as the folder name; the 6th
    is split on ``'?'`` and its pieces are rejoined in reverse order with
    ``'.'`` to form the local file name.
    """
    segments = url.split('/')
    folder = segments[4]
    name_parts = segments[5].split('?')
    local_name = '.'.join(reversed(name_parts))
    return ('/dccstor/extrastore/Neural-Naturalist/data/resized_images/'
            + folder + '.' + local_name)
def test_modelcheckpoint_mode_options():
    """ModelCheckpoint must derive its comparison op and initial best value
    from ``mode``, and infer them from the monitored metric when ``mode`` is
    left at its default ('val_loss' -> min, 'acc' -> max).

    Bug fixed: the original reused ``auto_is_max``/``auto_best_minus_inf``
    for both checkpoint 4 and checkpoint 5, so checkpoint 4's results were
    overwritten and never actually asserted.
    """
    fpath = 'tests/test_model_functioning/modelcheckpoint/weights_out'
    model_checkpoint_1 = ModelCheckpoint(filepath=fpath, monitor='val_loss', mode='min')
    model_checkpoint_2 = ModelCheckpoint(filepath=fpath, monitor='val_loss')
    model_checkpoint_3 = ModelCheckpoint(filepath=fpath, monitor='acc', mode='max')
    model_checkpoint_4 = ModelCheckpoint(filepath=fpath, monitor='acc')
    model_checkpoint_5 = ModelCheckpoint(filepath=None, monitor='acc')
    # Explicit modes.
    is_min = (model_checkpoint_1.monitor_op is np.less)
    best_inf = (model_checkpoint_1.best is np.Inf)
    is_max = (model_checkpoint_3.monitor_op is np.greater)
    best_minus_inf = ((- model_checkpoint_3.best) == np.Inf)
    # Inferred ("auto") modes.
    auto_is_min = (model_checkpoint_2.monitor_op is np.less)
    auto_best_inf = (model_checkpoint_2.best is np.Inf)
    auto_is_max = (model_checkpoint_4.monitor_op is np.greater)
    auto_best_minus_inf = ((- model_checkpoint_4.best) == np.Inf)
    # filepath=None must not change the inferred mode.
    nofile_is_max = (model_checkpoint_5.monitor_op is np.greater)
    nofile_best_minus_inf = ((- model_checkpoint_5.best) == np.Inf)
    shutil.rmtree('tests/test_model_functioning/modelcheckpoint/')
    assert all([is_min, best_inf, is_max, best_minus_inf,
                auto_is_min, auto_best_inf, auto_is_max, auto_best_minus_inf,
                nofile_is_max, nofile_best_minus_inf])
class CosineWarmupLR(lr_sched._LRScheduler):
    """Half-cosine ramp schedule.

    The learning rate starts at ``eta_min`` and follows the rising half of a
    cosine curve, reaching each parameter group's base LR after ``T_max``
    epochs: lr = eta_min + (base_lr - eta_min) * (1 - cos(pi*t/T_max)) / 2.
    """

    def __init__(self, optimizer, T_max, eta_min=0, last_epoch=(- 1)):
        # Attributes must exist before the base __init__, which calls step().
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineWarmupLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the LR for each group at the current ``last_epoch``."""
        progress = (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
        return [self.eta_min + (base_lr - self.eta_min) * progress
                for base_lr in self.base_lrs]
class A2CPipeline(BasicPipeline):
    """Pipeline wiring for advantage-actor-critic (A2C) fine-tuning.

    Stores the network, abstractor, batchers, optimizer and reward
    configuration, delegating the actual work to ``a2c_train_step`` and
    ``a2c_validate``.
    """

    def __init__(self, name, net, abstractor, train_batcher, val_batcher, optim, grad_fn, reward_fn, gamma, stop_reward_fn, stop_coeff):
        self.name = name
        # Model and the abstractor used during rollouts.
        self._net = net
        self._abstractor = abstractor
        # Data sources.
        self._train_batcher = train_batcher
        self._val_batcher = val_batcher
        # Optimization hooks.
        self._opt = optim
        self._grad_fn = grad_fn
        # Reward shaping configuration.
        self._gamma = gamma
        self._reward_fn = reward_fn
        self._stop_reward_fn = stop_reward_fn
        self._stop_coeff = stop_coeff
        self._n_epoch = 0

    def batches(self):
        # A2C pulls data through its batcher inside the train step instead.
        raise NotImplementedError('A2C does not use batcher')

    def train_step(self):
        self._net.train()
        return a2c_train_step(self._net, self._abstractor,
                              self._train_batcher, self._opt, self._grad_fn,
                              self._gamma, self._reward_fn,
                              self._stop_reward_fn, self._stop_coeff)

    def validate(self):
        return a2c_validate(self._net, self._abstractor, self._val_batcher)

    def checkpoint(self, *args, **kwargs):
        # Delegates entirely to the base implementation.
        return super().checkpoint(*args, **kwargs)

    def terminate(self):
        pass
def build_pnasnet_mobile(images, num_classes, is_training=True, final_endpoint=None, config=None):
    """Construct the PNASNet "mobile" model graph.

    Args:
        images: input image batch (NHWC as supplied by callers).
        num_classes: number of output classes for the logits layer.
        is_training: build the training-mode graph (dropout, drop-path,
            batch-norm updates).
        final_endpoint: optional endpoint name at which to stop building.
        config: optional hparams object; deep-copied so the caller's copy is
            never mutated. Defaults to ``mobile_imagenet_config()``.

    Returns:
        Whatever ``_build_pnasnet_base`` returns.
    """
    hparams = copy.deepcopy(config) if config else mobile_imagenet_config()
    nasnet._update_hparams(hparams, is_training)
    if tf.test.is_gpu_available() and (hparams.data_format == 'NHWC'):
        tf.logging.info('A GPU is available on the machine, consider using NCHW data format for increased speed on GPU.')
    if hparams.data_format == 'NCHW':
        # Callers feed NHWC images; transpose for NCHW execution.
        images = tf.transpose(images, [0, 3, 1, 2])
    # The stem contributes two extra cells beyond the configured count.
    total_num_cells = hparams.num_cells + 2
    normal_cell = PNasNetNormalCell(hparams.num_conv_filters,
                                    hparams.drop_path_keep_prob,
                                    total_num_cells,
                                    hparams.total_training_steps,
                                    hparams.use_bounded_activation)
    training_ops = [slim.dropout, nasnet_utils.drop_path, slim.batch_norm]
    layout_ops = [slim.avg_pool2d, slim.max_pool2d, slim.conv2d,
                  slim.batch_norm, slim.separable_conv2d,
                  nasnet_utils.factorized_reduction,
                  nasnet_utils.global_avg_pool,
                  nasnet_utils.get_channel_index,
                  nasnet_utils.get_channel_dim]
    with arg_scope(training_ops, is_training=is_training), \
         arg_scope(layout_ops, data_format=hparams.data_format):
        return _build_pnasnet_base(images, normal_cell=normal_cell,
                                   num_classes=num_classes, hparams=hparams,
                                   is_training=is_training,
                                   final_endpoint=final_endpoint)
class TestEqualize(unittest.TestCase):
    """Unit tests for the ``Equalize`` augmentation transform."""

    def setUp(self):
        # Result keys that must be preserved/compared across the transform.
        self.check_keys = ('img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_masks',
                           'gt_ignore_flags', 'gt_seg_map')
        self.results_mask = construct_toy_data(poly2mask=True)

    def test_equalize(self):
        # prob=0.0 must leave every tracked key untouched.
        no_op = Equalize(prob=0.0)
        unchanged = no_op(copy.deepcopy(self.results_mask))
        check_result_same(self.results_mask, unchanged, self.check_keys)
        # prob=1.0 must at least run without raising.
        always_on = Equalize(prob=1.0)
        always_on(copy.deepcopy(self.results_mask))

    def test_repr(self):
        transform = Equalize(prob=1.0)
        self.assertEqual(repr(transform),
                         'Equalize(prob=1.0, level=None, min_mag=0.1, max_mag=1.9)')
def input_handler(data, context):
    """Serving-time pre-processing hook.

    Reads newline-delimited JSON from ``data``, takes the ``instances`` list
    from the first line, and forwards only the most recent instance as a
    JSON payload.
    """
    payload = data.read().decode('utf-8')
    first_line = payload.split('\n')[0]
    session = json.loads(first_line)['instances']
    # Only the last element of the session is sent to the model.
    return json.dumps({'instances': [session[-1]]})
def find_lemmata(tokens):
    """Append a lowercase lemma to every (word, POS, ...) token list.

    Determiners are singularized, adjectives reduced to their predicative
    form, plural nouns singularized, and verbs/modals mapped to their
    infinitive (falling back to the surface form).
    """
    for token in tokens:
        word, pos = token[0], token[1]
        lemma = word
        if pos.startswith(('DT',)):
            lemma = singularize(word, pos='DT')
        if pos.startswith('JJ'):
            lemma = predicative(word)
        if pos == 'NNS':
            lemma = singularize(word)
        if pos.startswith(('VB', 'MD')):
            lemma = conjugate(word, INFINITIVE) or word
        token.append(lemma.lower())
    return tokens
def communicate_gather(tensors, rank, gsize, communication_op, group, dst=0, attention=False):
    """Gather a flattened copy of ``tensors`` from every rank onto rank 0.

    Args:
        tensors: tensors to communicate; flattened into one buffer.
        rank: this process's rank within ``group``.
        gsize: number of ranks in the group.
        communication_op: gather-style collective taking
            (tensor, gather_list, group, dst), e.g. ``dist.gather``.
        group: process group handle forwarded to ``communication_op``.
        dst: destination rank for the gather (default 0).
        attention: if True, return ``tensors / flat_tensor`` right after the
            collective (NOTE(review): elementwise division of the input by
            its own flattened copy looks suspicious — confirm intent).

    Returns:
        On rank 0: a list of ``gsize`` tensor collections, one per source
        rank, each shaped like ``tensors``. On other ranks: an empty list.
    """
    flat_tensor = flatten_tensors(tensors)
    # Only the destination rank allocates receive buffers.
    if (rank == 0):
        gather_list = [flat_tensor.clone() for _ in range(gsize)]
    else:
        gather_list = []
    communication_op(tensor=flat_tensor, gather_list=gather_list, group=group, dst=dst)
    if attention:
        return (tensors / flat_tensor)
    gather_parameters_list = []
    if (rank == 0):
        # Unflatten each rank's buffer back into tensors shaped like the input.
        for i in range(gsize):
            tensors_clone = copy.deepcopy(tensors)
            for (f, t) in zip(unflatten_tensors(gather_list[i], tensors_clone), tensors_clone):
                with torch.no_grad():
                    # Overwrite the clone's storage in place with gathered data.
                    t.set_(f)
            gather_parameters_list.append(tensors_clone)
        return gather_parameters_list
    else:
        return gather_parameters_list
_model
def hrnet_w44(pretrained=True, **kwargs):
    """Build an HRNet-W44 model, optionally loading pretrained weights."""
    model = _create_hrnet('hrnet_w44', pretrained, **kwargs)
    return model
def test_Combined15():
    """Combined15: E(B-V) must equal A(B)-A(V) at a test sky position."""
    glon, glat, dist = 10.0, 1.0, 2.0
    # Query each filter with a fresh map instance, deleting it afterwards to
    # keep peak memory down.
    combined_ebv = mwdust.Combined15(filter='E(B-V)')
    ebv = combined_ebv(glon, glat, dist)
    del combined_ebv
    combined_b = mwdust.Combined15(filter='Landolt B')
    ab = combined_b(glon, glat, dist)
    del combined_b
    combined_v = mwdust.Combined15(filter='Landolt V')
    av = combined_v(glon, glat, dist)
    assert numpy.fabs(ebv - (ab - av)) < 10.0 ** -12.0, 'Combined15 E(B-V) does not agree with A(B)-A(V)'
    return None
def build_features_t5(examples, data_type, out_file, tokenizer, max_input_length, max_output_length):
    """Tokenize, pad and serialize examples into a saved ``TensorDataset``.

    Each example contributes:
      * its input tokens, truncated and zero-padded to ``max_input_length``;
      * its turn ids, truncated and zero-padded the same way;
      * its output tokens wrapped as ``[0] + tokens + [eos]`` and padded to
        ``max_output_length``;
      * its integer id.
    The resulting dataset is written to ``out_file`` with ``torch.save``.
    """
    print('Processing {} examples...'.format(data_type))

    def _pad(seq, length):
        # Right-pad with zeros up to ``length``.
        return seq + [0] * (length - len(seq))

    input_inputs, output_inputs, turn_inputs, ids = [], [], [], []
    total = 0
    for example in tqdm(examples):
        total += 1
        src_ids = tokenizer.convert_tokens_to_ids(example['input'])[:max_input_length]
        input_inputs.append(_pad(src_ids, max_input_length))
        turn_ids = list(example['turn'])[:max_input_length]
        turn_inputs.append(_pad(turn_ids, max_input_length))
        tgt_ids = tokenizer.convert_tokens_to_ids(example['output'])
        tgt_ids = [0] + tgt_ids[:max_output_length - 2] + [tokenizer.eos_token_id]
        output_inputs.append(_pad(tgt_ids, max_output_length))
        ids.append(int(example['id']))
    input_inputs = torch.tensor(input_inputs, dtype=torch.long)
    turn_inputs = torch.tensor(turn_inputs, dtype=torch.long)
    output_inputs = torch.tensor(output_inputs, dtype=torch.long)
    ids = torch.tensor(ids, dtype=torch.long)
    dataset = TensorDataset(input_inputs, output_inputs, turn_inputs, ids)
    torch.save({'dataset': dataset}, out_file)
    print('Built {} instances of features in total'.format(total))
def get_setup_file():
    """Parse ``-f`` from the command line and return its value (None if absent)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f')
    parsed = arg_parser.parse_args()
    return parsed.f
def test(model, data_loader):
    """Evaluate ``model`` on ``data_loader``; return mean (loss, accuracy).

    Bug fixed: the original rebound the running ``loss`` accumulator to the
    current batch's loss tensor (``loss = loss_fcn(...)``) on every
    iteration, discarding everything accumulated so far (and returning a
    tensor instead of a float). It also shadowed the builtin ``iter``.
    """
    model.eval()
    total_loss = 0.0
    total_acc = 0.0
    num_batches = 0
    with torch.no_grad():
        for batch_idx, (batch_graphs, batch_targets, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].cuda()
            batch_e = batch_graphs.edata['feat'].cuda()
            batch_snorm_e = batch_snorm_e.cuda()
            batch_targets = batch_targets.cuda()
            batch_snorm_n = batch_snorm_n.cuda()
            model.g = batch_graphs
            batch_scores = model.forward(batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            # Accumulate plain Python floats; keep the batch loss separate
            # from the running total.
            batch_loss = loss_fcn(batch_scores, batch_targets)
            total_loss += batch_loss.item()
            total_acc += accuracy(batch_scores, batch_targets)
            num_batches = batch_idx + 1
    if num_batches == 0:
        # Empty loader: avoid division by zero.
        return (0.0, 0.0)
    total_loss /= num_batches
    total_acc /= num_batches
    return (total_loss, total_acc)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab-style).

    Five parallel branches — a 1x1 conv, three 3x3 atrous convs with rates
    6/12/18 (scaled by ``mult``), and a global-average-pooling branch — are
    concatenated, fused by a 1x1 conv + norm + relu, and projected to
    ``num_classes``. ``forward`` returns ``(logits, fused_features)``.
    """

    def __init__(self, C, depth, num_classes, conv=nn.Conv2d, norm=nn.BatchNorm2d, momentum=0.0003, mult=1, phase='train'):
        super(ASPP, self).__init__()
        self._C = C
        self._depth = depth
        self._num_classes = num_classes
        self.phase = phase
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): ``norm(depth, momentum)`` passes ``momentum``
        # positionally; for plain nn.BatchNorm2d that is the ``eps`` slot —
        # presumably ``norm`` is a wrapper taking (features, momentum); verify.
        self.aspp1 = conv(C, depth, kernel_size=1, stride=1, bias=False)
        self.aspp2 = conv(C, depth, kernel_size=3, stride=1, dilation=int(6 * mult), padding=int(6 * mult), bias=False)
        self.aspp3 = conv(C, depth, kernel_size=3, stride=1, dilation=int(12 * mult), padding=int(12 * mult), bias=False)
        self.aspp4 = conv(C, depth, kernel_size=3, stride=1, dilation=int(18 * mult), padding=int(18 * mult), bias=False)
        self.aspp5 = conv(C, depth, kernel_size=1, stride=1, bias=False)
        self.aspp1_bn = norm(depth, momentum)
        self.aspp2_bn = norm(depth, momentum)
        self.aspp3_bn = norm(depth, momentum)
        self.aspp4_bn = norm(depth, momentum)
        self.aspp5_bn = norm(depth, momentum)
        self.conv2 = conv(depth * 5, depth, kernel_size=1, stride=1, bias=False)
        self.bn2 = norm(depth, momentum)
        self.dropout = nn.Dropout2d(p=0.5)
        self.conv3 = nn.Conv2d(depth, num_classes, kernel_size=1, stride=1)

    def forward(self, x):
        # The four convolutional branches share the conv -> bn -> relu pattern.
        branches = []
        for conv_layer, bn_layer in ((self.aspp1, self.aspp1_bn),
                                     (self.aspp2, self.aspp2_bn),
                                     (self.aspp3, self.aspp3_bn),
                                     (self.aspp4, self.aspp4_bn)):
            branches.append(self.relu(bn_layer(conv_layer(x))))
        # Image-level branch: global pool, 1x1 conv, upsample back to input size.
        pooled = self.global_pooling(x)
        pooled = self.relu(self.aspp5_bn(self.aspp5(pooled)))
        pooled = F.interpolate(pooled, (x.shape[2], x.shape[3]), mode='bilinear', align_corners=True)
        branches.append(pooled)
        fused = torch.cat(branches, 1)
        fused = self.relu(self.bn2(self.conv2(fused)))
        # NOTE: dropout is applied only in 'test' phase, as in the original.
        if self.phase == 'test':
            fused = self.dropout(fused)
        logits = self.conv3(fused)
        return (logits, fused)
def _elementwise_flops_compute(input, other):
    """FLOPs (and 0 MACs) for a broadcast elementwise op on ``input``/``other``.

    Returns ``(flops, macs)`` where ``flops`` is the element count of the
    broadcast result shape.

    Bug fixed: when the operands had different ranks the original aligned
    the shapes from the *leading* dimensions, but NumPy/PyTorch broadcasting
    aligns from the *trailing* dimensions — e.g. (3, 4) op (4,) broadcasts
    to (3, 4), not (4, 4).
    """
    if not torch.is_tensor(input):
        if torch.is_tensor(other):
            # scalar op tensor: one op per element of the tensor
            return (_prod(other.shape), 0)
        else:
            # scalar op scalar
            return (1, 0)
    elif not torch.is_tensor(other):
        # tensor op scalar
        return (_prod(input.shape), 0)
    else:
        dim_input = len(input.shape)
        dim_other = len(other.shape)
        max_dim = max(dim_input, dim_other)
        # Right-align the shapes, padding the shorter one with 1s, and take
        # the per-dimension maximum — the standard broadcast result shape.
        # (final_shape is built trailing-first; the product is unaffected.)
        final_shape = []
        for i in range(1, max_dim + 1):
            in_i = input.shape[-i] if i <= dim_input else 1
            ot_i = other.shape[-i] if i <= dim_other else 1
            final_shape.append(max(in_i, ot_i))
        flops = _prod(final_shape)
        return (flops, 0)
class _MultiPageVIPSReader(_VIPSReader):
    """VIPS reader for multi-page TIFFs that store pyramid levels as pages."""

    def _load_levels(self, vips_image: Optional['vips.Image']):
        """Populate level metadata by opening each TIFF page individually."""
        log.debug('Attempting to read levels from non-standard multi-page TIFF')
        self.level_count = int(self.properties['n-pages'])
        full_width = int(self.properties[OPS_WIDTH])
        levels = []
        for page in range(self.level_count):
            page_img = vips.Image.new_from_file(self.path, page=page)
            page_width = int(page_img.get('width'))
            page_height = int(page_img.get('height'))
            levels.append({
                'dimensions': (page_width, page_height),
                'width': page_width,
                'height': page_height,
                # Downsample factor relative to the full-resolution width.
                'downsample': float(full_width / page_width),
                'level': page,
            })
        # Largest (highest-resolution) level first.
        self.levels = sorted(levels, key=(lambda lvl: lvl['width']), reverse=True)
        log.debug(f'Read {self.level_count} levels.')
        self.level_downsamples = [lvl['downsample'] for lvl in self.levels]
        self.level_dimensions = [lvl['dimensions'] for lvl in self.levels]
def tfidf(name, analyzer=None, ngram_range=None, stop_words=None, lowercase=None, max_df=1.0, min_df=1, max_features=None, binary=None, norm=None, use_idf=False, smooth_idf=False, sublinear_tf=False):
    """Hyperopt search-space node for a scikit-learn Tfidf vectorizer.

    Arguments left at None become tunable hyperparameters; concrete values
    are passed through unchanged.
    """

    def _name(msg):
        # Namespaced hyperparameter label for this node.
        return '%s.%s_%s' % (name, 'tfidf', msg)

    max_ngram = scope.int(hp.quniform(_name('max_ngram'), 1, 4, 1))
    # NOTE(review): when ``analyzer`` is given it is forwarded as
    # ``stop_words`` below, and the ``stop_words`` parameter itself is
    # never used — looks inherited from upstream; confirm before relying
    # on it.
    stop_words_space = hp.choice(_name('stop_words'), ['english', None]) if analyzer is None else analyzer
    lowercase_space = hp_bool(_name('lowercase')) if lowercase is None else lowercase
    binary_space = hp_bool(_name('binary')) if binary is None else binary
    ngram_space = (1, max_ngram) if ngram_range is None else ngram_range
    return scope.sklearn_Tfidf(stop_words=stop_words_space,
                               lowercase=lowercase_space,
                               max_df=max_df,
                               min_df=min_df,
                               binary=binary_space,
                               ngram_range=ngram_space,
                               norm=norm,
                               use_idf=use_idf,
                               smooth_idf=smooth_idf,
                               sublinear_tf=sublinear_tf)
def np_to_creator(data):
    """Wrap a (features, labels) numpy pair into a DataLoader factory.

    The returned callable matches the ``(config, batch_size)`` creator
    signature and yields shuffled float32 batches.
    """

    def data_creator(config, batch_size):
        features = torch.from_numpy(data[0]).float()
        labels = torch.from_numpy(data[1]).float()
        dataset = TensorDataset(features, labels)
        return DataLoader(dataset, batch_size=batch_size, shuffle=True)

    return data_creator
class FocalLoss(tf.keras.losses.Loss):
    """Multi-class focal loss for one-hot targets.

    ``call`` computes alpha * (1 - p)^gamma * CE per class, keeps the
    maximum over the class axis (targets are one-hot, so this picks the true
    class), and averages over the batch.
    """

    def __init__(self, gamma=2.0, alpha=4.0, reduction=tf.keras.losses.Reduction.AUTO, name='focal_loss'):
        super(FocalLoss, self).__init__(reduction=reduction, name=name)
        # Focusing (gamma) and balancing (alpha) coefficients.
        self.gamma = float(gamma)
        self.alpha = float(alpha)

    def call(self, y_true, y_pred):
        eps = 1e-09
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
        # Shift probabilities away from zero before taking the log.
        probs = y_pred + eps
        cross_entropy = y_true * (-tf.math.log(probs))
        focal_weight = y_true * tf.pow(1.0 - probs, self.gamma)
        focal = self.alpha * (focal_weight * cross_entropy)
        # Reduce over classes with max, then take the batch mean.
        per_example = tf.reduce_max(focal, axis=1)
        return tf.reduce_mean(per_example)
class SingleImageWrapper(gym.ObservationWrapper):
    """Concatenate the first two sub-images of a tuple observation into one.

    The wrapped env observes ``((images, ...), last_reward_action)``; this
    wrapper drops the reward/action part and stacks images 0 and 1 along the
    leading (channel) axis, exposing a single Box space with 6 channels.
    """

    def __init__(self, env):
        super().__init__(env)
        # Use the first sub-space as the template for bounds/shape/dtype.
        template = env.observation_space[0].spaces[0]
        stacked_shape = (6,) + template.shape[1:]
        self.observation_space = gym.spaces.Box(template.low.min(), template.high.max(), stacked_shape, template.dtype)

    def observation(self, observation):
        images, _last_reward_action = observation
        return np.concatenate((images[0], images[1]), 0)
class SentencePredictionConfig(FairseqDataclass):
    """Task configuration for sentence-level prediction fine-tuning."""
    # Name of the classification head the model should attach/use.
    classification_head_name: str = field(default='sentence_classification_head', metadata={'help': 'name of the classification head to use'})
    # If True, treat the task as regression instead of classification.
    regression_target: bool = field(default=False)
    # Optional extra evaluation metrics. Declared without field(); annotated
    # class attributes with defaults are still regular dataclass fields.
    report_mcc: bool = False
    report_acc_and_f1: bool = False
    report_pearson_and_spearman: bool = False
def check_bool(value, original_var_name):
    """Validate that ``value`` is a bool, else raise ValueError.

    ``original_var_name`` is interpolated into the error message so callers
    can report which variable failed validation.
    """
    if isinstance(value, bool):
        return
    raise ValueError(f"'{original_var_name}' must be a boolean, got '{type(value)}'.")
def resnet110(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """Build a ResNet-110 bio-model in the globally configured MODE."""
    print('Converting ResNet-110 to {} mode'.format(MODE_STRING))
    model = create_torchvision_biomodel(small_resnet.resnet110, MODE, layer_config, pretrained, progress, num_classes)
    return model
class Minitaur(object):
    """PyBullet model of the Minitaur quadruped (8 motors, 4 legs).

    Wraps URDF loading, pose resets, three motor-control modes (accurate
    motor model, PD torque control, or position control), observation/state
    accessors, and dynamics-randomization setters.
    """
    def __init__(self, pybullet_client, urdf_root=os.path.join(os.path.dirname(__file__), '../data'), time_step=0.01, self_collision_enabled=False, motor_velocity_limit=np.inf, pd_control_enabled=False, accurate_motor_model_enabled=False, motor_kp=1.0, motor_kd=0.02, torque_control_enabled=False, motor_overheat_protection=False, on_rack=False, kd_for_pd_controllers=0.3):
        """Store configuration, pick control gains, and load the robot.

        Args:
            pybullet_client: bullet client used for all simulation calls.
            urdf_root: directory containing ``quadruped/minitaur.urdf``.
            time_step: simulation time step in seconds.
            self_collision_enabled: load the URDF with self-collision on.
            motor_velocity_limit: per-step clamp on commanded joint motion.
            pd_control_enabled: use explicit PD torque control.
            accurate_motor_model_enabled: use the detailed motor model
                (takes precedence over PD control).
            motor_kp, motor_kd: gains for the accurate motor model.
            torque_control_enabled: drive the motor model with raw torques.
            motor_overheat_protection: permanently disable a motor whose
                torque stays above OVERHEAT_SHUTDOWN_TORQUE too long.
            on_rack: suspend the base with a fixed constraint (debugging).
            kd_for_pd_controllers: kd gain when plain PD control is used.
        """
        self.num_motors = 8
        self.num_legs = int((self.num_motors / 2))
        self._pybullet_client = pybullet_client
        self._urdf_root = urdf_root
        self._self_collision_enabled = self_collision_enabled
        self._motor_velocity_limit = motor_velocity_limit
        self._pd_control_enabled = pd_control_enabled
        # Sign convention per motor: the first four joints are mirrored.
        self._motor_direction = [(- 1), (- 1), (- 1), (- 1), 1, 1, 1, 1]
        self._observed_motor_torques = np.zeros(self.num_motors)
        self._applied_motor_torques = np.zeros(self.num_motors)
        self._max_force = 3.5
        self._accurate_motor_model_enabled = accurate_motor_model_enabled
        self._torque_control_enabled = torque_control_enabled
        self._motor_overheat_protection = motor_overheat_protection
        self._on_rack = on_rack
        if self._accurate_motor_model_enabled:
            self._kp = motor_kp
            self._kd = motor_kd
            self._motor_model = motor.MotorModel(torque_control_enabled=self._torque_control_enabled, kp=self._kp, kd=self._kd)
        elif self._pd_control_enabled:
            self._kp = 8
            self._kd = kd_for_pd_controllers
        else:
            self._kp = 1
            self._kd = 1
        self.time_step = time_step
        self.Reset()
    def _RecordMassInfoFromURDF(self):
        """Cache base and leg/motor link masses as loaded from the URDF."""
        self._base_mass_urdf = self._pybullet_client.getDynamicsInfo(self.quadruped, BASE_LINK_ID)[0]
        self._leg_masses_urdf = []
        self._leg_masses_urdf.append(self._pybullet_client.getDynamicsInfo(self.quadruped, LEG_LINK_ID[0])[0])
        self._leg_masses_urdf.append(self._pybullet_client.getDynamicsInfo(self.quadruped, MOTOR_LINK_ID[0])[0])
    def _BuildJointNameToIdDict(self):
        """Build the joint-name -> joint-id mapping from the loaded body."""
        num_joints = self._pybullet_client.getNumJoints(self.quadruped)
        self._joint_name_to_id = {}
        for i in range(num_joints):
            joint_info = self._pybullet_client.getJointInfo(self.quadruped, i)
            self._joint_name_to_id[joint_info[1].decode('UTF-8')] = joint_info[0]
    def _BuildMotorIdList(self):
        """Resolve MOTOR_NAMES into bullet joint ids, in motor order."""
        self._motor_id_list = [self._joint_name_to_id[motor_name] for motor_name in MOTOR_NAMES]
    def Reset(self, reload_urdf=True):
        """(Re)load the robot and restore its initial pose and state.

        Args:
            reload_urdf: if True, reload the URDF from disk; otherwise only
                reset the existing body's base pose, velocity and joints.
        """
        if reload_urdf:
            if self._self_collision_enabled:
                self.quadruped = self._pybullet_client.loadURDF(('%s/quadruped/minitaur.urdf' % self._urdf_root), INIT_POSITION, flags=self._pybullet_client.URDF_USE_SELF_COLLISION)
            else:
                self.quadruped = self._pybullet_client.loadURDF(('%s/quadruped/minitaur.urdf' % self._urdf_root), INIT_POSITION)
            self._BuildJointNameToIdDict()
            self._BuildMotorIdList()
            self._RecordMassInfoFromURDF()
            self.ResetPose(add_constraint=True)
            if self._on_rack:
                # Pin the base in mid-air for debugging.
                self._pybullet_client.createConstraint(self.quadruped, (- 1), (- 1), (- 1), self._pybullet_client.JOINT_FIXED, [0, 0, 0], [0, 0, 0], [0, 0, 1])
        else:
            self._pybullet_client.resetBasePositionAndOrientation(self.quadruped, INIT_POSITION, INIT_ORIENTATION)
            self._pybullet_client.resetBaseVelocity(self.quadruped, [0, 0, 0], [0, 0, 0])
            self.ResetPose(add_constraint=False)
        # Overheat bookkeeping restarts with all motors enabled.
        self._overheat_counter = np.zeros(self.num_motors)
        self._motor_enabled_list = ([True] * self.num_motors)
    def _SetMotorTorqueById(self, motor_id, torque):
        """Apply a raw torque command to one joint."""
        self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=motor_id, controlMode=self._pybullet_client.TORQUE_CONTROL, force=torque)
    def _SetDesiredMotorAngleById(self, motor_id, desired_angle):
        """Position-control one joint toward ``desired_angle``."""
        self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=motor_id, controlMode=self._pybullet_client.POSITION_CONTROL, targetPosition=desired_angle, positionGain=self._kp, velocityGain=self._kd, force=self._max_force)
    def _SetDesiredMotorAngleByName(self, motor_name, desired_angle):
        """Position-control a joint addressed by its URDF name."""
        self._SetDesiredMotorAngleById(self._joint_name_to_id[motor_name], desired_angle)
    def ResetPose(self, add_constraint):
        """Reset every leg to the default stance pose."""
        for i in range(self.num_legs):
            self._ResetPoseForLeg(i, add_constraint)
    def _ResetPoseForLeg(self, leg_id, add_constraint):
        """Reset one leg's motor and knee joints to the default pose.

        Args:
            leg_id: index into LEG_POSITION.
            add_constraint: if True, add the point-to-point constraint
                closing the four-bar linkage between the two knees.
        """
        knee_friction_force = 0
        half_pi = (math.pi / 2.0)
        # Default knee angle for the folded stance.
        knee_angle = (- 2.1834)
        leg_position = LEG_POSITION[leg_id]
        self._pybullet_client.resetJointState(self.quadruped, self._joint_name_to_id[(('motor_' + leg_position) + 'L_joint')], (self._motor_direction[(2 * leg_id)] * half_pi), targetVelocity=0)
        self._pybullet_client.resetJointState(self.quadruped, self._joint_name_to_id[(('knee_' + leg_position) + 'L_link')], (self._motor_direction[(2 * leg_id)] * knee_angle), targetVelocity=0)
        self._pybullet_client.resetJointState(self.quadruped, self._joint_name_to_id[(('motor_' + leg_position) + 'R_joint')], (self._motor_direction[((2 * leg_id) + 1)] * half_pi), targetVelocity=0)
        self._pybullet_client.resetJointState(self.quadruped, self._joint_name_to_id[(('knee_' + leg_position) + 'R_link')], (self._motor_direction[((2 * leg_id) + 1)] * knee_angle), targetVelocity=0)
        if add_constraint:
            self._pybullet_client.createConstraint(self.quadruped, self._joint_name_to_id[(('knee_' + leg_position) + 'R_link')], self.quadruped, self._joint_name_to_id[(('knee_' + leg_position) + 'L_link')], self._pybullet_client.JOINT_POINT2POINT, [0, 0, 0], KNEE_CONSTRAINT_POINT_RIGHT, KNEE_CONSTRAINT_POINT_LEFT)
        if (self._accurate_motor_model_enabled or self._pd_control_enabled):
            # Disable the default velocity motors so torques can be applied.
            self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self._joint_name_to_id[(('motor_' + leg_position) + 'L_joint')], controlMode=self._pybullet_client.VELOCITY_CONTROL, targetVelocity=0, force=knee_friction_force)
            self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self._joint_name_to_id[(('motor_' + leg_position) + 'R_joint')], controlMode=self._pybullet_client.VELOCITY_CONTROL, targetVelocity=0, force=knee_friction_force)
        else:
            self._SetDesiredMotorAngleByName((('motor_' + leg_position) + 'L_joint'), (self._motor_direction[(2 * leg_id)] * half_pi))
            self._SetDesiredMotorAngleByName((('motor_' + leg_position) + 'R_joint'), (self._motor_direction[((2 * leg_id) + 1)] * half_pi))
        self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self._joint_name_to_id[(('knee_' + leg_position) + 'L_link')], controlMode=self._pybullet_client.VELOCITY_CONTROL, targetVelocity=0, force=knee_friction_force)
        self._pybullet_client.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self._joint_name_to_id[(('knee_' + leg_position) + 'R_link')], controlMode=self._pybullet_client.VELOCITY_CONTROL, targetVelocity=0, force=knee_friction_force)
    def GetBasePosition(self):
        """Return the base position as reported by the simulator."""
        (position, _) = self._pybullet_client.getBasePositionAndOrientation(self.quadruped)
        return position
    def GetBaseOrientation(self):
        """Return the base orientation quaternion."""
        (_, orientation) = self._pybullet_client.getBasePositionAndOrientation(self.quadruped)
        return orientation
    def GetActionDimension(self):
        """Number of action entries: one per motor."""
        return self.num_motors
    def GetObservationUpperBound(self):
        """Per-entry upper bound matching GetObservation's layout.

        Layout: motor angles, motor velocities, motor torques, then base
        orientation (4 entries) and the remaining entries (base position),
        the latter two groups bounded by 1.0.
        """
        upper_bound = np.array(([0.0] * self.GetObservationDimension()))
        upper_bound[0:self.num_motors] = math.pi
        upper_bound[self.num_motors:(2 * self.num_motors)] = motor.MOTOR_SPEED_LIMIT
        upper_bound[(2 * self.num_motors):(3 * self.num_motors)] = motor.OBSERVED_TORQUE_LIMIT
        upper_bound[(3 * self.num_motors):((3 * self.num_motors) + 4)] = 1.0
        upper_bound[((3 * self.num_motors) + 4):] = 1.0
        return upper_bound
    def GetObservationLowerBound(self):
        """Lower bound: symmetric negation of the upper bound."""
        return (- self.GetObservationUpperBound())
    def GetObservationDimension(self):
        """Length of the observation vector."""
        return len(self.GetObservation())
    def GetObservation(self):
        """Concatenate motor angles, velocities, torques, base orientation and position."""
        observation = []
        observation.extend(self.GetMotorAngles().tolist())
        observation.extend(self.GetMotorVelocities().tolist())
        observation.extend(self.GetMotorTorques().tolist())
        observation.extend(list(self.GetBaseOrientation()))
        observation.extend(list(self.GetBasePosition()))
        return observation
    def ApplyAction(self, motor_commands):
        """Apply one step of motor commands in the configured control mode.

        Commands are optionally velocity-clamped, then dispatched either
        through the accurate motor model (with optional overheat
        protection), an explicit PD law, or bullet position control.
        """
        if (self._motor_velocity_limit < np.inf):
            # Clamp the commanded change to what the motors can track in
            # one time step.
            current_motor_angle = self.GetMotorAngles()
            motor_commands_max = (current_motor_angle + (self.time_step * self._motor_velocity_limit))
            motor_commands_min = (current_motor_angle - (self.time_step * self._motor_velocity_limit))
            motor_commands = np.clip(motor_commands, motor_commands_min, motor_commands_max)
        if (self._accurate_motor_model_enabled or self._pd_control_enabled):
            q = self.GetMotorAngles()
            qdot = self.GetMotorVelocities()
            if self._accurate_motor_model_enabled:
                (actual_torque, observed_torque) = self._motor_model.convert_to_torque(motor_commands, q, qdot)
                if self._motor_overheat_protection:
                    # Disable any motor that stays above the shutdown torque
                    # longer than OVERHEAT_SHUTDOWN_TIME.
                    for i in range(self.num_motors):
                        if (abs(actual_torque[i]) > OVERHEAT_SHUTDOWN_TORQUE):
                            self._overheat_counter[i] += 1
                        else:
                            self._overheat_counter[i] = 0
                        if (self._overheat_counter[i] > (OVERHEAT_SHUTDOWN_TIME / self.time_step)):
                            self._motor_enabled_list[i] = False
                self._observed_motor_torques = observed_torque
                self._applied_motor_torque = np.multiply(actual_torque, self._motor_direction)
                for (motor_id, motor_torque, motor_enabled) in zip(self._motor_id_list, self._applied_motor_torque, self._motor_enabled_list):
                    if motor_enabled:
                        self._SetMotorTorqueById(motor_id, motor_torque)
                    else:
                        self._SetMotorTorqueById(motor_id, 0)
            else:
                # Plain PD law on angle error and joint velocity.
                torque_commands = (((- self._kp) * (q - motor_commands)) - (self._kd * qdot))
                self._observed_motor_torques = torque_commands
                self._applied_motor_torques = np.multiply(self._observed_motor_torques, self._motor_direction)
                for (motor_id, motor_torque) in zip(self._motor_id_list, self._applied_motor_torques):
                    self._SetMotorTorqueById(motor_id, motor_torque)
        else:
            motor_commands_with_direction = np.multiply(motor_commands, self._motor_direction)
            for (motor_id, motor_command_with_direction) in zip(self._motor_id_list, motor_commands_with_direction):
                self._SetDesiredMotorAngleById(motor_id, motor_command_with_direction)
    def GetMotorAngles(self):
        """Motor angles in the robot's sign convention."""
        motor_angles = [self._pybullet_client.getJointState(self.quadruped, motor_id)[0] for motor_id in self._motor_id_list]
        motor_angles = np.multiply(motor_angles, self._motor_direction)
        return motor_angles
    def GetMotorVelocities(self):
        """Motor velocities in the robot's sign convention."""
        motor_velocities = [self._pybullet_client.getJointState(self.quadruped, motor_id)[1] for motor_id in self._motor_id_list]
        motor_velocities = np.multiply(motor_velocities, self._motor_direction)
        return motor_velocities
    def GetMotorTorques(self):
        """Motor torques: model-observed if available, else from the simulator."""
        if (self._accurate_motor_model_enabled or self._pd_control_enabled):
            return self._observed_motor_torques
        else:
            motor_torques = [self._pybullet_client.getJointState(self.quadruped, motor_id)[3] for motor_id in self._motor_id_list]
            motor_torques = np.multiply(motor_torques, self._motor_direction)
            return motor_torques
    def ConvertFromLegModel(self, actions):
        """Convert leg-space actions (swing/extension) to motor angles."""
        motor_angle = copy.deepcopy(actions)
        scale_for_singularity = 1
        offset_for_singularity = 1.5
        half_num_motors = int((self.num_motors / 2))
        quater_pi = (math.pi / 4)
        for i in range(self.num_motors):
            action_idx = (i // 2)
            forward_backward_component = (((- scale_for_singularity) * quater_pi) * (actions[(action_idx + half_num_motors)] + offset_for_singularity))
            # Alternating sign pairs the two motors of each leg.
            extension_component = ((((- 1) ** i) * quater_pi) * actions[action_idx])
            if (i >= half_num_motors):
                extension_component = (- extension_component)
            motor_angle[i] = ((math.pi + forward_backward_component) + extension_component)
        return motor_angle
    def GetBaseMassFromURDF(self):
        """Base mass as originally loaded from the URDF."""
        return self._base_mass_urdf
    def GetLegMassesFromURDF(self):
        """[leg link mass, motor link mass] as loaded from the URDF."""
        return self._leg_masses_urdf
    def SetBaseMass(self, base_mass):
        """Override the base link mass (dynamics randomization)."""
        self._pybullet_client.changeDynamics(self.quadruped, BASE_LINK_ID, mass=base_mass)
    def SetLegMasses(self, leg_masses):
        """Override leg and motor link masses: leg_masses = [leg, motor]."""
        for link_id in LEG_LINK_ID:
            self._pybullet_client.changeDynamics(self.quadruped, link_id, mass=leg_masses[0])
        for link_id in MOTOR_LINK_ID:
            self._pybullet_client.changeDynamics(self.quadruped, link_id, mass=leg_masses[1])
    def SetFootFriction(self, foot_friction):
        """Set the lateral friction coefficient of every foot link."""
        for link_id in FOOT_LINK_ID:
            self._pybullet_client.changeDynamics(self.quadruped, link_id, lateralFriction=foot_friction)
    def SetBatteryVoltage(self, voltage):
        """Forward a battery-voltage change to the accurate motor model."""
        if self._accurate_motor_model_enabled:
            self._motor_model.set_voltage(voltage)
    def SetMotorViscousDamping(self, viscous_damping):
        """Forward a viscous-damping change to the accurate motor model."""
        if self._accurate_motor_model_enabled:
            self._motor_model.set_viscous_damping(viscous_damping)
class common_solver(solver.solver):
    """Generic train/eval/test driver built on the base ``solver``.

    Delegates per-batch work to ``self.kernel_processer`` and handles data
    movement, sample-weighted metric aggregation, logging and checkpointing.

    Refactor: ``test_model`` and ``evaluate_model`` were byte-for-byte
    duplicates except for the kernel-processer method they called; the
    shared loop now lives in ``_run_epoch``.
    """

    def __init__(self, models, optimizers, kernel_processer, model_name, save_path='checkpoints'):
        super(common_solver, self).__init__(models, optimizers, kernel_processer, model_name, save_path)

    def _run_epoch(self, dataloader, step_fn):
        """Run ``step_fn(step, data)`` over a loader and aggregate metrics.

        ``step_fn`` must return ``(batch_size, key_value, metrics_dict)``.
        Returns the sample-weighted mean of ``key_value`` and of every entry
        of ``metrics_dict``.
        """
        self.eval_mode()
        counter = 0.0
        evaluate_value = 0.0
        evaluate_dict = None
        for (step, data) in enumerate(dataloader):
            # Move every tensor of the batch onto the GPU in place.
            for i in range(0, len(data)):
                data[i] = data[i].cuda()
            (data_counter, key_value, output_dict) = step_fn(step, data)
            counter += data_counter
            evaluate_value += (key_value * data_counter)
            if evaluate_dict is None:
                evaluate_dict = {key: output_dict[key] * data_counter for key in output_dict}
            else:
                for key in output_dict:
                    evaluate_dict[key] += (output_dict[key] * data_counter)
        for key in evaluate_dict:
            evaluate_dict[key] = evaluate_dict[key] / counter
        evaluate_value = evaluate_value / counter
        return (evaluate_value, evaluate_dict)

    def test_model(self, param_dict, mode='test'):
        """Aggregate ``kernel_processer.test`` over the requested loader."""
        loader_choice = {'test': 'test_loader', 'val': 'val_loader'}
        return self._run_epoch(param_dict[loader_choice[mode]], self.kernel_processer.test)

    def evaluate_model(self, param_dict, mode='val'):
        """Aggregate ``kernel_processer.evaluate`` over the requested loader."""
        loader_choice = {'test': 'test_loader', 'val': 'val_loader'}
        return self._run_epoch(param_dict[loader_choice[mode]], self.kernel_processer.evaluate)

    def train_model(self, epoch, param_dict):
        """Run one training epoch, logging per-step metrics."""
        self.train_mode()
        dataloader = param_dict['train_loader']
        dataset_numbers = dataloader.dataset.__len__()
        # Ceil division: iterations per epoch.
        it_numbers = int((((dataset_numbers + dataloader.batch_size) - 1) / dataloader.batch_size))
        for (step, data) in enumerate(dataloader):
            for i in range(0, len(data)):
                data[i] = data[i].cuda()
            evaluate_dict = self.kernel_processer.train(step, data)
            self.write_log(evaluate_dict, ((epoch * it_numbers) + step))
            self.output_loss(evaluate_dict, epoch, step)
            self.kernel_processer.update_optimizers(epoch, step, it_numbers)

    def main(self, param_dict):
        """Full loop: train, validate, checkpoint the best, then test."""
        best_value = .0
        iteration_count = 0
        epochs = param_dict['epochs']
        for i in range(0, epochs):
            self.train_model(i, param_dict)
            (evaluate_value, evaluate_dict) = self.evaluate_model(param_dict, 'val')
            self.output_loss(evaluate_dict, i, 0)
            self.write_log(evaluate_dict, i)
            # NOTE(review): ``best_value`` starts at 0.0 and lower is treated
            # as better, so a validation metric that never dips below zero is
            # never checkpointed as 'best' — confirm the metric's sign.
            if (evaluate_value < best_value):
                best_value = evaluate_value
                self.save_params('best')
        self.restore_params(self.time_string, 'best')
        (tev, ted) = self.test_model(param_dict, 'test')
        self.write_log(ted, (epochs + 5))
        d = self.kernel_processer.on_finish()
        self.write_log(d, 500)
        print(d)
        # Give the summary writer time to flush before closing.
        time.sleep(10)
        self.writer.close()
        return (tev, ted)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
    """Convert a list of image files into sharded TFRecord files.

    Args:
        split_name: 'train' or 'validation'.
        filenames: absolute image paths; the parent directory name is taken
            as the class label.
        class_names_to_ids: mapping from class name to integer class id.
        dataset_dir: output directory for the TFRecord shards.

    Bug fixed: image files were opened in text mode ('r'); raw JPEG bytes
    must be read in binary mode ('rb') or the read fails/corrupts under
    Python 3.
    """
    assert (split_name in ['train', 'validation'])
    num_per_shard = int(math.ceil((len(filenames) / float(_NUM_SHARDS))))
    with tf.Graph().as_default():
        image_reader = ImageReader()
        with tf.Session('') as sess:
            for shard_id in range(_NUM_SHARDS):
                output_filename = _get_dataset_filename(dataset_dir, split_name, shard_id)
                with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                    start_ndx = (shard_id * num_per_shard)
                    end_ndx = min(((shard_id + 1) * num_per_shard), len(filenames))
                    for i in range(start_ndx, end_ndx):
                        sys.stdout.write(('\r>> Converting image %d/%d shard %d' % ((i + 1), len(filenames), shard_id)))
                        sys.stdout.flush()
                        # Binary mode: these are raw JPEG bytes, not text.
                        image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
                        (height, width) = image_reader.read_image_dims(sess, image_data)
                        class_name = os.path.basename(os.path.dirname(filenames[i]))
                        class_id = class_names_to_ids[class_name]
                        example = dataset_utils.image_to_tfexample(image_data, 'jpg', height, width, class_id)
                        tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
def propagate_through_subgraph(node, new_sharding_spec, sharding_specs, contracted_graph, nx_graph):
    """Propagate a changed output sharding spec through ``node``'s aggregated subgraph.

    Performs a BFS from ``node`` over successors restricted to the
    contracted node's aggregated set, updating each visited node's
    input/output specs to follow ``new_sharding_spec``.

    Returns:
        A deep-copied spec dict for the aggregated nodes (excluding
        ``node`` itself). If the new spec equals the node's current output
        spec, the untouched specs are returned without copying.
    """
    agg_nodes = contracted_graph.nodes[node]['aggregated_nodes']
    # Specs of all aggregated nodes except the entry node itself.
    prop_spec = dict(((k, sharding_specs[k]) for k in sharding_specs.keys() if ((k in agg_nodes) and (k != node))))
    if (new_sharding_spec == sharding_specs[node]['output_spec']):
        # Nothing changes: reuse the existing specs as-is.
        return prop_spec
    # Work on copies so callers' specs are never mutated.
    new_sharding_spec_cp = copy.deepcopy(new_sharding_spec)
    prop_spec_cp = copy.deepcopy(prop_spec)
    visited = dict(((node, False) for node in agg_nodes))
    queue = deque()
    queue.append(node)
    while (len(queue) > 0):
        visit_node = queue.popleft()
        node_target = nx_graph.nodes[visit_node]['target']
        if (not visited[visit_node]):
            # Enqueue unvisited successors that stay inside the subgraph.
            for user_node in nx_graph.successors(visit_node):
                if ((user_node in agg_nodes) and (not visited[user_node])):
                    queue.append(user_node)
            if (visit_node == node):
                # The entry node's spec is the new spec; nothing to update.
                # NOTE(review): this also skips marking it visited — fine on
                # a DAG, but a cycle back to ``node`` would loop; confirm.
                continue
            if (node_target not in _RESHAPE_OPS):
                # Non-reshape ops inherit the (possibly new) spec of their
                # single predecessor; ``getitem`` indexes into a tuple spec
                # unless the spec is already a MeshShardingSpec.
                predecessor = list(nx_graph.predecessors(visit_node))[0]
                predecessor_output_spec = (new_sharding_spec_cp if (predecessor == node) else prop_spec_cp[predecessor]['output_spec'])
                prop_spec_cp[visit_node]['input_spec'][predecessor] = predecessor_output_spec
                prop_spec_cp[visit_node]['output_spec'] = (predecessor_output_spec if ((node_target != operator.getitem) or isinstance(predecessor_output_spec, MeshShardingSpec)) else operator.getitem(predecessor_output_spec, nx_graph.nodes[visit_node]['args'][(- 1)]))
            visited[visit_node] = True
    return prop_spec_cp
def test_get_kbs_invalidates_cache_if_input_changes():
    """get_kbs must rebuild (not reuse) its cache when the custom KB dict changes."""
    journals = {'Journal of Testing': 'J.Testing'}
    first_cache = get_kbs(custom_kbs={'journals': journals}).copy()
    # Different abbreviation for the same journal -> cache key must change.
    # (Fixed: original had a redundant double assignment `journals = journals = {...}`.)
    journals = {'Journal of Testing': 'J.Test.'}
    second_cache = get_kbs(custom_kbs={'journals': journals})
    # Every cached entry must be a fresh object, not the previously cached one.
    assert all(((cached_first is not cached_second) for (cached_first, cached_second) in zip(first_cache['journals'], second_cache['journals'])))
    assert (len(second_cache['journals']) == 3)
    assert (['JOURNAL OF TESTING', 'J TEST'] == second_cache['journals'][(- 1)])
def gen_mask(corr_dict):
    """Reduce both correlation volumes to match masks.

    For each direction ('AB', 'BA') the mask is the maximum of the
    correlation tensor along dim 1 (keepdim=True), i.e. the best match
    score per remaining position.
    """
    masks = {}
    for direction in ('AB', 'BA'):
        best_scores, _ = torch.max(corr_dict['corr_' + direction], dim=1, keepdim=True)
        masks['mask_' + direction] = best_scores
    return masks
class FMClassifier(sklearn.base.BaseEstimator):
    """Factorization Machine binary classifier wrapping pywFM/libFM.

    Learns a global bias `mu`, linear weights `W` and pairwise embeddings
    `V` (size `embedding_size`), then predicts with the standard O(nk)
    FM inference formula.
    """

    def __init__(self, embedding_size=20, nb_iterations=40):
        super().__init__()
        self.embedding_size = embedding_size
        self.nb_iterations = nb_iterations

    def fit(self, X, y):
        """Train libFM on (X, y); stores mu, W, V and V**2 for inference."""
        fm = pywFM.FM(task='classification', num_iter=self.nb_iterations, k2=self.embedding_size, rlog=True)
        model = fm.run(X, y, X, y)
        self.mu = model.global_bias
        self.W = np.array(model.weights)
        self.V = model.pairwise_interactions
        self.V2 = np.power(self.V, 2)
        self.rlog = model.rlog
        return self

    def predict_proba(self, X):
        """Return P(y=1 | X) via the factorization-machine decision function."""
        X2 = X.copy()
        if scipy.sparse.issparse(X):
            X2.data **= 2
        else:
            X2 **= 2
        # BUG FIX: the three matrix products were missing the `@` operator
        # (the original read e.g. `(X self.W)`, a SyntaxError).
        # Uses the identity sum_{i<j} v_i v_j x_i x_j
        #   = 0.5 * ((X @ V)^2 - X^2 @ V^2) summed over factors.
        # NOTE(review): `.A1` assumes the sums yield np.matrix, i.e. X is a
        # scipy sparse matrix — confirm callers never pass dense ndarrays.
        y_pred = ((self.mu + (X @ self.W)) + (0.5 * (np.power((X @ self.V), 2).sum(axis=1) - (X2 @ self.V2).sum(axis=1)).A1))
        return sigmoid(y_pred)
def get_data(data_name):
    """Resolve a dataset name to `(x_dim, train, valid, test)`.

    Known names: 'bmnist', 'mnist', 'silhouettes', 'tfd', 'bars', plus any
    entry of the module-level `local_datasets` (loaded from data/<name>.hdf5).
    Raises ValueError for anything else.
    """
    if (data_name == 'bmnist'):
        from fuel.datasets.binarized_mnist import BinarizedMNIST
        x_dim = 28 * 28
        train_split = BinarizedMNIST(which_sets=['train'], sources=['features'])
        valid_split = BinarizedMNIST(which_sets=['valid'], sources=['features'])
        test_split = BinarizedMNIST(which_sets=['test'], sources=['features'])
    elif (data_name == 'mnist'):
        from fuel.datasets.mnist import MNIST
        x_dim = 28 * 28
        train_split = MNIST(which_sets=['train'], sources=['features'])
        # NOTE: validation deliberately reuses the test split here,
        # mirroring the original behaviour.
        valid_split = MNIST(which_sets=['test'], sources=['features'])
        test_split = MNIST(which_sets=['test'], sources=['features'])
    elif (data_name == 'silhouettes'):
        from fuel.datasets.caltech101_silhouettes import CalTech101Silhouettes
        size = 28
        x_dim = size * size
        train_split = CalTech101Silhouettes(which_sets=['train'], size=size, sources=['features'])
        valid_split = CalTech101Silhouettes(which_sets=['valid'], size=size, sources=['features'])
        test_split = CalTech101Silhouettes(which_sets=['test'], size=size, sources=['features'])
    elif (data_name == 'tfd'):
        from fuel.datasets.toronto_face_database import TorontoFaceDatabase
        size = 48
        x_dim = size * size
        train_split = TorontoFaceDatabase(which_sets=['unlabeled'], size=size, sources=['features'])
        valid_split = TorontoFaceDatabase(which_sets=['valid'], size=size, sources=['features'])
        test_split = TorontoFaceDatabase(which_sets=['test'], size=size, sources=['features'])
    elif (data_name == 'bars'):
        from bars_data import Bars
        width = 4
        x_dim = width * width
        train_split = Bars(num_examples=5000, width=width, sources=['features'])
        valid_split = Bars(num_examples=5000, width=width, sources=['features'])
        test_split = Bars(num_examples=5000, width=width, sources=['features'])
    elif (data_name in local_datasets):
        from fuel.datasets.hdf5 import H5PYDataset
        fname = ('data/' + data_name) + '.hdf5'
        train_split = H5PYDataset(fname, which_sets=['train'], sources=['features'], load_in_memory=True)
        valid_split = H5PYDataset(fname, which_sets=['valid'], sources=['features'], load_in_memory=True)
        test_split = H5PYDataset(fname, which_sets=['test'], sources=['features'], load_in_memory=True)
        # Infer the flattened feature dimensionality from a small sample.
        some_features = train_split.get_data(None, slice(0, 100))[0]
        assert (some_features.shape[0] == 100)
        some_features = some_features.reshape([100, -1])
        x_dim = some_features.shape[1]
    else:
        raise ValueError(('Unknown dataset %s' % data_name))
    return (x_dim, train_split, valid_split, test_split)
def gptneox_set_state_data(ctx: gptneox_context_p, src) -> int:
    # Thin ctypes wrapper: loads serialized state from the `src` buffer into
    # the native gptneox context. Returns the int result of the C call —
    # presumably the number of bytes read, mirroring llama.cpp's
    # llama_set_state_data; confirm against the native header.
    return _lib.gptneox_set_state_data(ctx, src)
def param_name_dict():
    """Map Caffe layer type-name stems to their parameter field-name stems.

    E.g. the `convolution_param` field of type `ConvolutionParameter`
    contributes the entry {'Convolution': 'convolution'}.
    """
    layer = caffe_pb2.LayerParameter()
    suffix = '_param'
    # All fields whose name ends with '_param', and their protobuf types.
    field_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith(suffix)]
    type_names = [type(getattr(layer, name)).__name__ for name in field_names]
    # Strip the '_param' / 'Parameter' suffixes to get the shared stems.
    stripped_fields = [name[:-len(suffix)] for name in field_names]
    stripped_types = [tname[:-len('Parameter')] for tname in type_names]
    return dict(zip(stripped_types, stripped_fields))
class TabPerceiver(BaseTabularModelWithAttention):
    """Adaptation of the Perceiver architecture for tabular data.

    A set of learned latents repeatedly cross-attends to the column
    embeddings and is refined by latent transformer blocks; the mean-pooled
    latents (optionally passed through an MLP head) form the output.

    NOTE(review): the `@property` and `@staticmethod` decorators below were
    restored — they appear to have been stripped from this file (without
    `@staticmethod`, `self._extract_attn_weights(a, b)` would raise
    TypeError by passing `self` as a third argument). This matches the
    upstream pytorch-widedeep implementation.
    """

    def __init__(
        self,
        column_idx: Dict[(str, int)],
        cat_embed_input: Optional[List[Tuple[(str, int)]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        full_embed_dropout: bool = False,
        shared_embed: bool = False,
        add_shared_embed: bool = False,
        frac_shared_embed: float = 0.25,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = None,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        input_dim: int = 32,
        n_cross_attns: int = 1,
        n_cross_attn_heads: int = 4,
        n_latents: int = 16,
        latent_dim: int = 128,
        n_latent_heads: int = 4,
        n_latent_blocks: int = 4,
        n_perceiver_blocks: int = 4,
        share_weights: bool = False,
        attn_dropout: float = 0.1,
        ff_dropout: float = 0.1,
        ff_factor: int = 4,
        transformer_activation: str = 'geglu',
        mlp_hidden_dims: Optional[List[int]] = None,
        mlp_activation: str = 'relu',
        mlp_dropout: float = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = True,
    ):
        super(TabPerceiver, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, full_embed_dropout=full_embed_dropout, shared_embed=shared_embed, add_shared_embed=add_shared_embed, frac_shared_embed=frac_shared_embed, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=True, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation, input_dim=input_dim)
        self.n_cross_attns = n_cross_attns
        self.n_cross_attn_heads = n_cross_attn_heads
        self.n_latents = n_latents
        self.latent_dim = latent_dim
        self.n_latent_heads = n_latent_heads
        self.n_latent_blocks = n_latent_blocks
        self.n_perceiver_blocks = n_perceiver_blocks
        self.share_weights = share_weights
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.ff_factor = ff_factor
        self.transformer_activation = transformer_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # Learned latent array, truncated-normal initialized.
        self.latents = nn.init.trunc_normal_(nn.Parameter(torch.empty(n_latents, latent_dim)))
        self.encoder = nn.ModuleDict()
        first_perceiver_block = self._build_perceiver_block()
        self.encoder['perceiver_block0'] = first_perceiver_block
        if share_weights:
            # All blocks point to the same module -> tied parameters.
            for n in range(1, n_perceiver_blocks):
                self.encoder[('perceiver_block' + str(n))] = first_perceiver_block
        else:
            for n in range(1, n_perceiver_blocks):
                self.encoder[('perceiver_block' + str(n))] = self._build_perceiver_block()
        self.mlp_first_hidden_dim = self.latent_dim
        if (mlp_hidden_dims is not None):
            self.mlp = MLP(([self.mlp_first_hidden_dim] + mlp_hidden_dims), mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None

    def forward(self, X: Tensor) -> Tensor:
        """Embed columns, iterate cross-attn + latent blocks, pool, and (optionally) apply the MLP head."""
        x_emb = self._get_embeddings(X)
        # Broadcast the shared latents across the batch.
        x = einops.repeat(self.latents, 'n d -> b n d', b=X.shape[0])
        for n in range(self.n_perceiver_blocks):
            cross_attns = self.encoder[('perceiver_block' + str(n))]['cross_attns']
            latent_transformer = self.encoder[('perceiver_block' + str(n))]['latent_transformer']
            for cross_attn in cross_attns:
                x = cross_attn(x, x_emb)
            x = latent_transformer(x)
        # Mean-pool over the latent dimension.
        x = x.mean(dim=1)
        if (self.mlp is not None):
            x = self.mlp(x)
        return x

    @property
    def output_dim(self) -> int:
        """Dimension of the tensor produced by `forward`."""
        return (self.mlp_hidden_dims[(- 1)] if (self.mlp_hidden_dims is not None) else self.mlp_first_hidden_dim)

    @property
    def attention_weights(self) -> List:
        """Attention weights per perceiver block (one shared entry if weights are tied)."""
        if self.share_weights:
            cross_attns = self.encoder['perceiver_block0']['cross_attns']
            latent_transformer = self.encoder['perceiver_block0']['latent_transformer']
            attention_weights = self._extract_attn_weights(cross_attns, latent_transformer)
        else:
            attention_weights = []
            for n in range(self.n_perceiver_blocks):
                cross_attns = self.encoder[('perceiver_block' + str(n))]['cross_attns']
                latent_transformer = self.encoder[('perceiver_block' + str(n))]['latent_transformer']
                attention_weights.append(self._extract_attn_weights(cross_attns, latent_transformer))
        return attention_weights

    def _build_perceiver_block(self) -> nn.ModuleDict:
        """Build one perceiver block: cross-attention stack + latent transformer."""
        perceiver_block = nn.ModuleDict()
        cross_attns = nn.ModuleList()
        for _ in range(self.n_cross_attns):
            cross_attns.append(PerceiverEncoder(self.input_dim, self.n_cross_attn_heads, False, self.attn_dropout, self.ff_dropout, self.ff_factor, self.transformer_activation, self.latent_dim))
        perceiver_block['cross_attns'] = cross_attns
        latent_transformer = nn.Sequential()
        for i in range(self.n_latent_blocks):
            latent_transformer.add_module(('latent_block' + str(i)), PerceiverEncoder(self.latent_dim, self.n_latent_heads, False, self.attn_dropout, self.ff_dropout, self.ff_factor, self.transformer_activation))
        perceiver_block['latent_transformer'] = latent_transformer
        return perceiver_block

    @staticmethod
    def _extract_attn_weights(cross_attns, latent_transformer) -> List:
        # Collect the stored attention maps from every sub-module.
        attention_weights = []
        for cross_attn in cross_attns:
            attention_weights.append(cross_attn.attn.attn_weights)
        for latent_block in latent_transformer:
            attention_weights.append(latent_block.attn.attn_weights)
        return attention_weights
class EuroRadCase(Case):
    """Adapter converting scraped EuroRad records to the standard case schema."""

    def _in(self, metadata):
        # Membership test against the metadata's url column/series.
        return self.url in list(metadata['url'])

    def to_standard(self, eurorad_record):
        """Return a {'patient', 'images', 'document'} dict built from a raw EuroRad record."""
        patient = {
            'sex': eurorad_record['sex'],
            'age': eurorad_record['age'],
            'clinical_history': eurorad_record.get('CLINICAL HISTORY'),
            'finding': eurorad_record.get('FINAL DIAGNOSIS'),
            'misc': {},
        }
        document = {'doi': None, 'url': eurorad_record['url'], 'license': 'CC BY-NC-SA 4.0'}
        # Crude modality guess: any description mentioning 'CT' is CT, else X-ray.
        image_entries = [
            {'url': img, 'image_description': desc, 'modality': ('CT' if ('CT' in desc) else 'X-ray')}
            for (img, desc) in zip(eurorad_record['images'], eurorad_record['image_descriptions'])
        ]
        return {'patient': patient, 'images': image_entries, 'document': document}
class RealmTokenizerFast(metaclass=DummyObject):
    # Placeholder class generated for when the optional `tokenizers` backend
    # is not installed: any instantiation raises via requires_backends with
    # an informative install message.
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def get_query_based_summarization_set(dataset: SummDataset, size=1) -> Tuple[(List, List, List)]:
    """Draw `size` samples from the training stream and split them into
    parallel lists of (sources, summaries, queries)."""
    samples = [next(dataset.train_set) for _ in range(size)]
    # zip(*...) transposes the per-sample triples into three parallel tuples.
    (src, tgt, queries) = zip(*((item.source, item.summary, item.query) for item in samples))
    return (list(src), list(tgt), list(queries))
class TFRobertaMainLayer(metaclass=DummyObject):
    # Placeholder class generated for when TensorFlow is not installed:
    # any instantiation raises via requires_backends with an informative
    # install message.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class KNeighborsAlgorithm(KNNFit):
    """k-NN classifier wrapper used by the AutoML framework."""

    algorithm_name = 'k-Nearest Neighbors'
    algorithm_short_name = 'Nearest Neighbors'

    def __init__(self, params):
        super(KNeighborsAlgorithm, self).__init__(params)
        logger.debug('KNeighborsAlgorithm.__init__')
        self.library_version = sklearn.__version__
        # k-NN has no iterative training loop.
        self.max_iters = 1
        knn_kwargs = {
            'n_neighbors': params.get('n_neighbors', 3),
            'weights': params.get('weights', 'uniform'),
            'algorithm': 'kd_tree',
            'n_jobs': params.get('n_jobs', -1),
        }
        self.model = KNeighborsClassifier(**knn_kwargs)
class TacotronSTFT(torch.nn.Module):
    """STFT + mel filterbank front end for Tacotron-style models."""

    def __init__(self, filter_length=1024, hop_length=256, win_length=1024, n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0, mel_fmax=8000.0):
        super(TacotronSTFT, self).__init__()
        self.n_mel_channels = n_mel_channels
        self.sampling_rate = sampling_rate
        self.stft_fn = STFT(filter_length, hop_length, win_length)
        # Mel filterbank registered as a buffer so it follows .to(device) / state_dict.
        basis = librosa_mel_fn(sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
        self.register_buffer('mel_basis', torch.from_numpy(basis).float())

    def spectral_normalize(self, magnitudes):
        return dynamic_range_compression(magnitudes)

    def spectral_de_normalize(self, magnitudes):
        return dynamic_range_decompression(magnitudes)

    def mel_spectrogram(self, y):
        """Compute the compressed mel spectrogram of waveform batch `y`,
        which must lie in [-1, 1]."""
        assert (torch.min(y.data) >= (- 1))
        assert (torch.max(y.data) <= 1)
        (magnitudes, phases) = self.stft_fn.transform(y)
        mel_output = torch.matmul(self.mel_basis, magnitudes.data)
        return self.spectral_normalize(mel_output)
def compose_transformations(*args: Callable[([GraphModule], Optional[GraphModule])], inplace: bool=False) -> GraphModule:
    """Compose graph-module transformations into one callable.

    Transformations are applied left-to-right (the first positional arg runs
    first). When `inplace` is False, a `deepcopy_graph` step is prepended so
    the input GraphModule is never mutated. For every transformation except
    the last, `lint_and_recompile=False` is injected (when supported) so
    lint/recompile happens only once, after the final step.
    """
    args = list(args)
    if (not inplace):
        args.insert(0, deepcopy_graph)
    # Skip the last entry so the final transformation still lints/recompiles.
    for (i, transformation) in enumerate(args[:(- 1)]):
        # inspect.signature follows __wrapped__ by default, so taking the
        # signature before unwrapping still sees the real parameters.
        sig = signature(transformation)
        if getattr(transformation, '_is_transformation', False):
            transformation = transformation.__wrapped__
        if ('lint_and_recompile' in sig.parameters):
            args[i] = functools.partial(transformation, lint_and_recompile=False)
    def reduce_func(f, g):
        # Compose so that g runs before f; a transformation returning None
        # is treated as in-place (the current module is carried forward).
        def compose_f_and_g(gm):
            output_g = g(gm)
            if (output_g is None):
                output_g = gm
            output_f = f(output_g)
            if (output_f is None):
                # NOTE(review): falls back to `gm`, not `output_g` — verify
                # this is intended when g returned a new module and f is in-place.
                output_f = gm
            return output_f
        return compose_f_and_g
    return functools.reduce(reduce_func, reversed(args), (lambda x: x))
class Mul(ZooKerasLayer):
    """Element-wise multiply layer for the Zoo Keras-style API."""

    def __init__(self, input_shape=None, **kwargs):
        # Normalize the shape to a plain list (or None when unspecified).
        shape = list(input_shape) if input_shape else None
        super(Mul, self).__init__(None, shape, **kwargs)
class TestTrain(unittest.TestCase):
    """Checks Solution quality bookkeeping on a tiny 4-city PCTSP instance."""

    def setUp(self):
        # 4 cities with unit travel costs; city 0 has a prohibitive penalty.
        self.p = pctsp.Pctsp()
        self.p.prize = np.array([0, 4, 8, 3])
        self.p.penal = np.array([1000, 7, 11, 17])
        self.p.cost = np.array([[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]])

    def test_quality(self):
        sol = solution.Solution(self.p)
        sol.route = [0, 1, 2, 3]
        print('Quality: ', sol.quality)
        self.assertEqual(sol.quality, 4)

    def test_quality_2(self):
        sol = solution.Solution(self.p, size=2)
        sol.route = [0, 1, 2, 3]
        print('Quality: ', sol.quality)
        self.assertEqual(sol.quality, 30)

    def test_swap(self):
        sol = solution.Solution(self.p, size=3)
        sol.route = [0, 1, 2, 3]
        sol.swap(1, 3)
        print('Quality: ', sol.quality)
        print('route:', sol.route)
        self.assertEqual(sol.quality, 10)

    def test_add_city(self):
        sol = solution.Solution(self.p, size=3)
        sol.route = [0, 1, 2, 3]
        sol.add_city()
        print('Quality: ', sol.quality)
        self.assertEqual(sol.quality, 4)

    def test_remove_city(self):
        sol = solution.Solution(self.p)
        sol.route = [0, 1, 2, 3]
        sol.remove_city(3)
        print('Quality: ', sol.quality)
        self.assertEqual(sol.quality, 20)

    def test_remove_cities(self):
        sol = solution.Solution(self.p)
        sol.route = [0, 1, 2, 3]
        sol.remove_cities(quant=3)
        self.assertEqual(sol.quality, 35)
class Data():
    """Simple container pairing a training split with a validation split."""

    def __init__(self, train, valid):
        self.train, self.valid = train, valid
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, four stages (64/128/256/512), 4x4
    average pool, linear classifier."""

    def __init__(self, block, num_blocks, class_num=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, class_num)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        stage_blocks = []
        for step_stride in [stride] + [1] * (num_blocks - 1):
            stage_blocks.append(block(self.in_planes, planes, step_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage_blocks)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
class AccLossProbe(StatsProbe):
    """Probe that records the latest epoch's accuracy and loss for one split."""

    def __init__(self, **kwargs):
        super(AccLossProbe, self).__init__()
        self.type = kwargs['type']
        # Only the training or test split is supported.
        assert self.type in ('train', 'test')
        self.last_epoch_stats = {}

    def get_last_epoch_stats(self):
        return self.last_epoch_stats

    def epoch_prologue(self):
        # Reset at the start of each epoch.
        self.last_epoch_stats = {}

    def add_data(self, **kwargs):
        # Pull '<type>_loss' and '<type>_acc' out of the supplied kwargs.
        for suffix in ('_loss', '_acc'):
            key = self.type + suffix
            self.last_epoch_stats[key] = kwargs[key]
# NOTE(review): the original file had a bare `_cache(1)` statement here — a
# no-op call whose result was discarded. It reads like a stripped decorator
# (cf. upstream quimb/cotengra, which use `@functools.lru_cache(1)` on this
# function); restored as a decorator so the directory lookup is memoized.
@_cache(1)
def get_kahypar_profile_dir():
    """Return the directory containing kahypar profile files, selecting the
    'old' profiles for kahypar versions <= 1.1.6."""
    import kahypar
    import re
    m = re.compile('(\\d+)\\.(\\d+)\\.(\\d+)').match(kahypar.__version__)
    path_components = [abspath(dirname(__file__)), 'kahypar_profiles']
    if (m is not None):
        version = tuple(map(int, m.groups()))
        if (version <= (1, 1, 6)):
            path_components.append('old')
    return join(*path_components)
def get_latent_vectors(model, set, device, params):
    """Run `model` over every element of `set` and stack the resulting
    embeddings into a single (N, D) numpy array.

    In DEBUG mode returns random embeddings (assumed 256-d) without touching
    the model.
    """
    if DEBUG:
        return np.random.rand(len(set), 256)
    model.eval()
    collected = []
    for elem_ndx in set:
        item = load_data_item(set[elem_ndx]['query'], params)
        with torch.no_grad():
            batch = {}
            if params.use_cloud:
                # Quantize the point cloud and build a single-element sparse batch.
                quantized = ME.utils.sparse_quantize(coordinates=item['coords'], quantization_size=params.model_params.mink_quantization_size)
                batch_coords = ME.utils.batched_coordinates([quantized]).to(device)
                batch['coords'] = batch_coords
                # Constant per-point features.
                batch['features'] = torch.ones((batch_coords.shape[0], 1), dtype=torch.float32).to(device)
            if params.use_rgb:
                batch['images'] = item['image'].unsqueeze(0).to(device)
            out = model(batch)
            embedding = out['embedding']
            if params.normalize_embeddings:
                embedding = torch.nn.functional.normalize(embedding, p=2, dim=1)
            collected.append(embedding.detach().cpu().numpy())
    return np.vstack(collected)
def initialize_lr_scheduler(train_config, optimizer):
    """Build (lr_scheduler, warmup_scheduler) from a training config dict.

    The main scheduler is created only when `train_config['lr_scheduler']`
    names a known strategy AND has an entry in `scheduler_config`; otherwise
    it is None. The warmup scheduler exists only when warmup_epochs > 0.
    """
    base_lr = train_config['learning_rate']
    warmup_epochs = train_config['warmup_epochs']
    strategy = train_config['lr_scheduler']
    all_configs = train_config['scheduler_config']
    lr_scheduler = None
    if strategy in all_configs:
        strategy_kwargs = all_configs[strategy]
        factories = {
            'cosine': torch.optim.lr_scheduler.CosineAnnealingLR,
            'multiple_steps': torch.optim.lr_scheduler.MultiStepLR,
            'reduce_on_plateau': torch.optim.lr_scheduler.ReduceLROnPlateau,
            'exponential': torch.optim.lr_scheduler.ExponentialLR,
        }
        factory = factories.get(strategy)
        if factory is not None:
            lr_scheduler = factory(optimizer, **strategy_kwargs)
    warmup_scheduler = WarmupLRScheduler(optimizer, warmup_epochs, base_lr) if (warmup_epochs > 0) else None
    return (lr_scheduler, warmup_scheduler)
# NOTE(review): several decorators in this class were stripped in the source
# (bare `.unused`, `_grad()` and `_GENERATOR_REGISTRY.register()` statements,
# which are SyntaxErrors / no-ops). They are restored below to match the
# upstream detectron2 RPN implementation.
@_GENERATOR_REGISTRY.register()
class RPN(nn.Module):
    """Region Proposal Network: predicts objectness and anchor deltas from
    feature maps and emits scored box proposals (detectron2-style)."""

    def __init__(self, *, in_features: List[str], head: nn.Module, anchor_generator: nn.Module, anchor_matcher: Matcher, box2box_transform: Box2BoxTransform, batch_size_per_image: int, positive_fraction: float, pre_nms_topk: Tuple[(float, float)], post_nms_topk: Tuple[(float, float)], nms_thresh: float=0.7, min_box_size: float=0.0, anchor_boundary_thresh: float=(- 1.0), loss_weight: Union[(float, Dict[(str, float)])]=1.0, box_reg_loss_type: str='smooth_l1', smooth_l1_beta: float=0.0):
        super().__init__()
        self.in_features = in_features
        self.rpn_head = head
        self.anchor_generator = anchor_generator
        self.anchor_matcher = anchor_matcher
        self.box2box_transform = box2box_transform
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        # Keyed by self.training: index 0 is the train budget, 1 is test.
        self.pre_nms_topk = {True: pre_nms_topk[0], False: pre_nms_topk[1]}
        self.post_nms_topk = {True: post_nms_topk[0], False: post_nms_topk[1]}
        self.nms_thresh = nms_thresh
        self.min_box_size = float(min_box_size)
        self.anchor_boundary_thresh = anchor_boundary_thresh
        if isinstance(loss_weight, float):
            loss_weight = {'loss_rpn_cls': loss_weight, 'loss_rpn_loc': loss_weight}
        self.loss_weight = loss_weight
        self.box_reg_loss_type = box_reg_loss_type
        self.smooth_l1_beta = smooth_l1_beta

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        """Build constructor kwargs from a detectron2-style config node."""
        in_features = cfg.MODEL.RPN.IN_FEATURES
        ret = {'in_features': in_features, 'min_box_size': cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE, 'nms_thresh': cfg.MODEL.RPN.NMS_THRESH, 'batch_size_per_image': cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, 'positive_fraction': cfg.MODEL.RPN.POSITIVE_FRACTION, 'loss_weight': {'loss_rpn_cls': cfg.MODEL.RPN.LOSS_WEIGHT, 'loss_rpn_loc': (cfg.MODEL.RPN.BBOX_REG_LOSS_WEIGHT * cfg.MODEL.RPN.LOSS_WEIGHT)}, 'anchor_boundary_thresh': cfg.MODEL.RPN.BOUNDARY_THRESH, 'box2box_transform': Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS), 'box_reg_loss_type': cfg.MODEL.RPN.BBOX_REG_LOSS_TYPE, 'smooth_l1_beta': cfg.MODEL.RPN.SMOOTH_L1_BETA}
        ret['pre_nms_topk'] = (cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, cfg.MODEL.RPN.PRE_NMS_TOPK_TEST)
        ret['post_nms_topk'] = (cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, cfg.MODEL.RPN.POST_NMS_TOPK_TEST)
        ret['anchor_generator'] = build_anchor_generator(cfg, [input_shape[f] for f in in_features])
        ret['anchor_matcher'] = Matcher(cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True)
        ret['head'] = build_rpn_head(cfg, [input_shape[f] for f in in_features])
        return ret

    def _subsample_labels(self, label):
        # Randomly keep batch_size_per_image anchors (with the configured
        # positive fraction); everything else is set to -1 (ignore).
        (pos_idx, neg_idx) = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0)
        label.fill_((- 1))
        label.scatter_(0, pos_idx, 1)
        label.scatter_(0, neg_idx, 0)
        return label

    @torch.jit.unused
    @torch.no_grad()
    def label_and_sample_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]) -> Tuple[(List[torch.Tensor], List[torch.Tensor])]:
        """Match anchors to ground truth per image and subsample labels.

        Returns per-image (labels in {-1, 0, 1}, matched gt boxes).
        """
        anchors = Boxes.cat(anchors)
        gt_boxes = [x.gt_boxes for x in gt_instances]
        image_sizes = [x.image_size for x in gt_instances]
        del gt_instances
        gt_labels = []
        matched_gt_boxes = []
        for (image_size_i, gt_boxes_i) in zip(image_sizes, gt_boxes):
            match_quality_matrix = retry_if_cuda_oom(pairwise_iou)(gt_boxes_i, anchors)
            (matched_idxs, gt_labels_i) = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
            gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
            del match_quality_matrix
            if (self.anchor_boundary_thresh >= 0):
                # Optionally ignore anchors sticking out of the image.
                anchors_inside_image = anchors.inside_box(image_size_i, self.anchor_boundary_thresh)
                gt_labels_i[(~ anchors_inside_image)] = (- 1)
            gt_labels_i = self._subsample_labels(gt_labels_i)
            if (len(gt_boxes_i) == 0):
                matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
            else:
                matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
            gt_labels.append(gt_labels_i)
            matched_gt_boxes.append(matched_gt_boxes_i)
        return (gt_labels, matched_gt_boxes)

    @torch.jit.unused
    def losses(self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], gt_labels: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], gt_boxes: List[torch.Tensor]) -> Dict[(str, torch.Tensor)]:
        """Objectness BCE + box regression loss, normalized per image."""
        num_images = len(gt_labels)
        gt_labels = torch.stack(gt_labels)
        pos_mask = (gt_labels == 1)
        num_pos_anchors = pos_mask.sum().item()
        num_neg_anchors = (gt_labels == 0).sum().item()
        storage = get_event_storage()
        storage.put_scalar('rpn/num_pos_anchors', (num_pos_anchors / num_images))
        storage.put_scalar('rpn/num_neg_anchors', (num_neg_anchors / num_images))
        localization_loss = _dense_box_regression_loss(anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta)
        valid_mask = (gt_labels >= 0)
        objectness_loss = F.binary_cross_entropy_with_logits(cat(pred_objectness_logits, dim=1)[valid_mask], gt_labels[valid_mask].to(torch.float32), reduction='sum')
        normalizer = (self.batch_size_per_image * num_images)
        losses = {'loss_rpn_cls': (objectness_loss / normalizer), 'loss_rpn_loc': (localization_loss / normalizer)}
        losses = {k: (v * self.loss_weight.get(k, 1.0)) for (k, v) in losses.items()}
        return losses

    def forward(self, images: ImageList, features: Dict[(str, torch.Tensor)], gt_instances: Optional[List[Instances]]=None):
        """Return (proposals, losses); losses is empty outside training."""
        features = [features[f] for f in self.in_features]
        anchors = self.anchor_generator(features)
        (pred_objectness_logits, pred_anchor_deltas) = self.rpn_head(features)
        # (N, A, Hi, Wi) -> (N, Hi*Wi*A) per level.
        pred_objectness_logits = [score.permute(0, 2, 3, 1).flatten(1) for score in pred_objectness_logits]
        # (N, A*B, Hi, Wi) -> (N, Hi*Wi*A, B) per level.
        pred_anchor_deltas = [x.view(x.shape[0], (- 1), self.anchor_generator.box_dim, x.shape[(- 2)], x.shape[(- 1)]).permute(0, 3, 4, 1, 2).flatten(1, (- 2)) for x in pred_anchor_deltas]
        if self.training:
            assert (gt_instances is not None), 'RPN requires gt_instances in training!'
            (gt_labels, gt_boxes) = self.label_and_sample_anchors(anchors, gt_instances)
            losses = self.losses(anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes)
        else:
            losses = {}
        proposals = self.predict_proposals(anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes)
        return (proposals, losses)

    def predict_proposals(self, anchors: List[Boxes], pred_objectness_logits: List[torch.Tensor], pred_anchor_deltas: List[torch.Tensor], image_sizes: List[Tuple[(int, int)]]):
        # Proposals are treated as fixed inputs for the downstream ROI heads,
        # hence no_grad.
        with torch.no_grad():
            pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
            return find_top_rpn_proposals(pred_proposals, pred_objectness_logits, image_sizes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_size, self.training)

    def _decode_proposals(self, anchors: List[Boxes], pred_anchor_deltas: List[torch.Tensor]):
        # Apply predicted deltas to the anchors, level by level.
        N = pred_anchor_deltas[0].shape[0]
        proposals = []
        for (anchors_i, pred_anchor_deltas_i) in zip(anchors, pred_anchor_deltas):
            B = anchors_i.tensor.size(1)
            pred_anchor_deltas_i = pred_anchor_deltas_i.reshape((- 1), B)
            anchors_i = anchors_i.tensor.unsqueeze(0).expand(N, (- 1), (- 1)).reshape((- 1), B)
            proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i)
            proposals.append(proposals_i.view(N, (- 1), B))
        return proposals
class A2CTrainer(SingleTrainer, A2CModel):
    """Synchronous advantage actor-critic (A2C) trainer.

    Runs `num_processes` vectorized environments for `num_steps` per update,
    stores transitions in a RolloutStorage, and trains via the A2CModel mixin.
    """
    def __init__(self, name, env_kwargs, model_kwargs, max_time_steps, **kwargs):
        super().__init__(max_time_steps=max_time_steps, env_kwargs=env_kwargs, model_kwargs=model_kwargs)
        self.max_time_steps = max_time_steps
        self.name = name
        # A2C hyperparameters: rollout length, parallel envs, discount.
        self.num_steps = 5
        self.num_processes = 16
        self.gamma = 0.99
        self.allow_gpu = True
        self.log_dir = None
        self.win = None
    def _initialize(self, **model_kwargs):
        # Builds the model graph and resets rollout storage from a fresh env state.
        model = super()._build_graph(self.allow_gpu, **model_kwargs)
        self._tstart = time.time()
        self.rollouts = RolloutStorage(self.env.reset(), self._initial_states(self.num_processes))
        return model
    def save(self, path):
        super().save(path)
        self._save(path)
    def _finalize(self):
        # Remove the temporary monitor/log directory created in create_env.
        if (self.log_dir is not None):
            self.log_dir.cleanup()
    def create_env(self, env):
        """Create the vectorized training envs plus a single validation env."""
        self.log_dir = tempfile.TemporaryDirectory()
        seed = 1
        self.validation_env = make_vec_envs(env, seed, 1, self.gamma, self.log_dir.name, None, allow_early_resets=True)
        # 4-dim observation space => image observations; transpose to CHW.
        if (len(self.validation_env.observation_space.shape) == 4):
            self.validation_env = VecTransposeImage(self.validation_env)
        envs = make_vec_envs(env, (seed + 1), self.num_processes, self.gamma, self.log_dir.name, None, False)
        if (len(envs.observation_space.shape) == 4):
            envs = VecTransposeImage(envs)
        return envs
    def process(self, context, mode='train', **kwargs):
        # Dispatch a single train or validation step.
        metric_context = MetricContext()
        if (mode == 'train'):
            return self._process_train(context, metric_context)
        elif (mode == 'validation'):
            return self._process_validation(metric_context)
        else:
            raise Exception('Mode not supported')
    def _process_validation(self, metric_context):
        # Roll out one full episode greedily-ish in the validation env and
        # report its length and reward (taken from the monitor's info dict).
        done = False
        states = self._initial_states(1)
        ep_reward = 0.0
        ep_length = 0
        n_steps = 0
        observations = self.validation_env.reset()
        while (not done):
            (action, _, _, states) = self._step(observations, np.ones((1, 1), dtype=np.float32), states)
            (observations, reward, done, infos) = self.validation_env.step(action)
            done = done[0]
            info = infos[0]
            if ('episode' in info.keys()):
                ep_length = info['episode']['l']
                ep_reward = info['episode']['r']
            n_steps += 1
        return (n_steps, (ep_length, ep_reward), metric_context)
    def _sample_experience_batch(self):
        """Collect num_steps transitions into the rollout buffer and return a
        batched rollout plus (count, lengths, rewards) of finished episodes."""
        finished_episodes = ([], [])
        for _ in range(self.num_steps):
            (actions, values, action_log_prob, states) = self._step(self.rollouts.observations, self.rollouts.masks, self.rollouts.states)
            (observations, rewards, terminals, infos) = self.env.step(actions)
            for info in infos:
                if ('episode' in info.keys()):
                    finished_episodes[0].append(info['episode']['l'])
                    finished_episodes[1].append(info['episode']['r'])
            self.rollouts.insert(observations, actions, rewards, terminals, values, states)
        # Bootstrap value for the final state closes the n-step returns.
        (last_values, _) = self._value(self.rollouts.observations, self.rollouts.masks, self.rollouts.states)
        batched = self.rollouts.batch(last_values, self.gamma)
        return (batched, ((len(finished_episodes[0]),) + finished_episodes))
    def _process_train(self, context, metric_context):
        (batch, report) = self._sample_experience_batch()
        (loss, value_loss, action_loss, dist_entropy) = self._train(*batch)
        # Throughput since trainer initialization.
        fps = int((self._global_t / (time.time() - self._tstart)))
        metric_context.add_cummulative('updates', 1)
        metric_context.add_scalar('loss', loss)
        metric_context.add_scalar('value_loss', value_loss)
        metric_context.add_scalar('action_loss', action_loss)
        metric_context.add_scalar('entropy', dist_entropy)
        metric_context.add_last_value_scalar('fps', fps)
        return ((self.num_steps * self.num_processes), report, metric_context)
def MusicTaggerCRNN(weights='msd', input_tensor=None, include_top=True):
    """Build the convolutional-recurrent music tagger (CRNN) model.

    Input is a 96x1366 log-mel spectrogram; four conv/BN/ELU/pool stages feed
    two GRUs; the optional top is a 50-way sigmoid tagger. `weights='msd'`
    loads Million-Song-Dataset pre-trained weights.

    NOTE(review): this uses the Keras 1.x API (`Convolution2D(64, 3, 3, ...)`,
    `BatchNormalization(mode=0)`, `K.image_dim_ordering()`); it will not run
    unmodified on Keras 2+.
    """
    if (weights not in {'msd', None}):
        raise ValueError('The `weights` argument should be either `None` (random initialization) or `msd` (pre-training on Million Song Dataset).')
    # Channel placement depends on the backend's dim ordering ('th' = channels first).
    if (K.image_dim_ordering() == 'th'):
        input_shape = (1, 96, 1366)
    else:
        input_shape = (96, 1366, 1)
    if (input_tensor is None):
        melgram_input = Input(shape=input_shape)
    elif (not K.is_keras_tensor(input_tensor)):
        melgram_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        melgram_input = input_tensor
    if (K.image_dim_ordering() == 'th'):
        channel_axis = 1
        freq_axis = 2
        time_axis = 3
    else:
        channel_axis = 3
        freq_axis = 1
        time_axis = 2
    # Pad time axis so pooling divides evenly (1366 -> 1440).
    x = ZeroPadding2D(padding=(0, 37))(melgram_input)
    x = BatchNormalization(axis=time_axis, name='bn_0_freq')(x)
    # Conv stage 1: 64 filters, 2x2 pool.
    x = Convolution2D(64, 3, 3, border_mode='same', name='conv1')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn1')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
    # Conv stage 2: 128 filters, 3x3 pool.
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv2')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn2')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2')(x)
    # Conv stage 3: 128 filters, 4x4 pool.
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv3')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn3')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3')(x)
    # Conv stage 4: 128 filters, 4x4 pool.
    x = Convolution2D(128, 3, 3, border_mode='same', name='conv4')(x)
    x = BatchNormalization(axis=channel_axis, mode=0, name='bn4')(x)
    x = ELU()(x)
    x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4')(x)
    # Reshape the feature map to a (time=15, features=128) sequence for the GRUs.
    if (K.image_dim_ordering() == 'th'):
        x = Permute((3, 1, 2))(x)
    x = Reshape((15, 128))(x)
    x = GRU(32, return_sequences=True, name='gru1')(x)
    x = GRU(32, return_sequences=False, name='gru2')(x)
    if include_top:
        x = Dense(50, activation='sigmoid', name='output')(x)
    model = Model(melgram_input, x)
    if (weights is None):
        return model
    else:
        # Choose the weight file matching the backend's dim ordering; load by
        # layer name so the model works with or without the top layer.
        if (K.image_dim_ordering() == 'tf'):
            weights_path = get_file('music_tagger_crnn_weights_tf_kernels_tf_dim_ordering.h5', TF_WEIGHTS_PATH, cache_subdir='models')
        else:
            weights_path = get_file('music_tagger_crnn_weights_tf_kernels_th_dim_ordering.h5', TH_WEIGHTS_PATH, cache_subdir='models')
        model.load_weights(weights_path, by_name=True)
        if (K.backend() == 'theano'):
            convert_all_kernels_in_model(model)
        return model
def get_hash(in_str):
    """Return the SHA-512 hex digest (128 hex chars) of `in_str`, UTF-8 encoded."""
    digest = hashlib.sha512(in_str.encode('utf-8'))
    return str(digest.hexdigest())
def test_double_double_system(vrblvl=0):
    """Set a polynomial system in double double precision and verify that
    the same three polynomials can be retrieved back.

    Returns 0 on success (three polynomials retrieved), 1 otherwise.
    """
    polynomials = ['x^3 + 2*x*y - 1;', 'x + y - 1/3;', 'x - 1;']
    dim = number_of_symbols(polynomials, vrblvl)
    print('number of symbols :', dim)
    if dim != len(polynomials):
        print('number of polynomials :', len(polynomials))
        print(' number of variables :', dim)
        print('The system is not square.')
    else:
        print('The system is square.')
    set_double_double_system(dim, polynomials, vrblvl)
    retrieved = get_double_double_system(vrblvl)
    print('the retrieved polynomials :')
    for pol in retrieved:
        print(pol)
    # Zero exit code only when exactly the three polynomials come back.
    return 1 if len(retrieved) != 3 else 0
def parse_args():
    """Build and parse the command-line options for video classification.

    Returns the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='Video Classification')
    # (flag, kwargs) specs, in the order they should appear in --help.
    option_specs = [
        ('--mode', dict(type=str, default='test', help='train/test')),
        ('--model', dict(type=str, default='r21d', help='c3d/r3d/r21d')),
        ('--dataset', dict(type=str, default='K400', help='ucf101/hmdb51/K400')),
        ('--split', dict(type=str, default='1', help='dataset split')),
        ('--cl', dict(type=int, default=16, help='clip length')),
        ('--gpu', dict(type=int, default=0, help='GPU id')),
        ('--lr', dict(type=float, default=0.0001, help='learning rate')),
        ('--momentum', dict(type=float, default=0.9, help='momentum')),
        ('--wd', dict(type=float, default=0.0005, help='weight decay')),
        ('--log', dict(type=str, help='log directory')),
        ('--ckpt', dict(type=str, default='log/UCF101_TCG_split1_finetuned_loss_r21d_cl16_/model_13.pt', help='checkpoint path')),
        ('--desp', dict(type=str, help='additional description')),
        ('--epochs', dict(type=int, default=150, help='number of total epochs to run')),
        ('--start-epoch', dict(type=int, default=1, help='manual epoch number (useful on restarts)')),
        ('--bs', dict(type=int, default=16, help='mini-batch size')),
        ('--workers', dict(type=int, default=4, help='number of data loading workers')),
        ('--pf', dict(type=int, default=100, help='print frequency every batch')),
        ('--seed', dict(type=int, default=632, help='seed for initializing training.')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def cplx_batch_norm(input, running_mean, running_var, weight=None, bias=None, training=True, momentum=0.1, eps=1e-05):
    """Batch-normalize a complex input by 2x2 whitening of (real, imag).

    The real and imaginary parts are stacked along a new leading axis,
    jointly whitened via ``whiten2x2``, then optionally transformed by a
    learned 2x2 ``weight`` matrix and shifted by ``bias``.

    ``running_mean``/``running_var`` must be given together or both be None;
    likewise ``weight``/``bias``.
    """
    # Paired arguments must be consistently present or absent.
    assert (running_mean is None) == (running_var is None)
    assert (weight is None) == (bias is None)
    stacked = torch.stack([input.real, input.imag], dim=0)
    z = whiten2x2(stacked, training=training, running_mean=running_mean, running_cov=running_var, momentum=momentum, nugget=eps)
    if (weight is not None) and (bias is not None):
        # Broadcastable per-feature shape: (1, C, 1, ..., 1).
        tail = (1, stacked.shape[2], *([1] * (stacked.dim() - 3)))
        w = weight.reshape(2, 2, *tail)
        # Apply the 2x2 affine map to the (real, imag) pair, then shift.
        real_part = (z[0] * w[0, 0]) + (z[1] * w[0, 1])
        imag_part = (z[0] * w[1, 0]) + (z[1] * w[1, 1])
        z = torch.stack([real_part, imag_part], dim=0) + bias.reshape(2, *tail)
    return cplx.Cplx(z[0], z[1])
class OrnsteinUhlenbeckActionNoise(ActionNoise):
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Each call advances the process one Euler-Maruyama step of size ``dt``:
    mean reversion toward ``mu`` at rate ``theta`` plus Gaussian diffusion
    scaled by ``sigma``.
    """

    def __init__(self, mu, sigma, theta=0.15, dt=0.01, x0=None):
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        # One discretized OU step: drift toward the mean plus scaled noise.
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev

    def reset(self):
        """Restart the process at ``x0`` (or at zero when no ``x0`` is set)."""
        if self.x0 is not None:
            self.x_prev = self.x0
        else:
            self.x_prev = np.zeros_like(self.mu)

    def __repr__(self):
        return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
class ByClassDataset(Dataset):
    """Re-index a labeled dataset so that item *i* returns the *i*-th sample
    of every class at once.

    Assumes class labels are the integers ``0 .. n_classes-1``. The length is
    limited by the smallest class, so every index is valid for all classes.
    """

    def __init__(self, ds):
        self.dataset = ds
        # Map each class label to the list of sample positions holding it.
        self.idx_by_class = {}
        for position, (_, label) in enumerate(ds):
            self.idx_by_class.setdefault(label, []).append(position)

    def __len__(self):
        # Bounded by the rarest class.
        return min(len(indices) for indices in self.idx_by_class.values())

    def __getitem__(self, idx):
        chosen = [self.idx_by_class[c][idx] for c in range(len(self.idx_by_class))]
        labels = torch.LongTensor([self.dataset[i][1] for i in chosen])
        items = [self.dataset[i][0] for i in chosen]
        if torch.is_tensor(items[0]):
            # Stack per-class tensors into one (n_classes, ...) batch.
            items = torch.stack(items)
        return (items, labels)
def run():
    """End-to-end pipeline: rebuild the DB tables, parse the project,
    export the dataset, then start generation and analyze the results."""
    drop_table()
    create_table()
    # Task.parse presumably returns the path to the parsed project info
    # — TODO confirm against Task's implementation.
    info_path = Task.parse(project_dir)
    parse_data(info_path)
    clear_dataset()
    export_data()
    # Project name is the last path component of the project directory.
    project_name = os.path.basename(os.path.normpath(project_dir))
    # NOTE(review): SQL built via str.format — acceptable for a trusted local
    # directory name, but not parameterized; avoid with untrusted input.
    sql_query = "\n    SELECT id FROM method WHERE project_name='{}';\n    ".format(project_name)
    start_generation(sql_query, multiprocess=False, repair=True, confirmed=False)
    result_analysis()
class DefaultInitFun():
    """Structured (Hydra-style) config node for
    ``dynamics.init_coordinates.DefaultInitFun``.

    NOTE(review): the use of ``field(...)`` and ``MISSING`` implies this class
    is decorated as a dataclass at a site not visible here — confirm.
    """
    # Dotted path of the class Hydra instantiates from this config.
    _target_: str = 'dynamics.init_coordinates.DefaultInitFun'
    # Hidden dims; II('dataset.N_CLASSES') is an OmegaConf interpolation
    # resolved lazily against the composed config.
    h_dims: Tuple[int] = field(default_factory=(lambda : (II('dataset.N_CLASSES'),)))
    # Mandatory: must be supplied by the composing configuration.
    param_map: Optional[Any] = MISSING
class Boxban_Env0(BoxobanEnv):
    """Boxoban environment preset: unfiltered training split, 200-step limit."""

    metadata = {'render.modes': ['human', 'rgb_array', 'tiny_human', 'tiny_rgb_array']}

    def __init__(self):
        # Fixed configuration; this preset takes no constructor arguments.
        super().__init__(max_steps=200, difficulty='unfiltered', split='train')
def encode_huffman_tree(root, dtype):
    """Serialize a Huffman tree into a bitstring by pre-order traversal.

    Each internal node is encoded as '0' followed by its left then right
    subtree; each leaf (``node.value is not None``) is encoded as '1'
    followed by the bit representation of its value, produced by
    ``float2bitstr`` for 'float32' or ``int2bitstr`` for 'int32'.

    Args:
        root: tree root; nodes expose ``value``, ``left``, ``right``.
        dtype: 'float32' or 'int32' — selects the value converter.

    Returns:
        str: the concatenated bitstring.
    """
    converter = {'float32': float2bitstr, 'int32': int2bitstr}
    code_list = []

    def encode_node(node):
        if node.value is not None:
            # Leaf: marker bit, then the value's bits.
            code_list.append('1')
            # BUG FIX: the original appended list(converter[dtype](...)) — a
            # list object — into code_list, which made ''.join(code_list)
            # raise TypeError for any tree with a leaf. extend() adds the
            # individual characters instead.
            code_list.extend(converter[dtype](node.value))
        else:
            code_list.append('0')
            encode_node(node.left)
            encode_node(node.right)

    encode_node(root)
    return ''.join(code_list)
class VitHgface(torch.nn.Module):
    """Pretrained HuggingFace ViT used as a frozen feature extractor.

    ``forward`` reshapes the input to (N, 3, 224, 224) image batches and
    returns the model's pooler output (no gradients are tracked).
    """

    transform = transforms.Lambda(vit_transform)
    name = 'ViT_hgface'

    def __init__(self):
        super().__init__()
        self.vit = ViTModel.from_pretrained(VIT_MODEL)

    def forward(self, x):
        # Collapse any leading dims into a batch of 224x224 RGB images.
        pixels = x.view(-1, 3, 224, 224)
        # Feature extraction only — no gradient bookkeeping needed.
        with torch.no_grad():
            outputs = self.vit(pixel_values=pixels)
        return outputs.pooler_output
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (optionally distributed) ImageNet-style training.

    Builds the model, wires it for distributed / data-parallel execution,
    optionally restores a checkpoint, constructs the data loaders, then runs
    the train/validate loop, checkpointing the best top-1 accuracy.

    Args:
        gpu: GPU index assigned to this worker (may be None).
        ngpus_per_node: GPUs on this node; used to shard batch size/workers
            and to derive the global rank.
        args: parsed CLI namespace (expects .gpu, .distributed, .dist_url,
            .rank, .arch, .resume, .data, .batch_size, .workers, ...).
    """
    global best_acc1
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        # env:// rendezvous takes the rank from the environment; with
        # multiprocessing each spawned worker offsets the node rank by its gpu index.
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Instantiate the architecture from the torchvision model zoo by name.
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    print(model)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # One process per GPU: split the global batch and workers across GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((args.workers / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DDP without device_ids divides the batch across all visible GPUs.
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        # Single-GPU, non-distributed.
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG: parallelize the feature extractor only.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state and best accuracy from a checkpoint.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                # best_acc1 may be a tensor saved from a different device.
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Standard ImageNet data pipeline: augmented train set, center-crop val set.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    # NOTE(review): decay_step is set to batches-per-epoch — presumably consumed
    # by the LR schedule inside train(); confirm against that implementation.
    args.decay_step = len(train_loader)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle the distributed shards differently each epoch.
            train_sampler.set_epoch(epoch)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only one process per node (rank % ngpus == 0) writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
# NOTE(review): bare module-level call with a side effect only — the name
# suggests registration (possibly residue of a register_module() decorator
# that originally adorned the following class); confirm against the
# surrounding project before relying on it.
_module()
class CityscapesDataset(CocoDataset):
    """COCO-format dataset for Cityscapes instance segmentation.

    Adds the eight Cityscapes "thing" classes, dumping of predictions in the
    official txt+png submission format, and evaluation via the
    ``cityscapesscripts`` toolkit (metric ``'cityscapes'``); all standard
    COCO metrics are delegated to a plain :class:`CocoDataset`.
    """

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
    PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]

    def _filter_imgs(self, min_size=32):
        """Filter out images with no usable annotations or too-small size.

        Also rewrites ``self.img_ids`` to the kept subset.

        Args:
            min_size (int): minimum allowed width/height of an image.

        Returns:
            list[int]: indices into ``self.data_infos`` of the kept images.
        """
        valid_inds = []
        # Image ids that carry at least one annotation at all.
        ids_with_ann = set((_['image_id'] for _ in self.coco.anns.values()))
        # Image ids containing at least one instance of a used category.
        ids_in_cat = set()
        for (i, class_id) in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for (i, img_info) in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            # When filter_empty_gt is set (training), drop images without any
            # wanted class or with only crowd regions.
            if (self.filter_empty_gt and ((self.img_ids[i] not in ids_in_cat) or all_iscrowd)):
                continue
            if (min(img_info['width'], img_info['height']) >= min_size):
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse raw COCO annotations into bbox/label/mask arrays.

        Args:
            img_info (dict): image info (must contain 'segm_file').
            ann_info (list[dict]): raw annotation records for the image.

        Returns:
            dict: bboxes, labels, bboxes_ignore (crowd boxes), masks, seg_map.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for (i, ann) in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            (x1, y1, w, h) = ann['bbox']
            # Skip degenerate boxes.
            if ((ann['area'] <= 0) or (w < 1) or (h < 1)):
                continue
            if (ann['category_id'] not in self.cat_ids):
                continue
            # Convert xywh to xyxy.
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            if ann.get('iscrowd', False):
                # Crowd regions are ignored during assignment, not trained on.
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file'])
        return ann

    def results2txt(self, results, outfile_prefix):
        """Dump results in the official Cityscapes submission format.

        For every image a ``<basename>_pred.txt`` is written listing one line
        per instance (``<mask png> <label id> <score>``), and each instance
        mask is saved as a binary png alongside it.

        Args:
            results (list[tuple]): per-image ``(bbox_result, segm_result)``.
            outfile_prefix (str): directory in which all files are created.

        Returns:
            list[str]: paths of the written txt files.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            # BUG FIX: the install hint previously misspelled the package
            # name as 'citscapesscripts'.
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, (basename + '_pred.txt'))
            (bbox_result, segm_result) = result
            bboxes = np.vstack(bbox_result)
            if isinstance(segm_result, tuple):
                # Some models return (masks, mask scores).
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                # Otherwise use the bbox confidence (last column) as the mask score.
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[(- 1)] for bbox in bboxes]
            labels = [np.full(bbox.shape[0], i, dtype=np.int32) for (i, bbox) in enumerate(bbox_result)]
            labels = np.concatenate(labels)
            assert (len(bboxes) == len(segms) == len(labels))
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    # Map our class name to the official Cityscapes label id.
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, (basename + f'_{i}_{classes}.png'))
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'''{osp.basename(png_filename)} {class_id} {score}
''')
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format results for Cityscapes evaluation, writing them to disk.

        Args:
            results (list): testing results of the dataset.
            txtfile_prefix (str | None): output prefix; when None a temporary
                directory is created and returned for the caller to clean up.

        Returns:
            tuple: (result_files, tmp_dir); ``tmp_dir`` is None when
            *txtfile_prefix* was given.
        """
        # BUG FIX: these two asserts were duplicated verbatim in the original;
        # the redundant copies were removed.
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), 'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self))
        if (txtfile_prefix is None):
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return (result_files, tmp_dir)

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluate results; supports 'cityscapes' plus all COCO metrics.

        The 'cityscapes' metric runs through cityscapesscripts; every other
        requested metric is delegated to a freshly-built plain CocoDataset
        over the same annotation file.

        Returns:
            dict: metric name -> value.
        """
        eval_results = dict()
        metrics = (metric.copy() if isinstance(metric, list) else [metric])
        if ('cityscapes' in metrics):
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if (len(metrics) > 0):
            # Rebuild a vanilla CocoDataset so COCO evaluation does not go
            # through this subclass' overridden parsing/filtering.
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, None, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Run the official cityscapesscripts instance-level evaluation.

        Returns:
            OrderedDict: 'mAP' (allAp) and AP@50 (see NOTE below).
        """
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            # BUG FIX: the install hint previously misspelled the package
            # name as 'citscapesscripts'.
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if (logger is None):
            msg = ('\n' + msg)
        print_log(msg, logger=logger)
        (result_files, tmp_dir) = self.format_results(results, txtfile_prefix)
        if (tmp_dir is None):
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')
        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure cityscapesscripts' module-level args object in place.
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')
        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), f'Cannot find ground truth images in {CSEval.args.groundTruthSearch}.'
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']
        eval_results['mAP'] = CSEval_results['allAp']
        # NOTE(review): AP@50 is stored under an empty-string key; it was
        # likely meant to be e.g. 'AP50', but the key is kept because callers
        # may already read it — confirm before renaming.
        eval_results[''] = CSEval_results['allAp50%']
        if (tmp_dir is not None):
            tmp_dir.cleanup()
        return eval_results
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.