def _make_cross_attention_qkv(d, db, input, keys_input, output, num_heads=8, key_dim=64, value_dim=64, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0)):
d[(output + '_query0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * key_dim), 'forward_weights_init': ff_init}
db[(output + '_key0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [keys_input], 'n_out': (num_heads * key_dim), 'forward_weights_init': ff_init}
db[(output + '_value0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [keys_input], 'n_out': (num_heads * value_dim), 'forward_weights_init': ff_init}
d[(output + '_query_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, key_dim), 'from': [(output + '_query0')]}
db[(output + '_key_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, key_dim), 'from': [(output + '_key0')]}
db[(output + '_value_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, value_dim), 'from': [(output + '_value0')]}
d[(output + '_query')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_query_unnamed')]}
db[(output + '_key')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_key_unnamed')]}
db[(output + '_value')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_value_unnamed')]}
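# Usage sketch (illustrative; the layer names 'lay_norm' and 'encoder' are hypothetical):
# _make_cross_attention_qkv(d, db, input='lay_norm', keys_input='encoder', output='cross_att')
# adds d['cross_att_query'] to the decoder net and db['cross_att_key'], db['cross_att_value'] to the
# base/encoder net, each with a named 'att-heads' axis and key_dim resp. value_dim features.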
def generic_add_lsh_attention_layer(d, queries_input, keys_input, values_input, output, *, query_time_axis, key_time_axis, num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.0, num_hashes, query_chunk_size, key_chunk_size, key_chunks_before=None, key_chunks_after=None, hash_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), small_mask_value=float((- (10 ** 5))), past_only=None, mask_current=None, mask_different_hashes=True, allow_duplicate_attention=False, chunk_alignment, shuffle_kv=False, query_hash_dropin=0.0, key_hash_dropin=0.0, debug_print=False):
'\n Computes LSH attention for an entire sequence.\n\n :param dict[str,dict] d:\n :param str queries_input:\n :param str keys_input:\n :param str values_input:\n :param str output:\n :param str query_time_axis:\n :param str key_time_axis: key and value time axis\n :param int num_heads:\n :param int num_rounds:\n :param int key_dim:\n :param int value_dim:\n :param float dropout:\n :param int num_hashes:\n :param int query_chunk_size:\n :param int key_chunk_size:\n :param int|None key_chunks_before:\n :param int|None key_chunks_after:\n :param str hash_init: for hash generator matrices\n :param float small_mask_value:\n :param None|bool past_only: for self attention\n :param None|bool mask_current: for self attention\n :param bool mask_different_hashes:\n :param bool allow_duplicate_attention:\n :param str chunk_alignment:\n :param bool shuffle_kv:\n :param float query_hash_dropin: chance of assigning a random hash to a query\n :param float key_hash_dropin: chance of assigning a random hash to a key\n :param bool debug_print:\n '
assert (query_time_axis.startswith('stag:') and key_time_axis.startswith('stag:'))
self_attention = (query_time_axis == key_time_axis)
assert (self_attention == (past_only is not None) == (mask_current is not None))
if (key_chunks_before is None):
key_chunks_before = 1
if (key_chunks_after is None):
key_chunks_after = (0 if (past_only and (chunk_alignment == 'identity')) else 1)
assert ((key_chunks_before >= 0) and (key_chunks_after >= 0))
hash_mask_value = ((2 ** 31) - 1)
assert (hash_mask_value > num_hashes)
assert (chunk_alignment in {'identity', 'search_bounds_centered'})
assert (small_mask_value < 0)
even_smaller_mask_value = (small_mask_value * (10 ** 5))
def chunk_query_sequence(name, pad_value, have_feature_dim=False):
'\n :param str name:\n :param float pad_value:\n :param bool have_feature_dim:\n '
d[(output + ('_sorted_chunked_%s_%s' % (name, ('unnamed' if have_feature_dim else 'feature'))))] = {'class': 'split_dims', 'from': [(output + ('_sorted_%s' % name))], 'pad_value': pad_value, 'axis': query_time_axis, 'dims': [(- 1), query_chunk_size]}
if (not have_feature_dim):
d[(output + ('_sorted_chunked_%s_unnamed' % name))] = {'class': 'reinterpret_data', 'from': [(output + ('_sorted_chunked_%s_feature' % name))], 'set_axes': {'F': None}}
d[(output + ('_sorted_chunked_%s' % name))] = {'class': 'name_axis', 'axis': ['T', 'T+1'], 'description': ['query-chunk', 'query-window'], 'from': [(output + ('_sorted_chunked_%s_unnamed' % name))]}
def chunk_key_sequence(name, pad_value, have_feature_dim=False):
'\n :param str name:\n :param float pad_value:\n :param bool have_feature_dim:\n '
d[(output + ('_sorted_chunked_%s_%s' % (name, ('unnamed' if have_feature_dim else 'feature'))))] = {'class': 'split_dims', 'from': [(output + ('_sorted_%s' % name))], 'pad_value': pad_value, 'axis': key_time_axis, 'dims': [(- 1), key_chunk_size]}
if (not have_feature_dim):
d[(output + ('_sorted_chunked_%s_unnamed' % name))] = {'class': 'reinterpret_data', 'from': [(output + ('_sorted_chunked_%s_feature' % name))], 'set_axes': {'F': None}}
d[(output + ('_sorted_chunked_%s' % name))] = {'class': 'name_axis', 'axis': ['T', 'T+1'], 'description': ['key-chunk', 'key-window'], 'from': [(output + ('_sorted_chunked_%s_unnamed' % name))]}
def stack_chunked_key_sequence(name):
'\n :param str name:\n '
d[(output + ('_sorted_chunked_stacked_%s_unflattened' % name))] = {'class': 'gather', 'from': [(output + ('_sorted_chunked_%s' % name))], 'position': (output + '_query_chunk_alignment'), 'axis': 'stag:key-chunk'}
d[(output + ('_sorted_chunked_stacked_%s_unnamed' % name))] = {'class': 'merge_dims', 'from': [(output + ('_sorted_chunked_stacked_%s_unflattened' % name))], 'keep_order': True, 'axes': ['stag:key-chunk-offset', 'stag:key-window']}
d[(output + ('_sorted_chunked_stacked_%s' % name))] = {'class': 'name_axis', 'from': [(output + ('_sorted_chunked_stacked_%s_unnamed' % name))], 'axis': 'stag:query-chunk+1', 'description': 'stacked-key-window'}
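# Note (descriptive, based on the helpers above): chunk_query_sequence / chunk_key_sequence reshape the
# hash-sorted sequences into [query-chunk, query-window] resp. [key-chunk, key-window] axes, and
# stack_chunked_key_sequence then gathers, per query chunk, the key chunks selected by
# '_query_chunk_alignment' (key_chunks_before + 1 + key_chunks_after of them) and merges them into one
# 'stacked-key-window' axis of size (key_chunks_before + 1 + key_chunks_after) * key_chunk_size.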
d[(output + '_keys_all_indices')] = {'class': 'range_in_axis', 'from': [keys_input], 'axis': key_time_axis, 'keepdims': False}
d[(output + '_queries_all_indices')] = {'class': 'range_in_axis', 'from': [queries_input], 'axis': query_time_axis, 'keepdims': False}
make_lsh_hash_gen(d, (output + '_hash_gen'), key_dim=key_dim, num_hashes=num_hashes, num_heads=num_heads, num_rounds=num_rounds, hash_init=hash_init)
for (neg, mask_value) in [('', hash_mask_value), ('_neg_mask', (- hash_mask_value))]:
apply_lsh_hash_gen(d, input=queries_input, hash_gen_input=(output + '_hash_gen'), output=(output + ('_queries_hashed%s' % neg)), time_axis=query_time_axis, num_hashes=num_hashes, hash_mask_value=mask_value, hash_dropin=query_hash_dropin)
apply_lsh_hash_gen(d, input=keys_input, hash_gen_input=(output + '_hash_gen'), output=(output + ('_keys_hashed%s' % neg)), time_axis=key_time_axis, num_hashes=num_hashes, hash_mask_value=mask_value, hash_dropin=key_hash_dropin)
d[(output + '_sorted_queries_orig_indices')] = {'class': 'eval', 'eval': (argsort_eval % query_time_axis), 'from': [(output + '_queries_hashed')]}
if shuffle_kv:
d[(output + '_shuffled_keys_orig_indices')] = {'class': 'eval', 'from': [(output + '_keys_all_indices')], 'eval': 'tf.random.shuffle(source(0))'}
d[(output + '_shuffled_keys_hashed')] = {'class': 'gather', 'from': [(output + '_keys_hashed')], 'position': (output + '_shuffled_keys_orig_indices'), 'axis': key_time_axis}
d[(output + '_sorted_keys_shuffled_indices')] = {'class': 'eval', 'eval': (argsort_eval % key_time_axis), 'from': [(output + '_shuffled_keys_hashed')]}
d[(output + '_sorted_keys_orig_indices')] = {'class': 'gather', 'from': [(output + '_shuffled_keys_orig_indices')], 'position': (output + '_sorted_keys_shuffled_indices'), 'axis': key_time_axis}
else:
d[(output + '_sorted_keys_orig_indices')] = {'class': 'eval', 'eval': (argsort_eval % key_time_axis), 'from': [(output + '_keys_hashed')]}
chunk_query_sequence('queries_orig_indices', pad_value=hash_mask_value)
chunk_key_sequence('keys_orig_indices', pad_value=hash_mask_value)
stack_chunked_key_sequence('keys_orig_indices')
d[(output + '_sorted_chunked_queries_orig_indices_clipped')] = {'class': 'eval', 'from': [(output + '_sorted_chunked_queries_orig_indices')], 'eval': clip_eval, 'eval_locals': {'mask_value': hash_mask_value}}
d[(output + '_sorted_chunked_stacked_keys_orig_indices_clipped')] = {'class': 'eval', 'from': [(output + '_sorted_chunked_stacked_keys_orig_indices')], 'eval': clip_eval, 'eval_locals': {'mask_value': hash_mask_value}}
d[(output + '_queries_sort_indices')] = {'class': 'scatter_nd', 'from': [(output + '_queries_all_indices')], 'position': (output + '_sorted_queries_orig_indices'), 'position_axis': query_time_axis, 'output_dim_via_time_from': (output + '_sorted_queries_orig_indices')}
d[(output + '_keys_sort_indices')] = {'class': 'scatter_nd', 'from': [(output + '_keys_all_indices')], 'position': (output + '_sorted_keys_orig_indices'), 'position_axis': key_time_axis, 'output_dim_via_time_from': (output + '_sorted_keys_orig_indices')}
d[(output + '_sorted_queries_hashed')] = {'class': 'gather', 'from': [(output + '_queries_hashed')], 'axis': query_time_axis, 'position': (output + '_sorted_queries_orig_indices')}
d[(output + '_sorted_keys_hashed')] = {'class': 'gather', 'from': [(output + '_keys_hashed')], 'axis': key_time_axis, 'position': (output + '_sorted_keys_orig_indices')}
d[(output + '_sorted_queries_hashed_neg_mask')] = {'class': 'gather', 'from': [(output + '_queries_hashed_neg_mask')], 'axis': query_time_axis, 'position': (output + '_sorted_queries_orig_indices')}
d[(output + '_sorted_keys_hashed_neg_mask')] = {'class': 'gather', 'from': [(output + '_keys_hashed_neg_mask')], 'axis': key_time_axis, 'position': (output + '_sorted_keys_orig_indices')}
chunk_query_sequence('queries_hashed', pad_value=hash_mask_value)
chunk_key_sequence('keys_hashed', pad_value=hash_mask_value)
stack_chunked_key_sequence('keys_hashed')
chunk_query_sequence('queries_hashed_neg_mask', pad_value=(- hash_mask_value))
chunk_key_sequence('keys_hashed_neg_mask', pad_value=(- hash_mask_value))
d[(output + '_sorted_queries_unscaled')] = {'class': 'gather', 'from': [queries_input], 'axis': query_time_axis, 'position': (output + '_sorted_queries_orig_indices')}
d[(output + '_sorted_queries')] = {'class': 'eval', 'eval': ('%s * source(0)' % (key_dim ** (- 0.5))), 'from': [(output + '_sorted_queries_unscaled')]}
d[(output + '_sorted_keys')] = {'class': 'gather', 'from': [keys_input], 'axis': key_time_axis, 'position': (output + '_sorted_keys_orig_indices')}
d[(output + '_sorted_values')] = {'class': 'gather', 'from': [values_input], 'axis': key_time_axis, 'position': (output + '_sorted_keys_orig_indices')}
chunk_query_sequence('queries', pad_value=0.0, have_feature_dim=True)
chunk_key_sequence('keys', pad_value=0.0, have_feature_dim=True)
chunk_key_sequence('values', pad_value=0.0, have_feature_dim=True)
d[(output + '_sorted_chunked_valid_query_position')] = {'class': 'compare', 'from': [(output + '_sorted_chunked_queries_hashed')], 'value': hash_mask_value, 'kind': 'not_equal'}
d[(output + '_sorted_chunked_valid_key_position')] = {'class': 'compare', 'from': [(output + '_sorted_chunked_stacked_keys_hashed')], 'value': hash_mask_value, 'kind': 'not_equal'}
if (chunk_alignment == 'identity'):
d[(output + '_query_chunk_alignment_center')] = {'class': 'range_in_axis', 'axis': 'stag:query-chunk', 'keepdims': False, 'from': [(output + '_sorted_chunked_queries')]}
elif (chunk_alignment == 'search_bounds_centered'):
assert (key_chunks_before == key_chunks_after)
d[(output + '_sorted_chunked_queries_hashed_min')] = {'class': 'reduce', 'mode': 'min', 'axis': 'stag:query-window', 'from': [(output + '_sorted_chunked_queries_hashed')]}
d[(output + '_sorted_chunked_queries_hashed_max')] = {'class': 'reduce', 'mode': 'max', 'axis': 'stag:query-window', 'from': [(output + '_sorted_chunked_queries_hashed_neg_mask')]}
d[(output + '_sorted_chunked_keys_hashed_min')] = {'class': 'reduce', 'mode': 'min', 'axis': 'stag:key-window', 'from': [(output + '_sorted_chunked_keys_hashed')]}
d[(output + '_sorted_chunked_keys_hashed_max')] = {'class': 'reduce', 'mode': 'max', 'axis': 'stag:key-window', 'from': [(output + '_sorted_chunked_keys_hashed_neg_mask')]}
d[(output + '_query_chunk_alignment_lower_key_chunk')] = {'class': 'search_sorted', 'axis': 'stag:key-chunk', 'side': 'left', 'sorted_sequence': (output + '_sorted_chunked_keys_hashed_min'), 'values': (output + '_sorted_chunked_queries_hashed_min')}
d[(output + '_query_chunk_alignment_upper_key_chunk')] = {'class': 'search_sorted', 'axis': 'stag:key-chunk', 'side': 'right', 'sorted_sequence': (output + '_sorted_chunked_keys_hashed_max'), 'values': (output + '_sorted_chunked_queries_hashed_max')}
d[(output + '_query_chunk_alignment_center')] = {'class': 'eval', 'from': [(output + '_query_chunk_alignment_lower_key_chunk'), (output + '_query_chunk_alignment_upper_key_chunk')], 'eval': 'tf.cast(tf.round((source(0) + source(1) - 1) / 2), dtype="int32")', 'out_type': {'dtype': 'int32'}}
d[(output + '_query_chunk_alignment_offset_unnamed')] = {'class': 'range', 'start': (- key_chunks_before), 'delta': 1, 'limit': (key_chunks_after + 1)}
d[(output + '_query_chunk_alignment_offset')] = {'class': 'name_axis', 'from': [(output + '_query_chunk_alignment_offset_unnamed')], 'axis': 'F', 'description': 'key-chunk-offset'}
d[(output + '_query_chunk_alignment_unbounded')] = {'class': 'combine', 'from': [(output + '_query_chunk_alignment_center'), (output + '_query_chunk_alignment_offset')], 'kind': 'add'}
d[(output + '_key_chunk_count_individual')] = {'class': 'length', 'from': [(output + '_sorted_chunked_keys')]}
d[(output + '_key_chunk_count')] = {'class': 'reduce', 'mode': 'max', 'from': [(output + '_key_chunk_count_individual')], 'axis': 'B'}
d[(output + '_query_chunk_alignment')] = {'class': 'eval', 'from': [(output + '_query_chunk_alignment_unbounded'), (output + '_key_chunk_count')], 'eval': 'tf.math.floormod(source(0), source(1))'}
d[(output + '_query_chunk_alignment_other')] = {'class': 'name_axis', 'from': [(output + '_query_chunk_alignment')], 'axis': 'stag:key-chunk-offset', 'description': 'other-key-chunk-offset'}
d[(output + '_query_chunk_alignment_indices')] = {'class': 'range_in_axis', 'from': [(output + '_query_chunk_alignment')], 'axis': 'stag:key-chunk-offset', 'keepdims': False}
d[(output + '_query_chunk_alignment_other_indices')] = {'class': 'range_in_axis', 'from': [(output + '_query_chunk_alignment_other')], 'axis': 'stag:other-key-chunk-offset', 'keepdims': False}
d[(output + '_query_chunk_alignment_compare')] = {'class': 'compare', 'from': [(output + '_query_chunk_alignment'), (output + '_query_chunk_alignment_other')], 'kind': 'equal'}
d[(output + '_query_chunk_alignment_left_only')] = {'class': 'compare', 'from': [(output + '_query_chunk_alignment_indices'), (output + '_query_chunk_alignment_other_indices')], 'kind': 'greater'}
d[(output + '_query_chunk_alignment_compare_left_only')] = {'class': 'combine', 'from': [(output + '_query_chunk_alignment_compare'), (output + '_query_chunk_alignment_left_only')], 'kind': 'logical_and'}
d[(output + '_query_chunk_alignment_duplicate_mask')] = {'class': 'reduce', 'mode': 'any', 'from': [(output + '_query_chunk_alignment_compare_left_only')], 'axis': 'stag:other-key-chunk-offset'}
if (not allow_duplicate_attention):
assert ((num_rounds == 1) or mask_different_hashes), 'cannot be implemented efficiently'
d[(output + '_other_round_queries_hashed')] = {'class': 'name_axis', 'from': [(output + '_queries_hashed')], 'axis': 'stag:att-round', 'description': 'other-att-round'}
d[(output + '_sorted_chunked_other_round_queries_hashed')] = {'class': 'gather', 'from': [(output + '_other_round_queries_hashed')], 'position': (output + '_sorted_chunked_queries_orig_indices_clipped'), 'axis': query_time_axis}
d[(output + '_other_round_keys_hashed')] = {'class': 'name_axis', 'from': [(output + '_keys_hashed')], 'axis': 'stag:att-round', 'description': 'other-att-round'}
d[(output + '_sorted_chunked_stacked_other_round_keys_hashed')] = {'class': 'gather', 'from': [(output + '_other_round_keys_hashed')], 'position': (output + '_sorted_chunked_stacked_keys_orig_indices_clipped'), 'axis': key_time_axis}
d[(output + '_sorted_chunked_other_round_compare')] = {'class': 'compare', 'from': [(output + '_sorted_chunked_other_round_queries_hashed'), (output + '_sorted_chunked_stacked_other_round_keys_hashed')], 'kind': 'equal'}
d[(output + '_round_indices')] = {'class': 'range_in_axis', 'from': [(output + '_queries_hashed')], 'axis': 'stag:att-round', 'keepdims': False}
d[(output + '_other_round_indices')] = {'class': 'range_in_axis', 'from': [(output + '_other_round_queries_hashed')], 'axis': 'stag:other-att-round', 'keepdims': False}
d[(output + '_round_left_only')] = {'class': 'compare', 'from': [(output + '_round_indices'), (output + '_other_round_indices')], 'kind': 'greater'}
d[(output + '_sorted_chunked_other_round_compare_left_only')] = {'class': 'combine', 'from': [(output + '_sorted_chunked_other_round_compare'), (output + '_round_left_only'), (output + '_sorted_chunked_valid_query_position'), (output + '_sorted_chunked_valid_key_position')], 'kind': 'logical_and'}
d[(output + '_sorted_chunked_round_duplicate_mask')] = {'class': 'reduce', 'mode': 'any', 'from': [(output + '_sorted_chunked_other_round_compare_left_only')], 'axis': 'stag:other-att-round'}
stack_chunked_key_sequence('keys')
stack_chunked_key_sequence('values')
large_masking_layers_from = []
small_masking_layers_from = []
if past_only:
d[(output + '_sorted_chunked_mask_past_only')] = {'class': 'compare', 'from': [(output + '_sorted_chunked_queries_orig_indices'), (output + '_sorted_chunked_stacked_keys_orig_indices')], 'kind': 'less'}
large_masking_layers_from.append((output + '_sorted_chunked_mask_past_only'))
if mask_different_hashes:
is_small = (not mask_current)
d[(output + ('_sorted_chunked%s_mask_matching_hash%s' % (('_small', '_all') if is_small else ('', ''))))] = {'class': 'compare', 'from': [(output + '_sorted_chunked_queries_hashed'), (output + '_sorted_chunked_stacked_keys_hashed')], 'kind': 'not_equal'}
if is_small:
d[(output + '_sorted_chunked_small_mask_matching_hash')] = {'class': 'eval', 'from': [(output + '_sorted_chunked_small_mask_matching_hash_all'), (output + '_sorted_chunked_mask')], 'eval': 'tf.logical_and(source(0), tf.logical_not(source(1)))'}
small_masking_layers_from.append((output + '_sorted_chunked_small_mask_matching_hash'))
else:
large_masking_layers_from.append((output + '_sorted_chunked_mask_matching_hash'))
d[(output + '_sorted_chunked_mask_valid_key_position')] = {'class': 'eval', 'from': [(output + '_sorted_chunked_valid_key_position')], 'eval': 'tf.logical_not(source(0))'}
large_masking_layers_from.append((output + '_sorted_chunked_mask_valid_key_position'))
d[(output + '_sorted_chunked_mask_key_chunk_duplicates_unnamed')] = {'class': 'repeat', 'from': [(output + '_query_chunk_alignment_duplicate_mask')], 'repetitions': key_chunk_size, 'axis': 'stag:key-chunk-offset'}
d[(output + '_sorted_chunked_mask_key_chunk_duplicates')] = {'class': 'name_axis', 'from': [(output + '_sorted_chunked_mask_key_chunk_duplicates_unnamed')], 'axis': 'stag:repeated|stag:key-chunk-offset', 'description': 'stacked-key-window'}
large_masking_layers_from.append((output + '_sorted_chunked_mask_key_chunk_duplicates'))
if (not allow_duplicate_attention):
large_masking_layers_from.append((output + '_sorted_chunked_round_duplicate_mask'))
if mask_current:
d[(output + '_sorted_chunked_small_mask_current')] = {'class': 'compare', 'from': [(output + '_sorted_chunked_queries_orig_indices'), (output + '_sorted_chunked_stacked_keys_orig_indices')], 'kind': 'equal'}
small_masking_layers_from.append((output + '_sorted_chunked_small_mask_current'))
if (len(large_masking_layers_from) > 1):
d[(output + '_sorted_chunked_mask')] = {'class': 'combine', 'from': large_masking_layers_from, 'kind': 'logical_or'}
else:
d[(output + '_sorted_chunked_mask')] = {'class': 'copy', 'from': large_masking_layers_from}
if (len(small_masking_layers_from) > 1):
d[(output + '_sorted_chunked_small_mask')] = {'class': 'combine', 'from': small_masking_layers_from, 'kind': 'logical_or'}
elif (len(small_masking_layers_from) == 1):
d[(output + '_sorted_chunked_small_mask')] = {'class': 'copy', 'from': small_masking_layers_from}
else:
d[(output + '_sorted_chunked_small_mask')] = {'class': 'constant', 'value': False, 'dtype': 'bool'}
d[(output + '_sorted_chunked_final_mask')] = {'class': 'reduce', 'from': [(output + '_sorted_chunked_mask')], 'mode': 'all', 'axis': 'stag:stacked-key-window'}
d[(output + '_sorted_chunked_energy_unmasked1')] = {'class': 'dot', 'red1': 'static:-1', 'red2': 'static:-1', 'var1': 'stag:query-window', 'var2': 'stag:stacked-key-window', 'from': [(output + '_sorted_chunked_queries'), (output + '_sorted_chunked_stacked_keys')], 'debug': True}
d[(output + '_sorted_chunked_energy_unmasked2')] = {'class': 'switch', 'condition': (output + '_sorted_chunked_mask'), 'true_from': float('-inf'), 'false_from': (output + '_sorted_chunked_energy_unmasked1')}
d[(output + '_sorted_chunked_energy_unmasked3')] = {'class': 'switch', 'condition': (output + '_sorted_chunked_small_mask'), 'true_from': small_mask_value, 'false_from': (output + '_sorted_chunked_energy_unmasked2')}
d[(output + '_sorted_chunked_energy_feature')] = {'class': 'switch', 'condition': (output + '_sorted_chunked_final_mask'), 'true_from': even_smaller_mask_value, 'false_from': (output + '_sorted_chunked_energy_unmasked3')}
d[(output + '_sorted_chunked_energy')] = {'class': 'reinterpret_data', 'from': [(output + '_sorted_chunked_energy_feature')], 'set_axes': {'F': None}}
d[(output + '_sorted_chunked_energy_logsumexp')] = {'class': 'reduce', 'mode': 'logsumexp', 'axis': 'stag:stacked-key-window', 'from': [(output + '_sorted_chunked_energy')]}
d[(output + '_sorted_chunked_weights')] = {'class': 'eval', 'from': [(output + '_sorted_chunked_energy'), (output + '_sorted_chunked_energy_logsumexp')], 'eval': 'tf.exp(source(0) - source(1))'}
d[(output + '_sorted_chunked_weights_drop')] = {'class': 'dropout', 'dropout_noise_shape': {'*': None}, 'from': [(output + '_sorted_chunked_weights')], 'dropout': dropout}
d[(output + '_sorted_chunked_round_output')] = {'class': 'dot', 'red1': 'stag:stacked-key-window', 'red2': 'stag:stacked-key-window', 'var1': 'stag:query-window', 'var2': 'static:-1', 'debug': True, 'from': [(output + '_sorted_chunked_weights_drop'), (output + '_sorted_chunked_stacked_values')]}
d[(output + '_sorted_round_output')] = {'class': 'merge_dims', 'axes': ['stag:query-chunk', 'stag:query-window'], 'keep_order': True, 'from': [(output + '_sorted_chunked_round_output')]}
d[(output + '_round_output')] = {'class': 'gather', 'from': [(output + '_sorted_round_output')], 'axis': 'T', 'position': (output + '_queries_sort_indices')}
d[(output + '_sorted_energy_logsumexp')] = {'class': 'merge_dims', 'axes': ['stag:query-chunk', 'stag:query-window'], 'keep_order': True, 'from': [(output + '_sorted_chunked_energy_logsumexp')]}
d[(output + '_energy_logsumexp')] = {'class': 'gather', 'from': [(output + '_sorted_energy_logsumexp')], 'axis': 'T', 'position': (output + '_queries_sort_indices')}
d[(output + '_round_output_weights')] = {'class': 'softmax_over_spatial', 'axis': 'stag:att-rounds', 'use_time_mask': False, 'energy_factor': 1.0, 'from': [(output + '_energy_logsumexp')]}
d[(output + '_round_output_weighted')] = {'class': 'combine', 'kind': 'mul', 'from': [(output + '_round_output_weights'), (output + '_round_output')]}
d[(output + '_output')] = {'class': 'reduce', 'axis': 'stag:att-rounds', 'mode': 'sum', 'from': [(output + '_round_output_weighted')]}
d[(output + '_output_unnamed')] = {'class': 'name_axis', 'axis': 'stag:att-heads', 'description': None, 'from': [(output + '_output')]}
d[(output + '_att_all')] = {'class': 'merge_dims', 'axes': 'static', 'from': [(output + '_output_unnamed')]}
if debug_print:
for name in ((([(output + n) for n in ['_keys_hashed', '_queries_hashed', '_sorted_queries_orig_indices', '_sorted_keys_orig_indices', '_queries_sort_indices', '_sorted_queries', '_sorted_keys', '_sorted_values', '_sorted_chunked_queries', '_sorted_chunked_keys', '_sorted_chunked_stacked_keys', '_sorted_chunked_stacked_values', '_sorted_queries_hashed', '_sorted_keys_hashed', '_sorted_chunked_keys_hashed', '_query_chunk_alignment', '_sorted_chunked_queries_orig_indices', '_sorted_chunked_stacked_keys_orig_indices', '_sorted_chunked_stacked_keys_hashed', '_query_chunk_alignment_duplicate_mask', '_sorted_chunked_mask', '_sorted_chunked_small_mask', '_sorted_chunked_energy_unmasked1', '_sorted_chunked_energy_unmasked2', '_sorted_chunked_energy', '_sorted_chunked_weights', '_sorted_chunked_round_output', '_att_all']] + large_masking_layers_from) + small_masking_layers_from) + [keys_input, queries_input]):
if name.startswith('base:'):
d[('print_' + name[len('base:'):])] = {'class': 'print', 'from': [name], 'is_output_layer': True}
else:
assert ((name in d) and ((name + '_orig') not in d))
d[(name + '_orig')] = d[name]
d[name] = {'class': 'print', 'from': [(name + '_orig')]}
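# Hedged usage sketch for generic_add_lsh_attention_layer: a non-recurrent self-attention setup.
# Assumes 'enc_query'/'enc_key'/'enc_value' layers (hypothetical names) with a named 'att-heads'
# axis already exist in the net, e.g. created as in add_lsh_self_attention_layer below. With chunk
# size 10 and one key chunk of context on each side, every query attends to at most
# (1 + 1 + 1) * 10 = 30 sorted keys per hashing round.
def _example_generic_lsh_attention(net):
    generic_add_lsh_attention_layer(
        net, queries_input='enc_query', keys_input='enc_key', values_input='enc_value',
        output='enc_lsh', query_time_axis='stag:extern_data:data', key_time_axis='stag:extern_data:data',
        num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.0,
        num_hashes=14, query_chunk_size=10, key_chunk_size=10,
        key_chunks_before=1, key_chunks_after=1,
        past_only=False, mask_current=False,  # must be set because query and key time axes coincide
        chunk_alignment='identity')
    return net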
def add_lsh_self_attention_layer(d, input, output, inside_rec_layer=True, past_only=None, time_axis=None, *, num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.0, num_hashes, chunk_size, chunks_before=None, chunks_after=None, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), mask_current=True, small_mask_value=float((- (10 ** 5))), share_key_query=True, normalize_keys=None, mask_different_hashes=True, allow_duplicate_attention=False, chunk_alignment, shuffle_kv=False, debug_print=False):
'\n Essentially this does (but for LSH attention)\n d[output + \'_att\'] = {"class": "self_attention", "num_heads": num_heads,\n "total_key_dim": num_heads * key_dim,\n "n_out": num_heads * value_dim, "from": [input],\n "attention_left_only": past_only,\n "attention_dropout": dropout, "forward_weights_init": self.ff_init}\n But using multiple layers.\n\n :param dict[str,dict] d: the network dict to write into\n :param str input: input layer, of shape [B,query_axis?,F]\n :param str output: prefix of all layers generated. Output is written into output + \'_att\' layer.\n Will use the name output + \'_...\' for all internal layers here.\n :param bool inside_rec_layer: whether this is used inside a RecLayer, meaning that the time axis may or may not always\n exist.\n :param bool|None past_only: if set, will mask attention s.t. it cannot attend to the future.\n Must be set if used inside a RecLayer.\n :param str|None time_axis: name of the time axis\n :param int num_heads: number of attention heads\n :param int num_rounds: number of hashing rounds.\n Similar to attention heads, but attends to the same query/key/value sequence with a different hash matrix.\n :param int key_dim: feature dimension of keys and queries\n :param int value_dim: feature dimension of values\n :param float dropout: apply dropout to the attention weights\n :param int num_hashes: number of different attention hashes, must be an even number\n :param int chunk_size: window size within a single chunk\n :param int|None chunks_before: number of chunks we look into the past\n :param int|None chunks_after: number of chunks we look into the future\n :param str ff_init: initializer for the weight matrices, including the hash generator matrices\n :param bool mask_current: whether a query may attend to the key corresponding to the same position\n :param float small_mask_value: if mask_current, the attention energy for query == key is set to this.\n All other masked values are set to -inf, so if small_mask_value is low but higher than -inf, the query will\n attend to key == query exactly iff it is the only possible key to attend to\n :param bool share_key_query: whether to set the key sequence equal to the query sequence\n :param bool|None normalize_keys: whether to normalize the key sequence in euclidean norm (defaults to share_key_query)\n :param bool mask_different_hashes: whether a query may only attend to keys with the same hash\n :param bool allow_duplicate_attention: whether a query may attend to the same key more than once.\n If False, attention is masked s.t. each key is attended to at most once.\n Attending to a key twice can e.g. happen for multi-round attention,\n or if the (effective) chunk size is larger than the sequence length.\n :param str chunk_alignment:\n :param bool shuffle_kv: whether to shuffle the keys and values before sorting them by their hashes.\n :param bool debug_print: will print layer contents for debugging\n '
if (past_only is None):
past_only = inside_rec_layer
if (time_axis is None):
time_axis = ('stag:extern_data:classes' if inside_rec_layer else 'stag:extern_data:data')
assert time_axis.startswith('stag:')
assert ((not inside_rec_layer) or past_only)
if (normalize_keys is None):
normalize_keys = share_key_query
if share_key_query:
d[(output + '_qv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * (key_dim + value_dim)), 'forward_weights_init': ff_init}
d[(output + '_qv_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, (key_dim + value_dim)), 'from': [(output + '_qv0')]}
d[(output + '_qv')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_qv_unnamed')]}
d[(output + '_qv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, value_dim), 'from': [(output + '_qv')]}
d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qv_split/0')]}
if normalize_keys:
d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_query')]}
else:
d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_query')]}
d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qv_split/1')]}
else:
d[(output + '_qkv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * ((2 * key_dim) + value_dim)), 'forward_weights_init': ff_init}
d[(output + '_qkv_unnamed')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, ((2 * key_dim) + value_dim)), 'from': [(output + '_qkv0')]}
d[(output + '_qkv')] = {'class': 'name_axis', 'axis': 'static:-2', 'description': 'att-heads', 'from': [(output + '_qkv_unnamed')]}
d[(output + '_qkv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, key_dim, value_dim), 'from': [(output + '_qkv')]}
d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qkv_split/0')]}
if normalize_keys:
d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_qkv_split/1')]}
else:
d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_qkv_split/1')]}
d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qkv_split/2')]}
if inside_rec_layer:
(queries_input, keys_input, values_input) = ((output + '_query_accum'), (output + '_key_accum'), (output + '_value_accum'))
for qkv in ('query', 'key', 'value'):
d[(output + ('_%s_accum' % qkv))] = {'class': 'cum_concat', 'from': [(output + ('_%s' % qkv))], 'axis': time_axis}
time_axis_ = 'stag:rec-history'
else:
(queries_input, keys_input, values_input) = ((output + '_query'), (output + '_key'), (output + '_value'))
time_axis_ = time_axis
generic_add_lsh_attention_layer(d, queries_input=queries_input, keys_input=keys_input, values_input=values_input, output=output, query_time_axis=time_axis_, key_time_axis=time_axis_, num_heads=num_heads, num_rounds=num_rounds, key_dim=key_dim, value_dim=value_dim, dropout=dropout, num_hashes=num_hashes, query_chunk_size=chunk_size, key_chunk_size=chunk_size, key_chunks_before=chunks_before, key_chunks_after=chunks_after, hash_init=ff_init, small_mask_value=small_mask_value, past_only=past_only, mask_current=mask_current, mask_different_hashes=mask_different_hashes, allow_duplicate_attention=allow_duplicate_attention, chunk_alignment=chunk_alignment, shuffle_kv=shuffle_kv, debug_print=debug_print)
if inside_rec_layer:
d[(output + '_att')] = {'class': 'gather', 'from': [(output + '_att_all')], 'position': ':i', 'axis': time_axis_}
else:
d[(output + '_att')] = {'class': 'copy', 'from': [(output + '_att_all')]}
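# Hedged usage sketch: LSH self-attention inside a decoder RecLayer. 'net' and the input layer name
# 'lay_ln' are hypothetical; the attended output is written to net['dec_lsh_att'].
def _example_lsh_self_attention(net):
    add_lsh_self_attention_layer(
        net, 'lay_ln', 'dec_lsh', inside_rec_layer=True, past_only=True,
        num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.1,
        num_hashes=14, chunk_size=10, chunks_before=1, chunks_after=0,
        mask_current=True, chunk_alignment='identity')
    return net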
def add_lsh_cross_attention_layer(d, db, input, keys_input, output, query_time_axis=None, key_time_axis=None, *, num_heads=8, num_rounds=1, key_dim=64, value_dim=64, dropout=0.0, num_hashes, key_chunk_size, query_chunk_size, key_chunks_before=None, key_chunks_after=None, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), hash_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), small_mask_value=float((- (10 ** 5))), mask_different_hashes=True, allow_duplicate_attention=False, chunk_alignment, shuffle_kv=False, query_hash_dropin=0.0, key_hash_dropin=0.0, debug_print=False):
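'\n Add an LSH cross-attention layer, intended for use inside a recurrent decoder layer:\n the query sequence is accumulated via cum_concat over the rec history, while keys and values are\n computed once in the base network db from keys_input (which must be prefixed with "base:").\n For the remaining parameters see generic_add_lsh_attention_layer.\n '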
(query_time_axis, key_time_axis) = _query_key_time_default(query_time_axis, key_time_axis)
assert keys_input.startswith('base:')
keys_input = keys_input[len('base:'):]
_make_cross_attention_qkv(d=d, db=db, input=input, keys_input=keys_input, output=output, num_heads=num_heads, key_dim=key_dim, value_dim=value_dim, ff_init=ff_init)
queries_input = (output + '_query_accum')
d[(output + '_query_accum')] = {'class': 'cum_concat', 'from': [(output + '_query')], 'axis': query_time_axis}
generic_add_lsh_attention_layer(d=d, queries_input=queries_input, keys_input=(('base:' + output) + '_key'), values_input=(('base:' + output) + '_value'), output=output, query_time_axis='stag:rec-history', key_time_axis=key_time_axis, num_heads=num_heads, num_rounds=num_rounds, key_dim=key_dim, value_dim=value_dim, dropout=dropout, num_hashes=num_hashes, query_chunk_size=query_chunk_size, key_chunk_size=key_chunk_size, key_chunks_before=key_chunks_before, key_chunks_after=key_chunks_after, hash_init=hash_init, small_mask_value=small_mask_value, mask_different_hashes=mask_different_hashes, allow_duplicate_attention=allow_duplicate_attention, chunk_alignment=chunk_alignment, shuffle_kv=shuffle_kv, query_hash_dropin=query_hash_dropin, key_hash_dropin=key_hash_dropin, debug_print=debug_print)
d[(output + '_att')] = {'class': 'gather', 'from': [(output + '_att_all')], 'position': ':i', 'axis': 'stag:rec-history'}
def add_full_lsh_cross_attention_layer(d, db, input, keys_input, output, query_time_axis=None, key_time_axis=None, num_heads=8, key_dim=64, value_dim=64, dropout=0.0, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), num_hashes=14, num_rounds=1, mask_current_value=float((- (10 ** 5))), mask_different_hashes=True, debug_print=False):
'\n Add a cross-attention layer with masking as in the LSH case.\n This way, you can e.g. train a system using LSH attention, but then do search using this.\n\n :param dict[str, Any] d:\n :param dict[str, Any] db:\n :param str input:\n :param str keys_input:\n :param str output:\n :param None|str query_time_axis:\n :param None|str key_time_axis:\n :param int num_heads:\n :param int key_dim:\n :param int value_dim:\n :param float dropout:\n :param str ff_init:\n :param int num_hashes:\n :param int num_rounds:\n :param float mask_current_value:\n :param bool mask_different_hashes:\n :param bool debug_print:\n '
(query_time_axis, key_time_axis) = _query_key_time_default(query_time_axis, key_time_axis)
assert keys_input.startswith('base:')
add_vanilla_cross_attention_layer(d=d, db=db, input=input, keys_input=keys_input, output=output, query_time_axis=query_time_axis, key_time_axis=key_time_axis, num_heads=num_heads, key_dim=key_dim, value_dim=value_dim, dropout=dropout, ff_init=ff_init)
assert mask_different_hashes, 'can just call add_vanilla_cross_attention_layer(..) instead'
make_lsh_hash_gen(db, (output + '_hash_gen'), key_dim=key_dim, num_hashes=num_hashes, num_heads=num_heads, num_rounds=num_rounds, hash_init=ff_init)
apply_lsh_hash_gen(d, input=(output + '_query'), hash_gen_input=(('base:' + output) + '_hash_gen'), output=(output + '_query_hash'), time_axis=query_time_axis, num_hashes=num_hashes, hash_mask_value=None)
apply_lsh_hash_gen(db, input=(output + '_key'), hash_gen_input=(output + '_hash_gen'), output=(output + '_key_hash'), time_axis=key_time_axis, num_hashes=num_hashes, hash_mask_value=None)
assert (num_rounds == 1), 'not implemented yet otherwise'
d[(output + '_energy_mask_rounds')] = {'class': 'compare', 'from': [(output + '_query_hash'), (('base:' + output) + '_key_hash')], 'kind': 'equal'}
d[(output + '_energy_mask')] = {'class': 'squeeze', 'axis': 'stag:att-round', 'from': [(output + '_energy_mask_rounds')]}
assert ((output + '_energy') in d)
d[(output + '_energy_unmasked')] = d[(output + '_energy')]
d[(output + '_energy')] = {'class': 'switch', 'condition': (output + '_energy_mask'), 'true_from': (output + '_energy_unmasked'), 'false_from': mask_current_value}
if debug_print:
for name in ([(output + n) for n in ['_query', '_query_hash', '_energy_mask', '_energy_unmasked', '_energy', '_weights']] + [(('base:' + output) + '_key'), (('base:' + output) + '_key_hash')]):
if name.startswith('base:'):
name = name[len('base:'):]
assert ((name in db) and ((name + '_orig') not in db))
db[(name + '_orig')] = db[name]
db[name] = {'class': 'print', 'from': [(name + '_orig')]}
else:
assert ((name in d) and ((name + '_orig') not in d))
d[(name + '_orig')] = d[name]
d[name] = {'class': 'print', 'from': [(name + '_orig')]}
def add_vanilla_self_attention_layer(d, input, output, inside_rec_layer=True, past_only=None, time_axis=None, num_heads=8, key_dim=64, value_dim=64, dropout=0.0, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0), share_key_query=False, normalize_keys=None, mask_current=False, mask_current_value=float((- (10 ** 5)))):
'\n Essentially this does\n d[output + \'_att\'] = {"class": "self_attention", "num_heads": num_heads,\n "total_key_dim": num_heads * key_dim,\n "n_out": num_heads * value_dim, "from": [input],\n "attention_left_only": past_only,\n "attention_dropout": dropout, "forward_weights_init": self.ff_init}\n But using multiple layers that can be extended further.\n '
if (past_only is None):
past_only = inside_rec_layer
if (time_axis is None):
time_axis = ('stag:extern_data:classes' if inside_rec_layer else 'stag:extern_data:data')
assert time_axis.startswith('stag:')
assert ((not inside_rec_layer) or past_only)
if (normalize_keys is None):
normalize_keys = share_key_query
if (not share_key_query):
d[(output + '_qkv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * ((2 * key_dim) + value_dim)), 'forward_weights_init': ff_init}
d[(output + '_qkv')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, ((2 * key_dim) + value_dim)), 'from': [(output + '_qkv0')]}
d[(output + '_qkv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, key_dim, value_dim), 'from': [(output + '_qkv')]}
d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qkv_split/0')]}
if normalize_keys:
d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_qkv_split/1')]}
else:
d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_qkv_split/1')]}
d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qkv_split/2')]}
else:
d[(output + '_qv0')] = {'class': 'linear', 'activation': None, 'with_bias': False, 'from': [input], 'n_out': (num_heads * (key_dim + value_dim)), 'forward_weights_init': ff_init}
d[(output + '_qv')] = {'class': 'split_dims', 'axis': 'F', 'dims': (num_heads, (key_dim + value_dim)), 'from': [(output + '_qv0')]}
d[(output + '_qv_split')] = {'class': 'split', 'axis': 'F', 'size_splits': (key_dim, value_dim), 'from': [(output + '_qv')]}
d[(output + '_query')] = {'class': 'copy', 'from': [(output + '_qv_split/0')]}
if normalize_keys:
d[(output + '_key')] = {'class': 'eval', 'eval': normalize_eval, 'from': [(output + '_query')]}
else:
d[(output + '_key')] = {'class': 'copy', 'from': [(output + '_query')]}
d[(output + '_value')] = {'class': 'copy', 'from': [(output + '_qv_split/1')]}
if inside_rec_layer:
d[(output + '_key_accum')] = {'class': 'cum_concat', 'from': [(output + '_key')]}
d[(output + '_value_accum')] = {'class': 'cum_concat', 'from': [(output + '_value')]}
key_axis = 'stag:rec-history'
else:
key_dim_tag = DimensionTag(kind=DimensionTag.Types.Time, description='self-att-keys')
d[(output + '_key_accum')] = {'class': 'reinterpret_data', 'set_dim_tags': {time_axis: key_dim_tag}, 'from': [(output + '_key')]}
d[(output + '_value_accum')] = {'class': 'reinterpret_data', 'set_dim_tags': {time_axis: key_dim_tag}, 'from': [(output + '_value')]}
key_axis = ('stag:' + key_dim_tag.description)
d[(output + '_energy')] = {'class': 'dot', 'from': [(output + '_query'), (output + '_key_accum')], 'red1': 'static:-1', 'red2': 'static:-1', 'var1': (time_axis + '?'), 'var2': key_axis}
need_indices = (past_only or mask_current)
if need_indices:
if inside_rec_layer:
query_indices_from = ':i'
else:
d[(output + '_query_indices')] = {'class': 'range_in_axis', 'from': [input], 'axis': time_axis, 'keepdims': False}
query_indices_from = (output + '_query_indices')
d[(output + '_key_accum_indices')] = {'class': 'range_in_axis', 'from': [(output + '_key_accum')], 'axis': key_axis, 'keepdims': False}
if past_only:
d[(output + '_energy_unmasked')] = d[(output + '_energy')]
d[(output + '_energy_mask')] = {'class': 'compare', 'kind': 'greater_equal', 'from': [query_indices_from, (output + '_key_accum_indices')]}
d[(output + '_energy')] = {'class': 'switch', 'true_from': (output + '_energy_unmasked'), 'false_from': float('-inf'), 'condition': (output + '_energy_mask')}
if mask_current:
d[(output + '_energy_unmasked_current')] = d[(output + '_energy')]
d[(output + '_energy_mask_current')] = {'class': 'compare', 'kind': 'equal', 'from': [query_indices_from, (output + '_key_accum_indices')]}
d[(output + '_energy')] = {'class': 'switch', 'true_from': mask_current_value, 'false_from': (output + '_energy_unmasked_current'), 'condition': (output + '_energy_mask_current')}
d[(output + '_weights')] = {'class': 'softmax_over_spatial', 'from': [(output + '_energy')], 'axis': key_axis, 'energy_factor': (key_dim ** (- 0.5)), 'use_time_mask': (not past_only)}
d[(output + '_weights_drop')] = {'class': 'dropout', 'dropout_noise_shape': {'*': None}, 'from': [(output + '_weights')], 'dropout': dropout}
d[(output + '_output')] = {'class': 'dot', 'from': [(output + '_weights_drop'), (output + '_value_accum')], 'red1': key_axis, 'red2': key_axis, 'var1': (time_axis + '?'), 'var2': 'static:-1'}
d[(output + '_att')] = {'class': 'merge_dims', 'axes': 'static', 'from': [(output + '_output')]}
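# Hedged usage sketch: standard full self-attention outside a RecLayer (e.g. an encoder block).
# 'enc_ln' and 'enc_self_att' are hypothetical layer names; the result is written to
# net['enc_self_att_att'].
def _example_vanilla_self_attention(net):
    add_vanilla_self_attention_layer(
        net, 'enc_ln', 'enc_self_att', inside_rec_layer=False, past_only=False,
        time_axis='stag:extern_data:data', num_heads=8, key_dim=64, value_dim=64, dropout=0.1)
    return net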
def add_vanilla_cross_attention_layer(d, db, input, keys_input, output, query_time_axis=None, key_time_axis=None, num_heads=8, key_dim=64, value_dim=64, dropout=0.0, ff_init=("variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=%s)" % 1.0)):
'\n Add a cross-attention layer.\n\n :param dict[str, Any] d:\n :param dict[str, Any] db:\n :param str input:\n :param str keys_input:\n :param str output:\n :param None|str query_time_axis:\n :param None|str key_time_axis:\n :param int num_heads:\n :param int key_dim:\n :param int value_dim:\n :param float dropout:\n :param str ff_init:\n '
(query_time_axis, key_time_axis) = _query_key_time_default(query_time_axis, key_time_axis)
assert keys_input.startswith('base:')
keys_input = keys_input[len('base:'):]
_make_cross_attention_qkv(d=d, db=db, input=input, keys_input=keys_input, output=output, num_heads=num_heads, key_dim=key_dim, value_dim=value_dim, ff_init=ff_init)
d[(output + '_energy')] = {'class': 'dot', 'from': [(output + '_query'), (('base:' + output) + '_key')], 'red1': 'static:-1', 'red2': 'static:-1', 'var1': (query_time_axis + '?'), 'var2': key_time_axis}
d[(output + '_weights')] = {'class': 'softmax_over_spatial', 'from': [(output + '_energy')], 'axis': key_time_axis, 'energy_factor': (key_dim ** (- 0.5)), 'use_time_mask': True}
d[(output + '_weights_drop')] = {'class': 'dropout', 'dropout_noise_shape': {'*': None}, 'from': [(output + '_weights')], 'dropout': dropout}
d[(output + '_output_named')] = {'class': 'dot', 'from': [(output + '_weights_drop'), (('base:' + output) + '_value')], 'red1': key_time_axis, 'red2': key_time_axis, 'var1': (query_time_axis + '?'), 'var2': 'static:-1'}
d[(output + '_output')] = {'class': 'name_axis', 'from': [(output + '_output_named')], 'axis': 'stag:att-heads', 'description': None}
d[(output + '_att')] = {'class': 'merge_dims', 'axes': 'static', 'from': [(output + '_output')]}
d[(output + '_weights_unnamed')] = {'class': 'name_axis', 'from': [(output + '_weights')], 'axis': 'stag:att-heads', 'description': None}
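# Usage note (descriptive): add_vanilla_cross_attention_layer is typically used inside a recurrent
# decoder unit: d is the decoder (rec) net, db is the base net holding the encoder, and keys_input
# must be prefixed with 'base:' (e.g. the hypothetical 'base:encoder'); the attended context ends up
# in d[output + '_att'].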
class DataLoader(object):
'\n Only loads the dataset files and the information file.\n '
@staticmethod
def parse_data_args(parser):
'\n Parse data-loader-related command line arguments.\n :param parser:\n :return:\n '
parser.add_argument('--path', type=str, default='../dataset/', help='Input data dir.')
parser.add_argument('--dataset', type=str, default='ml100k-1-5', help='Choose a dataset.')
parser.add_argument('--sep', type=str, default='\t', help='sep of csv file.')
parser.add_argument('--label', type=str, default='label', help='name of dataset label column.')
return parser
def __init__(self, path, dataset, label='label', load_data=True, sep='\t', seqs_sep=','):
'\n Initialization\n :param path: dataset path\n :param dataset: dataset name\n :param label: label column name\n :param load_data: True to load the dataset files, False to load only the info file\n :param sep: the separator for the .csv files\n :param seqs_sep: the separator within negative/history sequences, for example: "1,2,3"\n '
self.dataset = dataset
self.path = os.path.join(path, dataset)
self.train_file = os.path.join(self.path, (dataset + global_p.TRAIN_SUFFIX))
self.validation_file = os.path.join(self.path, (dataset + global_p.VALIDATION_SUFFIX))
self.test_file = os.path.join(self.path, (dataset + global_p.TEST_SUFFIX))
self.info_file = os.path.join(self.path, (dataset + global_p.INFO_SUFFIX))
self.user_file = os.path.join(self.path, (dataset + global_p.USER_SUFFIX))
self.item_file = os.path.join(self.path, (dataset + global_p.ITEM_SUFFIX))
self.train_his_file = os.path.join(self.path, (dataset + global_p.TRAIN_GROUP_SUFFIX))
self.vt_his_file = os.path.join(self.path, (dataset + global_p.VT_GROUP_SUFFIX))
(self.sep, self.seqs_sep) = (sep, seqs_sep)
self.load_data = load_data
self.label = label
(self.train_df, self.validation_df, self.test_df) = (None, None, None)
self._load_user_item()
self._load_data()
self._load_his()
self._load_info()
def _load_user_item(self):
'\n load user .csv file\n :return:\n '
(self.user_df, self.item_df) = (None, None)
if (os.path.exists(self.user_file) and self.load_data):
logging.info('load user csv...')
self.user_df = pd.read_csv(self.user_file, sep='\t')
if (os.path.exists(self.item_file) and self.load_data):
logging.info('load item csv...')
self.item_df = pd.read_csv(self.item_file, sep='\t')
def _load_data(self):
'\n load train, validation, test .csv files\n :return:\n '
if (os.path.exists(self.train_file) and self.load_data):
logging.info('load train csv...')
self.train_df = pd.read_csv(self.train_file, sep=self.sep)
logging.info(('size of train: %d' % len(self.train_df)))
if (os.path.exists(self.validation_file) and self.load_data):
logging.info('load validation csv...')
self.validation_df = pd.read_csv(self.validation_file, sep=self.sep)
logging.info(('size of validation: %d' % len(self.validation_df)))
if (os.path.exists(self.test_file) and self.load_data):
logging.info('load test csv...')
self.test_df = pd.read_csv(self.test_file, sep=self.sep)
logging.info(('size of test: %d' % len(self.test_df)))
def _load_info(self):
'\n load dataset information file. If not found then create one.\n :return:\n '
def json_type(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
(max_dict, min_dict) = ({}, {})
if (not os.path.exists(self.info_file)):
for df in [self.train_df, self.validation_df, self.test_df, self.user_df, self.item_df]:
if (df is None):
continue
for c in df.columns:
if (c not in max_dict):
max_dict[c] = df[c].max()
else:
max_dict[c] = max(df[c].max(), max_dict[c])
if (c not in min_dict):
min_dict[c] = df[c].min()
else:
min_dict[c] = min(df[c].min(), min_dict[c])
max_json = json.dumps(max_dict, default=json_type)
min_json = json.dumps(min_dict, default=json_type)
out_f = open(self.info_file, 'w')
out_f.write(((max_json + os.linesep) + min_json))
out_f.close()
else:
lines = open(self.info_file, 'r').readlines()
max_dict = json.loads(lines[0])
min_dict = json.loads(lines[1])
self.column_max = max_dict
self.column_min = min_dict
self.label_max = self.column_max[self.label]
self.label_min = self.column_min[self.label]
logging.info(('label: %d-%d' % (self.label_min, self.label_max)))
(self.user_num, self.item_num) = (0, 0)
if ('uid' in self.column_max):
self.user_num = (self.column_max['uid'] + 1)
if ('iid' in self.column_max):
self.item_num = (self.column_max['iid'] + 1)
logging.info(('# of users: %d' % self.user_num))
logging.info(('# of items: %d' % self.item_num))
self.user_features = [f for f in self.column_max.keys() if f.startswith('u_')]
logging.info(('# of user features: %d' % len(self.user_features)))
self.item_features = [f for f in self.column_max.keys() if f.startswith('i_')]
logging.info(('# of item features: %d' % len(self.item_features)))
self.context_features = [f for f in self.column_max.keys() if f.startswith('c_')]
logging.info(('# of context features: %d' % len(self.context_features)))
self.features = ((self.context_features + self.user_features) + self.item_features)
logging.info(('# of features: %d' % len(self.features)))
def _load_his(self):
"\n load dataset interactions which have been grouped by uid.\n two columns 'uid' and 'iid'.\n :return:\n "
if (not self.load_data):
return
if (not os.path.exists(self.train_his_file)):
logging.info('building train history csv...')
train_his_df = group_user_interactions_df(self.train_df, label=self.label, seq_sep=self.seqs_sep)
train_his_df.to_csv(self.train_his_file, index=False, sep=self.sep)
if (not os.path.exists(self.vt_his_file)):
logging.info('building vt history csv...')
vt_df = pd.concat([self.validation_df, self.test_df])
vt_his_df = group_user_interactions_df(vt_df, label=self.label, seq_sep=self.seqs_sep)
vt_his_df.to_csv(self.vt_his_file, index=False, sep=self.sep)
def build_his(his_df, seqs_sep):
uids = his_df['uid'].tolist()
iids = his_df['iids'].str.split(seqs_sep).values
iids = [[int(j) for j in i] for i in iids]
user_his = dict(zip(uids, iids))
return user_his
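# Example (illustrative): a history row with uid=3 and iids="1,2,5" (seqs_sep=',') becomes the
# entry {3: [1, 2, 5]} in the returned dict.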
(self.train_his_df, self.train_user_his) = (None, None)
(self.vt_his_df, self.vt_user_his) = (None, None)
if self.load_data:
logging.info('load history csv...')
self.train_his_df = pd.read_csv(self.train_his_file, sep=self.sep)
self.train_user_his = build_his(self.train_his_df, self.seqs_sep)
self.vt_his_df = pd.read_csv(self.vt_his_file, sep=self.sep)
self.vt_user_his = build_his(self.vt_his_df, self.seqs_sep)
def feature_info(self, include_id=True, include_item_features=True, include_user_features=True):
features = []
if include_id:
features.extend(['uid', 'iid'])
if include_user_features:
features.extend(self.user_features)
if include_item_features:
features.extend(self.item_features)
feature_dims = 0
(feature_min, feature_max) = ([], [])
for f in features:
feature_min.append(feature_dims)
feature_dims += int((self.column_max[f] + 1))
feature_max.append((feature_dims - 1))
logging.info(('Model # of features %d' % len(features)))
logging.info(('Model # of feature dims %d' % feature_dims))
return (features, feature_dims, feature_min, feature_max)
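# Worked example (hypothetical values): with column_max = {'uid': 2, 'iid': 4} and only ids included,
# feature_dims grows 0 -> 3 -> 8, so feature_min = [0, 3] and feature_max = [2, 7]; each feature thus
# owns a disjoint id range, presumably so all features can share one embedding table.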
def append_his(self, last_n=10, supply=True, neg=False, neg_column=True):
'\n Generate the user history interaction sequence.\n Data in train, validation and test must be sorted by timestamp; train data comes before validation,\n which comes before test.\n :param last_n: the maximum history length to keep; a value <= 0 keeps everything.\n :param supply: True to pad the history with -1 up to last_n.\n :param neg: whether the interactions include negative feedback.\n :param neg_column: whether to add a separate column for the negative sequence; only relevant if neg=True.\n :return:\n '
(his_dict, neg_dict) = ({}, {})
for df in [self.train_df, self.validation_df, self.test_df]:
if (df is None):
continue
(history, neg_history) = ([], [])
(uids, iids, labels) = (df['uid'].tolist(), df['iid'].tolist(), df[self.label].tolist())
for (i, uid) in enumerate(uids):
(iid, label) = (str(iids[i]), labels[i])
if (uid not in his_dict):
his_dict[uid] = []
if (uid not in neg_dict):
neg_dict[uid] = []
tmp_his = (his_dict[uid] if (last_n <= 0) else his_dict[uid][(- last_n):])
tmp_neg = (neg_dict[uid] if (last_n <= 0) else neg_dict[uid][(- last_n):])
if supply:
tmp_his = (tmp_his + (['-1'] * last_n))
tmp_neg = (tmp_neg + (['-1'] * last_n))
history.append(','.join(tmp_his[:last_n]))
neg_history.append(','.join(tmp_neg[:last_n]))
if ((label <= 0) and (not neg_column) and neg):
his_dict[uid].append(('~' + iid))
elif ((label <= 0) and neg_column):
neg_dict[uid].append(iid)
elif (label > 0):
his_dict[uid].append(iid)
df[global_p.C_HISTORY] = history
if (neg and neg_column):
df[global_p.C_HISTORY_NEG] = neg_history
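# Worked example (illustrative): for one user with chronological train interactions iid=[3, 7, 9],
# labels=[1, 0, 1], last_n=2, supply=True, neg=True, neg_column=True, the history column gets
# '-1,-1', '3,-1', '3,-1' for the three rows, and the negative iid 7 only appears in the
# C_HISTORY_NEG column from the third row on ('-1,-1', '-1,-1', '7,-1').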
def drop_neg(self):
logging.info('Drop Neg Samples...')
self.train_df = self.train_df[(self.train_df[self.label] > 0)].reset_index(drop=True)
self.validation_df = self.validation_df[(self.validation_df[self.label] > 0)].reset_index(drop=True)
self.test_df = self.test_df[(self.test_df[self.label] > 0)].reset_index(drop=True)
self.train_df[self.label] = 1
self.validation_df[self.label] = 1
self.test_df[self.label] = 1
logging.info(('size of train: %d' % len(self.train_df)))
logging.info(('size of validation: %d' % len(self.validation_df)))
logging.info(('size of test: %d' % len(self.test_df)))
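# Hedged usage sketch: constructing a DataLoader from the command line arguments defined in
# parse_data_args. The argparse wiring is an assumption about how the surrounding framework calls
# this class, and it requires the dataset files to exist under path/dataset.
def _example_build_data_loader():
    import argparse
    parser = argparse.ArgumentParser()
    parser = DataLoader.parse_data_args(parser)
    args, _ = parser.parse_known_args([])  # use the declared defaults
    return DataLoader(path=args.path, dataset=args.dataset, label=args.label,
                      load_data=True, sep=args.sep)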
class DataProcessor(object):
data_columns = ['X']
@staticmethod
def parse_dp_args(parser):
'\n parse data processor related command line arguments\n '
parser.add_argument('--test_neg_n', type=int, default=10, help='Negative sample num for each instance in test/validation set.')
return parser
def __init__(self, data_loader, model, rank, test_neg_n):
'\n Initialization\n :param data_loader: DataLoader object\n :param model: Model object\n :param rank: 1=ranking, 0=rating prediction\n :param test_neg_n: ranking negative sample rate, pos:neg=1:test_neg_n\n '
self.data_loader = data_loader
self.model = model
self.rank = rank
(self.train_data, self.validation_data, self.test_data) = (None, None, None)
self.test_neg_n = test_neg_n
if (self.rank == 1):
self.train_history_dict = defaultdict(set)
for uid in data_loader.train_user_his.keys():
self.train_history_dict[uid] = set(data_loader.train_user_his[uid])
self.vt_history_dict = defaultdict(set)
for uid in data_loader.vt_user_his.keys():
self.vt_history_dict[uid] = set(data_loader.vt_user_his[uid])
self.vt_batches_buffer = {}
def get_train_data(self, epoch):
if ((self.train_data is None) or (epoch < 0)):
logging.info('Prepare Train Data...')
self.train_data = self.format_data_dict(self.data_loader.train_df)
self.train_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.train_data['Y']))
if (epoch >= 0):
utils.shuffle_in_unison_scary(self.train_data)
return self.train_data
def get_validation_data(self):
if (self.validation_data is None):
logging.info('Prepare Validation Data...')
df = self.data_loader.validation_df
if (self.rank == 1):
neg_df = self.generate_neg_df(uid_list=df['uid'].tolist(), iid_list=df['iid'].tolist(), df=df, neg_n=self.test_neg_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.validation_data = self.format_data_dict(df)
self.validation_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.validation_data['Y']))
return self.validation_data
def get_test_data(self):
if (self.test_data is None):
logging.info('Prepare Test Data...')
df = self.data_loader.test_df
if (self.rank == 1):
neg_df = self.generate_neg_df(uid_list=df['uid'].tolist(), iid_list=df['iid'].tolist(), df=df, neg_n=self.test_neg_n, train=False)
df = pd.concat([df, neg_df], ignore_index=True)
self.test_data = self.format_data_dict(df)
self.test_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.test_data['Y']))
return self.test_data
def get_train_batches(self, batch_size, epoch):
return self.prepare_batches(self.get_train_data(epoch), batch_size, train=True)
def get_validation_batches(self, batch_size):
return self.prepare_batches(self.get_validation_data(), batch_size, train=False)
def get_test_batches(self, batch_size):
return self.prepare_batches(self.get_test_data(), batch_size, train=False)
def _get_feed_dict_rt(self, data, batch_start, batch_size, train):
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
feed_dict = {'train': train, 'rank': 0, global_p.K_SAMPLE_ID: data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]}
if ('Y' in data):
feed_dict['Y'] = utils.numpy_to_torch(data['Y'][batch_start:(batch_start + real_batch_size)])
else:
feed_dict['Y'] = utils.numpy_to_torch(np.zeros(shape=real_batch_size))
for c in self.data_columns:
feed_dict[c] = utils.numpy_to_torch(data[c][batch_start:(batch_start + real_batch_size)])
return feed_dict
def _get_feed_dict_rk(self, data, batch_start, batch_size, train, neg_data=None):
if (not train):
feed_dict = self._get_feed_dict_rt(data=data, batch_start=batch_start, batch_size=batch_size, train=train)
feed_dict['rank'] = 1
else:
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
neg_columns_dict = {}
if (neg_data is None):
logging.warning('neg_data is None')
neg_df = self.generate_neg_df(uid_list=data['uid'][batch_start:(batch_start + real_batch_size)], iid_list=data['iid'][batch_start:(batch_start + real_batch_size)], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c]
else:
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c][batch_start:(batch_start + real_batch_size)]
y = np.concatenate([np.ones(shape=real_batch_size, dtype=np.float32), np.zeros(shape=real_batch_size, dtype=np.float32)])
sample_id = data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]
neg_sample_id = (sample_id + len(self.train_data['Y']))
feed_dict = {'train': train, 'rank': 1, 'Y': utils.numpy_to_torch(y), global_p.K_SAMPLE_ID: np.concatenate([sample_id, neg_sample_id])}
for c in self.data_columns:
feed_dict[c] = utils.numpy_to_torch(np.concatenate([data[c][batch_start:(batch_start + real_batch_size)], neg_columns_dict[c]]))
return feed_dict
def _prepare_batches_rt(self, data, batch_size, train):
'\n for rating/clicking prediction\n '
if (data is None):
return None
num_example = len(data['X'])
total_batch = int((((num_example + batch_size) - 1) / batch_size))
assert (num_example > 0)
batches = []
for batch in tqdm(range(total_batch), leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
batches.append(self._get_feed_dict_rt(data, (batch * batch_size), batch_size, train))
return batches
def _prepare_batches_rk(self, data, batch_size, train):
'\n for ranking task\n '
if (data is None):
return None
num_example = len(data['X'])
total_batch = int((((num_example + batch_size) - 1) / batch_size))
assert (num_example > 0)
neg_data = None
if train:
neg_df = self.generate_neg_df(uid_list=data['uid'], iid_list=data['iid'], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
batches = []
for batch in tqdm(range(total_batch), leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
batches.append(self._get_feed_dict_rk(data, (batch * batch_size), batch_size, train, neg_data))
return batches
def prepare_batches(self, data, batch_size, train):
'\n convert data dict to batches\n :param data: dict generated by self.get_*_data() and self.format_data_dict()\n :param batch_size: batch size\n :param train: train or validation/test\n :return: list of batches\n '
buffer_key = ''
if (data is self.validation_data):
buffer_key = ('validation_' + str(batch_size))
elif (data is self.test_data):
buffer_key = ('test_' + str(batch_size))
if (buffer_key in self.vt_batches_buffer):
return self.vt_batches_buffer[buffer_key]
if (self.rank == 1):
batches = self._prepare_batches_rk(data=data, batch_size=batch_size, train=train)
else:
batches = self._prepare_batches_rt(data=data, batch_size=batch_size, train=train)
if (buffer_key != ''):
self.vt_batches_buffer[buffer_key] = batches
return batches
def get_feed_dict(self, data, batch_start, batch_size, train, neg_data=None):
'\n :param data: data dict, generated by self.get_*_data() and self.format_data_dict()\n :param batch_start: start index of the batch\n :param batch_size: batch size\n :param train: train or validation/test\n :param neg_data: negative sample data dictionary\n :return: feed dict\n '
if (self.rank == 1):
return self._get_feed_dict_rk(data=data, batch_start=batch_start, batch_size=batch_size, train=train, neg_data=neg_data)
return self._get_feed_dict_rt(data=data, batch_start=batch_start, batch_size=batch_size, train=train)
def format_data_dict(self, df):
"\n format Dataframe to data dictionary\n :param df: pandas Dataframe, contains 'uid','iid','label' three columns (at least)\n :return: data dict\n "
(data_loader, model) = (self.data_loader, self.model)
data = {}
out_columns = []
if ('uid' in df):
out_columns.append('uid')
data['uid'] = df['uid'].values
if ('iid' in df):
out_columns.append('iid')
data['iid'] = df['iid'].values
if (data_loader.label in df.columns):
data['Y'] = np.array(df[data_loader.label], dtype=np.float32)
else:
logging.warning(('No Labels In Data: ' + data_loader.label))
data['Y'] = np.zeros(len(df), dtype=np.float32)
ui_id = df[out_columns]
out_df = ui_id
if ((data_loader.user_df is not None) and model.include_user_features):
out_columns.extend(data_loader.user_features)
out_df = pd.merge(out_df, data_loader.user_df, on='uid', how='left')
if ((data_loader.item_df is not None) and model.include_item_features):
out_columns.extend(data_loader.item_features)
out_df = pd.merge(out_df, data_loader.item_df, on='iid', how='left')
out_df = out_df.fillna(0)
if model.include_context_features:
context = df[data_loader.context_features]
out_df = pd.concat([out_df, context], axis=1, ignore_index=True)
if (not model.include_id):
out_df = out_df.drop(columns=['uid', 'iid'])
base = 0
for feature in out_df.columns:
out_df[feature] = out_df[feature].apply((lambda x: (x + base)))
base += int((data_loader.column_max[feature] + 1))
if model.append_id:
x = pd.concat([ui_id, out_df], axis=1, ignore_index=True)
data['X'] = x.values.astype(int)
else:
data['X'] = out_df.values.astype(int)
assert (len(data['X']) == len(data['Y']))
return data
def generate_neg_df(self, uid_list, iid_list, df, neg_n, train):
'\n Generate negative samples\n :param uid_list: users who need negative samples\n :param iid_list: observed interactions of these users\n :param df: dataframe that provides the remaining columns\n :param neg_n: number of negative samples\n :param train: sample for train or validation/test\n :return:\n '
neg_df = self._sample_neg_from_uid_list(uids=uid_list, neg_n=neg_n, train=train, other_infos={'iid': iid_list})
neg_df = pd.merge(neg_df, df, on=['uid', 'iid'], how='left')
neg_df = neg_df.drop(columns=['iid'])
neg_df = neg_df.rename(columns={'iid_neg': 'iid'})
neg_df = neg_df[df.columns]
neg_df[self.data_loader.label] = 0
return neg_df
def _sample_neg_from_uid_list(self, uids, neg_n, train, other_infos=None):
'\n Get negative samples based on user history\n :param uids: uid list\n :param neg_n: the number of negative samples\n :param train: sample for train data or validation/testing\n :param other_infos: extra columns (other than uid, iid, label) to carry over, e.g. history interactions\n :return: DataFrame, which needs self.format_data_dict() to convert to a data dictionary\n '
if (other_infos is None):
other_infos = {}
(uid_list, iid_list) = ([], [])
other_info_list = {}
for info in other_infos:
other_info_list[info] = []
tmp_history_dict = defaultdict(set)
item_num = self.data_loader.item_num
for (index, uid) in enumerate(uids):
if train:
inter_iids = (self.train_history_dict[uid] | tmp_history_dict[uid])
else:
inter_iids = ((self.train_history_dict[uid] | self.vt_history_dict[uid]) | tmp_history_dict[uid])
remain_iids_num = (item_num - len(inter_iids))
remain_iids = None
if (((1.0 * remain_iids_num) / item_num) < 0.2):
remain_iids = [i for i in range(1, item_num) if (i not in inter_iids)]
assert (remain_iids_num >= neg_n)
if (remain_iids is None):
for i in range(neg_n):
iid = np.random.randint(1, self.data_loader.item_num)
while (iid in inter_iids):
iid = np.random.randint(1, self.data_loader.item_num)
uid_list.append(uid)
iid_list.append(iid)
tmp_history_dict[uid].add(iid)
else:
iids = np.random.choice(remain_iids, neg_n, replace=False)
uid_list.extend(([uid] * neg_n))
iid_list.extend(iids)
tmp_history_dict[uid].update(iids)
for info in other_infos:
other_info_list[info].extend(([other_infos[info][index]] * neg_n))
neg_df = pd.DataFrame(data=list(zip(uid_list, iid_list)), columns=['uid', 'iid_neg'])
for info in other_infos:
neg_df[info] = other_info_list[info]
return neg_df
|
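# A minimal standalone sketch (toy data, hypothetical helper name, not part of
# the original pipeline) of the two branches in
# DataProcessor._sample_neg_from_uid_list above: rejection sampling while many
# candidate items remain, and enumeration + np.random.choice once a user has
# already interacted with more than ~80% of the catalogue.
import numpy as np

def sample_negatives(inter_iids, item_num, neg_n):
    remain_num = item_num - len(inter_iids)
    assert remain_num >= neg_n
    if (1.0 * remain_num) / item_num < 0.2:
        # few candidates left: enumerate them and sample without replacement
        remain_iids = [i for i in range(1, item_num) if i not in inter_iids]
        return list(np.random.choice(remain_iids, neg_n, replace=False))
    # otherwise rejection sampling is cheaper than enumerating the whole catalogue
    sampled = []
    for _ in range(neg_n):
        iid = np.random.randint(1, item_num)
        while iid in inter_iids or iid in sampled:
            iid = np.random.randint(1, item_num)
        sampled.append(iid)
    return sampled

# e.g. a user who has interacted with items {1, 2, 3} in a 10-item catalogue
print(sample_negatives({1, 2, 3}, item_num=10, neg_n=2))
|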
class HisDataProcessor(DataProcessor):
data_columns = ['X', global_p.C_HISTORY, global_p.C_HISTORY_LENGTH]
@staticmethod
def parse_dp_args(parser):
'\n parse data processor related arguments\n '
parser.add_argument('--max_his', type=int, default=(- 1), help='Max history length.')
parser.add_argument('--sup_his', type=int, default=0, help='If sup_his > 0, supplement history list with -1 at the beginning')
parser.add_argument('--sparse_his', type=int, default=1, help='Whether use sparse representation of user history.')
return DataProcessor.parse_dp_args(parser)
def __init__(self, data_loader, model, rank, test_neg_n, max_his, sup_his, sparse_his):
DataProcessor.__init__(self, data_loader=data_loader, model=model, rank=rank, test_neg_n=test_neg_n)
self.max_his = max_his
self.sparse_his = sparse_his
self.sup_his = sup_his
self.boolean_test_data = None
def _get_feed_dict_rt(self, data, batch_start, batch_size, train):
'\n generate a batch for rating/clicking prediction\n :param data: data dict, generated by self.get_*_data() and self.format_data_dict()\n :param batch_start: start index of the current batch\n :param batch_size: batch size\n :param train: train or validation/test\n :return: feed dict for the batch\n '
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
feed_dict = {'train': train, 'rank': 0, global_p.K_SAMPLE_ID: data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]}
if ('Y' in data):
feed_dict['Y'] = utils.numpy_to_torch(data['Y'][batch_start:(batch_start + real_batch_size)])
else:
feed_dict['Y'] = utils.numpy_to_torch(np.zeros(shape=real_batch_size))
for c in self.data_columns:
d = data[c][batch_start:(batch_start + real_batch_size)]
if ((c == global_p.C_HISTORY) and (self.sparse_his == 1)):
(x, y) = ([], [])
for (idx, iids) in enumerate(d):
x.extend(([idx] * len(iids)))
y.extend(iids)
if (len(x) <= 0):
i = torch.LongTensor([[0], [0]])
v = torch.FloatTensor([0.0])
else:
i = torch.LongTensor([x, y])
v = torch.FloatTensor(([1.0] * len(x)))
history = torch.sparse.FloatTensor(i, v, torch.Size([real_batch_size, self.data_loader.item_num]))
if (torch.cuda.device_count() > 0):
history = history.cuda()
feed_dict[c] = history
else:
feed_dict[c] = utils.numpy_to_torch(d)
return feed_dict
def _get_feed_dict_rk(self, data, batch_start, batch_size, train, neg_data=None):
if (not train):
feed_dict = self._get_feed_dict_rt(data=data, batch_start=batch_start, batch_size=batch_size, train=train)
feed_dict['rank'] = 1
else:
batch_end = min(len(data['X']), (batch_start + batch_size))
real_batch_size = (batch_end - batch_start)
neg_columns_dict = {}
if (neg_data is None):
logging.warning('neg_data is None')
neg_df = self.generate_neg_df(uid_list=data['uid'][batch_start:(batch_start + real_batch_size)], iid_list=data['iid'][batch_start:(batch_start + real_batch_size)], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c]
else:
for c in self.data_columns:
neg_columns_dict[c] = neg_data[c][batch_start:(batch_start + real_batch_size)]
y = np.concatenate([np.ones(shape=real_batch_size, dtype=np.float32), np.zeros(shape=real_batch_size, dtype=np.float32)])
sample_id = data[global_p.K_SAMPLE_ID][batch_start:(batch_start + real_batch_size)]
neg_sample_id = (sample_id + len(self.train_data['Y']))
feed_dict = {'train': train, 'rank': 1, 'Y': utils.numpy_to_torch(y), global_p.K_SAMPLE_ID: np.concatenate([sample_id, neg_sample_id])}
for c in self.data_columns:
d = np.concatenate([data[c][batch_start:(batch_start + real_batch_size)], neg_columns_dict[c]])
if ((c == global_p.C_HISTORY) and (self.sparse_his == 1)):
(x, y) = ([], [])
for (idx, iids) in enumerate(d):
x.extend(([idx] * len(iids)))
y.extend(iids)
if (len(x) <= 0):
i = torch.LongTensor([[0], [0]])
v = torch.FloatTensor([0.0])
else:
i = torch.LongTensor([x, y])
v = torch.FloatTensor(([1.0] * len(x)))
history = torch.sparse.FloatTensor(i, v, torch.Size([(real_batch_size * 2), self.data_loader.item_num]))
if (torch.cuda.device_count() > 0):
history = history.cuda()
feed_dict[c] = history
else:
feed_dict[c] = utils.numpy_to_torch(d)
return feed_dict
def _prepare_batches_rt(self, data, batch_size, train):
if ((self.sparse_his == 1) or (self.sup_his == 1)):
return DataProcessor._prepare_batches_rt(self, data=data, batch_size=batch_size, train=train)
if (data is None):
return None
num_example = len(data['X'])
assert (num_example > 0)
length_dict = {}
lengths = [len(x) for x in data[global_p.C_HISTORY]]
for (idx, l) in enumerate(lengths):
if (l not in length_dict):
length_dict[l] = []
length_dict[l].append(idx)
lengths = list(length_dict.keys())
batches = []
for l in tqdm(lengths, leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
rows = length_dict[l]
tmp_data = {}
for key in data:
if (data[key].dtype == object):
tmp_data[key] = np.array([np.array(data[key][r]) for r in rows])
else:
tmp_data[key] = data[key][rows]
tmp_total_batch = int((((len(rows) + batch_size) - 1) / batch_size))
for batch in range(tmp_total_batch):
batches.append(self._get_feed_dict_rt(tmp_data, (batch * batch_size), batch_size, train))
np.random.shuffle(batches)
return batches
def _prepare_batches_rk(self, data, batch_size, train):
if ((self.sparse_his == 1) or (self.sup_his == 1)):
return DataProcessor._prepare_batches_rk(self, data=data, batch_size=batch_size, train=train)
if (data is None):
return None
num_example = len(data['X'])
assert (num_example > 0)
neg_data = None
if train:
neg_df = self.generate_neg_df(uid_list=data['uid'], iid_list=data['iid'], df=self.data_loader.train_df, neg_n=1, train=True)
neg_data = self.format_data_dict(neg_df)
length_dict = {}
lengths = [len(x) for x in data[global_p.C_HISTORY]]
for (idx, l) in enumerate(lengths):
if (l not in length_dict):
length_dict[l] = []
length_dict[l].append(idx)
lengths = list(length_dict.keys())
batches = []
for l in tqdm(lengths, leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
rows = length_dict[l]
tmp_data = {}
for key in data:
if (data[key].dtype == object):
tmp_data[key] = np.array([np.array(data[key][r]) for r in rows])
else:
tmp_data[key] = data[key][rows]
tmp_neg_data = ({} if train else None)
if train:
for key in self.data_columns:
if (data[key].dtype == object):
tmp_neg_data[key] = np.array([np.array(neg_data[key][r]) for r in rows])
else:
tmp_neg_data[key] = neg_data[key][rows]
tmp_total_batch = int((((len(rows) + batch_size) - 1) / batch_size))
for batch in range(tmp_total_batch):
batches.append(self._get_feed_dict_rk(tmp_data, (batch * batch_size), batch_size, train, neg_data=tmp_neg_data))
np.random.shuffle(batches)
return batches
def format_data_dict(self, df):
if (global_p.C_HISTORY in df):
history = df[[global_p.C_HISTORY]]
else:
uids = df[['uid']]
history = pd.merge(uids, self.data_loader.train_his_df, on='uid', how='left')
history = history.rename(columns={'iids': global_p.C_HISTORY})
history[global_p.C_HISTORY] = history[global_p.C_HISTORY].fillna('')
data_dict = DataProcessor.format_data_dict(self, df)
if ((self.max_his > 0) and (self.sup_his == 1)):
data_dict[global_p.C_HISTORY] = history[global_p.C_HISTORY].apply((lambda x: (np.array((([(- 1)] * self.max_his) + [int(i) for i in x.split(',')])[(- self.max_his):]) if (x != '') else np.array([])))).values
elif ((self.max_his > 0) and (self.sup_his == 0)):
data_dict[global_p.C_HISTORY] = history[global_p.C_HISTORY].apply((lambda x: (np.array([int(i) for i in x.split(',')][(- self.max_his):]) if (x != '') else np.array([])))).values
else:
data_dict[global_p.C_HISTORY] = history[global_p.C_HISTORY].apply((lambda x: ([int(i) for i in x.split(',')] if (x != '') else np.array([])))).values
data_dict[global_p.C_HISTORY_LENGTH] = np.array([len(h) for h in data_dict[global_p.C_HISTORY]])
return data_dict
|
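# A small standalone illustration (toy values, independent of the classes above)
# of how HisDataProcessor represents variable-length histories when
# sparse_his == 1: a sparse multi-hot matrix whose row is the position in the
# batch, whose column is the item id, and whose value is 1.0.
import torch

histories = [[2, 5], [], [1, 2, 3]]   # history iids of three batch rows
item_num = 6
rows, cols = [], []
for idx, iids in enumerate(histories):
    rows.extend([idx] * len(iids))
    cols.extend(iids)
if len(rows) == 0:
    i = torch.LongTensor([[0], [0]])
    v = torch.FloatTensor([0.0])
else:
    i = torch.LongTensor([rows, cols])
    v = torch.FloatTensor([1.0] * len(rows))
history = torch.sparse.FloatTensor(i, v, torch.Size([len(histories), item_num]))
print(history.to_dense())
# tensor([[0., 0., 1., 0., 0., 1.],
#         [0., 0., 0., 0., 0., 0.],
#         [0., 1., 1., 1., 0., 0.]])
|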
class ProLogicRecDP(HisDataProcessor):
data_columns = ['X', global_p.C_HISTORY, global_p.C_HISTORY_POS_TAG, global_p.C_HISTORY_LENGTH]
def format_data_dict(self, df):
'\n besides the regular uid, iid, label and user/item/context features, the historical interactions also need to be processed\n :param df: train/validation/test df\n :return:\n '
his_list = df[global_p.C_HISTORY].apply((lambda x: x.split(',')))
his_length = his_list.apply((lambda x: (0 if (x[0] == '') else len(x))))
his_length = his_length[(his_length > 0)]
(df, his_list) = (df.loc[his_length.index], his_list.loc[his_length.index])
data_dict = DataProcessor.format_data_dict(self, df)
history_pos_tag = his_list.apply((lambda x: [(0 if i.startswith('~') else 1) for i in x]))
history = his_list.apply((lambda x: [(int(i[1:]) if i.startswith('~') else int(i)) for i in x]))
data_dict[global_p.C_HISTORY] = history.values
data_dict[global_p.C_HISTORY_POS_TAG] = history_pos_tag.values
data_dict[global_p.C_HISTORY_LENGTH] = np.array([len(h) for h in data_dict[global_p.C_HISTORY]])
return data_dict
def get_boolean_test_data(self):
logging.info('Prepare Boolean Test Data...')
df = self.data_loader.test_df
self.boolean_test_data = self.format_data_dict(df)
self.boolean_test_data[global_p.K_SAMPLE_ID] = np.arange(0, len(self.boolean_test_data['Y']))
return self.boolean_test_data
|
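# A short worked example (toy history string) of how ProLogicRecDP.format_data_dict
# splits one history entry, assuming the same convention used when histories are
# built (a '~' prefix marks a negatively-interacted item).
his = '12,~34,56'
items = his.split(',')
history_pos_tag = [0 if i.startswith('~') else 1 for i in items]
history = [int(i[1:]) if i.startswith('~') else int(i) for i in items]
print(history)            # [12, 34, 56]
print(history_pos_tag)    # [1, 0, 1]
|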
def main():
init_parser = argparse.ArgumentParser(description='Model')
init_parser.add_argument('--rank', type=int, default=1, help='1=ranking, 0=rating/click')
init_parser.add_argument('--data_loader', type=str, default='DataLoader', help='Choose data_loader')
init_parser.add_argument('--model_name', type=str, default='BaseModel', help='Choose model to run.')
init_parser.add_argument('--runner', type=str, default='BaseRunner', help='Choose runner')
init_parser.add_argument('--data_processor', type=str, default='DataProcessor', help='Choose data_processor')
(init_args, init_extras) = init_parser.parse_known_args()
data_loader_name = eval(init_args.data_loader)
model_name = eval(init_args.model_name)
if (init_args.model_name in ['NCR']):
init_args.runner_name = 'ProLogicRunner'
else:
init_args.runner_name = 'BaseRunner'
runner_name = eval(init_args.runner_name)
if (init_args.model_name in ['SVDPP']):
init_args.data_processor = 'HisDataProcessor'
elif (init_args.model_name in ['NCR', 'RNNModel', 'CompareModel', 'GRU4Rec', 'STAMP']):
init_args.data_processor = 'ProLogicRecDP'
data_processor_name = eval(init_args.data_processor)
parser = argparse.ArgumentParser(description='')
parser = utils.parse_global_args(parser)
parser = data_loader_name.parse_data_args(parser)
parser = model_name.parse_model_args(parser, model_name=init_args.model_name)
parser = runner_name.parse_runner_args(parser)
parser = data_processor_name.parse_dp_args(parser)
(args, extras) = parser.parse_known_args()
log_file_name = [str(init_args.rank), init_args.model_name, args.dataset, str(args.random_seed), ('optimizer=' + args.optimizer), ('lr=' + str(args.lr)), ('l2=' + str(args.l2)), ('dropout=' + str(args.dropout)), ('batch_size=' + str(args.batch_size))]
log_file_name = '__'.join(log_file_name).replace(' ', '__')
if (args.log_file == '../log/log.txt'):
args.log_file = ('../log/%s.txt' % log_file_name)
if (args.result_file == '../result/result.npy'):
args.result_file = ('../result/%s.npy' % log_file_name)
if (args.model_path == ('../model/%s/%s.pt' % (init_args.model_name, init_args.model_name))):
args.model_path = ('../model/%s/%s.pt' % (init_args.model_name, log_file_name))
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(filename=args.log_file, level=args.verbose)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
logging.info(vars(init_args))
logging.info(vars(args))
logging.info(('DataLoader: ' + init_args.data_loader))
logging.info(('Model: ' + init_args.model_name))
logging.info(('Runner: ' + init_args.runner_name))
logging.info(('DataProcessor: ' + init_args.data_processor))
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
logging.info(('# cuda devices: %d' % torch.cuda.device_count()))
data_loader = data_loader_name(path=args.path, dataset=args.dataset, label=args.label, sep=args.sep)
(features, feature_dims, feature_min, feature_max) = data_loader.feature_info(include_id=model_name.include_id, include_item_features=model_name.include_item_features, include_user_features=model_name.include_user_features)
if (init_args.model_name in ['BaseModel']):
model = model_name(label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=len(features), random_seed=args.random_seed, model_path=args.model_path)
elif (init_args.model_name in ['RecModel', 'BiasedMF', 'SVDPP']):
model = model_name(label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=0, user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, random_seed=args.random_seed, model_path=args.model_path)
elif (init_args.model_name in ['GRU4Rec']):
model = model_name(neg_emb=args.neg_emb, neg_layer=args.neg_layer, hidden_size=args.hidden_size, num_layers=args.num_layers, p_layers=args.p_layers, label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=0, user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, random_seed=args.random_seed, model_path=args.model_path)
elif (init_args.model_name in ['STAMP']):
model = model_name(neg_emb=args.neg_emb, neg_layer=args.neg_layer, hidden_size=args.hidden_size, num_layers=args.num_layers, p_layers=args.p_layers, label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=0, user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, random_seed=args.random_seed, model_path=args.model_path, attention_size=args.attention_size)
elif (init_args.model_name in ['NCR', 'CompareModel']):
model = model_name(label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=0, user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, r_weight=args.r_weight, ppl_weight=args.ppl_weight, pos_weight=args.pos_weight, random_seed=args.random_seed, model_path=args.model_path)
elif (init_args.model_name in ['RNNModel']):
model = model_name(label_min=data_loader.label_min, label_max=data_loader.label_max, feature_num=0, user_num=data_loader.user_num, item_num=data_loader.item_num, u_vector_size=args.u_vector_size, i_vector_size=args.i_vector_size, random_seed=args.random_seed, model_path=args.model_path)
else:
logging.error(('Unknown Model: ' + init_args.model_name))
return
model.apply(model.init_paras)
if (torch.cuda.device_count() > 0):
model = model.cuda()
if (init_args.model_name in ['NCR', 'RNNModel', 'CompareModel', 'GRU4Rec', 'STAMP']):
data_loader.append_his(last_n=args.max_his, supply=False, neg=True, neg_column=False)
if (init_args.rank == 1):
data_loader.drop_neg()
if (init_args.data_processor in ['ProLogicRecDP']):
data_processor = data_processor_name(data_loader, model, rank=init_args.rank, test_neg_n=args.test_neg_n, max_his=args.max_his, sup_his=0, sparse_his=0)
elif (init_args.data_processor in ['HisDataProcessor']):
data_processor = data_processor_name(data_loader, model, rank=init_args.rank, test_neg_n=args.test_neg_n, sup_his=args.sup_his, max_his=args.max_his, sparse_his=args.sparse_his)
else:
data_processor = data_processor_name(data_loader, model, rank=init_args.rank, test_neg_n=args.test_neg_n)
if (init_args.runner_name in ['BaseRunner', 'ProLogicRunner']):
runner = runner_name(optimizer=args.optimizer, learning_rate=args.lr, epoch=args.epoch, batch_size=args.batch_size, eval_batch_size=args.eval_batch_size, dropout=args.dropout, l2=args.l2, metrics=args.metric, check_epoch=args.check_epoch, early_stop=args.early_stop)
else:
logging.error(('Unknown Runner: ' + init_args.runner_name))
return
logging.info(((('Test Before Training = ' + utils.format_metric(runner.evaluate(model, data_processor.get_test_data(), data_processor))) + ' ') + ','.join(runner.metrics)))
if (args.load > 0):
model.load_model()
if (args.train > 0):
runner.train(model, data_processor, skip_eval=args.skip_eval)
logging.info(((('Test After Training = ' + utils.format_metric(runner.evaluate(model, data_processor.get_test_data(), data_processor))) + ' ') + ','.join(runner.metrics)))
np.save(args.result_file, runner.predict(model, data_processor.get_test_data(), data_processor))
logging.info(('Save Test Results to ' + args.result_file))
logging.debug(runner.evaluate(model, data_processor.get_test_data(), data_processor))
return
|
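# A hypothetical invocation (script and dataset names are assumptions, not taken
# from this code) showing how main() wires the pieces together: --model_name
# selects the runner and data processor, and the remaining flags are consumed by
# the argument parsers of the chosen classes.
#
#   python main.py --rank 1 --model_name NCR --dataset ml100k \
#       --optimizer Adam --lr 0.001 --metric ndcg@10,hit@10 \
#       --max_his 10 --test_neg_n 100
#
# With --model_name NCR, main() picks ProLogicRunner and ProLogicRecDP
# automatically; other models fall back to BaseRunner and DataProcessor or
# HisDataProcessor.
|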
class BaseModel(torch.nn.Module):
'\n Base model; the following methods need to be overridden:\n parse_model_args,\n __init__,\n _init_weights,\n predict,\n forward,\n '
append_id = False
include_id = True
include_user_features = True
include_item_features = True
include_context_features = False
@staticmethod
def parse_model_args(parser, model_name='BaseModel'):
'\n parse model related arguments\n :param parser: parser object, which is from main() method\n :param model_name: model name\n :return:\n '
parser.add_argument('--model_path', type=str, default=('../model/%s/%s.pt' % (model_name, model_name)), help='Model save path.')
return parser
@staticmethod
def evaluate_method(p, data, metrics):
"\n calculate evaluation metrics\n :param p: prediction valuds,np.array,generated by runner.predict()\n :param data: data dict,generated by DataProcessor\n :param metrics: metrics list,generated by runner.metrics,for example ['rmse', 'auc']\n :return:\n "
l = data['Y']
evaluations = []
for metric in metrics:
if (metric == 'rmse'):
evaluations.append(np.sqrt(mean_squared_error(l, p)))
elif (metric == 'mae'):
evaluations.append(mean_absolute_error(l, p))
elif (metric == 'auc'):
evaluations.append(roc_auc_score(l, p))
elif (metric == 'f1'):
evaluations.append(f1_score(l, p))
elif (metric == 'accuracy'):
evaluations.append(accuracy_score(l, p))
elif (metric == 'precision'):
evaluations.append(precision_score(l, p))
elif (metric == 'recall'):
evaluations.append(recall_score(l, p))
else:
k = int(metric.split('@')[(- 1)])
df = pd.DataFrame()
df['uid'] = data['uid']
df['p'] = p
df['l'] = l
df = df.sort_values(by='p', ascending=False)
df_group = df.groupby('uid')
if metric.startswith('ndcg@'):
ndcgs = []
for (uid, group) in df_group:
ndcgs.append(ndcg_at_k(group['l'].tolist()[:k], k=k, method=1))
evaluations.append(np.average(ndcgs))
elif metric.startswith('hit@'):
hits = []
for (uid, group) in df_group:
hits.append(int((np.sum(group['l'][:k]) > 0)))
evaluations.append(np.average(hits))
elif metric.startswith('precision@'):
precisions = []
for (uid, group) in df_group:
precisions.append(precision_at_k(group['l'].tolist()[:k], k=k))
evaluations.append(np.average(precisions))
elif metric.startswith('recall@'):
recalls = []
for (uid, group) in df_group:
recalls.append(((1.0 * np.sum(group['l'][:k])) / np.sum(group['l'])))
evaluations.append(np.average(recalls))
return evaluations
@staticmethod
def init_paras(m):
'\n Initialize model parameters\n :param m: module to initialize\n :return:\n '
if (type(m) == torch.nn.Linear):
torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
if (m.bias is not None):
torch.nn.init.normal_(m.bias, mean=0.0, std=0.01)
elif (type(m) == torch.nn.Embedding):
torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
def __init__(self, label_min, label_max, feature_num, random_seed=2018, model_path='../model/Model/Model.pt'):
super(BaseModel, self).__init__()
self.label_min = label_min
self.label_max = label_max
self.feature_num = feature_num
self.random_seed = random_seed
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed(self.random_seed)
self.model_path = model_path
self._init_weights()
logging.debug(list(self.parameters()))
self.total_parameters = self.count_variables()
logging.info(('# of params: %d' % self.total_parameters))
self.optimizer = None
def _init_weights(self):
self.x_bn = torch.nn.BatchNorm1d(self.feature_num)
self.prediction = torch.nn.Linear(self.feature_num, 1)
def count_variables(self):
'\n count number of parameters in the model\n :return:\n '
total_parameters = sum((p.numel() for p in self.parameters() if p.requires_grad))
return total_parameters
def l2(self):
'\n calculate l2 regularization\n :return:\n '
l2 = 0
for p in self.parameters():
l2 += (p ** 2).sum()
return l2
def predict(self, feed_dict):
'\n prediction only, no loss calculation\n :param feed_dict: input data dictionary\n :return: output dictionary; prediction: the predicted values; check: info to be output during training (for monitoring)\n '
check_list = []
x = self.x_bn(feed_dict['X'].float())
x = torch.nn.Dropout(p=feed_dict['dropout'])(x)
prediction = F.relu(self.prediction(x)).view([(- 1)])
out_dict = {'prediction': prediction, 'check': check_list}
return out_dict
def forward(self, feed_dict):
'\n calculate loss\n :param feed_dict: input dictionary\n :return: output dictionary, which adds loss info to the output dictionary of the predict() method.\n '
out_dict = self.predict(feed_dict)
if (feed_dict['rank'] == 1):
batch_size = int((feed_dict['Y'].shape[0] / 2))
(pos, neg) = (out_dict['prediction'][:batch_size], out_dict['prediction'][batch_size:])
loss = (- (pos - neg).sigmoid().log().sum())
else:
loss = torch.nn.MSELoss()(out_dict['prediction'], feed_dict['Y'])
out_dict['loss'] = loss
return out_dict
def lrp(self):
pass
def save_model(self, model_path=None):
'\n save model\n '
if (model_path is None):
model_path = self.model_path
dir_path = os.path.dirname(model_path)
if (not os.path.exists(dir_path)):
os.mkdir(dir_path)
torch.save(self.state_dict(), model_path)
logging.info(('Save model to ' + model_path))
def load_model(self, model_path=None):
'\n load model\n '
if (model_path is None):
model_path = self.model_path
self.load_state_dict(torch.load(model_path))
self.eval()
logging.info(('Load model from ' + model_path))
|
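# A tiny numeric check (toy tensors, unrelated to any real batch) of the pairwise
# ranking loss in BaseModel.forward above: in rank mode the first half of the
# batch holds positive items and the second half their sampled negatives, and the
# loss is -log(sigmoid(pos - neg)) summed over the pairs (a BPR-style objective).
import torch

prediction = torch.tensor([2.0, 1.0, 0.5, 1.5])   # [pos_1, pos_2, neg_1, neg_2]
batch_size = prediction.shape[0] // 2
pos, neg = prediction[:batch_size], prediction[batch_size:]
loss = -(pos - neg).sigmoid().log().sum()
print(loss.item())   # ~1.18 = -log(sigmoid(1.5)) - log(sigmoid(-0.5))
|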
class BaseRunner(object):
@staticmethod
def parse_runner_args(parser):
'\n command line arguments for running the model\n :param parser:\n :return:\n '
parser.add_argument('--load', type=int, default=0, help='Whether load model and continue to train')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs.')
parser.add_argument('--check_epoch', type=int, default=1, help='Check every epochs.')
parser.add_argument('--early_stop', type=int, default=1, help='whether to early-stop.')
parser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size during training.')
parser.add_argument('--eval_batch_size', type=int, default=(128 * 128), help='Batch size during testing.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout probability for each deep layer')
parser.add_argument('--l2', type=float, default=0.0001, help='Weight of l2_regularize in loss.')
parser.add_argument('--optimizer', type=str, default='GD', help='optimizer: GD, Adam, Adagrad')
parser.add_argument('--metric', type=str, default='RMSE', help='metrics: RMSE, MAE, AUC, F1, Accuracy, Precision, Recall')
parser.add_argument('--skip_eval', type=int, default=0, help='number of epochs without evaluation')
return parser
def __init__(self, optimizer='GD', learning_rate=0.01, epoch=100, batch_size=128, eval_batch_size=(128 * 128), dropout=0.2, l2=1e-05, metrics='RMSE', check_epoch=10, early_stop=1):
'\n Initialization\n :param optimizer: optimizer name\n :param learning_rate: learning rate\n :param epoch: total number of training epochs\n :param batch_size: training batch size\n :param eval_batch_size: evaluation batch size\n :param dropout: dropout rate\n :param l2: l2 regularization weight\n :param metrics: evaluation metrics, comma-separated\n :param check_epoch: check some intermediate tensors of the model every this many epochs\n :param early_stop: whether to stop training early automatically\n '
self.optimizer_name = optimizer
self.learning_rate = learning_rate
self.epoch = epoch
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.dropout = dropout
self.no_dropout = 0.0
self.l2_weight = l2
self.metrics = metrics.lower().split(',')
self.check_epoch = check_epoch
self.early_stop = early_stop
self.time = None
(self.train_results, self.valid_results, self.test_results) = ([], [], [])
def _build_optimizer(self, model):
'\n build the optimizer\n :param model: model\n :return: optimizer\n '
optimizer_name = self.optimizer_name.lower()
if (optimizer_name == 'gd'):
logging.info('Optimizer: GD')
optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
elif (optimizer_name == 'adagrad'):
logging.info('Optimizer: Adagrad')
optimizer = torch.optim.Adagrad(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
elif (optimizer_name == 'adam'):
logging.info('Optimizer: Adam')
optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
else:
logging.error(('Unknown Optimizer: ' + self.optimizer_name))
assert (self.optimizer_name in ['GD', 'Adagrad', 'Adam'])
optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, weight_decay=self.l2_weight)
return optimizer
def _check_time(self, start=False):
'\n time bookkeeping; self.time stores [start time, time of the previous step]\n :param start: whether to (re)start timing\n :return: time elapsed since the previous step\n '
if ((self.time is None) or start):
self.time = ([time()] * 2)
return self.time[0]
tmp_time = self.time[1]
self.time[1] = time()
return (self.time[1] - tmp_time)
def batches_add_control(self, batches, train):
"\n 向所有batch添加一些控制信息比如'dropout'\n :param batches: 所有batch的list,由DataProcessor产生\n :param train: 是否是训练阶段\n :return: 所有batch的list\n "
for batch in batches:
batch['train'] = train
batch['dropout'] = (self.dropout if train else self.no_dropout)
return batches
def predict(self, model, data, data_processor):
'\n predict only, no training\n :param model: model\n :param data: data dict, generated by the self.get_*_data() and self.format_data_dict() functions of DataProcessor\n :param data_processor: DataProcessor instance\n :return: concatenated predictions as an np.array\n '
batches = data_processor.prepare_batches(data, self.eval_batch_size, train=False)
batches = self.batches_add_control(batches, train=False)
model.eval()
predictions = []
for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
prediction = model.predict(batch)['prediction']
predictions.append(prediction.detach().cpu())
predictions = np.concatenate(predictions)
sample_ids = np.concatenate([b[global_p.K_SAMPLE_ID] for b in batches])
reorder_dict = dict(zip(sample_ids, predictions))
predictions = np.array([reorder_dict[i] for i in data[global_p.K_SAMPLE_ID]])
return predictions
def fit(self, model, data, data_processor, epoch=(- 1)):
'\n train for one pass over the data\n :param model: model\n :param data: data dict, generated by the self.get_*_data() and self.format_data_dict() functions of DataProcessor\n :param data_processor: DataProcessor instance\n :param epoch: epoch index\n :return: output of the last batch, which self.check can use to inspect some intermediate results\n '
if (model.optimizer is None):
model.optimizer = self._build_optimizer(model)
batches = data_processor.prepare_batches(data, self.batch_size, train=True)
batches = self.batches_add_control(batches, train=True)
batch_size = (self.batch_size if (data_processor.rank == 0) else (self.batch_size * 2))
model.train()
accumulate_size = 0
for batch in tqdm(batches, leave=False, desc=('Epoch %5d' % (epoch + 1)), ncols=100, mininterval=1):
accumulate_size += len(batch['Y'])
model.optimizer.zero_grad()
output_dict = model(batch)
loss = (output_dict['loss'] + (model.l2() * self.l2_weight))
loss.backward()
torch.nn.utils.clip_grad_value_(model.parameters(), 50)
if ((accumulate_size >= batch_size) or (batch is batches[(- 1)])):
model.optimizer.step()
accumulate_size = 0
model.eval()
return output_dict
def eva_termination(self, model):
'\n check whether to terminate training, based on the validation set\n :param model: model\n :return: whether to terminate training\n '
metric = self.metrics[0]
valid = self.valid_results
if ((len(valid) > 20) and (metric in utils.LOWER_METRIC_LIST) and utils.strictly_increasing(valid[(- 5):])):
return True
elif ((len(valid) > 20) and (metric not in utils.LOWER_METRIC_LIST) and utils.strictly_decreasing(valid[(- 5):])):
return True
elif ((len(valid) - valid.index(utils.best_result(metric, valid))) > 20):
return True
return False
def train(self, model, data_processor, skip_eval=0):
'\n train the model\n :param model: model\n :param data_processor: DataProcessor instance\n :param skip_eval: number of epochs to skip for evaluations\n :return:\n '
train_data = data_processor.get_train_data(epoch=(- 1))
validation_data = data_processor.get_validation_data()
test_data = data_processor.get_test_data()
self._check_time(start=True)
init_train = (self.evaluate(model, train_data, data_processor, metrics=['rmse', 'mae']) if (train_data is not None) else ([(- 1.0)] * len(self.metrics)))
init_valid = (self.evaluate(model, validation_data, data_processor) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
init_test = (self.evaluate(model, test_data, data_processor) if (test_data is not None) else ([(- 1.0)] * len(self.metrics)))
logging.info((('Init: \t train= %s validation= %s test= %s [%.1f s] ' % (utils.format_metric(init_train), utils.format_metric(init_valid), utils.format_metric(init_test), self._check_time())) + ','.join(self.metrics)))
try:
for epoch in range(self.epoch):
self._check_time()
epoch_train_data = data_processor.get_train_data(epoch=epoch)
last_batch = self.fit(model, epoch_train_data, data_processor, epoch=epoch)
if ((self.check_epoch > 0) and ((epoch == 1) or ((epoch % self.check_epoch) == 0))):
self.check(model, last_batch)
training_time = self._check_time()
if (epoch >= skip_eval):
train_result = (self.evaluate(model, train_data, data_processor, metrics=['rmse', 'mae']) if (train_data is not None) else ([(- 1.0)] * len(self.metrics)))
valid_result = (self.evaluate(model, validation_data, data_processor) if (validation_data is not None) else ([(- 1.0)] * len(self.metrics)))
test_result = (self.evaluate(model, test_data, data_processor) if (test_data is not None) else ([(- 1.0)] * len(self.metrics)))
testing_time = self._check_time()
self.train_results.append(train_result)
self.valid_results.append(valid_result)
self.test_results.append(test_result)
logging.info((('Epoch %5d [%.1f s]\t train= %s validation= %s test= %s [%.1f s] ' % ((epoch + 1), training_time, utils.format_metric(train_result), utils.format_metric(valid_result), utils.format_metric(test_result), testing_time)) + ','.join(self.metrics)))
if (utils.best_result(self.metrics[0], self.valid_results) == self.valid_results[(- 1)]):
model.save_model()
if (self.eva_termination(model) and (self.early_stop == 1)):
logging.info(('Early stop at %d based on validation result.' % (epoch + 1)))
break
if (epoch < skip_eval):
logging.info(('Epoch %5d [%.1f s]' % ((epoch + 1), training_time)))
except KeyboardInterrupt:
logging.info('Early stop manually')
save_here = input('Save here? (1/0) (default 0):')
if str(save_here).lower().startswith('1'):
model.save_model()
best_valid_score = utils.best_result(self.metrics[0], self.valid_results)
best_epoch = self.valid_results.index(best_valid_score)
logging.info((('Best Iter(validation)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
best_test_score = utils.best_result(self.metrics[0], self.test_results)
best_epoch = self.test_results.index(best_test_score)
logging.info((('Best Iter(test)= %5d\t train= %s valid= %s test= %s [%.1f s] ' % ((best_epoch + 1), utils.format_metric(self.train_results[best_epoch]), utils.format_metric(self.valid_results[best_epoch]), utils.format_metric(self.test_results[best_epoch]), (self.time[1] - self.time[0]))) + ','.join(self.metrics)))
model.load_model()
def evaluate(self, model, data, data_processor, metrics=None):
'\n evaluate model performance\n :param model: model\n :param data: data dict, generated by the self.get_*_data() and self.format_data_dict() functions of DataProcessor\n :param data_processor: DataProcessor\n :param metrics: list of str\n :return: list of float, one per metric\n '
if (metrics is None):
metrics = self.metrics
predictions = self.predict(model, data, data_processor)
return model.evaluate_method(predictions, data, metrics=metrics)
def check(self, model, out_dict):
'\n inspect intermediate results of the model\n :param model: model\n :param out_dict: model output of one batch\n :return:\n '
check = out_dict
logging.info(os.linesep)
for (i, t) in enumerate(check['check']):
d = np.array(t[1].detach().cpu())
logging.info((os.linesep.join([((t[0] + '\t') + str(d.shape)), np.array2string(d, threshold=20)]) + os.linesep))
(loss, l2) = (check['loss'], model.l2())
l2 = (l2 * self.l2_weight)
logging.info(('loss = %.4f, l2 = %.4f' % (loss, l2)))
if (not ((loss.abs() * 0.005) < l2 < (loss.abs() * 0.1))):
logging.warning(('l2 inappropriate: loss = %.4f, l2 = %.4f' % (loss, l2)))
|
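# A small standalone illustration (toy arrays) of the sample-id bookkeeping in
# BaseRunner.predict above: prepared batches may be regrouped or reordered, so
# every batch carries the original sample ids and the concatenated predictions
# are mapped back to the order of the input data dict before metrics are computed.
import numpy as np

data_sample_ids = np.array([0, 1, 2, 3])            # order of the data dict
batch_sample_ids = np.array([2, 0, 3, 1])            # order after batching
batch_predictions = np.array([0.2, 0.9, 0.1, 0.6])   # model outputs, batch order
reorder_dict = dict(zip(batch_sample_ids, batch_predictions))
predictions = np.array([reorder_dict[i] for i in data_sample_ids])
print(predictions)   # [0.9 0.6 0.2 0.1]
|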
class ProLogicRunner(BaseRunner):
@staticmethod
def parse_runner_args(parser):
'\n command line arguments for running the model\n :param parser:\n :return:\n '
parser.add_argument('--load', type=int, default=0, help='Whether load model and continue to train')
parser.add_argument('--epoch', type=int, default=100, help='Number of epochs.')
parser.add_argument('--check_epoch', type=int, default=1, help='Check every epochs.')
parser.add_argument('--early_stop', type=int, default=1, help='whether to early-stop.')
parser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')
parser.add_argument('--batch_size', type=int, default=128, help='Batch size during training.')
parser.add_argument('--eval_batch_size', type=int, default=(128 * 128), help='Batch size during testing.')
parser.add_argument('--dropout', type=float, default=0.2, help='Dropout probability for each deep layer')
parser.add_argument('--l2', type=float, default=0.0001, help='Weight of l2_regularize in loss.')
parser.add_argument('--optimizer', type=str, default='GD', help='optimizer: GD, Adam, Adagrad')
parser.add_argument('--metric', type=str, default='RMSE', help='metrics: RMSE, MAE, AUC, F1, Accuracy, Precision, Recall')
parser.add_argument('--skip_eval', type=int, default=0, help='number of epochs without evaluation')
return parser
def __init__(self, optimizer='GD', learning_rate=0.01, epoch=100, batch_size=128, eval_batch_size=(128 * 128), dropout=0.2, l2=1e-05, metrics='RMSE', check_epoch=10, early_stop=1):
'\n Initialization\n :param optimizer: optimizer name\n :param learning_rate: learning rate\n :param epoch: total number of training epochs\n :param batch_size: training batch size\n :param eval_batch_size: evaluation batch size\n :param dropout: dropout rate\n :param l2: l2 regularization weight\n :param metrics: evaluation metrics, comma-separated\n :param check_epoch: check some intermediate tensors of the model every this many epochs\n :param early_stop: whether to stop training early automatically\n '
BaseRunner.__init__(self, optimizer=optimizer, learning_rate=learning_rate, epoch=epoch, batch_size=batch_size, eval_batch_size=eval_batch_size, dropout=dropout, l2=l2, metrics=metrics, check_epoch=check_epoch, early_stop=early_stop)
def accuracy_calc(self, p, l):
'\n calculate the accuracy with each bit flip\n :param p: predicted value\n :param l: ground truth value calculated by expression_evaluator\n :return: accuracy rate\n '
return accuracy_score(l, p)
def _data_reformat(self, data, bit_reverse_indices):
'\n update the x_tag\n :param data: data dictionary\n :param bit_reverse_indices: a list with the indices of the bit to be reversed\n :return:\n '
new_data = copy.deepcopy(data)
for tag in new_data[global_p.C_HISTORY_POS_TAG]:
for index in bit_reverse_indices:
tag[index] = (1 - tag[index])
return new_data
def _boolean_evaluate(self, model, data, data_processor, bit_reverse_index):
new_data = self._data_reformat(data, bit_reverse_index)
batches = data_processor.prepare_batches(new_data, self.eval_batch_size, train=False)
batches = self.batches_add_control(batches, train=False)
predictions = []
interims = []
model.eval()
for batch in tqdm(batches, leave=False, ncols=100, mininterval=1, desc='Predict'):
result = model.predict(batch)
prediction = result['prediction']
interim = result['interim']
interims.append(interim.detach().cpu())
predictions.append(prediction.detach().cpu())
predictions = np.concatenate(predictions)
interims = np.concatenate(interims, axis=0)
sample_ids = np.concatenate([b[global_p.K_SAMPLE_ID] for b in batches])
reorder_dict = dict(zip(sample_ids, predictions))
predictions = np.array([reorder_dict[i] for i in data[global_p.K_SAMPLE_ID]])
reorder_dict_2 = dict(zip(sample_ids, interims))
interims = np.array([reorder_dict_2[i] for i in data[global_p.K_SAMPLE_ID]])
return (predictions, interims)
@staticmethod
def _enum_subsets(input_set):
'\n enumerate all non-empty subsets of the given input_set\n return: a dictionary whose keys are subset sizes and whose values are lists of the subsets of that size\n '
result_dict = {}
for i in range(1, (len(input_set) + 1)):
tmp_list = list(map(list, itertools.combinations(input_set, i)))
result_dict[i] = tmp_list
return result_dict
@staticmethod
def _gen_prediction_dict(p, data):
df = pd.DataFrame()
df['uid'] = data['uid']
df['iid'] = data['iid']
df['p'] = p
df = df.sort_values(by='p', ascending=False)
df_group = df.groupby('uid')
y_dict = {}
for (uid, group) in df_group:
tmp_iid = group['iid'].tolist()[:1][0]
y_dict[uid] = tmp_iid
return y_dict
@staticmethod
def _accuracy_calc_from_dict(original_dict, updated_dict):
assert (len(original_dict) == len(updated_dict))
counter = 0
for key in original_dict:
if (updated_dict[key] == original_dict[key]):
counter += 1
return (counter, len(original_dict))
@staticmethod
def _statistic_info(data):
path = './ml100k_freq_info.pkl'
with open(path, 'rb') as file:
item_dict = pickle.load(file)
tmp_list = []
for key in data:
tmp_list.append(item_dict[data[key]])
tmp_list = np.array(tmp_list)
logging.info(('\n average frequency: %.1f' % tmp_list.mean()))
logging.info(('\n max frequency: %.1f' % tmp_list.max()))
logging.info(('\n min frequency: %.1f' % tmp_list.min()))
@staticmethod
def _statistic_of_difference(original, updated):
path = './ml100k_freq_info.pkl'
with open(path, 'rb') as file:
item_dict = pickle.load(file)
unchanged_dict = {}
changed_dict = {}
for key in original:
if (original[key] == updated[key]):
unchanged_dict[original[key]] = item_dict[original[key]]
else:
changed_dict[key] = {original[key]: item_dict[original[key]], updated[key]: item_dict[updated[key]]}
unchanged_freq_max = max(unchanged_dict, key=unchanged_dict.get)
unchanged_freq_min = min(unchanged_dict, key=unchanged_dict.get)
unchanged_freq_mean = np.array([unchanged_dict[k] for k in unchanged_dict]).mean()
logging.info('unchanged_freq_max: {}'.format(unchanged_dict[unchanged_freq_max]))
logging.info('unchanged_freq_min: {}'.format(unchanged_dict[unchanged_freq_min]))
logging.info('unchanged_freq_mean: {}'.format(unchanged_freq_mean))
return (unchanged_dict, changed_dict)
def boolean_test(self, model, data, data_processor):
'\n reverse bits to test the boolean sensitivity\n :param model: model\n :param data: data dict to use\n :param data_processor: data processor\n :return:\n '
length_dict = {}
lengths = [len(x) for x in data[global_p.C_HISTORY]]
for (idx, l) in enumerate(lengths):
if (l not in length_dict):
length_dict[l] = []
length_dict[l].append(idx)
lengths = list(length_dict.keys())
result_dict = {}
counter_dict = {}
info_dict = {}
for l in tqdm(lengths, leave=False, ncols=100, mininterval=1, desc='Prepare Batches'):
rows = length_dict[l]
tmp_data = {}
for key in data:
if (data[key].dtype == object):
tmp_data[key] = np.array([np.array(data[key][r]) for r in rows])
else:
tmp_data[key] = data[key][rows]
expression_length = len(tmp_data[global_p.C_HISTORY][0])
index_set = [i for i in range(expression_length)]
index_sets_dict = self._enum_subsets(index_set)
tmp_interim = None
for key in index_sets_dict:
acc_counter = 0
acc_len = 0
acc_sim = 0
sim_counter = 0
for index_list in index_sets_dict[key]:
p = self.predict(model, tmp_data, data_processor)
original_predict = self._gen_prediction_dict(p, tmp_data)
(predictions, interims) = self._boolean_evaluate(model, tmp_data, data_processor, index_list)
updated_predict = self._gen_prediction_dict(predictions, tmp_data)
if (tmp_interim is None):
tmp_interim = copy.deepcopy(interims)
else:
acc_sim += F.cosine_similarity(torch.from_numpy(tmp_interim), torch.from_numpy(interims), dim=(- 1)).mean()
tmp_interim = copy.deepcopy(interims)
sim_counter += 1
self._statistic_info(original_predict)
(unchanged_dict, changed_dict) = self._statistic_of_difference(original_predict, updated_predict)
(tmp_counter, tmp_len) = self._accuracy_calc_from_dict(original_predict, updated_predict)
acc_counter += tmp_counter
acc_len += tmp_len
tmp_str = ' '.join([str(e) for e in index_list])
if (tmp_str not in info_dict):
info_dict[tmp_str] = (tmp_counter / tmp_len)
accuracy = (acc_counter / acc_len)
similarity = (acc_sim / sim_counter)
if (key not in result_dict):
result_dict[key] = {'accuracy': accuracy, 'similarity': similarity}
counter_dict[key] = 1
else:
result_dict[key]['accuracy'] += accuracy
result_dict[key]['similarity'] += similarity
counter_dict[key] += 1
for key in result_dict:
logging.info('{} bit reverse average accuracy: {}\taverage similarity: {}'.format(str(key), (result_dict[key]['accuracy'] / counter_dict[key]), (result_dict[key]['similarity'] / counter_dict[key])))
logging.info('----------- Details ------------')
for key in info_dict:
logging.info(((str(key) + ': ') + str(info_dict[key])))
|
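# A quick worked example (toy input) of ProLogicRunner._enum_subsets above, which
# drives the boolean sensitivity test: for an expression of length 3 it yields
# every non-empty subset of bit positions, grouped by subset size.
import itertools

index_set = [0, 1, 2]
result_dict = {i: list(map(list, itertools.combinations(index_set, i))) for i in range(1, len(index_set) + 1)}
print(result_dict)
# {1: [[0], [1], [2]], 2: [[0, 1], [0, 2], [1, 2]], 3: [[0, 1, 2]]}
|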
def random_split_data(all_data_file, dataset_name, vt_ratio=0.1, u_f=None, i_f=None):
'\n randomly split an already generated dataset file *.all.csv -> *.train.csv, *.validation.csv, *.test.csv\n :param all_data_file: preprocessed dataset file *.all.csv\n :param dataset_name: name for the dataset\n :param vt_ratio: ratio of the validation and test sets\n :param u_f: user feature file *.user.csv\n :param i_f: item feature file *.item.csv\n :return: pandas dataframes for the training, validation and test sets\n '
dir_name = os.path.join(global_p.DATASET_DIR, dataset_name)
print('random_split_data', dir_name)
if (not os.path.exists(dir_name)):
os.mkdir(dir_name)
all_data = pd.read_csv(all_data_file, sep='\t')
vt_size = int((len(all_data) * vt_ratio))
validation_set = all_data.sample(n=vt_size).sort_index()
all_data = all_data.drop(validation_set.index)
test_set = all_data.sample(n=vt_size).sort_index()
train_set = all_data.drop(test_set.index)
train_set.to_csv(os.path.join(dir_name, (dataset_name + '.train.csv')), index=False, sep='\t')
validation_set.to_csv(os.path.join(dir_name, (dataset_name + '.validation.csv')), index=False, sep='\t')
test_set.to_csv(os.path.join(dir_name, (dataset_name + '.test.csv')), index=False, sep='\t')
if (u_f is not None):
copyfile(u_f, os.path.join(dir_name, (dataset_name + '.user.csv')))
if (i_f is not None):
copyfile(i_f, os.path.join(dir_name, (dataset_name + '.item.csv')))
return (train_set, validation_set, test_set)
|
def leave_out_by_time(all_data_file, dataset_name, leave_n=1, warm_n=5, u_f=None, i_f=None):
'\n Split train/validation/test by timestamp.\n By default, the interactions in all_data_file are already sorted by timestamp.\n :param all_data_file: preprocessed dataset file *.all.csv,which is sorted by timestamp.\n :param dataset_name: dataset name (used as the processed dataset name)\n :param leave_n: number of items that are left in validation and test set.\n :param warm_n: minimum number of interactions to leave in training dataset for each user.\n :param u_f: user feature file (not used here)\n :param i_f: item feature file (not used here)\n :return: pandas dataframe for training/validation/test sets\n '
dir_name = os.path.join(global_p.DATASET_DIR, dataset_name)
print('leave_out_by_time', dir_name, leave_n, warm_n)
if (not os.path.exists(dir_name)):
os.mkdir(dir_name)
all_data = pd.read_csv(all_data_file, sep='\t')
min_label = all_data['label'].min()
if (min_label > 0):
"\n Keep at least 'warm_n' number of interactions in training dataset. \n If user has less than 'warm_n' interactions, then keep all the interactions in training set.\n This is to guarantee that no cold start issue for validation and testing.\n "
train_set = all_data.groupby('uid').head(warm_n)
all_data = all_data.drop(train_set.index)
test_set = all_data.groupby('uid').tail(leave_n)
all_data = all_data.drop(test_set.index)
validation_set = all_data.groupby('uid').tail(leave_n)
all_data = all_data.drop(validation_set.index)
else:
"\n Keep at least 'warm_n' number of interactions in training dataset. \n If user has less than 'warm_n' interactions, then keep all the interactions in training set.\n This is to guarantee that no cold start issue for validation and testing.\n "
train_set = []
for (uid, group) in all_data.groupby('uid'):
(found, found_idx) = (0, (- 1))
for idx in group.index:
if (group.loc[(idx, 'label')] > 0):
found_idx = idx
found += 1
if (found >= warm_n):
break
if (found_idx > 0):
train_set.append(group.loc[:(found_idx + 1)])
train_set = pd.concat(train_set)
all_data = all_data.drop(train_set.index)
test_set = []
for (uid, group) in all_data.groupby('uid'):
(found, found_idx) = (0, (- 1))
for idx in reversed(group.index):
if (group.loc[(idx, 'label')] > 0):
found_idx = idx
found += 1
if (found >= leave_n):
break
if (found_idx > 0):
test_set.append(group.loc[found_idx:])
test_set = pd.concat(test_set)
all_data = all_data.drop(test_set.index)
validation_set = []
for (uid, group) in all_data.groupby('uid'):
(found, found_idx) = (0, (- 1))
for idx in reversed(group.index):
if (group.loc[(idx, 'label')] > 0):
found_idx = idx
found += 1
if (found >= leave_n):
break
if (found_idx > 0):
validation_set.append(group.loc[found_idx:])
validation_set = pd.concat(validation_set)
all_data = all_data.drop(validation_set.index)
train_set = pd.concat([train_set, all_data]).sort_index()
(validation_set, test_set) = (validation_set.sort_index(), test_set.sort_index())
train_set.to_csv(os.path.join(dir_name, (dataset_name + '.train.csv')), index=False, sep='\t')
validation_set.to_csv(os.path.join(dir_name, (dataset_name + '.validation.csv')), index=False, sep='\t')
test_set.to_csv(os.path.join(dir_name, (dataset_name + '.test.csv')), index=False, sep='\t')
if (u_f is not None):
copyfile(u_f, os.path.join(dir_name, (dataset_name + '.user.csv')))
if (i_f is not None):
copyfile(i_f, os.path.join(dir_name, (dataset_name + '.item.csv')))
return (train_set, validation_set, test_set)
|
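# A toy run-through (hypothetical single-user dataframe with all labels positive,
# so the simple branch applies) of leave_out_by_time above, with warm_n=2 and
# leave_n=1: the first warm_n interactions seed the training set, the last
# leave_n go to test, the last leave_n of what remains go to validation, and
# everything left over is returned to the training set.
import pandas as pd

all_data = pd.DataFrame({'uid': [1] * 6, 'iid': [10, 11, 12, 13, 14, 15], 'label': [1] * 6})  # sorted by time
warm_n, leave_n = 2, 1
train_set = all_data.groupby('uid').head(warm_n)        # iids 10, 11
rest = all_data.drop(train_set.index)
test_set = rest.groupby('uid').tail(leave_n)            # iid 15
rest = rest.drop(test_set.index)
validation_set = rest.groupby('uid').tail(leave_n)      # iid 14
rest = rest.drop(validation_set.index)
train_set = pd.concat([train_set, rest]).sort_index()   # iids 10, 11, 12, 13
print(train_set['iid'].tolist(), validation_set['iid'].tolist(), test_set['iid'].tolist())
# [10, 11, 12, 13] [14] [15]
|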
def group_user_interactions_csv(in_csv, out_csv, label='label', sep='\t'):
print('group_user_interactions_csv', out_csv)
all_data = pd.read_csv(in_csv, sep=sep)
group_inters = group_user_interactions_df(in_df=all_data, label=label)
group_inters.to_csv(out_csv, sep=sep, index=False)
return group_inters
|
def group_user_interactions_df(in_df, label='label', seq_sep=','):
all_data = in_df
if (label in all_data.columns):
all_data = all_data[(all_data[label] > 0)]
(uids, inters) = ([], [])
for (name, group) in all_data.groupby('uid'):
uids.append(name)
inters.append(seq_sep.join(group['iid'].astype(str).tolist()))
group_inters = pd.DataFrame()
group_inters['uid'] = uids
group_inters['iids'] = inters
return group_inters
|
def mean_reciprocal_rank(rs):
"Score is reciprocal of the rank of the first relevant item\n First element is 'rank 1'. Relevance is binary (nonzero is relevant).\n Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank\n >>> rs = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]\n >>> mean_reciprocal_rank(rs)\n 0.61111111111111105\n >>> rs = np.array([[0, 0, 0], [0, 1, 0], [1, 0, 0]])\n >>> mean_reciprocal_rank(rs)\n 0.5\n >>> rs = [[0, 0, 0, 1], [1, 0, 0], [1, 0, 0]]\n >>> mean_reciprocal_rank(rs)\n 0.75\n Args:\n rs: Iterator of relevance scores (list or numpy) in rank order\n (first element is the first item)\n Returns:\n Mean reciprocal rank\n "
rs = (np.asarray(r).nonzero()[0] for r in rs)
return np.mean([((1.0 / (r[0] + 1)) if r.size else 0.0) for r in rs])
|
def r_precision(r):
'Score is precision after all relevant documents have been retrieved\n Relevance is binary (nonzero is relevant).\n >>> r = [0, 0, 1]\n >>> r_precision(r)\n 0.33333333333333331\n >>> r = [0, 1, 0]\n >>> r_precision(r)\n 0.5\n >>> r = [1, 0, 0]\n >>> r_precision(r)\n 1.0\n Args:\n r: Relevance scores (list or numpy) in rank order\n (first element is the first item)\n Returns:\n R Precision\n '
r = (np.asarray(r) != 0)
z = r.nonzero()[0]
if (not z.size):
return 0.0
return np.mean(r[:(z[(- 1)] + 1)])
|
def precision_at_k(r, k):
'Score is precision @ k\n Relevance is binary (nonzero is relevant).\n >>> r = [0, 0, 1]\n >>> precision_at_k(r, 1)\n 0.0\n >>> precision_at_k(r, 2)\n 0.0\n >>> precision_at_k(r, 3)\n 0.33333333333333331\n >>> precision_at_k(r, 4)\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n ValueError: Relevance score length < k\n Args:\n r: Relevance scores (list or numpy) in rank order\n (first element is the first item)\n Returns:\n Precision @ k\n Raises:\n ValueError: len(r) must be >= k\n '
assert (k >= 1)
r = (np.asarray(r)[:k] != 0)
if (r.size != k):
raise ValueError('Relevance score length < k')
return np.mean(r)
|
def average_precision(r):
'Score is average precision (area under PR curve)\n Relevance is binary (nonzero is relevant).\n >>> r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]\n >>> delta_r = 1. / sum(r)\n >>> sum([sum(r[:x + 1]) / (x + 1.) * delta_r for x, y in enumerate(r) if y])\n 0.7833333333333333\n >>> average_precision(r)\n 0.78333333333333333\n Args:\n r: Relevance scores (list or numpy) in rank order\n (first element is the first item)\n Returns:\n Average precision\n '
r = (np.asarray(r) != 0)
out = [precision_at_k(r, (k + 1)) for k in range(r.size) if r[k]]
if (not out):
return 0.0
return np.mean(out)
|
def mean_average_precision(rs):
'Score is mean average precision\n Relevance is binary (nonzero is relevant).\n >>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]]\n >>> mean_average_precision(rs)\n 0.78333333333333333\n >>> rs = [[1, 1, 0, 1, 0, 1, 0, 0, 0, 1], [0]]\n >>> mean_average_precision(rs)\n 0.39166666666666666\n Args:\n rs: Iterator of relevance scores (list or numpy) in rank order\n (first element is the first item)\n Returns:\n Mean average precision\n '
return np.mean([average_precision(r) for r in rs])
|
def dcg_at_k(r, k, method=0):
'Score is discounted cumulative gain (dcg)\n Relevance is positive real values. Can use binary\n as the previous methods.\n Example from\n http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf\n >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]\n >>> dcg_at_k(r, 1)\n 3.0\n >>> dcg_at_k(r, 1, method=1)\n 3.0\n >>> dcg_at_k(r, 2)\n 5.0\n >>> dcg_at_k(r, 2, method=1)\n 4.2618595071429155\n >>> dcg_at_k(r, 10)\n 9.6051177391888114\n >>> dcg_at_k(r, 11)\n 9.6051177391888114\n Args:\n r: Relevance scores (list or numpy) in rank order\n (first element is the first item)\n k: Number of results to consider\n method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]\n If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]\n Returns:\n Discounted cumulative gain\n '
r = np.asfarray(r)[:k]
if r.size:
if (method == 0):
return (r[0] + np.sum((r[1:] / np.log2(np.arange(2, (r.size + 1))))))
elif (method == 1):
return np.sum((r / np.log2(np.arange(2, (r.size + 2)))))
else:
raise ValueError('method must be 0 or 1.')
return 0.0
|
def ndcg_at_k(r, k, method=0):
'Score is normalized discounted cumulative gain (ndcg)\n Relevance is positive real values. Can use binary\n as the previous methods.\n Example from\n http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf\n >>> r = [3, 2, 3, 0, 0, 1, 2, 2, 3, 0]\n >>> ndcg_at_k(r, 1)\n 1.0\n >>> r = [2, 1, 2, 0]\n >>> ndcg_at_k(r, 4)\n 0.9203032077642922\n >>> ndcg_at_k(r, 4, method=1)\n 0.96519546960144276\n >>> ndcg_at_k([0], 1)\n 0.0\n >>> ndcg_at_k([1], 2)\n 1.0\n Args:\n r: Relevance scores (list or numpy) in rank order\n (first element is the first item)\n k: Number of results to consider\n method: If 0 then weights are [1.0, 1.0, 0.6309, 0.5, 0.4307, ...]\n If 1 then weights are [1.0, 0.6309, 0.5, 0.4307, ...]\n Returns:\n Normalized discounted cumulative gain\n '
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if (not dcg_max):
return 0.0
return (dcg_at_k(r, k, method) / dcg_max)
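# Usage sketch (not part of the original metrics): build a rank-ordered binary relevance
# vector from model scores and ground-truth labels, then feed it to the metrics above.
# The arrays below are illustrative only.
def _demo_ranking_metrics():
    import numpy as np
    scores = np.array([0.2, 0.9, 0.4, 0.1])   # predicted scores for four candidates
    labels = np.array([0, 1, 0, 1])           # binary relevance of the same candidates
    r = labels[np.argsort(-scores)]           # relevance sorted by descending score -> [1, 0, 0, 1]
    return average_precision(r), ndcg_at_k(r, 4)  # -> (0.75, 0.75)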
|
def parse_global_args(parser):
parser.add_argument('--gpu', type=str, default='0', help='Set CUDA_VISIBLE_DEVICES')
parser.add_argument('--verbose', type=int, default=logging.INFO, help='Logging Level, 0, 10, ..., 50')
parser.add_argument('--log_file', type=str, default='../log/log.txt', help='Logging file path')
parser.add_argument('--result_file', type=str, default='../result/result.npy', help='Result file path')
parser.add_argument('--random_seed', type=int, default=2022, help='Random seed of numpy and pytorch')
parser.add_argument('--train', type=int, default=1, help='To train the model or not.')
return parser
|
def balance_data(data):
pos_indexes = np.where((data['Y'] == 1))[0]
copy_num = int(((len(data['Y']) - len(pos_indexes)) / len(pos_indexes)))
if (copy_num > 1):
copy_indexes = np.tile(pos_indexes, copy_num)
sample_index = np.concatenate([np.arange(0, len(data['Y'])), copy_indexes])
for k in data:
data[k] = data[k][sample_index]
return data
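# Sketch of the effect (not in the original): with 1 positive and 3 negatives, copy_num == 3,
# so the positive row is tiled three extra times:
# balance_data({'Y': np.array([1, 0, 0, 0]), 'X': np.arange(4)})['Y'] -> array([1, 0, 0, 0, 1, 1, 1])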
|
def input_data_is_list(data):
if ((type(data) is list) or (type(data) is tuple)):
print('input_data_is_list')
new_data = {}
for k in data[0]:
new_data[k] = np.concatenate([d[k] for d in data])
return new_data
return data
|
def format_metric(metric):
'\n convert output into string\n :param metric:\n :return:\n '
if ((type(metric) is not tuple) and (type(metric) is not list)):
metric = [metric]
format_str = []
if ((type(metric) is tuple) or (type(metric) is list)):
for m in metric:
if ((type(m) is float) or (type(m) is np.float32) or (type(m) is np.float64)):
format_str.append(('%.4f' % m))
elif ((type(m) is int) or (type(m) is np.int32) or (type(m) is np.int64)):
format_str.append(('%d' % m))
return ','.join(format_str)
|
def shuffle_in_unison_scary(data):
rng_state = np.random.get_state()
for d in data:
np.random.set_state(rng_state)
np.random.shuffle(data[d])
return data
|
def best_result(metric, results_list):
if ((type(metric) is list) or (type(metric) is tuple)):
metric = metric[0]
if (metric in LOWER_METRIC_LIST):
return min(results_list)
return max(results_list)
|
def strictly_increasing(l):
return all(((x < y) for (x, y) in zip(l, l[1:])))
|
def strictly_decreasing(l):
return all(((x > y) for (x, y) in zip(l, l[1:])))
|
def non_increasing(l):
return all(((x >= y) for (x, y) in zip(l, l[1:])))
|
def non_decreasing(l):
return all(((x <= y) for (x, y) in zip(l, l[1:])))
|
def monotonic(l):
return (non_increasing(l) or non_decreasing(l))
|
def numpy_to_torch(d):
t = torch.from_numpy(d)
if (torch.cuda.device_count() > 0):
t = t.cuda()
return t
|
def conv3x3(in_planes, out_planes, stride=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
class ResNet(nn.Module):
def __init__(self, block, layers, embedding_size=64):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc_embed = nn.Linear((256 * block.expansion), embedding_size)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc_embed(x)
return x
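# Shape-check sketch (not part of the original model code, assumes torch / torch.nn are
# imported as in the surrounding code): this truncated three-stage ResNet maps a 112x112
# RGB batch to `embedding_size`-dimensional embeddings.
def _demo_resnet_embedding():
    import torch
    net = ResNet(BasicBlock, [2, 2, 2], embedding_size=64)
    x = torch.randn(2, 3, 112, 112)  # 112 -> 56 (conv1) -> 28 (maxpool) -> 14 -> 7 -> 1 (avgpool)
    return net(x).shape              # torch.Size([2, 64])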
|
def resnet18(pretrained=False, **kwargs):
'Constructs a truncated ResNet-18-style embedding model (three residual stages).\n\n Args:\n pretrained (bool): If True, initializes matching layers from a ResNet-18 pre-trained on ImageNet\n '
model = ResNet(BasicBlock, [2, 2, 2], **kwargs)
if pretrained:
state = model.state_dict()
loaded_state_dict = model_zoo.load_url(model_urls['resnet18'])
for k in loaded_state_dict:
if (k in state):
state[k] = loaded_state_dict[k]
model.load_state_dict(state)
return model
|
def main():
global args, best_acc
args = parser.parse_args()
args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
if args.visdom:
global plotter
plotter = VisdomLinePlotter(env_name=args.name)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
global conditions
if (args.conditions is not None):
conditions = args.conditions
else:
conditions = [0, 1, 2, 3]
kwargs = ({'num_workers': 8, 'pin_memory': True} if args.cuda else {})
print('Loading Train Dataset')
train_loader = torch.utils.data.DataLoader(TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json', conditions, 'train', n_triplets=args.num_traintriplets, transform=transforms.Compose([transforms.Resize(112), transforms.CenterCrop(112), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=True, **kwargs)
print('Loading Test Dataset')
test_loader = torch.utils.data.DataLoader(TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json', conditions, 'test', n_triplets=160000, transform=transforms.Compose([transforms.Resize(112), transforms.CenterCrop(112), transforms.ToTensor(), normalize])), batch_size=64, shuffle=True, **kwargs)
print('Loading Val Dataset')
val_loader = torch.utils.data.DataLoader(TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json', conditions, 'val', n_triplets=80000, transform=transforms.Compose([transforms.Resize(112), transforms.CenterCrop(112), transforms.ToTensor(), normalize])), batch_size=64, shuffle=True, **kwargs)
model = Resnet_18.resnet18(pretrained=True, embedding_size=args.dim_embed)
csn_model = ConditionalSimNet(model, n_conditions=args.num_concepts, embedding_size=args.dim_embed, learnedmask=args.learned, prein=args.prein)
global mask_var
mask_var = csn_model.masks.weight
tnet = CS_Tripletnet(csn_model, args.num_concepts)
if args.cuda:
tnet.cuda()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
tnet.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
criterion = torch.nn.MarginRankingLoss(margin=args.margin)
parameters = filter((lambda p: p.requires_grad), tnet.parameters())
optimizer = optim.Adam(parameters, lr=args.lr)
n_parameters = sum([p.data.nelement() for p in tnet.parameters()])
print(' + Number of params: {}'.format(n_parameters))
if args.test:
checkpoint = torch.load((('runs/%s/' % 'new_context_4/') + 'model_best.pth.tar'))
tnet.load_state_dict(checkpoint['state_dict'])
test_acc = test(test_loader, tnet, criterion, 1)
sys.exit()
for epoch in range(args.start_epoch, (args.epochs + 1)):
adjust_learning_rate(optimizer, epoch)
train(train_loader, tnet, criterion, optimizer, epoch)
acc = test(val_loader, tnet, criterion, epoch)
is_best = (acc > best_acc)
best_acc = max(acc, best_acc)
save_checkpoint({'epoch': (epoch + 1), 'state_dict': tnet.state_dict(), 'best_prec1': best_acc}, is_best)
checkpoint = torch.load((('runs/%s/' % args.name) + 'model_best.pth.tar'))
tnet.load_state_dict(checkpoint['state_dict'])
test_acc = test(test_loader, tnet, criterion, 1)
|
def train(train_loader, tnet, criterion, optimizer, epoch):
losses = AverageMeter()
accs = AverageMeter()
emb_norms = AverageMeter()
mask_norms = AverageMeter()
tnet.train()
for (batch_idx, (data1, data2, data3, c)) in enumerate(train_loader):
if args.cuda:
(data1, data2, data3, c) = (data1.cuda(), data2.cuda(), data3.cuda(), c.cuda())
(data1, data2, data3, c) = (Variable(data1), Variable(data2), Variable(data3), Variable(c))
(dista, distb, mask_norm, embed_norm, mask_embed_norm) = tnet(data1, data2, data3, c)
target = torch.FloatTensor(dista.size()).fill_(1)
if args.cuda:
target = target.cuda()
target = Variable(target)
loss_triplet = criterion(dista, distb, target)
loss_embedd = (embed_norm / np.sqrt(data1.size(0)))
loss_mask = (mask_norm / data1.size(0))
loss = ((loss_triplet + (args.embed_loss * loss_embedd)) + (args.mask_loss * loss_mask))
acc = accuracy(dista, distb)
losses.update(loss_triplet.data[0], data1.size(0))
accs.update(acc, data1.size(0))
emb_norms.update(loss_embedd.data[0])
mask_norms.update(loss_mask.data[0])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ((batch_idx % args.log_interval) == 0):
print('Train Epoch: {} [{}/{}]\tLoss: {:.4f} ({:.4f}) \tAcc: {:.2f}% ({:.2f}%) \tEmb_Norm: {:.2f} ({:.2f})'.format(epoch, (batch_idx * len(data1)), len(train_loader.dataset), losses.val, losses.avg, (100.0 * accs.val), (100.0 * accs.avg), emb_norms.val, emb_norms.avg))
if args.visdom:
plotter.plot('acc', 'train', epoch, accs.avg)
plotter.plot('loss', 'train', epoch, losses.avg)
plotter.plot('emb_norms', 'train', epoch, emb_norms.avg)
plotter.plot('mask_norms', 'train', epoch, mask_norms.avg)
if ((epoch % 10) == 0):
plotter.plot_mask(torch.nn.functional.relu(mask_var).data.cpu().numpy().T, epoch)
|
def test(test_loader, tnet, criterion, epoch):
losses = AverageMeter()
accs = AverageMeter()
accs_cs = {}
for condition in conditions:
accs_cs[condition] = AverageMeter()
tnet.eval()
tnet.embeddingnet.eval()
tnet.embeddingnet.embeddingnet.eval()
for (batch_idx, (data1, data2, data3, c)) in enumerate(test_loader):
if args.cuda:
(data1, data2, data3, c) = (data1.cuda(), data2.cuda(), data3.cuda(), c.cuda())
(data1, data2, data3, c) = (Variable(data1), Variable(data2), Variable(data3), Variable(c))
c_test = c
(dista, distb, _, _, _) = tnet(data1, data2, data3, c)
target = torch.FloatTensor(dista.size()).fill_(1)
if args.cuda:
target = target.cuda()
target = Variable(target)
test_loss = criterion(dista, distb, target).data[0]
acc = accuracy(dista, distb)
accs.update(acc, data1.size(0))
for condition in conditions:
accs_cs[condition].update(accuracy_id(dista, distb, c_test, condition), data1.size(0))
losses.update(test_loss, data1.size(0))
print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(losses.avg, (100.0 * accs.avg)))
if args.visdom:
for condition in conditions:
plotter.plot('accs', 'acc_{}'.format(condition), epoch, accs_cs[condition].avg)
plotter.plot(args.name, args.name, epoch, accs.avg, env='overview')
plotter.plot('acc', 'test', epoch, accs.avg)
plotter.plot('loss', 'test', epoch, losses.avg)
return accs.avg
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
'Saves checkpoint to disk'
directory = ('runs/%s/' % args.name)
if (not os.path.exists(directory)):
os.makedirs(directory)
filename = (directory + filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, (('runs/%s/' % args.name) + 'model_best.pth.tar'))
|
class VisdomLinePlotter(object):
'Plots to Visdom'
def __init__(self, env_name='main'):
self.viz = Visdom()
self.env = env_name
self.plots = {}
def plot(self, var_name, split_name, x, y, env=None):
if (env is not None):
print_env = env
else:
print_env = self.env
if (var_name not in self.plots):
self.plots[var_name] = self.viz.line(X=np.array([x, x]), Y=np.array([y, y]), env=print_env, opts=dict(legend=[split_name], title=var_name, xlabel='Epochs', ylabel=var_name))
else:
self.viz.updateTrace(X=np.array([x]), Y=np.array([y]), env=print_env, win=self.plots[var_name], name=split_name)
def plot_mask(self, masks, epoch):
self.viz.bar(X=masks, env=self.env, opts=dict(stacked=True, title=epoch))
|
class AverageMeter(object):
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
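# Usage sketch (not in the original): m = AverageMeter(); m.update(0.5, n=32); m.update(0.3, n=32); m.avg == 0.4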
|
def adjust_learning_rate(optimizer, epoch):
'Decays the learning rate exponentially from the initial LR: lr = args.lr * 0.985 ** epoch'
lr = (args.lr * ((1 - 0.015) ** epoch))
if args.visdom:
plotter.plot('lr', 'learning rate', epoch, lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
|
def accuracy(dista, distb):
margin = 0
pred = ((dista - distb) - margin).cpu().data
return (((pred > 0).sum() * 1.0) / dista.size()[0])
|
def accuracy_id(dista, distb, c, c_id):
margin = 0
pred = ((dista - distb) - margin).cpu().data
return ((((pred > 0) * (c.cpu().data == c_id)).sum() * 1.0) / (c.cpu().data == c_id).sum())
|
def default_image_loader(path):
return Image.open(path).convert('RGB')
|
class TripletImageLoader(torch.utils.data.Dataset):
def __init__(self, root, base_path, filenames_filename, conditions, split, n_triplets, transform=None, loader=default_image_loader):
" filenames_filename: A text file with each line containing the path to an image e.g.,\n images/class1/sample.jpg\n triplets_file_name: A text file with each line containing three integers, \n where integer i refers to the i-th image in the filenames file. \n For a line of intergers 'a b c', a triplet is defined such that image a is more \n similar to image c than it is to image b, e.g., \n 0 2017 42 "
self.root = root
self.base_path = base_path
self.filenamelist = []
for line in open(os.path.join(self.root, filenames_filename)):
self.filenamelist.append(line.rstrip('\n'))
triplets = []
if (split == 'train'):
fnames = filenames['train']
elif (split == 'val'):
fnames = filenames['val']
else:
fnames = filenames['test']
for condition in conditions:
for line in open(os.path.join(self.root, 'tripletlists', fnames[condition])):
triplets.append((line.split()[0], line.split()[1], line.split()[2], condition))
np.random.shuffle(triplets)
self.triplets = triplets[:int((((n_triplets * 1.0) * len(conditions)) / 4))]
self.transform = transform
self.loader = loader
def __getitem__(self, index):
(path1, path2, path3, c) = self.triplets[index]
if (os.path.exists(os.path.join(self.root, self.base_path, self.filenamelist[int(path1)])) and os.path.exists(os.path.join(self.root, self.base_path, self.filenamelist[int(path2)])) and os.path.exists(os.path.join(self.root, self.base_path, self.filenamelist[int(path3)]))):
img1 = self.loader(os.path.join(self.root, self.base_path, self.filenamelist[int(path1)]))
img2 = self.loader(os.path.join(self.root, self.base_path, self.filenamelist[int(path2)]))
img3 = self.loader(os.path.join(self.root, self.base_path, self.filenamelist[int(path3)]))
if (self.transform is not None):
img1 = self.transform(img1)
img2 = self.transform(img2)
img3 = self.transform(img3)
return (img1, img2, img3, c)
else:
return None
def __len__(self):
return len(self.triplets)
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/aachen_lc', 'dienstwagen.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_lc', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_lc', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_lc', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='gt_node', name='online_fgo', namespace='deutschland', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}], remappings=[('/irt_gpio/novatel/pps', '/irt_gpio_novatel/jetson_pps')])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/aachen_tc', 'dienstwagen.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_tc', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_tc', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/aachen_tc', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='online_fgo_node', name='online_fgo', namespace='deutschland', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}], remappings=[('/irt_gpio/novatel/pps', '/irt_gpio_novatel/jetson_pps')])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/boreas', 'car_boreas.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/boreas', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/boreas', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/boreas', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='boreas_node', name='online_fgo', namespace='boreas', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/urbanloco_ca', 'car_ca.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_ca', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_ca', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_ca', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='urbanloco_node', name='online_fgo', namespace='urbanloco', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/deuschland_lc', 'dienstwagen.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/deuschland_lc', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/deuschland_lc', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/deuschland_lc', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='gt_node', name='online_fgo', namespace='deutschland', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/deuschland_lc', 'dienstwagen.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/deutschland_tc', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/deutschland_tc', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/deutschland_tc', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='online_fgo_node', name='online_fgo', namespace='deutschland', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
xacro_path = os.path.join(share_dir, 'config/urbanloco_hk', 'car_ca.urdf.xacro')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_hk', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_hk', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/urbanloco_hk', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='urbanloco_node', name='online_fgo', namespace='urbanloco', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
robot_des = Node(package='robot_state_publisher', executable='robot_state_publisher', name='robot_state_publisher', output='screen', parameters=[{'robot_description': Command(['xacro', ' ', xacro_path])}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
ld.add_action(robot_des)
return ld
|
def get_params(p):
with open(p, 'r') as f:
return yaml.safe_load(f)
|
def generate_launch_description():
logger = LaunchConfiguration('log_level')
share_dir = get_package_share_directory('online_fgo')
config_common_path = LaunchConfiguration('config_common_path')
default_config_common = os.path.join(get_package_share_directory('online_fgo'), 'config/shipping', 'common.yaml')
default_config_integrator = os.path.join(get_package_share_directory('online_fgo'), 'config/shipping', 'integrator.yaml')
default_config_optimizer = os.path.join(get_package_share_directory('online_fgo'), 'config/shipping', 'optimizer.yaml')
declare_config_common_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_common, description='CommonParameters')
declare_config_integrtor_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_integrator, description='IntegratorParameters')
declare_config_optimizer_path_cmd = DeclareLaunchArgument('config_common_path', default_value=default_config_optimizer, description='OptimizerParameters')
online_fgo_node = Node(package='online_fgo', executable='boreas_node', name='online_fgo', namespace='boreas', output='screen', emulate_tty=True, parameters=[config_common_path, default_config_common, default_config_integrator, default_config_optimizer, {}])
plot_node = Node(package='rqt_plot', executable='rqt_plot', name='rqt_plot_fgo', output='screen', parameters=[{'use_sim_time': True}])
ld = LaunchDescription()
ld.add_action(DeclareLaunchArgument('log_level', default_value=['debug'], description='Logging level'))
ld.add_action(declare_config_common_path_cmd)
ld.add_action(declare_config_integrtor_path_cmd)
ld.add_action(declare_config_optimizer_path_cmd)
ld.add_action(online_fgo_node)
return ld
|
class Config():
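# Scope-aware flag container (legacy TF 1.x / Python 2 style): keeps a stack of Scope dicts keyed by
# tf.get_variable_scope().name, so lookups fall through from the innermost matching scope to the
# global FLAGS values captured in __init__.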
def __init__(self):
root = self.Scope('')
for (k, v) in FLAGS.__dict__['__flags'].iteritems():
root[k] = v
self.stack = [root]
def iteritems(self):
return self.to_dict().iteritems()
def to_dict(self):
self._pop_stale()
out = {}
for i in range(len(self.stack)):
cs = self.stack[(- i)]
for name in cs:
out[name] = cs[name]
return out
def _pop_stale(self):
var_scope_name = tf.get_variable_scope().name
top = self.stack[0]
while (not top.contains(var_scope_name)):
self.stack.pop(0)
top = self.stack[0]
def __getitem__(self, name):
self._pop_stale()
for i in range(len(self.stack)):
cs = self.stack[i]
if (name in cs):
return cs[name]
raise KeyError(name)
def set_default(self, name, value):
if (not (name in self)):
self[name] = value
def __contains__(self, name):
self._pop_stale()
for i in range(len(self.stack)):
cs = self.stack[i]
if (name in cs):
return True
return False
def __setitem__(self, name, value):
self._pop_stale()
top = self.stack[0]
var_scope_name = tf.get_variable_scope().name
assert top.contains(var_scope_name)
if (top.name != var_scope_name):
top = self.Scope(var_scope_name)
self.stack.insert(0, top)
top[name] = value
class Scope(dict):
def __init__(self, name):
self.name = name
def contains(self, var_scope_name):
return var_scope_name.startswith(self.name)
|
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
'Generate batches of ImageNet images for evaluation.\n\n Use this function as the inputs for evaluating a network.\n\n Note that some (minimal) image preprocessing occurs during evaluation\n including central cropping and resizing of the image to fit the network.\n\n Args:\n dataset: instance of Dataset class specifying the dataset.\n batch_size: integer, number of examples in batch\n num_preprocess_threads: integer, total number of preprocessing threads but\n None defaults to FLAGS.num_preprocess_threads.\n\n Returns:\n images: Images. 4D tensor of size [batch_size, FLAGS.image_size,\n image_size, 3].\n labels: 1-D integer Tensor of [FLAGS.batch_size].\n '
if (not batch_size):
batch_size = FLAGS.batch_size
with tf.device('/cpu:0'):
(images, labels) = batch_inputs(dataset, batch_size, train=False, num_preprocess_threads=num_preprocess_threads, num_readers=1)
return (images, labels)
|
def decode_jpeg(image_buffer, scope=None):
'Decode a JPEG string into one 3-D float image Tensor.\n\n Args:\n image_buffer: scalar string Tensor.\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor with values ranging from [0, 1).\n '
with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
|
def distort_color(image, thread_id=0, scope=None):
'Distort the color of the image.\n\n Each color distortion is non-commutative and thus ordering of the color ops\n matters. Ideally we would randomly permute the ordering of the color ops.\n Rather than adding that level of complication, we select a distinct ordering\n of color ops for each preprocessing thread.\n\n Args:\n image: Tensor containing single image.\n thread_id: preprocessing thread ID.\n scope: Optional scope for op_scope.\n Returns:\n color-distorted image\n '
with tf.op_scope([image], scope, 'distort_color'):
color_ordering = (thread_id % 2)
if (color_ordering == 0):
image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif (color_ordering == 1):
image = tf.image.random_brightness(image, max_delta=(32.0 / 255.0))
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.clip_by_value(image, 0.0, 1.0)
return image
|
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
'Distort one image for training a network.\n\n Distorting images provides a useful technique for augmenting the data\n set during training in order to make the network invariant to aspects\n of the image that do not affect the label.\n\n Args:\n image: 3-D float Tensor of image\n height: integer\n width: integer\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged\n as [ymin, xmin, ymax, xmax].\n thread_id: integer indicating the preprocessing thread.\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor of distorted image used for training.\n '
with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
distorted_image = image
resize_method = (thread_id % 4)
distorted_image = tf.image.resize_images(distorted_image, height, width, resize_method)
distorted_image.set_shape([height, width, 3])
if (not thread_id):
tf.image_summary('cropped_resized_image', tf.expand_dims(distorted_image, 0))
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = distort_color(distorted_image, thread_id)
if (not thread_id):
tf.image_summary('final_distorted_image', tf.expand_dims(distorted_image, 0))
return distorted_image
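# Note (added): thread_id % 4 cycles through the four interpolation methods of the
# pre-1.0 tf.image.resize_images API (0 bilinear, 1 nearest-neighbor, 2 bicubic,
# 3 area), so each preprocessing thread resizes with a different method, adding a
# little extra variety on top of the color distortion and random flip.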
|
def eval_image(image, height, width, scope=None):
'Prepare one image for evaluation.\n\n Args:\n image: 3-D float Tensor\n height: integer\n width: integer\n scope: Optional scope for op_scope.\n Returns:\n 3-D float Tensor of prepared image.\n '
with tf.op_scope([image, height, width], scope, 'eval_image'):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
return image
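# Worked example (added as a note): with the default central_fraction of 0.875, a
# 256x256 decoded image is cropped to 224x224 (256 * 0.875 = 224) and then
# bilinearly resized to the requested height x width, matching the usual
# 224-pixel evaluation crop.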
|
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
'Decode and preprocess one image for evaluation or training.\n\n Args:\n image_buffer: JPEG encoded string Tensor\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n train: boolean\n thread_id: integer indicating preprocessing thread\n\n Returns:\n 3-D float Tensor containing an appropriately scaled image\n\n Raises:\n ValueError: if user does not provide bounding box\n '
if (bbox is None):
raise ValueError('Please supply a bounding box.')
image = decode_jpeg(image_buffer)
height = FLAGS.input_size
width = FLAGS.input_size
if train:
image = distort_image(image, height, width, bbox, thread_id)
else:
image = eval_image(image, height, width)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
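# Note (added): the final subtract/multiply maps the decoded [0, 1] image to the
# [-1, 1] range expected downstream; tf.sub and tf.mul are the pre-1.0 names for
# tf.subtract and tf.multiply.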
|
def parse_example_proto(example_serialized):
"Parses an Example proto containing a training example of an image.\n\n The output of the build_image_data.py image preprocessing script is a dataset\n containing serialized Example protocol buffers. Each Example proto contains\n the following fields:\n\n image/height: 462\n image/width: 581\n image/colorspace: 'RGB'\n image/channels: 3\n image/class/label: 615\n image/class/synset: 'n03623198'\n image/class/text: 'knee pad'\n image/object/bbox/xmin: 0.1\n image/object/bbox/xmax: 0.9\n image/object/bbox/ymin: 0.2\n image/object/bbox/ymax: 0.6\n image/object/bbox/label: 615\n image/format: 'JPEG'\n image/filename: 'ILSVRC2012_val_00041207.JPEG'\n image/encoded: <JPEG encoded string>\n\n Args:\n example_serialized: scalar Tensor tf.string containing a serialized\n Example protocol buffer.\n\n Returns:\n filename: Tensor tf.string containing the filename\n label: Tensor tf.int32 containing the label.\n bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]\n where each coordinate is [0, 1) and the coordinates are arranged as\n [ymin, xmin, ymax, xmax].\n text: Tensor tf.string containing the human-readable label.\n "
feature_map = {'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=(- 1)), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value='')}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
feature_map.update({k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return (features['image/filename'], label, bbox, features['image/class/text'])
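# Shape walk-through (added): each coordinate tensor is [1, num_boxes] after
# expand_dims; tf.concat along axis 0 stacks them to [4, num_boxes]; the second
# expand_dims gives [1, 4, num_boxes]; and the final transpose with perm
# [0, 2, 1] yields the documented [1, num_boxes, 4] layout, with each box
# ordered [ymin, xmin, ymax, xmax].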
|
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None, num_readers=1):
'Construct batches of training or evaluation examples from the image dataset.\n\n Args:\n dataset: instance of Dataset class specifying the dataset.\n See dataset.py for details.\n batch_size: integer\n train: boolean\n num_preprocess_threads: integer, total number of preprocessing threads\n num_readers: integer, number of parallel readers\n\n Returns:\n images: 4-D float Tensor of a batch of images\n labels: 1-D integer Tensor of [batch_size].\n\n Raises:\n ValueError: if data is not found\n '
with tf.name_scope('batch_processing'):
data_files = dataset.data_files()
if (data_files is None):
raise ValueError('No data files found for this dataset')
if train:
filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)
else:
filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1)
if (num_preprocess_threads is None):
num_preprocess_threads = FLAGS.num_preprocess_threads
if (num_preprocess_threads % 4):
raise ValueError('Please make num_preprocess_threads a multiple of 4 (%d %% 4 != 0).' % num_preprocess_threads)
if (num_readers is None):
num_readers = FLAGS.num_readers
if (num_readers < 1):
raise ValueError('Please make num_readers at least 1')
examples_per_shard = 1024
min_queue_examples = (examples_per_shard * FLAGS.input_queue_memory_factor)
if train:
examples_queue = tf.RandomShuffleQueue(capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples, dtypes=[tf.string])
else:
examples_queue = tf.FIFOQueue(capacity=(examples_per_shard + (3 * batch_size)), dtypes=[tf.string])
reader = tf.TFRecordReader()
(_, example_serialized) = reader.read(filename_queue)
(filename, label_index, bbox, label_text) = parse_example_proto(example_serialized)
fn = ((((FLAGS.data_dir + '/') + label_text) + '/') + filename)
examples_qr = tf.train.queue_runner.QueueRunner(examples_queue, [examples_queue.enqueue([fn])])
tf.train.queue_runner.add_queue_runner(examples_qr)
images_and_labels = []
for thread_id in range(num_preprocess_threads):
whole_file_reader = tf.WholeFileReader()
(_, image_buffer) = whole_file_reader.read(examples_queue)
image = image_preprocessing(image_buffer, bbox, train, thread_id)
images_and_labels.append([image, label_index])
(images, label_index_batch) = tf.train.batch_join(images_and_labels, batch_size=batch_size, capacity=((2 * num_preprocess_threads) * batch_size))
height = FLAGS.image_size
width = FLAGS.image_size
depth = 3
images = tf.cast(images, tf.float32)
images = tf.reshape(images, shape=[batch_size, height, width, depth])
tf.image_summary('images', images)
return (images, tf.reshape(label_index_batch, [batch_size]))
|
def inference(x, is_training, num_classes=1000, num_blocks=[3, 4, 6, 3], use_bias=False, bottleneck=True):
c = Config()
c['bottleneck'] = bottleneck
c['is_training'] = tf.convert_to_tensor(is_training, dtype='bool', name='is_training')
c['ksize'] = 3
c['stride'] = 1
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['stack_stride'] = 2
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 64
c['ksize'] = 7
c['stride'] = 2
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('scale2'):
x = _max_pool(x, ksize=3, stride=2)
c['num_blocks'] = num_blocks[0]
c['stack_stride'] = 1
c['block_filters_internal'] = 64
x = stack(x, c)
with tf.variable_scope('scale3'):
c['num_blocks'] = num_blocks[1]
c['block_filters_internal'] = 128
assert (c['stack_stride'] == 2)
x = stack(x, c)
with tf.variable_scope('scale4'):
c['num_blocks'] = num_blocks[2]
c['block_filters_internal'] = 256
x = stack(x, c)
with tf.variable_scope('scale5'):
c['num_blocks'] = num_blocks[3]
c['block_filters_internal'] = 512
x = stack(x, c)
x = tf.reduce_mean(x, reduction_indices=[1, 2], name='avg_pool')
if (num_classes is not None):
with tf.variable_scope('fc'):
x = fc(x, c)
return x
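# Hedged usage sketch (added): building ResNet-50 logits from a batch of
# preprocessed images. With bottleneck=True, block counts of [3, 4, 6, 3] give
# the 50-layer variant; [3, 4, 23, 3] would give ResNet-101.
def _demo_resnet50_logits(images):
    return inference(images, is_training=True, num_classes=1000,
                     num_blocks=[3, 4, 6, 3], bottleneck=True)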
|
def inference_small(x, is_training, num_blocks=3, use_bias=False, num_classes=10):
c = Config()
c['is_training'] = tf.convert_to_tensor(is_training, dtype='bool', name='is_training')
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['num_classes'] = num_classes
return inference_small_config(x, c)
|
def inference_small_config(x, c):
c['bottleneck'] = False
c['ksize'] = 3
c['stride'] = 1
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 16
c['block_filters_internal'] = 16
c['stack_stride'] = 1
x = conv(x, c)
x = bn(x, c)
x = activation(x)
x = stack(x, c)
with tf.variable_scope('scale2'):
c['block_filters_internal'] = 32
c['stack_stride'] = 2
x = stack(x, c)
with tf.variable_scope('scale3'):
c['block_filters_internal'] = 64
c['stack_stride'] = 2
x = stack(x, c)
x = tf.reduce_mean(x, reduction_indices=[1, 2], name='avg_pool')
if (c['num_classes'] is not None):
with tf.variable_scope('fc'):
x = fc(x, c)
return x
|
def _imagenet_preprocess(rgb):
'Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted.'
(red, green, blue) = tf.split(3, 3, (rgb * 255.0))
bgr = tf.concat(3, [blue, green, red])
bgr -= IMAGENET_MEAN_BGR
return bgr
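# Note (added): IMAGENET_MEAN_BGR is defined elsewhere in this codebase; the usual
# VGG-style convention is a per-channel BGR mean of roughly [103.939, 116.779,
# 123.68], subtracted here after the RGB [0, 1] image is rescaled to [0, 255] and
# reordered to BGR by the tf.split/tf.concat calls above.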
|
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n(([cross_entropy_mean] + regularization_losses))
tf.scalar_summary('loss', loss_)
return loss_
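# Hedged usage sketch (added): composing inference() and loss() into a training
# objective. Regularization terms registered by the conv/fc layers are picked up
# from tf.GraphKeys.REGULARIZATION_LOSSES automatically.
def _demo_training_loss(images, labels):
    logits = inference(images, is_training=True)
    return loss(logits, labels)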
|
def stack(x, c):
for n in range(c['num_blocks']):
s = (c['stack_stride'] if (n == 0) else 1)
c['block_stride'] = s
with tf.variable_scope(('block%d' % (n + 1))):
x = block(x, c)
return x
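# Note (added): only the first block of a stack uses c['stack_stride']; all later
# blocks run with stride 1, so a stack configured with stack_stride 2 halves the
# spatial resolution exactly once, in its first block.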
|