| body<br>(string, lengths 26–98.2k) | body_hash<br>(int64, −9,222,864,604,528,158,000 to 9,221,803,474B) | docstring<br>(string, lengths 1–16.8k) | path<br>(string, lengths 5–230) | name<br>(string, lengths 1–96) | repository_name<br>(string, lengths 7–89) | lang<br>(stringclasses, 1 value) | body_without_docstring<br>(string, lengths 20–98.2k) |
|---|---|---|---|---|---|---|---|
def largest_negative_number(seq_seq):
'\n Returns the largest NEGATIVE number in the given sequence of\n sequences of numbers. Returns None if there are no negative numbers\n in the sequence of sequences.\n\n For example, if the given argument is:\n [(30, -5, 8, -20),\n (100, -2.6, 88, -40, -5),\n (400, 500)\n ]\n then this function returns -2.6.\n\n As another example, if the given argument is:\n [(200, 2, 20), (500, 400)]\n then this function returns None.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences,\n where each subsequence contains only numbers.\n '
s = []
for k in range(len(seq_seq)):
s2 = seq_seq[k]
if (s2 != []):
s = (s + [max(s2)])
return max(s)
| -3,513,091,658,034,349,600
|
Returns the largest NEGATIVE number in the given sequence of
sequences of numbers. Returns None if there are no negative numbers
in the sequence of sequences.
For example, if the given argument is:
[(30, -5, 8, -20),
(100, -2.6, 88, -40, -5),
(400, 500)
]
then this function returns -2.6.
As another example, if the given argument is:
[(200, 2, 20), (500, 400)]
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
|
src/m3_more_nested_loops_in_sequences.py
|
largest_negative_number
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def largest_negative_number(seq_seq):
'\n Returns the largest NEGATIVE number in the given sequence of\n sequences of numbers. Returns None if there are no negative numbers\n in the sequence of sequences.\n\n For example, if the given argument is:\n [(30, -5, 8, -20),\n (100, -2.6, 88, -40, -5),\n (400, 500)\n ]\n then this function returns -2.6.\n\n As another example, if the given argument is:\n [(200, 2, 20), (500, 400)]\n then this function returns None.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences,\n where each subsequence contains only numbers.\n '
s = []
for k in range(len(seq_seq)):
s2 = seq_seq[k]
if (s2 != []):
s = (s + [max(s2)])
return max(s)
|
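
Note that the body above does not implement its docstring: it collects the maximum of every non-empty subsequence (negative or not) and raises ValueError on an empty outer list instead of returning None. A minimal sketch of what the docstring actually specifies, checked against its own examples:

```python
# Hedged sketch of the documented behavior (not the row's code, which
# returns the overall maximum rather than the largest negative).
def largest_negative_number(seq_seq):
    largest = None
    for subsequence in seq_seq:
        for number in subsequence:
            if number < 0 and (largest is None or number > largest):
                largest = number
    return largest

print(largest_negative_number([(30, -5, 8, -20),
                               (100, -2.6, 88, -40, -5),
                               (400, 500)]))                 # -2.6
print(largest_negative_number([(200, 2, 20), (500, 400)]))  # None
```
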
def run_test_first_is_elsewhere_too():
' Tests the first_is_elsewhere_too function. '
print()
print('-------------------------------------')
print('Testing the FIRST_IS_ELSEWHERE_TOO function:')
print('-------------------------------------')
message = {True: 'Your code PASSED this test.\n', False: 'Your code FAILED this test.\n'}
no_failures = True
expected = True
answer = first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 2, 13, 14]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([('a', 9), (13, 10, 11, 7, 'a'), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([('a', 9), (13, 10, 11, 7, 'aa'), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['a', (), (), (), 'a'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['a', (), (), (), 'a', ()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['a', (), (), (), 'b', ()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['hello', 'goodbye'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['1234567890', 'one two three', 'i am free', 'four five six', 'get my sticks', 'seven eight nine', 'i am fine'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'a') + 'b') + (500 * 'a')), (((800 * 'c') + 'd') + (1200 * 'c')), 'b'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'a') + 'b') + (500 * 'a')), (((800 * 'c') + 'd') + (1200 * 'c')), (((700 * 'eee') + 'b') + (90 * 'd')), (((800 * 'c') + 'd') + (1200 * 'c'))])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'b') + 'acd') + (500 * 'f')), ((800 * '1') + '234a'), 'eeee'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'b') + 'acd') + (500 * 'f')), (('a' + (800 * '1')) + '234'), '123'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
test1 = [(((1000 * 'b') + 'acd') + (500 * 'f')), ((800 * '1') + '234'), '123']
for k in range(95):
test1.append((k * chr(k)))
test2 = []
for k in range(30):
test2.append((k * chr(k)))
expected = True
answer = first_is_elsewhere_too(((test1 + ['a']) + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too((test1 + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
a_inside = (((100 * 'b') + 'a') + (100 * 'b'))
answer = first_is_elsewhere_too(((test1 + [a_inside]) + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
if no_failures:
print('*** Your code PASSED all')
else:
print('!!! Your code FAILED some')
print(' of the tests for first_is_elsewhere_too')
| -4,606,426,094,174,220,000
|
Tests the first_is_elsewhere_too function.
|
src/m3_more_nested_loops_in_sequences.py
|
run_test_first_is_elsewhere_too
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def run_test_first_is_elsewhere_too():
' '
print()
print('-------------------------------------')
print('Testing the FIRST_IS_ELSEWHERE_TOO function:')
print('-------------------------------------')
message = {True: 'Your code PASSED this test.\n', False: 'Your code FAILED this test.\n'}
no_failures = True
expected = True
answer = first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 2, 13, 14]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([[], [1, 2], [1, 2]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([('a', 9), (13, 10, 11, 7, 'a'), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([('a', 9), (13, 10, 11, 7, 'aa'), [11, 12, 3, 10]])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([('a', 'a', 'b', 'b', 'a', 'b')])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too([()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['a', (), (), (), 'a'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['a', (), (), (), 'a', ()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['a', (), (), (), 'b', ()])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too(['hello', 'goodbye'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['hello', 'xxxxxxxxxxx'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too(['1234567890', 'one two three', 'i am free', 'four five six', 'get my sticks', 'seven eight nine', 'i am fine'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'a') + 'b') + (500 * 'a')), (((800 * 'c') + 'd') + (1200 * 'c')), 'b'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'a') + 'b') + (500 * 'a')), (((800 * 'c') + 'd') + (1200 * 'c')), (((700 * 'eee') + 'b') + (90 * 'd')), (((800 * 'c') + 'd') + (1200 * 'c'))])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'b') + 'acd') + (500 * 'f')), ((800 * '1') + '234a'), 'eeee'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
answer = first_is_elsewhere_too([(((1000 * 'b') + 'acd') + (500 * 'f')), (('a' + (800 * '1')) + '234'), '123'])
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
test1 = [(((1000 * 'b') + 'acd') + (500 * 'f')), ((800 * '1') + '234'), '123']
for k in range(95):
test1.append((k * chr(k)))
test2 = []
for k in range(30):
test2.append((k * chr(k)))
expected = True
answer = first_is_elsewhere_too(((test1 + ['a']) + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = False
answer = first_is_elsewhere_too((test1 + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
expected = True
a_inside = (((100 * 'b') + 'a') + (100 * 'b'))
answer = first_is_elsewhere_too(((test1 + [a_inside]) + test2))
print('Expected and actual are:', expected, answer)
print(message[(answer == expected)])
no_failures = (no_failures and (answer == expected))
if no_failures:
print('*** Your code PASSED all')
else:
print('!!! Your code FAILED some')
print(' of the tests for first_is_elsewhere_too')
|
def first_is_elsewhere_too(seq_seq):
'\n Given a sequence of subsequences:\n -- Returns True if any element of the first (initial) subsequence\n appears in any of the other subsequences.\n -- Returns False otherwise.\n\n For example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [11, 12, 3, 10]]\n then this function returns True because 3 appears\n in the first subsequence and also in the third subsequence.\n\n As another example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [11, 2, 13, 14]]\n then this function returns False because 3 does not appear in\n any subsequence except the first, 1 does not appear in any\n subsequence except the first, and 4 does not appear in any\n subsequence except the first.\n\n As yet another example, if the given argument is:\n ([], [1, 2], [1, 2])\n then this function returns False since no element of the first\n subsequence appears elsewhere.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences.\n '
for j in range(len(seq_seq[0])):
for k in range(1, len(seq_seq)):
for i in range(len(seq_seq[k])):
if (seq_seq[k][i] == seq_seq[0][j]):
return True
return False
| 5,612,261,895,344,195,000
|
Given a sequence of subsequences:
-- Returns True if any element of the first (initial) subsequence
appears in any of the other subsequences.
-- Returns False otherwise.
For example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 12, 3, 10]]
then this function returns True because 3 appears
in the first subsequence and also in the third subsequence.
As another example, if the given argument is:
[(3, 1, 4),
(13, 10, 11, 7, 10),
[11, 2, 13, 14]]
then this function returns False because 3 does not appear in
any subsequence except the first, 1 does not appear in any
subsequence except the first, and 4 does not appear in any
subsequence except the first.
As yet another example, if the given argument is:
([], [1, 2], [1, 2])
then this function returns False since no element of the first
subsequence appears elsewhere.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences.
|
src/m3_more_nested_loops_in_sequences.py
|
first_is_elsewhere_too
|
dalesil/19-MoreLoopsWithinLoops
|
python
|
def first_is_elsewhere_too(seq_seq):
'\n Given a sequence of subsequences:\n -- Returns True if any element of the first (initial) subsequence\n appears in any of the other subsequences.\n -- Returns False otherwise.\n\n For example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [11, 12, 3, 10]]\n then this function returns True because 3 appears\n in the first subsequence and also in the third subsequence.\n\n As another example, if the given argument is:\n [(3, 1, 4),\n (13, 10, 11, 7, 10),\n [11, 2, 13, 14]]\n then this function returns False because 3 does not appear in\n any subsequence except the first, 1 does not appear in any\n subsequence except the first, and 4 does not appear in any\n subsequence except the first.\n\n As yet another example, if the given argument is:\n ([], [1, 2], [1, 2])\n then this function returns False since no element of the first\n subsequence appears elsewhere.\n\n Preconditions:\n :type seq_seq: (list, tuple)\n and the given argument is a sequence of sequences.\n '
for j in range(len(seq_seq[0])):
for k in range(1, len(seq_seq)):
for i in range(len(seq_seq[k])):
if (seq_seq[k][i] == seq_seq[0][j]):
return True
return False
|
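
The triple loop above amounts to a membership test of each element of the first subsequence against every later subsequence. A minimal equivalent sketch (same semantics for the row's test data, including the early exit on the first hit):

```python
# Hedged sketch: an equivalent formulation of first_is_elsewhere_too.
# Caveat: `in` on a string is substring containment; for single-character
# elements (as in the row's tests) that coincides with element equality.
def first_is_elsewhere_too(seq_seq):
    return any(item in other
               for item in seq_seq[0]
               for other in seq_seq[1:])

print(first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 12, 3, 10]]))  # True
print(first_is_elsewhere_too([(3, 1, 4), (13, 10, 11, 7, 10), [11, 2, 13, 14]]))  # False
```
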
def _to_DataFrame(self):
'Returns all of the channels parsed from the file as a pandas DataFrame'
import pandas as pd
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
frames = [f[1] for f in self._motions]
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
column_names = [('%s_%s' % (c[0], c[1])) for c in self._motion_channels]
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
| 1,907,929,460,344,979,500
|
Returns all of the channels parsed from the file as a pandas DataFrame
|
app/resources/pymo/pymo/parsers.py
|
_to_DataFrame
|
seanschneeweiss/RoSeMotion
|
python
|
def _to_DataFrame(self):
import pandas as pd
time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s')
frames = [f[1] for f in self._motions]
channels = np.asarray([[channel[2] for channel in frame] for frame in frames])
column_names = [('%s_%s' % (c[0], c[1])) for c in self._motion_channels]
return pd.DataFrame(data=channels, index=time_index, columns=column_names)
|
def find_path(entityset, source_entity, target_entity):
'Find a path of the source entity to the target_entity.'
nodes_pipe = [target_entity]
parent_dict = {target_entity: None}
while len(nodes_pipe):
parent_node = nodes_pipe.pop()
if (parent_node == source_entity):
break
child_nodes = ([e[0] for e in entityset.get_backward_entities(parent_node)] + [e[0] for e in entityset.get_forward_entities(parent_node)])
for child in child_nodes:
if (child not in parent_dict):
parent_dict[child] = parent_node
nodes_pipe.append(child)
node = source_entity
paths = [[node]]
while (node != target_entity):
node = parent_dict[node]
paths.append((paths[(- 1)] + [node]))
return paths
| 2,792,276,967,203,366,400
|
Find a path of the source entity to the target_entity.
|
vbridge/utils/entityset_helpers.py
|
find_path
|
sibyl-dev/VBridge
|
python
|
def find_path(entityset, source_entity, target_entity):
nodes_pipe = [target_entity]
parent_dict = {target_entity: None}
while len(nodes_pipe):
parent_node = nodes_pipe.pop()
if (parent_node == source_entity):
break
child_nodes = ([e[0] for e in entityset.get_backward_entities(parent_node)] + [e[0] for e in entityset.get_forward_entities(parent_node)])
for child in child_nodes:
if (child not in parent_dict):
parent_dict[child] = parent_node
nodes_pipe.append(child)
node = source_entity
paths = [[node]]
while (node != target_entity):
node = parent_dict[node]
paths.append((paths[(- 1)] + [node]))
return paths
|
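
find_path above builds a parent map outward from the target entity, then walks it back from the source, returning every prefix of the recovered path. A sketch exercising it with a stand-in entityset (only the two methods the row's code calls are mimicked; the entity names are made up):

```python
# Hedged sketch: a minimal stub standing in for the real entityset API.
class StubEntitySet:
    def __init__(self, edges):
        self.edges = edges  # list of (child, parent) relationships

    def get_backward_entities(self, entity):
        return [(child,) for child, parent in self.edges if parent == entity]

    def get_forward_entities(self, entity):
        return [(parent,) for child, parent in self.edges if child == entity]

es = StubEntitySet([('ADMISSIONS', 'PATIENTS'), ('LABEVENTS', 'ADMISSIONS')])
print(find_path(es, 'LABEVENTS', 'PATIENTS'))
# [['LABEVENTS'], ['LABEVENTS', 'ADMISSIONS'], ['LABEVENTS', 'ADMISSIONS', 'PATIENTS']]
```
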
def get_event_ids(storage):
'Get the highest event id from the entities and the eventid of the most recent event\n\n :param storage: GOB (events + entities)\n :return:highest entity eventid and last eventid\n '
with storage.get_session():
entity_max_eventid = storage.get_entity_max_eventid()
last_eventid = storage.get_last_eventid()
return (entity_max_eventid, last_eventid)
| 8,561,335,916,100,775,000
|
Get the highest event id from the entities and the eventid of the most recent event
:param storage: GOB (events + entities)
:return:highest entity eventid and last eventid
|
src/gobupload/utils.py
|
get_event_ids
|
Amsterdam/GOB-Upload
|
python
|
def get_event_ids(storage):
'Get the highest event id from the entities and the eventid of the most recent event\n\n :param storage: GOB (events + entities)\n :return:highest entity eventid and last eventid\n '
with storage.get_session():
entity_max_eventid = storage.get_entity_max_eventid()
last_eventid = storage.get_last_eventid()
return (entity_max_eventid, last_eventid)
|
def random_string(length):
'Returns a random string of length :length: consisting of lowercase characters and digits\n\n :param length:\n :return:\n '
assert (length > 0)
characters = (string.ascii_lowercase + ''.join([str(i) for i in range(10)]))
return ''.join([random.choice(characters) for _ in range(length)])
| -2,064,968,224,007,040,000
|
Returns a random string of length :length: consisting of lowercase characters and digits
:param length:
:return:
|
src/gobupload/utils.py
|
random_string
|
Amsterdam/GOB-Upload
|
python
|
def random_string(length):
'Returns a random string of length :length: consisting of lowercase characters and digits\n\n :param length:\n :return:\n '
assert (length > 0)
    characters = (string.ascii_lowercase + ''.join([str(i) for i in range(10)]))
    return ''.join([random.choice(characters) for _ in range(length)])
|
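
Since `''.join([str(i) for i in range(10)])` is exactly `string.digits`, the same helper reduces to this sketch:

```python
# Hedged sketch: equivalent to the row above, using string.digits.
import random
import string

def random_string(length):
    assert length > 0
    return ''.join(random.choice(string.ascii_lowercase + string.digits)
                   for _ in range(length))

print(random_string(8))  # e.g. 'k3x9qa0m'
```
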
def evaluate_rnn(model, dataloader, print_every=5, init_dir=None, allow_gpu_mem_growth=True, gpu_memory_fraction=0.3):
'\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n '
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print('start')
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if (init_dir is not None):
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print(('restoring model from %s' % ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
(X, y, seq_lengths) = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
(cm, scores, targets, obs) = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs], feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = (e_db - s_db)
dt_tr = (e_tr - e_db)
field_per_s = (dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds())
approx_calc_time = ((dataloader.num_feat - step) / field_per_s)
eta = (datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time))
t_last = datetime.datetime.now()
if ((i % print_every) == 0):
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = 'Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} (time: db {}ms; eval {}ms, {} feat/s, eta: {})'.format(i, step, dataloader.epoch, dataloader.batch, dataloader.num_batches, cross_entropy, int((dt_db.total_seconds() * 1000)), int((dt_tr.total_seconds() * 1000)), int(field_per_s), eta.strftime('%d.%b %H:%M'))
print(msg)
return (all_scores.reshape((- 1), model.n_classes), all_targets.reshape((- 1), model.n_classes).astype(bool), all_obs)
| 2,253,965,908,330,062,300
|
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
|
evaluate.py
|
evaluate_rnn
|
TUM-LMF/fieldRNN
|
python
|
def evaluate_rnn(model, dataloader, print_every=5, init_dir=None, allow_gpu_mem_growth=True, gpu_memory_fraction=0.3):
'\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n '
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print('start')
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if (init_dir is not None):
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print(('restoring model from %s' % ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
(X, y, seq_lengths) = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
(cm, scores, targets, obs) = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs], feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = (e_db - s_db)
dt_tr = (e_tr - e_db)
field_per_s = (dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds())
approx_calc_time = ((dataloader.num_feat - step) / field_per_s)
eta = (datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time))
t_last = datetime.datetime.now()
if ((i % print_every) == 0):
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = 'Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} (time: db {}ms; eval {}ms, {} feat/s, eta: {})'.format(i, step, dataloader.epoch, dataloader.batch, dataloader.num_batches, cross_entropy, int((dt_db.total_seconds() * 1000)), int((dt_tr.total_seconds() * 1000)), int(field_per_s), eta.strftime('%d.%b %H:%M'))
print(msg)
return (all_scores.reshape((- 1), model.n_classes), all_targets.reshape((- 1), model.n_classes).astype(bool), all_obs)
|
def evaluate_cnn(model, dataloader, print_every=5, init_dir=None, allow_gpu_mem_growth=True, gpu_memory_fraction=0.3):
'\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n '
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print('start')
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if (init_dir is not None):
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print(('restoring model from %s' % ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
with open((init_dir + '/steps.txt'), 'r') as f:
line = f.read()
(step_, epoch_) = line.split(' ')
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
(X, y, seq_lengths) = dataloader.next_batch()
e_db = datetime.datetime.now()
(batch_size, max_seqlengths, n_input) = X.shape
ones = np.ones([batch_size, max_seqlengths])
mask_ = ((np.arange(0, max_seqlengths) * ones) < (seq_lengths * ones.T).T)
mask = mask_.reshape((- 1))
obs_ = (np.arange(0, max_seqlengths) * ones)
obs = obs_.reshape((- 1))[mask]
' unroll data '
(X, y) = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
(scores, targets) = sess.run([model.scores, model.targets], feed_dict=feed)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = (e_db - s_db)
dt_tr = (e_tr - e_db)
field_per_s = (dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds())
approx_calc_time = ((dataloader.num_feat - step) / field_per_s)
eta = (datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time))
t_last = datetime.datetime.now()
if ((i % print_every) == 0):
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = 'Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} (time: db {}ms; eval {}ms, {} feat/s, eta: {})'.format(i, step, dataloader.epoch, dataloader.batch, dataloader.num_batches, cross_entropy, int((dt_db.total_seconds() * 1000)), int((dt_tr.total_seconds() * 1000)), int(field_per_s), eta.strftime('%d.%b %H:%M'))
print(msg)
return (all_scores.reshape((- 1), model.n_classes), all_targets.reshape((- 1), model.n_classes).astype(bool), obs)
| -8,771,434,458,588,027,000
|
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
|
evaluate.py
|
evaluate_cnn
|
TUM-LMF/fieldRNN
|
python
|
def evaluate_cnn(model, dataloader, print_every=5, init_dir=None, allow_gpu_mem_growth=True, gpu_memory_fraction=0.3):
'\n This function initialized a model from the <init_from> directory and calculates\n probabilities, and confusion matrices based on all data stored in\n one epoch of dataloader (usually test data)\n\n\n :param model: rnn_model object containing tensorflow graph\n :param dataloader: DataLoader object for loading batches\n :param print_every: console log frequency\n :param allow_gpu_mem_growth: dynamic growth of gpu vram\n :param gpu_memory_fraction: hard upper limit for gpu vram\n\n :returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted\n :returns probabilities <float> [all observations x n_classes] probabilities for each class per observation\n :returns targets <bool> [all observations x n_classes] reference data for each class per observation\n :returns observations <int> [all_observations]position of observation in the sequence\n e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]\n '
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print('start')
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if (init_dir is not None):
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print(('restoring model from %s' % ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
with open((init_dir + '/steps.txt'), 'r') as f:
line = f.read()
(step_, epoch_) = line.split(' ')
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
(X, y, seq_lengths) = dataloader.next_batch()
e_db = datetime.datetime.now()
(batch_size, max_seqlengths, n_input) = X.shape
ones = np.ones([batch_size, max_seqlengths])
mask_ = ((np.arange(0, max_seqlengths) * ones) < (seq_lengths * ones.T).T)
mask = mask_.reshape((- 1))
obs_ = (np.arange(0, max_seqlengths) * ones)
obs = obs_.reshape((- 1))[mask]
' unroll data '
(X, y) = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
(scores, targets) = sess.run([model.scores, model.targets], feed_dict=feed)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = (e_db - s_db)
dt_tr = (e_tr - e_db)
field_per_s = (dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds())
approx_calc_time = ((dataloader.num_feat - step) / field_per_s)
eta = (datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time))
t_last = datetime.datetime.now()
if ((i % print_every) == 0):
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = 'Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} (time: db {}ms; eval {}ms, {} feat/s, eta: {})'.format(i, step, dataloader.epoch, dataloader.batch, dataloader.num_batches, cross_entropy, int((dt_db.total_seconds() * 1000)), int((dt_tr.total_seconds() * 1000)), int(field_per_s), eta.strftime('%d.%b %H:%M'))
print(msg)
return (all_scores.reshape((- 1), model.n_classes), all_targets.reshape((- 1), model.n_classes).astype(bool), obs)
|
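
The broadcast trick in evaluate_cnn (the `ones`/`arange` lines) builds a boolean mask that is True for real timesteps and False for padding. Isolated with toy shapes:

```python
# Hedged sketch: the per-timestep mask construction from evaluate_cnn,
# with a tiny batch. mask[i, t] is True exactly when t < seq_lengths[i].
import numpy as np

seq_lengths = np.array([2, 4, 1])   # three sequences in the batch
max_len = 4
ones = np.ones([len(seq_lengths), max_len])
mask = (np.arange(max_len) * ones) < (seq_lengths * ones.T).T
print(mask.astype(int))
# [[1 1 0 0]
#  [1 1 1 1]
#  [1 0 0 0]]
```
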
def is_allowed_create(self, resource_type, context_id):
'Whether or not the user is allowed to create a resource of the specified\n type in the context.'
return self._is_allowed(Permission('create', resource_type, context_id))
| 2,661,049,642,559,629,000
|
Whether or not the user is allowed to create a resource of the specified
type in the context.
|
src/ggrc/rbac/permissions_provider.py
|
is_allowed_create
|
sriharshakappala/ggrc-core
|
python
|
def is_allowed_create(self, resource_type, context_id):
'Whether or not the user is allowed to create a resource of the specified\n type in the context.'
return self._is_allowed(Permission('create', resource_type, context_id))
|
def is_allowed_read(self, resource_type, context_id):
'Whether or not the user is allowed to read a resource of the specified\n type in the context.'
return self._is_allowed(Permission('read', resource_type, context_id))
| 7,899,654,231,519,762,000
|
Whether or not the user is allowed to read a resource of the specified
type in the context.
|
src/ggrc/rbac/permissions_provider.py
|
is_allowed_read
|
sriharshakappala/ggrc-core
|
python
|
def is_allowed_read(self, resource_type, context_id):
'Whether or not the user is allowed to read a resource of the specified\n type in the context.'
return self._is_allowed(Permission('read', resource_type, context_id))
|
def is_allowed_update(self, resource_type, context_id):
'Whether or not the user is allowed to update a resource of the specified\n type in the context.'
return self._is_allowed(Permission('update', resource_type, context_id))
| 1,961,168,208,370,677,500
|
Whether or not the user is allowed to update a resource of the specified
type in the context.
|
src/ggrc/rbac/permissions_provider.py
|
is_allowed_update
|
sriharshakappala/ggrc-core
|
python
|
def is_allowed_update(self, resource_type, context_id):
'Whether or not the user is allowed to update a resource of the specified\n type in the context.'
return self._is_allowed(Permission('update', resource_type, context_id))
|
def is_allowed_delete(self, resource_type, context_id):
'Whether or not the user is allowed to delete a resource of the specified\n type in the context.'
return self._is_allowed(Permission('delete', resource_type, context_id))
| 3,184,699,235,176,215,600
|
Whether or not the user is allowed to delete a resource of the specified
type in the context.
|
src/ggrc/rbac/permissions_provider.py
|
is_allowed_delete
|
sriharshakappala/ggrc-core
|
python
|
def is_allowed_delete(self, resource_type, context_id):
'Whether or not the user is allowed to delete a resource of the specified\n type in the context.'
return self._is_allowed(Permission('delete', resource_type, context_id))
|
def create_contexts_for(self, resource_type):
'All contexts in which the user has create permission.'
return self._get_contexts_for('create', resource_type)
| -6,525,482,749,111,687,000
|
All contexts in which the user has create permission.
|
src/ggrc/rbac/permissions_provider.py
|
create_contexts_for
|
sriharshakappala/ggrc-core
|
python
|
def create_contexts_for(self, resource_type):
return self._get_contexts_for('create', resource_type)
|
def read_contexts_for(self, resource_type):
'All contexts in which the user has read permission.'
return self._get_contexts_for('read', resource_type)
| -775,641,103,843,699,100
|
All contexts in which the user has read permission.
|
src/ggrc/rbac/permissions_provider.py
|
read_contexts_for
|
sriharshakappala/ggrc-core
|
python
|
def read_contexts_for(self, resource_type):
return self._get_contexts_for('read', resource_type)
|
def update_contexts_for(self, resource_type):
'All contexts in which the user has update permission.'
return self._get_contexts_for('update', resource_type)
| 3,080,554,081,914,556,000
|
All contexts in which the user has update permission.
|
src/ggrc/rbac/permissions_provider.py
|
update_contexts_for
|
sriharshakappala/ggrc-core
|
python
|
def update_contexts_for(self, resource_type):
return self._get_contexts_for('update', resource_type)
|
def delete_contexts_for(self, resource_type):
'All contexts in which the user has delete permission.'
return self._get_contexts_for('delete', resource_type)
| 1,119,226,877,921,123,100
|
All contexts in which the user has delete permission.
|
src/ggrc/rbac/permissions_provider.py
|
delete_contexts_for
|
sriharshakappala/ggrc-core
|
python
|
def delete_contexts_for(self, resource_type):
return self._get_contexts_for('delete', resource_type)
|
def fit(self, data):
'\n Apply standard scale for input data\n Parameters\n ----------\n data: data_instance, input data\n\n Returns\n ----------\n data:data_instance, data after scale\n mean: list, each column mean value\n std: list, each column standard deviation\n '
(self.column_min_value, self.column_max_value) = self._get_min_max_value(data)
self.scale_column_idx = self._get_scale_column_idx(data)
self.header = self._get_header(data)
self.data_shape = self._get_data_shape(data)
data = self.fit_feature_range(data)
if ((not self.with_mean) and (not self.with_std)):
self.mean = [0 for _ in range(self.data_shape)]
self.std = [1 for _ in range(self.data_shape)]
else:
self.summary_obj = MultivariateStatisticalSummary(data, (- 1))
if self.with_mean:
self.mean = self.summary_obj.get_mean()
self.mean = [self.mean[key] for key in self.header]
else:
self.mean = [0 for _ in range(self.data_shape)]
if self.with_std:
self.std = self.summary_obj.get_std_variance()
self.std = [self.std[key] for key in self.header]
for (i, value) in enumerate(self.std):
if (np.abs((value - 0)) < 1e-06):
self.std[i] = 1
else:
self.std = [1 for _ in range(self.data_shape)]
f = functools.partial(self.__scale, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
fit_data = data.mapValues(f)
return fit_data
| -1,999,165,563,163,743,500
|
Apply standard scale for input data
Parameters
----------
data: data_instance, input data
Returns
----------
data:data_instance, data after scale
mean: list, each column mean value
std: list, each column standard deviation
|
federatedml/feature/feature_scale/standard_scale.py
|
fit
|
0xqq/FATE
|
python
|
def fit(self, data):
'\n Apply standard scale for input data\n Parameters\n ----------\n data: data_instance, input data\n\n Returns\n ----------\n data:data_instance, data after scale\n mean: list, each column mean value\n std: list, each column standard deviation\n '
(self.column_min_value, self.column_max_value) = self._get_min_max_value(data)
self.scale_column_idx = self._get_scale_column_idx(data)
self.header = self._get_header(data)
self.data_shape = self._get_data_shape(data)
data = self.fit_feature_range(data)
if ((not self.with_mean) and (not self.with_std)):
self.mean = [0 for _ in range(self.data_shape)]
self.std = [1 for _ in range(self.data_shape)]
else:
self.summary_obj = MultivariateStatisticalSummary(data, (- 1))
if self.with_mean:
self.mean = self.summary_obj.get_mean()
self.mean = [self.mean[key] for key in self.header]
else:
self.mean = [0 for _ in range(self.data_shape)]
if self.with_std:
self.std = self.summary_obj.get_std_variance()
self.std = [self.std[key] for key in self.header]
for (i, value) in enumerate(self.std):
if (np.abs((value - 0)) < 1e-06):
self.std[i] = 1
else:
self.std = [1 for _ in range(self.data_shape)]
f = functools.partial(self.__scale, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
fit_data = data.mapValues(f)
return fit_data
|
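
Stripped of the framework plumbing, fit applies a per-column z-score with a guard that replaces a near-zero standard deviation with 1, so constant columns are only centered, never divided by ~0. A minimal numpy sketch of that arithmetic (population std assumed here; the row's MultivariateStatisticalSummary may compute it differently):

```python
# Hedged sketch: the core scaling math behind the row's fit method.
import numpy as np

X = np.array([[1.0, 10.0], [2.0, 10.0], [3.0, 10.0]])
mean = X.mean(axis=0)
std = X.std(axis=0)
std[np.abs(std) < 1e-6] = 1   # the row's guard for (near-)constant columns
print((X - mean) / std)
# column 0 is standardized; the constant column 1 is only centered
```
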
def transform(self, data):
'\n Transform input data using standard scale with fit results\n Parameters\n ----------\n data: data_instance, input data\n\n Returns\n ----------\n transform_data:data_instance, data after transform\n '
f = functools.partial(self.__scale_with_column_range, column_upper=self.column_max_value, column_lower=self.column_min_value, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
transform_data = data.mapValues(f)
return transform_data
| 871,618,339,265,289,500
|
Transform input data using standard scale with fit results
Parameters
----------
data: data_instance, input data
Returns
----------
transform_data:data_instance, data after transform
|
federatedml/feature/feature_scale/standard_scale.py
|
transform
|
0xqq/FATE
|
python
|
def transform(self, data):
'\n Transform input data using standard scale with fit results\n Parameters\n ----------\n data: data_instance, input data\n\n Returns\n ----------\n transform_data:data_instance, data after transform\n '
f = functools.partial(self.__scale_with_column_range, column_upper=self.column_max_value, column_lower=self.column_min_value, mean=self.mean, std=self.std, process_cols_list=self.scale_column_idx)
transform_data = data.mapValues(f)
return transform_data
|
def build_prep(model_path='.', server_config=None, server_port=9090):
'Prepares the model to be Dockerised by generating a dockerimage'
model_path = osp.abspath(model_path)
(model_tag, model_version) = get_model_tag_and_version(model_path)
if (server_config is None):
server_config = 'false'
kwargs = {'catwalk_version': catwalk_version, 'model_tag': model_tag, 'model_version': model_version, 'server_config': server_config, 'server_port': server_port}
files_to_create = ['Dockerfile', '.dockerignore']
env = Environment(loader=PackageLoader('catwalk', 'templates'))
for f in files_to_create:
template_file = (f + '.j2')
if (template_file[0] == '.'):
template_file = template_file[1:]
template = env.get_template(template_file)
rendered = template.render(**kwargs)
out_path = osp.join(model_path, f)
with open(out_path, 'w') as fp:
fp.write(rendered)
logger.info(('Wrote ' + f))
| -3,235,559,846,212,837,400
|
Prepares the model to be Dockerised by generating a dockerimage
|
catwalk/cicd/build_steps.py
|
build_prep
|
LeapBeyond/catwalk
|
python
|
def build_prep(model_path='.', server_config=None, server_port=9090):
model_path = osp.abspath(model_path)
(model_tag, model_version) = get_model_tag_and_version(model_path)
if (server_config is None):
server_config = 'false'
kwargs = {'catwalk_version': catwalk_version, 'model_tag': model_tag, 'model_version': model_version, 'server_config': server_config, 'server_port': server_port}
files_to_create = ['Dockerfile', '.dockerignore']
env = Environment(loader=PackageLoader('catwalk', 'templates'))
for f in files_to_create:
template_file = (f + '.j2')
if (template_file[0] == '.'):
template_file = template_file[1:]
template = env.get_template(template_file)
rendered = template.render(**kwargs)
out_path = osp.join(model_path, f)
with open(out_path, 'w') as fp:
fp.write(rendered)
logger.info(('Wrote ' + f))
|
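
The core of build_prep is one Jinja2 render per template file. A self-contained sketch of that step with an inline Template (the Dockerfile content here is invented, not the package's actual template):

```python
# Hedged sketch: the render step in isolation, without the templates/ dir.
from jinja2 import Template

kwargs = {'model_tag': 'my-model', 'model_version': '1.0.0', 'server_port': 9090}
template = Template('FROM catwalk:latest\n'
                    'EXPOSE {{ server_port }}\n'
                    'LABEL model="{{ model_tag }}:{{ model_version }}"\n')
print(template.render(**kwargs))
```
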
def build(model_path='.', docker_registry=None, push=True, no_cache=False):
'Builds the model into a Dockerised model server image.'
model_path = osp.abspath(model_path)
(model_tag, model_version) = get_model_tag_and_version(model_path)
model_path = osp.abspath(model_path)
image_name_parts = [model_tag]
if (docker_registry is not None):
image_name_parts.insert(0, docker_registry)
image_name = '/'.join(image_name_parts)
docker_tag = ((image_name + ':') + model_version)
cmd = ['docker', 'build', model_path]
cmd += ['-t', docker_tag]
if no_cache:
cmd += ['--no-cache']
logger.info(' '.join(cmd))
result = subprocess.run(cmd, check=True)
if (result.returncode != 0):
return result.returncode
logger.info(('Successfully built ' + docker_tag))
if (not push):
return 0
cmd = ['docker', 'push', docker_tag]
logger.info(' '.join(cmd))
result = subprocess.run(cmd, check=True)
return result.returncode
| -7,023,126,925,768,950,000
|
Builds the model into a Dockerised model server image.
|
catwalk/cicd/build_steps.py
|
build
|
LeapBeyond/catwalk
|
python
|
def build(model_path='.', docker_registry=None, push=True, no_cache=False):
model_path = osp.abspath(model_path)
(model_tag, model_version) = get_model_tag_and_version(model_path)
model_path = osp.abspath(model_path)
image_name_parts = [model_tag]
if (docker_registry is not None):
image_name_parts.insert(0, docker_registry)
image_name = '/'.join(image_name_parts)
docker_tag = ((image_name + ':') + model_version)
cmd = ['docker', 'build', model_path]
cmd += ['-t', docker_tag]
if no_cache:
cmd += ['--no-cache']
logger.info(' '.join(cmd))
result = subprocess.run(cmd, check=True)
if (result.returncode != 0):
return result.returncode
logger.info(('Successfully built ' + docker_tag))
if (not push):
return 0
cmd = ['docker', 'push', docker_tag]
logger.info(' '.join(cmd))
result = subprocess.run(cmd, check=True)
return result.returncode
|
def from_dict(data, require=None):
'Validates a dictionary containing Google service account data.\n\n Creates and returns a :class:`google.auth.crypt.Signer` instance from the\n private key specified in the data.\n\n Args:\n data (Mapping[str, str]): The service account data\n require (Sequence[str]): List of keys required to be present in the\n info.\n\n Returns:\n google.auth.crypt.Signer: A signer created from the private key in the\n service account file.\n\n Raises:\n ValueError: if the data was in the wrong format, or if one of the\n required keys is missing.\n '
keys_needed = set((require if (require is not None) else []))
missing = keys_needed.difference(six.iterkeys(data))
if missing:
raise ValueError('Service account info was not in the expected format, missing fields {}.'.format(', '.join(missing)))
signer = crypt.RSASigner.from_service_account_info(data)
return signer
| 5,287,973,729,651,305,000
|
Validates a dictionary containing Google service account data.
Creates and returns a :class:`google.auth.crypt.Signer` instance from the
private key specified in the data.
Args:
data (Mapping[str, str]): The service account data
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
google.auth.crypt.Signer: A signer created from the private key in the
service account file.
Raises:
ValueError: if the data was in the wrong format, or if one of the
required keys is missing.
|
google/auth/_service_account_info.py
|
from_dict
|
CodingFanSteve/google-auth-library-python
|
python
|
def from_dict(data, require=None):
'Validates a dictionary containing Google service account data.\n\n Creates and returns a :class:`google.auth.crypt.Signer` instance from the\n private key specified in the data.\n\n Args:\n data (Mapping[str, str]): The service account data\n require (Sequence[str]): List of keys required to be present in the\n info.\n\n Returns:\n google.auth.crypt.Signer: A signer created from the private key in the\n service account file.\n\n Raises:\n ValueError: if the data was in the wrong format, or if one of the\n required keys is missing.\n '
keys_needed = set((require if (require is not None) else []))
missing = keys_needed.difference(six.iterkeys(data))
if missing:
raise ValueError('Service account info was not in the expected format, missing fields {}.'.format(', '.join(missing)))
signer = crypt.RSASigner.from_service_account_info(data)
return signer
|
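
The validation half of from_dict stands alone without google-auth. A sketch of just the required-keys check (the field names below are illustrative, not a required set):

```python
# Hedged sketch: the required-keys validation pattern from from_dict.
def check_required(data, require=None):
    keys_needed = set(require if require is not None else [])
    missing = keys_needed.difference(data.keys())
    if missing:
        raise ValueError(
            'Service account info was not in the expected format, '
            'missing fields {}.'.format(', '.join(missing)))

check_required({'client_email': 'x', 'token_uri': 'y'},
               require=['client_email', 'token_uri'])   # passes silently
check_required({'client_email': 'x'},
               require=['client_email', 'token_uri'])   # raises ValueError
```
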
def from_filename(filename, require=None):
'Reads a Google service account JSON file and returns its parsed info.\n\n Args:\n filename (str): The path to the service account .json file.\n require (Sequence[str]): List of keys required to be present in the\n info.\n\n Returns:\n Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified\n info and a signer instance.\n '
with io.open(filename, 'r', encoding='utf-8') as json_file:
data = json.load(json_file)
return (data, from_dict(data, require=require))
| 7,935,664,125,326,170,000
|
Reads a Google service account JSON file and returns its parsed info.
Args:
filename (str): The path to the service account .json file.
require (Sequence[str]): List of keys required to be present in the
info.
Returns:
Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified
info and a signer instance.
|
google/auth/_service_account_info.py
|
from_filename
|
CodingFanSteve/google-auth-library-python
|
python
|
def from_filename(filename, require=None):
'Reads a Google service account JSON file and returns its parsed info.\n\n Args:\n filename (str): The path to the service account .json file.\n require (Sequence[str]): List of keys required to be present in the\n info.\n\n Returns:\n Tuple[ Mapping[str, str], google.auth.crypt.Signer ]: The verified\n info and a signer instance.\n '
with io.open(filename, 'r', encoding='utf-8') as json_file:
data = json.load(json_file)
return (data, from_dict(data, require=require))
|
def __init__(self, end_time_usecs=None, location=None, policy_id=None, protection_job_id=None, protection_job_name=None, retention_period=None, start_time_usecs=None, storage_domain=None, total_snapshots=None):
'Constructor for the ProtectionInfo class'
self.end_time_usecs = end_time_usecs
self.location = location
self.policy_id = policy_id
self.protection_job_id = protection_job_id
self.protection_job_name = protection_job_name
self.retention_period = retention_period
self.start_time_usecs = start_time_usecs
self.storage_domain = storage_domain
self.total_snapshots = total_snapshots
| -5,818,597,911,769,055,000
|
Constructor for the ProtectionInfo class
|
cohesity_management_sdk/models/protection_info.py
|
__init__
|
cohesity/management-sdk-python
|
python
|
def __init__(self, end_time_usecs=None, location=None, policy_id=None, protection_job_id=None, protection_job_name=None, retention_period=None, start_time_usecs=None, storage_domain=None, total_snapshots=None):
self.end_time_usecs = end_time_usecs
self.location = location
self.policy_id = policy_id
self.protection_job_id = protection_job_id
self.protection_job_name = protection_job_name
self.retention_period = retention_period
self.start_time_usecs = start_time_usecs
self.storage_domain = storage_domain
self.total_snapshots = total_snapshots
|
@classmethod
def from_dictionary(cls, dictionary):
"Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n "
if (dictionary is None):
return None
end_time_usecs = dictionary.get('endTimeUsecs')
location = dictionary.get('location')
policy_id = dictionary.get('policyId')
protection_job_id = dictionary.get('protectionJobId')
protection_job_name = dictionary.get('protectionJobName')
retention_period = dictionary.get('retentionPeriod')
start_time_usecs = dictionary.get('startTimeUsecs')
storage_domain = dictionary.get('storageDomain')
total_snapshots = dictionary.get('totalSnapshots')
return cls(end_time_usecs, location, policy_id, protection_job_id, protection_job_name, retention_period, start_time_usecs, storage_domain, total_snapshots)
| 8,686,142,287,780,077,000
|
Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
|
cohesity_management_sdk/models/protection_info.py
|
from_dictionary
|
cohesity/management-sdk-python
|
python
|
@classmethod
def from_dictionary(cls, dictionary):
"Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n "
if (dictionary is None):
return None
end_time_usecs = dictionary.get('endTimeUsecs')
location = dictionary.get('location')
policy_id = dictionary.get('policyId')
protection_job_id = dictionary.get('protectionJobId')
protection_job_name = dictionary.get('protectionJobName')
retention_period = dictionary.get('retentionPeriod')
start_time_usecs = dictionary.get('startTimeUsecs')
storage_domain = dictionary.get('storageDomain')
total_snapshots = dictionary.get('totalSnapshots')
return cls(end_time_usecs, location, policy_id, protection_job_id, protection_job_name, retention_period, start_time_usecs, storage_domain, total_snapshots)
|
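
Usage sketch: from_dictionary maps the camelCase API keys onto the snake_case constructor arguments, leaving absent keys as None (the values below are made up):

```python
# Hedged sketch: round-tripping a partial API payload through the model.
info = ProtectionInfo.from_dictionary({
    'protectionJobName': 'nightly-vm-backup',   # hypothetical values
    'totalSnapshots': 42,
})
print(info.protection_job_name, info.total_snapshots)  # nightly-vm-backup 42
print(info.policy_id)                                  # None (key absent)
```
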
def make_datetime(time_string, formats=None):
'Makes datetime from string based on one of the formats.\n\n :param string time_string: time in string\n :param list formats: list of accepted formats\n :return datetime.datetime: datetime or None if no format is matched\n '
if (formats is None):
formats = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d']
for fmt in formats:
try:
return datetime.datetime.strptime(time_string, fmt)
except ValueError:
pass
return None
| 912,480,247,123,594,900
|
Makes datetime from string based on one of the formats.
:param string time_string: time in string
:param list formats: list of accepted formats
:return datetime.datetime: datetime or None if no format is matched
|
archdiffer/flask_frontend/request_parser.py
|
make_datetime
|
Kratochvilova/archdiffer
|
python
|
def make_datetime(time_string, formats=None):
'Makes datetime from string based on one of the formats.\n\n :param string time_string: time in string\n :param list formats: list of accepted formats\n :return datetime.datetime: datetime or None if no format is matched\n '
if (formats is None):
formats = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d']
for fmt in formats:
try:
return datetime.datetime.strptime(time_string, fmt)
except ValueError:
pass
return None
|
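A minimal usage sketch for make_datetime, assuming the archdiffer package is importable; the date strings are arbitrary examples.

from archdiffer.flask_frontend.request_parser import make_datetime

make_datetime('2018-03-01 12:00:00')  # datetime.datetime(2018, 3, 1, 12, 0)
make_datetime('2018-03-01')           # datetime.datetime(2018, 3, 1, 0, 0)
make_datetime('01/03/2018')           # None -- matches neither default format
make_datetime('03/01/2018', formats=['%d/%m/%Y'])  # datetime.datetime(2018, 1, 3, 0, 0)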
def before(column, name='before'):
'Make filter template for filtering column values less or equal to\n datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.le, make_datetime)}
| 3,500,485,523,387,236,000
|
Make filter template for filtering column values less or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
|
archdiffer/flask_frontend/request_parser.py
|
before
|
Kratochvilova/archdiffer
|
python
|
def before(column, name='before'):
'Make filter template for filtering column values less or equal to\n datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.le, make_datetime)}
|
def after(column, name='after'):
'Make filter template for filtering column values greater or equal to\n datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.ge, make_datetime)}
| 484,768,844,085,137,860
|
Make filter template for filtering column values greater or equal to
datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
|
archdiffer/flask_frontend/request_parser.py
|
after
|
Kratochvilova/archdiffer
|
python
|
def after(column, name='after'):
'Make filter template for filtering column values greater or equal to\n datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.ge, make_datetime)}
|
def time(column, name='time'):
'Make filter template for filtering column values equal to datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.eq, make_datetime)}
| -7,892,127,272,377,402,000
|
Make filter template for filtering column values equal to datetime.
:param column: database model
:param string name: name used in the filter template
:return dict: resulting template
|
archdiffer/flask_frontend/request_parser.py
|
time
|
Kratochvilova/archdiffer
|
python
|
def time(column, name='time'):
'Make filter template for filtering column values equal to datetime.\n\n :param column: database model\n :param string name: name used in the filter template\n :return dict: resulting template\n '
return {name: (column, operator.eq, make_datetime)}
|
def equals(column, name='id', function=(lambda x: x)):
'Make filter template for filtering column values equal to value\n transformed by given function.\n\n :param column: database model\n :param string name: name used in the filter template\n :param callable function: function for transforming the value\n :return dict: resulting template\n '
return {name: (column, operator.eq, function)}
| 3,664,609,797,446,414,000
|
Make filter template for filtering column values equal to value
transformed by given function.
:param column: database model
:param string name: name used in the filter template
:param callable function: function for transforming the value
:return dict: resulting template
|
archdiffer/flask_frontend/request_parser.py
|
equals
|
Kratochvilova/archdiffer
|
python
|
def equals(column, name='id', function=(lambda x: x)):
'Make filter template for filtering column values equal to value\n transformed by given function.\n\n :param column: database model\n :param string name: name used in the filter template\n :param callable function: function for transforming the value\n :return dict: resulting template\n '
return {name: (column, operator.eq, function)}
|
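The template builders above (before, after, time, equals) all produce the same dict shape: name -> (column, comparison operator, value parser). A sketch, using a plain string as a stand-in for a real SQLAlchemy column:

import operator
from archdiffer.flask_frontend.request_parser import (
    before, after, equals, make_datetime)

column = 'comparisons.time'   # placeholder; normally a SQLAlchemy column
templates = {}
templates.update(before(column))
templates.update(after(column))
templates.update(equals(column, name='id', function=int))

assert templates['before'] == (column, operator.le, make_datetime)
assert templates['after'] == (column, operator.ge, make_datetime)
assert templates['id'] == (column, operator.eq, int)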
def parse_request(filters=None, defaults=None):
'Parse arguments in request according to the _TRANSFORMATIONS or given\n filters.\n Requests containing other keys are considered invalid.\n\n :param dict filters: dict of filter templates containing for each key\n (column, operator, function transforming value from request argument)\n :param dict defaults: default values of modifiers\n :return dict: dict of parsed arguments\n :raises werkzeug.exceptions.BadRequest: if one of the request arguments is\n not recognized\n '
if (filters is None):
filters = {}
if (defaults is not None):
args_dict = defaults.copy()
else:
args_dict = {}
filters_list = []
for (key, value) in request.args.items():
if (key in _TRANSFORMATIONS):
try:
args_dict[key] = _TRANSFORMATIONS[key](value)
except ValueError:
raise BadRequest(('Argument has invalid value "%s".' % value))
elif (key in filters.keys()):
filters_list.append(filters[key][1](filters[key][0], filters[key][2](value)))
else:
raise BadRequest(('Argument "%s" not recognized.' % key))
if ('filter' not in args_dict.keys()):
args_dict['filter'] = []
args_dict['filter'] += filters_list
return args_dict
| 5,116,512,422,071,438,000
|
Parse arguments in request according to the _TRANSFORMATIONS or given
filters.
Requests containing other keys are considered invalid.
:param dict filters: dict of filter templates containing for each key
(column, operator, function transforming value from request argument)
:param dict defaults: default values of modifiers
:return dict: dict of parsed arguments
:raises werkzeug.exceptions.BadRequest: if one of the request arguments is
not recognized
|
archdiffer/flask_frontend/request_parser.py
|
parse_request
|
Kratochvilova/archdiffer
|
python
|
def parse_request(filters=None, defaults=None):
'Parse arguments in request according to the _TRANSFORMATIONS or given\n filters.\n Requests containing other keys are considered invalid.\n\n :param dict filters: dict of filter templates containing for each key\n (column, operator, function transforming value from request argument)\n :param dict defaults: default values of modifiers\n :return dict: dict of parsed arguments\n :raises werkzeug.exceptions.BadRequest: if one of the request arguments is\n not recognized\n '
if (filters is None):
filters = {}
if (defaults is not None):
args_dict = defaults.copy()
else:
args_dict = {}
filters_list = []
for (key, value) in request.args.items():
if (key in _TRANSFORMATIONS):
try:
args_dict[key] = _TRANSFORMATIONS[key](value)
except ValueError:
raise BadRequest(('Argument has invalid value "%s".' % value))
elif (key in filters.keys()):
filters_list.append(filters[key][1](filters[key][0], filters[key][2](value)))
else:
raise BadRequest(('Argument "%s" not recognized.' % key))
if ('filter' not in args_dict.keys()):
args_dict['filter'] = []
args_dict['filter'] += filters_list
return args_dict
|
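A sketch of the parsing flow under a Flask test request context, assuming the package is installed and that 'id' is not among the module's built-in _TRANSFORMATIONS keys; the string column again stands in for a SQLAlchemy column.

from flask import Flask
from archdiffer.flask_frontend.request_parser import equals, parse_request

app = Flask(__name__)
with app.test_request_context('/?id=42'):
    args = parse_request(filters=equals('comparisons.id', function=int))
    # args['filter'] now holds operator.eq('comparisons.id', 42); with a
    # real column this would be a SQLAlchemy filter expression.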
def get_request_arguments(*names, args_dict=None, invert=False):
'Get arguments from args_dict or request if they match given names.\n\n :param *names: names of arguments\n :param dict args_dict: dict of arguments\n :param bool invert: True if names should be excluded instead\n :return dict: dict of arguments\n '
if (args_dict is None):
args_dict = parse_request()
if invert:
return {k: v for (k, v) in args_dict.items() if (k not in names)}
return {k: v for (k, v) in args_dict.items() if (k in names)}
| -4,561,038,898,568,742,400
|
Get arguments from args_dict or request if they match given names.
:param *names: names of arguments
:param dict args_dict: dict of arguments
:param bool invert: True if names should be excluded instead
:return dict: dict of arguments
|
archdiffer/flask_frontend/request_parser.py
|
get_request_arguments
|
Kratochvilova/archdiffer
|
python
|
def get_request_arguments(*names, args_dict=None, invert=False):
'Get arguments from args_dict or request if they match given names.\n\n :param *names: names of arguments\n :param dict args_dict: dict of arguments\n :param bool invert: True if names should be excluded instead\n :return dict: dict of arguments\n '
if (args_dict is None):
args_dict = parse_request()
if invert:
return {k: v for (k, v) in args_dict.items() if (k not in names)}
return {k: v for (k, v) in args_dict.items() if (k in names)}
|
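Passing args_dict explicitly keeps get_request_arguments free of any request context, so the selection logic can be shown standalone:

from archdiffer.flask_frontend.request_parser import get_request_arguments

args = {'limit': 10, 'offset': 20, 'filter': []}
get_request_arguments('limit', 'offset', args_dict=args)
# -> {'limit': 10, 'offset': 20}
get_request_arguments('filter', args_dict=args, invert=True)
# -> {'limit': 10, 'offset': 20}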
def update_modifiers(old_modifiers, new_modifiers):
'Update modifiers.\n\n :param dict old_modifiers: old modifiers\n :param dict new_modifiers: new modifiers\n :return dict: resulting modifiers\n '
modifiers = old_modifiers.copy()
for (key, value) in new_modifiers.items():
if (key in old_modifiers):
if (_TRANSFORMATIONS.get(key) == _list_transform):
modifiers[key] += value
elif (_TRANSFORMATIONS.get(key) == _dict_transform):
modifiers[key].update(value)
else:
modifiers[key] = value
else:
modifiers[key] = value
return modifiers
| -3,875,344,573,319,031,000
|
Update modifiers.
:param dict old_modifiers: old modifiers
:param dict new_modifiers: new modifiers
:return dict: resulting modifiers
|
archdiffer/flask_frontend/request_parser.py
|
update_modifiers
|
Kratochvilova/archdiffer
|
python
|
def update_modifiers(old_modifiers, new_modifiers):
'Update modifiers.\n\n :param dict old_modifiers: old modifiers\n :param dict new_modifiers: new modifiers\n :return dict: resulting modifiers\n '
modifiers = old_modifiers.copy()
for (key, value) in new_modifiers.items():
if (key in old_modifiers):
if (_TRANSFORMATIONS.get(key) == _list_transform):
modifiers[key] += value
elif (_TRANSFORMATIONS.get(key) == _dict_transform):
modifiers[key].update(value)
else:
modifiers[key] = value
else:
modifiers[key] = value
return modifiers
|
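A sketch of the merge semantics, assuming 'limit' is a scalar modifier and 'filter' is list-typed in _TRANSFORMATIONS (consistent with how parse_request above appends to args_dict['filter']):

from archdiffer.flask_frontend.request_parser import update_modifiers

old = {'limit': 10, 'filter': ['a']}
new = {'limit': 20, 'filter': ['b']}
update_modifiers(old, new)
# -> {'limit': 20, 'filter': ['a', 'b']}: scalar values are overwritten,
#    list-typed ones are concatenated, dict-typed ones would be update()d.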
def generate_models(x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'], model_type=None, cnn_min_layers=5, cnn_max_layers=10, cnn_min_filters=25, cnn_max_filters=100, cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000, deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7, deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100, deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3, deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500, low_lr=1, high_lr=4, low_reg=1, high_reg=3):
"\n Generate one or multiple untrained Keras models with random hyperparameters.\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n number_of_classes : int\n Number of classes for classification task\n number_of_models : int\n Number of models to generate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n model_type : str, optional\n Type of model to build: 'CNN' or 'DeepConvLSTM'.\n Default option None generates both models.\n cnn_min_layers : int\n minimum of Conv layers in CNN model\n cnn_max_layers : int\n maximum of Conv layers in CNN model\n cnn_min_filters : int\n minimum number of filters per Conv layer in CNN model\n cnn_max_filters : int\n maximum number of filters per Conv layer in CNN model\n cnn_min_fc_nodes : int\n minimum number of hidden nodes per Dense layer in CNN model\n cnn_max_fc_nodes : int\n maximum number of hidden nodes per Dense layer in CNN model\n deepconvlstm_min_conv_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n deepconvlstm_max_conv_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n deepconvlstm_min_conv_filters : int\n minimum number of filters per Conv layer in DeepConvLSTM model\n deepconvlstm_max_conv_filters : int\n maximum number of filters per Conv layer in DeepConvLSTM model\n deepconvlstm_min_lstm_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n deepconvlstm_max_lstm_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n deepconvlstm_min_lstm_dims : int\n minimum number of hidden nodes per LSTM layer in DeepConvLSTM model\n deepconvlstm_max_lstm_dims : int\n maximum number of hidden nodes per LSTM layer in DeepConvLSTM model\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n -------\n models : list\n List of compiled models\n "
models = []
for _ in range(0, number_of_models):
if (model_type is None):
current_model_type = ('CNN' if (np.random.random() < 0.5) else 'DeepConvLSTM')
else:
current_model_type = model_type
generate_model = None
if (current_model_type == 'CNN'):
generate_model = generate_CNN_model
hyperparameters = generate_CNN_hyperparameter_set(min_layers=cnn_min_layers, max_layers=cnn_max_layers, min_filters=cnn_min_filters, max_filters=cnn_max_filters, min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes, low_lr=low_lr, high_lr=high_lr, low_reg=low_reg, high_reg=high_reg)
if (current_model_type == 'DeepConvLSTM'):
generate_model = generate_DeepConvLSTM_model
hyperparameters = generate_DeepConvLSTM_hyperparameter_set(min_conv_layers=deepconvlstm_min_conv_layers, max_conv_layers=deepconvlstm_max_conv_layers, min_conv_filters=deepconvlstm_min_conv_filters, max_conv_filters=deepconvlstm_max_conv_filters, min_lstm_layers=deepconvlstm_min_lstm_layers, max_lstm_layers=deepconvlstm_max_lstm_layers, min_lstm_dims=deepconvlstm_min_lstm_dims, max_lstm_dims=deepconvlstm_max_lstm_dims, low_lr=low_lr, high_lr=high_lr, low_reg=low_reg, high_reg=high_reg)
models.append((generate_model(x_shape, number_of_classes, metrics=metrics, **hyperparameters), hyperparameters, current_model_type))
return models
| -7,336,653,422,567,980,000
|
Generate one or multiple untrained Keras models with random hyperparameters.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
number_of_classes : int
Number of classes for classification task
number_of_models : int
Number of models to generate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
model_type : str, optional
Type of model to build: 'CNN' or 'DeepConvLSTM'.
Default option None generates both models.
cnn_min_layers : int
minimum number of Conv layers in CNN model
cnn_max_layers : int
maximum number of Conv layers in CNN model
cnn_min_filters : int
minimum number of filters per Conv layer in CNN model
cnn_max_filters : int
maximum number of filters per Conv layer in CNN model
cnn_min_fc_nodes : int
minimum number of hidden nodes per Dense layer in CNN model
cnn_max_fc_nodes : int
maximum number of hidden nodes per Dense layer in CNN model
deepconvlstm_min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
deepconvlstm_max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
deepconvlstm_min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
deepconvlstm_min_lstm_layers : int
minimum number of LSTM layers in DeepConvLSTM model
deepconvlstm_max_lstm_layers : int
maximum number of LSTM layers in DeepConvLSTM model
deepconvlstm_min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
deepconvlstm_max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
models : list
List of compiled models
|
mcfly/modelgen.py
|
generate_models
|
wadpac/mcfly
|
python
|
def generate_models(x_shape, number_of_classes, number_of_models=5, metrics=['accuracy'], model_type=None, cnn_min_layers=5, cnn_max_layers=10, cnn_min_filters=25, cnn_max_filters=100, cnn_min_fc_nodes=500, cnn_max_fc_nodes=1000, deepconvlstm_min_conv_layers=3, deepconvlstm_max_conv_layers=7, deepconvlstm_min_conv_filters=25, deepconvlstm_max_conv_filters=100, deepconvlstm_min_lstm_layers=1, deepconvlstm_max_lstm_layers=3, deepconvlstm_min_lstm_dims=100, deepconvlstm_max_lstm_dims=500, low_lr=1, high_lr=4, low_reg=1, high_reg=3):
"\n Generate one or multiple untrained Keras models with random hyperparameters.\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n number_of_classes : int\n Number of classes for classification task\n number_of_models : int\n Number of models to generate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n model_type : str, optional\n Type of model to build: 'CNN' or 'DeepConvLSTM'.\n Default option None generates both models.\n cnn_min_layers : int\n minimum of Conv layers in CNN model\n cnn_max_layers : int\n maximum of Conv layers in CNN model\n cnn_min_filters : int\n minimum number of filters per Conv layer in CNN model\n cnn_max_filters : int\n maximum number of filters per Conv layer in CNN model\n cnn_min_fc_nodes : int\n minimum number of hidden nodes per Dense layer in CNN model\n cnn_max_fc_nodes : int\n maximum number of hidden nodes per Dense layer in CNN model\n deepconvlstm_min_conv_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n deepconvlstm_max_conv_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n deepconvlstm_min_conv_filters : int\n minimum number of filters per Conv layer in DeepConvLSTM model\n deepconvlstm_max_conv_filters : int\n maximum number of filters per Conv layer in DeepConvLSTM model\n deepconvlstm_min_lstm_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n deepconvlstm_max_lstm_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n deepconvlstm_min_lstm_dims : int\n minimum number of hidden nodes per LSTM layer in DeepConvLSTM model\n deepconvlstm_max_lstm_dims : int\n maximum number of hidden nodes per LSTM layer in DeepConvLSTM model\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n -------\n models : list\n List of compiled models\n "
models = []
for _ in range(0, number_of_models):
if (model_type is None):
current_model_type = ('CNN' if (np.random.random() < 0.5) else 'DeepConvLSTM')
else:
current_model_type = model_type
generate_model = None
if (current_model_type == 'CNN'):
generate_model = generate_CNN_model
hyperparameters = generate_CNN_hyperparameter_set(min_layers=cnn_min_layers, max_layers=cnn_max_layers, min_filters=cnn_min_filters, max_filters=cnn_max_filters, min_fc_nodes=cnn_min_fc_nodes, max_fc_nodes=cnn_max_fc_nodes, low_lr=low_lr, high_lr=high_lr, low_reg=low_reg, high_reg=high_reg)
if (current_model_type == 'DeepConvLSTM'):
generate_model = generate_DeepConvLSTM_model
hyperparameters = generate_DeepConvLSTM_hyperparameter_set(min_conv_layers=deepconvlstm_min_conv_layers, max_conv_layers=deepconvlstm_max_conv_layers, min_conv_filters=deepconvlstm_min_conv_filters, max_conv_filters=deepconvlstm_max_conv_filters, min_lstm_layers=deepconvlstm_min_lstm_layers, max_lstm_layers=deepconvlstm_max_lstm_layers, min_lstm_dims=deepconvlstm_min_lstm_dims, max_lstm_dims=deepconvlstm_max_lstm_dims, low_lr=low_lr, high_lr=high_lr, low_reg=low_reg, high_reg=high_reg)
models.append((generate_model(x_shape, number_of_classes, metrics=metrics, **hyperparameters), hyperparameters, current_model_type))
return models
|
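Typical usage, assuming mcfly and its Keras backend are installed; the shape and class count are arbitrary examples.

from mcfly import modelgen

x_shape = (100, 512, 9)   # 100 samples, 512 time steps, 9 channels
models = modelgen.generate_models(x_shape, number_of_classes=4,
                                  number_of_models=3)
for model, hyperparameters, model_type in models:
    # Each entry pairs a compiled Keras model with the sampled
    # hyperparameters and the architecture name ('CNN' or 'DeepConvLSTM').
    print(model_type, hyperparameters['learning_rate'])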
def generate_DeepConvLSTM_model(x_shape, class_number, filters, lstm_dims, learning_rate=0.01, regularization_rate=0.01, metrics=['accuracy']):
'\n Generate a model with convolution and LSTM layers.\n See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n class_number : int\n Number of classes for classification task\n filters : list of ints\n number of filters for each convolutional layer\n lstm_dims : list of ints\n number of hidden nodes for each LSTM layer\n learning_rate : float\n learning rate\n regularization_rate : float\n regularization rate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n\n Returns\n -------\n model : Keras model\n The compiled Keras model\n '
dim_length = x_shape[1]
dim_channels = x_shape[2]
output_dim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
model.add(Reshape(target_shape=(dim_length, dim_channels, 1)))
for filt in filters:
model.add(Convolution2D(filt, kernel_size=(3, 1), padding='same', kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Reshape(target_shape=(dim_length, (filters[(- 1)] * dim_channels))))
for lstm_dim in lstm_dims:
model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
model.add(Dropout(0.5))
model.add(GlobalAveragePooling1D())
model.add(Dense(units=output_dim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('softmax'))
loss = 'categorical_crossentropy'
model.compile(loss=loss, optimizer=Adam(lr=learning_rate), metrics=metrics)
return model
| 3,397,434,799,327,207,400
|
Generate a model with convolution and LSTM layers.
See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
lstm_dims : list of ints
number of hidden nodes for each LSTM layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
|
mcfly/modelgen.py
|
generate_DeepConvLSTM_model
|
wadpac/mcfly
|
python
|
def generate_DeepConvLSTM_model(x_shape, class_number, filters, lstm_dims, learning_rate=0.01, regularization_rate=0.01, metrics=['accuracy']):
'\n Generate a model with convolution and LSTM layers.\n See Ordonez et al., 2016, http://dx.doi.org/10.3390/s16010115\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n class_number : int\n Number of classes for classification task\n filters : list of ints\n number of filters for each convolutional layer\n lstm_dims : list of ints\n number of hidden nodes for each LSTM layer\n learning_rate : float\n learning rate\n regularization_rate : float\n regularization rate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n\n Returns\n -------\n model : Keras model\n The compiled Keras model\n '
dim_length = x_shape[1]
dim_channels = x_shape[2]
output_dim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
model.add(Reshape(target_shape=(dim_length, dim_channels, 1)))
for filt in filters:
model.add(Convolution2D(filt, kernel_size=(3, 1), padding='same', kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Reshape(target_shape=(dim_length, (filters[(- 1)] * dim_channels))))
for lstm_dim in lstm_dims:
model.add(CuDNNLSTM(units=lstm_dim, return_sequences=True))
model.add(Dropout(0.5))
model.add(GlobalAveragePooling1D())
model.add(Dense(units=output_dim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('softmax'))
loss = 'categorical_crossentropy'
model.compile(loss=loss, optimizer=Adam(lr=learning_rate), metrics=metrics)
return model
|
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes, learning_rate=0.01, regularization_rate=0.01, metrics=['accuracy']):
'\n Generate a convolutional neural network (CNN) model.\n\n The compiled Keras model is returned.\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n class_number : int\n Number of classes for classification task\n filters : list of ints\n number of filters for each convolutional layer\n fc_hidden_nodes : int\n number of hidden nodes for the hidden dense layer\n learning_rate : float\n learning rate\n regularization_rate : float\n regularization rate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n\n Returns\n -------\n model : Keras model\n The compiled Keras model\n '
dim_length = x_shape[1]
dim_channels = x_shape[2]
outputdim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
for filter_number in filters:
model.add(Convolution1D(filter_number, kernel_size=3, padding='same', kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes, kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(Activation('relu'))
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('softmax'))
loss = 'categorical_crossentropy'
model.compile(loss=loss, optimizer=Adam(lr=learning_rate), metrics=metrics)
return model
| -6,212,186,200,602,782,000
|
Generate a convolutional neural network (CNN) model.
The compiled Keras model is returned.
Parameters
----------
x_shape : tuple
Shape of the input dataset: (num_samples, num_timesteps, num_channels)
class_number : int
Number of classes for classification task
filters : list of ints
number of filters for each convolutional layer
fc_hidden_nodes : int
number of hidden nodes for the hidden dense layer
learning_rate : float
learning rate
regularization_rate : float
regularization rate
metrics : list
Metrics to calculate on the validation set.
See https://keras.io/metrics/ for possible values.
Returns
-------
model : Keras model
The compiled Keras model
|
mcfly/modelgen.py
|
generate_CNN_model
|
wadpac/mcfly
|
python
|
def generate_CNN_model(x_shape, class_number, filters, fc_hidden_nodes, learning_rate=0.01, regularization_rate=0.01, metrics=['accuracy']):
'\n Generate a convolutional neural network (CNN) model.\n\n The compiled Keras model is returned.\n\n Parameters\n ----------\n x_shape : tuple\n Shape of the input dataset: (num_samples, num_timesteps, num_channels)\n class_number : int\n Number of classes for classification task\n filters : list of ints\n number of filters for each convolutional layer\n fc_hidden_nodes : int\n number of hidden nodes for the hidden dense layer\n learning_rate : float\n learning rate\n regularization_rate : float\n regularization rate\n metrics : list\n Metrics to calculate on the validation set.\n See https://keras.io/metrics/ for possible values.\n\n Returns\n -------\n model : Keras model\n The compiled Keras model\n '
dim_length = x_shape[1]
dim_channels = x_shape[2]
outputdim = class_number
weightinit = 'lecun_uniform'
model = Sequential()
model.add(BatchNormalization(input_shape=(dim_length, dim_channels)))
for filter_number in filters:
model.add(Convolution1D(filter_number, kernel_size=3, padding='same', kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(units=fc_hidden_nodes, kernel_regularizer=l2(regularization_rate), kernel_initializer=weightinit))
model.add(Activation('relu'))
model.add(Dense(units=outputdim, kernel_initializer=weightinit))
model.add(BatchNormalization())
model.add(Activation('softmax'))
loss = 'categorical_crossentropy'
model.compile(loss=loss, optimizer=Adam(lr=learning_rate), metrics=metrics)
return model
|
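Building a single CNN directly, with hand-picked hyperparameters; x_shape[0] (the sample count) is never read, so None works there.

from mcfly.modelgen import generate_CNN_model

model = generate_CNN_model(x_shape=(None, 512, 9), class_number=4,
                           filters=[32, 32], fc_hidden_nodes=100,
                           learning_rate=1e-3, regularization_rate=1e-2)
model.summary()   # BatchNorm, two Conv1D blocks, Dense(100), Dense(4) softmax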
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10, min_filters=10, max_filters=100, min_fc_nodes=10, max_fc_nodes=2000, low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a hyperparameter set that define a CNN model.\n\n Parameters\n ----------\n min_layers : int\n minimum of Conv layers\n max_layers : int\n maximum of Conv layers\n min_filters : int\n minimum number of filters per Conv layer\n max_filters : int\n maximum number of filters per Conv layer\n min_fc_nodes : int\n minimum number of hidden nodes per Dense layer\n max_fc_nodes : int\n maximum number of hidden nodes per Dense layer\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n ----------\n hyperparameters : dict\n parameters for a CNN model\n '
hyperparameters = generate_base_hyper_parameter_set(low_lr, high_lr, low_reg, high_reg)
number_of_layers = np.random.randint(min_layers, (max_layers + 1))
hyperparameters['filters'] = np.random.randint(min_filters, (max_filters + 1), number_of_layers)
hyperparameters['fc_hidden_nodes'] = np.random.randint(min_fc_nodes, (max_fc_nodes + 1))
return hyperparameters
| -6,703,070,403,318,698,000
|
Generate a hyperparameter set that defines a CNN model.
Parameters
----------
min_layers : int
minimum number of Conv layers
max_layers : int
maximum number of Conv layers
min_filters : int
minimum number of filters per Conv layer
max_filters : int
maximum number of filters per Conv layer
min_fc_nodes : int
minimum number of hidden nodes per Dense layer
max_fc_nodes : int
maximum number of hidden nodes per Dense layer
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters : dict
parameters for a CNN model
|
mcfly/modelgen.py
|
generate_CNN_hyperparameter_set
|
wadpac/mcfly
|
python
|
def generate_CNN_hyperparameter_set(min_layers=1, max_layers=10, min_filters=10, max_filters=100, min_fc_nodes=10, max_fc_nodes=2000, low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a hyperparameter set that define a CNN model.\n\n Parameters\n ----------\n min_layers : int\n minimum of Conv layers\n max_layers : int\n maximum of Conv layers\n min_filters : int\n minimum number of filters per Conv layer\n max_filters : int\n maximum number of filters per Conv layer\n min_fc_nodes : int\n minimum number of hidden nodes per Dense layer\n max_fc_nodes : int\n maximum number of hidden nodes per Dense layer\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n ----------\n hyperparameters : dict\n parameters for a CNN model\n '
hyperparameters = generate_base_hyper_parameter_set(low_lr, high_lr, low_reg, high_reg)
number_of_layers = np.random.randint(min_layers, (max_layers + 1))
hyperparameters['filters'] = np.random.randint(min_filters, (max_filters + 1), number_of_layers)
hyperparameters['fc_hidden_nodes'] = np.random.randint(min_fc_nodes, (max_fc_nodes + 1))
return hyperparameters
|
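Sampling a CNN hyperparameter set, with the seed fixed so the draw is reproducible:

import numpy as np
from mcfly.modelgen import generate_CNN_hyperparameter_set

np.random.seed(0)
params = generate_CNN_hyperparameter_set(min_layers=2, max_layers=4)
# params holds 'learning_rate', 'regularization_rate', 'filters'
# (one entry per sampled Conv layer) and 'fc_hidden_nodes'.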
def generate_DeepConvLSTM_hyperparameter_set(min_conv_layers=1, max_conv_layers=10, min_conv_filters=10, max_conv_filters=100, min_lstm_layers=1, max_lstm_layers=5, min_lstm_dims=10, max_lstm_dims=100, low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a hyperparameter set that defines a DeepConvLSTM model.\n\n Parameters\n ----------\n min_conv_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n max_conv_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n min_conv_filters : int\n minimum number of filters per Conv layer in DeepConvLSTM model\n max_conv_filters : int\n maximum number of filters per Conv layer in DeepConvLSTM model\n min_lstm_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n max_lstm_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n min_lstm_dims : int\n minimum number of hidden nodes per LSTM layer in DeepConvLSTM model\n max_lstm_dims : int\n maximum number of hidden nodes per LSTM layer in DeepConvLSTM model\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n ----------\n hyperparameters: dict\n hyperparameters for a DeepConvLSTM model\n '
hyperparameters = generate_base_hyper_parameter_set(low_lr, high_lr, low_reg, high_reg)
number_of_conv_layers = np.random.randint(min_conv_layers, (max_conv_layers + 1))
hyperparameters['filters'] = np.random.randint(min_conv_filters, (max_conv_filters + 1), number_of_conv_layers).tolist()
number_of_lstm_layers = np.random.randint(min_lstm_layers, (max_lstm_layers + 1))
hyperparameters['lstm_dims'] = np.random.randint(min_lstm_dims, (max_lstm_dims + 1), number_of_lstm_layers).tolist()
return hyperparameters
| 2,699,689,129,207,129,000
|
Generate a hyperparameter set that defines a DeepConvLSTM model.
Parameters
----------
min_conv_layers : int
minimum number of Conv layers in DeepConvLSTM model
max_conv_layers : int
maximum number of Conv layers in DeepConvLSTM model
min_conv_filters : int
minimum number of filters per Conv layer in DeepConvLSTM model
max_conv_filters : int
maximum number of filters per Conv layer in DeepConvLSTM model
min_lstm_layers : int
minimum number of LSTM layers in DeepConvLSTM model
max_lstm_layers : int
maximum number of LSTM layers in DeepConvLSTM model
min_lstm_dims : int
minimum number of hidden nodes per LSTM layer in DeepConvLSTM model
max_lstm_dims : int
maximum number of hidden nodes per LSTM layer in DeepConvLSTM model
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
----------
hyperparameters: dict
hyperparameters for a DeepConvLSTM model
|
mcfly/modelgen.py
|
generate_DeepConvLSTM_hyperparameter_set
|
wadpac/mcfly
|
python
|
def generate_DeepConvLSTM_hyperparameter_set(min_conv_layers=1, max_conv_layers=10, min_conv_filters=10, max_conv_filters=100, min_lstm_layers=1, max_lstm_layers=5, min_lstm_dims=10, max_lstm_dims=100, low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a hyperparameter set that defines a DeepConvLSTM model.\n\n Parameters\n ----------\n min_conv_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n max_conv_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n min_conv_filters : int\n minimum number of filters per Conv layer in DeepConvLSTM model\n max_conv_filters : int\n maximum number of filters per Conv layer in DeepConvLSTM model\n min_lstm_layers : int\n minimum number of Conv layers in DeepConvLSTM model\n max_lstm_layers : int\n maximum number of Conv layers in DeepConvLSTM model\n min_lstm_dims : int\n minimum number of hidden nodes per LSTM layer in DeepConvLSTM model\n max_lstm_dims : int\n maximum number of hidden nodes per LSTM layer in DeepConvLSTM model\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n ----------\n hyperparameters: dict\n hyperparameters for a DeepConvLSTM model\n '
hyperparameters = generate_base_hyper_parameter_set(low_lr, high_lr, low_reg, high_reg)
number_of_conv_layers = np.random.randint(min_conv_layers, (max_conv_layers + 1))
hyperparameters['filters'] = np.random.randint(min_conv_filters, (max_conv_filters + 1), number_of_conv_layers).tolist()
number_of_lstm_layers = np.random.randint(min_lstm_layers, (max_lstm_layers + 1))
hyperparameters['lstm_dims'] = np.random.randint(min_lstm_dims, (max_lstm_dims + 1), number_of_lstm_layers).tolist()
return hyperparameters
|
def generate_base_hyper_parameter_set(low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a base set of hyperparameters that are necessary for any\n model, but sufficient for none.\n\n Parameters\n ----------\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n -------\n hyperparameters : dict\n basis hyperpameters\n '
hyperparameters = {}
hyperparameters['learning_rate'] = get_learning_rate(low_lr, high_lr)
hyperparameters['regularization_rate'] = get_regularization(low_reg, high_reg)
return hyperparameters
| 5,598,553,892,270,986,000
|
Generate a base set of hyperparameters that are necessary for any
model, but sufficient for none.
Parameters
----------
low_lr : float
minimum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
high_lr : float
maximum of log range for learning rate: learning rate is sampled
between `10**(-low_lr)` and `10**(-high_lr)`
low_reg : float
minimum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
high_reg : float
maximum of log range for regularization rate: regularization rate is
sampled between `10**(-low_reg)` and `10**(-high_reg)`
Returns
-------
hyperparameters : dict
basic hyperparameters
|
mcfly/modelgen.py
|
generate_base_hyper_parameter_set
|
wadpac/mcfly
|
python
|
def generate_base_hyper_parameter_set(low_lr=1, high_lr=4, low_reg=1, high_reg=4):
' Generate a base set of hyperparameters that are necessary for any\n model, but sufficient for none.\n\n Parameters\n ----------\n low_lr : float\n minimum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n high_lr : float\n maximum of log range for learning rate: learning rate is sampled\n between `10**(-low_reg)` and `10**(-high_reg)`\n low_reg : float\n minimum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n high_reg : float\n maximum of log range for regularization rate: regularization rate is\n sampled between `10**(-low_reg)` and `10**(-high_reg)`\n\n Returns\n -------\n hyperparameters : dict\n basis hyperpameters\n '
hyperparameters = {}
hyperparameters['learning_rate'] = get_learning_rate(low_lr, high_lr)
hyperparameters['regularization_rate'] = get_regularization(low_reg, high_reg)
return hyperparameters
|
def get_learning_rate(low=1, high=4):
' Return random learning rate 10^-n where n is sampled uniformly between\n low and high bounds.\n\n Parameters\n ----------\n low : float\n low bound\n high : float\n high bound\n\n Returns\n -------\n learning_rate : float\n learning rate\n '
return (10 ** (- np.random.uniform(low, high)))
| 1,192,930,097,796,020,000
|
Return random learning rate 10^-n where n is sampled uniformly between
low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
learning_rate : float
learning rate
|
mcfly/modelgen.py
|
get_learning_rate
|
wadpac/mcfly
|
python
|
def get_learning_rate(low=1, high=4):
' Return random learning rate 10^-n where n is sampled uniformly between\n low and high bounds.\n\n Parameters\n ----------\n low : float\n low bound\n high : float\n high bound\n\n Returns\n -------\n learning_rate : float\n learning rate\n '
return (10 ** (- np.random.uniform(low, high)))
|
def get_regularization(low=1, high=4):
' Return random regularization rate 10^-n where n is sampled uniformly\n between low and high bounds.\n\n Parameters\n ----------\n low : float\n low bound\n high : float\n high bound\n\n Returns\n -------\n regularization_rate : float\n regularization rate\n '
return (10 ** (- np.random.uniform(low, high)))
| -6,022,252,116,884,650,000
|
Return random regularization rate 10^-n where n is sampled uniformly
between low and high bounds.
Parameters
----------
low : float
low bound
high : float
high bound
Returns
-------
regularization_rate : float
regularization rate
|
mcfly/modelgen.py
|
get_regularization
|
wadpac/mcfly
|
python
|
def get_regularization(low=1, high=4):
' Return random regularization rate 10^-n where n is sampled uniformly\n between low and high bounds.\n\n Parameters\n ----------\n low : float\n low bound\n high : float\n high bound\n\n Returns\n -------\n regularization_rate : float\n regularization rate\n '
return (10 ** (- np.random.uniform(low, high)))
|
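Both rate samplers use the same log-uniform trick: draw the exponent n uniformly and return 10**-n, which spreads samples evenly per decade. A self-contained illustration:

import numpy as np

np.random.seed(42)
samples = 10 ** -np.random.uniform(1, 4, size=5)
# All samples fall in [1e-4, 1e-1], with equal probability mass in each
# of the decades [1e-4, 1e-3], [1e-3, 1e-2] and [1e-2, 1e-1].
print(samples)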
def drawSparseMatrix(ax, mat, **kwargs):
"Draw a view of a matrix into the axes.\n\n Parameters\n ----------\n ax : mpl axis instance, optional\n Axis instance where the matrix will be plotted.\n\n mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix\n\n Returns\n -------\n mpl.lines.line2d\n\n Examples\n --------\n >>> import numpy as np\n >>> import pygimli as pg\n >>> from pygimli.viewer.mpl import drawSparseMatrix\n >>> A = pg.randn((10,10), seed=0)\n >>> SM = pg.core.SparseMapMatrix()\n >>> for i in range(10):\n ... SM.setVal(i, i, 5.0)\n >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)\n >>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')\n >>> _ = drawSparseMatrix(ax2, SM, color='green')\n "
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
| 816,415,907,612,042,400
|
Draw a view of a matrix into the axes.
Parameters
----------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix
Returns
-------
mpl.lines.line2d
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.viewer.mpl import drawSparseMatrix
>>> A = pg.randn((10,10), seed=0)
>>> SM = pg.core.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, i, 5.0)
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
>>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
>>> _ = drawSparseMatrix(ax2, SM, color='green')
|
pygimli/viewer/mpl/matrixview.py
|
drawSparseMatrix
|
JuliusHen/gimli
|
python
|
def drawSparseMatrix(ax, mat, **kwargs):
"Draw a view of a matrix into the axes.\n\n Parameters\n ----------\n ax : mpl axis instance, optional\n Axis instance where the matrix will be plotted.\n\n mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix\n\n Returns\n -------\n mpl.lines.line2d\n\n Examples\n --------\n >>> import numpy as np\n >>> import pygimli as pg\n >>> from pygimli.viewer.mpl import drawSparseMatrix\n >>> A = pg.randn((10,10), seed=0)\n >>> SM = pg.core.SparseMapMatrix()\n >>> for i in range(10):\n ... SM.setVal(i, i, 5.0)\n >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)\n >>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')\n >>> _ = drawSparseMatrix(ax2, SM, color='green')\n "
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
|
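Outside pygimli, the same mechanism can be reproduced with scipy and matplotlib alone: convert to COO, shift the row/column indices by the offsets, and hand the matrix to Axes.spy. A sketch:

import matplotlib.pyplot as plt
import scipy.sparse as sp

mat = sp.random(10, 10, density=0.2, format='coo', random_state=0)
mat.row += 5   # rowOffset
mat.col += 5   # colOffset
fig, ax = plt.subplots()
ax.spy(mat, color='blue')
ax.autoscale(enable=True, axis='both', tight=True)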
def drawBlockMatrix(ax, mat, **kwargs):
'Draw a view of a matrix into the axes.\n\n Arguments\n ---------\n\n ax : mpl axis instance, optional\n Axis instance where the matrix will be plotted.\n\n mat: pg.Matrix.BlockMatrix\n\n Keyword Arguments\n -----------------\n spy: bool [False]\n Draw all matrix entries instead of colored blocks\n\n Returns\n -------\n ax:\n\n Examples\n --------\n >>> import numpy as np\n >>> import pygimli as pg\n >>> I = pg.matrix.IdentityMatrix(10)\n >>> SM = pg.matrix.SparseMapMatrix()\n >>> for i in range(10):\n ... SM.setVal(i, 10 - i, 5.0)\n ... SM.setVal(i, i, 5.0)\n >>> B = pg.matrix.BlockMatrix()\n >>> B.add(I, 0, 0)\n 0\n >>> B.add(SM, 10, 10)\n 1\n >>> print(B)\n pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.\n >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)\n >>> _ = pg.show(B, ax=ax1)\n >>> _ = pg.show(B, spy=True, ax=ax2)\n '
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap('Set3', len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati, rowOffset=e.rowStart, colOffset=e.colStart, color=cMap(mid)))
return (gci, None)
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = (mat.mat(mid).rows() - 0.1)
widthx = (mat.mat(mid).cols() - 0.1)
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart], [(e.colStart + widthx), (e.rowStart + widthy)], marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
(gci, cBar) = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label('Matrix ID')
if (len(mat.entries()) > 10):
gci.set_cmap('viridis')
return (gci, cBar)
| 7,353,387,242,353,312,000
|
Draw a view of a matrix into the axes.
Arguments
---------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.Matrix.BlockMatrix
Keyword Arguments
-----------------
spy: bool [False]
Draw all matrix entries instead of colored blocks
Returns
-------
ax:
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> I = pg.matrix.IdentityMatrix(10)
>>> SM = pg.matrix.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, 10 - i, 5.0)
... SM.setVal(i, i, 5.0)
>>> B = pg.matrix.BlockMatrix()
>>> B.add(I, 0, 0)
0
>>> B.add(SM, 10, 10)
1
>>> print(B)
pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
>>> _ = pg.show(B, ax=ax1)
>>> _ = pg.show(B, spy=True, ax=ax2)
|
pygimli/viewer/mpl/matrixview.py
|
drawBlockMatrix
|
JuliusHen/gimli
|
python
|
def drawBlockMatrix(ax, mat, **kwargs):
'Draw a view of a matrix into the axes.\n\n Arguments\n ---------\n\n ax : mpl axis instance, optional\n Axis instance where the matrix will be plotted.\n\n mat: pg.Matrix.BlockMatrix\n\n Keyword Arguments\n -----------------\n spy: bool [False]\n Draw all matrix entries instead of colored blocks\n\n Returns\n -------\n ax:\n\n Examples\n --------\n >>> import numpy as np\n >>> import pygimli as pg\n >>> I = pg.matrix.IdentityMatrix(10)\n >>> SM = pg.matrix.SparseMapMatrix()\n >>> for i in range(10):\n ... SM.setVal(i, 10 - i, 5.0)\n ... SM.setVal(i, i, 5.0)\n >>> B = pg.matrix.BlockMatrix()\n >>> B.add(I, 0, 0)\n 0\n >>> B.add(SM, 10, 10)\n 1\n >>> print(B)\n pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.\n >>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)\n >>> _ = pg.show(B, ax=ax1)\n >>> _ = pg.show(B, spy=True, ax=ax2)\n '
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap('Set3', len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati, rowOffset=e.rowStart, colOffset=e.colStart, color=cMap(mid)))
return (gci, None)
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = (mat.mat(mid).rows() - 0.1)
widthx = (mat.mat(mid).cols() - 0.1)
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart], [(e.colStart + widthx), (e.rowStart + widthy)], marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
(gci, cBar) = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label('Matrix ID')
if (len(mat.entries()) > 10):
gci.set_cmap('viridis')
return (gci, cBar)
|
def sigma_pentagonal_numbers(limit):
'\n >>> list(sigma_pentagonal_numbers(16))\n [1, 2, 5, 7, 12, 15]\n '
n = 1
p = 1
while (p <= limit):
(yield p)
if (n > 0):
n = (- n)
else:
n = ((- n) + 1)
p = ((((3 * n) * n) - n) // 2)
| -6,706,061,342,674,910,000
|
>>> list(sigma_pentagonal_numbers(16))
[1, 2, 5, 7, 12, 15]
|
advent/year2015/day20.py
|
sigma_pentagonal_numbers
|
davweb/advent-of-code
|
python
|
def sigma_pentagonal_numbers(limit):
'\n >>> list(sigma_pentagonal_numbers(16))\n [1, 2, 5, 7, 12, 15]\n '
n = 1
p = 1
while (p <= limit):
(yield p)
if (n > 0):
n = (- n)
else:
n = ((- n) + 1)
p = ((((3 * n) * n) - n) // 2)
|
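The generator walks the generalized pentagonal numbers g(k) = k(3k - 1)/2 evaluated at k = 1, -1, 2, -2, ...; a closed-form cross-check:

def pentagonal(k):
    return k * (3 * k - 1) // 2

print([pentagonal(k) for k in (1, -1, 2, -2, 3, -3)])
# [1, 2, 5, 7, 12, 15] -- matches list(sigma_pentagonal_numbers(16))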
@cache
def presents_for_house(house):
'\n https://math.stackexchange.com/a/22744\n\n >>> presents_for_house(1)\n 10\n >>> presents_for_house(2)\n 30\n >>> presents_for_house(3)\n 40\n >>> presents_for_house(8)\n 150\n >>> presents_for_house(9)\n 130\n '
if (house == 1):
return 10
presents = 0
sign = sigma_sign_generator()
for p in sigma_pentagonal_numbers(house):
n = (house - p)
if (n == 0):
presents += ((house * next(sign)) * 10)
else:
presents += (presents_for_house(n) * next(sign))
return presents
| -763,015,109,654,025,600
|
https://math.stackexchange.com/a/22744
>>> presents_for_house(1)
10
>>> presents_for_house(2)
30
>>> presents_for_house(3)
40
>>> presents_for_house(8)
150
>>> presents_for_house(9)
130
|
advent/year2015/day20.py
|
presents_for_house
|
davweb/advent-of-code
|
python
|
@cache
def presents_for_house(house):
'\n https://math.stackexchange.com/a/22744\n\n >>> presents_for_house(1)\n 10\n >>> presents_for_house(2)\n 30\n >>> presents_for_house(3)\n 40\n >>> presents_for_house(8)\n 150\n >>> presents_for_house(9)\n 130\n '
if (house == 1):
return 10
presents = 0
sign = sigma_sign_generator()
for p in sigma_pentagonal_numbers(house):
n = (house - p)
if (n == 0):
presents += ((house * next(sign)) * 10)
else:
presents += (presents_for_house(n) * next(sign))
return presents
|
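presents_for_house(n) computes 10 * sigma(n) (elf e leaves 10*e presents at every multiple of e) via the pentagonal number recurrence for the divisor sum; a brute-force check of the doctest values:

def presents_brute_force(house):
    # 10 presents per elf, one elf per divisor of the house number.
    return 10 * sum(d for d in range(1, house + 1) if house % d == 0)

print([presents_brute_force(h) for h in (1, 2, 3, 8, 9)])
# [10, 30, 40, 150, 130]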
def part1(data):
'\n #\xa0Takes too long so commented out\n # >>> part1(INPUT)\n # 776160\n '
house = 0
presents = 0
max = 0
while (presents < data):
house += 1
presents = presents_for_house(house)
if (presents > max):
max = presents
print(max)
return house
| -2,077,811,953,429,830,100
|
# Takes too long so commented out
# >>> part1(INPUT)
# 776160
|
advent/year2015/day20.py
|
part1
|
davweb/advent-of-code
|
python
|
def part1(data):
'\n #\xa0Takes too long so commented out\n # >>> part1(INPUT)\n # 776160\n '
house = 0
presents = 0
max = 0
while (presents < data):
house += 1
presents = presents_for_house(house)
if (presents > max):
max = presents
print(max)
return house
|
def part2(data):
'\n >>> part2(INPUT)\n 786240\n '
upper_limit = INPUT
house = ([0] * (upper_limit + 1))
elf = 1
while (elf <= upper_limit):
elf_end = min((elf * 50), upper_limit)
for number in range(elf, (elf_end + 1), elf):
index = (number - 1)
house[index] += (11 * elf)
if (house[index] >= data):
upper_limit = min(number, upper_limit)
elf += 1
for (i, value) in enumerate(house):
if (value >= data):
return (i + 1)
raise ValueError()
| -7,750,473,585,390,069,000
|
>>> part2(INPUT)
786240
|
advent/year2015/day20.py
|
part2
|
davweb/advent-of-code
|
python
|
def part2(data):
'\n >>> part2(INPUT)\n 786240\n '
upper_limit = INPUT
house = ([0] * (upper_limit + 1))
elf = 1
while (elf <= upper_limit):
elf_end = min((elf * 50), upper_limit)
for number in range(elf, (elf_end + 1), elf):
index = (number - 1)
house[index] += (11 * elf)
if (house[index] >= data):
upper_limit = min(number, upper_limit)
elf += 1
for (i, value) in enumerate(house):
if (value >= data):
return (i + 1)
raise ValueError()
|
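The core of part2 is a divisor sieve with the puzzle's two twists: each elf now leaves 11 presents and stops after 50 houses. A self-contained sketch with a small, made-up target:

def first_house(target, max_house=1000):
    house = [0] * (max_house + 1)
    for elf in range(1, max_house + 1):
        # Elf `elf` visits its first 50 multiples, 11*elf presents each.
        for number in range(elf, min(elf * 50, max_house) + 1, elf):
            house[number] += 11 * elf
    for number, presents in enumerate(house):
        if presents >= target:
            return number

print(first_house(150))   # 8 -- house 8 gets 11 * (1 + 2 + 4 + 8) = 165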
def get_version(version=None):
'Return a PEP 440-compliant version number from VERSION.'
version = get_complete_version(version)
main = get_main_version(version)
sub = ''
if ((version[3] == 'alpha') and (version[4] == 0)):
git_changeset = get_git_changeset()
if git_changeset:
sub = ('.dev%s' % git_changeset)
elif (version[3] != 'final'):
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = (mapping[version[3]] + str(version[4]))
return (main + sub)
| 4,286,137,061,568,923,600
|
Return a PEP 440-compliant version number from VERSION.
|
django-src/utils/version.py
|
get_version
|
ch1huizong/Scode
|
python
|
def get_version(version=None):
version = get_complete_version(version)
main = get_main_version(version)
    sub = ''
if ((version[3] == 'alpha') and (version[4] == 0)):
git_changeset = get_git_changeset()
if git_changeset:
sub = ('.dev%s' % git_changeset)
elif (version[3] != 'final'):
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = (mapping[version[3]] + str(version[4]))
return (main + sub)
|
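For tagged releases the VERSION tuple maps onto a PEP 440 string mechanically; a minimal re-derivation follows (a sketch that omits the git-changeset branch used for untagged alphas and simply treats alphas like other pre-releases, so it is not a drop-in replacement for get_version):

def pep440_from_tuple(version):
    # version is (major, minor, micro, release level, serial).
    assert len(version) == 5 and version[3] in ('alpha', 'beta', 'rc', 'final')
    parts = 2 if version[2] == 0 else 3  # drop a trailing .0 micro
    main = '.'.join(str(x) for x in version[:parts])
    if version[3] == 'final':
        return main
    mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
    return main + mapping[version[3]] + str(version[4])

assert pep440_from_tuple((2, 0, 1, 'final', 0)) == '2.0.1'
assert pep440_from_tuple((2, 1, 0, 'rc', 2)) == '2.1rc2'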
def get_main_version(version=None):
'Return main version (X.Y[.Z]) from VERSION.'
version = get_complete_version(version)
parts = (2 if (version[2] == 0) else 3)
return '.'.join((str(x) for x in version[:parts]))
| 9,013,525,789,992,150,000
|
Return main version (X.Y[.Z]) from VERSION.
|
django-src/utils/version.py
|
get_main_version
|
ch1huizong/Scode
|
python
|
def get_main_version(version=None):
version = get_complete_version(version)
parts = (2 if (version[2] == 0) else 3)
return '.'.join((str(x) for x in version[:parts]))
|
def get_complete_version(version=None):
'\n Return a tuple of the django version. If version argument is non-empty,\n check for correctness of the tuple provided.\n '
if (version is None):
from django import VERSION as version
else:
assert (len(version) == 5)
assert (version[3] in ('alpha', 'beta', 'rc', 'final'))
return version
| 2,632,988,490,259,573,000
|
Return a tuple of the django version. If version argument is non-empty,
check for correctness of the tuple provided.
|
django-src/utils/version.py
|
get_complete_version
|
ch1huizong/Scode
|
python
|
def get_complete_version(version=None):
'\n Return a tuple of the django version. If version argument is non-empty,\n check for correctness of the tuple provided.\n '
if (version is None):
from django import VERSION as version
else:
assert (len(version) == 5)
assert (version[3] in ('alpha', 'beta', 'rc', 'final'))
return version
|
@functools.lru_cache()
def get_git_changeset():
"Return a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n "
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| -4,377,290,538,242,741,000
|
Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
|
django-src/utils/version.py
|
get_git_changeset
|
ch1huizong/Scode
|
python
|
@functools.lru_cache()
def get_git_changeset():
"Return a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n "
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
|
def findMedianSortedArrays(self, nums1, nums2):
'\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n '
odd = ((len(nums1) + len(nums2)) % 2)
if odd:
half = ((len(nums1) + len(nums2)) // 2)
else:
half = (((len(nums1) + len(nums2)) // 2) - 1)
for _ in range(half):
__ = self.pop_num(nums1, nums2)
if odd:
return float(self.pop_num(nums1, nums2))
else:
t1 = self.pop_num(nums1, nums2)
t2 = self.pop_num(nums1, nums2)
return ((t1 + t2) / 2)
| -5,411,036,675,623,418,000
|
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
|
4+Median+of+Two+Sorted+Arrays/alg.py
|
findMedianSortedArrays
|
xiaoh12/leetcode
|
python
|
def findMedianSortedArrays(self, nums1, nums2):
'\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n '
odd = ((len(nums1) + len(nums2)) % 2)
if odd:
half = ((len(nums1) + len(nums2)) // 2)
else:
half = (((len(nums1) + len(nums2)) // 2) - 1)
for _ in range(half):
__ = self.pop_num(nums1, nums2)
if odd:
return float(self.pop_num(nums1, nums2))
else:
t1 = self.pop_num(nums1, nums2)
t2 = self.pop_num(nums1, nums2)
return ((t1 + t2) / 2)
|
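The method relies on a pop_num helper that this record does not show; presumably it removes and returns the smaller head of the two sorted lists, along these lines (an assumption for illustration, not the repository's code). Note that repeatedly popping is O(m + n), not the O(log(m + n)) partition approach the problem nominally asks for:

def pop_num(nums1, nums2):
    # Remove and return the smaller front element of the two sorted lists.
    if not nums1:
        return nums2.pop(0)
    if not nums2:
        return nums1.pop(0)
    return nums1.pop(0) if nums1[0] <= nums2[0] else nums2.pop(0)

a, b = [1, 3], [2]
assert [pop_num(a, b) for _ in range(3)] == [1, 2, 3]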
def get_port(self, context):
'Get available port for consoles.'
return CONF.console_vmrc_port
| 2,535,733,782,578,544,000
|
Get available port for consoles.
|
nova/console/vmrc.py
|
get_port
|
ONOP/nova
|
python
|
def get_port(self, context):
return CONF.console_vmrc_port
|
def setup_console(self, context, console):
'Sets up console.'
pass
| -4,836,120,128,759,881,000
|
Sets up console.
|
nova/console/vmrc.py
|
setup_console
|
ONOP/nova
|
python
|
def setup_console(self, context, console):
pass
|
def teardown_console(self, context, console):
'Tears down console.'
pass
| -6,899,172,463,449,520,000
|
Tears down console.
|
nova/console/vmrc.py
|
teardown_console
|
ONOP/nova
|
python
|
def teardown_console(self, context, console):
pass
|
def init_host(self):
'Perform console initialization.'
pass
| 5,688,657,367,007,553,000
|
Perform console initialization.
|
nova/console/vmrc.py
|
init_host
|
ONOP/nova
|
python
|
def init_host(self):
pass
|
def fix_pool_password(self, password):
'Encode password.'
return password
| 1,070,836,154,636,971,800
|
Encode password.
|
nova/console/vmrc.py
|
fix_pool_password
|
ONOP/nova
|
python
|
def fix_pool_password(self, password):
return password
|
def generate_password(self, vim_session, pool, instance_name):
"Returns VMRC Connection credentials.\n\n Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.\n\n "
(username, password) = (pool['username'], pool['password'])
vms = vim_session._call_method(vim_util, 'get_objects', 'VirtualMachine', ['name', 'config.files.vmPathName'])
vm_ds_path_name = None
vm_ref = None
for vm in vms:
vm_name = None
ds_path_name = None
for prop in vm.propSet:
if (prop.name == 'name'):
vm_name = prop.val
elif (prop.name == 'config.files.vmPathName'):
ds_path_name = prop.val
if (vm_name == instance_name):
vm_ref = vm.obj
vm_ds_path_name = ds_path_name
break
if (vm_ref is None):
raise exception.InstanceNotFound(instance_id=instance_name)
json_data = jsonutils.dumps({'vm_id': vm_ds_path_name, 'username': username, 'password': password})
return base64.b64encode(json_data)
| -17,592,870,140,672,932
|
Returns VMRC Connection credentials.
Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
|
nova/console/vmrc.py
|
generate_password
|
ONOP/nova
|
python
|
def generate_password(self, vim_session, pool, instance_name):
"Returns VMRC Connection credentials.\n\n Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.\n\n "
(username, password) = (pool['username'], pool['password'])
vms = vim_session._call_method(vim_util, 'get_objects', 'VirtualMachine', ['name', 'config.files.vmPathName'])
vm_ds_path_name = None
vm_ref = None
for vm in vms:
vm_name = None
ds_path_name = None
for prop in vm.propSet:
if (prop.name == 'name'):
vm_name = prop.val
elif (prop.name == 'config.files.vmPathName'):
ds_path_name = prop.val
if (vm_name == instance_name):
vm_ref = vm.obj
vm_ds_path_name = ds_path_name
break
if (vm_ref is None):
raise exception.InstanceNotFound(instance_id=instance_name)
json_data = jsonutils.dumps({'vm_id': vm_ds_path_name, 'username': username, 'password': password})
return base64.b64encode(json_data)
|
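The credential blob above is just base64-encoded JSON. One caveat: base64.b64encode(json_data) accepts a str only under Python 2; on Python 3 the JSON string must be encoded to bytes first, as this stand-alone round-trip sketch (with made-up values) does:

import base64
import json

blob = base64.b64encode(json.dumps(
    {'vm_id': '[ds1] vm/vm.vmx', 'username': 'user', 'password': 'secret'}
).encode())
decoded = json.loads(base64.b64decode(blob))
assert decoded['vm_id'] == '[ds1] vm/vm.vmx'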
def is_otp(self):
'Is one time password or not.'
return False
| -8,124,443,781,481,316,000
|
Is one time password or not.
|
nova/console/vmrc.py
|
is_otp
|
ONOP/nova
|
python
|
def is_otp(self):
return False
|
def generate_password(self, vim_session, pool, instance_name):
"Returns a VMRC Session.\n\n Return string is of the form '<VM MOID>:<VMRC Ticket>'.\n\n "
vms = vim_session._call_method(vim_util, 'get_objects', 'VirtualMachine', ['name'])
vm_ref = None
for vm in vms:
if (vm.propSet[0].val == instance_name):
vm_ref = vm.obj
if (vm_ref is None):
raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = vim_session._call_method(vim_session._get_vim(), 'AcquireCloneTicket', vim_session._get_vim().get_service_content().sessionManager)
json_data = jsonutils.dumps({'vm_id': str(vm_ref.value), 'username': virtual_machine_ticket, 'password': virtual_machine_ticket})
return base64.b64encode(json_data)
| 6,486,637,346,679,642,000
|
Returns a VMRC Session.
Return string is of the form '<VM MOID>:<VMRC Ticket>'.
|
nova/console/vmrc.py
|
generate_password
|
ONOP/nova
|
python
|
def generate_password(self, vim_session, pool, instance_name):
"Returns a VMRC Session.\n\n Return string is of the form '<VM MOID>:<VMRC Ticket>'.\n\n "
vms = vim_session._call_method(vim_util, 'get_objects', 'VirtualMachine', ['name'])
vm_ref = None
for vm in vms:
if (vm.propSet[0].val == instance_name):
vm_ref = vm.obj
if (vm_ref is None):
raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = vim_session._call_method(vim_session._get_vim(), 'AcquireCloneTicket', vim_session._get_vim().get_service_content().sessionManager)
json_data = jsonutils.dumps({'vm_id': str(vm_ref.value), 'username': virtual_machine_ticket, 'password': virtual_machine_ticket})
return base64.b64encode(json_data)
|
def is_otp(self):
'Is one time password or not.'
return True
| 3,635,665,800,062,393,300
|
Is one time password or not.
|
nova/console/vmrc.py
|
is_otp
|
ONOP/nova
|
python
|
def is_otp(self):
return True
|
def testInputOutput(self):
'\n Test InputOutput\n '
model = ProcessMaker_PMIO.models.input_output.InputOutput()
| -8,925,060,231,664,973,000
|
Test InputOutput
|
test/test_input_output.py
|
testInputOutput
|
ProcessMaker/pmio-sdk-python
|
python
|
def testInputOutput(self):
'\n \n '
model = ProcessMaker_PMIO.models.input_output.InputOutput()
|
def get_correctness_test_inputs(use_numpy, with_distribution, x_train, y_train, x_predict):
'Generates the inputs for correctness check when enable Keras with DS.'
global_batch_size = 64
batch_size = global_batch_size
use_per_core_batch_size = (with_distribution and (with_distribution.__class__.__name__ != 'TPUStrategy'))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {'batch_size': batch_size, 'x': x_train, 'y': y_train, 'epochs': 1, 'shuffle': False}
eval_inputs = {'batch_size': batch_size, 'x': x_train, 'y': y_train}
if with_distribution:
if use_per_core_batch_size:
predict_batch_size = (len(x_predict) // with_distribution.num_replicas_in_sync)
else:
predict_batch_size = len(x_predict)
else:
predict_batch_size = None
predict_inputs = {'batch_size': predict_batch_size, 'x': np.array(x_predict, dtype=np.float32)}
else:
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, with_distribution)
training_inputs = {'batch_size': None, 'x': x, 'y': None, 'epochs': 1, 'shuffle': False, 'steps_per_epoch': (len(x_train) // global_batch_size)}
eval_inputs = {'batch_size': None, 'x': x, 'y': None, 'steps': 20}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset, predict_batch_size, with_distribution)
predict_inputs = {'batch_size': None, 'steps': 1, 'x': predict_dataset}
return (training_inputs, eval_inputs, predict_inputs)
| 4,754,634,288,906,665,000
|
Generates the inputs for correctness check when enable Keras with DS.
|
tensorflow/contrib/distribute/python/keras_test.py
|
get_correctness_test_inputs
|
unnir/tensorflow
|
python
|
def get_correctness_test_inputs(use_numpy, with_distribution, x_train, y_train, x_predict):
global_batch_size = 64
batch_size = global_batch_size
use_per_core_batch_size = (with_distribution and (with_distribution.__class__.__name__ != 'TPUStrategy'))
if use_per_core_batch_size:
batch_size //= with_distribution.num_replicas_in_sync
if use_numpy:
training_inputs = {'batch_size': batch_size, 'x': x_train, 'y': y_train, 'epochs': 1, 'shuffle': False}
eval_inputs = {'batch_size': batch_size, 'x': x_train, 'y': y_train}
if with_distribution:
if use_per_core_batch_size:
predict_batch_size = (len(x_predict) // with_distribution.num_replicas_in_sync)
else:
predict_batch_size = len(x_predict)
else:
predict_batch_size = None
predict_inputs = {'batch_size': predict_batch_size, 'x': np.array(x_predict, dtype=np.float32)}
else:
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
x = batch_wrapper(train_dataset, batch_size, with_distribution)
training_inputs = {'batch_size': None, 'x': x, 'y': None, 'epochs': 1, 'shuffle': False, 'steps_per_epoch': (len(x_train) // global_batch_size)}
eval_inputs = {'batch_size': None, 'x': x, 'y': None, 'steps': 20}
predict_batch_size = len(x_predict)
if use_per_core_batch_size:
predict_batch_size //= with_distribution.num_replicas_in_sync
predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
predict_dataset = batch_wrapper(predict_dataset, predict_batch_size, with_distribution)
predict_inputs = {'batch_size': None, 'steps': 1, 'x': predict_dataset}
return (training_inputs, eval_inputs, predict_inputs)
|
def read_lexiconp(filename):
"Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.\n Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,\n 'pron_prob', a float, is the pronunciation probability (which must be >0.0\n and would normally be <=1.0), and 'pron' is a list of strings representing phones.\n An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).\n "
ans = []
found_empty_prons = False
found_large_pronprobs = False
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile('[ \t]+')
for line in f:
a = whitespace.split(line.strip(' \t\r\n'))
if (len(a) < 2):
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if (word == '<eps>'):
                print('{0}: error: found <eps> as a word in lexicon file {1}'.format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
            except ValueError:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field should be pron-prob".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
prons = a[2:]
if (pron_prob <= 0.0):
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2}".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
if (len(prons) == 0):
found_empty_prons = True
ans.append((word, pron_prob, prons))
if (pron_prob > 1.0):
found_large_pronprobs = True
if found_empty_prons:
print('{0}: warning: found at least one word with an empty pronunciation in lexicon file {1}.'.format(sys.argv[0], filename), file=sys.stderr)
if found_large_pronprobs:
print('{0}: warning: found at least one word with pron-prob >1.0 in {1}'.format(sys.argv[0], filename), file=sys.stderr)
if (len(ans) == 0):
print('{0}: error: found no pronunciations in lexicon file {1}'.format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
| -5,861,199,520,399,420,000
|
Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.
Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,
'pron_prob', a float, is the pronunciation probability (which must be >0.0
and would normally be <=1.0), and 'pron' is a list of strings representing phones.
An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
read_lexiconp
|
Anusha-G-Rao/kaldi
|
python
|
def read_lexiconp(filename):
"Reads the lexiconp.txt file in 'filename', with lines like 'word pron p1 p2 ...'.\n Returns a list of tuples (word, pron_prob, pron), where 'word' is a string,\n 'pron_prob', a float, is the pronunciation probability (which must be >0.0\n and would normally be <=1.0), and 'pron' is a list of strings representing phones.\n An element in the returned list might be ('hello', 1.0, ['h', 'eh', 'l', 'ow']).\n "
ans = []
found_empty_prons = False
found_large_pronprobs = False
with open(filename, 'r', encoding='latin-1') as f:
whitespace = re.compile('[ \t]+')
for line in f:
a = whitespace.split(line.strip(' \t\r\n'))
if (len(a) < 2):
print("{0}: error: found bad line '{1}' in lexicon file {2} ".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
word = a[0]
if (word == '<eps>'):
                print('{0}: error: found <eps> as a word in lexicon file {1}'.format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
try:
pron_prob = float(a[1])
            except ValueError:
print("{0}: error: found bad line '{1}' in lexicon file {2}, 2nd field should be pron-prob".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
prons = a[2:]
if (pron_prob <= 0.0):
                print("{0}: error: invalid pron-prob in line '{1}' of lexicon file {2}".format(sys.argv[0], line.strip(' \t\r\n'), filename), file=sys.stderr)
sys.exit(1)
if (len(prons) == 0):
found_empty_prons = True
ans.append((word, pron_prob, prons))
if (pron_prob > 1.0):
found_large_pronprobs = True
if found_empty_prons:
print('{0}: warning: found at least one word with an empty pronunciation in lexicon file {1}.'.format(sys.argv[0], filename), file=sys.stderr)
if found_large_pronprobs:
print('{0}: warning: found at least one word with pron-prob >1.0 in {1}'.format(sys.argv[0], filename), file=sys.stderr)
if (len(ans) == 0):
print('{0}: error: found no pronunciations in lexicon file {1}'.format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
return ans
|
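A small usage sketch, assuming read_lexiconp and its module-level imports (re, sys) are in scope; the two-word lexicon is made up:

import tempfile

# Write a tiny lexiconp-format file ('word pron-prob phone phone ...') and parse it back.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('hello 1.0 h eh l ow\nworld 0.5 w er l d\n')
    path = f.name

entries = read_lexiconp(path)
assert entries[0] == ('hello', 1.0, ['h', 'eh', 'l', 'ow'])
assert entries[1] == ('world', 0.5, ['w', 'er', 'l', 'd'])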
def write_nonterminal_arcs(start_state, loop_state, next_state, nonterminals, left_context_phones):
'This function relates to the grammar-decoding setup, see\n kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence\n and write_fst_silence, and writes to the stdout some extra arcs\n in the lexicon FST that relate to nonterminal symbols.\n See the section "Special symbols in L.fst,\n kaldi-asr.org/doc/grammar.html#grammar_special_l.\n start_state: the start-state of L.fst.\n loop_state: the state of high out-degree in L.fst where words leave\n and enter.\n next_state: the number from which this function can start allocating its\n own states. the updated value of next_state will be returned.\n nonterminals: the user-defined nonterminal symbols as a list of\n strings, e.g. [\'#nonterm:contact_list\', ... ].\n left_context_phones: a list of phones that may appear as left-context,\n e.g. [\'a\', \'ah\', ... \'#nonterm_bos\'].\n '
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=shared_state, phone='#nonterm_begin', word='#nonterm_begin', cost=0.0))
for nonterminal in nonterminals:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=loop_state, dest=shared_state, phone=nonterminal, word=nonterminal, cost=0.0))
this_cost = (- math.log((1.0 / len(left_context_phones))))
for left_context_phone in left_context_phones:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=shared_state, dest=loop_state, phone=left_context_phone, word='<eps>', cost=this_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=loop_state, dest=final_state, phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print('{state}\t{final_cost}'.format(state=final_state, final_cost=0.0))
return next_state
| -8,162,289,252,516,565,000
|
This function relates to the grammar-decoding setup, see
kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence
and write_fst_silence, and writes to the stdout some extra arcs
in the lexicon FST that relate to nonterminal symbols.
See the section "Special symbols in L.fst",
kaldi-asr.org/doc/grammar.html#grammar_special_l.
start_state: the start-state of L.fst.
loop_state: the state of high out-degree in L.fst where words leave
and enter.
next_state: the number from which this function can start allocating its
own states. the updated value of next_state will be returned.
nonterminals: the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
left_context_phones: a list of phones that may appear as left-context,
e.g. ['a', 'ah', ... '#nonterm_bos'].
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
write_nonterminal_arcs
|
Anusha-G-Rao/kaldi
|
python
|
def write_nonterminal_arcs(start_state, loop_state, next_state, nonterminals, left_context_phones):
'This function relates to the grammar-decoding setup, see\n kaldi-asr.org/doc/grammar.html. It is called from write_fst_no_silence\n and write_fst_silence, and writes to the stdout some extra arcs\n in the lexicon FST that relate to nonterminal symbols.\n See the section "Special symbols in L.fst,\n kaldi-asr.org/doc/grammar.html#grammar_special_l.\n start_state: the start-state of L.fst.\n loop_state: the state of high out-degree in L.fst where words leave\n and enter.\n next_state: the number from which this function can start allocating its\n own states. the updated value of next_state will be returned.\n nonterminals: the user-defined nonterminal symbols as a list of\n strings, e.g. [\'#nonterm:contact_list\', ... ].\n left_context_phones: a list of phones that may appear as left-context,\n e.g. [\'a\', \'ah\', ... \'#nonterm_bos\'].\n '
shared_state = next_state
next_state += 1
final_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=shared_state, phone='#nonterm_begin', word='#nonterm_begin', cost=0.0))
for nonterminal in nonterminals:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=loop_state, dest=shared_state, phone=nonterminal, word=nonterminal, cost=0.0))
this_cost = (- math.log((1.0 / len(left_context_phones))))
for left_context_phone in left_context_phones:
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=shared_state, dest=loop_state, phone=left_context_phone, word='<eps>', cost=this_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=loop_state, dest=final_state, phone='#nonterm_end', word='#nonterm_end', cost=0.0))
print('{state}\t{final_cost}'.format(state=final_state, final_cost=0.0))
return next_state
|
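The only non-obvious cost above is this_cost: the arcs out of the shared state spread probability mass uniformly over the left-context phones, so each one costs -log(1/N) = log(N). A one-line check (the phone count is hypothetical):

import math

n_phones = 40  # hypothetical len(left_context_phones)
assert math.isclose(-math.log(1.0 / n_phones), math.log(n_phones))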
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"Writes the text format of L.fst to the standard output. This version is for\n when --sil-prob=0.0, meaning there is no optional silence allowed.\n\n 'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by\n read_lexiconp().\n 'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),\n is either None, or the user-defined nonterminal symbols as a list of\n strings, e.g. ['#nonterm:contact_list', ... ].\n 'left_context_phones', which also relates to grammar decoding, and must be\n supplied if 'nonterminals' is supplied is either None or a list of\n phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].\n "
loop_state = 0
next_state = 1
for (word, pronprob, pron) in lexicon:
cost = (- math.log(pronprob))
cur_state = loop_state
for i in range((len(pron) - 1)):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=next_state, phone=pron[i], word=(word if (i == 0) else '<eps>'), cost=(cost if (i == 0) else 0.0)))
cur_state = next_state
next_state += 1
i = (len(pron) - 1)
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=loop_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(cost if (i <= 0) else 0.0)))
if (nonterminals is not None):
        # `start_state` is never defined in this function; without optional
        # silence the sentence start coincides with the loop state.
        next_state = write_nonterminal_arcs(loop_state, loop_state, next_state, nonterminals, left_context_phones)
print('{state}\t{final_cost}'.format(state=loop_state, final_cost=0.0))
| -4,885,482,295,403,309,000
|
Writes the text format of L.fst to the standard output. This version is for
when --sil-prob=0.0, meaning there is no optional silence allowed.
'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by
read_lexiconp().
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding and must be
supplied if 'nonterminals' is supplied, is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
write_fst_no_silence
|
Anusha-G-Rao/kaldi
|
python
|
def write_fst_no_silence(lexicon, nonterminals=None, left_context_phones=None):
"Writes the text format of L.fst to the standard output. This version is for\n when --sil-prob=0.0, meaning there is no optional silence allowed.\n\n 'lexicon' is a list of 3-tuples (word, pron-prob, prons) as returned by\n read_lexiconp().\n 'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),\n is either None, or the user-defined nonterminal symbols as a list of\n strings, e.g. ['#nonterm:contact_list', ... ].\n 'left_context_phones', which also relates to grammar decoding, and must be\n supplied if 'nonterminals' is supplied is either None or a list of\n phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].\n "
loop_state = 0
next_state = 1
for (word, pronprob, pron) in lexicon:
cost = (- math.log(pronprob))
cur_state = loop_state
for i in range((len(pron) - 1)):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=next_state, phone=pron[i], word=(word if (i == 0) else '<eps>'), cost=(cost if (i == 0) else 0.0)))
cur_state = next_state
next_state += 1
i = (len(pron) - 1)
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=loop_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(cost if (i <= 0) else 0.0)))
if (nonterminals is not None):
        # `start_state` is never defined in this function; without optional
        # silence the sentence start coincides with the loop state.
        next_state = write_nonterminal_arcs(loop_state, loop_state, next_state, nonterminals, left_context_phones)
print('{state}\t{final_cost}'.format(state=loop_state, final_cost=0.0))
|
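For orientation, here is roughly what write_fst_no_silence emits for a one-word lexicon (tab-separated OpenFst text format; floats render Python-style, so -log(1.0) appears as -0.0):

# lexicon = [('hi', 1.0, ['h', 'ay'])], no nonterminals:
#
#   0  1  h   hi     -0.0    first phone carries the word label and pron cost
#   1  0  ay  <eps>  0.0     last phone returns to the loop state 0
#   0  0.0                   state 0 is final with zero cost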
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig, nonterminals=None, left_context_phones=None):
'Writes the text format of L.fst to the standard output. This version is for\n when --sil-prob != 0.0, meaning there is optional silence\n \'lexicon\' is a list of 3-tuples (word, pron-prob, prons)\n as returned by read_lexiconp().\n \'sil_prob\', which is expected to be strictly between 0.. and 1.0, is the\n probability of silence\n \'sil_phone\' is the silence phone, e.g. "SIL".\n \'sil_disambig\' is either None, or the silence disambiguation symbol, e.g. "#5".\n \'nonterminals\', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),\n is either None, or the user-defined nonterminal symbols as a list of\n strings, e.g. [\'#nonterm:contact_list\', ... ].\n \'left_context_phones\', which also relates to grammar decoding, and must be\n supplied if \'nonterminals\' is supplied is either None or a list of\n phones that may appear as left-context, e.g. [\'a\', \'ah\', ... \'#nonterm_bos\'].\n '
assert ((sil_prob > 0.0) and (sil_prob < 1.0))
sil_cost = (- math.log(sil_prob))
no_sil_cost = (- math.log((1.0 - sil_prob)))
start_state = 0
loop_state = 1
sil_state = 2
next_state = 3
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=loop_state, phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=sil_state, phone='<eps>', word='<eps>', cost=sil_cost))
if (sil_disambig is None):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_state, dest=loop_state, phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_state, dest=sil_disambig_state, phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_disambig_state, dest=loop_state, phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = (- math.log(pronprob))
cur_state = loop_state
for i in range((len(pron) - 1)):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=next_state, phone=pron[i], word=(word if (i == 0) else '<eps>'), cost=(pron_cost if (i == 0) else 0.0)))
cur_state = next_state
next_state += 1
i = (len(pron) - 1)
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=loop_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(no_sil_cost + (pron_cost if (i <= 0) else 0.0))))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=sil_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(sil_cost + (pron_cost if (i <= 0) else 0.0))))
if (nonterminals is not None):
next_state = write_nonterminal_arcs(start_state, loop_state, next_state, nonterminals, left_context_phones)
print('{state}\t{final_cost}'.format(state=loop_state, final_cost=0.0))
| 1,575,721,830,482,665,700
|
Writes the text format of L.fst to the standard output. This version is for
when --sil-prob != 0.0, meaning there is optional silence
'lexicon' is a list of 3-tuples (word, pron-prob, prons)
as returned by read_lexiconp().
'sil_prob', which is expected to be strictly between 0.0 and 1.0, is the
probability of silence
'sil_phone' is the silence phone, e.g. "SIL".
'sil_disambig' is either None, or the silence disambiguation symbol, e.g. "#5".
'nonterminals', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),
is either None, or the user-defined nonterminal symbols as a list of
strings, e.g. ['#nonterm:contact_list', ... ].
'left_context_phones', which also relates to grammar decoding and must be
supplied if 'nonterminals' is supplied, is either None or a list of
phones that may appear as left-context, e.g. ['a', 'ah', ... '#nonterm_bos'].
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
write_fst_with_silence
|
Anusha-G-Rao/kaldi
|
python
|
def write_fst_with_silence(lexicon, sil_prob, sil_phone, sil_disambig, nonterminals=None, left_context_phones=None):
'Writes the text format of L.fst to the standard output. This version is for\n when --sil-prob != 0.0, meaning there is optional silence\n \'lexicon\' is a list of 3-tuples (word, pron-prob, prons)\n as returned by read_lexiconp().\n \'sil_prob\', which is expected to be strictly between 0.. and 1.0, is the\n probability of silence\n \'sil_phone\' is the silence phone, e.g. "SIL".\n \'sil_disambig\' is either None, or the silence disambiguation symbol, e.g. "#5".\n \'nonterminals\', which relates to grammar decoding (see kaldi-asr.org/doc/grammar.html),\n is either None, or the user-defined nonterminal symbols as a list of\n strings, e.g. [\'#nonterm:contact_list\', ... ].\n \'left_context_phones\', which also relates to grammar decoding, and must be\n supplied if \'nonterminals\' is supplied is either None or a list of\n phones that may appear as left-context, e.g. [\'a\', \'ah\', ... \'#nonterm_bos\'].\n '
assert ((sil_prob > 0.0) and (sil_prob < 1.0))
sil_cost = (- math.log(sil_prob))
no_sil_cost = (- math.log((1.0 - sil_prob)))
start_state = 0
loop_state = 1
sil_state = 2
next_state = 3
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=loop_state, phone='<eps>', word='<eps>', cost=no_sil_cost))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=start_state, dest=sil_state, phone='<eps>', word='<eps>', cost=sil_cost))
if (sil_disambig is None):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_state, dest=loop_state, phone=sil_phone, word='<eps>', cost=0.0))
else:
sil_disambig_state = next_state
next_state += 1
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_state, dest=sil_disambig_state, phone=sil_phone, word='<eps>', cost=0.0))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=sil_disambig_state, dest=loop_state, phone=sil_disambig, word='<eps>', cost=0.0))
for (word, pronprob, pron) in lexicon:
pron_cost = (- math.log(pronprob))
cur_state = loop_state
for i in range((len(pron) - 1)):
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=next_state, phone=pron[i], word=(word if (i == 0) else '<eps>'), cost=(pron_cost if (i == 0) else 0.0)))
cur_state = next_state
next_state += 1
i = (len(pron) - 1)
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=loop_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(no_sil_cost + (pron_cost if (i <= 0) else 0.0))))
print('{src}\t{dest}\t{phone}\t{word}\t{cost}'.format(src=cur_state, dest=sil_state, phone=(pron[i] if (i >= 0) else '<eps>'), word=(word if (i <= 0) else '<eps>'), cost=(sil_cost + (pron_cost if (i <= 0) else 0.0))))
if (nonterminals is not None):
next_state = write_nonterminal_arcs(start_state, loop_state, next_state, nonterminals, left_context_phones)
print('{state}\t{final_cost}'.format(state=loop_state, final_cost=0.0))
|
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"Writes updated words.txt to 'filename'. 'orig_lines' is the original lines\n in the words.txt file as a list of strings (without the newlines);\n highest_numbered_symbol is the highest numbered symbol in the original\n words.txt; nonterminals is a list of strings like '#nonterm:foo'."
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = (highest_numbered_symbol + 1)
for n in (['#nonterm_begin', '#nonterm_end'] + nonterminals):
print('{0} {1}'.format(n, cur_symbol), file=f)
cur_symbol = (cur_symbol + 1)
| 4,574,954,868,212,090,000
|
Writes updated words.txt to 'filename'. 'orig_lines' is the original lines
in the words.txt file as a list of strings (without the newlines);
highest_numbered_symbol is the highest numbered symbol in the original
words.txt; nonterminals is a list of strings like '#nonterm:foo'.
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
write_words_txt
|
Anusha-G-Rao/kaldi
|
python
|
def write_words_txt(orig_lines, highest_numbered_symbol, nonterminals, filename):
"Writes updated words.txt to 'filename'. 'orig_lines' is the original lines\n in the words.txt file as a list of strings (without the newlines);\n highest_numbered_symbol is the highest numbered symbol in the original\n words.txt; nonterminals is a list of strings like '#nonterm:foo'."
with open(filename, 'w', encoding='latin-1') as f:
for l in orig_lines:
print(l, file=f)
cur_symbol = (highest_numbered_symbol + 1)
for n in (['#nonterm_begin', '#nonterm_end'] + nonterminals):
print('{0} {1}'.format(n, cur_symbol), file=f)
cur_symbol = (cur_symbol + 1)
|
def read_nonterminals(filename):
"Reads the user-defined nonterminal symbols in 'filename', checks that\n it has the expected format and has no duplicates, and returns the nonterminal\n symbols as a list of strings, e.g.\n ['#nonterm:contact_list', '#nonterm:phone_number', ... ]. "
ans = [line.strip(' \t\r\n') for line in open(filename, 'r', encoding='latin-1')]
if (len(ans) == 0):
        raise RuntimeError('The file {0} contains no nonterminal symbols.'.format(filename))
for nonterm in ans:
if (nonterm[:9] != '#nonterm:'):
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'".format(filename, nonterm))
if (len(set(ans)) != len(ans)):
raise RuntimeError('Duplicate nonterminal symbols are present in file {0}'.format(filename))
return ans
| -2,603,101,064,308,610,600
|
Reads the user-defined nonterminal symbols in 'filename', checks that
it has the expected format and has no duplicates, and returns the nonterminal
symbols as a list of strings, e.g.
['#nonterm:contact_list', '#nonterm:phone_number', ... ].
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
read_nonterminals
|
Anusha-G-Rao/kaldi
|
python
|
def read_nonterminals(filename):
"Reads the user-defined nonterminal symbols in 'filename', checks that\n it has the expected format and has no duplicates, and returns the nonterminal\n symbols as a list of strings, e.g.\n ['#nonterm:contact_list', '#nonterm:phone_number', ... ]. "
ans = [line.strip(' \t\r\n') for line in open(filename, 'r', encoding='latin-1')]
if (len(ans) == 0):
        raise RuntimeError('The file {0} contains no nonterminal symbols.'.format(filename))
for nonterm in ans:
if (nonterm[:9] != '#nonterm:'):
raise RuntimeError("In file '{0}', expected nonterminal symbols to start with '#nonterm:', found '{1}'".format(filename, nonterm))
if (len(set(ans)) != len(ans)):
raise RuntimeError('Duplicate nonterminal symbols are present in file {0}'.format(filename))
return ans
|
def read_left_context_phones(filename):
"Reads, checks, and returns a list of left-context phones, in text form, one\n per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"
ans = [line.strip(' \t\r\n') for line in open(filename, 'r', encoding='latin-1')]
if (len(ans) == 0):
raise RuntimeError('The file {0} contains no left-context phones.'.format(filename))
whitespace = re.compile('[ \t]+')
for s in ans:
if (len(whitespace.split(s)) != 1):
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s))
if (len(set(ans)) != len(ans)):
        raise RuntimeError('Duplicate phones are present in file {0}'.format(filename))
return ans
| -7,915,289,802,565,721,000
|
Reads, checks, and returns a list of left-context phones, in text form, one
per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
read_left_context_phones
|
Anusha-G-Rao/kaldi
|
python
|
def read_left_context_phones(filename):
"Reads, checks, and returns a list of left-context phones, in text form, one\n per line. Returns a list of strings, e.g. ['a', 'ah', ..., '#nonterm_bos' ]"
ans = [line.strip(' \t\r\n') for line in open(filename, 'r', encoding='latin-1')]
if (len(ans) == 0):
raise RuntimeError('The file {0} contains no left-context phones.'.format(filename))
whitespace = re.compile('[ \t]+')
for s in ans:
if (len(whitespace.split(s)) != 1):
raise RuntimeError("The file {0} contains an invalid line '{1}'".format(filename, s))
if (len(set(ans)) != len(ans)):
raise RuntimeError('Duplicate nonterminal symbols are present in file {0}'.format(filename))
return ans
|
def is_token(s):
'Returns true if s is a string and is space-free.'
if (not isinstance(s, str)):
return False
whitespace = re.compile('[ \t\r\n]+')
split_str = whitespace.split(s)
return ((len(split_str) == 1) and (s == split_str[0]))
| 5,402,618,036,967,467,000
|
Returns true if s is a string and is space-free.
|
egs/wsj/s5/utils/lang/make_lexicon_fst.py
|
is_token
|
Anusha-G-Rao/kaldi
|
python
|
def is_token(s):
if (not isinstance(s, str)):
return False
whitespace = re.compile('[ \t\r\n]+')
split_str = whitespace.split(s)
return ((len(split_str) == 1) and (s == split_str[0]))
|
def inspect_app(app: App) -> 'AppInfo':
'Inspects an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n AppInfo: The information regarding the application. Call\n :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly\n representation.\n '
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
| 4,346,341,443,215,193,600
|
Inspects an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
AppInfo: The information regarding the application. Call
:meth:`~.AppInfo.to_string` on the result to obtain a human-friendly
representation.
|
falcon/inspect.py
|
inspect_app
|
hzdwang/falcon-1
|
python
|
def inspect_app(app: App) -> 'AppInfo':
'Inspects an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n AppInfo: The information regarding the application. Call\n :meth:`~.AppInfo.to_string` on the result to obtain a human-friendly\n representation.\n '
routes = inspect_routes(app)
static = inspect_static_routes(app)
sinks = inspect_sinks(app)
error_handlers = inspect_error_handlers(app)
middleware = inspect_middlewares(app)
return AppInfo(routes, middleware, static, sinks, error_handlers, app._ASGI)
|
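A minimal usage sketch (assuming falcon 3.x is installed; the resource and route are made up):

import falcon
from falcon.inspect import inspect_app

class HelloResource:
    def on_get(self, req, resp):
        resp.media = {'hello': 'world'}

app = falcon.App()
app.add_route('/hello', HelloResource())

info = inspect_app(app)
print(info.to_string(verbose=True))  # human-friendly summary of routes, middleware, sinks, ...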
def inspect_routes(app: App) -> 'List[RouteInfo]':
'Inspects the routes of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[RouteInfo]: A list of route descriptions for the application.\n '
router = app._router
inspect_function = _supported_routers.get(type(router))
if (inspect_function is None):
raise TypeError('Unsupported router class {}. Use "register_router" to register a function that can inspect the router used by the provided application'.format(type(router)))
return inspect_function(router)
| 9,208,210,767,539,110,000
|
Inspects the routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[RouteInfo]: A list of route descriptions for the application.
|
falcon/inspect.py
|
inspect_routes
|
hzdwang/falcon-1
|
python
|
def inspect_routes(app: App) -> 'List[RouteInfo]':
'Inspects the routes of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[RouteInfo]: A list of route descriptions for the application.\n '
router = app._router
inspect_function = _supported_routers.get(type(router))
if (inspect_function is None):
raise TypeError('Unsupported router class {}. Use "register_router" to register a function that can inspect the router used by the provided application'.format(type(router)))
return inspect_function(router)
|
def register_router(router_class):
"Register a function to inspect a particular router.\n\n This decorator registers a new function for a custom router\n class, so that it can be inspected with the function\n :func:`.inspect_routes`.\n An inspection function takes the router instance used by the\n application and returns a list of :class:`.RouteInfo`. Eg::\n\n @register_router(MyRouterClass)\n def inspect_my_router(router):\n return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]\n\n Args:\n router_class (Type): The router class to register. If\n already registered an error will be raised.\n "
def wraps(fn):
if (router_class in _supported_routers):
raise ValueError('Another function is already registered for the router {}'.format(router_class))
_supported_routers[router_class] = fn
return fn
return wraps
| 5,343,638,293,906,582,000
|
Register a function to inspect a particular router.
This decorator registers a new function for a custom router
class, so that it can be inspected with the function
:func:`.inspect_routes`.
An inspection function takes the router instance used by the
application and returns a list of :class:`.RouteInfo`. Eg::
@register_router(MyRouterClass)
def inspect_my_router(router):
return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]
Args:
router_class (Type): The router class to register. If
already registered an error will be raised.
|
falcon/inspect.py
|
register_router
|
hzdwang/falcon-1
|
python
|
def register_router(router_class):
"Register a function to inspect a particular router.\n\n This decorator registers a new function for a custom router\n class, so that it can be inspected with the function\n :func:`.inspect_routes`.\n An inspection function takes the router instance used by the\n application and returns a list of :class:`.RouteInfo`. Eg::\n\n @register_router(MyRouterClass)\n def inspect_my_router(router):\n return [RouteInfo('foo', 'bar', '/path/to/foo.py:42', [])]\n\n Args:\n router_class (Type): The router class to register. If\n already registered an error will be raised.\n "
def wraps(fn):
if (router_class in _supported_routers):
raise ValueError('Another function is already registered for the router {}'.format(router_class))
_supported_routers[router_class] = fn
return fn
return wraps
|
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
'Inspects the static routes of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[StaticRouteInfo]: A list of static routes that have\n been added to the application.\n '
routes = []
for (sr, _, _) in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
| -8,759,942,310,408,179,000
|
Inspects the static routes of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[StaticRouteInfo]: A list of static routes that have
been added to the application.
|
falcon/inspect.py
|
inspect_static_routes
|
hzdwang/falcon-1
|
python
|
def inspect_static_routes(app: App) -> 'List[StaticRouteInfo]':
'Inspects the static routes of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[StaticRouteInfo]: A list of static routes that have\n been added to the application.\n '
routes = []
for (sr, _, _) in app._static_routes:
info = StaticRouteInfo(sr._prefix, sr._directory, sr._fallback_filename)
routes.append(info)
return routes
|
def inspect_sinks(app: App) -> 'List[SinkInfo]':
'Inspects the sinks of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[SinkInfo]: A list of sinks used by the application.\n '
sinks = []
for (prefix, sink, _) in app._sinks:
(source_info, name) = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
| 3,921,220,580,018,919,400
|
Inspects the sinks of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[SinkInfo]: A list of sinks used by the application.
|
falcon/inspect.py
|
inspect_sinks
|
hzdwang/falcon-1
|
python
|
def inspect_sinks(app: App) -> 'List[SinkInfo]':
'Inspects the sinks of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[SinkInfo]: A list of sinks used by the application.\n '
sinks = []
for (prefix, sink, _) in app._sinks:
(source_info, name) = _get_source_info_and_name(sink)
info = SinkInfo(prefix.pattern, name, source_info)
sinks.append(info)
return sinks
|
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
'Inspects the error handlers of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[ErrorHandlerInfo]: A list of error handlers used by the\n application.\n '
errors = []
for (exc, fn) in app._error_handlers.items():
(source_info, name) = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
| -8,406,878,317,633,376,000
|
Inspects the error handlers of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
List[ErrorHandlerInfo]: A list of error handlers used by the
application.
|
falcon/inspect.py
|
inspect_error_handlers
|
hzdwang/falcon-1
|
python
|
def inspect_error_handlers(app: App) -> 'List[ErrorHandlerInfo]':
'Inspects the error handlers of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n List[ErrorHandlerInfo]: A list of error handlers used by the\n application.\n '
errors = []
for (exc, fn) in app._error_handlers.items():
(source_info, name) = _get_source_info_and_name(fn)
info = ErrorHandlerInfo(exc.__name__, name, source_info, _is_internal(fn))
errors.append(info)
return errors
|
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"Inspects the middleware components of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n MiddlewareInfo: Information about the app's middleware components.\n "
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
(_, name) = _get_source_info_and_name(method)
cls = type(method.__self__)
(_, cls_name) = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = ('Process request', 'Process resource', 'Process response')
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
(class_source_info, cls_name) = _get_source_info_and_name(type(m))
methods = []
for (method, name) in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(middlewareTree, middlewareClasses, app._independent_middleware)
| 1,525,322,864,409,902,300
|
Inspects the middleware components of an application.
Args:
app (falcon.App): The application to inspect. Works with both
:class:`falcon.App` and :class:`falcon.asgi.App`.
Returns:
MiddlewareInfo: Information about the app's middleware components.
|
falcon/inspect.py
|
inspect_middlewares
|
hzdwang/falcon-1
|
python
|
def inspect_middlewares(app: App) -> 'MiddlewareInfo':
"Inspects the middleware components of an application.\n\n Args:\n app (falcon.App): The application to inspect. Works with both\n :class:`falcon.App` and :class:`falcon.asgi.App`.\n\n Returns:\n MiddlewareInfo: Information about the app's middleware components.\n "
types_ = app_helpers.prepare_middleware(app._unprepared_middleware, True, app._ASGI)
type_infos = []
for stack in types_:
current = []
for method in stack:
(_, name) = _get_source_info_and_name(method)
cls = type(method.__self__)
(_, cls_name) = _get_source_info_and_name(cls)
current.append(MiddlewareTreeItemInfo(name, cls_name))
type_infos.append(current)
middlewareTree = MiddlewareTreeInfo(*type_infos)
middlewareClasses = []
names = ('Process request', 'Process resource', 'Process response')
for m in app._unprepared_middleware:
fns = app_helpers.prepare_middleware([m], True, app._ASGI)
(class_source_info, cls_name) = _get_source_info_and_name(type(m))
methods = []
for (method, name) in zip(fns, names):
if method:
real_func = method[0]
source_info = _get_source_info(real_func)
methods.append(MiddlewareMethodInfo(real_func.__name__, source_info))
m_info = MiddlewareClassInfo(cls_name, class_source_info, methods)
middlewareClasses.append(m_info)
return MiddlewareInfo(middlewareTree, middlewareClasses, app._independent_middleware)
|
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
'Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.\n\n Default route inspector for CompiledRouter.\n\n Args:\n router (CompiledRouter): The router to inspect.\n\n Returns:\n List[RouteInfo]: A list of :class:`~.RouteInfo`.\n '
def _traverse(roots, parent):
for root in roots:
path = ((parent + '/') + root.raw_segment)
if (root.resource is not None):
methods = []
if root.method_map:
for (method, func) in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(method, source_info, real_func.__name__, internal)
methods.append(method_info)
(source_info, class_name) = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = []
_traverse(router._roots, '')
return routes
| 2,829,765,905,918,934,000
|
Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.
Default route inspector for CompiledRouter.
Args:
router (CompiledRouter): The router to inspect.
Returns:
List[RouteInfo]: A list of :class:`~.RouteInfo`.
|
falcon/inspect.py
|
inspect_compiled_router
|
hzdwang/falcon-1
|
python
|
@register_router(CompiledRouter)
def inspect_compiled_router(router: CompiledRouter) -> 'List[RouteInfo]':
'Walk an instance of :class:`~.CompiledRouter` to return a list of defined routes.\n\n Default route inspector for CompiledRouter.\n\n Args:\n router (CompiledRouter): The router to inspect.\n\n Returns:\n List[RouteInfo]: A list of :class:`~.RouteInfo`.\n '
def _traverse(roots, parent):
for root in roots:
path = ((parent + '/') + root.raw_segment)
if (root.resource is not None):
methods = []
if root.method_map:
for (method, func) in root.method_map.items():
if isinstance(func, partial):
real_func = func.func
else:
real_func = func
source_info = _get_source_info(real_func)
internal = _is_internal(real_func)
method_info = RouteMethodInfo(method, source_info, real_func.__name__, internal)
methods.append(method_info)
(source_info, class_name) = _get_source_info_and_name(root.resource)
route_info = RouteInfo(path, class_name, source_info, methods)
routes.append(route_info)
if root.children:
_traverse(root.children, path)
routes = []
    _traverse(router._roots, '')
return routes
|
def _get_source_info(obj, default='[unknown file]'):
'Try to get the definition file and line of obj.\n\n Return default on error.\n '
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
source_info = default
return source_info
| -391,650,700,617,766,000
|
Try to get the definition file and line of obj.
Return default on error.
|
falcon/inspect.py
|
_get_source_info
|
hzdwang/falcon-1
|
python
|
def _get_source_info(obj, default='[unknown file]'):
'Try to get the definition file and line of obj.\n\n Return default on error.\n '
try:
source_file = inspect.getsourcefile(obj)
source_lines = inspect.findsource(obj)
source_info = '{}:{}'.format(source_file, source_lines[1])
except Exception:
source_info = default
return source_info
|
def _get_source_info_and_name(obj):
'Attempt to get the definition file and line of obj and its name.'
source_info = _get_source_info(obj, None)
if (source_info is None):
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if (name is None):
name = getattr(type(obj), '__name__', '[unknown]')
return (source_info, name)
| -3,760,626,473,023,653,000
|
Attempt to get the definition file and line of obj and its name.
|
falcon/inspect.py
|
_get_source_info_and_name
|
hzdwang/falcon-1
|
python
|
def _get_source_info_and_name(obj):
source_info = _get_source_info(obj, None)
if (source_info is None):
source_info = _get_source_info(type(obj))
name = getattr(obj, '__name__', None)
if (name is None):
name = getattr(type(obj), '__name__', '[unknown]')
return (source_info, name)
|
def _is_internal(obj):
'Check if the module of the object is a falcon module.'
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
| -9,039,669,792,405,178,000
|
Check if the module of the object is a falcon module.
|
falcon/inspect.py
|
_is_internal
|
hzdwang/falcon-1
|
python
|
def _is_internal(obj):
module = inspect.getmodule(obj)
if module:
return module.__name__.startswith('falcon.')
return False
|
def _filter_internal(iterable, return_internal):
'Filter the internal elements of an iterable.'
if return_internal:
return iterable
return [el for el in iterable if (not el.internal)]
| 8,104,579,376,676,779,000
|
Filter the internal elements of an iterable.
|
falcon/inspect.py
|
_filter_internal
|
hzdwang/falcon-1
|
python
|
def _filter_internal(iterable, return_internal):
if return_internal:
return iterable
return [el for el in iterable if (not el.internal)]
|
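A toy illustration of the filter, assuming the private helper is imported from falcon.inspect and that elements expose an `internal` attribute the way the info classes in this module do.

from collections import namedtuple
from falcon.inspect import _filter_internal

El = namedtuple('El', ['name', 'internal'])
items = [El('user_handler', False), El('falcon_default', True)]

print(_filter_internal(items, True))   # both elements pass through
print(_filter_internal(items, False))  # only El(name='user_handler', internal=False)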
def to_string(self, verbose=False, internal=False) -> str:
'Return a string representation of this class.\n\n Args:\n verbose (bool, optional): Adds more information. Defaults to False.\n internal (bool, optional): Also include internal route methods\n and error handlers added by the framework. Defaults to\n ``False``.\n\n Returns:\n str: string representation of this class.\n '
return StringVisitor(verbose, internal).process(self)
| 357,918,309,221,823,740
|
Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal route methods
and error handlers added by the framework. Defaults to
``False``.
Returns:
str: string representation of this class.
|
falcon/inspect.py
|
to_string
|
hzdwang/falcon-1
|
python
|
def to_string(self, verbose=False, internal=False) -> str:
'Return a string representation of this class.\n\n Args:\n verbose (bool, optional): Adds more information. Defaults to False.\n internal (bool, optional): Also include internal route methods\n and error handlers added by the framework. Defaults to\n ``False``.\n\n Returns:\n str: string representation of this class.\n '
return StringVisitor(verbose, internal).process(self)
|
def to_string(self, verbose=False, internal=False, name='') -> str:
"Return a string representation of this class.\n\n Args:\n verbose (bool, optional): Adds more information. Defaults to False.\n internal (bool, optional): Also include internal falcon route methods\n and error handlers. Defaults to ``False``.\n name (str, optional): The name of the application, to be output at the\n beginning of the text. Defaults to ``'Falcon App'``.\n Returns:\n str: A string representation of the application.\n "
return StringVisitor(verbose, internal, name).process(self)
| 3,752,347,903,184,052,700
|
Return a string representation of this class.
Args:
verbose (bool, optional): Adds more information. Defaults to False.
internal (bool, optional): Also include internal falcon route methods
and error handlers. Defaults to ``False``.
name (str, optional): The name of the application, to be output at the
beginning of the text. Defaults to ``'Falcon App'``.
Returns:
str: A string representation of the application.
|
falcon/inspect.py
|
to_string
|
hzdwang/falcon-1
|
python
|
def to_string(self, verbose=False, internal=False, name='') -> str:
"Return a string representation of this class.\n\n Args:\n verbose (bool, optional): Adds more information. Defaults to False.\n internal (bool, optional): Also include internal falcon route methods\n and error handlers. Defaults to ``False``.\n name (str, optional): The name of the application, to be output at the\n beginning of the text. Defaults to ``'Falcon App'``.\n Returns:\n str: A string representation of the application.\n "
return StringVisitor(verbose, internal, name).process(self)
|
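A hedged usage sketch for the second variant: in falcon 3.x, falcon.inspect.inspect_app returns an AppInfo whose to_string drives the StringVisitor methods below; the name argument replaces the default 'Falcon App' header.

import falcon
from falcon.inspect import inspect_app

app = falcon.App()
info = inspect_app(app)
print(info.to_string(verbose=True, name='My Service'))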
def process(self, instance: _Traversable):
    'Process the instance by calling the appropriate visit method.\n\n Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.\n\n Args:\n instance (_Traversable): The instance to process.\n '
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError('This visitor does not support {}'.format(type(instance))) from e
| 4,237,623,031,210,916,400
|
Process the instance by calling the appropriate visit method.
Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.
Args:
instance (_Traversable): The instance to process.
|
falcon/inspect.py
|
process
|
hzdwang/falcon-1
|
python
|
def process(self, instance: _Traversable):
    'Process the instance by calling the appropriate visit method.\n\n Uses the `__visit_name__` attribute of the `instance` to obtain the method to use.\n\n Args:\n instance (_Traversable): The instance to process.\n '
try:
return getattr(self, 'visit_{}'.format(instance.__visit_name__))(instance)
except AttributeError as e:
raise RuntimeError('This visitor does not support {}'.format(type(instance))) from e
|
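A sketch of the dispatch convention, assuming falcon 3.x: process looks up visit_<__visit_name__> on the visitor, so a RouteInfo (whose __visit_name__ is 'route') lands in visit_route, and an unsupported type raises RuntimeError.

import falcon
from falcon.inspect import StringVisitor, inspect_routes

app = falcon.App()  # routes would normally be added here
visitor = StringVisitor(verbose=False, internal=False)
for route_info in inspect_routes(app):
    print(visitor.process(route_info))  # dispatches to visitor.visit_route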
@property
def tab(self):
'Get the current tabulation.'
return (' ' * self.indent)
| -6,399,728,926,710,062,000
|
Get the current tabulation.
|
falcon/inspect.py
|
tab
|
hzdwang/falcon-1
|
python
|
@property
def tab(self):
return (' ' * self.indent)
|
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
'Visit a RouteMethodInfo instance. Usually called by `process`.'
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
| 7,306,008,936,031,749,000
|
Visit a RouteMethodInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_route_method
|
hzdwang/falcon-1
|
python
|
def visit_route_method(self, route_method: RouteMethodInfo) -> str:
text = '{0.method} - {0.function_name}'.format(route_method)
if self.verbose:
text += ' ({0.source_info})'.format(route_method)
return text
|
def _methods_to_string(self, methods: List):
'Return a string from the list of methods.'
tab = (self.tab + (' ' * 3))
methods = _filter_internal(methods, self.internal)
if (not methods):
return ''
text_list = [self.process(m) for m in methods]
    method_text = ['{}├── {}'.format(tab, m) for m in text_list[:(- 1)]]
    method_text += ['{}└── {}'.format(tab, m) for m in text_list[(- 1):]]
return '\n'.join(method_text)
| -4,648,375,267,960,320,000
|
Return a string from the list of methods.
|
falcon/inspect.py
|
_methods_to_string
|
hzdwang/falcon-1
|
python
|
def _methods_to_string(self, methods: List):
tab = (self.tab + (' ' * 3))
methods = _filter_internal(methods, self.internal)
if (not methods):
        return ''
text_list = [self.process(m) for m in methods]
    method_text = ['{}├── {}'.format(tab, m) for m in text_list[:(- 1)]]
    method_text += ['{}└── {}'.format(tab, m) for m in text_list[(- 1):]]
return '\n'.join(method_text)
|
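An illustration of the tree-branch layout this helper produces: every processed method except the last is prefixed with '├──' and the last with '└──' (the method names below are made up).

   ├── GET - on_get
   ├── POST - on_post
   └── DELETE - on_delete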
def visit_route(self, route: RouteInfo) -> str:
'Visit a RouteInfo instance. Usually called by `process`.'
    text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if (not method_text):
return text
return '{}:\n{}'.format(text, method_text)
| 2,163,025,547,150,710,300
|
Visit a RouteInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_route
|
hzdwang/falcon-1
|
python
|
def visit_route(self, route: RouteInfo) -> str:
    text = '{0}⇒ {1.path} - {1.class_name}'.format(self.tab, route)
if self.verbose:
text += ' ({0.source_info})'.format(route)
method_text = self._methods_to_string(route.methods)
if (not method_text):
return text
return '{}:\n{}'.format(text, method_text)
|
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
'Visit a StaticRouteInfo instance. Usually called by `process`.'
    text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
| 7,074,009,647,409,785,000
|
Visit a StaticRouteInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_static_route
|
hzdwang/falcon-1
|
python
|
def visit_static_route(self, static_route: StaticRouteInfo) -> str:
    text = '{0}↦ {1.prefix} {1.directory}'.format(self.tab, static_route)
if static_route.fallback_filename:
text += ' [{0.fallback_filename}]'.format(static_route)
return text
|
def visit_sink(self, sink: SinkInfo) -> str:
'Visit a SinkInfo instance. Usually called by `process`.'
    text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
| 3,417,290,023,421,739,000
|
Visit a SinkInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_sink
|
hzdwang/falcon-1
|
python
|
def visit_sink(self, sink: SinkInfo) -> str:
    text = '{0}⇥ {1.prefix} {1.name}'.format(self.tab, sink)
if self.verbose:
text += ' ({0.source_info})'.format(sink)
return text
|
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
    'Visit an ErrorHandlerInfo instance. Usually called by `process`.'
    text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
| -205,411,815,747,033,000
|
Visit an ErrorHandlerInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_error_handler
|
hzdwang/falcon-1
|
python
|
def visit_error_handler(self, error_handler: ErrorHandlerInfo) -> str:
    text = '{0}⇜ {1.error} {1.name}'.format(self.tab, error_handler)
if self.verbose:
text += ' ({0.source_info})'.format(error_handler)
return text
|
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
'Visit a MiddlewareMethodInfo instance. Usually called by `process`.'
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
| 7,950,787,544,159,926,000
|
Visit a MiddlewareMethodInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_middleware_method
|
hzdwang/falcon-1
|
python
|
def visit_middleware_method(self, middleware_method: MiddlewareMethodInfo) -> str:
text = '{0.function_name}'.format(middleware_method)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_method)
return text
|
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
    'Visit a MiddlewareClassInfo instance. Usually called by `process`.'
    text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if (not method_text):
return text
return '{}:\n{}'.format(text, method_text)
| 737,925,779,832,650,900
|
Visit a MiddlewareClassInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_middleware_class
|
hzdwang/falcon-1
|
python
|
def visit_middleware_class(self, middleware_class: MiddlewareClassInfo) -> str:
    text = '{0}↣ {1.name}'.format(self.tab, middleware_class)
if self.verbose:
text += ' ({0.source_info})'.format(middleware_class)
method_text = self._methods_to_string(middleware_class.methods)
if (not method_text):
return text
return '{}:\n{}'.format(text, method_text)
|
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
'Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.'
    symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
| 1,270,719,916,785,987,300
|
Visit a MiddlewareTreeItemInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_middleware_tree_item
|
hzdwang/falcon-1
|
python
|
def visit_middleware_tree_item(self, mti: MiddlewareTreeItemInfo) -> str:
    symbol = mti._symbols.get(mti.name, '→')
return '{0}{1} {2.class_name}.{2.name}'.format(self.tab, symbol, mti)
|
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
'Visit a MiddlewareTreeInfo instance. Usually called by `process`.'
before = (len(m_tree.request) + len(m_tree.resource))
after = len(m_tree.response)
if ((before + after) == 0):
return ''
each = 2
initial = self.indent
if (after > before):
self.indent += (each * (after - before))
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if (m_tree.resource or (not text)):
text.append('')
self.indent += each
    text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
| -5,830,221,220,495,931,000
|
Visit a MiddlewareTreeInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_middleware_tree
|
hzdwang/falcon-1
|
python
|
def visit_middleware_tree(self, m_tree: MiddlewareTreeInfo) -> str:
before = (len(m_tree.request) + len(m_tree.resource))
after = len(m_tree.response)
if ((before + after) == 0):
        return ''
each = 2
initial = self.indent
if (after > before):
self.indent += (each * (after - before))
text = []
for r in m_tree.request:
text.append(self.process(r))
self.indent += each
if text:
        text.append('')
for r in m_tree.resource:
text.append(self.process(r))
self.indent += each
if (m_tree.resource or (not text)):
        text.append('')
self.indent += each
    text.append('{}├── Process route responder'.format(self.tab))
self.indent -= each
if m_tree.response:
        text.append('')
for r in m_tree.response:
self.indent -= each
text.append(self.process(r))
self.indent = initial
return '\n'.join(text)
|
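A note on the `if (after > before)` bump above: request- and resource-side entries indent by `each` after printing, while response-side entries de-indent by `each` before printing, so starting deeper when there are more response hooks than request-side hooks keeps the indentation from underflowing. Illustrative shape for one request hook and two response hooks (class names are made up, not from any particular app):

    → MwA.process_request
        ├── Process route responder
    ↢ MwB.process_response
↢ MwA.process_response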
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
'Visit a MiddlewareInfo instance. Usually called by `process`.'
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join((self.process(m) for m in middleware.middleware_classes))
self.indent -= 4
if m_text:
            text += '\n{}- Middleware classes:\n{}'.format(self.tab, m_text)
return text
| -6,325,941,541,130,197,000
|
Visit a MiddlewareInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_middleware
|
hzdwang/falcon-1
|
python
|
def visit_middleware(self, middleware: MiddlewareInfo) -> str:
text = self.process(middleware.middleware_tree)
if self.verbose:
self.indent += 4
m_text = '\n'.join((self.process(m) for m in middleware.middleware_classes))
self.indent -= 4
if m_text:
            text += '\n{}- Middleware classes:\n{}'.format(self.tab, m_text)
return text
|
def visit_app(self, app: AppInfo) -> str:
    'Visit an AppInfo instance. Usually called by `process`.'
type_ = ('ASGI' if app.asgi else 'WSGI')
self.indent = 4
text = '{} ({})'.format((self.name or 'Falcon App'), type_)
if app.routes:
routes = '\n'.join((self.process(r) for r in app.routes))
        text += '\n• Routes:\n{}'.format(routes)
    middleware_text = self.process(app.middleware)
    if middleware_text:
        text += '\n• Middleware ({}):\n{}'.format(app.middleware.independent_text, middleware_text)
    if app.static_routes:
        static_routes = '\n'.join((self.process(sr) for sr in app.static_routes))
        text += '\n• Static routes:\n{}'.format(static_routes)
    if app.sinks:
        sinks = '\n'.join((self.process(s) for s in app.sinks))
        text += '\n• Sinks:\n{}'.format(sinks)
    errors = _filter_internal(app.error_handlers, self.internal)
    if errors:
        errs = '\n'.join((self.process(e) for e in errors))
        text += '\n• Error handlers:\n{}'.format(errs)
return text
| 6,827,144,084,587,439,000
|
Visit an AppInfo instance. Usually called by `process`.
|
falcon/inspect.py
|
visit_app
|
hzdwang/falcon-1
|
python
|
def visit_app(self, app: AppInfo) -> str:
type_ = ('ASGI' if app.asgi else 'WSGI')
self.indent = 4
text = '{} ({})'.format((self.name or 'Falcon App'), type_)
if app.routes:
routes = '\n'.join((self.process(r) for r in app.routes))
        text += '\n• Routes:\n{}'.format(routes)
    middleware_text = self.process(app.middleware)
    if middleware_text:
        text += '\n• Middleware ({}):\n{}'.format(app.middleware.independent_text, middleware_text)
    if app.static_routes:
        static_routes = '\n'.join((self.process(sr) for sr in app.static_routes))
        text += '\n• Static routes:\n{}'.format(static_routes)
    if app.sinks:
        sinks = '\n'.join((self.process(s) for s in app.sinks))
        text += '\n• Sinks:\n{}'.format(sinks)
    errors = _filter_internal(app.error_handlers, self.internal)
    if errors:
        errs = '\n'.join((self.process(e) for e in errors))
        text += '\n• Error handlers:\n{}'.format(errs)
return text
|
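An illustrative shape of the text this visitor assembles with verbose=False (all names and paths are invented):

Falcon App (WSGI)
• Routes:
    ⇒ /things - ThingsResource:
       ├── GET - on_get
       └── POST - on_post
• Error handlers:
    ⇜ Exception handle_exception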
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    '\n :param crop_size: spatial crop size per axis (x, y(, z))\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :param margins: minimum distance to keep between the crop and each image border\n :return: list of random lower-bound indices, one per spatial axis\n '
lbs = []
for i in range((len(data_shape) - 2)):
if (((data_shape[(i + 2)] - crop_size[i]) - margins[i]) > margins[i]):
lbs.append(np.random.randint(margins[i], ((data_shape[(i + 2)] - crop_size[i]) - margins[i])))
else:
lbs.append(((data_shape[(i + 2)] - crop_size[i]) // 2))
return lbs
| 293,492,568,942,654,500
|
:param crop_size: spatial crop size per axis (x, y(, z))
:param data_shape: (b,c,x,y(,z)) must be the whole thing!
:param margins: minimum distance to keep between the crop and each image border
:return: list of random lower-bound indices, one per spatial axis
data/crop_and_pad_augmentations.py
|
get_lbs_for_random_crop
|
bowang-lab/shape-attentive-unet
|
python
|
def get_lbs_for_random_crop(crop_size, data_shape, margins):
    '\n :param crop_size: spatial crop size per axis (x, y(, z))\n :param data_shape: (b,c,x,y(,z)) must be the whole thing!\n :param margins: minimum distance to keep between the crop and each image border\n :return: list of random lower-bound indices, one per spatial axis\n '
lbs = []
for i in range((len(data_shape) - 2)):
if (((data_shape[(i + 2)] - crop_size[i]) - margins[i]) > margins[i]):
lbs.append(np.random.randint(margins[i], ((data_shape[(i + 2)] - crop_size[i]) - margins[i])))
else:
lbs.append(((data_shape[(i + 2)] - crop_size[i]) // 2))
return lbs
|
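A minimal usage sketch (shapes are illustrative): draw random lower bounds for a 64x64 crop from a (batch, channel, 128, 128) array while keeping at least a 10-pixel margin on each spatial axis, so each bound falls in [10, 128-64-10).

import numpy as np

data_shape = (2, 1, 128, 128)  # (b, c, x, y)
crop_size = (64, 64)
margins = (10, 10)

lbs = get_lbs_for_random_crop(crop_size, data_shape, margins)
print(lbs)  # e.g. [23, 41]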