function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def epsilon_greedy(state_vector, epsilon):
    """Returns an action selected by an epsilon-greedy exploration policy.

    With probability `epsilon` a uniformly random (action, object) pair is
    drawn; otherwise the greedy pair under the current DQN `model` is used.

    Args:
        state_vector (torch.FloatTensor): extracted vector representation
        epsilon (float): the probability of choosing a random command

    Returns:
        (int, int): the indices describing the action/object to take
    """
    if np.random.binomial(1, epsilon):
        # Explore: sample both indices uniformly at random.
        action_index = np.random.randint(0, NUM_ACTIONS)
        object_index = np.random.randint(0, NUM_OBJECTS)
    else:
        # Exploit: take the argmax of each Q-value head.
        act_arr, obj_arr = model(state_vector)
        # Bug fix: argmax returns 0-d tensors; .item() converts them to the
        # plain Python ints promised by the documented (int, int) return.
        action_index = torch.argmax(act_arr).item()
        object_index = torch.argmax(obj_arr).item()
    return (action_index, object_index)
8,
4,
8,
117,
1469995922
] |
def __init__(self, state_dim, action_dim, object_dim, hidden_size=100):
    """Builds the DQN: one shared state encoder and two linear Q-value heads.

    Args:
        state_dim (int): size of the input state vector
        action_dim (int): number of actions (size of the action head)
        object_dim (int): number of objects (size of the object head)
        hidden_size (int): width of the shared hidden layer
    """
    super(DQN, self).__init__()
    # Shared encoding of the state, then one head per output space.
    self.state_encoder = nn.Linear(state_dim, hidden_size)
    self.state2action = nn.Linear(hidden_size, action_dim)
    self.state2object = nn.Linear(hidden_size, object_dim)
8,
4,
8,
117,
1469995922
] |
def deep_q_learning(current_state_vector, action_index, object_index, reward,
                    next_state_vector, terminal):
    """Updates the weights of the DQN for a given transition.

    Args:
        current_state_vector (torch.FloatTensor): vector representation of current state
        action_index (int): index of the current action
        object_index (int): index of the current object
        reward (float): the immediate reward the agent receives from playing current command
        next_state_vector (torch.FloatTensor): vector representation of next state
        terminal (bool): True if this episode is over

    Returns:
        None
    """
    # The bootstrap target must not contribute gradients.
    with torch.no_grad():
        next_action_q, next_object_q = model(next_state_vector)
        next_max_q = 1 / 2 * (next_action_q.max() + next_object_q.max())

    current_q_heads = model(current_state_vector)
    # Q(s, a, o) is the average of the action-head and object-head entries.
    current_q = 1 / 2 * (current_q_heads[0][action_index]
                         + current_q_heads[1][object_index])
    bootstrap = 0.0 if terminal else next_max_q
    target = reward + GAMMA * bootstrap
    # Squared TD error (the 1/2 factor cancels in the gradient's scale).
    loss = 1 / 2 * (target - current_q) ** 2
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
8,
4,
8,
117,
1469995922
] |
def run_episode(for_training):
"""
Runs one episode
If for training, update Q function
If for testing, computes and return cumulative discounted reward
"""
epsilon = TRAINING_EP if for_training else TESTING_EP
# initialize for each episode
i = 0
epi_reward = 0
(current_room_desc, current_quest_desc, terminal) = framework.newGame() | xunilrj/sandbox | [
8,
4,
8,
117,
1469995922
] |
def run_epoch():
    """Runs one epoch and returns reward averaged over test episodes."""
    # Training episodes update the model; their rewards are discarded.
    for _ in range(NUM_EPIS_TRAIN):
        run_episode(for_training=True)
    # Evaluation episodes are scored and averaged.
    test_rewards = [run_episode(for_training=False)
                    for _ in range(NUM_EPIS_TEST)]
    return np.mean(np.array(test_rewards))
8,
4,
8,
117,
1469995922
] |
def test_args(kwargs, expected):
    """parse_args returns [] with no arguments and `expected` for `kwargs`."""
    assert tftest.parse_args() == []
    assert tftest.parse_args(**kwargs) == expected
154,
30,
154,
4,
1553196033
] |
def test_terragrunt_args(kwargs, expected):
    """parse_args maps terragrunt-style kwargs to the expected CLI args."""
    assert tftest.parse_args(**kwargs) == expected
154,
30,
154,
4,
1553196033
] |
def test_import(self):
  """Checks that the fedjax public API surface exposes the expected names."""
  expected = [
      (fedjax, 'FederatedAlgorithm'),
      (fedjax.aggregators, 'Aggregator'),
      (fedjax.algorithms, 'fed_avg'),
      (fedjax.datasets, 'emnist'),
      (fedjax.models, 'emnist'),
      (fedjax.training, 'save_checkpoint'),
  ]
  for module, attr in expected:
    self.assertTrue(hasattr(module, attr))
221,
41,
221,
10,
1608648243
] |
def h2oinit():
    """
    Python API test: h2o.init(url=None, ip=None, port=None, name = None, https=None, insecure=None,
    username=None, password=None, ookies=None, proxy=None, start_h2o=True, nthreads=-1, ice_root=None,
    enable_assertions=True, max_mem_size=None, min_mem_size=None, strict_version_check=None, **kwargs)
    """
    start_h2o = False
    strict_version_check = False

    print("Testing h2o.init() command...")
    try:
        h2o.init(start_h2o=start_h2o)
        print("h2o.init() command works!")
    except Exception as e:  # some errors are okay like version mismatch
        # Bug fix: the original format string contained only {0}, so the
        # second .format() argument (the actual message) was silently dropped.
        print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0]))
        assert_is_type(e, H2OConnectionError)

    try:
        h2o.init(strict_version_check=strict_version_check, start_h2o=start_h2o)
    except Exception as e:
        print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0]))
        assert_is_type(e, H2OConnectionError)

    # try to join a cluster and test out various command arguments
    ipS = "127.16.2.27"
    portS = "54321"
    nthread = 2
    max_mem_size = 10
    min_mem_size = 3
    try:
        h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, min_mem_size=min_mem_size,
                 start_h2o=start_h2o, strict_version_check=strict_version_check)
        print("Command h2o.init(ip=ipS, port=portS, nthreads=nthread, max_mem_size=max_mem_size, "
              "min_mem_size=min_mem_size,start_h2o=start_h2o, strict_version_check=strict_version_check) works!")
    except Exception as e:  # make sure correct error message is received
        print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0]))
        assert_is_type(e, H2OConnectionError)
6169,
1943,
6169,
208,
1393862887
] |
def h2oinit_default_log_dir():
    """Checks that h2o.init() writes logs under <ice_root>/h2ologs by default."""
    tmpdir = tempfile.mkdtemp()
    try:
        h2o.init(strict_version_check=False, name="default_log", ice_root=tmpdir)
    except H2OConnectionError as e:  # some errors are okay like version mismatch
        print("error message type is {0} and the error message is {1}\n".format(e.__class__.__name__, e.args[0]))
    finally:
        # Idiom fix: assert truthiness directly instead of comparing to True.
        assert os.path.exists(os.path.join(tmpdir, "h2ologs"))
        shutil.rmtree(tmpdir)
        h2o.cluster().shutdown()
6169,
1943,
6169,
208,
1393862887
] |
def h2oinit_fail_invalid_log_level():
    """Verifies h2o.init() rejects an invalid log_level with H2OValueError."""
    try:
        h2o.init(strict_version_check=False, log_level="BAD_LOG_LEVEL")
        assert False, "Should fail to start an h2o instance with an invalid log level."
    except H2OConnectionError as e:  # some errors are okay like version mismatch
        # Reaching here means argument validation was skipped -- that is a failure.
        assert False, "Should fail to start an h2o instance with an invalid log level but H2OConnectionError was thrown."
    except H2OValueError:
        print("H2OValueError properly thrown")
        return
    finally:
        h2o.cluster().shutdown()
6169,
1943,
6169,
208,
1393862887
] |
def __init__(self,
             units=64,
             mode=modes.Modes.TRAINING,
             inference_batch_size=1,
             return_sequences=False,
             use_peepholes=False,
             num_proj=128,
             unroll=False,
             stateful=False,
             name='LSTM',
             **kwargs):
  """Mode-aware LSTM layer supporting training and streaming inference.

  Args:
    units: number of LSTM units.
    mode: one of modes.Modes; selects how the cell/layer is constructed.
    inference_batch_size: fixed batch size used in the inference modes.
    return_sequences: whether the non-streaming RNN returns full sequences.
    use_peepholes: if True, use a peephole LSTMCell with a projection layer.
    num_proj: projection size of the peephole cell's second state.
    unroll: whether to unroll the RNN (forced True in any inference mode).
    stateful: Keras `stateful` flag for the non-streaming layers.
    name: prefix used for the external-state Input placeholders.
    **kwargs: forwarded to the base layer.
  """
  super(LSTM, self).__init__(**kwargs)
  self.mode = mode
  self.inference_batch_size = inference_batch_size
  self.units = units
  self.return_sequences = return_sequences
  self.num_proj = num_proj
  self.use_peepholes = use_peepholes
  self.stateful = stateful
  if mode != modes.Modes.TRAINING:  # in any inference mode
    # let's unroll lstm, so there is no symbolic loops / control flow
    unroll = True
  self.unroll = unroll
  if self.mode in (modes.Modes.TRAINING, modes.Modes.NON_STREAM_INFERENCE):
    # Whole-sequence processing: wrap a cell (peephole) or use keras LSTM.
    if use_peepholes:
      self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
          num_units=units, use_peepholes=True, num_proj=num_proj, name='cell')
      self.lstm = tf.keras.layers.RNN(
          cell=self.lstm_cell,
          return_sequences=return_sequences,
          unroll=self.unroll,
          stateful=self.stateful)
    else:
      self.lstm = tf.keras.layers.LSTM(
          units=units,
          return_sequences=return_sequences,
          name='cell',
          unroll=self.unroll,
          stateful=self.stateful)
  if self.mode == modes.Modes.STREAM_INTERNAL_STATE_INFERENCE:
    # create state varaible for stateful streamable inference
    # (non-trainable weights hold the recurrent state between calls)
    self.input_state1 = self.add_weight(
        name='input_state1',
        shape=[inference_batch_size, units],
        trainable=False,
        initializer=tf.zeros_initializer)
    if use_peepholes:
      # second state in peepholes LSTM has different dimensions with
      # the first state due to projection layer with dim num_proj
      self.input_state2 = self.add_weight(
          name='input_state2',
          shape=[inference_batch_size, num_proj],
          trainable=False,
          initializer=tf.zeros_initializer)
      self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
          num_units=units, use_peepholes=True, num_proj=num_proj, name='cell')
    else:
      # second state in the standard LSTM has the same dimensions with
      # the first state
      self.input_state2 = self.add_weight(
          name='input_state2',
          shape=[inference_batch_size, units],
          trainable=False,
          initializer=tf.zeros_initializer)
      self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell')
    self.lstm = None
  elif self.mode == modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
    # in streaming mode with external state state,
    # state becomes an input output placeholders
    self.input_state1 = tf.keras.layers.Input(
        shape=(units,),
        batch_size=inference_batch_size,
        name=self.name + 'input_state1')
    if use_peepholes:
      self.input_state2 = tf.keras.layers.Input(
          shape=(num_proj,),
          batch_size=inference_batch_size,
          name=self.name + 'input_state2')
      self.lstm_cell = tf1.nn.rnn_cell.LSTMCell(
          num_units=units, use_peepholes=True, num_proj=num_proj)
    else:
      self.input_state2 = tf.keras.layers.Input(
          shape=(units,),
          batch_size=inference_batch_size,
          name=self.name + 'input_state2')
      self.lstm_cell = tf.keras.layers.LSTMCell(units=units, name='cell')
    self.lstm = None
    # Output placeholders populated during the streaming call.
    self.output_state1 = None
    self.output_state2 = None
27788,
6881,
27788,
944,
1538678568
] |
def get_config(self):
  """Returns the serializable layer config merged with the base config."""
  base = super(LSTM, self).get_config()
  # Layer-specific keys override any base keys of the same name.
  base.update(
      mode=self.mode,
      inference_batch_size=self.inference_batch_size,
      units=self.units,
      return_sequences=self.return_sequences,
      unroll=self.unroll,
      num_proj=self.num_proj,
      use_peepholes=self.use_peepholes,
      stateful=self.stateful,
  )
  return base
27788,
6881,
27788,
944,
1538678568
] |
def get_output_state(self):
  """Returns [state1, state2]; valid only in external-state streaming mode."""
  # Guard clause: any other mode has no externally visible output state.
  if self.mode != modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE:
    raise ValueError('Expected the layer to be in external streaming mode, '
                     f'not `{self.mode}`.')
  return [self.output_state1, self.output_state2]
27788,
6881,
27788,
944,
1538678568
] |
def _streaming_external_state(self, inputs, state1, state2):
  """One streaming step with externally supplied LSTM state."""
  # first dimension is batch size
  batch = inputs.shape[0]
  if batch != self.inference_batch_size:
    raise ValueError(
        'inputs.shape[0]:%d must be = self.inference_batch_size:%d' %
        (batch, self.inference_batch_size))
  # [batch, 1, feature] -> [batch, feature] for the cell, then back.
  squeezed = tf.keras.backend.squeeze(inputs, axis=1)
  cell_output, new_states = self.lstm_cell(squeezed, [state1, state2])
  expanded = tf.keras.backend.expand_dims(cell_output, axis=1)
  return expanded, new_states[0], new_states[1]
27788,
6881,
27788,
944,
1538678568
] |
def Params(cls):
  """Returns the base params extended with a boolean `split` knob."""
  p = super().Params()
  # `split` toggles the halved infeed batch size in InfeedBatchSize.
  p.Define('split', True, '')
  return p
2689,
429,
2689,
115,
1532471428
] |
def InfeedBatchSize(self):
  """Returns the per-infeed batch size (halved when `split` is set)."""
  return 10 / 2 if self.params.split else 10
2689,
429,
2689,
115,
1532471428
] |
def _InputParams(self):
  """NmtInput params over the WMT'14 En->De WPM-32k test shard."""
  p = input_generator.NmtInput.Params()
  p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
  p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
  p.tokenizer.vocab_size = 32000
  # Deterministic single-reader input for reproducible tests.
  p.file_random_seed = 31415
  p.file_parallelism = 1
  # One bucket: source length up to 40, batches of 8.
  p.bucket_upper_bound = [40]
  p.bucket_batch_limit = [8]
  p.source_max_length = 200
  p.target_max_length = 200
  return p
2689,
429,
2689,
115,
1532471428
] |
def _DecoderParams(self):
  """Returns a tiny TransformerDecoder config (dims of 4) for fast tests."""
  p = decoder.TransformerDecoder.Params()
  p.name = 'decoder'
  p.random_seed = 1234
  # Keep every dimension tiny so construction and FProp stay fast.
  p.source_dim = 4
  p.model_dim = 4
  p.token_emb.embedding_dim = 4
  p.token_emb.max_num_shards = 1
  p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim(
      seed=p.random_seed)
  p.position_emb.embedding_dim = 4
  p.trans_tpl.source_dim = 4
  p.trans_tpl.tr_atten_tpl.source_dim = 4
  p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
  p.trans_tpl.tr_fflayer_tpl.input_dim = 4
  p.trans_tpl.tr_fflayer_tpl.hidden_dim = 8
  p.softmax.num_shards = 1
  p.target_seq_len = 5
  return p
2689,
429,
2689,
115,
1532471428
] |
def testConstruction(self):
  """Model builds; flattened vars match tf.trainable_variables()."""
  with self.session():
    p = self._testParams()
    mdl = p.Instantiate()
    print('vars = ', mdl.vars)
    flatten_vars = mdl.vars.Flatten()
    print('vars flattened = ', flatten_vars)
    # Golden variable count for this fixed test config.
    self.assertEqual(len(flatten_vars), 238)
    # Should match tf.trainable_variables().
    self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
2689,
429,
2689,
115,
1532471428
] |
def testFPropEvalMode(self):
  """Eval-mode FProp yields deterministic loss/log_pplx over 5 batches."""
  with self.session(), self.SetEval(True):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    loss = mdl.loss
    logp = mdl.eval_metrics['log_pplx'][0]
    self.evaluate(tf.global_variables_initializer())
    vals = []
    # Five consecutive batches; the values below are golden regression
    # targets for this fixed random seed.
    for _ in range(5):
      vals += [self.evaluate((loss, logp))]
    print('actual vals = ', vals)
    self.assertAllClose(vals, [(226.99771, 10.377038), (243.92978, 10.379991),
                               (260.7751, 10.379107), (201.10846, 10.379791),
                               (272.22006, 10.370288)])
2689,
429,
2689,
115,
1532471428
] |
def testBPropWithAccumComparison(self):
  """Two accumulated half-batch steps must match one full-batch step."""

  def _SetDefaults(p):
    # Deterministic init, no dropout/noise, plain SGD -- so both runs below
    # are numerically comparable.
    p.random_seed = 12345
    p.decoder.input_dropout_prob = 0.0
    mp = p.encoder.transformer_stack.transparent_merger_tpl
    mp.weighted_merger_dropout_prob = 0.0
    disable_vn = py_utils.VariationalNoiseParams(1.0, False, False)
    for lp in base_layer.RecursiveFindLayerParams(p):
      # TODO(lepikhin): lp.dtype = dtype
      lp.params_init = py_utils.WeightInit.Gaussian(0.1, 12345)
      lp.vn = disable_vn
    tp = p.train
    assert tp.l2_regularizer_weight is None
    tp.clip_gradient_norm_to_value = False
    tp.grad_norm_to_clip_to_zero = False
    tp.optimizer = optimizer.SGD.Params()
    tp.learning_rate = 1e-2
    tp.lr_schedule = schedule.ContinuousSchedule.Params()
    for l in p.ToText().split('\n'):
      print(l)
    return p

  # Run 1: split input, gradients accumulated over 2 steps.
  with self.session(use_gpu=False, graph=tf.Graph()):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    p.input = TestInputGenerator.Params()
    p.input.split = True
    p = _SetDefaults(p)
    p.train.optimizer = optimizer.Accumulator.Params().Set(
        accum_steps=2, optimizer_tpl=p.train.optimizer)
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    mdl.BProp()
    self.evaluate(tf.global_variables_initializer())
    for _ in range(2):
      self.evaluate(mdl.train_op)
    expected = self.evaluate(mdl.dec.softmax.vars['weight_0'])

  # Run 2: unsplit input, a single ordinary optimizer step.
  with self.session(use_gpu=False, graph=tf.Graph()):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    p.input = TestInputGenerator.Params()
    p.input.split = False
    p = _SetDefaults(p)
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    mdl.BProp()
    self.evaluate(tf.global_variables_initializer())
    self.evaluate(mdl.train_op)
    actual = self.evaluate(mdl.dec.softmax.vars['weight_0'])

  # The softmax weights from both runs should agree (loose tolerance).
  self.assertAllClose(expected, actual, rtol=1e-2, atol=1e-2)
2689,
429,
2689,
115,
1532471428
] |
def Run(num_splits):
  """Returns the loss from one FProp with the batch split across GPUs."""
  with self.session(use_gpu=False, graph=tf.Graph()):
    tf.random.set_seed(93820981)
    p = self._testParams()
    # Scale per-bucket batch size so total work is constant across splits.
    # NOTE(review): true division yields floats under Python 3 -- presumably
    # the input generator coerces limits to int; confirm.
    p.input.bucket_batch_limit = [
        b * 2 / num_splits for b in p.input.bucket_batch_limit
    ]
    with cluster_factory.ForTestingWorker(gpus=num_splits):
      mdl = p.Instantiate()
      metrics = mdl.FPropDefaultTheta()[0]
    self.evaluate(tf.global_variables_initializer())
    return self.evaluate(metrics['loss'])
2689,
429,
2689,
115,
1532471428
] |
def testBatchSizeInInputGenerator(self):
  """In sync multi-GPU training the infeed batch limit scales by GPU count."""
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    with cluster_factory.ForTestingWorker(
        mode='sync', job='trainer_client', gpus=5):
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      loss = mdl.loss
    self.evaluate(tf.global_variables_initializer())
    _ = self.evaluate(loss)
    # Per-bucket limit 8 * 5 GPUs = 40.
    self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
2689,
429,
2689,
115,
1532471428
] |
def _InputParams(self):
  """NmtInput params over the WMT'14 En->De WPM-32k test shard."""
  p = input_generator.NmtInput.Params()
  p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
  p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
  p.tokenizer.vocab_size = 32000
  # Deterministic single-reader input for reproducible tests.
  p.file_random_seed = 31415
  p.file_parallelism = 1
  # One bucket: source length up to 40, batches of 8.
  p.bucket_upper_bound = [40]
  p.bucket_batch_limit = [8]
  p.source_max_length = 200
  p.target_max_length = 200
  return p
2689,
429,
2689,
115,
1532471428
] |
def _DecoderParams(self):
  """Returns a tiny MTDecoderV1 config (dims of 4) for fast tests."""
  p = decoder.MTDecoderV1.Params()
  p.name = 'decoder'
  # Keep every dimension tiny so construction and FProp stay fast.
  p.source_dim = 4
  p.emb.vocab_size = 32000
  p.emb.embedding_dim = 4
  p.emb.max_num_shards = 1
  p.rnn_cell_dim = 4
  p.rnn_layers = 3
  p.attention.hidden_dim = 2
  p.softmax.num_classes = 32000
  p.softmax.num_shards = 1
  return p
2689,
429,
2689,
115,
1532471428
] |
def testConstruction(self):
  """Model builds; variable count matches the per-layer breakdown below."""
  with self.session():
    p = self._testParams()
    mdl = p.Instantiate()
    flatten_vars = mdl.vars.Flatten()
    # encoder/embedding: 1
    # encoder/lstms: 2 * (3 (forward) + 3 (backward))
    # encoder/proj: 2
    # decoder/embedding: 1
    # decoder/atten: 3
    # decoder/lstms: 2 * 3
    # decoder/softmax: 2
    self.assertEqual(len(flatten_vars), 1 + 12 + 2 + 1 + 3 + 6 + 2)
    # Should match tf.trainable_variables().
    self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
2689,
429,
2689,
115,
1532471428
] |
def testFPropEvalMode(self):
  """Eval-mode FProp yields deterministic loss/log_pplx over 5 batches."""
  with self.session(), self.SetEval(True):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    loss = mdl.loss
    logp = mdl.eval_metrics['log_pplx'][0]
    self.evaluate(tf.global_variables_initializer())
    vals = []
    # Five consecutive batches; golden regression values for this seed.
    for _ in range(5):
      vals += [self.evaluate((loss, logp))]
    print('actual vals = %s' % np.array_repr(np.array(vals)))
    self.assertAllClose(vals, [[226.92014, 10.373492], [243.77704, 10.373491],
                               [260.63403, 10.373494], [200.98639, 10.373491],
                               [272.30417, 10.373492]])
2689,
429,
2689,
115,
1532471428
] |
def testDecode(self):
  """Decode runs end-to-end: BLEU metric exists and 8 outputs are emitted."""
  with self.session(use_gpu=False), self.SetEval(True):
    tf.random.set_seed(93820985)
    p = self._testParams()
    mdl = p.Instantiate()
    input_batch = mdl.input_generator.GetPreprocessedInputBatch()
    dec_out_dict = mdl.Decode(input_batch)
    self.evaluate(tf.global_variables_initializer())
    dec_out = self.evaluate(dec_out_dict)
    metrics_dict = mdl.CreateDecoderMetrics()
    key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
    # Untrained model: corpus BLEU is expected to be ~0.
    self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5)
    self.assertLen(key_value_pairs, 8)
    # NOTE(review): asserts each key is a substring of its value --
    # presumably the serialized outputs echo their keys; confirm.
    for k, v in key_value_pairs:
      self.assertIn(k, v)
2689,
429,
2689,
115,
1532471428
] |
def Run(num_splits):
  """Returns the loss from one FProp with the batch split across GPUs."""
  with self.session(use_gpu=False, graph=tf.Graph()):
    tf.random.set_seed(93820981)
    p = self._testParams()
    # Scale per-bucket batch size so total work is constant across splits.
    # NOTE(review): true division yields floats under Python 3 -- presumably
    # the input generator coerces limits to int; confirm.
    p.input.bucket_batch_limit = [
        b * 2 / num_splits for b in p.input.bucket_batch_limit
    ]
    with cluster_factory.ForTestingWorker(gpus=num_splits):
      mdl = p.Instantiate()
      metrics = mdl.FPropDefaultTheta()[0]
    self.evaluate(tf.global_variables_initializer())
    return self.evaluate(metrics['loss'])
2689,
429,
2689,
115,
1532471428
] |
def testBatchSizeInInputGenerator(self):
  """In sync multi-GPU training the infeed batch limit scales by GPU count."""
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    # Hand-build a sync trainer_client cluster: 1 replica with 5 GPUs.
    cluster_params = cluster_factory.Cluster.Params()
    cluster_params.mode = 'sync'
    cluster_params.job = 'trainer_client'
    cluster_params.worker.name = '/job:localhost'
    cluster_params.worker.gpus_per_replica = 5
    cluster_params.input.name = '/job:localhost'
    cluster_params.input.replicas = 1
    cluster_params.input.gpus_per_replica = 0
    with cluster_params.Instantiate():
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      loss = mdl.loss
    self.evaluate(tf.global_variables_initializer())
    _ = self.evaluate(loss)
    # Per-bucket limit 8 * 5 GPUs = 40.
    self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
2689,
429,
2689,
115,
1532471428
] |
def _InputParams(self):
  """NmtInput params over the WMT'14 En->De WPM-32k test shard."""
  p = input_generator.NmtInput.Params()
  p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
  p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
  p.tokenizer.vocab_size = 32000
  # Deterministic single-reader input for reproducible tests.
  p.file_random_seed = 31415
  p.file_parallelism = 1
  # One bucket: source length up to 40, batches of 8.
  p.bucket_upper_bound = [40]
  p.bucket_batch_limit = [8]
  p.source_max_length = 200
  p.target_max_length = 200
  return p
2689,
429,
2689,
115,
1532471428
] |
def _DecoderParams(self):
  """Returns a tiny MTDecoderV1 config (dims of 4) for fast tests."""
  p = decoder.MTDecoderV1.Params()
  p.name = 'decoder'
  # Keep every dimension tiny so construction and FProp stay fast.
  p.source_dim = 4
  p.emb.vocab_size = 32000
  p.emb.embedding_dim = 4
  p.emb.max_num_shards = 1
  p.rnn_cell_dim = 4
  p.rnn_layers = 3
  p.attention.hidden_dim = 2
  p.softmax.num_classes = 32000
  p.softmax.num_shards = 1
  return p
2689,
429,
2689,
115,
1532471428
] |
def testConstruction(self):
  """Model builds; variable count matches the per-layer breakdown below."""
  with self.session():
    p = self._testParams()
    mdl = p.Instantiate()
    flatten_vars = mdl.vars.Flatten()
    print('vars flattened = ', flatten_vars)
    # encoder: 91 (1 + 36 + 54)
    # encoder/embedding: 1
    # encoder/ff_layer: 6 * 6
    # encoder/attention: 9 * 6
    # decoder: 12 (1 + 3 + 6 + 2)
    # decoder/embedding: 1
    # decoder/atten: 3
    # decoder/lstms: 2 * 3
    # decoder/softmax: 2
    self.assertEqual(len(flatten_vars), 91 + 12)
    # Should match tf.trainable_variables().
    self.assertEqual(len(tf.trainable_variables()), len(flatten_vars))
2689,
429,
2689,
115,
1532471428
] |
def testFPropEvalMode(self):
  """Eval-mode FProp yields deterministic loss/log_pplx over 5 batches."""
  with self.session(), self.SetEval(True):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    loss = mdl.loss
    logp = mdl.eval_metrics['log_pplx'][0]
    self.evaluate(tf.global_variables_initializer())
    vals = []
    # Five consecutive batches; golden regression values for this seed.
    for _ in range(5):
      vals += [self.evaluate((loss, logp))]
    print('actual vals = %s' % np.array_repr(np.array(vals)))
    self.assertAllClose(vals, [[226.91527, 10.373269], [243.76906, 10.373152],
                               [260.62787, 10.373248], [200.98814, 10.373582],
                               [272.297, 10.373219]])
2689,
429,
2689,
115,
1532471428
] |
def testDecode(self):
  """Decode runs end-to-end: BLEU metric exists and 8 outputs are emitted."""
  with self.session(use_gpu=False), self.SetEval(True):
    tf.random.set_seed(93820985)
    p = self._testParams()
    mdl = p.Instantiate()
    input_batch = mdl.input_generator.GetPreprocessedInputBatch()
    dec_out_dict = mdl.Decode(input_batch)
    self.evaluate(tf.global_variables_initializer())
    dec_out = self.evaluate(dec_out_dict)
    metrics_dict = mdl.CreateDecoderMetrics()
    key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
    # Untrained model: corpus BLEU is expected to be ~0.
    self.assertNear(0.0, metrics_dict['corpus_bleu'].value, 1.0e-5)
    self.assertLen(key_value_pairs, 8)
    # NOTE(review): asserts each key is a substring of its value --
    # presumably the serialized outputs echo their keys; confirm.
    for k, v in key_value_pairs:
      self.assertIn(k, v)
2689,
429,
2689,
115,
1532471428
] |
def Run(num_splits):
  """Returns the loss from one FProp with the batch split across GPUs."""
  with self.session(use_gpu=False, graph=tf.Graph()):
    tf.random.set_seed(93820981)
    p = self._testParams()
    # Scale per-bucket batch size so total work is constant across splits.
    # NOTE(review): true division yields floats under Python 3 -- presumably
    # the input generator coerces limits to int; confirm.
    p.input.bucket_batch_limit = [
        b * 2 / num_splits for b in p.input.bucket_batch_limit
    ]
    with cluster_factory.ForTestingWorker(gpus=num_splits):
      mdl = p.Instantiate()
      metrics = mdl.FPropDefaultTheta()[0]
    self.evaluate(tf.global_variables_initializer())
    return self.evaluate(metrics['loss'])
2689,
429,
2689,
115,
1532471428
] |
def testBatchSizeInInputGenerator(self):
  """In sync multi-GPU training the infeed batch limit scales by GPU count."""
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    # Hand-build a sync trainer_client cluster: 1 replica with 5 GPUs.
    cluster_params = cluster_factory.Cluster.Params()
    cluster_params.mode = 'sync'
    cluster_params.job = 'trainer_client'
    cluster_params.worker.name = '/job:localhost'
    cluster_params.worker.gpus_per_replica = 5
    cluster_params.input.name = '/job:localhost'
    cluster_params.input.replicas = 1
    cluster_params.input.gpus_per_replica = 0
    with cluster_params.Instantiate():
      mdl = p.Instantiate()
      mdl.FPropDefaultTheta()
      loss = mdl.loss
    self.evaluate(tf.global_variables_initializer())
    _ = self.evaluate(loss)
    # Per-bucket limit 8 * 5 GPUs = 40.
    self.assertEqual(mdl.input_generator.infeed_bucket_batch_limit, [40])
2689,
429,
2689,
115,
1532471428
] |
def _InputParams(self):
  """NmtInput params over the WMT'14 En->De WPM-32k test shard."""
  p = input_generator.NmtInput.Params()
  p.file_pattern = 'tfrecord:' + test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.tfrecord')
  p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
  p.tokenizer.vocab_size = 32000
  # Deterministic single-reader input for reproducible tests.
  p.file_random_seed = 31415
  p.file_parallelism = 1
  # One bucket: source length up to 40, batches of 8.
  p.bucket_upper_bound = [40]
  p.bucket_batch_limit = [8]
  p.source_max_length = 200
  p.target_max_length = 200
  return p
2689,
429,
2689,
115,
1532471428
] |
def _testParams(self):
  """Assembles InsertionModel params from the input/decoder helpers."""
  p = model.InsertionModel.Params()
  p.name = 'insertion'
  p.input = self._InputParams()
  p.decoder = self._DecoderParams()
  # Fixed seed so all tests on this model are deterministic.
  p.random_seed = 12345
  return p
2689,
429,
2689,
115,
1532471428
] |
def testCreateCanvasAndTargets(self):
  """_CreateCanvasAndTargets matches golden canvases/targets for a fixed seed."""
  with self.session():
    tf.random.set_seed(_TF_RANDOM_SEED)
    # Two-sentence batch; id 2 is EOS, padding marks the shorter sentence.
    batch = py_utils.NestedMap(
        src=py_utils.NestedMap(
            ids=tf.convert_to_tensor(
                np.asarray([
                    [10, 11, 12, 14, 2, 0],
                    [20, 21, 22, 24, 25, 2],
                ], np.int32)),
            paddings=tf.convert_to_tensor(
                np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]],
                           np.float32))),
        tgt=py_utils.NestedMap(
            ids=tf.convert_to_tensor(
                np.asarray([[100, 101, 102, 104, 2, 0],
                            [200, 201, 202, 204, 205, 2]], np.int32)),
            paddings=tf.convert_to_tensor(
                np.asarray([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]],
                           np.float32))))
    p = self._testParams()
    mdl = p.Instantiate()
    descriptor = mdl._CreateCanvasAndTargets(batch)
    canvas, canvas_paddings, target_indices, target_weights = self.evaluate([
        descriptor.canvas, descriptor.canvas_paddings,
        descriptor.target_indices, descriptor.target_weights
    ])
    # Golden outputs for the seeded canvas sampling.
    # NOTE(review): ids >= 32000 appear to be vocab-size-offset markers for
    # kept source tokens -- confirm against _CreateCanvasAndTargets.
    canvas_gold = np.asarray([
        [32014, 32002, 104, 2, 0, 0, 0, 0],
        [32020, 32021, 32022, 32002, 200, 201, 202, 2],
    ], np.int32)
    canvas_paddings_gold = np.asarray(
        [[0., 0., 0., 0., 1., 1., 1., 1.], [0., 0., 0., 0., 0., 0., 0., 0.]],
        np.float32)
    target_indices_gold = np.asarray(
        [[0, 0, 10], [0, 0, 11], [0, 0, 12], [0, 0, 2], [0, 1, 2], [1, 0, 2],
         [1, 1, 2], [1, 2, 2], [1, 3, 24], [1, 3, 25], [1, 3, 2], [0, 2, 100],
         [0, 2, 101], [0, 2, 102], [0, 2, 2], [0, 3, 2], [1, 4, 2], [1, 5, 2],
         [1, 6, 2], [1, 7, 204], [1, 7, 205], [1, 7, 2]], np.int32)
    target_weights_gold = np.asarray([1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0] +
                                     [1, 1, 1, 0, 1] + [1, 1, 1, 1, 1, 0],
                                     np.float32)
    target_weights_gold = np.reshape(target_weights_gold,
                                     [target_weights_gold.shape[0], 1])
    self.assertAllEqual(canvas, canvas_gold)
    self.assertAllEqual(canvas_paddings, canvas_paddings_gold)
    self.assertAllEqual(target_indices, target_indices_gold)
    self.assertAllEqual(target_weights, target_weights_gold)
2689,
429,
2689,
115,
1532471428
] |
def testFPropGraph(self):
  """Test the construction of the fprop graph, then fprop the graph."""
  with self.session():
    p = self._testParams()
    mdl = p.Instantiate()
    mdl.FPropDefaultTheta()
    self.evaluate(tf.global_variables_initializer())
    # Evaluating the loss tensor proves the graph actually executes.
    self.evaluate(mdl.loss)
2689,
429,
2689,
115,
1532471428
] |
def _InputParams(self):
  """NmtDoubleInput params over the WMT'14 En->De double-batch test shard."""
  p = input_generator.NmtDoubleInput.Params()
  input_file = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_doublebatch_test-000-001')
  p.file_pattern = 'tfrecord:' + input_file
  p.tokenizer.token_vocab_filepath = test_helper.test_src_dir_path(
      'tasks/mt/testdata/wmt14_ende_wpm_32k_test.vocab')
  # Deterministic single-reader input.
  p.file_random_seed = 31415
  p.file_parallelism = 1
  p.bucket_upper_bound = [10, 20]
  p.bucket_batch_limit = [4, 2]
  # NOTE(review): source_mask_ratio=-1 next to a '2,6' beta suggests the
  # mask rate is sampled from Beta(2, 6) instead of fixed -- confirm
  # against NmtDoubleInput.
  p.source_mask_ratio = -1
  p.source_mask_ratio_beta = '2,6'
  p.mask_word_id = 31999
  p.pad_id = 31998
  p.mask_words_ratio = 0.25
  p.permutation_distance = 3
  p.vocab_file = p.tokenizer.token_vocab_filepath
  p.packed_input = False
  return p
2689,
429,
2689,
115,
1532471428
] |
def _DecoderParams(self):
  """Returns a tiny TransformerXDecoder config (model_dim 4) for fast tests."""
  p = decoder.TransformerXDecoder.Params()
  p.name = 'mix_decoder'
  p.token_emb.params_init = py_utils.WeightInit.GaussianSqrtDim()
  p.token_emb.vocab_size = 32000
  p.token_emb.embedding_dim = 4
  p.token_emb.max_num_shards = 1
  p.token_emb.scale_sqrt_depth = True
  p.token_emb.vn = py_utils.VariationalNoiseParams(1.0, False, False)
  p.position_emb.embedding_dim = 4
  p.position_emb.trainable_scaling = False
  # All transformer dims track model_dim = 4 to keep the test fast.
  p.model_dim = 4
  p.source_dim = 4
  p.num_trans_layers = 6
  p.trans_tpl.source_dim = p.model_dim
  p.trans_tpl.tr_atten_tpl.source_dim = p.model_dim
  p.trans_tpl.tr_atten_tpl.num_attention_heads = 2
  p.trans_tpl.tr_atten_tpl.atten_hidden_dim = 4
  p.trans_tpl.tr_atten_tpl.atten_tpl.context_dim = p.model_dim
  p.trans_tpl.tr_fflayer_tpl.hidden_dim = 4
  p.trans_tpl.tr_fflayer_tpl.input_dim = p.model_dim
  p.label_smoothing = layers.UniformLabelSmoother.Params()
  p.label_smoothing.uncertainty = 0.1
  p.per_word_avg_loss = True
  p.softmax.num_classes = 32000
  p.softmax.num_shards = 1
  # Fixed seed for deterministic golden-value tests.
  p.random_seed = 54321
  return p
2689,
429,
2689,
115,
1532471428
] |
def testFProp(self, dtype=tf.float32, fprop_dtype=tf.float32):
  """FProp yields deterministic clean/other/mix/total losses over 5 batches."""
  with self.session(use_gpu=False):
    tf.random.set_seed(_TF_RANDOM_SEED)
    p = self._testParams()
    p.dtype = dtype
    if fprop_dtype:
      # Run the forward pass (and input pipeline) in the requested dtype.
      p.fprop_dtype = fprop_dtype
      p.input.dtype = fprop_dtype
    mdl = p.Instantiate()
    dec_metrics, _ = mdl.FPropDefaultTheta()
    self.evaluate(tf.global_variables_initializer())
    vals = []
    print(mdl)
    for _ in range(5):
      vals += [
          self.evaluate(
              (dec_metrics['clean_loss'][0], dec_metrics['other_loss'][0],
               dec_metrics['mix_loss'][0], dec_metrics['loss'][0]))
      ]
    print('actual vals = %s' % np.array_repr(np.array(vals)))
    # Golden regression values for the fixed seed; loose tolerances absorb
    # nondeterministic op ordering.
    self.assertAllClose(
        vals, [[10.373864, 10.371083, 10.372491, 31.11744],
               [10.36428, 10.379262, 10.366394, 31.109936],
               [10.369206, 10.372709, 10.369126, 31.111042],
               [10.363656, 10.364362, 10.362683, 31.090702],
               [10.371622, 10.374066, 10.371591, 31.11728]],
        rtol=1e-02,
        atol=1e-02)
2689,
429,
2689,
115,
1532471428
] |
def setUpTestData(cls):
    """Creates three parent tenant groups, each with one described child group."""
    parent_tenant_groups = []
    for i in (1, 2, 3):
        parent = TenantGroup(
            name=f'Parent Tenant Group {i}',
            slug=f'parent-tenant-group-{i}',
        )
        parent.save()
        parent_tenant_groups.append(parent)
    # Child groups carry distinct descriptions for filter tests.
    for i, description in enumerate(('A', 'B', 'C'), start=1):
        child = TenantGroup(
            name=f'Tenant Group {i}',
            slug=f'tenant-group-{i}',
            parent=parent_tenant_groups[i - 1],
            description=description,
        )
        child.save()
12158,
2099,
12158,
303,
1456755346
] |
def test_name(self):
    """Filtering on two group names matches exactly those two groups."""
    params = {'name': ['Tenant Group 1', 'Tenant Group 2']}
    self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
12158,
2099,
12158,
303,
1456755346
] |
def test_description(self):
    """Filtering on two descriptions matches exactly those two groups."""
    params = {'description': ['A', 'B']}
    self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
12158,
2099,
12158,
303,
1456755346
] |
def setUpTestData(cls):
    """Creates three tenant groups and one tenant assigned to each."""
    tenant_groups = []
    for i in (1, 2, 3):
        group = TenantGroup(name=f'Tenant Group {i}', slug=f'tenant-group-{i}')
        group.save()
        tenant_groups.append(group)
    Tenant.objects.bulk_create([
        Tenant(name=f'Tenant {i}', slug=f'tenant-{i}', group=tenant_groups[i - 1])
        for i in (1, 2, 3)
    ])
12158,
2099,
12158,
303,
1456755346
] |
def test_name(self):
    """Filtering on two tenant names matches exactly those two tenants."""
    params = {'name': ['Tenant 1', 'Tenant 2']}
    self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)
12158,
2099,
12158,
303,
1456755346
] |
def crop_and_pad_voxels(voxels, start_coordinates, end_coordinates):
    """Crops a voxel region and pads past the boundaries with zeros.

    This accepts start and end coordinates past the limits of the voxel grid,
    and uses them to calculate how much top/left/right/bottom padding to add.

    Args:
      voxels: A tf.float32 tensor of shape [x, y, z, f] to crop.
      start_coordinates: A list of len 4 with the [x, y, z, f] starting
        location of the crop. Entries may be negative, indicating padding
        before the grid.
      end_coordinates: A list of len 4 with the [x, y, z, f] ending location
        of the crop. Entries may exceed the voxel tensor's size, indicating
        padding after the grid.

    Returns:
      cropped_and_padded_voxels: A voxel grid with shape
        [end_coordinates[i] - start_coordinates[i] for i in range(4)].

    Raises:
      ValueError: If the requested crop/pad is outside the supported bounds.
    """
    if len(start_coordinates) != 4:
        raise ValueError('start_coordinates should be of length 4')
    if len(end_coordinates) != 4:
        raise ValueError('end_coordinates should be of length 4')
    # Idiom fix: a generator expression avoids building a throwaway list
    # just to feed any().
    if any(coord <= 0 for coord in end_coordinates):
        raise ValueError('Requested end coordinates should be > 0')

    start_coordinates = tf.convert_to_tensor(start_coordinates, tf.int32)
    end_coordinates = tf.convert_to_tensor(end_coordinates, tf.int32)

    # Clip the coordinates to within the voxel grid; anything clipped off
    # becomes zero padding below.
    clipped_start_coordinates = tf.maximum(0, start_coordinates)
    clipped_end_coordinates = tf.minimum(voxels.shape, end_coordinates)

    cropped_voxels = tf.slice(voxels,
                              begin=clipped_start_coordinates,
                              size=(clipped_end_coordinates -
                                    clipped_start_coordinates))

    # Negative starts pad before the grid; ends beyond the grid pad after it.
    top_and_left_padding = tf.maximum(0, -start_coordinates)
    bottom_and_right_padding = tf.maximum(0, end_coordinates - voxels.shape)

    padding = tf.stack([top_and_left_padding, bottom_and_right_padding], axis=1)
    return tf.pad(cropped_voxels, padding)
27788,
6881,
27788,
944,
1538678568
] |
def voxels_to_points(voxels, segment_ids):
    """Project voxel features back onto points via their segment ids.

    Args:
      voxels: A tf.float32 tensor representing a voxel grid of shape
        [x, y, z, f].
      segment_ids: A tf.int32 tensor with, for each original point, the
        flattened-voxel id that point was mapped to.

    Returns:
      point_features: A tf.float32 tensor of shape [N, f]; each point carries
        the features of its voxel cell.
    """
    feature_dim = voxels.shape[-1]
    flat_voxels = tf.reshape(voxels, shape=(-1, feature_dim))
    return tf.gather(flat_voxels, segment_ids)
27788,
6881,
27788,
944,
1538678568
] |
def points_offset_in_voxels(points, grid_cell_size):
    """Converts points into offsets within their voxel grid cells.

    Args:
      points: A tf.float32 tensor of size [batch_size, N, 3].
      grid_cell_size: The size of the grid cells in x, y, z dimensions in the
        voxel grid. It should be either a tf.float32 tensor, a numpy array or
        a list of size [3].

    Returns:
      voxel_xyz_offsets: A tf.float32 tensor of size [batch_size, N, 3].
    """
    batch_size = points.get_shape().as_list()[0]
    if batch_size is None:
        # Consistency/robustness fix: fall back to the dynamic batch size the
        # same way pointcloud_to_sparse_voxel_grid does, instead of handing
        # None to tf.range below.
        batch_size = tf.shape(points)[0]

    def fn(i):
        # Process one batch example at a time.
        return _points_offset_in_voxels_unbatched(
            points=points[i, :, :], grid_cell_size=grid_cell_size)

    return tf.map_fn(fn=fn, elems=tf.range(batch_size), dtype=tf.float32)
27788,
6881,
27788,
944,
1538678568
] |
def pointcloud_to_sparse_voxel_grid_unbatched(points, features, grid_cell_size,
                                              segment_func):
    """Converts a single (unbatched) pointcloud into a sparse voxel grid.

    A sparse voxel grid keeps only the voxels that contain points. With N'
    occupied voxels it is represented by:
      (a) voxel_features, a [N', F] or [N', G, F] tensor of per-voxel features;
      (b) voxel_indices, a [N', 3] tensor with each voxel's x, y, z index.

    `pointcloud_to_sparse_voxel_grid` below maps this function over a batch.

    Args:
      points: A tf.float32 tensor of size [N, 3].
      features: A tf.float32 tensor of size [N, F].
      grid_cell_size: Size of the grid cells in x, y, z; a tf.float32 tensor,
        numpy array or list of size [3].
      segment_func: A tensorflow segment op, e.g. one of
        tf.math.unsorted_segment_{min/max/mean/prod/sum}.

    Returns:
      voxel_features: A tf.float32 tensor of size [N', F] or [N', G, F] where
        G is the number of points sampled per voxel.
      voxel_indices: A tf.int32 tensor of size [N', 3].
      segment_ids: A [N] tf.int32 tensor mapping each point to its
        (flattened) voxel cell.
      voxel_start_location: A tf.float32 tensor of size [3] with the start
        location of the voxel grid.

    Raises:
      ValueError: If pooling method is unknown.
    """
    cell_size = tf.convert_to_tensor(grid_cell_size, dtype=tf.float32)
    xyz_indices, flat_indices, start_location = _points_to_voxel_indices(
        points=points, grid_cell_size=cell_size)
    # Pool the per-point features into one feature vector per occupied voxel.
    pooled_features, segment_ids, num_segments = pool_features_given_indices(
        features=features,
        indices=flat_indices,
        segment_func=segment_func)
    # All points in a segment share the same xyz index, so max is a cheap way
    # to pick that index per segment.
    pooled_xyz_indices = tf.math.unsorted_segment_max(
        data=xyz_indices,
        segment_ids=segment_ids,
        num_segments=num_segments)
    return pooled_features, pooled_xyz_indices, segment_ids, start_location
27788,
6881,
27788,
944,
1538678568
] |
def pointcloud_to_sparse_voxel_grid(points, features, num_valid_points,
                                    grid_cell_size, voxels_pad_or_clip_size,
                                    segment_func):
    """Converts a batch of pointclouds into sparse voxel grids.

    Applies `pointcloud_to_sparse_voxel_grid_unbatched` to every example of
    the batch via tf.map_fn.

    Args:
      points: A tf.float32 tensor of size [batch_size, N, 3].
      features: A tf.float32 tensor of size [batch_size, N, F].
      num_valid_points: A tf.int32 tensor of size [num_batches] containing the
        number of valid points in each batch example.
      grid_cell_size: A tf.float32 tensor of size [3].
      voxels_pad_or_clip_size: Number of target voxels to pad or clip to. If
        None, no padding is performed.
      segment_func: A tensorflow segment op, e.g. one of
        tf.math.unsorted_segment_{min/max/mean/prod/sum}.

    Returns:
      voxel_features: A tf.float32 tensor of size [batch_size, N', F] or
        [batch_size, N', G, F] where G is the points sampled per voxel.
      voxel_indices: A tf.int32 tensor of size [batch_size, N', 3].
      num_valid_voxels: A tf.int32 tensor of size [batch_size].
      segment_ids: A [batch_size, N] tf.int32 tensor mapping points to their
        (flattened) voxel cell.
      voxel_start_location: A [batch_size, 3] tf.float32 tensor of voxel
        start locations.

    Raises:
      ValueError: If pooling method is unknown.
    """
    batch_size = points.get_shape().as_list()[0]
    if batch_size is None:
        # Static batch size unknown; fall back to the dynamic shape.
        batch_size = tf.shape(points)[0]
    num_points = tf.shape(points)[1]

    def _voxelize_example(i):
        """Voxelizes the i-th example of the batch."""
        valid = num_valid_points[i]
        (example_features, example_indices, example_segment_ids,
         example_start_location) = pointcloud_to_sparse_voxel_grid_unbatched(
             points=points[i, :valid, :],
             features=features[i, :valid, :],
             grid_cell_size=grid_cell_size,
             segment_func=segment_func)
        example_num_voxels = tf.shape(example_features)[0]
        (example_features, example_indices, example_num_voxels,
         example_segment_ids) = _pad_or_clip_voxels(
             voxel_features=example_features,
             voxel_indices=example_indices,
             num_valid_voxels=example_num_voxels,
             segment_ids=example_segment_ids,
             voxels_pad_or_clip_size=voxels_pad_or_clip_size)
        # Pad the segment ids back out to the full (padded) point count so
        # every example has the same shape for map_fn.
        example_segment_ids = tf.pad(
            example_segment_ids, paddings=[[0, num_points - valid]])
        return (example_features, example_indices, example_num_voxels,
                example_segment_ids, example_start_location)

    return tf.map_fn(
        fn=_voxelize_example,
        elems=tf.range(batch_size),
        dtype=(tf.float32, tf.int32, tf.int32, tf.int32, tf.float32))
27788,
6881,
27788,
944,
1538678568
] |
def fn(i):
    """Gather per-point voxel features for example i, padded to num_points."""
    valid_voxels = num_valid_voxels[i]
    valid_points = num_valid_points[i]
    features_i = voxel_features[i, :valid_voxels, :]
    ids_i = segment_ids[i, :valid_points]
    point_features = tf.gather(features_i, ids_i)
    # Pad only the leading (points) axis back to the full point count; the
    # remaining feature axes keep their size.
    rank = len(point_features.get_shape().as_list())
    paddings = [[0, num_points - valid_points]] + [[0, 0]] * (rank - 1)
    return tf.pad(point_features, paddings=paddings)
return point_features | google-research/google-research | [
27788,
6881,
27788,
944,
1538678568
] |
def per_voxel_point_sample_segment_func(data, segment_ids, num_segments,
num_samples_per_voxel):
"""Samples features from the points within each voxel.
Args:
data: A tf.float32 tensor of size [N, F].
segment_ids: A tf.int32 tensor of size [N].
num_segments: Number of segments.
num_samples_per_voxel: Number of features to sample per voxel. If the voxel
has less number of points in it, the point features will be padded by 0.
Returns:
A tf.float32 tensor of size [num_segments, num_samples_per_voxel, F].
A tf.int32 indices of size [N, num_samples_per_voxel].
"""
num_channels = data.get_shape().as_list()[1]
if num_channels is None:
raise ValueError('num_channels is None.')
n = tf.shape(segment_ids)[0]
# indices_range below starts at 1..n so that 0 can mean "no point left"
# for a segment; a zero row is prepended to `data` at the end so index 0
# gathers zeros (the padding value).
def _body_fn(i, indices_range, indices):
"""Computes the indices of the i-th point feature in each segment."""
# Per segment, pick the largest still-unconsumed (1-based) point index.
indices_i = tf.math.unsorted_segment_max(
data=indices_range, segment_ids=segment_ids, num_segments=num_segments)
indices_i_positive_mask = tf.greater(indices_i, 0)
indices_i_positive = tf.boolean_mask(indices_i, indices_i_positive_mask)
# Build a mask of the points chosen this round (indices are 1-based,
# hence the -1 when scattering back into point positions).
boolean_mask = tf.scatter_nd(
indices=tf.cast(
tf.expand_dims(indices_i_positive - 1, axis=1), dtype=tf.int64),
updates=tf.ones_like(indices_i_positive, dtype=tf.int32),
shape=(n,))
# Zero out the chosen points so the next iteration samples new ones.
indices_range *= (1 - boolean_mask)
indices_i *= tf.cast(indices_i_positive_mask, dtype=tf.int32)
# Place this round's indices into sample slot i of each segment's row.
indices_i = tf.pad(
tf.expand_dims(indices_i, axis=1),
paddings=[[0, 0], [i, num_samples_per_voxel - i - 1]])
indices += indices_i
i = i + 1
return i, indices_range, indices
cond = lambda i, indices_range, indices: i < num_samples_per_voxel
(_, _, indices) = tf.while_loop(
cond=cond,
body=_body_fn,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.range(n) + 1,
tf.zeros([num_segments, num_samples_per_voxel],
dtype=tf.int32)))
# Prepend a zero row so sample slot value 0 ("no point") gathers zeros.
data = tf.pad(data, paddings=[[1, 0], [0, 0]])
voxel_features = tf.gather(data, tf.reshape(indices, [-1]))
return tf.reshape(voxel_features,
[num_segments, num_samples_per_voxel, num_channels]) | google-research/google-research | [
27788,
6881,
27788,
944,
1538678568
] |
def get_inference_spec(num_receivers=1, num_samples=None):
    """Returns a specification of features in tf.Examples in roomsim format."""
    return {
        Features.RECEIVER_AUDIO: tf.FixedLenFeature(
            [num_receivers, num_samples], tf.float32),
    }
484,
105,
484,
13,
1583214909
] |
def placeholders_from_spec(feature_spec):
    """Returns batch-size-1 placeholders compatible with a feature spec."""
    return {
        key: tf.placeholder(dtype=feature.dtype,
                            shape=[1] + feature.shape,
                            name=key)
        for key, feature in feature_spec.items()
    }
484,
105,
484,
13,
1583214909
] |
def _pad_mics_tf(signal, new_mics):
    """Pads new mic channels onto a signal tensor.

    The added channels are copies of the first `new_mics` input channels,
    shifted by one sample and perturbed with low-level noise so they are not
    byte-identical to the originals.

    Args:
      signal: A tf.tensor of shape (input_mics, samples).
      new_mics: Number of new mic channels to add (integer scalar tensor).

    Returns:
      padded_signal: A tf.tensor of shape (input_mics + new_mics, samples).
    """
    extra = tf.roll(signal[:new_mics, :], shift=1, axis=-1)
    # Noise at 1e-3 times the RMS level of the copied channels.
    noise_scale = 1e-3 * tf.sqrt(tf.reduce_mean(tf.square(extra)))
    extra += noise_scale * tf.random.normal(tf.shape(extra))
    return tf.concat((signal, extra), axis=0)
484,
105,
484,
13,
1583214909
] |
def utterance_info_generator():
"""Yields utterance informations from each meeting.
Utterance info is in the form of a 6-tuple:
wav_path, diarization, spkidx, meeting_scale, start, gain.
"""
# Placeholder yielded for empty speaker/utterance slots so every meeting
# emits exactly max_num_spk * max_num_utt_per_spk tuples.
default_diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
default_utt = ('0', default_diarization, -1, 0.0, 0, 0.0)
for one_meeting in meeting_list:
meeting_info = collections.defaultdict(list)
sources_start_end = one_meeting['utterance_start_end']
num_utt_in_meeting = len(sources_start_end)
# Maps a global speaker index to this meeting's local speaker number.
spk_num_in_meeting = {}
new_spknum = 0
spkids_in_meeting = []
spk_utt_idx = collections.defaultdict(int)
meeting_scale = float(one_meeting['meeting_scale'])
for utt_idx in range(num_utt_in_meeting):
start, end, spkid, wav_path = sources_start_end[utt_idx]
spkidx = spkid2idx[spkid]
# Skip utterances starting past the meeting length; clamp the end.
if start >= samples:
continue
if end >= samples:
end = samples
if spkidx in spk_num_in_meeting:
spknum = spk_num_in_meeting[spkidx]
else:
spknum = new_spknum
# NOTE(review): the final loops below emit only spknum in
# range(max_num_spk); this guard admits spknum == max_num_spk whose
# utterances are collected but never yielded -- confirm whether
# '>=' was intended here.
if spknum > max_num_spk:
continue
spkids_in_meeting.append(spkidx)
spk_num_in_meeting[spkidx] = spknum
new_spknum += 1
if use_relative_path:
wav_path = os.path.join(base_path, wav_path)
gain = one_meeting['utterance_gain'][utt_idx]
# Make diarization_labels array.
diarization = np.zeros((max_dia_seg_per_utt, 2), dtype=np.int32)
spk_utt_idx[spknum] += 1
diarization_info = \
one_meeting['diarization_label'][spkid][spk_utt_idx[spknum] - 1]
# Go over diarization segments in utterance.
for i, segment_st_end in enumerate(diarization_info):
segment_start, segment_end = segment_st_end
# Clamp segments to the meeting length and make them relative to
# the utterance start.
if segment_start >= samples:
continue
if segment_end > samples:
segment_end = samples
adjusted_start = segment_start - start
adjusted_end = segment_end - start
diarization[i, 0] = adjusted_start
diarization[i, 1] = adjusted_end
meeting_info[spknum].append((wav_path, diarization, spkidx,
meeting_scale, start, gain))
# Emit a fixed-size grid of utterances, padding with default_utt.
for spknum in range(max_num_spk):
if spknum in meeting_info:
for utt in range(max_num_utt_per_spk):
if utt < len(meeting_info[spknum]):
yield meeting_info[spknum][utt]
else:
yield default_utt
else:
for utt in range(max_num_utt_per_spk):
yield default_utt | google-research/sound-separation | [
484,
105,
484,
13,
1583214909
] |
def decode_wav(wav):
    """Reads a wav file into a (num_mics, max_utt_length) tf.float32 tensor.

    Channels are clipped to num_mics, or padded via _pad_mics_tf when the
    file has fewer channels than required.
    """
    audio_bytes = tf.read_file(wav)
    waveform, _ = tf.audio.decode_wav(audio_bytes,
                                      desired_samples=max_utt_length)
    waveform = tf.transpose(waveform)  # -> (channels, samples)
    available_mics = tf.shape(waveform)[0]
    waveform = tf.cond(
        available_mics >= num_mics,
        lambda: waveform[:num_mics, :],
        lambda: _pad_mics_tf(waveform, num_mics - available_mics))
    return tf.reshape(waveform, (num_mics, max_utt_length))
484,
105,
484,
13,
1583214909
] |
def utterance_reader(wav_path, diarization, spkidx, meet_scale, start, gain):
    """Reads the utterance's wave file (scaled by gain); passes the rest through."""
    audio = decode_wav_or_return_zeros(wav_path[0], gain=gain)
    return audio, diarization, spkidx, meet_scale, start
484,
105,
484,
13,
1583214909
] |
def pad_utterance(utt_tensor, diarization, spkidx, meeting_scale, start):
    """Zero-pads an utterance out to meeting length at its start offset.

    Args:
      utt_tensor: Utterance with shape (num_mics, max_utt_length).
      diarization: Diarization with shape (max_dia_seg_per_utt, 2), relative
        to the utterance start.
      spkidx: Speaker index (global) for the utterance.
      meeting_scale: Target meeting scale.
      start: Start index of the utterance in the meeting.

    Returns:
      utt_tensor_padded: Padded tensor (num_mics, samples + max_utt_length).
      diarization_padded: Diarization shifted to meeting coordinates.
      spkidx: Speaker index, unchanged.
      meeting_scale: Target meeting scale, unchanged.
    """
    offset = start[0]
    tail_padding = samples - offset
    padded = tf.pad(utt_tensor, ((0, 0), (offset, tail_padding)))
    shifted_diarization = offset + diarization
    return padded, shifted_diarization, spkidx, meeting_scale
484,
105,
484,
13,
1583214909
] |
def make_reference(utt_tensor, diarization, spkidx, meeting_scale):
"""Makes a reference from fixed length utterance tensors.
Args:
utt_tensor: Utterances with shape
(max_num_utt_per_spk, num_mics, samples + max_utt_len)
diarization: Diarization ranges with shape
(max_num_utt_per_spk, max_dia_seg_per_utt, 2).
spkidx: Speaker indices (repeated) with shape (max_num_utt_per_spk)
meeting_scale: Target meeting scale (repeated).
Returns:
reference: Meeting audio with shape (num_mics, samples)
diarization_labels: tf.bool with shape (samples)
spkidx: Scalar speaker index.
meeting_scale: Target meeting scale.
"""
# Sum this speaker's padded utterances into a single waveform, then trim
# the max_utt_length tail padding off the time axis.
reference_waveform = tf.reduce_sum(utt_tensor, axis=0)
reference_waveform = reference_waveform[:, :samples]
# Flatten all diarization segments, expand each to its sample range, and
# scatter-add ones to mark active samples.
diarization = tf.reshape(diarization,
(max_num_utt_per_spk * max_dia_seg_per_utt, 2))
active_samples_list = [
tf.range(diarization[i, 0], diarization[i, 1]) for i in
range(max_num_utt_per_spk * max_dia_seg_per_utt)]
active_samples = tf.reshape(
tf.concat(active_samples_list, axis=0), (-1, 1))
dia_full_init = tf.zeros((samples + max_utt_length, 1), dtype=tf.int32)
dia_full = tf.tensor_scatter_add(
dia_full_init, active_samples, tf.ones(tf.shape(active_samples),
dtype=tf.int32))
# Binarize (any overlap count > 0 is "active") and trim to meeting length.
dia_full = tf.cast(dia_full[:samples, 0], dtype=tf.bool)
# Collapse the repeated per-utterance values to scalars.
spkidx = spkidx[0]
meeting_scale = meeting_scale[0]
return reference_waveform, dia_full, spkidx, meeting_scale | google-research/sound-separation | [
484,
105,
484,
13,
1583214909
] |
def chop_meeting_data(reference_waveforms, diarization_labels, speaker_ids,
                      meeting_scale, nsplit=num_meeting_subdivisions):
    """Splits one meeting example into nsplit equal-length sub-examples.

    Any remainder samples that do not divide evenly are dropped. Speaker ids
    and the meeting scale are replicated across the sub-examples.
    """
    total_samples = tf.shape(reference_waveforms)[-1]
    usable_samples = nsplit * (total_samples // nsplit)
    chopped_refs = tf.stack(
        tf.split(reference_waveforms[..., :usable_samples], nsplit, axis=-1),
        axis=0)
    chopped_labels = tf.stack(
        tf.split(diarization_labels[..., :usable_samples], nsplit, axis=-1),
        axis=0)
    tiled_ids = tf.broadcast_to(
        tf.reshape(speaker_ids, (1, max_num_spk)), (nsplit, max_num_spk))
    tiled_scale = meeting_scale[0] * tf.ones((nsplit, max_num_spk))
    return tf.data.Dataset.from_tensor_slices(
        (chopped_refs, chopped_labels, tiled_ids, tiled_scale))
484,
105,
484,
13,
1583214909
] |
def combine_mixture_and_sources(reference_waveforms, diarization_labels,
speaker_ids, meeting_scale):
"""Scales sources, optionally adds sensor noise, and builds the example dict."""
# waveforms has shape (num_sources, num_mics, num_samples).
speaker_ids = tf.reshape(speaker_ids, (max_num_spk,))
meeting_scale = meeting_scale[0]
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
current_mixture_scale = tf.reduce_max(tf.abs(mixture_waveform))
# Note that when meetings are chopped, we cannot apply a meeting level
# scale. Instead, we apply the scale in the chunk level so that each
# chunk has a maximum scale equal to the meeting_scale. However, we should
# not apply any gain to an all noise chunk to avoid amplifying the noise,
# so we try not to scale those chunks by checking the current_mixture_scale
# value.
scale_refs = tf.cond(current_mixture_scale > 0.005,
lambda: meeting_scale / current_mixture_scale,
lambda: 1.0)
reference_waveforms *= scale_refs
num_sources = max_num_spk
if sensor_noise_range[1] > 0.0:
# Prepend a synthetic sensor-noise channel as an extra "source".
num_sources += 1
sensor_noise_gain = tf.random.uniform((), minval=sensor_noise_range[0],
maxval=sensor_noise_range[1])
sensor_noise = sensor_noise_gain * tf.random.normal(
(1, num_mics, samples))
reference_waveforms = tf.concat(
(sensor_noise, reference_waveforms), axis=0)
# Re-sum so the mixture includes the (scaled) sources and any sensor noise.
mixture_waveform = tf.reduce_sum(reference_waveforms, axis=0)
reference_waveforms.set_shape((num_sources, num_mics, samples))
mixture_waveform.set_shape((num_mics, samples))
diarization_labels.set_shape((max_num_spk, samples))
speaker_ids.set_shape((max_num_spk,))
return {'receiver_audio': mixture_waveform,
'source_images': reference_waveforms,
'diarization_labels': diarization_labels,
'speaker_indices': speaker_ids,
} | google-research/sound-separation | [
484,
105,
484,
13,
1583214909
] |
def render(self, context):
    """Render the node, falling back to the `empty` nodelist on errors.

    In DEBUG mode exceptions propagate so developers see the failure; in
    production the error is logged and the fallback branch is rendered.
    """
    try:
        return self._render(context)
    except Exception:
        if settings.DEBUG:
            raise
        # Log the failure instead of swallowing it silently (was a TODO).
        import logging
        logging.getLogger(__name__).exception(
            "Error rendering thummer template node")
        return self.nodelist_empty.render(context)
19,
3,
19,
1,
1324113517
] |
def __init__(self, parser, token):
"""Parse a template tag of the form:
{% tag url geometry [key=value ...] as var %} ... [{% empty %} ...] {% endthummer %}
Raises TemplateSyntaxError on malformed tags.
"""
bits = token.split_contents()
# Minimum shape: tag-name, url, geometry, 'as', variable-name.
if len(bits) < 5 or bits[-2] != 'as':
raise TemplateSyntaxError(self.error_msg)
self.url = parser.compile_filter(bits[1])
self.geometry = parser.compile_filter(bits[2])
# Everything between geometry and 'as' must be key=value options.
self.options = []
for bit in bits[3:-2]:
m = kw_pat.match(bit)
if not m:
raise TemplateSyntaxError(self.error_msg)
key = smart_str(m.group('key'))
expr = parser.compile_filter(m.group('value'))
self.options.append((key, expr))
self.as_var = bits[-1]
# Body until {% empty %} or {% endthummer %}; an optional empty branch
# is parsed separately.
self.nodelist_url = parser.parse(('empty', 'endthummer',))
if parser.next_token().contents == 'empty':
self.nodelist_empty = parser.parse(('endthummer',))
parser.delete_first_token() | mattaustin/django-thummer | [
19,
3,
19,
1,
1324113517
] |
def supported(event):
    """Return True when the event's device reports itself as a roller shutter."""
    device = event.device
    return device.known_to_be_rollershutter
7,
1,
7,
6,
1467778429
] |
def cover_update(event, device_id):
    """Handle cover updates from the RFXtrx gateway."""
    # Ignore unsupported devices and devices we already track.
    if not supported(event) or device_id in device_ids:
        return
    device_ids.add(device_id)

    device = event.device
    _LOGGER.info(
        "Added cover (Device ID: %s Class: %s Sub: %s, Event: %s)",
        device.id_string.lower(),
        device.__class__.__name__,
        device.subtype,
        "".join(f"{x:02x}" for x in event.data),
    )
    async_add_entities(
        [RfxtrxCover(device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event)]
    )
7,
1,
7,
6,
1467778429
] |
def is_closed(self):
    """Return True when the cover is in the closed position."""
    # _state tracks "open"; closed is its logical negation.
    return False if self._state else True
7,
1,
7,
6,
1467778429
] |
def _apply_event(self, event):
    """Apply a command received from rfxtrx, updating open/closed state."""
    super()._apply_event(event)
    command = event.values["Command"]
    if command in COMMAND_ON_LIST:
        self._state = True
    elif command in COMMAND_OFF_LIST:
        self._state = False
7,
1,
7,
6,
1467778429
] |
def __init__(self, _orient_socket):
    """Initialize a synchronous COMMAND request with default settings."""
    super(CommandMessage, self).__init__(_orient_socket)
    self._query = ''
    self._limit = 20           # default record limit
    self._fetch_plan = '*:0'   # fetch no connected records by default
    self._command_type = QUERY_SYNC
    self._mod_byte = 's'       # 's' = synchronous, 'a' = asynchronous
    # Operation byte must be appended first.
    self._append((FIELD_BYTE, COMMAND_OP))
117,
36,
117,
15,
1419963921
] |
def prepare(self, params=None):
"""Build the COMMAND request payload.
params, when given, is a tuple/list of up to five positional values:
(command_type, query, limit, fetch_plan, callback).
"""
if isinstance(params, tuple) or isinstance(params, list):
try:
self.set_command_type(params[0])
self._query = params[1]
self._limit = params[2]
self._fetch_plan = params[3]
# callback function use to operate
# over the async fetched records
self.set_callback(params[4])
except IndexError:
# Use default for non existent indexes
pass
# Mode byte: 's' for synchronous command types, 'a' for asynchronous
# (async requires a callback to receive records).
if self._command_type == QUERY_CMD \
or self._command_type == QUERY_SYNC \
or self._command_type == QUERY_SCRIPT \
or self._command_type == QUERY_GREMLIN:
self._mod_byte = 's'
else:
if self._callback is None:
raise PyOrientBadMethodCallException("No callback was provided.", [])
self._mod_byte = 'a'
_payload_definition = [
(FIELD_STRING, self._command_type),
(FIELD_STRING, self._query)
]
if self._command_type == QUERY_ASYNC \
or self._command_type == QUERY_SYNC \
or self._command_type == QUERY_GREMLIN:
# a limit specified in a sql string should always override a
# limit parameter pass to prepare()
if ' LIMIT ' not in self._query.upper() or self._command_type == QUERY_GREMLIN:
_payload_definition.append((FIELD_INT, self._limit))
else:
_payload_definition.append((FIELD_INT, -1))
_payload_definition.append((FIELD_STRING, self._fetch_plan))
if self._command_type == QUERY_SCRIPT:
# Scripts carry a language field ('sql') right after the type.
_payload_definition.insert(1, (FIELD_STRING, 'sql'))
_payload_definition.append((FIELD_INT, 0))
# The payload is serialized up-front and sent as a single string field.
payload = b''.join(
self._encode_field(x) for x in _payload_definition
)
self._append((FIELD_BYTE, self._mod_byte))
self._append((FIELD_STRING, payload))
return super(CommandMessage, self).prepare() | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def set_command_type(self, _command_type):
    """Validate and store the command type; returns self for chaining."""
    if _command_type not in QUERY_TYPES:
        raise PyOrientBadMethodCallException(
            _command_type + ' is not a valid command type', []
        )
    # user choice if present
    self._command_type = _command_type
    return self
117,
36,
117,
15,
1419963921
] |
def set_query(self, _query):
    """Record the query text to send; returns self for chaining."""
    self._query = _query
    return self
117,
36,
117,
15,
1419963921
] |
def _read_sync(self):
    """Decode the body of a synchronous COMMAND response.

    The first byte of the payload selects the result shape:
      'n' null result; 'r'/'w' single record; 'a' serialized value;
      'l' list of records, possibly followed by pre-fetched cache records.

    Returns:
        A list of decoded records/values, or None for a null result.
    """
    # type of response
    # decode body char with flag continue ( Header already read )
    response_type = self._decode_field(FIELD_CHAR)
    if not isinstance(response_type, str):
        response_type = response_type.decode()
    res = []
    if response_type == 'n':
        self._append(FIELD_CHAR)
        super(CommandMessage, self).fetch_response(True)
        # end Line \x00
        return None
    elif response_type == 'r' or response_type == 'w':
        res = [self._read_record()]
        self._append(FIELD_CHAR)
        # end Line \x00
        _res = super(CommandMessage, self).fetch_response(True)
        if response_type == 'w':
            # 'w' wraps the value in a record with a 'result' field.
            res = [res[0].oRecordData['result']]
    elif response_type == 'a':
        self._append(FIELD_STRING)
        self._append(FIELD_CHAR)
        res = [super(CommandMessage, self).fetch_response(True)[0]]
    elif response_type == 'l':
        self._append(FIELD_INT)
        list_len = super(CommandMessage, self).fetch_response(True)[0]
        for _ in range(list_len):
            res.append(self._read_record())
        # async-result-type can be:
        # 0: no records remain to be fetched
        # 1: a record is returned as a result set
        # 2: a record is returned as pre-fetched to be loaded in client's
        #    cache only. It's not part of the result set but the client
        #    knows that it's available for later access
        cached_results = self._read_async_records()
        # cache = cached_results['cached']
    else:
        # This should never happen; drain the socket to aid protocol
        # debugging.
        msg = b''
        self._orientSocket._socket.setblocking(0)
        m = self._orientSocket.read(1)
        # Bug fix: the loop previously compared the bytes chunk against the
        # str "" -- never equal in Python 3, so it spun forever. Test
        # truthiness instead so an empty read terminates the drain.
        while m:
            msg += m
            m = self._orientSocket.read(1)
    return res
117,
36,
117,
15,
1419963921
] |
def __init__(self, _orient_socket):
    """Initialize an empty transaction-commit message."""
    super(_TXCommitMessage, self).__init__(_orient_socket)
    self._tx_id = -1                      # lazily generated session tx id
    self._operation_stack = []
    self._pre_operation_records = {}
    self._operation_records = {}
    self._temp_cluster_position_seq = -2  # temporary RIDs count down from -2
    # order matters
    self._append((FIELD_BYTE, TX_COMMIT_OP))
    self._command = TX_COMMIT_OP
117,
36,
117,
15,
1419963921
] |
def prepare(self, params=None):
    """Serialize the queued operations into the commit payload."""
    self._append((FIELD_INT, self.get_transaction_id()))
    self._append((FIELD_BOOLEAN, True))
    for record_fields in self._operation_stack:
        self._append((FIELD_BYTE, chr(1)))  # start of records
        for field in record_fields:
            self._append(field)
    self._append((FIELD_BYTE, chr(0)))  # end-of-records marker
    self._append((FIELD_STRING, ""))    # empty index-changes payload
    return super(_TXCommitMessage, self).prepare()
117,
36,
117,
15,
1419963921
] |
def fetch_response(self):
"""Decode the TX_COMMIT response and update local record bookkeeping.
Returns the dict of records touched by this transaction, keyed by RID.
"""
# self.dump_streams()
super(_TXCommitMessage, self).fetch_response()
result = {
'created': [],
'updated': [],
'changes': []
}
# --- created records -------------------------------------------------
items = self._decode_field(FIELD_INT)
for x in range(0, items):
# (created-record-count:int)
# [
# (client-specified-cluster-id:short)
# (client-specified-cluster-position:long)
# (created-cluster-id:short)
# (created-cluster-position:long)
# ]*
result['created'].append(
{
'client_c_id': self._decode_field(FIELD_SHORT),
'client_c_pos': self._decode_field(FIELD_LONG),
'created_c_id': self._decode_field(FIELD_SHORT),
'created_c_pos': self._decode_field(FIELD_LONG)
}
)
# Map the server-assigned RID back to the queued client operation.
operation = self._pre_operation_records[
str(result['created'][-1]['client_c_pos'])
]
rid = "#" + str(result['created'][-1]['created_c_id']) + \
":" + str(result['created'][-1]['created_c_pos'])
record = getattr(operation, "_record_content")
record.update(__version=1, __rid=rid)
self._operation_records[rid] = record
# --- updated records -------------------------------------------------
items = self._decode_field(FIELD_INT)
for x in range(0, items):
# (updated-record-count:int)
# [
# (updated-cluster-id:short)
# (updated-cluster-position:long)
# (new-record-version:int)
# ]*
result['updated'].append(
{
'updated_c_id': self._decode_field(FIELD_SHORT),
'updated_c_pos': self._decode_field(FIELD_LONG),
'new_version': self._decode_field(FIELD_INT),
}
)
try:
operation = self._pre_operation_records[
str(result['updated'][-1]['updated_c_pos'])
]
record = getattr(operation, "_record_content")
rid = "#" + str(result['updated'][-1]['updated_c_id']) + \
":" + str(result['updated'][-1]['updated_c_pos'])
record.update(
__version=result['updated'][-1]['new_version'],
__rid=rid
)
self._operation_records[rid] = record
except KeyError:
# Updates not initiated by this client have no queued operation.
pass
# --- collection changes (newer protocols only) -----------------------
if self.get_protocol() > 23:
items = self._decode_field(FIELD_INT)
for x in range(0, items):
# (count-of-collection-changes:int)
# [
# (uuid-most-sig-bits:long)
# (uuid-least-sig-bits:long)
# (updated-file-id:long)
# (updated-page-index:long)
# (updated-page-offset:int)
# ]*
# NOTE(review): these entries are appended to result['updated']
# although the 'changes' key above seems intended for them --
# harmless today because `result` is not returned, but confirm.
result['updated'].append(
{
'uuid_high': self._decode_field(FIELD_LONG),
'uuid_low': self._decode_field(FIELD_LONG),
'file_id': self._decode_field(FIELD_LONG),
'page_index': self._decode_field(FIELD_LONG),
'page_offset': self._decode_field(FIELD_INT),
}
)
self.dump_streams()
return self._operation_records # [self._operation_records, result] | orientechnologies/pyorient | [
117,
36,
117,
15,
1419963921
] |
def get_transaction_id(self):
    """Return a session-unique transaction id, generating it lazily.

    The id is derived from the microseconds elapsed since 2014-07-01 and
    reduced to a non-negative value that fits a signed 32-bit INT on the
    wire. It is unique within this session only -- not a real UUID.
    """
    if self._tx_id >= 0:
        return self._tx_id
    from datetime import datetime
    elapsed = datetime.now() - datetime(2014, 7, 1)
    # seconds * 10**6 converts to the equivalent microseconds.
    elapsed_us = (elapsed.seconds + elapsed.days * 24 * 3600) * 10 ** 6
    elapsed_us += elapsed.microseconds
    # Strip the sign bit and keep 4 bytes so the value survives the
    # signed-INT encoding unchanged.
    if elapsed_us & 0x80000000:
        self._tx_id = int((elapsed_us - 0x80000000) & 0xFFFFFFFF)
    else:
        self._tx_id = int(elapsed_us & 0xFFFFFFFF)
    return self._tx_id
117,
36,
117,
15,
1419963921
] |
def commit(self):
    """Send the transaction to the server and reset all local tx state."""
    self._orientSocket.in_transaction = False
    result = self.prepare().send().fetch_response()
    # Clear per-transaction bookkeeping so the message can be reused.
    self._operation_stack = []
    self._pre_operation_records = {}
    self._operation_records = {}
    self._tx_id = -1
    self._temp_cluster_position_seq = -2
    return result
117,
36,
117,
15,
1419963921
] |
def __init__(self, _orient_socket):
    """Wrap a fresh _TXCommitMessage bound to the given socket."""
    self._transaction = _TXCommitMessage(_orient_socket)
117,
36,
117,
15,
1419963921
] |
def begin(self):
    """Start the wrapped transaction; returns self for chaining."""
    self._transaction.begin()
    return self
117,
36,
117,
15,
1419963921
] |
def rollback(self):
    """Abort the wrapped transaction and propagate its result."""
    transaction = self._transaction
    return transaction.rollback()
117,
36,
117,
15,
1419963921
] |
def __init__(self, metrics_provider: BaseMetrics) -> None:
    """Store the metrics provider after normal base-class initialization."""
    super().__init__()
    # Kept under `self.metrics` so subclasses can emit metrics directly.
    self.metrics = metrics_provider
1644,
229,
1644,
129,
1445895353
] |
def run(self) -> None:
    """Run `run_once` forever, aiming for one iteration every 20 seconds."""
    while True:
        last_run_time = time.time()
        self.run_once()
        # Robustness fix: if run_once took longer than the 20s period the
        # computed delay goes negative and time.sleep raises ValueError.
        # Clamp to zero so the next iteration just starts immediately.
        time.sleep(max(0, last_run_time + 20 - time.time()))
1644,
229,
1644,
129,
1445895353
] |
def __init__(
self,
queue: DelayDeadlineQueueProtocol,
workers: List[PaastaDeployWorker],
cluster: str,
metrics_provider: BaseMetrics, | Yelp/paasta | [
1644,
229,
1644,
129,
1445895353
] |
def clean_cidr_block(self, cidr_block):
    """Validate that *cidr_block* lies inside the parent VPC's network.

    Returns the block unchanged when valid.

    Raises:
        errors.InvalidParameter: if the block is not contained in the VPC
            network.
    """
    if cidr_block not in self.vpc.cidr_block:
        # Bug fix: the message previously referenced self.cidr_block, which
        # does not exist here and raised AttributeError instead of the
        # intended InvalidParameter.
        raise errors.InvalidParameter(
            "{} not inside network {}".format(cidr_block, self.vpc.cidr_block)
        )
    return cidr_block
11,
4,
11,
17,
1410353271
] |
def get_describe_filters(self):
    """Build EC2 describe filters for this subnet, or None if no VPC yet."""
    vpc_plan = self.runner.get_plan(self.resource.vpc)
    if not vpc_plan.resource_id:
        # Parent VPC has not been created, so nothing can match.
        return None
    filters = [
        {"Name": "cidrBlock", "Values": [str(self.resource.cidr_block)]},
        {"Name": "vpcId", "Values": [vpc_plan.resource_id]},
    ]
    return {"Filters": filters}
11,
4,
11,
17,
1410353271
] |
def update_object(self):
    """Yield the actions needed to reconcile this subnet's route-table and
    network-ACL associations with the desired resource definition.

    Generator of ``generic_action`` steps executed by the runner. Handles
    three route-table cases (associate when none exists, replace when it
    points at the wrong table, disassociate when the resource no longer
    wants one), then replaces the network-ACL association if it is
    missing or stale.
    """
    if self.resource.route_table:
        # A route table is desired but none is currently associated.
        if not self.object.get("RouteTableAssociationId", None):
            yield self.generic_action(
                "Associate route table",
                self.client.associate_route_table,
                SubnetId=serializers.Identifier(),
                RouteTableId=self.resource.route_table.identifier(),
            )
        # An association exists but points at a different route table.
        elif (
            self.object["RouteTableId"]
            != self.runner.get_plan(self.resource.route_table).resource_id
        ):
            yield self.generic_action(
                "Replace route table association",
                self.client.replace_route_table_association,
                AssociationId=self.object["RouteTableAssociationId"],
                RouteTableId=self.resource.route_table.identifier(),
            )
    # No route table desired, but an association lingers — remove it.
    elif self.object.get("RouteTableAssociationId", None):
        yield self.generic_action(
            "Disassociate route table",
            self.client.disassociate_route_table,
            AssociationId=self.object["RouteTableAssociationId"],
        )
    # --- Network ACL association -----------------------------------
    naa_changed = False
    if not self.resource.network_acl:
        # No ACL desired; nothing further to reconcile.
        return
    if not self.object:
        # Subnet not created yet — the association must be (re)made.
        naa_changed = True
    elif not self.object.get("NetworkAclAssociationId", None):
        naa_changed = True
    elif self.runner.get_plan(
        self.resource.network_acl
    ).resource_id != self.object.get("NetworkAclId", None):
        # Associated ACL differs from the desired one.
        naa_changed = True
    if naa_changed:
        yield self.generic_action(
            "Replace Network ACL association",
            self.client.replace_network_acl_association,
            AssociationId=serializers.Property("NetworkAclAssociationId"),
            NetworkAclId=self.resource.network_acl.identifier(),
        )
11,
4,
11,
17,
1410353271
] |
def apply_migration(apps, schema_editor):
    """Create the well-known "public" auth Group with its fixed id.

    Uses the historical model from the migration registry, as required
    inside data migrations.
    """
    group_model = apps.get_model('auth', 'Group')
    public_group = group_model(name="public", id=PUBLIC_ID)
    public_group.save()
1,
2,
1,
11,
1596553520
] |
def ndb_wsgi_middleware(wsgi_app):
    """Wrap *wsgi_app* so each request runs inside an NDB client context."""

    def middleware(environ, start_response):
        # A fresh context per request is required for NDB operations.
        with client.context():
            return wsgi_app(environ, start_response)

    return middleware
6120,
5980,
6120,
108,
1430781973
] |
def list_books():
    """Return the string form of every Book entity rendered as a dict."""
    snapshot = [book.to_dict() for book in Book.query()]
    return str(snapshot)
6120,
5980,
6120,
108,
1430781973
] |
def test_get_param_includes(self):
    """Malformed parameter containers must raise TaskCatException."""
    for malformed in ({}, [[]], [{}]):
        with self.assertRaises(TaskCatException):
            param_list_to_dict(malformed)
] |
def test_name_from_stack_id(self):
    """The stack name is the segment after 'Stack/' in the ARN."""
    stack_id = "arn:::us-east-1::Stack/test-name"
    self.assertEqual("test-name", name_from_stack_id(stack_id))
1061,
211,
1061,
39,
1479169741
] |
def test_s3_url_maker(self, m_get_s3_domain):
    """URL uses us-east-1 for a None LocationConstraint, else the region."""
    m_s3 = mock.Mock()
    # Buckets in us-east-1 report no LocationConstraint.
    m_s3.get_bucket_location.return_value = {"LocationConstraint": None}
    url = s3_url_maker("test-bucket", "test-key/1", m_s3)
    self.assertEqual(
        "https://test-bucket.s3.us-east-1.amazonaws.com/test-key/1", url
    )
    # Any other region is echoed into the hostname.
    m_s3.get_bucket_location.return_value = {"LocationConstraint": "us-west-2"}
    url = s3_url_maker("test-bucket", "test-key/1", m_s3)
    self.assertEqual(
        "https://test-bucket.s3.us-west-2.amazonaws.com/test-key/1", url
    )
    # The domain lookup is cached/skipped after the first resolution.
    m_get_s3_domain.assert_called_once()
1061,
211,
1061,
39,
1479169741
] |
def test_merge_dicts(self):
    """merge_dicts flattens a list of dicts into a single dict."""
    # Renamed from `input`, which shadowed the builtin of the same name.
    dict_list = [{}, {}]
    self.assertEqual({}, merge_dicts(dict_list))
    dict_list = [{"a": 1}, {"b": 2}]
    self.assertEqual({"a": 1, "b": 2}, merge_dicts(dict_list))
1061,
211,
1061,
39,
1479169741
] |
def test_make_dir(self):
    """make_dir is idempotent by default and strict when exist-ok is off."""
    path = "/tmp/test_make_dir_path"
    # Start from a clean slate; the dir may linger from a previous run.
    try:
        os.rmdir(path)
    except FileNotFoundError:
        pass
    os.makedirs(path)
    # Default call must tolerate an already-existing directory.
    make_dir(path)
    os.rmdir(path)
    # And must create the directory when it is absent.
    make_dir(path)
    self.assertEqual(os.path.isdir(path), True)
    # With exist-ok disabled, a pre-existing directory is an error.
    with self.assertRaises(FileExistsError) as cm:
        make_dir(path, False)
    self.assertEqual(cm.exception.errno, errno.EEXIST)
    os.rmdir(path)
1061,
211,
1061,
39,
1479169741
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.