Dataset schema:
- function: string (length 11 to 56k)
- repo_name: string (length 5 to 60)
- features: list
def testRandom(self):
  # Random shapes of rank 4, random indices
  for _ in range(5):
    shape = np.random.randint(1, 20, size=4)
    indices = np.random.randint(shape[0], size=2 * shape[0])
    self._TestCase(_AsLong(list(shape)), list(indices))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSubRandom(self):
  # Random shapes of rank 4, random indices
  for _ in range(5):
    shape = np.random.randint(1, 20, size=4)
    indices = np.random.randint(shape[0], size=2 * shape[0])
    self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testWrongShape(self):
  # Indices and values mismatch.
  var = variables.Variable(
      array_ops.zeros(shape=[1024, 64, 64], dtype=dtypes.float32))
  indices = array_ops.placeholder(dtypes.int32, shape=[32])
  values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
  with self.assertRaises(ValueError):
    state_ops.scatter_add(var, indices, values)

  # Var and values mismatch.
  values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
  with self.assertRaises(ValueError):
    state_ops.scatter_add(var, indices, values)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _EmbeddingParams(num_shards,
                     vocab_size,
                     dtype=dtypes.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  p = []
  params = {}
  feed_dict = {}
  if not shape:
    shape = [10]
  for i in range(num_shards):
    shard_shape = [vocab_size // num_shards] + shape
    if i < vocab_size % num_shards:  # Excess goes evenly on the first shards
      shard_shape[0] += 1
    param_name = _PName(i)
    if use_shapeless_placeholder:
      param = array_ops.placeholder(dtype, shape=None, name=param_name)
    else:
      param = constant_op.constant(
          1.0, shape=shard_shape, dtype=dtype, name=param_name)
    p.append(param)
    np_type = "f" if dtype == dtypes.float32 else "d"
    val = (np.random.rand(*shard_shape).astype(np_type)) + 1
    params[param_name + ":0"] = val
    feed_dict[param.name] = val
  return p, params, feed_dict
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
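The shard-sizing rule in _EmbeddingParams ("excess goes evenly on the first shards") is easiest to see with concrete numbers. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not part of the test file); with vocab_size=13 and num_shards=5 it reproduces the "first 3 shards get 3 rows, last 2 shards get 2 rows" layout the later tests describe:

def shard_sizes(vocab_size, num_shards):
  # The first (vocab_size % num_shards) shards each take one extra row.
  base = vocab_size // num_shards
  return [base + 1 if i < vocab_size % num_shards else base
          for i in range(num_shards)]

print(shard_sizes(13, 5))  # [3, 3, 3, 2, 2]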
def _EmbeddingResult(params,
                     id_vals,
                     num_shards,
                     vocab_size,
                     partition_strategy="mod",
                     weight_vals=None):
  if weight_vals is None:
    weight_vals = np.copy(id_vals)
    weight_vals.fill(1)
  values = []
  weights = []
  weights_squared = []
  for ids, wts in zip(id_vals, weight_vals):
    value_aggregation = None
    weight_aggregation = None
    squared_weight_aggregation = None
    if isinstance(ids, compat.integral_types):
      ids = [ids]
      wts = [wts]
    for i, weight_value in zip(ids, wts):
      if partition_strategy == "mod":
        val = np.copy(params[_PName(i % num_shards) + ":0"][
            i // num_shards, :]) * weight_value
      elif partition_strategy == "div":
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        threshold = extras * (ids_per_partition + 1)
        if i < threshold:
          partition = i // (ids_per_partition + 1)
          offset = i % (ids_per_partition + 1)
        else:
          partition = extras + (i - threshold) // ids_per_partition
          offset = (i - threshold) % ids_per_partition
        val = np.copy(
            params[_PName(partition) + ":0"][offset, :]) * weight_value
      else:
        assert False
      if value_aggregation is None:
        assert weight_aggregation is None
        assert squared_weight_aggregation is None
        value_aggregation = val
        weight_aggregation = weight_value
        squared_weight_aggregation = weight_value * weight_value
      else:
        assert weight_aggregation is not None
        assert squared_weight_aggregation is not None
        value_aggregation += val
        weight_aggregation += weight_value
        squared_weight_aggregation += weight_value * weight_value
    values.append(value_aggregation)
    weights.append(weight_aggregation)
    weights_squared.append(squared_weight_aggregation)
  values = np.array(values).astype(np.float32)
  weights = np.array(weights).astype(np.float32)
  weights_squared = np.array(weights_squared).astype(np.float32)
  return values, weights, weights_squared
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
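_EmbeddingResult maps a global id to a (shard, offset) pair differently for the two partition strategies: "mod" round-robins ids across shards, while "div" assigns contiguous id ranges. A small self-contained sketch of the same arithmetic (the function name is illustrative):

def locate(i, vocab_size, num_shards, partition_strategy="mod"):
  # Mirrors the (shard, offset) arithmetic in _EmbeddingResult.
  if partition_strategy == "mod":
    return i % num_shards, i // num_shards
  # "div": the first `extras` shards hold one extra id each.
  ids_per_partition, extras = divmod(vocab_size, num_shards)
  threshold = extras * (ids_per_partition + 1)
  if i < threshold:
    return i // (ids_per_partition + 1), i % (ids_per_partition + 1)
  return (extras + (i - threshold) // ids_per_partition,
          (i - threshold) % ids_per_partition)

print(locate(5, 13, 5, "mod"))  # (0, 1)
print(locate(5, 13, 5, "div"))  # (1, 2)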
def testSimpleSharded(self):
  with self.cached_session():
    num_shards = 2
    vocab_size = 4
    p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

    id_vals = np.array([0, 0])
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
    print("Construct ids", ids.get_shape())
    embedding = embedding_ops.embedding_lookup(p, ids)

    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMaxNorm(self):
  with self.cached_session():
    embeddings = constant_op.constant([[2.0]])

    ids = constant_op.constant([0], dtype=dtypes.int32)
    embedding = embedding_ops.embedding_lookup(
        [embeddings], ids, max_norm=1.0)

    self.assertAllEqual(embedding, [[1.0]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMaxNormNontrivial(self):
  with self.cached_session():
    embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])

    ids = constant_op.constant([0, 1], dtype=dtypes.int32)
    embedding = embedding_ops.embedding_lookup(
        [embeddings], ids, max_norm=2.0)

    norms = math_ops.sqrt(
        math_ops.reduce_sum(embeddings * embeddings, axis=1))
    normalized = embeddings / array_ops.stack([norms, norms], axis=1)
    self.assertAllClose(embedding, 2 * self.evaluate(normalized))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
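The max_norm argument exercised above clips any looked-up row whose L2 norm exceeds the bound, rescaling it onto the ball of that radius. A NumPy sketch of that clipping rule on the same inputs as testMaxNormNontrivial (a standalone illustration, not TensorFlow's implementation):

import numpy as np

def clip_to_max_norm(rows, max_norm):
  # Rescale rows whose L2 norm exceeds max_norm; leave the rest unchanged.
  norms = np.linalg.norm(rows, axis=1, keepdims=True)
  return np.where(norms > max_norm, rows * (max_norm / norms), rows)

rows = np.array([[2.0, 4.0], [3.0, 1.0]])
print(clip_to_max_norm(rows, 2.0))  # each row rescaled to norm 2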
def testSimpleShardedPartitionedVariable(self):
  with self.cached_session() as sess:
    num_shards = 2
    vocab_size = 4
    p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
        num_shards, vocab_size)

    id_vals = np.array([0, 0])
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
    print("Construct ids", ids.get_shape())
    embedding = embedding_ops.embedding_lookup(p_variable, ids)
    self.evaluate(variables.global_variables_initializer())
    params_values = [params[p_i.name] for p_i in p]
    # Test that the PartitionedVariable components equal the list in p
    p_var_val = self.evaluate(list(p_variable))
    # Actual test
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
  self.assertAllEqual(params_values, p_var_val)
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSimpleShardedPartitionedResourceVariable(self):
  with self.cached_session() as sess:
    num_shards = 2
    vocab_size = 4
    p, p_variable, params, _ = _EmbeddingParamsAsPartitionedVariable(
        num_shards, vocab_size, use_resource=True)

    id_vals = np.array([0, 0])
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
    print("Construct ids", ids.get_shape())
    embedding = embedding_ops.embedding_lookup(p_variable, ids)
    self.evaluate(variables.global_variables_initializer())
    params_values = [params[p_i.name] for p_i in p]
    # Test that the PartitionedVariable components equal the list in p
    p_var_val = self.evaluate(list(p_variable))
    # Actual test
    print(ops.get_default_graph().as_graph_def())
    tf_result = self.evaluate(embedding)
  np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
  self.assertAllEqual(params_values, p_var_val)
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedModPartitioningInt32Ids(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
    p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)

    embedding = embedding_ops.embedding_lookup(p, ids)
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedModPartitioningInt64Ids(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
    p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)

    embedding = embedding_ops.embedding_lookup(p, ids)
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedDivPartitioningInt32Ids(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
    p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)

    embedding = embedding_ops.embedding_lookup(
        p, ids, partition_strategy="div")
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(
      params, id_vals, num_shards, vocab_size, partition_strategy="div")
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
    _, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
        num_shards, vocab_size)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)

    self.evaluate(variables.global_variables_initializer())
    embedding = embedding_ops.embedding_lookup(
        p_variable, ids, partition_strategy="div")
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(
      params, id_vals, num_shards, vocab_size, partition_strategy="div")
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedDivPartitioningInt64Ids(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
    p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)

    embedding = embedding_ops.embedding_lookup(
        p, ids, partition_strategy="div")
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(
      params, id_vals, num_shards, vocab_size, partition_strategy="div")
  self.assertAllEqual(np_result, tf_result)
  self.assertShapeEqual(np_result, embedding)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testShardedDivPartitioningUnknownParamShape(self):
  with self.cached_session():
    num_shards = 5
    vocab_size = 13
    # Embedding dimension is 10. The vocab_size x 10 embedding
    # parameters are spread in num_shards matrices, so the first
    # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.

    # We clear parameter shapes, to test when shape is not statically known.
    p, params, feed_dict = _EmbeddingParams(
        num_shards, vocab_size, use_shapeless_placeholder=True)

    num_vals = 30
    # Fetch num_vals embeddings for random word ids. Since
    # num_vals > vocab_size, this ought to have repetitions, so
    # it will also test that aspect.
    id_vals = np.random.randint(vocab_size, size=num_vals)
    ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)

    embedding = embedding_ops.embedding_lookup(
        p, ids, partition_strategy="div")
    tf_result = embedding.eval(feed_dict=feed_dict)
  np_result, _, _ = _EmbeddingResult(
      params, id_vals, num_shards, vocab_size, partition_strategy="div")
  self.assertAllEqual(np_result, tf_result)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testGradientsEmbeddingLookup(self):
  vocab_size = 9
  num_ids = 10
  id_vals = list(np.random.randint(vocab_size, size=num_ids))
  tf_logging.vlog(1, id_vals)
  for ids_shape in [(10,), (2, 5)]:
    for num_shards in [1, 3]:
      with self.cached_session():
        ids = constant_op.constant(
            id_vals, shape=ids_shape, dtype=dtypes.int32)
        x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
        y = embedding_ops.embedding_lookup(x, ids)
        y_shape = ids_shape + tuple(params[_PName(0) + ":0"].shape[1:])
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        err = gradient_checker.compute_gradient_error(
            x, x_shape, y, y_shape, x_init_value=x_init_value)
      self.assertLess(err, 1e-4)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testGradientsEmbeddingLookupWithComputedParams(self):
  vocab_size = 9
  num_ids = 5
  id_vals = list(np.random.randint(vocab_size, size=num_ids))
  tf_logging.vlog(1, id_vals)
  for num_shards in [1, 3]:
    with self.cached_session():
      ids = constant_op.constant(id_vals, dtype=dtypes.int32)
      x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
      # This will force a conversion from IndexedSlices to Tensor.
      x_squared = [math_ops.square(elem) for elem in x]
      y = embedding_ops.embedding_lookup(x_squared, ids)
      y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
      x_name = [_PName(i) for i in range(num_shards)]
      x_init_value = [params[x_n + ":0"] for x_n in x_name]
      x_shape = [i.shape for i in x_init_value]
      err = gradient_checker.compute_gradient_error(
          x, x_shape, y, y_shape, x_init_value=x_init_value)
    self.assertLess(err, 1e-3)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testConstructionSharded(self):
  with ops.Graph().as_default():
    p = []
    for _ in range(2):
      p += [
          variables.Variable(
              array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
      ]
    ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
    embedding_ops.embedding_lookup(p, ids)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testHigherRank(self):
  np.random.seed(8)
  with self.cached_session():
    for params_shape in (12,), (6, 3):
      params = np.random.randn(*params_shape)
      for ids_shape in (3, 2), (4, 3):
        ids = np.random.randint(
            params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
        # Compare nonsharded to gather
        simple = embedding_ops.embedding_lookup(params, ids)
        self.assertAllEqual(simple, array_ops.gather(params, ids))
        # Run a few random sharded versions
        for procs in 1, 2, 3:
          stride = procs * math_ops.range(params.shape[0] // procs)
          split_params = [
              array_ops.gather(params, stride + p) for p in range(procs)
          ]
          sharded = embedding_ops.embedding_lookup(split_params, ids)
          self.assertAllEqual(simple, sharded)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testHigherRankMaxNorm(self):
  np.random.seed(8)
  with self.cached_session():
    for params_shape in (12,), (6, 3), (6, 2, 3):
      # Test embedding rank 0, 1, 2.
      # Note: the first dimension must be a common multiple of procs below.
      params = 2 * np.ones(params_shape)
      params_norm = params / np.sqrt(
          np.sum(
              params * params, tuple(range(params.ndim)[1:]), keepdims=True))
      for ids_shape in (), (3), (4, 3), (2, 3, 4):
        ids = np.random.randint(
            params.shape[0],
            size=np.prod(ids_shape, dtype=np.int64)).reshape(ids_shape)
        # Compare nonsharded to gather
        simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
        # assertAllClose is used here as different implementations of sqrt may
        # be used to compute each of the values being compared. For example,
        # on AVX512 builds the embedding operation makes use of Eigen's fast
        # vectorized square root algorithm for doubles. These different
        # implementations of sqrt are not guaranteed to produce exactly the
        # same results. Therefore, an exact comparison cannot be made.
        self.assertAllClose(simple, array_ops.gather(params_norm, ids))
        # Run a few different sharded versions.
        for procs in 1, 2, 3:
          stride = procs * math_ops.range(params.shape[0] // procs)
          split_params = [
              array_ops.gather(params, stride + p) for p in range(procs)
          ]
          sharded = embedding_ops.embedding_lookup(
              split_params, ids, max_norm=1.0)
          self.assertAllEqual(simple, sharded)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testTransform(self):
  # This tests all combinations of:
  # - ids rank 0, 1, >1
  # - params sharded/unsharded
  # It always applies max_norm.
  np.random.seed(8)
  l2_norm = 2.
  with self.cached_session():
    # Param values are in [l2_norm, l2_norm+1) so it will always clip.
    params = np.random.rand(6, 3) + l2_norm
    params_norm = l2_norm * params / np.sqrt(
        np.sum(params * params, axis=1, keepdims=True))
    # Compute the norm of each embedding. This will change the embedding
    # rank to 0.
    params_norm = np.linalg.norm(params_norm, axis=1)
    transform = lambda x: linalg_ops.norm(x, axis=1)
    for ids_shape in (), (3), (4, 3), (2, 3, 4):
      # Test ids rank 0, 1, 2, 3.
      ids = np.random.randint(
          params.shape[0],
          size=np.prod(ids_shape, dtype=np.int64)).reshape(ids_shape)
      # Compare nonsharded to gather.
      simple = embedding_ops._embedding_lookup_and_transform(
          params, ids, max_norm=l2_norm, transform_fn=transform)
      self.assertAllClose(simple, array_ops.gather(params_norm, ids))
      # Run a few different sharded versions.
      for procs in 1, 2, 3:
        stride = procs * math_ops.range(params.shape[0] // procs)
        split_params = [
            array_ops.gather(params, stride + p) for p in range(procs)
        ]
        sharded = embedding_ops._embedding_lookup_and_transform(
            split_params, ids, max_norm=l2_norm, transform_fn=transform)
        # assertAllClose is used here as different implementations of sqrt
        # may be used to compute each of the values being compared. For
        # example, on AVX512 builds the embedding operation makes use of
        # Eigen's fast vectorized square root algorithm for doubles. These
        # different implementations of sqrt are not guaranteed to produce
        # exactly the same results. Therefore, an exact comparison cannot
        # be made.
        self.assertAllClose(simple, sharded)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _RandomIdsAndWeights(self, batch_size, vocab_size):
  max_val_per_entry = 6
  vals_per_batch_entry = np.random.randint(
      1, max_val_per_entry, size=batch_size)
  num_vals = np.sum(vals_per_batch_entry)

  ids = np.random.randint(vocab_size, size=num_vals)
  weights = 1 + np.random.rand(num_vals)

  indices = []
  for batch_entry, num_val in enumerate(vals_per_batch_entry):
    for val_index in range(num_val):
      indices.append([batch_entry, val_index])

  shape = [batch_size, max_val_per_entry]

  sp_ids = sparse_tensor.SparseTensor(
      constant_op.constant(indices, dtypes.int64),
      constant_op.constant(ids, dtypes.int32),
      constant_op.constant(shape, dtypes.int64))
  sp_weights = sparse_tensor.SparseTensor(
      constant_op.constant(indices, dtypes.int64),
      constant_op.constant(weights, dtypes.float32),
      constant_op.constant(shape, dtypes.int64))

  return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testEmbeddingLookupSparse(self):
  vocab_size = 13
  batch_size = 10
  param_shape = [2, 5]
  expected_lookup_result_shape = [None] + param_shape

  sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
      self._RandomIdsAndWeights(batch_size, vocab_size))

  grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
  grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
  grouped_ignored_weights = self._GroupByBatchEntry(
      np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)

  for num_shards, combiner, dtype, ignore_weights in itertools.product(
      [1, 5], ["sum", "mean", "sqrtn"],
      [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64],
      [True, False]):
    with self.cached_session():
      p, params, feed_dict = _EmbeddingParams(
          num_shards, vocab_size, shape=param_shape, dtype=dtype)
      embedding_sum = embedding_ops.embedding_lookup_sparse(
          p,
          sp_ids,
          None if ignore_weights else sp_weights,
          combiner=combiner)

      self.assertEqual(embedding_sum.get_shape().as_list(),
                       expected_lookup_result_shape)
      self.assertEqual(embedding_sum.dtype, dtype)

      tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)

      np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
          params,
          grouped_ids,
          num_shards,
          vocab_size,
          weight_vals=grouped_ignored_weights
          if ignore_weights else grouped_weights)
      if combiner == "mean":
        np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
      if combiner == "sqrtn":
        np_embedding_sum /= np.reshape(
            np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))

      rtol = 1e-6
      if dtype == dtypes.bfloat16:
        rtol = 1e-2
      elif dtype == dtypes.float16:
        rtol = 1e-3
      atol = rtol
      self.assertAllClose(np_embedding_sum, tf_embedding_sum, rtol, atol)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
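The three combiners checked above reduce the weighted per-row embeddings differently: "sum" is the plain weighted sum, "mean" divides by the sum of weights, and "sqrtn" divides by the square root of the sum of squared weights, matching the NumPy reference math in the test. A minimal sketch for a single batch entry (names are illustrative):

import numpy as np

def combine(embeds, wts, combiner):
  # embeds: [n, d] rows looked up for one batch entry; wts: [n] weights.
  weighted = (embeds * wts[:, None]).sum(axis=0)
  if combiner == "sum":
    return weighted
  if combiner == "mean":
    return weighted / wts.sum()
  if combiner == "sqrtn":
    return weighted / np.sqrt((wts ** 2).sum())
  raise ValueError(combiner)

e = np.array([[1.0, 2.0], [3.0, 4.0]])
w = np.array([2.0, 1.0])
print(combine(e, w, "sum"))    # [5. 8.]
print(combine(e, w, "mean"))   # [5, 8] / 3
print(combine(e, w, "sqrtn"))  # [5, 8] / sqrt(5)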
def testGradientsEmbeddingLookupSparse(self):
  vocab_size = 12
  batch_size = 4
  param_shape = [2, 3]
  sp_ids, sp_weights, _, _, _ = (self._RandomIdsAndWeights(
      batch_size, vocab_size))

  for num_shards, combiner, dtype, ignore_weights in itertools.product(
      [1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32, dtypes.float64],
      [True, False]):
    with self.cached_session():
      x, params, _ = _EmbeddingParams(
          num_shards, vocab_size, shape=param_shape, dtype=dtype)

      y = embedding_ops.embedding_lookup_sparse(
          x,
          sp_ids,
          None if ignore_weights else sp_weights,
          combiner=combiner)
      x_name = [_PName(i) for i in range(num_shards)]
      x_init_value = [params[x_n + ":0"] for x_n in x_name]
      x_shape = [i.shape for i in x_init_value]
      y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
      err = gradient_checker.compute_gradient_error(
          x, x_shape, y, y_shape, x_init_value=x_init_value)
    self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testIncompatibleShapes(self):
  with self.cached_session():
    x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
    sp_ids = sparse_tensor.SparseTensor(
        constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
        constant_op.constant([0, 1, 2], dtypes.int32),
        constant_op.constant([2, 2], dtypes.int64))
    sp_weights = sparse_tensor.SparseTensor(
        constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
        constant_op.constant([12.0, 5.0], dtypes.float32),
        constant_op.constant([1, 2], dtypes.int64))

    with self.assertRaises(ValueError):
      embedding_ops.embedding_lookup_sparse(
          x, sp_ids, sp_weights, combiner="mean")
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _random_weights(self, vocab_size=4, embed_dim=4, num_shards=1):
  assert vocab_size > 0
  assert embed_dim > 0
  assert num_shards > 0
  assert num_shards <= vocab_size

  initializer = init_ops.truncated_normal_initializer(
      mean=0.0, stddev=1.0 / math.sqrt(vocab_size), dtype=dtypes.float32)
  embedding_weights = list(variable_scope.get_variable(
      name="embedding_weights",
      shape=[vocab_size, embed_dim],
      partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
      initializer=initializer))
  for w in embedding_weights:
    self.evaluate(w.initializer)
  embedding_weights = [self.evaluate(w) for w in embedding_weights]
  return embedding_weights
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _ids_and_weights_3d(self):
  # Each (2-D) index demonstrates a test case:
  #   Index 0, 0: multiple valid ids, 1 invalid id, weighted mean
  #   Index 0, 1: all ids are invalid (leaving no valid ids after pruning)
  #   Index 0, 2: no ids to begin with
  #   Index 1, 0: single id
  #   Index 1, 1: all ids have <=0 weight
  #   Index 1, 2: no ids to begin with
  indices = [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [1, 0, 0],
             [1, 1, 0], [1, 1, 1]]
  ids = [0, 1, -1, -1, 2, 0, 1]
  weights = [1.0, 2.0, 1.0, 1.0, 3.0, 0.0, -0.5]
  shape = [2, 3, 4]

  sparse_ids = sparse_tensor.SparseTensor(
      constant_op.constant(indices, dtypes.int64),
      constant_op.constant(ids, dtypes.int64),
      constant_op.constant(shape, dtypes.int64))

  sparse_weights = sparse_tensor.SparseTensor(
      constant_op.constant(indices, dtypes.int64),
      constant_op.constant(weights, dtypes.float32),
      constant_op.constant(shape, dtypes.int64))

  return sparse_ids, sparse_weights
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_return_zero_vector(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, sparse_weights = self._ids_and_weights_2d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, sparse_weights))

    self.assertAllClose(
        embedding_lookup_result,
        [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
         3.0, [0] * 4, [0] * 4, embedding_weights[0][2], [0] * 4])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_return_special_vector(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, sparse_weights = self._ids_and_weights_2d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, sparse_weights, default_id=3))

    self.assertAllClose(
        embedding_lookup_result,
        [(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
         3.0, embedding_weights[0][3], embedding_weights[0][3],
         embedding_weights[0][2], embedding_weights[0][3]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_no_weights(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, _ = self._ids_and_weights_2d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, None))

    self.assertAllClose(
        embedding_lookup_result,
        [(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4,
         [0] * 4, embedding_weights[0][2],
         (embedding_weights[0][0] + embedding_weights[0][1]) / 2.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_partitioned(self):
  with self.cached_session():
    embedding_weights = self._random_weights(num_shards=3)
    sparse_ids, _ = self._ids_and_weights_2d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, None))

    embedding_weights = list(itertools.chain(*embedding_weights))
    self.assertAllClose(embedding_lookup_result,
                        [(embedding_weights[0] + embedding_weights[1]) / 2.0,
                         [0] * 4, [0] * 4, embedding_weights[2],
                         (embedding_weights[0] + embedding_weights[1]) / 2.0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_partitioned_inconsistent_weights(self):
  with self.cached_session():
    embedding_weights = self._random_weights(num_shards=3)
    sparse_ids, sparse_weights = self._ids_and_weights_2d()

    embedding_weights[1] = embedding_weights[1].astype(np.float64)
    self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
                      embedding_weights, sparse_ids)
    embedding_weights = [
        constant_op.constant(w, dtype=dtypes.float64)
        for w in embedding_weights
    ]
    self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                      embedding_weights, sparse_ids, sparse_weights)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_3d_return_zero_vector(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, sparse_weights = self._ids_and_weights_3d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, sparse_weights))

    self.assertAllClose(embedding_lookup_result, [[
        (1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
        [0] * 4, [0] * 4
    ], [embedding_weights[0][2], [0] * 4, [0] * 4]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_3d_return_special_vector(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, sparse_weights = self._ids_and_weights_3d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, sparse_weights, default_id=3))

    self.assertAllClose(
        embedding_lookup_result,
        [[(1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) /
          3.0, embedding_weights[0][3], embedding_weights[0][3]],
         [embedding_weights[0][2], embedding_weights[0][3],
          embedding_weights[0][3]]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_3d_no_weights(self):
  with self.cached_session():
    embedding_weights = self._random_weights()
    sparse_ids, _ = self._ids_and_weights_3d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, None))

    self.assertAllClose(
        embedding_lookup_result,
        [[(embedding_weights[0][0] + embedding_weights[0][1]) / 2.0,
          [0] * 4, [0] * 4],
         [embedding_weights[0][2],
          (embedding_weights[0][0] + embedding_weights[0][1]) / 2.0,
          [0] * 4]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_3d_partitioned(self):
  with self.cached_session():
    embedding_weights = self._random_weights(num_shards=3)
    sparse_ids, _ = self._ids_and_weights_3d()

    embedding_lookup_result = (
        embedding_ops.safe_embedding_lookup_sparse_v2(
            embedding_weights, sparse_ids, None))

    embedding_weights = list(itertools.chain(*embedding_weights))
    self.assertAllClose(
        embedding_lookup_result,
        [[(embedding_weights[0] + embedding_weights[1]) / 2.0,
          [0] * 4, [0] * 4],
         [embedding_weights[2],
          (embedding_weights[0] + embedding_weights[1]) / 2.0,
          [0] * 4]])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def test_safe_embedding_lookup_sparse_3d_partitioned_inconsistent_weights(
    self):
  with self.cached_session():
    embedding_weights = self._random_weights(num_shards=3)
    sparse_ids, sparse_weights = self._ids_and_weights_3d()

    embedding_weights[1] = embedding_weights[1].astype(np.float64)
    self.assertRaises(TypeError, embedding_ops.safe_embedding_lookup_sparse,
                      embedding_weights, sparse_ids)
    embedding_weights = [
        constant_op.constant(w, dtype=dtypes.float64)
        for w in embedding_weights
    ]
    self.assertRaises(ValueError, embedding_ops.safe_embedding_lookup_sparse,
                      embedding_weights, sparse_ids, sparse_weights)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testCint32Cpu(self):
  with self.session(use_gpu=False):
    indices = [
        ops.convert_to_tensor([0, 1, 2]),
        ops.convert_to_tensor([2, 3])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34]),
        ops.convert_to_tensor([1, 2])
    ]
    self.assertAllEqual(
        data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
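dynamic_stitch merges the value lists into one tensor by scattering each value to its index, with entries from later lists taking precedence on overlap (here index 2 ends up as 1, not 34). A NumPy sketch of that semantics for the 1-D case (a standalone illustration, not the TensorFlow kernel):

import numpy as np

def dynamic_stitch_1d(indices, values):
  size = max(int(np.max(i)) for i in indices) + 1
  merged = np.zeros(size, dtype=np.asarray(values[0]).dtype)
  for idx, val in zip(indices, values):  # later lists overwrite earlier ones
    merged[np.asarray(idx)] = np.asarray(val)
  return merged

print(dynamic_stitch_1d([np.array([0, 1, 2]), np.array([2, 3])],
                        [np.array([12, 23, 34]), np.array([1, 2])]))
# [12 23  1  2]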
def testCint32Gpu(self):
  with self.session():
    indices = [
        ops.convert_to_tensor([0, 1, 2]),
        ops.convert_to_tensor([2, 3])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34]),
        ops.convert_to_tensor([1, 2])
    ]
    self.assertAllEqual(
        data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testInt32Cpu(self):
  with self.session(use_gpu=False):
    indices = [
        ops.convert_to_tensor([0, 1, 2]),
        ops.convert_to_tensor([2, 3])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34]),
        ops.convert_to_tensor([1, 2])
    ]
    self.assertAllEqual(
        data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testInt32Gpu(self):
  with self.session():
    indices = [
        ops.convert_to_tensor([0, 1, 2]),
        ops.convert_to_tensor([2, 3])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34]),
        ops.convert_to_tensor([1, 2])
    ]
    self.assertAllEqual(
        data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSumGradArgs(self):
  with self.session(use_gpu=False):
    indices = [
        ops.convert_to_tensor([0, 1, 2, 3]),
        ops.convert_to_tensor([2, 3])
    ]
    values = [
        ops.convert_to_tensor([2, 3, 5, 7]),
        ops.convert_to_tensor([1, 1])
    ]
    self.assertAllEqual(
        data_flow_ops.dynamic_stitch(indices, values), [2, 3, 1, 1])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testStitchOrder(self):
  with self.cached_session():
    indices = []
    np_values = []
    values = []
    for _ in range(10):
      indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
      np_values.extend([np.random.uniform(size=100)])
      values.extend([ops.convert_to_tensor(np_values[-1])])
    stitched = data_flow_ops.dynamic_stitch(indices, values)
    self.assertAllEqual(np_values[-1], stitched)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testCint32Cpu(self):
  with self.session(use_gpu=False):
    indices = [
        ops.convert_to_tensor([0, 1, 4, 6]),
        ops.convert_to_tensor([2, 3, 5])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34, 45]),
        ops.convert_to_tensor([1, 2, 3])
    ]
    self.assertAllEqual(
        data_flow_ops.parallel_dynamic_stitch(indices, values),
        [12, 23, 1, 2, 34, 3, 45])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testInt32Cpu(self):
  with self.session(use_gpu=False):
    indices = [
        ops.convert_to_tensor([0, 1, 5, 6, 7]),
        ops.convert_to_tensor([2, 4, 3])
    ]
    values = [
        ops.convert_to_tensor([12, 23, 34, 45, 56]),
        ops.convert_to_tensor([1, 3, 2])
    ]
    self.assertAllEqual(
        data_flow_ops.parallel_dynamic_stitch(indices, values),
        [12, 23, 1, 2, 3, 34, 45, 56])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSimple(self):
  with self.session(use_gpu=False):
    indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
    values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
    self.assertAllEqual(
        data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def ahead_by(self):
    self._completeIfNotSet(self._ahead_by)
    return self._NoneIfNotSet(self._ahead_by)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def base_commit(self):
    self._completeIfNotSet(self._base_commit)
    return self._NoneIfNotSet(self._base_commit)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def behind_by(self):
    self._completeIfNotSet(self._behind_by)
    return self._NoneIfNotSet(self._behind_by)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def commits(self):
    self._completeIfNotSet(self._commits)
    return self._NoneIfNotSet(self._commits)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def diff_url(self):
    self._completeIfNotSet(self._diff_url)
    return self._NoneIfNotSet(self._diff_url)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def files(self):
    self._completeIfNotSet(self._files)
    return self._NoneIfNotSet(self._files)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def html_url(self):
    self._completeIfNotSet(self._html_url)
    return self._NoneIfNotSet(self._html_url)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def patch_url(self):
    self._completeIfNotSet(self._patch_url)
    return self._NoneIfNotSet(self._patch_url)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def permalink_url(self):
    self._completeIfNotSet(self._permalink_url)
    return self._NoneIfNotSet(self._permalink_url)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def status(self):
    self._completeIfNotSet(self._status)
    return self._NoneIfNotSet(self._status)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def total_commits(self):
    self._completeIfNotSet(self._total_commits)
    return self._NoneIfNotSet(self._total_commits)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def url(self):
    self._completeIfNotSet(self._url)
    return self._NoneIfNotSet(self._url)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def __init__(self,
             logits=None,
             probs=None,
             validate_args=True,
             allow_nan_stats=False,
             name="Geometric"):
  """Construct Geometric distributions.

  Args:
    logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
      indicates the number of batch dimensions. Each entry represents logits
      for the probability of success for independent Geometric distributions
      and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
      should be specified.
    probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]` where
      `b >= 0` indicates the number of batch dimensions. Each entry
      represents the probability of success for independent Geometric
      distributions and must be in the range `(0, 1]`. Only one of `logits`
      or `probs` should be specified.
    validate_args: Python `bool`, default `True`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    allow_nan_stats: Python `bool`, default `False`. When `True`, statistics
      (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
      result is undefined. When `False`, an exception is raised if one or
      more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.
  """
  parameters = locals()
  with ops.name_scope(name, values=[logits, probs]):
    self._logits, self._probs = distribution_util.get_logits_and_probs(
        logits, probs, validate_args=validate_args, name=name)

    with ops.control_dependencies(
        [check_ops.assert_positive(self._probs)] if validate_args else []):
      self._probs = array_ops.identity(self._probs, name="probs")

  super(Geometric, self).__init__(
      dtype=self._probs.dtype,
      reparameterization_type=distribution.NOT_REPARAMETERIZED,
      validate_args=validate_args,
      allow_nan_stats=allow_nan_stats,
      parameters=parameters,
      graph_parents=[self._probs, self._logits],
      name=name)
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
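The constructor's `logits`/`probs` pairing is the usual log-odds relationship, probs = sigmoid(logits), which is what `distribution_util.get_logits_and_probs` resolves when only one of the two is given. A quick NumPy check of that mapping (a standalone sketch, not part of the class):

import numpy as np

def sigmoid(s):
  return 1.0 / (1.0 + np.exp(-s))

s = np.array([-2.0, 0.0, 2.0])
p = sigmoid(s)
print(p)                      # probs implied by the logits
print(np.log(p / (1.0 - p)))  # recovers the logits (log-odds)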
def logits(self):
  """Log-odds of a `1` outcome (vs `0`)."""
  return self._logits
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def probs(self):
  """Probability of a `1` outcome (vs `0`)."""
  return self._probs
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def _batch_shape(self):
  return self.probs.get_shape()
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def _event_shape(self):
  return tensor_shape.scalar()
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def _cdf(self, counts):
  if self.validate_args:
    # We set `check_integer=False` since the CDF is defined on the whole
    # real line.
    counts = math_ops.floor(
        distribution_util.embed_check_nonnegative_discrete(
            counts, check_integer=False))
  counts *= array_ops.ones_like(self.probs)
  return array_ops.where(
      counts < 0.,
      array_ops.zeros_like(counts),
      -math_ops.expm1((counts + 1) * math_ops.log1p(-self.probs)))
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
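_cdf evaluates 1 - (1 - p)^(k+1) in the numerically stable form -expm1((k+1) * log1p(-p)); the two expressions are algebraically identical, but the expm1/log1p version avoids cancellation for small p. A short NumPy check (illustrative only):

import numpy as np

p, k = 0.3, 4.0
naive = 1.0 - (1.0 - p) ** (k + 1)
stable = -np.expm1((k + 1) * np.log1p(-p))
print(naive, stable)  # both ~0.83193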
def _entropy(self):
  probs = self._probs
  if self.validate_args:
    probs = control_flow_ops.with_dependencies(
        [check_ops.assert_less(
            probs,
            constant_op.constant(1., probs.dtype),
            message="Entropy is undefined when logits = inf or probs = 1.")],
        probs)
  # Claim: entropy(p) = softplus(s)/p - s
  # where s=logits and p=probs.
  #
  # Proof:
  #
  # entropy(p)
  # := -[(1-p)log(1-p) + p log(p)]/p
  # = -[log(1-p) + p log(p/(1-p))]/p
  # = -[-softplus(s) + ps]/p
  # = softplus(s)/p - s
  #
  # since,
  # log[1-sigmoid(s)]
  # = log[1/(1+exp(s))]
  # = -log[1+exp(s)]
  # = -softplus(s)
  #
  # using the fact that,
  # 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
  return nn.softplus(self.logits) / probs - self.logits
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
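The comment block in _entropy derives entropy(p) = softplus(s)/p - s from the direct definition. A short NumPy check of the identity at an arbitrary p (a standalone sketch):

import numpy as np

p = 0.3
s = np.log(p / (1.0 - p))  # logits (log-odds)
direct = -((1.0 - p) * np.log(1.0 - p) + p * np.log(p)) / p
softplus = np.log1p(np.exp(s))
print(direct, softplus / p - s)  # the two forms agree (~2.0362)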
def _variance(self):
  return self._mean() / self.probs
unnikrishnankgs/va
[ 1, 5, 1, 10, 1496432585 ]
def __init__(self, context, query, image=None, fanart=None):
    if image is None:
        image = context.create_resource_path('media/search.png')

    DirectoryItem.__init__(self,
                           query,
                           context.create_uri([constants.paths.SEARCH, 'query'], {'q': query}),
                           image=image)
    if fanart:
        self.set_fanart(fanart)
    else:
        self.set_fanart(context.get_fanart())

    context_menu = [(context.localize(constants.localize.SEARCH_REMOVE),
                     'RunPlugin(%s)' % context.create_uri([constants.paths.SEARCH, 'remove'], params={'q': query})),
                    (context.localize(constants.localize.SEARCH_RENAME),
                     'RunPlugin(%s)' % context.create_uri([constants.paths.SEARCH, 'rename'], params={'q': query})),
                    (context.localize(constants.localize.SEARCH_CLEAR),
                     'RunPlugin(%s)' % context.create_uri([constants.paths.SEARCH, 'clear']))]
    self.set_context_menu(context_menu)
azumimuo/family-xbmc-addon
[ 1, 3, 1, 2, 1456692116 ]
def maxNumEdgesToRemove(self, n: int, edges: List[List[int]]) -> int:
    parent = list(range(n + 1))

    def findParent(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i
jiadaizhao/LeetCode
[ 39, 21, 39, 2, 1502171846 ]
def union(u, v):
    pu = findParent(u)
    pv = findParent(v)
    if pu != pv:
        parent[pv] = pu
        return 1
    else:
        return 0
jiadaizhao/LeetCode
[ 39, 21, 39, 2, 1502171846 ]
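findParent (which uses path halving) and union above together form a standard disjoint-set union; union returns 1 only when it actually merges two components, so the surrounding problem can count an edge as removable whenever union returns 0. A minimal standalone sketch of the same structure (names are illustrative):

def make_dsu(n):
    parent = list(range(n + 1))

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path halving
            i = parent[i]
        return i

    def union(u, v):
        pu, pv = find(u), find(v)
        if pu == pv:
            return 0  # already connected; edge is redundant
        parent[pv] = pu
        return 1      # merged two components

    return find, union

find, union = make_dsu(4)
print(union(1, 2), union(2, 3), union(1, 3))  # 1 1 0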
def __str__(self):
    return self.name
IQSS/miniverse
[ 2, 7, 2, 26, 1467318581 ]
def save(self, *args, **kwargs):
    self.slug = slugify(self.name)
    super(Installation, self).save(*args, **kwargs)
IQSS/miniverse
[ 2, 7, 2, 26, 1467318581 ]
def view_marker(self):
    #return self.logo.url
    if self.marker:
        im = '<img src="%s" />' % (self.marker.url)
        return im
    return 'n/a'
IQSS/miniverse
[ 2, 7, 2, 26, 1467318581 ]
def view_logo(self, force_width=None):
    #return self.logo.url
    if self.logo:
        if force_width:
            im = ('<img src="{0}" width="{1}" />'
                  '<br />(width forced to {1}px)').format(
                      self.logo.url, force_width)
            return im
        else:
            im = '<img src="%s" />' % (self.logo.url)
            return im
    return 'n/a'
IQSS/miniverse
[ 2, 7, 2, 26, 1467318581 ]
def to_json(self, as_string=False, pretty=False):
    """Returns an OrderedDict of the installation attributes"""
    od = OrderedDict()
    od['id'] = self.id
    od['name'] = self.name
    od['full_name'] = self.full_name
    od['is_active'] = self.is_active
    od['description'] = self.description
    od['lat'] = self.lat
    od['lng'] = self.lng
    od['logo'] = '%s://%s%s' % (settings.SWAGGER_SCHEME,
                                settings.SWAGGER_HOST,
                                self.logo.url)
    #marker = self.marker
    od['url'] = self.url
    od['slug'] = self.slug
    od['version'] = self.version if self.version else None
    return od
IQSS/miniverse
[ 2, 7, 2, 26, 1467318581 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]
def __init__(
    self,
    **kwargs
Azure/azure-sdk-for-python
[ 3526, 2256, 3526, 986, 1335285972 ]