| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
ResidualFeedForward
|
def ResidualFeedForward(feature_depth,
feedforward_depth,
dropout,
mode):
"""Residual feed-forward layer with normalization at start."""
return layers.Residual(
layers.LayerNorm(),
layers.Dense(feedforward_depth),
layers.Relu(),
layers.Dropout(rate=dropout, mode=mode),
layers.Dense(feature_depth),
layers.Dropout(rate=dropout, mode=mode)
)
|
python
|
def ResidualFeedForward(feature_depth,
feedforward_depth,
dropout,
mode):
"""Residual feed-forward layer with normalization at start."""
return layers.Residual(
layers.LayerNorm(),
layers.Dense(feedforward_depth),
layers.Relu(),
layers.Dropout(rate=dropout, mode=mode),
layers.Dense(feature_depth),
layers.Dropout(rate=dropout, mode=mode)
)
|
[
"def",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
")",
":",
"return",
"layers",
".",
"Residual",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Dense",
"(",
"feedforward_depth",
")",
",",
"layers",
".",
"Relu",
"(",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"Dense",
"(",
"feature_depth",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
")"
] |
Residual feed-forward layer with normalization at start.
|
[
"Residual",
"feed",
"-",
"forward",
"layer",
"with",
"normalization",
"at",
"start",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L24-L36
|
train
|
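For reference, a minimal NumPy sketch of the computation this combinator builds: a pre-norm residual block, y = x + Dense2(Relu(Dense1(LayerNorm(x)))). Dropout and LayerNorm's learned scale/shift are omitted for brevity, and the weight names (w1, w2) are illustrative, not part of the trax API.

```python
import numpy as np

def residual_feed_forward(x, w1, b1, w2, b2, eps=1e-6):
    """Pre-norm residual FFN: x + Dense2(Relu(Dense1(LayerNorm(x))))."""
    mean = x.mean(axis=-1, keepdims=True)          # LayerNorm over features
    var = x.var(axis=-1, keepdims=True)
    h = (x - mean) / np.sqrt(var + eps)
    h = np.maximum(h @ w1 + b1, 0.0)               # Dense(feedforward_depth) + Relu
    return x + (h @ w2 + b2)                       # Dense(feature_depth) + residual add

feature_depth, feedforward_depth = 4, 8
x = np.random.randn(2, 3, feature_depth)
w1 = np.random.randn(feature_depth, feedforward_depth)
w2 = np.random.randn(feedforward_depth, feature_depth)
y = residual_feed_forward(x, w1, np.zeros(feedforward_depth),
                          w2, np.zeros(feature_depth))
assert y.shape == x.shape
```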
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
EncoderLayer
|
def EncoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer encoder layer.
The input to the encoder is a pair (embedded source, mask) where
the mask is created from the original source to prevent attending
to the padding part of the input.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer, returning a pair (activations, mask).
"""
# The encoder block expects (activation, mask) as input and returns
# the new activations only; we add the mask back to the output next.
encoder_block = layers.Serial(
layers.Residual( # Attention block here.
layers.Parallel(layers.LayerNorm(), layers.Identity()),
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode),
shortcut=layers.FirstBranch()
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
)
# Now we add the mask back.
return layers.Serial(
layers.Reorder(output=((0, 1), 1)), # (x, mask) --> ((x, mask), mask)
layers.Parallel(encoder_block, layers.Identity())
)
|
python
|
def EncoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer encoder layer.
The input to the encoder is a pair (embedded source, mask) where
the mask is created from the original source to prevent attending
to the padding part of the input.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer, returning a pair (activations, mask).
"""
# The encoder block expects (activation, mask) as input and returns
# the new activations only; we add the mask back to the output next.
encoder_block = layers.Serial(
layers.Residual( # Attention block here.
layers.Parallel(layers.LayerNorm(), layers.Identity()),
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode),
shortcut=layers.FirstBranch()
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
)
# Now we add the mask back.
return layers.Serial(
layers.Reorder(output=((0, 1), 1)), # (x, mask) --> ((x, mask), mask)
layers.Parallel(encoder_block, layers.Identity())
)
|
[
"def",
"EncoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"mode",
")",
":",
"# The encoder block expects (activation, mask) as input and returns",
"# the new activations only, we add the mask back to output next.",
"encoder_block",
"=",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Residual",
"(",
"# Attention block here.",
"layers",
".",
"Parallel",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Identity",
"(",
")",
")",
",",
"layers",
".",
"MultiHeadedAttention",
"(",
"feature_depth",
",",
"num_heads",
"=",
"num_heads",
",",
"dropout",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"shortcut",
"=",
"layers",
".",
"FirstBranch",
"(",
")",
")",
",",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
"# Now we add the mask back.",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Reorder",
"(",
"output",
"=",
"(",
"(",
"0",
",",
"1",
")",
",",
"1",
")",
")",
",",
"# (x, mask) --> ((x, mask), mask)",
"layers",
".",
"Parallel",
"(",
"encoder_block",
",",
"layers",
".",
"Identity",
"(",
")",
")",
")"
] |
Transformer encoder layer.
The input to the encoder is a pair (embedded source, mask) where
the mask is created from the original source to prevent attending
to the padding part of the input.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer, returning a pair (activations, mask).
|
[
"Transformer",
"encoder",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L39-L76
|
train
|
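The mask plumbing in this layer is pure tuple rewiring. A hypothetical pure-Python sketch of the data flow (not the trax implementation): Reorder duplicates the mask so the attention block consumes (x, mask) while a copy bypasses it and is re-attached afterwards.

```python
def reorder(x, mask):
    # (x, mask) --> ((x, mask), mask): feed the pair to the block, keep a copy.
    return (x, mask), mask

def encoder_layer(block, x, mask):
    block_in, kept_mask = reorder(x, mask)
    new_x = block(*block_in)      # the block consumes the mask, returns activations
    return new_x, kept_mask       # re-attach the mask for the next layer

double = lambda x, mask: x * 2    # stand-in for the attention + feed-forward block
print(encoder_layer(double, 3, mask="m"))   # (6, 'm')
```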
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
TransformerEncoder
|
def TransformerEncoder(vocab_size,
num_classes=10,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: int: number of output classes
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
"""
input_embedding = layers.Serial(
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len)
)
return layers.Serial(
layers.Branch(), # Branch input to create embedding and mask.
layers.Parallel(input_embedding, layers.PaddingMask()),
layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.FirstBranch(), # Drop the mask.
layers.LayerNorm(),
layers.Mean(axis=1), # Average on length.
layers.Dense(num_classes),
layers.LogSoftmax()
)
|
python
|
def TransformerEncoder(vocab_size,
num_classes=10,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: int: number of output classes
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
"""
input_embedding = layers.Serial(
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len)
)
return layers.Serial(
layers.Branch(), # Branch input to create embedding and mask.
layers.Parallel(input_embedding, layers.PaddingMask()),
layers.Serial(*[EncoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.FirstBranch(), # Drop the mask.
layers.LayerNorm(),
layers.Mean(axis=1), # Average on length.
layers.Dense(num_classes),
layers.LogSoftmax()
)
|
[
"def",
"TransformerEncoder",
"(",
"vocab_size",
",",
"num_classes",
"=",
"10",
",",
"feature_depth",
"=",
"512",
",",
"feedforward_depth",
"=",
"2048",
",",
"num_layers",
"=",
"6",
",",
"num_heads",
"=",
"8",
",",
"dropout",
"=",
"0.1",
",",
"max_len",
"=",
"2048",
",",
"mode",
"=",
"'train'",
")",
":",
"input_embedding",
"=",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"vocab_size",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"PositionalEncoding",
"(",
"max_len",
"=",
"max_len",
")",
")",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Branch",
"(",
")",
",",
"# Branch input to create embedding and mask.",
"layers",
".",
"Parallel",
"(",
"input_embedding",
",",
"layers",
".",
"PaddingMask",
"(",
")",
")",
",",
"layers",
".",
"Serial",
"(",
"*",
"[",
"EncoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"mode",
")",
"for",
"_",
"in",
"range",
"(",
"num_layers",
")",
"]",
")",
",",
"layers",
".",
"FirstBranch",
"(",
")",
",",
"# Drop the mask.",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Mean",
"(",
"axis",
"=",
"1",
")",
",",
"# Average on length.",
"layers",
".",
"Dense",
"(",
"num_classes",
")",
",",
"layers",
".",
"LogSoftmax",
"(",
")",
")"
] |
Transformer encoder.
Args:
vocab_size: int: vocab size
num_classes: int: number of output classes
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the Transformer encoder layer.
|
[
"Transformer",
"encoder",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L79-L120
|
train
|
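A sketch of the two pieces that frame the encoder stack, as I read them (illustrative NumPy, not the trax source): a padding mask built from token ids (pad id 0 assumed), and the classification head that averages over length before the final Dense and LogSoftmax.

```python
import numpy as np

def padding_mask(token_ids, pad_id=0):
    # True where attention is allowed, False on padding positions.
    return token_ids != pad_id

def classification_head(activations, w, b):
    pooled = activations.mean(axis=1)   # Mean(axis=1): average over length
    logits = pooled @ w + b             # Dense(num_classes)
    z = logits - logits.max(axis=-1, keepdims=True)
    return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))   # LogSoftmax

tokens = np.array([[5, 9, 0, 0]])
acts = np.random.randn(1, 4, 16)
w = np.random.randn(16, 10)
print(padding_mask(tokens))                              # [[ True  True False False]]
print(classification_head(acts, w, np.zeros(10)).shape)  # (1, 10)
```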
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
DecoderLayer
|
def DecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer decoder layer.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.Residual( # Self-attention block.
layers.LayerNorm(),
layers.Branch(),
layers.Parallel(layers.Identity(), # activation for (q, k, v)
layers.CausalMask(axis=-2)), # attention mask
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode)
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
)
|
python
|
def DecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
mode):
"""Transformer decoder layer.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.Residual( # Self-attention block.
layers.LayerNorm(),
layers.Branch(),
layers.Parallel(layers.Identity(), # activation for (q, k, v)
layers.CausalMask(axis=-2)), # attention mask
layers.MultiHeadedAttention(feature_depth, num_heads=num_heads,
dropout=dropout, mode=mode),
layers.Dropout(rate=dropout, mode=mode)
),
ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode)
)
|
[
"def",
"DecoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"mode",
")",
":",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Residual",
"(",
"# Self-attention block.",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Branch",
"(",
")",
",",
"layers",
".",
"Parallel",
"(",
"layers",
".",
"Identity",
"(",
")",
",",
"# activation for (q, k, v)",
"layers",
".",
"CausalMask",
"(",
"axis",
"=",
"-",
"2",
")",
")",
",",
"# attention mask",
"layers",
".",
"MultiHeadedAttention",
"(",
"feature_depth",
",",
"num_heads",
"=",
"num_heads",
",",
"dropout",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
"=",
"mode",
")",
")"
] |
Transformer decoder layer.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
mode: str: 'train' or 'eval'
Returns:
the layer.
|
[
"Transformer",
"decoder",
"layer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L123-L151
|
train
|
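CausalMask restricts self-attention so each position sees only itself and earlier positions. A minimal NumPy equivalent of the mask it produces (shape handling simplified relative to the real layer):

```python
import numpy as np

def causal_mask(seq_len):
    # mask[i, j] is True iff position i may attend to position j (j <= i).
    return np.tril(np.ones((seq_len, seq_len), dtype=bool))

print(causal_mask(4).astype(int))
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]
```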
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
TransformerLM
|
def TransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer language model (only uses the decoder part of Transformer).
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.ShiftRight(),
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*[DecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.LayerNorm(),
layers.Dense(vocab_size),
layers.LogSoftmax()
)
|
python
|
def TransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
"""Transformer language model (only uses the decoder part of Transformer).
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.ShiftRight(),
layers.Embedding(feature_depth, vocab_size),
layers.Dropout(rate=dropout, mode=mode),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*[DecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, mode)
for _ in range(num_layers)]),
layers.LayerNorm(),
layers.Dense(vocab_size),
layers.LogSoftmax()
)
|
[
"def",
"TransformerLM",
"(",
"vocab_size",
",",
"feature_depth",
"=",
"512",
",",
"feedforward_depth",
"=",
"2048",
",",
"num_layers",
"=",
"6",
",",
"num_heads",
"=",
"8",
",",
"dropout",
"=",
"0.1",
",",
"max_len",
"=",
"2048",
",",
"mode",
"=",
"'train'",
")",
":",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"ShiftRight",
"(",
")",
",",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"vocab_size",
")",
",",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"PositionalEncoding",
"(",
"max_len",
"=",
"max_len",
")",
",",
"layers",
".",
"Serial",
"(",
"*",
"[",
"DecoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"mode",
")",
"for",
"_",
"in",
"range",
"(",
"num_layers",
")",
"]",
")",
",",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Dense",
"(",
"vocab_size",
")",
",",
"layers",
".",
"LogSoftmax",
"(",
")",
")"
] |
Transformer language model (only uses the decoder part of Transformer).
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
|
[
"Transformer",
"language",
"model",
"(",
"only",
"uses",
"the",
"decoder",
"part",
"of",
"Transformer",
")",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L154-L188
|
train
|
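ShiftRight implements teacher forcing: the decoder inputs are the targets shifted one position right, so token t is predicted only from tokens before t. A NumPy sketch, assuming pad id 0:

```python
import numpy as np

def shift_right(targets, pad_id=0):
    # Prepend a pad token and drop the last position, so the model
    # predicts targets[:, t] from targets[:, :t].
    return np.pad(targets, ((0, 0), (1, 0)), constant_values=pad_id)[:, :-1]

print(shift_right(np.array([[11, 12, 13]])))   # [[ 0 11 12]]
```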
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
ChunkedDecoderLayer
|
def ChunkedDecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
chunk_selector,
mode):
"""Transformer decoder layer operating on chunks.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.Residual( # Self-attention block.
layers.Map(layers.LayerNorm()),
layers.ChunkedCausalMultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout,
chunk_selector=chunk_selector, mode=mode),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
),
layers.Map(ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode))
)
|
python
|
def ChunkedDecoderLayer(feature_depth,
feedforward_depth,
num_heads,
dropout,
chunk_selector,
mode):
"""Transformer decoder layer operating on chunks.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
return layers.Serial(
layers.Residual( # Self-attention block.
layers.Map(layers.LayerNorm()),
layers.ChunkedCausalMultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout,
chunk_selector=chunk_selector, mode=mode),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
),
layers.Map(ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode))
)
|
[
"def",
"ChunkedDecoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"chunk_selector",
",",
"mode",
")",
":",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Residual",
"(",
"# Self-attention block.",
"layers",
".",
"Map",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
")",
",",
"layers",
".",
"ChunkedCausalMultiHeadedAttention",
"(",
"feature_depth",
",",
"num_heads",
"=",
"num_heads",
",",
"dropout",
"=",
"dropout",
",",
"chunk_selector",
"=",
"chunk_selector",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
")",
",",
"layers",
".",
"Map",
"(",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
")"
] |
Transformer decoder layer operating on chunks.
Args:
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend.
mode: str: 'train' or 'eval'
Returns:
the layer.
|
[
"Transformer",
"decoder",
"layer",
"operating",
"on",
"chunks",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L191-L220
|
train
|
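Map(L) applies the same layer L to every chunk independently; only the chunked attention layer lets information flow between chunks. A toy sketch of the semantics (not the trax combinator itself):

```python
def layer_map(layer, chunks):
    # Apply the same layer to every chunk independently,
    # preserving the chunk structure of the input.
    return tuple(layer(c) for c in chunks)

print(layer_map(lambda c: c * 2, (1, 2, 3)))   # (2, 4, 6)
```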
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
ChunkedTransformerLM
|
def ChunkedTransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
chunk_selector=None,
max_len=2048,
mode='train'):
"""Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunk3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
represent a long sequence that is the concatenation chunk1, chunk2, ..., chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
(if None, attends to the previous chunk, which is equivalent to setting
chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend
to the current chunk with a causal mask too, selected chunks unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, chunk_selector, mode)
for _ in range(num_layers)]
# Below each Map(L) applies the layer L to each chunk independently.
return layers.Serial(
layers.ShiftRight(),
layers.Map(layers.Embedding(feature_depth, vocab_size)),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*stack),
layers.Map(layers.LayerNorm()),
layers.Map(layers.Dense(vocab_size)),
layers.Map(layers.LogSoftmax()),
)
|
python
|
def ChunkedTransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
chunk_selector=None,
max_len=2048,
mode='train'):
"""Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunk3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
represent a long sequence that is the concatenation chunk1, chunk2, ..., chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
(if None, attends to the previous chunk, which is equivalent to setting
chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend
to the current chunk with a causal mask too, selected chunks unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, chunk_selector, mode)
for _ in range(num_layers)]
# Below each Map(L) applies the layer L to each chunk independently.
return layers.Serial(
layers.ShiftRight(),
layers.Map(layers.Embedding(feature_depth, vocab_size)),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*stack),
layers.Map(layers.LayerNorm()),
layers.Map(layers.Dense(vocab_size)),
layers.Map(layers.LogSoftmax()),
)
|
[
"def",
"ChunkedTransformerLM",
"(",
"vocab_size",
",",
"feature_depth",
"=",
"512",
",",
"feedforward_depth",
"=",
"2048",
",",
"num_layers",
"=",
"6",
",",
"num_heads",
"=",
"8",
",",
"dropout",
"=",
"0.1",
",",
"chunk_selector",
"=",
"None",
",",
"max_len",
"=",
"2048",
",",
"mode",
"=",
"'train'",
")",
":",
"stack",
"=",
"[",
"ChunkedDecoderLayer",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"num_heads",
",",
"dropout",
",",
"chunk_selector",
",",
"mode",
")",
"for",
"_",
"in",
"range",
"(",
"num_layers",
")",
"]",
"# Below each Map(L) applies the layer L to each chunk independently.",
"return",
"layers",
".",
"Serial",
"(",
"layers",
".",
"ShiftRight",
"(",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"vocab_size",
")",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"Dropout",
"(",
"rate",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
"layers",
".",
"PositionalEncoding",
"(",
"max_len",
"=",
"max_len",
")",
",",
"layers",
".",
"Serial",
"(",
"*",
"stack",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"Dense",
"(",
"vocab_size",
")",
")",
",",
"layers",
".",
"Map",
"(",
"layers",
".",
"LogSoftmax",
"(",
")",
")",
",",
")"
] |
Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunk3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
represent a long sequence that is the concatenation chunk1, chunk2, ..., chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
(if None, attends to the previous chunk, which is equivalent to setting
chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend
to the current chunk with a causal mask too, selected chunks unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
|
[
"Transformer",
"language",
"model",
"operating",
"on",
"chunks",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L223-L273
|
train
|
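The default selector described in the docstring (the TransformerXL pattern) can be transcribed directly:

```python
def default_chunk_selector(x):
    # [] for the first chunk, otherwise the single previous chunk;
    # the current chunk is always attended to under the causal mask.
    return [] if x < 1 else [x - 1]

print([default_chunk_selector(i) for i in range(4)])   # [[], [0], [1], [2]]
```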
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/transformer.py
|
Transformer
|
def Transformer(source_vocab_size,
target_vocab_size,
mode='train',
num_layers=6,
feature_depth=512,
feedforward_depth=2048,
num_heads=8,
dropout=0.1,
shared_embedding=True,
max_len=200,
return_evals=False):
"""Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
shared_embedding: bool: specify whether source/target embeddings are tied.
max_len: int: maximum symbol length for positional encoding
return_evals: bool: whether to generate decode-time evaluation functions
Returns:
A namedtuple containing the model's 'init' and 'apply' functions for training,
and an 'evals' function that itself returns a namedtuple containing evaluation
functions for the trained encoder, decoder, and generator substax.
"""
# Input embedding and positional encoding
inject_position = layers.Serial(
layers.Dropout(dropout, mode=mode),
layers.PositionalEncoding(feature_depth, max_len=max_len)
)
if shared_embedding:
assert source_vocab_size == target_vocab_size
# Weight-shared Embedding
embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size))
source_embedding_layer = layers.Serial(embedding, inject_position)
target_embedding_layer = source_embedding_layer
else:
source_embedding = layers.Embedding(feature_depth, source_vocab_size)
target_embedding = layers.Embedding(feature_depth, target_vocab_size)
source_embedding_layer = layers.Serial(source_embedding, inject_position)
target_embedding_layer = layers.Serial(target_embedding, inject_position)
# Multi-headed Attention and Feed-forward layers
multi_attention = layers.MultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout, mode=mode)
# Encoder
@layers.Lambda
def Encoder(source, source_mask):
"""Transformer encoder stack.
Args:
source: layer variable: raw source sequences
source_mask: layer variable: self-attention mask
Returns:
Layer variable that outputs encoded source.
"""
encoder_layer = layers.Serial(
# input attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
source_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode),
)
return layers.Serial(
source,
source_embedding_layer,
layers.repeat(encoder_layer, num_layers),
layers.LayerNorm(),
)
# Decoder
@layers.Lambda
def Decoder(memory, target, target_mask, memory_mask):
"""Transformer decoder stack.
Args:
memory: layer variable: encoded source sequences
target: layer variable: raw target sequences
target_mask: layer variable: self-attention mask
memory_mask: layer variable: memory attention mask
Returns:
Layer variable that outputs the decoded target.
"""
decoder_layer = layers.Serial(
# target attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
target_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# target attends to encoded source
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
memory, # key
memory, # value
memory_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode)
)
return layers.Serial(
target,
target_embedding_layer,
layers.repeat(decoder_layer, num_layers),
layers.LayerNorm(),
)
# The Transformer
@layers.Lambda
def transformer(source, target, source_mask, target_mask, memory_mask): # pylint: disable=invalid-name
encoded_source = Encoder(source, source_mask)
return Decoder(encoded_source, target, target_mask, memory_mask)
# Finally, bind the generator transform to use later for inference.
@layers.Lambda
def Generator(encoded_target):
return layers.Serial(
encoded_target,
layers.Dense(target_vocab_size),
layers.LogSoftmax
)
# Model-Building and Evaluation Functions
# Get the entire model's layer pair.
top_init, top_apply = Generator(transformer)
# By default act as a normal constructor and emit an (init, apply) pair.
if not return_evals:
return (top_init, top_apply)
else:
raise ValueError('inference in this model is still a work in progress')
|
python
|
def Transformer(source_vocab_size,
target_vocab_size,
mode='train',
num_layers=6,
feature_depth=512,
feedforward_depth=2048,
num_heads=8,
dropout=0.1,
shared_embedding=True,
max_len=200,
return_evals=False):
"""Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
shared_embedding: bool: specify whether source/target embeddings are tied.
max_len: int: maximum symbol length for positional encoding
return_evals: bool: whether to generate decode-time evaluation functions
Returns:
A namedtuple containing the model's 'init' and 'apply' functions for training,
and an 'evals' function that itself returns a namedtuple containing evaluation
functions for the trained encoder, decoder, and generator substax.
"""
# Input embedding and positional encoding
inject_position = layers.Serial(
layers.Dropout(dropout, mode=mode),
layers.PositionalEncoding(feature_depth, max_len=max_len)
)
if shared_embedding:
assert source_vocab_size == target_vocab_size
# Weight-shared Embedding
embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size))
source_embedding_layer = layers.Serial(embedding, inject_position)
target_embedding_layer = source_embedding_layer
else:
source_embedding = layers.Embedding(feature_depth, source_vocab_size)
target_embedding = layers.Embedding(feature_depth, target_vocab_size)
source_embedding_layer = layers.Serial(source_embedding, inject_position)
target_embedding_layer = layers.Serial(target_embedding, inject_position)
# Multi-headed Attention and Feed-forward layers
multi_attention = layers.MultiHeadedAttention(
feature_depth, num_heads=num_heads, dropout=dropout, mode=mode)
# Encoder
@layers.Lambda
def Encoder(source, source_mask):
"""Transformer encoder stack.
Args:
source: layer variable: raw source sequences
source_mask: layer variable: self-attention mask
Returns:
Layer variable that outputs encoded source.
"""
encoder_layer = layers.Serial(
# input attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
source_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode),
)
return layers.Serial(
source,
source_embedding_layer,
layers.repeat(encoder_layer, num_layers),
layers.LayerNorm(),
)
# Decoder
@layers.Lambda
def Decoder(memory, target, target_mask, memory_mask):
"""Transformer decoder stack.
Args:
memory: layer variable: encoded source sequences
target: layer variable: raw target sequences
target_mask: layer variable: self-attention mask
memory_mask: layer variable: memory attention mask
Returns:
Layer variable that outputs the decoded target.
"""
decoder_layer = layers.Serial(
# target attends to self
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
layers.Identity(), # key
layers.Identity(), # value
target_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# target attends to encoded source
layers.Residual(layers.LayerNorm(),
layers.Branch(size=4),
layers.Parallel(layers.Identity(), # query
memory, # key
memory, # value
memory_mask), # attention mask
multi_attention,
layers.Dropout(dropout, mode=mode)),
# feed-forward
ResidualFeedForward(
feature_depth, feedforward_depth, dropout, mode=mode)
)
return layers.Serial(
target,
target_embedding_layer,
layers.repeat(decoder_layer, num_layers),
layers.LayerNorm(),
)
# The Transformer
@layers.Lambda
def transformer(source, target, source_mask, target_mask, memory_mask): # pylint: disable=invalid-name
encoded_source = Encoder(source, source_mask)
return Decoder(encoded_source, target, target_mask, memory_mask)
# Finally, bind the generator transform to use later for inference.
@layers.Lambda
def Generator(encoded_target):
return layers.Serial(
encoded_target,
layers.Dense(target_vocab_size),
layers.LogSoftmax
)
# Model-Building and Evaluation Functions
# Get the entire model's layer pair.
top_init, top_apply = Generator(transformer)
# By default act as a normal constructor and emit an (init, apply) pair.
if not return_evals:
return (top_init, top_apply)
else:
raise ValueError('inference in this model is still a work in progress')
|
[
"def",
"Transformer",
"(",
"source_vocab_size",
",",
"target_vocab_size",
",",
"mode",
"=",
"'train'",
",",
"num_layers",
"=",
"6",
",",
"feature_depth",
"=",
"512",
",",
"feedforward_depth",
"=",
"2048",
",",
"num_heads",
"=",
"8",
",",
"dropout",
"=",
"0.1",
",",
"shared_embedding",
"=",
"True",
",",
"max_len",
"=",
"200",
",",
"return_evals",
"=",
"False",
")",
":",
"# Input embedding and positional encoding",
"inject_position",
"=",
"layers",
".",
"Serial",
"(",
"layers",
".",
"Dropout",
"(",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
"layers",
".",
"PositionalEncoding",
"(",
"feature_depth",
",",
"max_len",
"=",
"max_len",
")",
")",
"if",
"shared_embedding",
":",
"assert",
"source_vocab_size",
"==",
"target_vocab_size",
"# Weight-shared Embedding",
"embedding",
"=",
"layers",
".",
"Share",
"(",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"source_vocab_size",
")",
")",
"source_embedding_layer",
"=",
"layers",
".",
"Serial",
"(",
"embedding",
",",
"inject_position",
")",
"target_embedding_layer",
"=",
"source_embedding_layer",
"else",
":",
"source_embedding",
"=",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"source_vocab_size",
")",
"target_embedding",
"=",
"layers",
".",
"Embedding",
"(",
"feature_depth",
",",
"target_vocab_size",
")",
"source_embedding_layer",
"=",
"layers",
".",
"Serial",
"(",
"source_embedding",
",",
"inject_position",
")",
"target_embedding_layer",
"=",
"layers",
".",
"Serial",
"(",
"target_embedding",
",",
"inject_position",
")",
"# Multi-headed Attention and Feed-forward layers",
"multi_attention",
"=",
"layers",
".",
"MultiHeadedAttention",
"(",
"feature_depth",
",",
"num_heads",
"=",
"num_heads",
",",
"dropout",
"=",
"dropout",
",",
"mode",
"=",
"mode",
")",
"# Encoder",
"@",
"layers",
".",
"Lambda",
"def",
"Encoder",
"(",
"source",
",",
"source_mask",
")",
":",
"\"\"\"Transformer encoder stack.\n\n Args:\n source: layer variable: raw source sequences\n source_mask: layer variable: self-attention mask\n\n Returns:\n Layer variable that outputs encoded source.\n \"\"\"",
"encoder_layer",
"=",
"layers",
".",
"Serial",
"(",
"# input attends to self",
"layers",
".",
"Residual",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Branch",
"(",
"size",
"=",
"4",
")",
",",
"layers",
".",
"Parallel",
"(",
"layers",
".",
"Identity",
"(",
")",
",",
"# query",
"layers",
".",
"Identity",
"(",
")",
",",
"# key",
"layers",
".",
"Identity",
"(",
")",
",",
"# value",
"source_mask",
")",
",",
"# attention mask",
"multi_attention",
",",
"layers",
".",
"Dropout",
"(",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
"# feed-forward",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
"=",
"mode",
")",
",",
")",
"return",
"layers",
".",
"Serial",
"(",
"source",
",",
"source_embedding_layer",
",",
"layers",
".",
"repeat",
"(",
"encoder_layer",
",",
"num_layers",
")",
",",
"layers",
".",
"LayerNorm",
"(",
")",
",",
")",
"# Decoder",
"@",
"layers",
".",
"Lambda",
"def",
"Decoder",
"(",
"memory",
",",
"target",
",",
"target_mask",
",",
"memory_mask",
")",
":",
"\"\"\"Transformer decoder stack.\n\n Args:\n memory: layer variable: encoded source sequences\n target: layer variable: raw target sequences\n target_mask: layer variable: self-attention mask\n memory_mask: layer variable: memory attention mask\n\n Returns:\n Layer variable that outputs encoded source.\n \"\"\"",
"decoder_layer",
"=",
"layers",
".",
"Serial",
"(",
"# target attends to self",
"layers",
".",
"Residual",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Branch",
"(",
"size",
"=",
"4",
")",
",",
"layers",
".",
"Parallel",
"(",
"layers",
".",
"Identity",
"(",
")",
",",
"# query",
"layers",
".",
"Identity",
"(",
")",
",",
"# key",
"layers",
".",
"Identity",
"(",
")",
",",
"# value",
"target_mask",
")",
",",
"# attention mask",
"multi_attention",
",",
"layers",
".",
"Dropout",
"(",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
"# target attends to encoded source",
"layers",
".",
"Residual",
"(",
"layers",
".",
"LayerNorm",
"(",
")",
",",
"layers",
".",
"Branch",
"(",
"size",
"=",
"4",
")",
",",
"layers",
".",
"Parallel",
"(",
"layers",
".",
"Identity",
"(",
")",
",",
"# query",
"memory",
",",
"# key",
"memory",
",",
"# value",
"memory_mask",
")",
",",
"# attention mask",
"multi_attention",
",",
"layers",
".",
"Dropout",
"(",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
",",
"# feed-forward",
"ResidualFeedForward",
"(",
"feature_depth",
",",
"feedforward_depth",
",",
"dropout",
",",
"mode",
"=",
"mode",
")",
")",
"return",
"layers",
".",
"Serial",
"(",
"target",
",",
"target_embedding_layer",
",",
"layers",
".",
"repeat",
"(",
"decoder_layer",
",",
"num_layers",
")",
",",
"layers",
".",
"LayerNorm",
"(",
")",
",",
")",
"# The Transformer",
"@",
"layers",
".",
"Lambda",
"def",
"transformer",
"(",
"source",
",",
"target",
",",
"source_mask",
",",
"target_mask",
",",
"memory_mask",
")",
":",
"# pylint: disable=invalid-name",
"encoded_source",
"=",
"Encoder",
"(",
"source",
",",
"source_mask",
")",
"return",
"Decoder",
"(",
"encoded_source",
",",
"target",
",",
"target_mask",
",",
"memory_mask",
")",
"# Finally, bind the generator transform to use later for inference.",
"@",
"layers",
".",
"Lambda",
"def",
"Generator",
"(",
"encoded_target",
")",
":",
"return",
"layers",
".",
"Serial",
"(",
"encoded_target",
",",
"layers",
".",
"Dense",
"(",
"target_vocab_size",
")",
",",
"layers",
".",
"LogSoftmax",
")",
"# Model-Building and Evaluation Functions",
"# Get entire model's the layer pair",
"top_init",
",",
"top_apply",
"=",
"Generator",
"(",
"transformer",
")",
"# By default act as a normal constructor and emit an (init, apply) pair.",
"if",
"not",
"return_evals",
":",
"return",
"(",
"top_init",
",",
"top_apply",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'inference in this model is still a work in progress'",
")"
] |
Transformer model.
Args:
source_vocab_size: int: source vocab size
target_vocab_size: int: target vocab size
mode: str: 'train' or 'eval'
num_layers: int: number of encoder/decoder layers
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
shared_embedding: bool: specify whether source/target embeddings are tied.
max_len: int: maximum symbol length for positional encoding
return_evals: bool: whether to generate decode-time evaluation functions
Returns:
A namedtuple containing the model's 'init' and 'apply' functions for training,
and an 'evals' function that itself returns a namedtuple containing evaluation
functions for the trained encoder, decoder, and generator substax.
|
[
"Transformer",
"model",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L279-L431
|
train
|
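inject_position adds dropout plus positional encoding to each embedding. Assuming the standard sinusoidal scheme from "Attention Is All You Need" (the actual trax layer may differ in detail), the encoding table is:

```python
import numpy as np

def sinusoidal_positions(max_len, feature_depth):
    # pe[pos, 2i] = sin(pos / 10000^(2i/d)); pe[pos, 2i+1] = cos(same angle).
    pos = np.arange(max_len)[:, None]
    i = np.arange(0, feature_depth, 2)[None, :]
    angles = pos / np.power(10000.0, i / feature_depth)
    pe = np.zeros((max_len, feature_depth))
    pe[:, 0::2] = np.sin(angles)
    pe[:, 1::2] = np.cos(angles)
    return pe

print(sinusoidal_positions(200, 512).shape)   # (200, 512)
```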
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_transformer.py
|
mtf_transformer_base
|
def mtf_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.add_hparam("mtf_mode", True)
hparams.batch_size = 64
hparams.max_length = 256
hparams.add_hparam("d_model", 512)
hparams.add_hparam("d_kv", 128)
hparams.add_hparam("local_attention_window_size", 128)
hparams.label_smoothing = 0.1
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "model:8")
hparams.add_hparam("layout", "batch:batch;vocab:model;d_ff:model;heads:model")
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("d_ff", 2048)
hparams.add_hparam("encoder_replicate_factor", 1)
hparams.add_hparam("decoder_replicate_factor", 1)
hparams.add_hparam("encoder_layers", ["att", "drd"] * 6)
hparams.add_hparam("decoder_layers", ["att", "enc_att", "drd"] * 6)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("relu_dropout", 0.1)
hparams.layer_prepostprocess_dropout = 0.1
# Describes the model architecture:
# "encdec": encoder + autoregressive decoder
# "decoder": single-stack autoregressive sequence model.
# "encoder": single-stack non-autoregressive model
# with equal-length inputs and outputs.
hparams.add_hparam("transformer_type", "encdec")
# What does the decoder do:
# "autoregressive": Decoder left to right
# "denoising": Fills in masked-out values simultaneously
hparams.add_hparam("decoder_type", "autoregressive")
# Parameters describing the noising algorithm for denoising decoders
hparams.add_hparam("noising_spec_train", {"type": "mask", "prob": 0.15})
hparams.add_hparam("noising_spec_eval", {"type": "mask", "prob": 0.15})
# during training, we use the eval noiser with this probability
hparams.add_hparam("noising_use_eval_during_train", 0.1)
# round up vocab sizes to be a multiple of this value
hparams.vocab_divisor = 128
# options are dense_relu_dense, moe, hmoe
hparams.add_hparam("feedforward_layer", "drd")
# If True, then reuse targets_embedding_var * rsqrt(d_model) as softmax_var
# If hparams.transformer_type == "encoder", then there is no targets embedding
# so we reuse the inputs embedding instead.
hparams.shared_embedding_and_softmax_weights = True
# Reuse targets_embedding_var as inputs_embedding_var
# relevant only if hparams.transformer_type == "encdec"
hparams.shared_embedding = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("master_dtype", "bfloat16")
hparams.add_hparam("slice_dtype", "float32")
hparams.activation_dtype = "bfloat16"
# These parameters make Transformer model compatible with MtfTransformer
# Do not override these, as mtf_transformer does not support other options.
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
# Parameters for computing the maximum decode length in beam search.
# Maximum decode length is:
# min(max_length,
# decode_length_multiplier * input_length + decode_length_constant)
hparams.add_hparam("decode_length_multiplier", 1.5)
hparams.add_hparam("decode_length_constant", 10.0)
# If nonzero, we split the batch across two tensor-dimensions named
# "outer_batch" and "inner_batch", allowing for splitting across two mesh
# dimensions. This is necessary for hierarchical mixture of experts.
# The two tensor dimensions have sizes hparams.outer_batch_size and
# hparams.batch_size // hparams.outer_batch_size.
hparams.add_hparam("outer_batch_size", 0)
# TODO(noam): file a bug
hparams.add_hparam("reshape_logits_hack", False)
hparams.add_hparam("compression_factor", 4)
return hparams
|
python
|
def mtf_transformer_base():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.no_data_parallelism = True
hparams.use_fixed_batch_size = True
hparams.add_hparam("mtf_mode", True)
hparams.batch_size = 64
hparams.max_length = 256
hparams.add_hparam("d_model", 512)
hparams.add_hparam("d_kv", 128)
hparams.add_hparam("local_attention_window_size", 128)
hparams.label_smoothing = 0.1
# 8-way model-parallelism
hparams.add_hparam("mesh_shape", "model:8")
hparams.add_hparam("layout", "batch:batch;vocab:model;d_ff:model;heads:model")
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("d_ff", 2048)
hparams.add_hparam("encoder_replicate_factor", 1)
hparams.add_hparam("decoder_replicate_factor", 1)
hparams.add_hparam("encoder_layers", ["att", "drd"] * 6)
hparams.add_hparam("decoder_layers", ["att", "enc_att", "drd"] * 6)
hparams.add_hparam("attention_dropout", 0.1)
hparams.add_hparam("relu_dropout", 0.1)
hparams.layer_prepostprocess_dropout = 0.1
# Describes the model architecture:
# "encdec": encoder + autoregressive decoder
# "decoder": single-stack autoregressive sequence model.
# "encoder": single-stack non-autoregressive model
# with equal-length inputs and outputs.
hparams.add_hparam("transformer_type", "encdec")
# What does the decoder do:
# "autoregressive": Decoder left to right
# "denoising": Fills in masked-out values simultaneously
hparams.add_hparam("decoder_type", "autoregressive")
# Parameters describing the noising algorithm for denoising decoders
hparams.add_hparam("noising_spec_train", {"type": "mask", "prob": 0.15})
hparams.add_hparam("noising_spec_eval", {"type": "mask", "prob": 0.15})
# during training, we use the eval noiser with this probability
hparams.add_hparam("noising_use_eval_during_train", 0.1)
# round up vocab sizes to be a multiple of this value
hparams.vocab_divisor = 128
# options are dense_relu_dense, moe, hmoe
hparams.add_hparam("feedforward_layer", "drd")
# If True, then reuse targets_embedding_var * rsqrt(d_model) as softmax_var
# If hparams.transformer_type == "encoder", then there is no targets embedding
# so we reuse the inputs embedding instead.
hparams.shared_embedding_and_softmax_weights = True
# Reuse targets_embedding_var as inputs_embedding_var
# relevant only if hparams.transformer_type == "encdec"
hparams.shared_embedding = True
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
hparams.learning_rate_warmup_steps = 10000
hparams.add_hparam("master_dtype", "bfloat16")
hparams.add_hparam("slice_dtype", "float32")
hparams.activation_dtype = "bfloat16"
# These parameters make Transformer model compatible with MtfTransformer
# Do not override these, as mtf_transformer does not support other options.
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
# Parameters for computing the maximum decode length in beam search.
# Maximum decode length is:
# min(max_length,
# decode_length_multiplier * input_length + decode_length_constant)
hparams.add_hparam("decode_length_multiplier", 1.5)
hparams.add_hparam("decode_length_constant", 10.0)
# If nonzero, we split the batch across two tensor-dimensions named
# "outer_batch" and "inner_batch", allowing for splitting across two mesh
# dimensions. This is necessary for hierarchical mixture of experts.
# The two tensor dimensions have sizes hparams.outer_batch_size and
# hparams.batch_size // hparams.outer_batch_size.
hparams.add_hparam("outer_batch_size", 0)
# TODO(noam): file a bug
hparams.add_hparam("reshape_logits_hack", False)
hparams.add_hparam("compression_factor", 4)
return hparams
|
[
"def",
"mtf_transformer_base",
"(",
")",
":",
"hparams",
"=",
"common_hparams",
".",
"basic_params1",
"(",
")",
"hparams",
".",
"no_data_parallelism",
"=",
"True",
"hparams",
".",
"use_fixed_batch_size",
"=",
"True",
"hparams",
".",
"add_hparam",
"(",
"\"mtf_mode\"",
",",
"True",
")",
"hparams",
".",
"batch_size",
"=",
"64",
"hparams",
".",
"max_length",
"=",
"256",
"hparams",
".",
"add_hparam",
"(",
"\"d_model\"",
",",
"512",
")",
"hparams",
".",
"add_hparam",
"(",
"\"d_kv\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"local_attention_window_size\"",
",",
"128",
")",
"hparams",
".",
"label_smoothing",
"=",
"0.1",
"# 8-way model-parallelism",
"hparams",
".",
"add_hparam",
"(",
"\"mesh_shape\"",
",",
"\"model:8\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"layout\"",
",",
"\"batch:batch;vocab:model;d_ff:model;heads:model\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"num_heads\"",
",",
"8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"d_ff\"",
",",
"2048",
")",
"hparams",
".",
"add_hparam",
"(",
"\"encoder_replicate_factor\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"decoder_replicate_factor\"",
",",
"1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"encoder_layers\"",
",",
"[",
"\"att\"",
",",
"\"drd\"",
"]",
"*",
"6",
")",
"hparams",
".",
"add_hparam",
"(",
"\"decoder_layers\"",
",",
"[",
"\"att\"",
",",
"\"enc_att\"",
",",
"\"drd\"",
"]",
"*",
"6",
")",
"hparams",
".",
"add_hparam",
"(",
"\"attention_dropout\"",
",",
"0.1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"relu_dropout\"",
",",
"0.1",
")",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.1",
"# Describes what model architecture:",
"# \"encdec\": encoder + autoregressive decoder",
"# \"decoder\": single-stack autoregressive sequence model.",
"# \"encoder\": single-stack non-autoregressive model",
"# with equal-length inputs and outputs.",
"hparams",
".",
"add_hparam",
"(",
"\"transformer_type\"",
",",
"\"encdec\"",
")",
"# What does the decoder do:",
"# \"autoregressive\": Decoder left to right",
"# \"denoising\": Fills in masked-out values simultaneously",
"hparams",
".",
"add_hparam",
"(",
"\"decoder_type\"",
",",
"\"autoregressive\"",
")",
"# Parameters describing the noising algorithm for denoising decoders",
"hparams",
".",
"add_hparam",
"(",
"\"noising_spec_train\"",
",",
"{",
"\"type\"",
":",
"\"mask\"",
",",
"\"prob\"",
":",
"0.15",
"}",
")",
"hparams",
".",
"add_hparam",
"(",
"\"noising_spec_eval\"",
",",
"{",
"\"type\"",
":",
"\"mask\"",
",",
"\"prob\"",
":",
"0.15",
"}",
")",
"# during training, we use the eval noiser with this probability",
"hparams",
".",
"add_hparam",
"(",
"\"noising_use_eval_during_train\"",
",",
"0.1",
")",
"# round up vocab sizes to be a multiple of this value",
"hparams",
".",
"vocab_divisor",
"=",
"128",
"# options are dense_relu_dense, moe, hmoe",
"hparams",
".",
"add_hparam",
"(",
"\"feedforward_layer\"",
",",
"\"drd\"",
")",
"# If True, then reuse targets_embedding_var * rsqrt(d_model) as softmax_var",
"# If hparams.transformer_type == \"encoder\", then there is no targets embedding",
"# so we reuse the inputs embedding instead.",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"True",
"# Reuse targets_embedding_var as inputs_embedding_var",
"# relevant only if hparams.transformer_type == \"encdec\"",
"hparams",
".",
"shared_embedding",
"=",
"True",
"hparams",
".",
"optimizer",
"=",
"\"Adafactor\"",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"linear_warmup*rsqrt_decay*linear_decay\"",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"10000",
"hparams",
".",
"add_hparam",
"(",
"\"master_dtype\"",
",",
"\"bfloat16\"",
")",
"hparams",
".",
"add_hparam",
"(",
"\"slice_dtype\"",
",",
"\"float32\"",
")",
"hparams",
".",
"activation_dtype",
"=",
"\"bfloat16\"",
"# These parameters make Transformer model compatible with MtfTransformer",
"# Do not override these, as mtf_transformer does not support other options.",
"hparams",
".",
"clip_grad_norm",
"=",
"0.",
"# i.e. no gradient clipping",
"hparams",
".",
"bottom",
"=",
"{",
"\"inputs\"",
":",
"modalities",
".",
"identity_bottom",
",",
"\"targets\"",
":",
"modalities",
".",
"identity_bottom",
",",
"}",
"hparams",
".",
"top",
"=",
"{",
"\"targets\"",
":",
"modalities",
".",
"identity_top",
",",
"}",
"# Parameters for computing the maximum decode length in beam search.",
"# Maximum decode length is:",
"# min(max_length,",
"# decode_length_multiplier * input_length + decode_length_constant)",
"hparams",
".",
"add_hparam",
"(",
"\"decode_length_multiplier\"",
",",
"1.5",
")",
"hparams",
".",
"add_hparam",
"(",
"\"decode_length_constant\"",
",",
"10.0",
")",
"# If nonzero, we split the batch across two tensor-dimensions named",
"# \"outer_batch\" and \"inner_batch\", allowing for splitting across two mesh",
"# dimensions. This is necessary for hierarchical mixture of experts.",
"# The two tensor dimensions have sizes hparams.outer_batch_size and",
"# hparams.batch_size // hparams.outer_batch_size.",
"hparams",
".",
"add_hparam",
"(",
"\"outer_batch_size\"",
",",
"0",
")",
"# TODO(noam): file a bug",
"hparams",
".",
"add_hparam",
"(",
"\"reshape_logits_hack\"",
",",
"False",
")",
"hparams",
".",
"add_hparam",
"(",
"\"compression_factor\"",
",",
"4",
")",
"return",
"hparams"
] |
Set of hyperparameters.
|
[
"Set",
"of",
"hyperparameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer.py#L791-L883
|
train
|
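The hparams above cap the beam-search decode length at min(max_length, decode_length_multiplier * input_length + decode_length_constant). A minimal sketch of that arithmetic, assuming a hypothetical max_length of 256 (the real value comes from the base hparams, which are not part of this record):

def beam_decode_length(input_length, max_length=256,
                       decode_length_multiplier=1.5,
                       decode_length_constant=10.0):
  # Mirrors the decode-length comment in the hparams above.
  return min(max_length,
             int(decode_length_multiplier * input_length
                 + decode_length_constant))

print(beam_decode_length(20))   # min(256, 1.5 * 20 + 10) -> 40
print(beam_decode_length(500))  # capped at max_length -> 256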
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_transformer.py
|
mtf_transformer_tiny
|
def mtf_transformer_tiny():
"""Catch bugs locally..."""
hparams = mtf_transformer_base()
hparams.d_model = 128
hparams.d_ff = 512
hparams.batch_size = 8
hparams.encoder_layers = ["att", "drd"] * 2
hparams.decoder_layers = ["att", "enc_att", "drd"] * 2
hparams.num_heads = 8
# data parallelism and model-parallelism
hparams.mesh_shape = "batch:2;model:4"
hparams.activation_dtype = "float32"
return hparams
|
python
|
def mtf_transformer_tiny():
"""Catch bugs locally..."""
hparams = mtf_transformer_base()
hparams.d_model = 128
hparams.d_ff = 512
hparams.batch_size = 8
hparams.encoder_layers = ["att", "drd"] * 2
hparams.decoder_layers = ["att", "enc_att", "drd"] * 2
hparams.num_heads = 8
# data parallelism and model-parallelism
hparams.mesh_shape = "batch:2;model:4"
hparams.activation_dtype = "float32"
return hparams
|
[
"def",
"mtf_transformer_tiny",
"(",
")",
":",
"hparams",
"=",
"mtf_transformer_base",
"(",
")",
"hparams",
".",
"d_model",
"=",
"128",
"hparams",
".",
"d_ff",
"=",
"512",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"encoder_layers",
"=",
"[",
"\"att\"",
",",
"\"drd\"",
"]",
"*",
"2",
"hparams",
".",
"decoder_layers",
"=",
"[",
"\"att\"",
",",
"\"enc_att\"",
",",
"\"drd\"",
"]",
"*",
"2",
"hparams",
".",
"num_heads",
"=",
"8",
"# data parallelism and model-parallelism",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:2;model:4\"",
"hparams",
".",
"activation_dtype",
"=",
"\"float32\"",
"return",
"hparams"
] |
Catch bugs locally...
|
[
"Catch",
"bugs",
"locally",
"..."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer.py#L897-L909
|
train
|
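The mesh_shape string "batch:2;model:4" above requests 2-way data parallelism and 4-way model parallelism, i.e. an 8-core mesh. A hypothetical parser for this string format, shown only to make the convention concrete (this is not the Mesh TensorFlow API):

def parse_mesh_shape(mesh_shape):
  # "batch:2;model:4" -> [("batch", 2), ("model", 4)]
  dims = []
  for part in mesh_shape.split(";"):
    name, size = part.split(":")
    dims.append((name, int(size)))
  return dims

dims = parse_mesh_shape("batch:2;model:4")
num_cores = 1
for _, size in dims:
  num_cores *= size
print(dims, num_cores)  # [('batch', 2), ('model', 4)] 8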
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_transformer.py
|
mtf_transformer_paper_lm
|
def mtf_transformer_paper_lm(size):
"""Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Results:
size params/10^9 log-ppl(per-token)
-1 0.14 3.209
0 0.22 3.119
1 0.37 3.037
2 0.67 2.969
3 1.28 2.912
4 2.48 2.874
5 4.90 2.871
(to get word-level log-ppl, multiply by 1.1078)
Args:
size: an integer
Returns:
a hparams object
"""
n = 2 ** size
hparams = mtf_transformer_base_lm()
hparams.batch_size = 256
hparams.d_model = 1024
hparams.d_ff = int(8192 * n)
hparams.d_kv = 256
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for languagemodel_lm1b32k_packed = 13600 steps
hparams.learning_rate_decay_steps = 13600
return hparams
|
python
|
def mtf_transformer_paper_lm(size):
"""Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Results:
size params/10^9 log-ppl(per-token)
-1 0.14 3.209
0 0.22 3.119
1 0.37 3.037
2 0.67 2.969
3 1.28 2.912
4 2.48 2.874
5 4.90 2.871
(to get word-level log-ppl, multiply by 1.1078)
Args:
size: an integer
Returns:
a hparams object
"""
n = 2 ** size
hparams = mtf_transformer_base_lm()
hparams.batch_size = 256
hparams.d_model = 1024
hparams.d_ff = int(8192 * n)
hparams.d_kv = 256
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for languagemodel_lm1b32k_packed = 13600 steps
hparams.learning_rate_decay_steps = 13600
return hparams
|
[
"def",
"mtf_transformer_paper_lm",
"(",
"size",
")",
":",
"n",
"=",
"2",
"**",
"size",
"hparams",
"=",
"mtf_transformer_base_lm",
"(",
")",
"hparams",
".",
"batch_size",
"=",
"256",
"hparams",
".",
"d_model",
"=",
"1024",
"hparams",
".",
"d_ff",
"=",
"int",
"(",
"8192",
"*",
"n",
")",
"hparams",
".",
"d_kv",
"=",
"256",
"hparams",
".",
"num_heads",
"=",
"int",
"(",
"8",
"*",
"n",
")",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"False",
"# one epoch for languagemodel_lm1b32k_packed = 13600 steps",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"13600",
"return",
"hparams"
] |
Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Results:
size params/10^9 log-ppl(per-token)
-1 0.14 3.209
0 0.22 3.119
1 0.37 3.037
2 0.67 2.969
3 1.28 2.912
4 2.48 2.874
5 4.90 2.871
(to get word-level log-ppl, multiply by 1.1078)
Args:
size: an integer
Returns:
a hparams object
|
[
"Config",
"for",
"language",
"-",
"model",
"experiments",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer.py#L953-L989
|
train
|
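To make the scaling rule in mtf_transformer_paper_lm concrete: n = 2 ** size, so each increment of size doubles d_ff (8192 * n) and num_heads (8 * n) while d_model and d_kv stay fixed. A small sketch reproducing the dimensions for a few sizes:

def paper_lm_dims(size):
  # Mirrors mtf_transformer_paper_lm: each +1 in size doubles both values.
  n = 2 ** size
  return int(8192 * n), int(8 * n)

for size in (-1, 0, 1, 2):
  print(size, paper_lm_dims(size))
# -1 (4096, 4)
#  0 (8192, 8)
#  1 (16384, 16)
#  2 (32768, 32)
# Word-level log-ppl is the per-token value times 1.1078 (docstring above).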
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_transformer.py
|
mtf_transformer_paper_tr
|
def mtf_transformer_paper_tr(size):
"""Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Args:
size: an integer
Returns:
a hparams object
"""
n = 2 ** size
hparams = mtf_transformer_base()
hparams.label_smoothing = 0.1
hparams.batch_size = 128
hparams.d_model = 1024
hparams.d_ff = int(4096 * n)
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
return hparams
|
python
|
def mtf_transformer_paper_tr(size):
"""Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Args:
size: an integer
Returns:
a hparams object
"""
n = 2 ** size
hparams = mtf_transformer_base()
hparams.label_smoothing = 0.1
hparams.batch_size = 128
hparams.d_model = 1024
hparams.d_ff = int(4096 * n)
hparams.num_heads = int(8 * n)
hparams.shared_embedding_and_softmax_weights = False
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
return hparams
|
[
"def",
"mtf_transformer_paper_tr",
"(",
"size",
")",
":",
"n",
"=",
"2",
"**",
"size",
"hparams",
"=",
"mtf_transformer_base",
"(",
")",
"hparams",
".",
"label_smoothing",
"=",
"0.1",
"hparams",
".",
"batch_size",
"=",
"128",
"hparams",
".",
"d_model",
"=",
"1024",
"hparams",
".",
"d_ff",
"=",
"int",
"(",
"4096",
"*",
"n",
")",
"hparams",
".",
"num_heads",
"=",
"int",
"(",
"8",
"*",
"n",
")",
"hparams",
".",
"shared_embedding_and_softmax_weights",
"=",
"False",
"# one epoch for translate_enfr_wmt32k_packed = 51400 steps",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"51400",
"return",
"hparams"
] |
Config for translation experiments.
Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs)
The size parameter is an integer that controls the number of heads and the
size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Args:
size: an integer
Returns:
a hparams object
|
[
"Config",
"for",
"translation",
"experiments",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer.py#L1041-L1065
|
train
|
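The schedule arithmetic in these configs is simple but easy to misread: learning_rate_decay_steps is one epoch, and the suggested training length is a whole number of epochs. A sketch of the numbers quoted in the two paper configs above:

# One epoch of translate_enfr_wmt32k_packed is 51400 steps; 3 epochs is the
# ~154000 steps quoted in mtf_transformer_paper_tr.
print(51400 * 3)   # 154200
# One epoch of languagemodel_lm1b32k_packed is 13600 steps; 10 epochs gives
# the 136000 steps quoted in mtf_transformer_paper_lm.
print(13600 * 10)  # 136000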
tensorflow/tensor2tensor
|
tensor2tensor/models/mtf_transformer.py
|
mtf_transformer_lm_baseline
|
def mtf_transformer_lm_baseline():
"""Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
"""
hparams = mtf_transformer_paper_lm(-1)
hparams.batch_size = 128
hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b
hparams.mesh_shape = "batch:8"
return hparams
|
python
|
def mtf_transformer_lm_baseline():
"""Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
"""
hparams = mtf_transformer_paper_lm(-1)
hparams.batch_size = 128
hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b
hparams.mesh_shape = "batch:8"
return hparams
|
[
"def",
"mtf_transformer_lm_baseline",
"(",
")",
":",
"hparams",
"=",
"mtf_transformer_paper_lm",
"(",
"-",
"1",
")",
"hparams",
".",
"batch_size",
"=",
"128",
"hparams",
".",
"learning_rate_decay_steps",
"=",
"27200",
"# one epoch on lm1b",
"hparams",
".",
"mesh_shape",
"=",
"\"batch:8\"",
"return",
"hparams"
] |
Small language model to run on 1 TPU.
Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs)
Results:
params/10^9 log-ppl(per-token)
0.14 3.202
Returns:
a hparams
|
[
"Small",
"language",
"model",
"to",
"run",
"on",
"1",
"TPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/mtf_transformer.py#L1171-L1186
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
multihead_graph_attention
|
def multihead_graph_attention(query_antecedent,
memory_antecedent,
bias,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
dropout_rate,
image_shapes=None,
attention_type="edge_vector",
name="multihead_graph_attention",
save_weights_to=None,
make_image_summary=True,
dropout_broadcast_dims=None,
adjacency_matrix=None,
num_edge_types=5,
vars_3d=False,
**kwargs):
"""Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
attention_type: a string, either "dot_product", "dot_product_relative",
"local_mask_right", "local_unmasked", "masked_dilated_1d",
"unmasked_dilated_1d", graph, or any attention function
with the signature (query, key, value, **kwargs)
name: an optional string.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: an optional tensor of shape [batch, len_q, len_q]
containing edge vectors for attention
num_edge_types: number of edge types, an int
vars_3d: use 3-dimensional variables for input/output transformations
**kwargs (dict): Parameters for the attention function
Returns:
The result of the attention transformation. The output shape is
[batch_size, length_q, output_depth]
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
vars_3d_num_heads = num_heads if vars_3d else None
with tf.variable_scope(
name,
default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
q, k, v = common_attention.compute_qkv(
query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
vars_3d_num_heads=vars_3d_num_heads)
q = common_attention.split_heads(q, num_heads)
k = common_attention.split_heads(k, num_heads)
v = common_attention.split_heads(v, num_heads)
key_depth_per_head = total_key_depth // num_heads
if not vars_3d:
q *= key_depth_per_head**-0.5
additional_returned_value = None
if callable(attention_type): # Generic way to extend multihead_attention
x = attention_type(q, k, v, **kwargs)
if isinstance(x, tuple):
x, additional_returned_value = x # Unpack
elif attention_type == "edge_vector":
x = graph_attention(
q,
k,
v,
bias,
dropout_rate,
image_shapes,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=dropout_broadcast_dims,
adjacency_matrix=adjacency_matrix,
num_edge_types=num_edge_types)
x = common_attention.combine_heads(x)
# Set last dim specifically.
x.set_shape(x.shape.as_list()[:-1] + [total_value_depth])
if vars_3d:
o_var = tf.get_variable(
"o", [num_heads, total_value_depth // num_heads, output_depth])
o_var = tf.reshape(o_var, [total_value_depth, output_depth])
x = tf.tensordot(x, o_var, axes=1)
else:
x = common_layers.dense(
x, output_depth, use_bias=False, name="output_transform")
if additional_returned_value is not None:
return x, additional_returned_value
return x
|
python
|
def multihead_graph_attention(query_antecedent,
memory_antecedent,
bias,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
dropout_rate,
image_shapes=None,
attention_type="edge_vector",
name="multihead_graph_attention",
save_weights_to=None,
make_image_summary=True,
dropout_broadcast_dims=None,
adjacency_matrix=None,
num_edge_types=5,
vars_3d=False,
**kwargs):
"""Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
attention_type: a string, either "dot_product", "dot_product_relative",
"local_mask_right", "local_unmasked", "masked_dilated_1d",
"unmasked_dilated_1d", graph, or any attention function
with the signature (query, key, value, **kwargs)
name: an optional string.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: an optional tensor of shape [batch, len_q, len_q]
containing edge vectors for attention
num_edge_types: number of edge types, an int
vars_3d: use 3-dimensional variables for input/output transformations
**kwargs (dict): Parameters for the attention function
Returns:
The result of the attention transformation. The output shape is
[batch_size, length_q, output_depth]
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
vars_3d_num_heads = num_heads if vars_3d else None
with tf.variable_scope(
name,
default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
q, k, v = common_attention.compute_qkv(
query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
vars_3d_num_heads=vars_3d_num_heads)
q = common_attention.split_heads(q, num_heads)
k = common_attention.split_heads(k, num_heads)
v = common_attention.split_heads(v, num_heads)
key_depth_per_head = total_key_depth // num_heads
if not vars_3d:
q *= key_depth_per_head**-0.5
additional_returned_value = None
if callable(attention_type): # Generic way to extend multihead_attention
x = attention_type(q, k, v, **kwargs)
if isinstance(x, tuple):
x, additional_returned_value = x # Unpack
elif attention_type == "edge_vector":
x = graph_attention(
q,
k,
v,
bias,
dropout_rate,
image_shapes,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=dropout_broadcast_dims,
adjacency_matrix=adjacency_matrix,
num_edge_types=num_edge_types)
x = common_attention.combine_heads(x)
# Set last dim specifically.
x.set_shape(x.shape.as_list()[:-1] + [total_value_depth])
if vars_3d:
o_var = tf.get_variable(
"o", [num_heads, total_value_depth // num_heads, output_depth])
o_var = tf.reshape(o_var, [total_value_depth, output_depth])
x = tf.tensordot(x, o_var, axes=1)
else:
x = common_layers.dense(
x, output_depth, use_bias=False, name="output_transform")
if additional_returned_value is not None:
return x, additional_returned_value
return x
|
[
"def",
"multihead_graph_attention",
"(",
"query_antecedent",
",",
"memory_antecedent",
",",
"bias",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"output_depth",
",",
"num_heads",
",",
"dropout_rate",
",",
"image_shapes",
"=",
"None",
",",
"attention_type",
"=",
"\"edge_vector\"",
",",
"name",
"=",
"\"multihead_graph_attention\"",
",",
"save_weights_to",
"=",
"None",
",",
"make_image_summary",
"=",
"True",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"adjacency_matrix",
"=",
"None",
",",
"num_edge_types",
"=",
"5",
",",
"vars_3d",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"total_key_depth",
"%",
"num_heads",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Key depth (%d) must be divisible by the number of \"",
"\"attention heads (%d).\"",
"%",
"(",
"total_key_depth",
",",
"num_heads",
")",
")",
"if",
"total_value_depth",
"%",
"num_heads",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Value depth (%d) must be divisible by the number of \"",
"\"attention heads (%d).\"",
"%",
"(",
"total_value_depth",
",",
"num_heads",
")",
")",
"vars_3d_num_heads",
"=",
"num_heads",
"if",
"vars_3d",
"else",
"None",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"multihead_attention\"",
",",
"values",
"=",
"[",
"query_antecedent",
",",
"memory_antecedent",
"]",
")",
":",
"q",
",",
"k",
",",
"v",
"=",
"common_attention",
".",
"compute_qkv",
"(",
"query_antecedent",
",",
"memory_antecedent",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"vars_3d_num_heads",
"=",
"vars_3d_num_heads",
")",
"q",
"=",
"common_attention",
".",
"split_heads",
"(",
"q",
",",
"num_heads",
")",
"k",
"=",
"common_attention",
".",
"split_heads",
"(",
"k",
",",
"num_heads",
")",
"v",
"=",
"common_attention",
".",
"split_heads",
"(",
"v",
",",
"num_heads",
")",
"key_depth_per_head",
"=",
"total_key_depth",
"//",
"num_heads",
"if",
"not",
"vars_3d",
":",
"q",
"*=",
"key_depth_per_head",
"**",
"-",
"0.5",
"additional_returned_value",
"=",
"None",
"if",
"callable",
"(",
"attention_type",
")",
":",
"# Generic way to extend multihead_attention",
"x",
"=",
"attention_type",
"(",
"q",
",",
"k",
",",
"v",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"x",
",",
"tuple",
")",
":",
"x",
",",
"additional_returned_value",
"=",
"x",
"# Unpack",
"elif",
"attention_type",
"==",
"\"edge_vector\"",
":",
"x",
"=",
"graph_attention",
"(",
"q",
",",
"k",
",",
"v",
",",
"bias",
",",
"dropout_rate",
",",
"image_shapes",
",",
"save_weights_to",
"=",
"save_weights_to",
",",
"make_image_summary",
"=",
"make_image_summary",
",",
"dropout_broadcast_dims",
"=",
"dropout_broadcast_dims",
",",
"adjacency_matrix",
"=",
"adjacency_matrix",
",",
"num_edge_types",
"=",
"num_edge_types",
")",
"x",
"=",
"common_attention",
".",
"combine_heads",
"(",
"x",
")",
"# Set last dim specifically.",
"x",
".",
"set_shape",
"(",
"x",
".",
"shape",
".",
"as_list",
"(",
")",
"[",
":",
"-",
"1",
"]",
"+",
"[",
"total_value_depth",
"]",
")",
"if",
"vars_3d",
":",
"o_var",
"=",
"tf",
".",
"get_variable",
"(",
"\"o\"",
",",
"[",
"num_heads",
",",
"total_value_depth",
"//",
"num_heads",
",",
"output_depth",
"]",
")",
"o_var",
"=",
"tf",
".",
"reshape",
"(",
"o_var",
",",
"[",
"total_value_depth",
",",
"output_depth",
"]",
")",
"x",
"=",
"tf",
".",
"tensordot",
"(",
"x",
",",
"o_var",
",",
"axes",
"=",
"1",
")",
"else",
":",
"x",
"=",
"common_layers",
".",
"dense",
"(",
"x",
",",
"output_depth",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"output_transform\"",
")",
"if",
"additional_returned_value",
"is",
"not",
"None",
":",
"return",
"x",
",",
"additional_returned_value",
"return",
"x"
] |
Multihead scaled-dot-product attention with input/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels] or None
bias: bias Tensor (see attention_bias())
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
num_heads: an integer dividing total_key_depth and total_value_depth
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
attention_type: a string, either "dot_product", "dot_product_relative",
"local_mask_right", "local_unmasked", "masked_dilated_1d",
"unmasked_dilated_1d", graph, or any attention function
with the signature (query, key, value, **kwargs)
name: an optional string.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: an optional tensor of shape [batch, len_q, len_q]
containing edge vectors for attention
num_edge_types: number of edge types, an int
vars_3d: use 3-dimensional variables for input/output transformations
**kwargs (dict): Parameters for the attention function
Returns:
The result of the attention transformation. The output shape is
[batch_size, length_q, output_depth]
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
|
[
"Multihead",
"scaled",
"-",
"dot",
"-",
"product",
"attention",
"with",
"input",
"/",
"output",
"transformations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L28-L146
|
train
|
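multihead_graph_attention requires total_key_depth and total_value_depth to be divisible by num_heads because the projected tensors are split head-wise: [batch, length, depth] becomes [batch, num_heads, length, depth // num_heads]. A NumPy sketch of that shape logic (an illustration of what common_attention.split_heads does, not the library implementation):

import numpy as np

def split_heads(x, num_heads):
  # [batch, length, depth] -> [batch, num_heads, length, depth // num_heads]
  batch, length, depth = x.shape
  assert depth % num_heads == 0, "depth must be divisible by num_heads"
  x = x.reshape(batch, length, num_heads, depth // num_heads)
  return x.transpose(0, 2, 1, 3)

x = np.zeros((2, 7, 64))
print(split_heads(x, 8).shape)  # (2, 8, 7, 8)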
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
graph_attention
|
def graph_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
save_weights_to=None,
dropout_broadcast_dims=None,
adjacency_matrix=None,
num_edge_types=5):
"""graph attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: optional matrix of [batch, length, length] ids indicating
edge type
num_edge_types: an int indicating number of edge types
Returns:
A Tensor of shape [batch, heads, length_q, depth_v]
"""
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
if adjacency_matrix is not None:
key_head_depth = common_layers.shape_list(q)[-1]
adjacency_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
key_head_depth,
name=name)
# transposing q to be [batch, length_q, heads, depth_k]
# to allow for matmul with [batch, length_q, length_q, depth_k]
q_t = tf.transpose(q, [0, 2, 1, 3])
adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True)
logits += tf.transpose(adj_logits, [0, 2, 1, 3])
# [batch, num_heads, num_nodes, num_nodes]
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if save_weights_to is not None:
save_weights_to[scope.name] = weights
# dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
common_attention.attention_image_summary(weights, image_shapes)
return tf.matmul(weights, v)
|
python
|
def graph_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
save_weights_to=None,
dropout_broadcast_dims=None,
adjacency_matrix=None,
num_edge_types=5):
"""graph attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: optional matrix of [batch, length, length] ids indicating
edge type
num_edge_types: an int indicating number of edge types
Returns:
A Tensor of shape [batch, heads, length_q, depth_v]
"""
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
if adjacency_matrix is not None:
key_head_depth = common_layers.shape_list(q)[-1]
adjacency_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
key_head_depth,
name=name)
# transposing q to be [batch, length_q, heads, depth_k]
# to allow for matmul with [batch, length_q, length_q, depth_k]
q_t = tf.transpose(q, [0, 2, 1, 3])
adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True)
logits += tf.transpose(adj_logits, [0, 2, 1, 3])
# [batch, num_heads, num_nodes, num_nodes]
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if save_weights_to is not None:
save_weights_to[scope.name] = weights
# dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
common_attention.attention_image_summary(weights, image_shapes)
return tf.matmul(weights, v)
|
[
"def",
"graph_attention",
"(",
"q",
",",
"k",
",",
"v",
",",
"bias",
",",
"dropout_rate",
"=",
"0.0",
",",
"image_shapes",
"=",
"None",
",",
"name",
"=",
"None",
",",
"make_image_summary",
"=",
"True",
",",
"save_weights_to",
"=",
"None",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"adjacency_matrix",
"=",
"None",
",",
"num_edge_types",
"=",
"5",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"dot_product_attention\"",
",",
"values",
"=",
"[",
"q",
",",
"k",
",",
"v",
"]",
")",
"as",
"scope",
":",
"# [batch, num_heads, query_length, memory_length]",
"logits",
"=",
"tf",
".",
"matmul",
"(",
"q",
",",
"k",
",",
"transpose_b",
"=",
"True",
")",
"if",
"adjacency_matrix",
"is",
"not",
"None",
":",
"key_head_depth",
"=",
"common_layers",
".",
"shape_list",
"(",
"q",
")",
"[",
"-",
"1",
"]",
"adjacency_vectors",
"=",
"make_edge_vectors",
"(",
"adjacency_matrix",
",",
"num_edge_types",
",",
"key_head_depth",
",",
"name",
"=",
"name",
")",
"# transposing q to be [batch, length_q, heads, depth_k]",
"# to allow for matmul with [batch, length_q, length_q, depth_k]",
"q_t",
"=",
"tf",
".",
"transpose",
"(",
"q",
",",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
")",
"adj_logits",
"=",
"tf",
".",
"matmul",
"(",
"q_t",
",",
"adjacency_vectors",
",",
"transpose_b",
"=",
"True",
")",
"logits",
"+=",
"tf",
".",
"transpose",
"(",
"adj_logits",
",",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
")",
"# [batch, depth, num_nodes, num_nodes]",
"if",
"bias",
"is",
"not",
"None",
":",
"logits",
"+=",
"bias",
"weights",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
",",
"name",
"=",
"\"attention_weights\"",
")",
"if",
"save_weights_to",
"is",
"not",
"None",
":",
"save_weights_to",
"[",
"scope",
".",
"name",
"]",
"=",
"weights",
"# dropping out the attention links for each of the heads",
"weights",
"=",
"common_layers",
".",
"dropout_with_broadcast_dims",
"(",
"weights",
",",
"1.0",
"-",
"dropout_rate",
",",
"broadcast_dims",
"=",
"dropout_broadcast_dims",
")",
"if",
"common_layers",
".",
"should_generate_summaries",
"(",
")",
"and",
"make_image_summary",
":",
"common_attention",
".",
"attention_image_summary",
"(",
"weights",
",",
"image_shapes",
")",
"return",
"tf",
".",
"matmul",
"(",
"weights",
",",
"v",
")"
] |
graph attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
Saves memory.
adjacency_matrix: optional matrix of [batch, length, length] ids indicating
edge type
num_edge_types: an int indicating number of edge types
Returns:
A Tensor of shape [batch, heads, length_q, depth_v]
|
[
"graph",
"attention",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L185-L248
|
train
|
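The transpose/matmul/transpose sequence around adjacency_vectors in graph_attention is easiest to read as a single contraction: adj_logits[b, h, i, j] = sum_d q[b, h, i, d] * adjacency_vectors[b, i, j, d]. A NumPy sketch with hypothetical sizes:

import numpy as np

batch, heads, length, depth = 2, 4, 5, 8
q = np.random.randn(batch, heads, length, depth)
adjacency_vectors = np.random.randn(batch, length, length, depth)

# Equivalent to: transpose q to [b, i, h, d], matmul against the
# [b, i, j, d] edge vectors with transpose_b=True, transpose back.
adj_logits = np.einsum("bhid,bijd->bhij", q, adjacency_vectors)
print(adj_logits.shape)  # (2, 4, 5, 5): [batch, heads, length_q, length_q]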
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
_compute_edge_transforms
|
def _compute_edge_transforms(node_states,
depth,
num_transforms,
name="transform"):
"""Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T).
name: A name for the function.
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
"""
node_shapes = common_layers.shape_list(node_states)
x = common_layers.dense(
node_states,
depth * num_transforms,
use_bias=False,
name=name)
batch = node_shapes[0] # B.
length = node_shapes[1] # N.
# Making the fourth dimension explicit by separating the vectors of size
# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [T, K]
# (in k) and [T, V] (in v).
#
x = tf.reshape(x, [batch, length, num_transforms, depth])
# Flatten out the fourth dimension.
x = tf.reshape(x, [batch, length * num_transforms, depth])
return x
|
python
|
def _compute_edge_transforms(node_states,
depth,
num_transforms,
name="transform"):
"""Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T).
name: A name for the function.
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
"""
node_shapes = common_layers.shape_list(node_states)
x = common_layers.dense(
node_states,
depth * num_transforms,
use_bias=False,
name=name)
batch = node_shapes[0] # B.
length = node_shapes[1] # N.
# Making the fourth dimension explicit by separating the vectors of size
# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [T, K]
# (in k) and [T, V] (in v).
#
x = tf.reshape(x, [batch, length, num_transforms, depth])
# Flatten out the fourth dimension.
x = tf.reshape(x, [batch, length * num_transforms, depth])
return x
|
[
"def",
"_compute_edge_transforms",
"(",
"node_states",
",",
"depth",
",",
"num_transforms",
",",
"name",
"=",
"\"transform\"",
")",
":",
"node_shapes",
"=",
"common_layers",
".",
"shape_list",
"(",
"node_states",
")",
"x",
"=",
"common_layers",
".",
"dense",
"(",
"node_states",
",",
"depth",
"*",
"num_transforms",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"name",
")",
"batch",
"=",
"node_shapes",
"[",
"0",
"]",
"# B.",
"length",
"=",
"node_shapes",
"[",
"1",
"]",
"# N.",
"# Making the fourth dimension explicit by separating the vectors of size",
"# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [K, T]",
"# (in k) and [V, T] in v.",
"#",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"batch",
",",
"length",
",",
"num_transforms",
",",
"depth",
"]",
")",
"# Flatten out the fourth dimension.",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"batch",
",",
"length",
"*",
"num_transforms",
",",
"depth",
"]",
")",
"return",
"x"
] |
Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T).
name: A name for the function.
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
|
[
"Helper",
"function",
"that",
"computes",
"transformation",
"for",
"keys",
"and",
"values",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L251-L301
|
train
|
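The two reshapes at the end of _compute_edge_transforms lay the transforms out so that row n * T + t of the [B, N*T, K] result holds node n's transform under edge type t. A small NumPy sketch of that layout, with hypothetical sizes:

import numpy as np

B, N, T, K = 1, 3, 2, 4
x = np.arange(B * N * T * K).reshape(B, N, T * K)  # layout of the dense() output

x = x.reshape(B, N, T, K)   # make the transform axis explicit
x = x.reshape(B, N * T, K)  # merge the node and transform axes
print(x.shape)              # (1, 6, 4)
print(x[0, 1])              # node 0, edge type 1 -> [4 5 6 7]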
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
compute_mpnn_qkv
|
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: an integer specifying the number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
"""
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
# corresponding node might have, meaning k will have shape [B, N, K*T].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v
|
python
|
def compute_mpnn_qkv(node_states,
total_key_depth,
total_value_depth,
num_transforms):
"""Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: an integer specifying the number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
"""
# node_states is initially a tensor with shape [B, N, D]. The call to dense
# creates a D x K kernel that serves as a fully-connected layer.
#
# For each possible batch b and node n in the first two dimensions of
# node_states, the corresponding size-D vector (the third dimension of
# node_states) is the hidden state for node n in batch b. Each of these size-D
# vectors is multiplied by the kernel to produce an attention query of size K.
# The result is a tensor of size [B, N, K] containing the attention queries
# for each node in each batch.
q = common_layers.dense(
node_states, total_key_depth, use_bias=False, name="q_mpnn")
# Creates the attention keys in a manner similar to the process of creating
# the attention queries. One key is created for each type of outgoing edge the
# corresponding node might have, meaning k will have shape [B, N, K*T].
k = _compute_edge_transforms(node_states,
total_key_depth,
num_transforms,
name="k_mpnn")
v = _compute_edge_transforms(node_states,
total_value_depth,
num_transforms,
name="v_mpnn")
return q, k, v
|
[
"def",
"compute_mpnn_qkv",
"(",
"node_states",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"num_transforms",
")",
":",
"# node_states is initially a tensor with shape [B, N, D]. The call to dense",
"# creates a D x K kernel that serves as a fully-connected layer.",
"#",
"# For each possible batch b and node n in the first two dimensions of",
"# node_states, the corresponding size-D vector (the third dimension of",
"# node_states) is the hidden state for node n in batch b. Each of these size-D",
"# vectors is multiplied by the kernel to produce an attention query of size K.",
"# The result is a tensor of size [B, N, K] containing the attention queries",
"# for each node in each batch.",
"q",
"=",
"common_layers",
".",
"dense",
"(",
"node_states",
",",
"total_key_depth",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"q_mpnn\"",
")",
"# Creates the attention keys in a manner similar to the process of creating",
"# the attention queries. One key is created for each type of outgoing edge the",
"# corresponding node might have, meaning k will have shape [B, N, K*T].",
"k",
"=",
"_compute_edge_transforms",
"(",
"node_states",
",",
"total_key_depth",
",",
"num_transforms",
",",
"name",
"=",
"\"k_mpnn\"",
")",
"v",
"=",
"_compute_edge_transforms",
"(",
"node_states",
",",
"total_value_depth",
",",
"num_transforms",
",",
"name",
"=",
"\"v_mpnn\"",
")",
"return",
"q",
",",
"k",
",",
"v"
] |
Computes query, key and value for edge matrices.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the queries, keys, and values for attention.
* For each node N_i in the graph, a query Q_i of size K is computed. This
query is used to determine the relative weights to give to each of the
node's incoming edges.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A Tensor with shape [B, N, D].
total_key_depth: an integer (K).
total_value_depth: an integer (V).
num_transforms: an integer specifying the number of transforms (T). This is
typically the number of edge types.
Returns:
q: The attention queries for each destination node (shape [B, N, K]).
k: The attention keys for each node and edge type (shape [B, N*T, K]).
v: The attention values for each node and edge type (shape [B, N*T, V]).
|
[
"Computes",
"query",
"key",
"and",
"value",
"for",
"edge",
"matrices",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L304-L364
|
train
|
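A quick shape check for compute_mpnn_qkv, substituting random matrices for the learned common_layers.dense kernels (all sizes hypothetical):

import numpy as np

B, N, D, K, V, T = 2, 5, 16, 8, 12, 3
node_states = np.random.randn(B, N, D)

q = node_states @ np.random.randn(D, K)                             # [B, N, K]
k = (node_states @ np.random.randn(D, K * T)).reshape(B, N * T, K)  # [B, N*T, K]
v = (node_states @ np.random.randn(D, V * T)).reshape(B, N * T, V)  # [B, N*T, V]
print(q.shape, k.shape, v.shape)  # (2, 5, 8) (2, 15, 8) (2, 15, 12)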
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
sparse_message_pass_batched
|
def sparse_message_pass_batched(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn_batched"):
"""Identical to sparse_ggnn except that each input has a batch dimension.
B = The batch size.
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape: [B, N, H]
adjacency_matrices: Adjacency matrices of directed edges for each edge
type and batch. Shape: [B, N, N, T] (sparse).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden layer. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one round of message-passing of shape [B, N, H].
"""
b, n = tf.shape(node_states)[0], tf.shape(node_states)[1]
# Flatten the batch dimension of the node states.
node_states = tf.reshape(node_states, [b*n, hidden_size])
# Flatten the batch dimension of the adjacency matrices.
indices = adjacency_matrices.indices
new_index2 = indices[:, 3] # The edge type dimension.
# Offset N x N adjacency matrix by the batch number in which it appears.
new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64)
new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64)
# Combine these indices as triples.
new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1)
# Build the new sparse matrix.
new_shape = [tf.cast(b*n, tf.int64), tf.cast(b*n, tf.int64), num_edge_types]
adjacency_matrices = tf.SparseTensor(indices=new_indices,
values=adjacency_matrices.values,
dense_shape=new_shape)
# Run a message-passing step and return the result with the batch dimension.
node_states = sparse_message_pass(
node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=use_bias,
average_aggregation=average_aggregation,
name=name)
return tf.reshape(node_states, [b, n, hidden_size])
|
python
|
def sparse_message_pass_batched(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn_batched"):
"""Identical to sparse_ggnn except that each input has a batch dimension.
B = The batch size.
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape: [B, N, H]
adjacency_matrices: Adjacency matrices of directed edges for each edge
type and batch. Shape: [B, N, N, T] (sparse).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden layer. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one round of message-passing of shape [B, N, H].
"""
b, n = tf.shape(node_states)[0], tf.shape(node_states)[1]
# Flatten the batch dimension of the node states.
node_states = tf.reshape(node_states, [b*n, hidden_size])
# Flatten the batch dimension of the adjacency matrices.
indices = adjacency_matrices.indices
new_index2 = indices[:, 3] # The edge type dimension.
# Offset N x N adjacency matrix by the batch number in which it appears.
new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64)
new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64)
# Combine these indices as triples.
new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1)
# Build the new sparse matrix.
new_shape = [tf.cast(b*n, tf.int64), tf.cast(b*n, tf.int64), num_edge_types]
adjacency_matrices = tf.SparseTensor(indices=new_indices,
values=adjacency_matrices.values,
dense_shape=new_shape)
# Run a message-passing step and return the result with the batch dimension.
node_states = sparse_message_pass(
node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=use_bias,
average_aggregation=average_aggregation,
name=name)
return tf.reshape(node_states, [b, n, hidden_size])
|
[
"def",
"sparse_message_pass_batched",
"(",
"node_states",
",",
"adjacency_matrices",
",",
"num_edge_types",
",",
"hidden_size",
",",
"use_bias",
"=",
"True",
",",
"average_aggregation",
"=",
"False",
",",
"name",
"=",
"\"sparse_ggnn_batched\"",
")",
":",
"b",
",",
"n",
"=",
"tf",
".",
"shape",
"(",
"node_states",
")",
"[",
"0",
"]",
",",
"tf",
".",
"shape",
"(",
"node_states",
")",
"[",
"1",
"]",
"# Flatten the batch dimension of the node states.",
"node_states",
"=",
"tf",
".",
"reshape",
"(",
"node_states",
",",
"[",
"b",
"*",
"n",
",",
"hidden_size",
"]",
")",
"# Flatten the batch dimension of the adjacency matrices.",
"indices",
"=",
"adjacency_matrices",
".",
"indices",
"new_index2",
"=",
"indices",
"[",
":",
",",
"3",
"]",
"# The edge type dimension.",
"# Offset N x N adjacency matrix by the batch number in which it appears.",
"new_index0",
"=",
"indices",
"[",
":",
",",
"1",
"]",
"+",
"indices",
"[",
":",
",",
"0",
"]",
"*",
"tf",
".",
"cast",
"(",
"n",
",",
"tf",
".",
"int64",
")",
"new_index1",
"=",
"indices",
"[",
":",
",",
"2",
"]",
"+",
"indices",
"[",
":",
",",
"0",
"]",
"*",
"tf",
".",
"cast",
"(",
"n",
",",
"tf",
".",
"int64",
")",
"# Combine these indices as triples.",
"new_indices",
"=",
"tf",
".",
"stack",
"(",
"[",
"new_index0",
",",
"new_index1",
",",
"new_index2",
"]",
",",
"axis",
"=",
"1",
")",
"# Build the new sparse matrix.",
"new_shape",
"=",
"[",
"tf",
".",
"cast",
"(",
"b",
"*",
"n",
",",
"tf",
".",
"int64",
")",
",",
"tf",
".",
"cast",
"(",
"b",
"*",
"n",
",",
"tf",
".",
"int64",
")",
",",
"num_edge_types",
"]",
"adjacency_matrices",
"=",
"tf",
".",
"SparseTensor",
"(",
"indices",
"=",
"new_indices",
",",
"values",
"=",
"adjacency_matrices",
".",
"values",
",",
"dense_shape",
"=",
"new_shape",
")",
"# Run a message-passing step and return the result with the batch dimension.",
"node_states",
"=",
"sparse_message_pass",
"(",
"node_states",
",",
"adjacency_matrices",
",",
"num_edge_types",
",",
"hidden_size",
",",
"use_bias",
"=",
"use_bias",
",",
"average_aggregation",
"=",
"average_aggregation",
",",
"name",
"=",
"name",
")",
"return",
"tf",
".",
"reshape",
"(",
"node_states",
",",
"[",
"b",
",",
"n",
",",
"hidden_size",
"]",
")"
] |
Identical to sparse_ggnn except that each input has a batch dimension.
B = The batch size.
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape: [B, N, H]
adjacency_matrices: Adjacency matrices of directed edges for each edge
type and batch. Shape: [B, N, N, T] (sparse).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden layer. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one round of message-passing of shape [B, N, H].
|
[
"Identical",
"to",
"sparse_ggnn",
"except",
"that",
"each",
"input",
"has",
"a",
"batch",
"dimension",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L367-L428
|
train
|
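The index arithmetic in sparse_message_pass_batched folds the batch axis into the node axis: a nonzero entry (b, i, j, t) of the [B, N, N, T] adjacency tensor becomes (b*N + i, b*N + j, t) in the flattened [B*N, B*N, T] tensor. A NumPy sketch of that re-indexing with made-up indices:

import numpy as np

n = 4  # nodes per batch element
# One row per nonzero entry: (batch, source, target, edge_type).
indices = np.array([[0, 1, 2, 0],
                    [1, 3, 0, 1]], dtype=np.int64)

new_index0 = indices[:, 1] + indices[:, 0] * n
new_index1 = indices[:, 2] + indices[:, 0] * n
new_index2 = indices[:, 3]
print(np.stack([new_index0, new_index1, new_index2], axis=1))
# [[1 2 0]
#  [7 4 1]]  e.g. node (b=1, i=3) becomes row 1*4 + 3 = 7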
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
sparse_message_pass
|
def sparse_message_pass(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn"):
"""One message-passing step for a GNN with a sparse adjacency matrix.
Implements equation 2 (the message passing step) in
[Li et al. 2015](https://arxiv.org/abs/1511.05493).
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape is [N, H].
adjacency_matrices: Adjacency matrix of directed edges for each edge
type. Shape is [N, N, T] (sparse tensor).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden state. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one step of Gated Graph Neural Network (GGNN) message passing.
Shape: [N, H]
"""
n = tf.shape(node_states)[0]
t = num_edge_types
incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1)
# Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency
# matrix for each edge type. Since sparse tensor multiplication only supports
# two-dimensional tensors, we actually convert the adjacency matrix into a
# [T * N, N] tensor.
adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1])
adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n])
# Multiply the adjacency matrix by the node states, producing a [T * N, H]
# tensor. For each (edge type, node) pair, this tensor stores the sum of
# the hidden states of the node's neighbors over incoming edges of that type.
messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states)
# Rearrange this tensor to have shape [N, T * H]. The incoming states of each
# node's neighbors are summed by edge type and then concatenated together into
# a single T * H vector.
messages = tf.reshape(messages, [t, n, hidden_size])
messages = tf.transpose(messages, [1, 0, 2])
messages = tf.reshape(messages, [n, t * hidden_size])
# Run each of those T * H vectors through a linear layer that produces
# a vector of size H. This process is equivalent to running each H-sized
# vector through a separate linear layer for each edge type and then adding
# the results together.
#
# Note that, earlier on, we added together all of the states of neighbors
# that were connected by edges of the same edge type. Since addition and
# multiplying by a linear layer are commutative, this process was equivalent
# to running each incoming edge through a linear layer separately and then
# adding everything at the end.
with tf.variable_scope(name, default_name="sparse_ggnn"):
final_node_states = common_layers.dense(
messages, hidden_size, use_bias=False)
# Multiply the bias for each edge type by the number of incoming nodes
# of that edge type.
if use_bias:
bias = tf.get_variable("bias", initializer=tf.zeros([t, hidden_size]))
final_node_states += tf.matmul(incoming_edges_per_type, bias)
if average_aggregation:
incoming_edges = tf.reduce_sum(incoming_edges_per_type, -1, keepdims=True)
incoming_edges = tf.tile(incoming_edges, [1, hidden_size])
final_node_states /= incoming_edges + 1e-7
return tf.reshape(final_node_states, [n, hidden_size])
|
python
|
def sparse_message_pass(node_states,
adjacency_matrices,
num_edge_types,
hidden_size,
use_bias=True,
average_aggregation=False,
name="sparse_ggnn"):
"""One message-passing step for a GNN with a sparse adjacency matrix.
Implements equation 2 (the message passing step) in
[Li et al. 2015](https://arxiv.org/abs/1511.05493).
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape is [N, H].
adjacency_matrices: Adjacency matrix of directed edges for each edge
type. Shape is [N, N, T] (sparse tensor).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden state. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one step of Gated Graph Neural Network (GGNN) message passing.
Shape: [N, H]
"""
n = tf.shape(node_states)[0]
t = num_edge_types
incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1)
# Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency
# matrix for each edge type. Since sparse tensor multiplication only supports
# two-dimensional tensors, we actually convert the adjacency matrix into a
# [T * N, N] tensor.
adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1])
adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n])
# Multiply the adjacency matrix by the node states, producing a [T * N, H]
# tensor. For each (edge type, node) pair, this tensor stores the sum of
# the hidden states of the node's neighbors over incoming edges of that type.
messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states)
# Rearrange this tensor to have shape [N, T * H]. The incoming states of each
# node's neighbors are summed by edge type and then concatenated together into
# a single T * H vector.
messages = tf.reshape(messages, [t, n, hidden_size])
messages = tf.transpose(messages, [1, 0, 2])
messages = tf.reshape(messages, [n, t * hidden_size])
# Run each of those T * H vectors through a linear layer that produces
# a vector of size H. This process is equivalent to running each H-sized
# vector through a separate linear layer for each edge type and then adding
# the results together.
#
# Note that, earlier on, we added together all of the states of neighbors
# that were connected by edges of the same edge type. Since addition and
# multiplying by a linear layer are commutative, this process was equivalent
# to running each incoming edge through a linear layer separately and then
# adding everything at the end.
with tf.variable_scope(name, default_name="sparse_ggnn"):
final_node_states = common_layers.dense(
messages, hidden_size, use_bias=False)
# Multiply the bias for each edge type by the number of incoming nodes
# of that edge type.
if use_bias:
bias = tf.get_variable("bias", initializer=tf.zeros([t, hidden_size]))
final_node_states += tf.matmul(incoming_edges_per_type, bias)
if average_aggregation:
incoming_edges = tf.reduce_sum(incoming_edges_per_type, -1, keepdims=True)
incoming_edges = tf.tile(incoming_edges, [1, hidden_size])
final_node_states /= incoming_edges + 1e-7
return tf.reshape(final_node_states, [n, hidden_size])
|
[
"def",
"sparse_message_pass",
"(",
"node_states",
",",
"adjacency_matrices",
",",
"num_edge_types",
",",
"hidden_size",
",",
"use_bias",
"=",
"True",
",",
"average_aggregation",
"=",
"False",
",",
"name",
"=",
"\"sparse_ggnn\"",
")",
":",
"n",
"=",
"tf",
".",
"shape",
"(",
"node_states",
")",
"[",
"0",
"]",
"t",
"=",
"num_edge_types",
"incoming_edges_per_type",
"=",
"tf",
".",
"sparse_reduce_sum",
"(",
"adjacency_matrices",
",",
"axis",
"=",
"1",
")",
"# Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency",
"# matrix for each edge type. Since sparse tensor multiplication only supports",
"# two-dimensional tensors, we actually convert the adjacency matrix into a",
"# [T * N, N] tensor.",
"adjacency_matrices",
"=",
"tf",
".",
"sparse_transpose",
"(",
"adjacency_matrices",
",",
"[",
"2",
",",
"0",
",",
"1",
"]",
")",
"adjacency_matrices",
"=",
"tf",
".",
"sparse_reshape",
"(",
"adjacency_matrices",
",",
"[",
"t",
"*",
"n",
",",
"n",
"]",
")",
"# Multiply the adjacency matrix by the node states, producing a [T * N, H]",
"# tensor. For each (edge type, node) pair, this tensor stores the sum of",
"# the hidden states of the node's neighbors over incoming edges of that type.",
"messages",
"=",
"tf",
".",
"sparse_tensor_dense_matmul",
"(",
"adjacency_matrices",
",",
"node_states",
")",
"# Rearrange this tensor to have shape [N, T * H]. The incoming states of each",
"# nodes neighbors are summed by edge type and then concatenated together into",
"# a single T * H vector.",
"messages",
"=",
"tf",
".",
"reshape",
"(",
"messages",
",",
"[",
"t",
",",
"n",
",",
"hidden_size",
"]",
")",
"messages",
"=",
"tf",
".",
"transpose",
"(",
"messages",
",",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"messages",
"=",
"tf",
".",
"reshape",
"(",
"messages",
",",
"[",
"n",
",",
"t",
"*",
"hidden_size",
"]",
")",
"# Run each of those T * H vectors through a linear layer that produces",
"# a vector of size H. This process is equivalent to running each H-sized",
"# vector through a separate linear layer for each edge type and then adding",
"# the results together.",
"#",
"# Note that, earlier on, we added together all of the states of neighbors",
"# that were connected by edges of the same edge type. Since addition and",
"# multiplying by a linear layer are commutative, this process was equivalent",
"# to running each incoming edge through a linear layer separately and then",
"# adding everything at the end.",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"sparse_ggnn\"",
")",
":",
"final_node_states",
"=",
"common_layers",
".",
"dense",
"(",
"messages",
",",
"hidden_size",
",",
"use_bias",
"=",
"False",
")",
"# Multiply the bias by for each edge type by the number of incoming nodes",
"# of that edge type.",
"if",
"use_bias",
":",
"bias",
"=",
"tf",
".",
"get_variable",
"(",
"\"bias\"",
",",
"initializer",
"=",
"tf",
".",
"zeros",
"(",
"[",
"t",
",",
"hidden_size",
"]",
")",
")",
"final_node_states",
"+=",
"tf",
".",
"matmul",
"(",
"incoming_edges_per_type",
",",
"bias",
")",
"if",
"average_aggregation",
":",
"incoming_edges",
"=",
"tf",
".",
"reduce_sum",
"(",
"incoming_edges_per_type",
",",
"-",
"1",
",",
"keepdims",
"=",
"True",
")",
"incoming_edges",
"=",
"tf",
".",
"tile",
"(",
"incoming_edges",
",",
"[",
"1",
",",
"hidden_size",
"]",
")",
"final_node_states",
"/=",
"incoming_edges",
"+",
"1e-7",
"return",
"tf",
".",
"reshape",
"(",
"final_node_states",
",",
"[",
"n",
",",
"hidden_size",
"]",
")"
] |
One message-passing step for a GNN with a sparse adjacency matrix.
Implements equation 2 (the message passing step) in
[Li et al. 2015](https://arxiv.org/abs/1511.05493).
N = The number of nodes in each batch.
H = The size of the hidden states.
T = The number of edge types.
Args:
node_states: Initial states of each node in the graph. Shape is [N, H].
adjacency_matrices: Adjacency matrix of directed edges for each edge
type. Shape is [N, N, T] (sparse tensor).
num_edge_types: The number of edge types. T.
hidden_size: The size of the hidden state. H.
use_bias: Whether to use bias in the hidden layer.
average_aggregation: How to aggregate the incoming node messages. If
average_aggregation is true, the messages are averaged. If it is false,
they are summed.
name: (optional) The scope within which tf variables should be created.
Returns:
The result of one step of Gated Graph Neural Network (GGNN) message passing.
Shape: [N, H]
|
[
"One",
"message",
"-",
"passing",
"step",
"for",
"a",
"GNN",
"with",
"a",
"sparse",
"adjacency",
"matrix",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L431-L511
|
train
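
A dense NumPy sketch of one such step under toy shapes; plain arrays stand in for the sparse tensors, and random matrices stand in for the learned layer and bias:

import numpy as np

N, H, T = 4, 3, 2
rng = np.random.default_rng(1)
node_states = rng.normal(size=(N, H))                          # [N, H]
adjacency = rng.integers(0, 2, size=(N, N, T)).astype(float)   # dense stand-in, [N, N, T]
W = rng.normal(size=(T * H, H))                                # shared output layer
bias = rng.normal(size=(T, H))                                 # per-edge-type bias

# Sum neighbor states per edge type: [T, N, N] @ [N, H] -> [T, N, H].
messages = adjacency.transpose(2, 0, 1) @ node_states

# Concatenate the per-type sums into one T*H vector per node, then project.
messages = messages.transpose(1, 0, 2).reshape(N, T * H)
incoming_per_type = adjacency.sum(axis=1)                      # [N, T]
final = messages @ W + incoming_per_type @ bias                # [N, H]
print(final.shape)  # (4, 3)
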
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
multihead_mpnn_attention
|
def multihead_mpnn_attention(node_states,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
adjacency_matrix=None,
num_edge_types=5,
num_transforms=None,
use_weighted_sum=False,
name="mpnn_attention"):
"""Multihead scaled-dot-product attention with input/output transformations.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let O be the size of the attention output (output_depth).
Let H be the number of heads (num_heads).
Let T be the total number of transforms (num_transforms).
The key and value depths are split across all of the heads. For example, if
the key depth is 6 and there are three heads, then the key for each head has
depth 2.
Args:
node_states: A Tensor with shape [B, N, D]
total_key_depth: An integer (K).
total_value_depth: An integer (V).
output_depth: An integer (O).
num_heads: An integer (H).
adjacency_matrix: A Tensor of ints with shape [B, T, N, N]. If there is an
edge from node j to node i in batch b, then adjacency_matrix[b, i, j]
contains the type of that edge as an integer. Otherwise, it contains 0.
num_edge_types: An integer indicating number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
The result of the attention transformation. The output shape is [B, N, O].
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
with tf.variable_scope(
name, default_name="multihead_mpnn_attention", values=[node_states]):
# If not explicitly set, default num_transforms to num_edge_types.
num_transforms = (
num_edge_types if num_transforms is None else num_transforms)
# Create the query for each node's incoming edges.
# Create the keys/values for each node for each possible outgoing edge type.
q, k, v = compute_mpnn_qkv(
node_states,
total_key_depth,
total_value_depth,
num_transforms)
q_shape = tf.shape(q) # As above, q_shape is [B, N, K].
# Divides each query/key/value into separate heads. Specifically, the
# query/key/value for each (batch, node) pair (i.e., the third dimensions
# of q, k, and v) are broken into H separate pieces. These pieces are used
# as the separate attention heads. The resulting tensors have shape
# [B, H, N, ?/H], where ? = K, K*T or V*T as appropriate.
q = common_attention.split_heads(q, num_heads) # Shape [B, H, N, K/H].
k = common_attention.split_heads(k, num_heads) # Shape [B, H, N, K*T/H].
v = common_attention.split_heads(v, num_heads) # Shape [B, H, N, V*T/H].
key_depth_per_head = total_key_depth // num_heads
# Ensures that the logits don't have too large of a magnitude.
q *= key_depth_per_head**-0.5
# Rearrange the dimensions so that the head is first. This will make
# subsequent steps easier (we loop over the head).
q = tf.transpose(q, [1, 0, 2, 3]) # Shape [H, B, N, K/H].
k = tf.transpose(k, [1, 0, 2, 3]) # Shape [H, B, N, K*T/H].
v = tf.transpose(v, [1, 0, 2, 3]) # Shape [H, B, N, V*T/H].
# Split the keys and values into separate per-edge-type keys and values.
k = tf.reshape(k, [
num_heads, q_shape[0], q_shape[1], num_transforms,
total_key_depth // num_heads
]) # Shape [H, B, N, T, K/H].
k = tf.transpose(k, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, K/H].
v = tf.reshape(v, [
num_heads, q_shape[0], q_shape[1], num_transforms,
total_value_depth // num_heads
]) # Shape [H, B, N, T, V/H].
v = tf.transpose(v, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, V/H].
# Perform attention for each head and combine the results into a list.
# head_outputs stores a list of tensors, each with shape [1, B, N, V/H].
# The last dimension contains the values computed for each attention head.
# Each value was determined by computing attention over all of the
# incoming edges for node n, weighting the incoming values accordingly,
# and adding those weighted values together.
head_outputs = []
for head_id in range(num_heads):
output = dot_product_mpnn_attention(
q[head_id],
k[head_id],
v[head_id],
adjacency_matrix,
num_edge_types,
num_transforms=num_transforms,
use_weighted_sum=use_weighted_sum)
# Store this result in the list of attention results for each head.
# The call to expand_dims gives output shape [1, B, N, V/H], which will
# come in handy when we combine the heads together.
head_outputs.append(tf.expand_dims(output, axis=0))
# Combine the heads together into one tensor and rearrange the dimensions.
x = tf.concat(head_outputs, axis=0) # Shape [H, B, N, V/H].
x = tf.transpose(x, [1, 0, 2, 3]) # Shape [B, H, N, V/H].
# Concatenate the values produced by each head together into one vector.
x = common_attention.combine_heads(x) # Shape [B, N, V].
# A fully-connected linear layer to convert from the value vectors of size V
# to output vectors of length O (the appropriate output length).
x = common_layers.dense(
x, output_depth, use_bias=False, name="output_transform")
return x
|
python
|
def multihead_mpnn_attention(node_states,
total_key_depth,
total_value_depth,
output_depth,
num_heads,
adjacency_matrix=None,
num_edge_types=5,
num_transforms=None,
use_weighted_sum=False,
name="mpnn_attention"):
"""Multihead scaled-dot-product attention with input/output transformations.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let O be the size of the attention output (output_depth).
Let H be the number of heads (num_heads).
Let T be the total number of transforms (num_transforms).
The key and value depths are split across all of the heads. For example, if
the key depth is 6 and there are three heads, then the key for each head has
depth 2.
Args:
node_states: A Tensor with shape [B, N, D]
total_key_depth: An integer (K).
total_value_depth: An integer (V).
output_depth: An integer (O).
num_heads: An integer (H).
adjacency_matrix: A Tensor of ints with shape [B, T, N, N]. If there is an
edge from node j to node i in batch b, then adjacency_matrix[b, i, j]
contains the type of that edge as an integer. Otherwise, it contains 0.
num_edge_types: An integer indicating number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
The result of the attention transformation. The output shape is [B, N, O].
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
with tf.variable_scope(
name, default_name="multihead_mpnn_attention", values=[node_states]):
# If not explicitly set, default num_transforms to num_edge_types.
num_transforms = (
num_edge_types if num_transforms is None else num_transforms)
# Create the query for each node's incoming edges.
# Create the keys/values for each node for each possible outgoing edge type.
q, k, v = compute_mpnn_qkv(
node_states,
total_key_depth,
total_value_depth,
num_transforms)
q_shape = tf.shape(q) # As above, q_shape is [B, N, K].
# Divides each query/key/value into separate heads. Specifically, the
# query/key/value for each (batch, node) pair (i.e., the third dimensions
# of q, k, and v) are broken into H separate pieces. These pieces are used
# as the separate attention heads. The resulting tensors have shape
# [B, H, N, ?/H], where ? = K, K*T or V*T as appropriate.
q = common_attention.split_heads(q, num_heads) # Shape [B, H, N, K/H].
k = common_attention.split_heads(k, num_heads) # Shape [B, H, N, K*T/H].
v = common_attention.split_heads(v, num_heads) # Shape [B, H, N, V*T/H].
key_depth_per_head = total_key_depth // num_heads
# Ensures that the logits don't have too large of a magnitude.
q *= key_depth_per_head**-0.5
# Rearrange the dimensions so that the head is first. This will make
# subsequent steps easier (we loop over the head).
q = tf.transpose(q, [1, 0, 2, 3]) # Shape [H, B, N, K/H].
k = tf.transpose(k, [1, 0, 2, 3]) # Shape [H, B, N, K*T/H].
v = tf.transpose(v, [1, 0, 2, 3]) # Shape [H, B, N, V*T/H].
# Split the keys and values into separate per-edge-type keys and values.
k = tf.reshape(k, [
num_heads, q_shape[0], q_shape[1], num_transforms,
total_key_depth // num_heads
]) # Shape [H, B, N, T, K/H].
k = tf.transpose(k, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, K/H].
v = tf.reshape(v, [
num_heads, q_shape[0], q_shape[1], num_transforms,
total_value_depth // num_heads
]) # Shape [H, B, N, T, V/H].
v = tf.transpose(v, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, V/H].
# Perform attention for each head and combine the results into a list.
# head_outputs stores a list of tensors, each with shape [1, B, N, V/H].
# The last dimension contains the values computed for each attention head.
# Each value was determined by computing attention over all of the
# incoming edges for node n, weighting the incoming values accordingly,
# and adding those weighted values together.
head_outputs = []
for head_id in range(num_heads):
output = dot_product_mpnn_attention(
q[head_id],
k[head_id],
v[head_id],
adjacency_matrix,
num_edge_types,
num_transforms=num_transforms,
use_weighted_sum=use_weighted_sum)
# Store this result in the list of attention results for each head.
# The call to expand_dims gives output shape [1, B, N, V/H], which will
# come in handy when we combine the heads together.
head_outputs.append(tf.expand_dims(output, axis=0))
# Combine the heads together into one tensor and rearrange the dimensions.
x = tf.concat(head_outputs, axis=0) # Shape [H, B, N, V/H].
x = tf.transpose(x, [1, 0, 2, 3]) # Shape [B, H, N, V/H].
# Concatenate the values produced by each head together into one vector.
x = common_attention.combine_heads(x) # Shape [B, N, V].
# A fully-connected linear layer to convert from the value vectors of size V
# to output vectors of length O (the appropriate output length).
x = common_layers.dense(
x, output_depth, use_bias=False, name="output_transform")
return x
|
[
"def",
"multihead_mpnn_attention",
"(",
"node_states",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"output_depth",
",",
"num_heads",
",",
"adjacency_matrix",
"=",
"None",
",",
"num_edge_types",
"=",
"5",
",",
"num_transforms",
"=",
"None",
",",
"use_weighted_sum",
"=",
"False",
",",
"name",
"=",
"\"mpnn_attention\"",
")",
":",
"if",
"total_key_depth",
"%",
"num_heads",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Key depth (%d) must be divisible by the number of \"",
"\"attention heads (%d).\"",
"%",
"(",
"total_key_depth",
",",
"num_heads",
")",
")",
"if",
"total_value_depth",
"%",
"num_heads",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Value depth (%d) must be divisible by the number of \"",
"\"attention heads (%d).\"",
"%",
"(",
"total_value_depth",
",",
"num_heads",
")",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"multihead_mpnn_attention\"",
",",
"values",
"=",
"[",
"node_states",
"]",
")",
":",
"# If not explicitly set, use num_transforms set to num_edge_types.",
"num_transforms",
"=",
"(",
"num_edge_types",
"if",
"num_transforms",
"is",
"None",
"else",
"num_transforms",
")",
"# Create the query for each node's incoming edges.",
"# Create the keys/values for each node for each possible outgoing edge type.",
"q",
",",
"k",
",",
"v",
"=",
"compute_mpnn_qkv",
"(",
"node_states",
",",
"total_key_depth",
",",
"total_value_depth",
",",
"num_transforms",
")",
"q_shape",
"=",
"tf",
".",
"shape",
"(",
"q",
")",
"# As above, q_shape is [B, N, K].",
"# Divides each query/key/value into separate heads. Specifically, the",
"# query/key/value for each (batch, node) pair (i.e., the third dimensions",
"# of q, k, and v) are broken into H separate pieces. These pieces are used",
"# as the separate attention heads. The resulting tensors have shape",
"# [B, H, N, ?/H], where ? = K, K*T or V*T as appropriate.",
"q",
"=",
"common_attention",
".",
"split_heads",
"(",
"q",
",",
"num_heads",
")",
"# Shape [B, H, N, K/H].",
"k",
"=",
"common_attention",
".",
"split_heads",
"(",
"k",
",",
"num_heads",
")",
"# Shape [B, H, N, K*T/H].",
"v",
"=",
"common_attention",
".",
"split_heads",
"(",
"v",
",",
"num_heads",
")",
"# Shape [B, H, N, V*T/H].",
"key_depth_per_head",
"=",
"total_key_depth",
"//",
"num_heads",
"# Ensures that the logits don't have too large of a magnitude.",
"q",
"*=",
"key_depth_per_head",
"**",
"-",
"0.5",
"# Rearrange the dimensions so that the head is first. This will make",
"# subsequent steps easier (we loop over the head).",
"q",
"=",
"tf",
".",
"transpose",
"(",
"q",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"# Shape [H, B, N, K/H].",
"k",
"=",
"tf",
".",
"transpose",
"(",
"k",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"# Shape [H, B, N, K*T/H].",
"v",
"=",
"tf",
".",
"transpose",
"(",
"v",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"# Shape [H, B, N, V*T/H].",
"# Split the keys and values into separate per-edge-type keys and values.",
"k",
"=",
"tf",
".",
"reshape",
"(",
"k",
",",
"[",
"num_heads",
",",
"q_shape",
"[",
"0",
"]",
",",
"q_shape",
"[",
"1",
"]",
",",
"num_transforms",
",",
"total_key_depth",
"//",
"num_heads",
"]",
")",
"# Shape [H, B, N, T, K/H].",
"k",
"=",
"tf",
".",
"transpose",
"(",
"k",
",",
"[",
"0",
",",
"1",
",",
"3",
",",
"2",
",",
"4",
"]",
")",
"# Shape [H, B, T, N, K/H].",
"v",
"=",
"tf",
".",
"reshape",
"(",
"v",
",",
"[",
"num_heads",
",",
"q_shape",
"[",
"0",
"]",
",",
"q_shape",
"[",
"1",
"]",
",",
"num_transforms",
",",
"total_value_depth",
"//",
"num_heads",
"]",
")",
"# Shape [H, B, N, T, V/H].",
"v",
"=",
"tf",
".",
"transpose",
"(",
"v",
",",
"[",
"0",
",",
"1",
",",
"3",
",",
"2",
",",
"4",
"]",
")",
"# Shape [H, B, T, N, V/H].",
"# Perform attention for each head and combine the results into a list.",
"# head_outputs stores a list of tensors, each with shape [1, B, N, V/H].",
"# The last dimension contains the values computed for each attention head.",
"# Each value was determined by computing attention over all of the",
"# incoming edges for node n, weighting the incoming values accordingly,",
"# and adding those weighted values together.",
"head_outputs",
"=",
"[",
"]",
"for",
"head_id",
"in",
"range",
"(",
"num_heads",
")",
":",
"output",
"=",
"dot_product_mpnn_attention",
"(",
"q",
"[",
"head_id",
"]",
",",
"k",
"[",
"head_id",
"]",
",",
"v",
"[",
"head_id",
"]",
",",
"adjacency_matrix",
",",
"num_edge_types",
",",
"num_transforms",
"=",
"num_transforms",
",",
"use_weighted_sum",
"=",
"use_weighted_sum",
")",
"# Store this result in the list of attention results for each head.",
"# The call to expand_dims gives output shape [1, B, N, V/H], which will",
"# come in handy when we combine the heads together.",
"head_outputs",
".",
"append",
"(",
"tf",
".",
"expand_dims",
"(",
"output",
",",
"axis",
"=",
"0",
")",
")",
"# Combine the heads together into one tensor and rearrange the dimensions.",
"x",
"=",
"tf",
".",
"concat",
"(",
"head_outputs",
",",
"axis",
"=",
"0",
")",
"# Shape [H, B, N, V/H].",
"x",
"=",
"tf",
".",
"transpose",
"(",
"x",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
"# Shape [B, H, N, V/H].",
"# Concatenate the values produced by each head together into one vector.",
"x",
"=",
"common_attention",
".",
"combine_heads",
"(",
"x",
")",
"# Shape [B, N, V].",
"# A fully-connected linear layer to convert from the value vectors of size V",
"# to output vectors of length O (the appropriate output length).",
"x",
"=",
"common_layers",
".",
"dense",
"(",
"x",
",",
"output_depth",
",",
"use_bias",
"=",
"False",
",",
"name",
"=",
"\"output_transform\"",
")",
"return",
"x"
] |
Multihead scaled-dot-product attention with input/output transformations.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let O be the size of the attention output (output_depth).
Let H be the number of heads (num_heads).
Let T be the total number of transforms (num_transforms).
The key and value depths are split across all of the heads. For example, if
the key depth is 6 and there are three heads, then the key for each head has
depth 2.
Args:
node_states: A Tensor with shape [B, N, D]
total_key_depth: An integer (K).
total_value_depth: An integer (V).
output_depth: An integer (O).
num_heads: An integer (H).
adjacency_matrix: A Tensor of ints with shape [B, T, N, N]. If there is an
edge from node j to node i in batch b, then adjacency_matrix[b, i, j]
contains the type of that edge as an integer. Otherwise, it contains 0.
num_edge_types: An integer indicating number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
The result of the attention transformation. The output shape is [B, N, O].
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
|
[
"Multihead",
"scaled",
"-",
"dot",
"-",
"product",
"attention",
"with",
"input",
"/",
"output",
"transformations",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L514-L649
|
train
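
The head and edge-type bookkeeping above is a chain of reshapes and transposes; a NumPy sketch with toy shapes, emulating split_heads inline:

import numpy as np

B, N, HEADS, K, T = 2, 5, 2, 8, 3
rng = np.random.default_rng(2)
k = rng.normal(size=(B, N, K * T))       # per-node keys for all T edge types

# split_heads: [B, N, K*T] -> [B, HEADS, N, (K*T)/HEADS].
k = k.reshape(B, N, HEADS, (K * T) // HEADS).transpose(0, 2, 1, 3)

# Head-first, then separate the per-edge-type keys:
# [HEADS, B, N, (K*T)/HEADS] -> [HEADS, B, T, N, K/HEADS].
k = k.transpose(1, 0, 2, 3)
k = k.reshape(HEADS, B, N, T, K // HEADS).transpose(0, 1, 3, 2, 4)
print(k.shape)  # (2, 2, 3, 5, 4)
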
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
dot_product_mpnn_attention
|
def dot_product_mpnn_attention(q,
k,
v,
adjacency_matrix,
num_edge_types,
num_transforms=None,
use_weighted_sum=False,
name=None):
"""Dot product attention with edge vectors.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let K be the size of the attention keys/queries.
Let V be the size of the attention values.
Let T be the total number of transforms (num_transforms).
Args:
q: The query Tensor of shape [B, N, K].
k: The key Tensor of shape [B, T, N, K].
v: The value Tensor of shape [B, T, N, V].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge
from node j to node i in batch b. A standard adjacency matrix will only
have one edge type while a multigraph will have multiple edge types.
num_edge_types: An integer specifying number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
"""
with tf.variable_scope(
name,
default_name="dot_product_mpnn_attention",
values=[q, k, v, adjacency_matrix, num_edge_types]):
# If not explicitly set, default num_transforms to num_edge_types.
num_transforms = (
num_edge_types if num_transforms is None else num_transforms)
if not use_weighted_sum and num_transforms != num_edge_types:
raise ValueError("num_transforms must equal num_edge_types unless "
"use_weighted_sum is True")
# Computes the raw dot-product attention values between each query and
# the corresponding keys it needs to consider.
#
# This operation takes the dot product of (the query for
# each node) and (the key for each node for each possible edge type),
# creating an N x N matrix for each edge type. The entry at index (i, j)
# is the dot-product for the edge from node i to node j of the appropriate
# type. These dot products will eventually become attention weights
# specifying how much node i weights an edge of that type coming from node
# j.
all_edge_logits = tf.matmul(
tf.tile(tf.expand_dims(q, axis=1), [1, num_edge_types, 1, 1]),
k,
transpose_b=True)
# The adjacency matrix assumes there is only one directed edge (i <- j) for
# each pair of nodes. If such an edge exists, it contains the integer
# type of that edge at position (i, j) of the adjacency matrix.
#
# Construct edge_vectors of shape [B, N, N, T].
if use_weighted_sum:
# Use dense representation for edge vectors.
edge_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
num_transforms)
else:
# Generate one-hot vectors based on edge types.
# If there is an edge from node j to node i of type t, then index t of the
# last dimension is 1 for entry (i, j) of the second and third dimensions.
edge_vectors = tf.one_hot(adjacency_matrix, num_transforms)
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(edge_vectors, [0, 3, 1, 2])
# Element-wise multiplies all_edge_logits and edge_vectors.
#
# In other words: all_edge_logits contains N x N matrices of query-key
# products. This element-wise multiplication zeroes out entries that do not
# correspond to actual edges in the graph of the appropriate edge type.
# all_edge_logits retains shape [B, T, N, N].
all_edge_logits *= edge_vectors
# Since there can only be one edge from node A to node B, we can collapse
# the T different adjacency matrices containing key-query pairs into one
# adjacency matrix. logits is [B, N, N].
# TODO(dbieber): Use a reshape instead of reduce sum to attend over all
# edges instead of over all neighboring nodes to handle the multigraph case.
logits = tf.reduce_sum(all_edge_logits, axis=1)
# For pairs of nodes with no edges between them, add a large negative bias
# to each location without an edge so that, after the softmax, those
# locations receive negligible attention weight.
bias = 0
bias = tf.to_float(tf.equal(
tf.reduce_sum(adjacency_matrix, axis=-1), 0)) * -1e9
logits += bias
# Turn the raw key-query products into a probability distribution (or,
# in terms of attention, weights). The softmax is computed across the
# last dimension of logits.
compatibility = tf.nn.softmax(logits) # Shape [B, N, N].
# Computes a summary showing the attention matrix as an image. Does not do
# any work toward actually performing attention.
common_attention.attention_image_summary(
tf.expand_dims(compatibility, axis=1), None)
# Repeats the attention matrix T times for each batch, producing
# a tensor with shape [B, T, N, N] where the [N, N] component is T
# repeats of the values found in compatibility.
edge_compatibility = tf.tile(
tf.expand_dims(compatibility, axis=1), [1, num_edge_types, 1, 1])
# Zeroes out the entries in edge_compatibility that do not correspond to
# actual edges.
edge_compatibility *= edge_vectors # Shape [B, T, N, N].
output = compute_values(edge_compatibility, v)
return output
|
python
|
def dot_product_mpnn_attention(q,
k,
v,
adjacency_matrix,
num_edge_types,
num_transforms=None,
use_weighted_sum=False,
name=None):
"""Dot product attention with edge vectors.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let K be the size of the attention keys/queries.
Let V be the size of the attention values.
Let T be the total number of transforms (num_transforms).
Args:
q: The query Tensor of shape [B, N, K].
k: The key Tensor of shape [B, T, N, K].
v: The value Tensor of shape [B, T, N, V].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge
from node j to node i in batch b. A standard adjacency matrix will only
have one edge type while a multigraph will have multiple edge types.
num_edge_types: An integer specifying number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
"""
with tf.variable_scope(
name,
default_name="dot_product_mpnn_attention",
values=[q, k, v, adjacency_matrix, num_edge_types]):
# If not explicitly set, default num_transforms to num_edge_types.
num_transforms = (
num_edge_types if num_transforms is None else num_transforms)
if not use_weighted_sum and num_transforms != num_edge_types:
raise ValueError("num_transforms must equal num_edge_types unless "
"use_weighted_sum is True")
# Computes the raw dot-product attention values between each query and
# the corresponding keys it needs to consider.
#
# This operation takes the dot product of (the query for
# each node) and (the key for each node for each possible edge type),
# creating an N x N matrix for each edge type. The entry at index (i, j)
# is the dot-product for the edge from node i to node j of the appropriate
# type. These dot products will eventually become attention weights
# specifying how much node i weights an edge of that type coming from node
# j.
all_edge_logits = tf.matmul(
tf.tile(tf.expand_dims(q, axis=1), [1, num_edge_types, 1, 1]),
k,
transpose_b=True)
# The adjacency matrix assumes there is only one directed edge (i <- j) for
# each pair of nodes. If such an edge exists, it contains the integer
# type of that edge at position (i, j) of the adjacency matrix.
#
# Construct edge_vectors of shape [B, N, N, T].
if use_weighted_sum:
# Use dense representation for edge vectors.
edge_vectors = make_edge_vectors(
adjacency_matrix,
num_edge_types,
num_transforms)
else:
# Generate one-hot vectors based on edge types.
# If there is an edge from node j to node i of type t, then index t of the
# last dimension is 1 for entry (i, j) of the second and third dimensions.
edge_vectors = tf.one_hot(adjacency_matrix, num_transforms)
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(edge_vectors, [0, 3, 1, 2])
# Element-wise multiplies all_edge_logits and edge_vectors.
#
# In other words: all_edge_logits contains N x N matrices of query-key
# products. This element-wise multiplication zeroes out entries that do not
# correspond to actual edges in the graph of the appropriate edge type.
# all_edge_logits retains shape [B, T, N, N].
all_edge_logits *= edge_vectors
# Since there can only be one edge from node A to node B, we can collapse
# the T different adjacency matrices containing key-query pairs into one
# adjacency matrix. logits is [B, N, N].
# TODO(dbieber): Use a reshape instead of reduce sum to attend over all
# edges instead of over all neighboring nodes to handle the multigraph case.
logits = tf.reduce_sum(all_edge_logits, axis=1)
# For pairs of nodes with no edges between them, add a large negative bias
# to each location without an edge so that, after the softmax, those
# locations receive negligible attention weight.
bias = 0
bias = tf.to_float(tf.equal(
tf.reduce_sum(adjacency_matrix, axis=-1), 0)) * -1e9
logits += bias
# Turn the raw key-query products into a probability distribution (or,
# in terms of attention, weights). The softmax is computed across the
# last dimension of logits.
compatibility = tf.nn.softmax(logits) # Shape [B, N, N].
# Computes a summary showing the attention matrix as an image. Does not do
# any work toward actually performing attention.
common_attention.attention_image_summary(
tf.expand_dims(compatibility, axis=1), None)
# Repeats the attention matrix T times for each batch, producing
# a tensor with shape [B, T, N, N] where the [N, N] component is T
# repeats of the values found in compatibility.
edge_compatibility = tf.tile(
tf.expand_dims(compatibility, axis=1), [1, num_edge_types, 1, 1])
# Zeroes out the entries in edge_compatibility that do not correspond to
# actual edges.
edge_compatibility *= edge_vectors # Shape [B, T, N, N].
output = compute_values(edge_compatibility, v)
return output
|
[
"def",
"dot_product_mpnn_attention",
"(",
"q",
",",
"k",
",",
"v",
",",
"adjacency_matrix",
",",
"num_edge_types",
",",
"num_transforms",
"=",
"None",
",",
"use_weighted_sum",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"dot_product_mpnn_attention\"",
",",
"values",
"=",
"[",
"q",
",",
"k",
",",
"v",
",",
"adjacency_matrix",
",",
"num_edge_types",
"]",
")",
":",
"# If not explicitly set, use num_transforms set to num_edge_types.",
"num_transforms",
"=",
"(",
"num_edge_types",
"if",
"num_transforms",
"is",
"None",
"else",
"num_transforms",
")",
"if",
"not",
"use_weighted_sum",
"and",
"num_transforms",
"!=",
"num_edge_types",
":",
"raise",
"ValueError",
"(",
"\"num_transforms must equal num_edge_types unless \"",
"\"use_weighted_sum is True\"",
")",
"# Computes the raw dot-product attention values between each query and",
"# the corresponding keys it needs to consider.",
"#",
"# This operation takes the dot product of (the query for",
"# each node) and (the key for each node for each possible edge type),",
"# creating an N x N matrix for each edge type. The entry at index (i, j)",
"# is the dot-product for the edge from node i to node j of the appropriate",
"# type. These dot products will eventually become attention weights",
"# specifying how much node i weights an edge of that type coming from node",
"# j.",
"all_edge_logits",
"=",
"tf",
".",
"matmul",
"(",
"tf",
".",
"tile",
"(",
"tf",
".",
"expand_dims",
"(",
"q",
",",
"axis",
"=",
"1",
")",
",",
"[",
"1",
",",
"num_edge_types",
",",
"1",
",",
"1",
"]",
")",
",",
"k",
",",
"transpose_b",
"=",
"True",
")",
"# The adjacency matrix assumes there is only one directed edge (i <- j) for",
"# each pair of nodes. If such an edge exists, it contains the integer",
"# type of that edge at position (i, j) of the adjacency matrix.",
"#",
"# Construct edge_vectors of shape [B, N, N, T].",
"if",
"use_weighted_sum",
":",
"# Use dense representation for edge vectors.",
"edge_vectors",
"=",
"make_edge_vectors",
"(",
"adjacency_matrix",
",",
"num_edge_types",
",",
"num_transforms",
")",
"else",
":",
"# Generate one-hot vectors based on edge types.",
"# If there is an edge from node j to node i of type t, then index t of the",
"# last dimension is 1 for entry (i, j) of the second and third dimensions.",
"edge_vectors",
"=",
"tf",
".",
"one_hot",
"(",
"adjacency_matrix",
",",
"num_transforms",
")",
"# Rearranging the dimensions to match the shape of all_edge_logits.",
"edge_vectors",
"=",
"tf",
".",
"transpose",
"(",
"edge_vectors",
",",
"[",
"0",
",",
"3",
",",
"1",
",",
"2",
"]",
")",
"# Element-wise multiplies all_edge_logits and edge_vectors.",
"#",
"# In other words: all_edge_logits contains N x N matrices of query-key",
"# products. This element-wise multiplication zeroes out entries that do not",
"# correspond to actual edges in the graph of the appropriate edge type.",
"# all_edge_logits retains shape [B, T, N, N].",
"all_edge_logits",
"*=",
"edge_vectors",
"# Since there can only be one edge from node A to node B, we can collapse",
"# the T different adjacency matrices containing key-query pairs into one",
"# adjacency matrix. logits is [B, N, N].",
"# TODO(dbieber): Use a reshape instead of reduce sum to attend over all",
"# edges instead of over all neighboring nodes to handle the multigraph case.",
"logits",
"=",
"tf",
".",
"reduce_sum",
"(",
"all_edge_logits",
",",
"axis",
"=",
"1",
")",
"# For pairs of nodes with no edges between them, add a large negative bias",
"# to each location without an edge so that the softmax of entries with the",
"# value 0 become a small negative number instead.",
"bias",
"=",
"0",
"bias",
"=",
"tf",
".",
"to_float",
"(",
"tf",
".",
"equal",
"(",
"tf",
".",
"reduce_sum",
"(",
"adjacency_matrix",
",",
"axis",
"=",
"-",
"1",
")",
",",
"0",
")",
")",
"*",
"-",
"1e9",
"logits",
"+=",
"bias",
"# Turn the raw key-query products into a probability distribution (or,",
"# in terms of attention, weights). The softmax is computed across the",
"# last dimension of logits.",
"compatibility",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
")",
"# Shape [B, N, N].",
"# Computes a summary showing the attention matrix as an image. Does not do",
"# any work toward actually performing attention.",
"common_attention",
".",
"attention_image_summary",
"(",
"tf",
".",
"expand_dims",
"(",
"compatibility",
",",
"axis",
"=",
"1",
")",
",",
"None",
")",
"# Repeats the attention matrix T times for each batch, producing",
"# a tensor with shape [B, T, N, N] where the [N, N] component is T",
"# repeats of the values found in compatibility.",
"edge_compatibility",
"=",
"tf",
".",
"tile",
"(",
"tf",
".",
"expand_dims",
"(",
"compatibility",
",",
"axis",
"=",
"1",
")",
",",
"[",
"1",
",",
"num_edge_types",
",",
"1",
",",
"1",
"]",
")",
"# Zeroes out the entries in edge_compatibility that do not correspond to",
"# actual edges.",
"edge_compatibility",
"*=",
"edge_vectors",
"# Shape [B, T, N, N].",
"output",
"=",
"compute_values",
"(",
"edge_compatibility",
",",
"v",
")",
"return",
"output"
] |
Dot product attention with edge vectors.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let K be the size of the attention keys/queries.
Let V be the size of the attention values.
Let T be the total number of transforms (num_transforms).
Args:
q: The query Tensor of shape [B, N, K].
k: The key Tensor of shape [B, T, N, K].
v: The value Tensor of shape [B, T, N, V].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge
from node j to node i in batch b. A standard adjacency matrix will only
have one edge type while a multigraph will have multiple edge types.
num_edge_types: An integer specifying number of edge types.
num_transforms: An integer indicating number of transforms (T). If None,
then num_transforms will be equal to num_edge_types.
use_weighted_sum: If False, will only use a single transform per edge type.
Otherwise, use a learned weighted sum of transforms per edge type.
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
|
[
"Dot",
"product",
"attention",
"with",
"edge",
"vectors",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L652-L783
|
train
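
A dense NumPy sketch of this masked, edge-typed attention on a toy graph; edge_vectors is the one-hot [B, T, N, N] indicator built by hand:

import numpy as np

B, N, K, V, T = 1, 4, 3, 3, 2
rng = np.random.default_rng(3)
q = rng.normal(size=(B, N, K))
k = rng.normal(size=(B, T, N, K))
v = rng.normal(size=(B, T, N, V))

# One-hot edge indicators per type, [B, T, N, N]; entry (t, i, j) marks i <- j.
edge_vectors = np.zeros((B, T, N, N))
edge_vectors[0, 0, 0, 1] = 1.0
edge_vectors[0, 1, 0, 2] = 1.0
edge_vectors[0, 0, 1, 3] = 1.0
edge_vectors[0, 1, 2, 0] = 1.0
edge_vectors[0, 0, 3, 2] = 1.0

# Per-type logits, zeroed wherever that edge type is absent.
all_edge_logits = np.einsum('bnk,btmk->btnm', q, k) * edge_vectors

# Collapse edge types, push no-edge pairs to -1e9, softmax over senders.
logits = all_edge_logits.sum(axis=1)
logits = np.where(edge_vectors.sum(axis=1) > 0, logits, -1e9)
weights = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)

# Re-mask per type and combine the values: sum over T -> [B, N, V].
out = np.einsum('btnm,btmv->btnv', weights[:, None] * edge_vectors, v).sum(axis=1)
print(out.shape)  # (1, 4, 3)
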
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
ggnn_fast_dense
|
def ggnn_fast_dense(node_states,
adjacency_matrix,
num_edge_types,
total_value_depth,
name=None):
"""ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
multigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
"""
# Assumes at most one edge of each type between the same pair of nodes.
# adjacency_matrix will need to be converted to shape [B, T, N, N].
with tf.variable_scope(
name,
default_name="ggnn_fast_dense",
values=[node_states, adjacency_matrix, num_edge_types]):
nodes_shape = common_layers.shape_list(node_states)
v = _compute_edge_transforms(node_states,
total_value_depth,
num_edge_types,
name="v_mpnn")
v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
total_value_depth
]) # Shape [B, N, T, V].
v = tf.transpose(v, [0, 2, 1, 3]) # Shape [B, T, N, V].
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
output = compute_values(edge_vectors, v)
return output
|
python
|
def ggnn_fast_dense(node_states,
adjacency_matrix,
num_edge_types,
total_value_depth,
name=None):
"""ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
multigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
"""
# Assumes at most one edge of each type between the same pair of nodes.
# adjacency_matrix will need to be converted to shape [B, T, N, N].
with tf.variable_scope(
name,
default_name="ggnn_fast_dense",
values=[node_states, adjacency_matrix, num_edge_types]):
nodes_shape = common_layers.shape_list(node_states)
v = _compute_edge_transforms(node_states,
total_value_depth,
num_edge_types,
name="v_mpnn")
v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types,
total_value_depth
]) # Shape [B, N, T, V].
v = tf.transpose(v, [0, 2, 1, 3]) # Shape [B, T, N, V].
# Rearranging the dimensions to match the shape of all_edge_logits.
edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2])
output = compute_values(edge_vectors, v)
return output
|
[
"def",
"ggnn_fast_dense",
"(",
"node_states",
",",
"adjacency_matrix",
",",
"num_edge_types",
",",
"total_value_depth",
",",
"name",
"=",
"None",
")",
":",
"# between the same nodes (with only one edge of each type. adjacency_matrix",
"# will need to be converted to shape [B, T, N, N].",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"ggnn_fast_dense\"",
",",
"values",
"=",
"[",
"node_states",
",",
"adjacency_matrix",
",",
"num_edge_types",
"]",
")",
":",
"nodes_shape",
"=",
"common_layers",
".",
"shape_list",
"(",
"node_states",
")",
"v",
"=",
"_compute_edge_transforms",
"(",
"node_states",
",",
"total_value_depth",
",",
"num_edge_types",
",",
"name",
"=",
"\"v_mpnn\"",
")",
"v",
"=",
"tf",
".",
"reshape",
"(",
"v",
",",
"[",
"nodes_shape",
"[",
"0",
"]",
",",
"nodes_shape",
"[",
"1",
"]",
",",
"num_edge_types",
",",
"total_value_depth",
"]",
")",
"# Shape [B, N, T, V].",
"v",
"=",
"tf",
".",
"transpose",
"(",
"v",
",",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
"]",
")",
"# Shape [B, T, N, V].",
"# Rearranging the dimensions to match the shape of all_edge_logits.",
"edge_vectors",
"=",
"tf",
".",
"transpose",
"(",
"adjacency_matrix",
",",
"[",
"0",
",",
"3",
",",
"1",
",",
"2",
"]",
")",
"output",
"=",
"compute_values",
"(",
"edge_vectors",
",",
"v",
")",
"return",
"output"
] |
ggnn version of the MPNN from Gilmer et al.
Let B be the number of batches.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries.
Let V be the size of the output of the ggnn.
Let T be the number of transforms / edge types.
Args:
node_states: The value Tensor of shape [B, T, N, D].
adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at
indices b, i, j, k is the indicator of the edge from node j to node i in
batch b. A standard adjacency matrix will only have values of one, while a
mutigraph may have larger integer values.
num_edge_types: An integer specifying number of edge types.
total_value_depth: An integer (V)
name: A string.
Returns:
A Tensor of shape [B, N, V] storing the result of computing attention
weights using the queries and keys and combining the values according to
those weights.
Raises:
ValueError: if num_transforms doesn't equal num_edge_types and not using
weighted sum.
|
[
"ggnn",
"version",
"of",
"the",
"MPNN",
"from",
"Gilmer",
"et",
"al",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L786-L837
|
train
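
A NumPy sketch of this GGNN path; a random per-type matrix stands in for _compute_edge_transforms, and adjacency entries act directly as attention weights:

import numpy as np

B, N, D, V, T = 1, 4, 3, 3, 2
rng = np.random.default_rng(4)
node_states = rng.normal(size=(B, N, D))
adjacency = rng.integers(0, 2, size=(B, N, N, T)).astype(float)
W = rng.normal(size=(T, D, V))     # one value transform per edge type

# Per-type value vectors: [B, T, N, V].
v = np.einsum('bnd,tdv->btnv', node_states, W)

# Rearrange adjacency to [B, T, N, N] and aggregate (compute_values).
edge_vectors = adjacency.transpose(0, 3, 1, 2)
output = np.einsum('btnm,btmv->btnv', edge_vectors, v).sum(axis=1)  # [B, N, V]
print(output.shape)  # (1, 4, 3)
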
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
compute_values
|
def compute_values(edge_compatibility, v):
"""Compute values. If edge compatibilities is just adjacency, we get ggnn.
Args:
edge_compatibility: A tensor of shape [batch, num_transforms, length, depth]
v: A tensor of shape [batch, num_transforms, length, depth]
Returns:
output: A [batch, length, depth] tensor
"""
# Computes the incoming value vectors for each node by weighting them
# according to the attention weights. These values are still segregated by
# edge type.
# Shape = [B, T, N, V].
all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)
# Combines the weighted value vectors together across edge types into a
# single N x V matrix for each batch.
output = tf.reduce_sum(all_edge_values, axis=1) # Shape [B, N, V].
return output
|
python
|
def compute_values(edge_compatibility, v):
"""Compute values. If edge compatibilities is just adjacency, we get ggnn.
Args:
edge_compatibility: A tensor of shape [batch, num_transforms, length, depth]
v: A tensor of shape [batch, num_transforms, length, depth]
Returns:
output: A [batch, length, depth] tensor
"""
# Computes the incoming value vectors for each node by weighting them
# according to the attention weights. These values are still segregated by
# edge type.
# Shape = [B, T, N, V].
all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v)
# Combines the weighted value vectors together across edge types into a
# single N x V matrix for each batch.
output = tf.reduce_sum(all_edge_values, axis=1) # Shape [B, N, V].
return output
|
[
"def",
"compute_values",
"(",
"edge_compatibility",
",",
"v",
")",
":",
"# Computes the incoming value vectors for each node by weighting them",
"# according to the attention weights. These values are still segregated by",
"# edge type.",
"# Shape = [B, T, N, V].",
"all_edge_values",
"=",
"tf",
".",
"matmul",
"(",
"tf",
".",
"to_float",
"(",
"edge_compatibility",
")",
",",
"v",
")",
"# Combines the weighted value vectors together across edge types into a",
"# single N x V matrix for each batch.",
"output",
"=",
"tf",
".",
"reduce_sum",
"(",
"all_edge_values",
",",
"axis",
"=",
"1",
")",
"# Shape [B, N, V].",
"return",
"output"
] |
Compute values. If edge compatibility is just adjacency, we get ggnn.
Args:
edge_compatibility: A tensor of shape [batch, num_transforms, length, depth]
v: A tensor of shape [batch, num_transforms, length, depth]
Returns:
output: A [batch, length, depth] tensor
|
[
"Compute",
"values",
".",
"If",
"edge",
"compatibilities",
"is",
"just",
"adjacency",
"we",
"get",
"ggnn",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L840-L860
|
train
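
The body of compute_values in NumPy: a batched matmul over the leading [B, T] dimensions followed by a sum over edge types:

import numpy as np

B, T, N, V = 1, 2, 3, 2
rng = np.random.default_rng(5)
edge_compatibility = rng.random(size=(B, T, N, N))   # attention weights or a 0/1 adjacency
v = rng.normal(size=(B, T, N, V))

all_edge_values = edge_compatibility @ v   # [B, T, N, V]
output = all_edge_values.sum(axis=1)       # [B, N, V]
print(output.shape)  # (1, 3, 2)
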
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
precompute_edge_matrices
|
def precompute_edge_matrices(adjacency, hparams):
"""Precompute the a_in and a_out tensors.
(we don't want to add to the graph every time _fprop is called)
Args:
adjacency: placeholder of real valued vectors of shape [B, L, L, E]
hparams: HParams object
Returns:
edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
viewed as a block matrix (L,L) blocks of size (D,D). Each plot is a function
of the edge vector of the adjacency matrix at that spot.
"""
batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)
# build the edge_network for incoming edges
with tf.variable_scope("edge_network"):
x = tf.reshape(
adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
name="adj_reshape_in")
for ip_layer in range(hparams.edge_network_layers):
name = "edge_network_layer_%d"%ip_layer
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.edge_network_hidden_size,
activation=tf.nn.relu,
name=name)
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.hidden_size**2,
activation=None,
name="edge_network_output")
# x = [batch * l * l, d * d]
edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,
num_nodes, hparams.hidden_size,
hparams.hidden_size])
# reshape to [batch, l * d, l * d]
edge_matrices = tf.reshape(
tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [
-1, num_nodes * hparams.hidden_size,
num_nodes * hparams.hidden_size
],
name="edge_matrices")
return edge_matrices
|
python
|
def precompute_edge_matrices(adjacency, hparams):
"""Precompute the a_in and a_out tensors.
(we don't want to add to the graph every time _fprop is called)
Args:
adjacency: placeholder of real valued vectors of shape [B, L, L, E]
hparams: HParams object
Returns:
edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
viewed as a block matrix (L,L) blocks of size (D,D). Each plot is a function
of the edge vector of the adjacency matrix at that spot.
"""
batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency)
# build the edge_network for incoming edges
with tf.variable_scope("edge_network"):
x = tf.reshape(
adjacency, [batch_size * num_nodes * num_nodes, edge_dim],
name="adj_reshape_in")
for ip_layer in range(hparams.edge_network_layers):
name = "edge_network_layer_%d"%ip_layer
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.edge_network_hidden_size,
activation=tf.nn.relu,
name=name)
x = tf.layers.dense(common_layers.layer_preprocess(x, hparams),
hparams.hidden_size**2,
activation=None,
name="edge_network_output")
# x = [batch * l * l, d * d]
edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes,
num_nodes, hparams.hidden_size,
hparams.hidden_size])
# reshape to [batch, l * d, l * d]
edge_matrices = tf.reshape(
tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [
-1, num_nodes * hparams.hidden_size,
num_nodes * hparams.hidden_size
],
name="edge_matrices")
return edge_matrices
|
[
"def",
"precompute_edge_matrices",
"(",
"adjacency",
",",
"hparams",
")",
":",
"batch_size",
",",
"num_nodes",
",",
"_",
",",
"edge_dim",
"=",
"common_layers",
".",
"shape_list",
"(",
"adjacency",
")",
"# build the edge_network for incoming edges",
"with",
"tf",
".",
"variable_scope",
"(",
"\"edge_network\"",
")",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"adjacency",
",",
"[",
"batch_size",
"*",
"num_nodes",
"*",
"num_nodes",
",",
"edge_dim",
"]",
",",
"name",
"=",
"\"adj_reshape_in\"",
")",
"for",
"ip_layer",
"in",
"range",
"(",
"hparams",
".",
"edge_network_layers",
")",
":",
"name",
"=",
"\"edge_network_layer_%d\"",
"%",
"ip_layer",
"x",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"common_layers",
".",
"layer_preprocess",
"(",
"x",
",",
"hparams",
")",
",",
"hparams",
".",
"edge_network_hidden_size",
",",
"activation",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"name",
"=",
"name",
")",
"x",
"=",
"tf",
".",
"layers",
".",
"dense",
"(",
"common_layers",
".",
"layer_preprocess",
"(",
"x",
",",
"hparams",
")",
",",
"hparams",
".",
"hidden_size",
"**",
"2",
",",
"activation",
"=",
"None",
",",
"name",
"=",
"\"edge_network_output\"",
")",
"# x = [batch * l * l, d *d]",
"edge_matrices_flat",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"batch_size",
",",
"num_nodes",
",",
"num_nodes",
",",
"hparams",
".",
"hidden_size",
",",
"hparams",
".",
"hidden_size",
"]",
")",
"# reshape to [batch, l * d, l *d]",
"edge_matrices",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"transpose",
"(",
"edge_matrices_flat",
",",
"[",
"0",
",",
"1",
",",
"3",
",",
"2",
",",
"4",
"]",
")",
",",
"[",
"-",
"1",
",",
"num_nodes",
"*",
"hparams",
".",
"hidden_size",
",",
"num_nodes",
"*",
"hparams",
".",
"hidden_size",
"]",
",",
"name",
"=",
"\"edge_matrices\"",
")",
"return",
"edge_matrices"
] |
Precompute the a_in and a_out tensors.
(we don't want to add to the graph every time _fprop is called)
Args:
adjacency: placeholder of real valued vectors of shape [B, L, L, E]
hparams: HParams object
Returns:
edge_matrices: [batch, L * D, L * D] the dense matrix for message passing
viewed as a block matrix with (L,L) blocks of size (D,D). Each block is a function
of the edge vector of the adjacency matrix at that spot.
|
[
"Precompute",
"the",
"a_in",
"and",
"a_out",
"tensors",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L863-L907
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/layers/message_passing_attention.py
|
dense_message_pass
|
def dense_message_pass(node_states, edge_matrices):
"""Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
Args:
node_states: [B, L, D] tensor (h_{t-1})
edge_matrices (tf.float32): [B, L*D, L*D]
Returns:
messages (tf.float32): [B, L, D] For each pair
of nodes in the graph a message is sent along both the incoming and
outgoing edge.
"""
batch_size, num_nodes, node_dim = common_layers.shape_list(node_states)
# Stack the nodes as a big column vector.
h_flat = tf.reshape(
node_states, [batch_size, num_nodes * node_dim, 1], name="h_flat")
messages = tf.reshape(
tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim],
name="messages_matmul")
message_bias = tf.get_variable("message_bias", shape=node_dim)
messages = messages + message_bias
messages = tf.reshape(messages, [batch_size, num_nodes, node_dim])
return messages
|
python
|
def dense_message_pass(node_states, edge_matrices):
"""Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
Args:
node_states: [B, L, D] tensor (h_{t-1})
edge_matrices (tf.float32): [B, L*D, L*D]
Returns:
messages (tf.float32): [B, L, D] For each pair
of nodes in the graph a message is sent along both the incoming and
outgoing edge.
"""
batch_size, num_nodes, node_dim = common_layers.shape_list(node_states)
# Stack the nodes as a big column vector.
h_flat = tf.reshape(
node_states, [batch_size, num_nodes * node_dim, 1], name="h_flat")
messages = tf.reshape(
tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim],
name="messages_matmul")
message_bias = tf.get_variable("message_bias", shape=node_dim)
messages = messages + message_bias
messages = tf.reshape(messages, [batch_size, num_nodes, node_dim])
return messages
|
[
"def",
"dense_message_pass",
"(",
"node_states",
",",
"edge_matrices",
")",
":",
"batch_size",
",",
"num_nodes",
",",
"node_dim",
"=",
"common_layers",
".",
"shape_list",
"(",
"node_states",
")",
"# Stack the nodes as a big column vector.",
"h_flat",
"=",
"tf",
".",
"reshape",
"(",
"node_states",
",",
"[",
"batch_size",
",",
"num_nodes",
"*",
"node_dim",
",",
"1",
"]",
",",
"name",
"=",
"\"h_flat\"",
")",
"messages",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"matmul",
"(",
"edge_matrices",
",",
"h_flat",
")",
",",
"[",
"batch_size",
"*",
"num_nodes",
",",
"node_dim",
"]",
",",
"name",
"=",
"\"messages_matmul\"",
")",
"message_bias",
"=",
"tf",
".",
"get_variable",
"(",
"\"message_bias\"",
",",
"shape",
"=",
"node_dim",
")",
"messages",
"=",
"messages",
"+",
"message_bias",
"messages",
"=",
"tf",
".",
"reshape",
"(",
"messages",
",",
"[",
"batch_size",
",",
"num_nodes",
",",
"node_dim",
"]",
")",
"return",
"messages"
] |
Computes a_t from h_{t-1}, see bottom of page 3 in the paper.
Args:
node_states: [B, L, D] tensor (h_{t-1})
edge_matrices (tf.float32): [B, L*D, L*D]
Returns:
messages (tf.float32): [B, L, D] For each pair
of nodes in the graph a message is sent along both the incoming and
outgoing edge.
|
[
"Computes",
"a_t",
"from",
"h_",
"{",
"t",
"-",
"1",
"}",
"see",
"bottom",
"of",
"page",
"3",
"in",
"the",
"paper",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/message_passing_attention.py#L910-L935
|
train
|
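A minimal usage sketch for the two functions above (not part of the source file; assumes TF 1.x and that dense_message_pass is in scope, with random tensors standing in for the output of precompute_edge_matrices):

import tensorflow as tf

batch, num_nodes, node_dim = 2, 3, 4
node_states = tf.random_normal([batch, num_nodes, node_dim])   # h_{t-1}
edge_matrices = tf.random_normal(                              # as built by
    [batch, num_nodes * node_dim, num_nodes * node_dim])       # precompute_edge_matrices
messages = dense_message_pass(node_states, edge_matrices)      # a_t
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())                  # for message_bias
  assert sess.run(messages).shape == (batch, num_nodes, node_dim)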
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
to_example
|
def to_example(dictionary):
"""Helper: build tf.Example from (string -> int/float/str list) dictionary."""
features = {}
for (k, v) in six.iteritems(dictionary):
if not v:
raise ValueError("Empty generated field: %s" % str((k, v)))
if isinstance(v[0], six.integer_types):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif isinstance(v[0], float):
features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))
elif isinstance(v[0], six.string_types):
if not six.PY2: # Convert in python 3.
v = [bytes(x, "utf-8") for x in v]
features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif isinstance(v[0], bytes):
features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
else:
raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
(k, str(v[0]), str(type(v[0]))))
return tf.train.Example(features=tf.train.Features(feature=features))
|
python
|
def to_example(dictionary):
"""Helper: build tf.Example from (string -> int/float/str list) dictionary."""
features = {}
for (k, v) in six.iteritems(dictionary):
if not v:
raise ValueError("Empty generated field: %s" % str((k, v)))
if isinstance(v[0], six.integer_types):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
elif isinstance(v[0], float):
features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))
elif isinstance(v[0], six.string_types):
if not six.PY2: # Convert in python 3.
v = [bytes(x, "utf-8") for x in v]
features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
elif isinstance(v[0], bytes):
features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
else:
raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
(k, str(v[0]), str(type(v[0]))))
return tf.train.Example(features=tf.train.Features(feature=features))
|
[
"def",
"to_example",
"(",
"dictionary",
")",
":",
"features",
"=",
"{",
"}",
"for",
"(",
"k",
",",
"v",
")",
"in",
"six",
".",
"iteritems",
"(",
"dictionary",
")",
":",
"if",
"not",
"v",
":",
"raise",
"ValueError",
"(",
"\"Empty generated field: %s\"",
"%",
"str",
"(",
"(",
"k",
",",
"v",
")",
")",
")",
"if",
"isinstance",
"(",
"v",
"[",
"0",
"]",
",",
"six",
".",
"integer_types",
")",
":",
"features",
"[",
"k",
"]",
"=",
"tf",
".",
"train",
".",
"Feature",
"(",
"int64_list",
"=",
"tf",
".",
"train",
".",
"Int64List",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"isinstance",
"(",
"v",
"[",
"0",
"]",
",",
"float",
")",
":",
"features",
"[",
"k",
"]",
"=",
"tf",
".",
"train",
".",
"Feature",
"(",
"float_list",
"=",
"tf",
".",
"train",
".",
"FloatList",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"isinstance",
"(",
"v",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"if",
"not",
"six",
".",
"PY2",
":",
"# Convert in python 3.",
"v",
"=",
"[",
"bytes",
"(",
"x",
",",
"\"utf-8\"",
")",
"for",
"x",
"in",
"v",
"]",
"features",
"[",
"k",
"]",
"=",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"v",
")",
")",
"elif",
"isinstance",
"(",
"v",
"[",
"0",
"]",
",",
"bytes",
")",
":",
"features",
"[",
"k",
"]",
"=",
"tf",
".",
"train",
".",
"Feature",
"(",
"bytes_list",
"=",
"tf",
".",
"train",
".",
"BytesList",
"(",
"value",
"=",
"v",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Value for %s is not a recognized type; v: %s type: %s\"",
"%",
"(",
"k",
",",
"str",
"(",
"v",
"[",
"0",
"]",
")",
",",
"str",
"(",
"type",
"(",
"v",
"[",
"0",
"]",
")",
")",
")",
")",
"return",
"tf",
".",
"train",
".",
"Example",
"(",
"features",
"=",
"tf",
".",
"train",
".",
"Features",
"(",
"feature",
"=",
"features",
")",
")"
] |
Helper: build tf.Example from (string -> int/float/str list) dictionary.
|
[
"Helper",
":",
"build",
"tf",
".",
"Example",
"from",
"(",
"string",
"-",
">",
"int",
"/",
"float",
"/",
"str",
"list",
")",
"dictionary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L43-L62
|
train
|
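A hedged sketch of to_example on a toy dictionary (the feature names below are illustrative, chosen to exercise the int, float, and string branches above; assumes the module's tensorflow import):

example = to_example({"inputs": [3, 7, 1], "score": [0.5], "label": ["dog"]})
serialized = example.SerializeToString()  # bytes, ready for a TFRecordWriter
# Round-trip with the matching variable-length feature spec.
parsed = tf.parse_single_example(serialized, features={
    "inputs": tf.VarLenFeature(tf.int64),
    "score": tf.VarLenFeature(tf.float32),
    "label": tf.VarLenFeature(tf.string),
})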
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
generate_files_distributed
|
def generate_files_distributed(generator,
output_name,
output_dir,
num_shards=1,
max_cases=None,
task_id=0):
"""generate_files but with a single writer writing to shard task_id."""
assert task_id < num_shards
output_filename = sharded_name(output_name, task_id, num_shards)
output_file = os.path.join(output_dir, output_filename)
tf.logging.info("Writing to file %s", output_file)
writer = tf.python_io.TFRecordWriter(output_file)
counter = 0
for case in generator:
if counter % 100000 == 0:
tf.logging.info("Generating case %d for %s." % (counter, output_name))
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writer.write(example.SerializeToString())
writer.close()
return output_file
|
python
|
def generate_files_distributed(generator,
output_name,
output_dir,
num_shards=1,
max_cases=None,
task_id=0):
"""generate_files but with a single writer writing to shard task_id."""
assert task_id < num_shards
output_filename = sharded_name(output_name, task_id, num_shards)
output_file = os.path.join(output_dir, output_filename)
tf.logging.info("Writing to file %s", output_file)
writer = tf.python_io.TFRecordWriter(output_file)
counter = 0
for case in generator:
if counter % 100000 == 0:
tf.logging.info("Generating case %d for %s." % (counter, output_name))
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writer.write(example.SerializeToString())
writer.close()
return output_file
|
[
"def",
"generate_files_distributed",
"(",
"generator",
",",
"output_name",
",",
"output_dir",
",",
"num_shards",
"=",
"1",
",",
"max_cases",
"=",
"None",
",",
"task_id",
"=",
"0",
")",
":",
"assert",
"task_id",
"<",
"num_shards",
"output_filename",
"=",
"sharded_name",
"(",
"output_name",
",",
"task_id",
",",
"num_shards",
")",
"output_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"output_filename",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Writing to file %s\"",
",",
"output_file",
")",
"writer",
"=",
"tf",
".",
"python_io",
".",
"TFRecordWriter",
"(",
"output_file",
")",
"counter",
"=",
"0",
"for",
"case",
"in",
"generator",
":",
"if",
"counter",
"%",
"100000",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating case %d for %s.\"",
"%",
"(",
"counter",
",",
"output_name",
")",
")",
"counter",
"+=",
"1",
"if",
"max_cases",
"and",
"counter",
">",
"max_cases",
":",
"break",
"example",
"=",
"to_example",
"(",
"case",
")",
"writer",
".",
"write",
"(",
"example",
".",
"SerializeToString",
"(",
")",
")",
"writer",
".",
"close",
"(",
")",
"return",
"output_file"
] |
generate_files but with a single writer writing to shard task_id.
|
[
"generate_files",
"but",
"with",
"a",
"single",
"writer",
"writing",
"to",
"shard",
"task_id",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L65-L89
|
train
|
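Usage sketch (hypothetical paths; sharded_name is a helper defined elsewhere in this module): worker 3 of 10 writes only its own shard.

def toy_gen():
  for i in range(1000):
    yield {"inputs": [i], "targets": [i + 1]}

out_path = generate_files_distributed(
    toy_gen(), "toy-train", "/tmp/t2t_data", num_shards=10, task_id=3)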
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
generate_files
|
def generate_files(generator, output_filenames,
max_cases=None, cycle_every_n=1):
"""Generate cases from a generator and save as TFRecord files.
Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M, where M = num_shards.
Args:
generator: a generator yielding (string -> int/float/str list) dictionaries.
output_filenames: List of output file paths.
max_cases: maximum number of cases to get from the generator;
if None (default), we use the generator until StopIteration is raised.
cycle_every_n: how many cases from the generator to take before
switching to the next shard; by default set to 1, switch every case.
"""
if outputs_exist(output_filenames):
tf.logging.info("Skipping generator because outputs files exists at {}"
.format(output_filenames))
return
tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
num_shards = len(output_filenames)
# Check if is training or eval, ref: train_data_filenames().
if num_shards > 0:
if "-train" in output_filenames[0]:
tag = "train"
elif "-dev" in output_filenames[0]:
tag = "eval"
else:
tag = "other"
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
counter, shard = 0, 0
for case in generator:
if case is None:
continue
if counter % 100000 == 0:
tf.logging.info("Generating case %d." % counter)
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writers[shard].write(example.SerializeToString())
if counter % cycle_every_n == 0:
shard = (shard + 1) % num_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filenames, output_filenames):
tf.gfile.Rename(tmp_name, final_name)
if num_shards > 0:
if tag == "train":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
elif tag == "eval":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)
tf.logging.info("Generated %s Examples", counter)
|
python
|
def generate_files(generator, output_filenames,
max_cases=None, cycle_every_n=1):
"""Generate cases from a generator and save as TFRecord files.
Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M, where M = num_shards.
Args:
generator: a generator yielding (string -> int/float/str list) dictionaries.
output_filenames: List of output file paths.
max_cases: maximum number of cases to get from the generator;
if None (default), we use the generator until StopIteration is raised.
cycle_every_n: how many cases from the generator to take before
switching to the next shard; by default set to 1, switch every case.
"""
if outputs_exist(output_filenames):
tf.logging.info("Skipping generator because outputs files exists at {}"
.format(output_filenames))
return
tmp_filenames = [fname + ".incomplete" for fname in output_filenames]
num_shards = len(output_filenames)
# Check if is training or eval, ref: train_data_filenames().
if num_shards > 0:
if "-train" in output_filenames[0]:
tag = "train"
elif "-dev" in output_filenames[0]:
tag = "eval"
else:
tag = "other"
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames]
counter, shard = 0, 0
for case in generator:
if case is None:
continue
if counter % 100000 == 0:
tf.logging.info("Generating case %d." % counter)
counter += 1
if max_cases and counter > max_cases:
break
example = to_example(case)
writers[shard].write(example.SerializeToString())
if counter % cycle_every_n == 0:
shard = (shard + 1) % num_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filenames, output_filenames):
tf.gfile.Rename(tmp_name, final_name)
if num_shards > 0:
if tag == "train":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter)
elif tag == "eval":
mlperf_log.transformer_print(
key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter)
tf.logging.info("Generated %s Examples", counter)
|
[
"def",
"generate_files",
"(",
"generator",
",",
"output_filenames",
",",
"max_cases",
"=",
"None",
",",
"cycle_every_n",
"=",
"1",
")",
":",
"if",
"outputs_exist",
"(",
"output_filenames",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Skipping generator because outputs files exists at {}\"",
".",
"format",
"(",
"output_filenames",
")",
")",
"return",
"tmp_filenames",
"=",
"[",
"fname",
"+",
"\".incomplete\"",
"for",
"fname",
"in",
"output_filenames",
"]",
"num_shards",
"=",
"len",
"(",
"output_filenames",
")",
"# Check if is training or eval, ref: train_data_filenames().",
"if",
"num_shards",
">",
"0",
":",
"if",
"\"-train\"",
"in",
"output_filenames",
"[",
"0",
"]",
":",
"tag",
"=",
"\"train\"",
"elif",
"\"-dev\"",
"in",
"output_filenames",
"[",
"0",
"]",
":",
"tag",
"=",
"\"eval\"",
"else",
":",
"tag",
"=",
"\"other\"",
"writers",
"=",
"[",
"tf",
".",
"python_io",
".",
"TFRecordWriter",
"(",
"fname",
")",
"for",
"fname",
"in",
"tmp_filenames",
"]",
"counter",
",",
"shard",
"=",
"0",
",",
"0",
"for",
"case",
"in",
"generator",
":",
"if",
"case",
"is",
"None",
":",
"continue",
"if",
"counter",
"%",
"100000",
"==",
"0",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating case %d.\"",
"%",
"counter",
")",
"counter",
"+=",
"1",
"if",
"max_cases",
"and",
"counter",
">",
"max_cases",
":",
"break",
"example",
"=",
"to_example",
"(",
"case",
")",
"writers",
"[",
"shard",
"]",
".",
"write",
"(",
"example",
".",
"SerializeToString",
"(",
")",
")",
"if",
"counter",
"%",
"cycle_every_n",
"==",
"0",
":",
"shard",
"=",
"(",
"shard",
"+",
"1",
")",
"%",
"num_shards",
"for",
"writer",
"in",
"writers",
":",
"writer",
".",
"close",
"(",
")",
"for",
"tmp_name",
",",
"final_name",
"in",
"zip",
"(",
"tmp_filenames",
",",
"output_filenames",
")",
":",
"tf",
".",
"gfile",
".",
"Rename",
"(",
"tmp_name",
",",
"final_name",
")",
"if",
"num_shards",
">",
"0",
":",
"if",
"tag",
"==",
"\"train\"",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"PREPROC_NUM_TRAIN_EXAMPLES",
",",
"value",
"=",
"counter",
")",
"elif",
"tag",
"==",
"\"eval\"",
":",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"PREPROC_NUM_EVAL_EXAMPLES",
",",
"value",
"=",
"counter",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generated %s Examples\"",
",",
"counter",
")"
] |
Generate cases from a generator and save as TFRecord files.
Generated cases are transformed to tf.Example protos and saved as TFRecords
in sharded files named output_dir/output_name-00..N-of-00..M, where M = num_shards.
Args:
generator: a generator yielding (string -> int/float/str list) dictionaries.
output_filenames: List of output file paths.
max_cases: maximum number of cases to get from the generator;
if None (default), we use the generator until StopIteration is raised.
cycle_every_n: how many cases from the generator to take before
switching to the next shard; by default set to 1, switch every case.
|
[
"Generate",
"cases",
"from",
"a",
"generator",
"and",
"save",
"as",
"TFRecord",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L134-L193
|
train
|
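A minimal sketch of the single-process variant (file names are illustrative; the "-train" substring is what selects the train tag above):

filenames = ["/tmp/t2t_data/toy-train-00000-of-00002",
             "/tmp/t2t_data/toy-train-00001-of-00002"]
generate_files(({"targets": [i]} for i in range(100)), filenames)
# With the default cycle_every_n=1, consecutive cases alternate between shards.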
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
download_report_hook
|
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
|
python
|
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
|
[
"def",
"download_report_hook",
"(",
"count",
",",
"block_size",
",",
"total_size",
")",
":",
"percent",
"=",
"int",
"(",
"count",
"*",
"block_size",
"*",
"100",
"/",
"total_size",
")",
"print",
"(",
"\"\\r%d%%\"",
"%",
"percent",
"+",
"\" completed\"",
",",
"end",
"=",
"\"\\r\"",
")"
] |
Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
|
[
"Report",
"hook",
"for",
"download",
"progress",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L196-L205
|
train
|
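Sketch of wiring the hook into a download (using six.moves for the same Python 2/3 compatibility the module relies on; the URL is a placeholder):

from six.moves.urllib.request import urlretrieve

urlretrieve("http://example.com/data.gz", "/tmp/data.gz",
            reporthook=download_report_hook)
# Caveat: servers that omit Content-Length report total_size <= 0, which
# makes the percentage computed above meaningless.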
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
maybe_download
|
def maybe_download(directory, filename, uri):
"""Download filename from uri unless it's already in directory.
Copies a remote file to local if that local file does not already exist. If
the local file already exists before this call, it does not check that the local
file is a copy of the remote.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file.
"""
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
tf.logging.info("Downloading %s to %s" % (uri, filepath))
try:
tf.gfile.Copy(uri, filepath)
except tf.errors.UnimplementedError:
if uri.startswith("http"):
inprogress_filepath = filepath + ".incomplete"
inprogress_filepath, _ = urllib.urlretrieve(
uri, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress
print()
tf.gfile.Rename(inprogress_filepath, filepath)
else:
raise ValueError("Unrecognized URI: " + uri)
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." %
(filename, statinfo.st_size))
return filepath
|
python
|
def maybe_download(directory, filename, uri):
"""Download filename from uri unless it's already in directory.
Copies a remote file to local if that local file does not already exist. If
the local file already exists before this call, it does not check that the local
file is a copy of the remote.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file.
"""
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
tf.logging.info("Downloading %s to %s" % (uri, filepath))
try:
tf.gfile.Copy(uri, filepath)
except tf.errors.UnimplementedError:
if uri.startswith("http"):
inprogress_filepath = filepath + ".incomplete"
inprogress_filepath, _ = urllib.urlretrieve(
uri, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress
print()
tf.gfile.Rename(inprogress_filepath, filepath)
else:
raise ValueError("Unrecognized URI: " + uri)
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." %
(filename, statinfo.st_size))
return filepath
|
[
"def",
"maybe_download",
"(",
"directory",
",",
"filename",
",",
"uri",
")",
":",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"directory",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"filepath",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Not downloading, file already found: %s\"",
"%",
"filepath",
")",
"return",
"filepath",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Downloading %s to %s\"",
"%",
"(",
"uri",
",",
"filepath",
")",
")",
"try",
":",
"tf",
".",
"gfile",
".",
"Copy",
"(",
"uri",
",",
"filepath",
")",
"except",
"tf",
".",
"errors",
".",
"UnimplementedError",
":",
"if",
"uri",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"inprogress_filepath",
"=",
"filepath",
"+",
"\".incomplete\"",
"inprogress_filepath",
",",
"_",
"=",
"urllib",
".",
"urlretrieve",
"(",
"uri",
",",
"inprogress_filepath",
",",
"reporthook",
"=",
"download_report_hook",
")",
"# Print newline to clear the carriage return from the download progress",
"print",
"(",
")",
"tf",
".",
"gfile",
".",
"Rename",
"(",
"inprogress_filepath",
",",
"filepath",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized URI: \"",
"+",
"filepath",
")",
"statinfo",
"=",
"os",
".",
"stat",
"(",
"filepath",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Successfully downloaded %s, %s bytes.\"",
"%",
"(",
"filename",
",",
"statinfo",
".",
"st_size",
")",
")",
"return",
"filepath"
] |
Download filename from uri unless it's already in directory.
Copies a remote file to local if that local file does not already exist. If
the local file already exists before this call, it does not check that the local
file is a copy of the remote.
Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a
URL.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
uri: URI to copy (or download) from.
Returns:
The path to the downloaded file.
|
[
"Download",
"filename",
"from",
"uri",
"unless",
"it",
"s",
"already",
"in",
"directory",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L208-L248
|
train
|
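Hedged usage sketch (placeholder URL); the second call is a no-op because the file is already present locally:

path = maybe_download("/tmp/t2t_tmp", "corpus.tgz",
                      "http://example.com/corpus.tgz")
path = maybe_download("/tmp/t2t_tmp", "corpus.tgz",
                      "http://example.com/corpus.tgz")  # logs "Not downloading"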
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
maybe_download_from_drive
|
def maybe_download_from_drive(directory, filename, url):
"""Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
"""
if not tf.gfile.Exists(directory):
tf.logging.info("Creating directory %s" % directory)
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
confirm_token = None
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
# Since the file is big, drive will scan it for virus and take it to a
# warning page. We find the confirm token on this page and append it to the
# URL to start the download process.
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token:
url = url + "&confirm=" + confirm_token
tf.logging.info("Downloading %s to %s" % (url, filepath))
response = session.get(url, stream=True)
# Now begin the download.
chunk_size = 16 * 1024
with open(filepath, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk:
f.write(chunk)
# Print newline to clear the carriage return from the download progress
print()
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
statinfo.st_size))
return filepath
|
python
|
def maybe_download_from_drive(directory, filename, url):
"""Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
"""
if not tf.gfile.Exists(directory):
tf.logging.info("Creating directory %s" % directory)
tf.gfile.MakeDirs(directory)
filepath = os.path.join(directory, filename)
confirm_token = None
if tf.gfile.Exists(filepath):
tf.logging.info("Not downloading, file already found: %s" % filepath)
return filepath
# Since the file is big, drive will scan it for virus and take it to a
# warning page. We find the confirm token on this page and append it to the
# URL to start the download process.
confirm_token = None
session = requests.Session()
response = session.get(url, stream=True)
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
if confirm_token:
url = url + "&confirm=" + confirm_token
tf.logging.info("Downloading %s to %s" % (url, filepath))
response = session.get(url, stream=True)
# Now begin the download.
chunk_size = 16 * 1024
with open(filepath, "wb") as f:
for chunk in response.iter_content(chunk_size):
if chunk:
f.write(chunk)
# Print newline to clear the carriage return from the download progress
print()
statinfo = os.stat(filepath)
tf.logging.info("Successfully downloaded %s, %s bytes." % (filename,
statinfo.st_size))
return filepath
|
[
"def",
"maybe_download_from_drive",
"(",
"directory",
",",
"filename",
",",
"url",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"directory",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Creating directory %s\"",
"%",
"directory",
")",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"directory",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"confirm_token",
"=",
"None",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"filepath",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Not downloading, file already found: %s\"",
"%",
"filepath",
")",
"return",
"filepath",
"# Since the file is big, drive will scan it for virus and take it to a",
"# warning page. We find the confirm token on this page and append it to the",
"# URL to start the download process.",
"confirm_token",
"=",
"None",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"for",
"k",
",",
"v",
"in",
"response",
".",
"cookies",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"startswith",
"(",
"\"download_warning\"",
")",
":",
"confirm_token",
"=",
"v",
"if",
"confirm_token",
":",
"url",
"=",
"url",
"+",
"\"&confirm=\"",
"+",
"confirm_token",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Downloading %s to %s\"",
"%",
"(",
"url",
",",
"filepath",
")",
")",
"response",
"=",
"session",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"# Now begin the download.",
"chunk_size",
"=",
"16",
"*",
"1024",
"with",
"open",
"(",
"filepath",
",",
"\"wb\"",
")",
"as",
"f",
":",
"for",
"chunk",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
")",
":",
"if",
"chunk",
":",
"f",
".",
"write",
"(",
"chunk",
")",
"# Print newline to clear the carriage return from the download progress",
"print",
"(",
")",
"statinfo",
"=",
"os",
".",
"stat",
"(",
"filepath",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Successfully downloaded %s, %s bytes.\"",
"%",
"(",
"filename",
",",
"statinfo",
".",
"st_size",
")",
")",
"return",
"filepath"
] |
Download filename from Google drive unless it's already in directory.
Args:
directory: path to the directory that will be used.
filename: name of the file to download to (do nothing if it already exists).
url: URL to download from.
Returns:
The path to the downloaded file.
|
[
"Download",
"filename",
"from",
"Google",
"drive",
"unless",
"it",
"s",
"already",
"in",
"directory",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L251-L298
|
train
|
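Sketch with the Drive URL form that this confirm-token dance is aimed at (FILE_ID is a placeholder):

path = maybe_download_from_drive(
    "/tmp/t2t_tmp", "data.zip",
    "https://drive.google.com/uc?export=download&id=FILE_ID")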
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
gunzip_file
|
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
"""
if tf.gfile.Exists(new_path):
tf.logging.info("File %s already exists, skipping unpacking" % new_path)
return
tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
# We may be unpacking into a newly created directory, add write mode.
mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
os.chmod(os.path.dirname(new_path), mode)
with gzip.open(gz_path, "rb") as gz_file:
with tf.gfile.GFile(new_path, mode="wb") as new_file:
for line in gz_file:
new_file.write(line)
|
python
|
def gunzip_file(gz_path, new_path):
"""Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
"""
if tf.gfile.Exists(new_path):
tf.logging.info("File %s already exists, skipping unpacking" % new_path)
return
tf.logging.info("Unpacking %s to %s" % (gz_path, new_path))
# We may be unpacking into a newly created directory, add write mode.
mode = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IROTH
os.chmod(os.path.dirname(new_path), mode)
with gzip.open(gz_path, "rb") as gz_file:
with tf.gfile.GFile(new_path, mode="wb") as new_file:
for line in gz_file:
new_file.write(line)
|
[
"def",
"gunzip_file",
"(",
"gz_path",
",",
"new_path",
")",
":",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"new_path",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"File %s already exists, skipping unpacking\"",
"%",
"new_path",
")",
"return",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Unpacking %s to %s\"",
"%",
"(",
"gz_path",
",",
"new_path",
")",
")",
"# We may be unpacking into a newly created directory, add write mode.",
"mode",
"=",
"stat",
".",
"S_IRWXU",
"or",
"stat",
".",
"S_IXGRP",
"or",
"stat",
".",
"S_IRGRP",
"or",
"stat",
".",
"S_IROTH",
"os",
".",
"chmod",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"new_path",
")",
",",
"mode",
")",
"with",
"gzip",
".",
"open",
"(",
"gz_path",
",",
"\"rb\"",
")",
"as",
"gz_file",
":",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"new_path",
",",
"mode",
"=",
"\"wb\"",
")",
"as",
"new_file",
":",
"for",
"line",
"in",
"gz_file",
":",
"new_file",
".",
"write",
"(",
"line",
")"
] |
Unzips from gz_path into new_path.
Args:
gz_path: path to the zipped file.
new_path: path to where the file will be unzipped.
|
[
"Unzips",
"from",
"gz_path",
"into",
"new_path",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L301-L318
|
train
|
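Typical pairing with maybe_download (paths illustrative):

gz_path = maybe_download("/tmp/t2t_tmp", "corpus.txt.gz",
                         "http://example.com/corpus.txt.gz")
gunzip_file(gz_path, "/tmp/t2t_tmp/corpus.txt")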
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
get_or_generate_vocab_inner
|
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generator, max_subtoken_length=None,
reserved_tokens=None):
"""Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
"""
if data_dir and vocab_filename:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
return text_encoder.SubwordTextEncoder(vocab_filepath)
else:
vocab_filepath = None
tf.logging.info("Generating vocab file: %s", vocab_filepath)
vocab = text_encoder.SubwordTextEncoder.build_from_generator(
generator, vocab_size, max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
if vocab_filepath:
tf.gfile.MakeDirs(data_dir)
vocab.store_to_file(vocab_filepath)
return vocab
|
python
|
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generator, max_subtoken_length=None,
reserved_tokens=None):
"""Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
"""
if data_dir and vocab_filename:
vocab_filepath = os.path.join(data_dir, vocab_filename)
if tf.gfile.Exists(vocab_filepath):
tf.logging.info("Found vocab file: %s", vocab_filepath)
return text_encoder.SubwordTextEncoder(vocab_filepath)
else:
vocab_filepath = None
tf.logging.info("Generating vocab file: %s", vocab_filepath)
vocab = text_encoder.SubwordTextEncoder.build_from_generator(
generator, vocab_size, max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
if vocab_filepath:
tf.gfile.MakeDirs(data_dir)
vocab.store_to_file(vocab_filepath)
return vocab
|
[
"def",
"get_or_generate_vocab_inner",
"(",
"data_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"generator",
",",
"max_subtoken_length",
"=",
"None",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"if",
"data_dir",
"and",
"vocab_filename",
":",
"vocab_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"vocab_filename",
")",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"vocab_filepath",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Found vocab file: %s\"",
",",
"vocab_filepath",
")",
"return",
"text_encoder",
".",
"SubwordTextEncoder",
"(",
"vocab_filepath",
")",
"else",
":",
"vocab_filepath",
"=",
"None",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating vocab file: %s\"",
",",
"vocab_filepath",
")",
"vocab",
"=",
"text_encoder",
".",
"SubwordTextEncoder",
".",
"build_from_generator",
"(",
"generator",
",",
"vocab_size",
",",
"max_subtoken_length",
"=",
"max_subtoken_length",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"if",
"vocab_filepath",
":",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"data_dir",
")",
"vocab",
".",
"store_to_file",
"(",
"vocab_filepath",
")",
"return",
"vocab"
] |
Inner implementation for vocab generators.
Args:
data_dir: The base directory where data and vocab files are stored. If None,
then do not save the vocab even if it doesn't exist.
vocab_filename: relative filename where vocab file is stored
vocab_size: target size of the vocabulary constructed by SubwordTextEncoder
generator: a generator that produces tokens from the vocabulary
max_subtoken_length: an optional integer. Set this to a finite value to
avoid quadratic costs during vocab building.
reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
should be a prefix of `reserved_tokens`. If `None`, defaults to
`RESERVED_TOKENS`.
Returns:
A SubwordTextEncoder vocabulary object.
|
[
"Inner",
"implementation",
"for",
"vocab",
"generators",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L321-L358
|
train
|
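A hedged sketch: build a small subword vocabulary from an in-memory corpus (a second run with the same data_dir and vocab_filename reloads the stored file instead):

lines = ["the quick brown fox", "jumps over the lazy dog"] * 100
vocab = get_or_generate_vocab_inner(
    data_dir="/tmp/t2t_data", vocab_filename="vocab.toy.64",
    vocab_size=64, generator=iter(lines))
ids = vocab.encode("the lazy fox")  # list of subtoken ids
text = vocab.decode(ids)            # round-trips for text covered by the vocab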
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
get_or_generate_vocab
|
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
sources, file_byte_budget=1e6,
max_subtoken_length=None):
"""Generate a vocabulary from the datasets in sources."""
vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
vocab_generator, max_subtoken_length)
|
python
|
def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size,
sources, file_byte_budget=1e6,
max_subtoken_length=None):
"""Generate a vocabulary from the datasets in sources."""
vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget)
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
vocab_generator, max_subtoken_length)
|
[
"def",
"get_or_generate_vocab",
"(",
"data_dir",
",",
"tmp_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"sources",
",",
"file_byte_budget",
"=",
"1e6",
",",
"max_subtoken_length",
"=",
"None",
")",
":",
"vocab_generator",
"=",
"generate_lines_for_vocab",
"(",
"tmp_dir",
",",
"sources",
",",
"file_byte_budget",
")",
"return",
"get_or_generate_vocab_inner",
"(",
"data_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"vocab_generator",
",",
"max_subtoken_length",
")"
] |
Generate a vocabulary from the datasets in sources.
|
[
"Generate",
"a",
"vocabulary",
"from",
"the",
"datasets",
"in",
"sources",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L361-L368
|
train
|
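The expected shape of `sources`, inferred from generate_lines_for_vocab below, is a list of (download_url, [filenames inside the archive]) pairs (URL and names here are placeholders):

sources = [("http://example.com/parallel-corpus.tgz",
            ["corpus.en", "corpus.de"])]
vocab = get_or_generate_vocab("/tmp/t2t_data", "/tmp/t2t_tmp",
                              "vocab.ende.32768", 2**15, sources)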
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
generate_lines_for_vocab
|
def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
"""Generate lines for vocabulary generation."""
tf.logging.info("Generating vocab from: %s", str(sources))
for source in sources:
url = source[0]
filename = os.path.basename(url)
compressed_file = maybe_download(tmp_dir, filename, url)
for lang_file in source[1]:
tf.logging.info("Reading file: %s" % lang_file)
filepath = os.path.join(tmp_dir, lang_file)
# Extract from tar if needed.
if not tf.gfile.Exists(filepath):
read_type = "r:gz" if filename.endswith("tgz") else "r"
with tarfile.open(compressed_file, read_type) as corpus_tar:
corpus_tar.extractall(tmp_dir)
# For some datasets a second extraction is necessary.
if lang_file.endswith(".gz"):
new_filepath = os.path.join(tmp_dir, lang_file[:-3])
if tf.gfile.Exists(new_filepath):
tf.logging.info(
"Subdirectory %s already exists, skipping unpacking" % filepath)
else:
tf.logging.info("Unpacking subdirectory %s" % filepath)
gunzip_file(filepath, new_filepath)
filepath = new_filepath
with tf.gfile.GFile(filepath, mode="r") as source_file:
file_byte_budget_ = file_byte_budget
counter = 0
countermax = int(source_file.size() / file_byte_budget_ / 2)
for line in source_file:
if counter < countermax:
counter += 1
else:
if file_byte_budget_ <= 0:
break
line = line.strip()
file_byte_budget_ -= len(line)
counter = 0
yield line
|
python
|
def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6):
"""Generate lines for vocabulary generation."""
tf.logging.info("Generating vocab from: %s", str(sources))
for source in sources:
url = source[0]
filename = os.path.basename(url)
compressed_file = maybe_download(tmp_dir, filename, url)
for lang_file in source[1]:
tf.logging.info("Reading file: %s" % lang_file)
filepath = os.path.join(tmp_dir, lang_file)
# Extract from tar if needed.
if not tf.gfile.Exists(filepath):
read_type = "r:gz" if filename.endswith("tgz") else "r"
with tarfile.open(compressed_file, read_type) as corpus_tar:
corpus_tar.extractall(tmp_dir)
# For some datasets a second extraction is necessary.
if lang_file.endswith(".gz"):
new_filepath = os.path.join(tmp_dir, lang_file[:-3])
if tf.gfile.Exists(new_filepath):
tf.logging.info(
"Subdirectory %s already exists, skipping unpacking" % filepath)
else:
tf.logging.info("Unpacking subdirectory %s" % filepath)
gunzip_file(filepath, new_filepath)
filepath = new_filepath
with tf.gfile.GFile(filepath, mode="r") as source_file:
file_byte_budget_ = file_byte_budget
counter = 0
countermax = int(source_file.size() / file_byte_budget_ / 2)
for line in source_file:
if counter < countermax:
counter += 1
else:
if file_byte_budget_ <= 0:
break
line = line.strip()
file_byte_budget_ -= len(line)
counter = 0
yield line
|
[
"def",
"generate_lines_for_vocab",
"(",
"tmp_dir",
",",
"sources",
",",
"file_byte_budget",
"=",
"1e6",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating vocab from: %s\"",
",",
"str",
"(",
"sources",
")",
")",
"for",
"source",
"in",
"sources",
":",
"url",
"=",
"source",
"[",
"0",
"]",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"url",
")",
"compressed_file",
"=",
"maybe_download",
"(",
"tmp_dir",
",",
"filename",
",",
"url",
")",
"for",
"lang_file",
"in",
"source",
"[",
"1",
"]",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Reading file: %s\"",
"%",
"lang_file",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"lang_file",
")",
"# Extract from tar if needed.",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"filepath",
")",
":",
"read_type",
"=",
"\"r:gz\"",
"if",
"filename",
".",
"endswith",
"(",
"\"tgz\"",
")",
"else",
"\"r\"",
"with",
"tarfile",
".",
"open",
"(",
"compressed_file",
",",
"read_type",
")",
"as",
"corpus_tar",
":",
"corpus_tar",
".",
"extractall",
"(",
"tmp_dir",
")",
"# For some datasets a second extraction is necessary.",
"if",
"lang_file",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"new_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"lang_file",
"[",
":",
"-",
"3",
"]",
")",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"new_filepath",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Subdirectory %s already exists, skipping unpacking\"",
"%",
"filepath",
")",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Unpacking subdirectory %s\"",
"%",
"filepath",
")",
"gunzip_file",
"(",
"filepath",
",",
"new_filepath",
")",
"filepath",
"=",
"new_filepath",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"source_file",
":",
"file_byte_budget_",
"=",
"file_byte_budget",
"counter",
"=",
"0",
"countermax",
"=",
"int",
"(",
"source_file",
".",
"size",
"(",
")",
"/",
"file_byte_budget_",
"/",
"2",
")",
"for",
"line",
"in",
"source_file",
":",
"if",
"counter",
"<",
"countermax",
":",
"counter",
"+=",
"1",
"else",
":",
"if",
"file_byte_budget_",
"<=",
"0",
":",
"break",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"file_byte_budget_",
"-=",
"len",
"(",
"line",
")",
"counter",
"=",
"0",
"yield",
"line"
] |
Generate lines for vocabulary generation.
|
[
"Generate",
"lines",
"for",
"vocabulary",
"generation",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L371-L413
|
train
|
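Worked numbers for the byte-budget sampling above: with the default 1 MB budget on a 100 MB file, roughly every 50th line is yielded until about 1 MB of text has been kept.

file_size, budget = 1e8, 1e6
countermax = int(file_size / budget / 2)
assert countermax == 50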
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
get_or_generate_tabbed_vocab
|
def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
index, vocab_filename, vocab_size):
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
index: index.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary.
"""
def generate():
filepath = os.path.join(tmp_dir, source_filename)
tf.logging.info("Generating vocab from %s", filepath)
with tf.gfile.GFile(filepath, mode="r") as source_file:
for line in source_file:
line = line.strip()
if line and "\t" in line:
parts = line.split("\t", 1)
part = parts[index].strip()
yield part
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate())
|
python
|
def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename,
index, vocab_filename, vocab_size):
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
index: index.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary.
"""
def generate():
filepath = os.path.join(tmp_dir, source_filename)
tf.logging.info("Generating vocab from %s", filepath)
with tf.gfile.GFile(filepath, mode="r") as source_file:
for line in source_file:
line = line.strip()
if line and "\t" in line:
parts = line.split("\t", 1)
part = parts[index].strip()
yield part
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate())
|
[
"def",
"get_or_generate_tabbed_vocab",
"(",
"data_dir",
",",
"tmp_dir",
",",
"source_filename",
",",
"index",
",",
"vocab_filename",
",",
"vocab_size",
")",
":",
"def",
"generate",
"(",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"source_filename",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating vocab from %s\"",
",",
"filepath",
")",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"filepath",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"source_file",
":",
"for",
"line",
"in",
"source_file",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"\"\\t\"",
"in",
"line",
":",
"parts",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
",",
"1",
")",
"part",
"=",
"parts",
"[",
"index",
"]",
".",
"strip",
"(",
")",
"yield",
"part",
"return",
"get_or_generate_vocab_inner",
"(",
"data_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"generate",
"(",
")",
")"
] |
r"""Generate a vocabulary from a tabbed source file.
The source is a file of source, target pairs, where each line contains
a source string and a target string, separated by a tab ('\t') character.
The index parameter specifies 0 for the source or 1 for the target.
Args:
data_dir: path to the data directory.
tmp_dir: path to the temporary directory.
source_filename: the name of the tab-separated source file.
index: index.
vocab_filename: the name of the vocabulary file.
vocab_size: vocabulary size.
Returns:
The vocabulary.
|
[
"r",
"Generate",
"a",
"vocabulary",
"from",
"a",
"tabbed",
"source",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L416-L447
|
train
|
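Hedged sketch: build a target-side (index=1) vocabulary from a tab-separated file already present in tmp_dir (file name illustrative):

vocab = get_or_generate_tabbed_vocab(
    data_dir="/tmp/t2t_data", tmp_dir="/tmp/t2t_tmp",
    source_filename="train.tsv", index=1,
    vocab_filename="vocab.targets.8192", vocab_size=8192)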
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
get_or_generate_txt_vocab
|
def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
filepatterns):
"""Generate a vocabulary from txt files with example-per-line."""
if isinstance(filepatterns, str):
filepatterns = [filepatterns]
def generate():
tf.logging.info("Generating vocab from %s", filepatterns)
for filepattern in filepatterns:
for filename in tf.gfile.Glob(filepattern):
with tf.gfile.GFile(filename, mode="r") as source_file:
for line in source_file:
yield line.strip()
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate())
|
python
|
def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size,
filepatterns):
"""Generate a vocabulary from txt files with example-per-line."""
if isinstance(filepatterns, str):
filepatterns = [filepatterns]
def generate():
tf.logging.info("Generating vocab from %s", filepatterns)
for filepattern in filepatterns:
for filename in tf.gfile.Glob(filepattern):
with tf.gfile.GFile(filename, mode="r") as source_file:
for line in source_file:
yield line.strip()
return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size,
generate())
|
[
"def",
"get_or_generate_txt_vocab",
"(",
"data_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"filepatterns",
")",
":",
"if",
"isinstance",
"(",
"filepatterns",
",",
"str",
")",
":",
"filepatterns",
"=",
"[",
"filepatterns",
"]",
"def",
"generate",
"(",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generating vocab from %s\"",
",",
"filepatterns",
")",
"for",
"filepattern",
"in",
"filepatterns",
":",
"for",
"filename",
"in",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"filepattern",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"filename",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"source_file",
":",
"for",
"line",
"in",
"source_file",
":",
"yield",
"line",
".",
"strip",
"(",
")",
"return",
"get_or_generate_vocab_inner",
"(",
"data_dir",
",",
"vocab_filename",
",",
"vocab_size",
",",
"generate",
"(",
")",
")"
] |
Generate a vocabulary from txt files with example-per-line.
|
[
"Generate",
"a",
"vocabulary",
"from",
"txt",
"files",
"with",
"example",
"-",
"per",
"-",
"line",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L450-L465
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
_shuffle_single
|
def _shuffle_single(fname, extra_fn=None):
"""Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
"""
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, "")
write_records(records, out_fname)
tf.gfile.Remove(fname)
|
python
|
def _shuffle_single(fname, extra_fn=None):
"""Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
"""
records = read_records(fname)
random.shuffle(records)
if extra_fn is not None:
records = extra_fn(records)
out_fname = fname.replace(UNSHUFFLED_SUFFIX, "")
write_records(records, out_fname)
tf.gfile.Remove(fname)
|
[
"def",
"_shuffle_single",
"(",
"fname",
",",
"extra_fn",
"=",
"None",
")",
":",
"records",
"=",
"read_records",
"(",
"fname",
")",
"random",
".",
"shuffle",
"(",
"records",
")",
"if",
"extra_fn",
"is",
"not",
"None",
":",
"records",
"=",
"extra_fn",
"(",
"records",
")",
"out_fname",
"=",
"fname",
".",
"replace",
"(",
"UNSHUFFLED_SUFFIX",
",",
"\"\"",
")",
"write_records",
"(",
"records",
",",
"out_fname",
")",
"tf",
".",
"gfile",
".",
"Remove",
"(",
"fname",
")"
] |
Shuffle a single file of records.
Args:
fname: a string
extra_fn: an optional function from list of TFRecords to list of TFRecords
to be called after shuffling.
|
[
"Shuffle",
"a",
"single",
"file",
"of",
"records",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L499-L513
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
shuffle_dataset
|
def shuffle_dataset(filenames, extra_fn=None):
"""Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
"""
if outputs_exist(filenames):
tf.logging.info("Skipping shuffle because output files exist")
return
tf.logging.info("Shuffling data...")
for filename in filenames:
_shuffle_single(filename, extra_fn=extra_fn)
tf.logging.info("Data shuffled.")
|
python
|
def shuffle_dataset(filenames, extra_fn=None):
"""Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
"""
if outputs_exist(filenames):
tf.logging.info("Skipping shuffle because output files exist")
return
tf.logging.info("Shuffling data...")
for filename in filenames:
_shuffle_single(filename, extra_fn=extra_fn)
tf.logging.info("Data shuffled.")
|
[
"def",
"shuffle_dataset",
"(",
"filenames",
",",
"extra_fn",
"=",
"None",
")",
":",
"if",
"outputs_exist",
"(",
"filenames",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Skipping shuffle because output files exist\"",
")",
"return",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Shuffling data...\"",
")",
"for",
"filename",
"in",
"filenames",
":",
"_shuffle_single",
"(",
"filename",
",",
"extra_fn",
"=",
"extra_fn",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Data shuffled.\"",
")"
] |
Shuffles the dataset.
Args:
filenames: a list of strings
extra_fn: an optional function from list of records to list of records
to be called after shuffling a file.
|
[
"Shuffles",
"the",
"dataset",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L516-L530
|
train
|
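The shuffle flow above is simple: read every record into memory, shuffle, write to the name without the unshuffled suffix, then delete the source. Below is a minimal local-filesystem sketch of the same flow, assuming line-delimited records instead of TFRecords; the UNSHUFFLED_SUFFIX value is an assumption mirroring the module constant, and the real helpers read_records/write_records operate on TFRecord files.

import random

UNSHUFFLED_SUFFIX = "-unshuffled"  # assumed value; mirrors the module constant

def shuffle_lines(fname):
  # Read all records, shuffle in memory, write to the suffix-free name.
  with open(fname) as f:
    records = f.readlines()
  random.shuffle(records)
  with open(fname.replace(UNSHUFFLED_SUFFIX, ""), "w") as f:
    f.writelines(records)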
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
pack_examples
|
def pack_examples(examples,
has_inputs,
packed_length=256,
spacing=2,
queue_size=10,
chop_long_sequences=False):
"""Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries.
"""
packer = SequencePairPacker if has_inputs else SequencePacker
combined = []
for example in examples:
x = ((example["inputs"], example["targets"])
if has_inputs else example["targets"])
if chop_long_sequences and len(x) > packed_length:
assert not has_inputs
num_fragments = len(x) // packed_length
for i in range(num_fragments):
yield packer(
x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
x = x[packed_length * num_fragments:]
added = False
for c in combined:
if c.can_fit(x, packed_length):
c.add(x)
added = True
break
if not added:
if len(combined) == queue_size:
yield combined[0].to_dict()
combined = combined[1:]
combined.append(packer(x, spacing))
for c in combined:
yield c.to_dict()
|
python
|
def pack_examples(examples,
has_inputs,
packed_length=256,
spacing=2,
queue_size=10,
chop_long_sequences=False):
"""Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries.
"""
packer = SequencePairPacker if has_inputs else SequencePacker
combined = []
for example in examples:
x = ((example["inputs"], example["targets"])
if has_inputs else example["targets"])
if chop_long_sequences and len(x) > packed_length:
assert not has_inputs
num_fragments = len(x) // packed_length
for i in range(num_fragments):
yield packer(
x[packed_length * i:packed_length * (i + 1)], spacing).to_dict()
x = x[packed_length * num_fragments:]
added = False
for c in combined:
if c.can_fit(x, packed_length):
c.add(x)
added = True
break
if not added:
if len(combined) == queue_size:
yield combined[0].to_dict()
combined = combined[1:]
combined.append(packer(x, spacing))
for c in combined:
yield c.to_dict()
|
[
"def",
"pack_examples",
"(",
"examples",
",",
"has_inputs",
",",
"packed_length",
"=",
"256",
",",
"spacing",
"=",
"2",
",",
"queue_size",
"=",
"10",
",",
"chop_long_sequences",
"=",
"False",
")",
":",
"packer",
"=",
"SequencePairPacker",
"if",
"has_inputs",
"else",
"SequencePacker",
"combined",
"=",
"[",
"]",
"for",
"example",
"in",
"examples",
":",
"x",
"=",
"(",
"(",
"example",
"[",
"\"inputs\"",
"]",
",",
"example",
"[",
"\"targets\"",
"]",
")",
"if",
"has_inputs",
"else",
"example",
"[",
"\"targets\"",
"]",
")",
"if",
"chop_long_sequences",
"and",
"len",
"(",
"x",
")",
">",
"packed_length",
":",
"assert",
"not",
"has_inputs",
"num_fragments",
"=",
"len",
"(",
"x",
")",
"//",
"packed_length",
"for",
"i",
"in",
"range",
"(",
"num_fragments",
")",
":",
"yield",
"packer",
"(",
"x",
"[",
"packed_length",
"*",
"i",
":",
"packed_length",
"*",
"(",
"i",
"+",
"1",
")",
"]",
",",
"spacing",
")",
".",
"to_dict",
"(",
")",
"x",
"=",
"x",
"[",
"packed_length",
"*",
"num_fragments",
":",
"]",
"added",
"=",
"False",
"for",
"c",
"in",
"combined",
":",
"if",
"c",
".",
"can_fit",
"(",
"x",
",",
"packed_length",
")",
":",
"c",
".",
"add",
"(",
"x",
")",
"added",
"=",
"True",
"break",
"if",
"not",
"added",
":",
"if",
"len",
"(",
"combined",
")",
"==",
"queue_size",
":",
"yield",
"combined",
"[",
"0",
"]",
".",
"to_dict",
"(",
")",
"combined",
"=",
"combined",
"[",
"1",
":",
"]",
"combined",
".",
"append",
"(",
"packer",
"(",
"x",
",",
"spacing",
")",
")",
"for",
"c",
"in",
"combined",
":",
"yield",
"c",
".",
"to_dict",
"(",
")"
] |
Pack examples into longer examples.
If has_inputs=False, we are packing single-sequence examples with
targets only and no inputs.
In this case, we concatenate the targets from several examples to form
each new example. We insert a number of zeros for spacing between the
original sequences. This is to help the sequences stay separate
under convolutions. If chop_long_sequences is set, then any input sequence
longer than packed_length gets chopped up into multiple examples. Otherwise,
long sequences are emitted as singletons.
If has_inputs=True, then we are packing sequence-to-sequence
examples. We combine several examples by concatenating the inputs
(as above) and concatenating the targets (as above). Chopping of
long sequences is not supported.
The packed examples are represented as dictionaries containing:
"inputs", "targets": the packed sequences described above
"inputs_segmentation", "targets_segmentation":
Sequences aligned with "inputs", "targets" specifying to which original
sequence each position belongs. Numbering starts from 1, and 0 is used
for spacing. This information is useful for preventing attention across
segments.
e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4]
"inputs_position", "targets_position":
Sequences aligned with "inputs", "targets" specifying position within
the original sequence. This is useful for positional encodings.
e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2]
Args:
examples: a generator returning feature dictionaries.
has_inputs: a boolean
packed_length: an integer
spacing: an integer
queue_size: an integer
chop_long_sequences: a boolean
Yields:
feature dictionaries.
|
[
"Pack",
"examples",
"into",
"longer",
"examples",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L589-L660
|
train
|
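To see the packing described in the docstring, it helps to run pack_examples on a few short targets-only examples. This is an illustrative sketch only; it assumes tensor2tensor is installed so that SequencePacker from this module is available.

from tensor2tensor.data_generators import generator_utils

examples = iter([
    {"targets": [1, 2, 3]},
    {"targets": [4, 5]},
    {"targets": [6]},
])
# With packed_length=16 all three fit into one packed example, separated
# by `spacing` zeros; segmentation ids run 1, 2, 3 and positions restart.
for packed in generator_utils.pack_examples(
    examples, has_inputs=False, packed_length=16, spacing=2):
  print(packed)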
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
_pack_with_custom_ops
|
def _pack_with_custom_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset.
"""
from tensor2tensor.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top
# faster and better packing but requires custom-built binary.
k1, k2 = keys
def map_fn_custom(x):
"""Map-function."""
(k1_packed, k1_segmengation, k1_position,
k2_packed, k2_segmentation, k2_position) = (
pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
packed = {
k1: k1_packed,
k1 + "_segmentation": k1_segmengation,
k1 + "_position": k1_position,
k2: k2_packed,
k2 + "_segmentation": k2_segmentation,
k2 + "_position": k2_position,
}
return tf.data.Dataset.from_tensor_slices(packed)
dataset = dataset.flat_map(map_fn_custom)
return dataset
|
python
|
def _pack_with_custom_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset.
"""
from tensor2tensor.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top
# faster and better packing but requires custom-built binary.
k1, k2 = keys
def map_fn_custom(x):
"""Map-function."""
    (k1_packed, k1_segmentation, k1_position,
k2_packed, k2_segmentation, k2_position) = (
pack_sequences_ops.pack_sequences2(x[k1], x[k2], length))
packed = {
k1: k1_packed,
k1 + "_segmentation": k1_segmengation,
k1 + "_position": k1_position,
k2: k2_packed,
k2 + "_segmentation": k2_segmentation,
k2 + "_position": k2_position,
}
return tf.data.Dataset.from_tensor_slices(packed)
dataset = dataset.flat_map(map_fn_custom)
return dataset
|
[
"def",
"_pack_with_custom_ops",
"(",
"dataset",
",",
"keys",
",",
"length",
")",
":",
"from",
"tensor2tensor",
".",
"data_generators",
".",
"ops",
"import",
"pack_sequences_ops",
"# pylint: disable=g-import-not-at-top",
"# faster and better packing but requires custom-built binary.",
"k1",
",",
"k2",
"=",
"keys",
"def",
"map_fn_custom",
"(",
"x",
")",
":",
"\"\"\"Map-function.\"\"\"",
"(",
"k1_packed",
",",
"k1_segmengation",
",",
"k1_position",
",",
"k2_packed",
",",
"k2_segmentation",
",",
"k2_position",
")",
"=",
"(",
"pack_sequences_ops",
".",
"pack_sequences2",
"(",
"x",
"[",
"k1",
"]",
",",
"x",
"[",
"k2",
"]",
",",
"length",
")",
")",
"packed",
"=",
"{",
"k1",
":",
"k1_packed",
",",
"k1",
"+",
"\"_segmentation\"",
":",
"k1_segmengation",
",",
"k1",
"+",
"\"_position\"",
":",
"k1_position",
",",
"k2",
":",
"k2_packed",
",",
"k2",
"+",
"\"_segmentation\"",
":",
"k2_segmentation",
",",
"k2",
"+",
"\"_position\"",
":",
"k2_position",
",",
"}",
"return",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"packed",
")",
"dataset",
"=",
"dataset",
".",
"flat_map",
"(",
"map_fn_custom",
")",
"return",
"dataset"
] |
Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a list of strings (must have length 2)
length: an integer
Returns:
a dataset.
|
[
"Helper",
"-",
"function",
"for",
"packing",
"a",
"dataset",
"which",
"has",
"already",
"been",
"batched",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L736-L770
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
make_tmp_dir
|
def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
"""Make a temporary directory."""
if dir is None:
return tempfile.mkdtemp(suffix, prefix, dir)
else:
while True:
rand_term = random.randint(1, 9999)
tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix))
if tf.gfile.Exists(tmp_dir):
continue
tf.gfile.MakeDirs(tmp_dir)
break
return tmp_dir
|
python
|
def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin
"""Make a temporary directory."""
if dir is None:
return tempfile.mkdtemp(suffix, prefix, dir)
else:
while True:
rand_term = random.randint(1, 9999)
tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix))
if tf.gfile.Exists(tmp_dir):
continue
tf.gfile.MakeDirs(tmp_dir)
break
return tmp_dir
|
[
"def",
"make_tmp_dir",
"(",
"suffix",
"=",
"\"\"",
",",
"prefix",
"=",
"\"tmp\"",
",",
"dir",
"=",
"None",
")",
":",
"# pylint: disable=redefined-builtin",
"if",
"dir",
"is",
"None",
":",
"return",
"tempfile",
".",
"mkdtemp",
"(",
"suffix",
",",
"prefix",
",",
"dir",
")",
"else",
":",
"while",
"True",
":",
"rand_term",
"=",
"random",
".",
"randint",
"(",
"1",
",",
"9999",
")",
"tmp_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"%s%d%s\"",
"%",
"(",
"prefix",
",",
"rand_term",
",",
"suffix",
")",
")",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"tmp_dir",
")",
":",
"continue",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"tmp_dir",
")",
"break",
"return",
"tmp_dir"
] |
Make a temporary directory.
|
[
"Make",
"a",
"temporary",
"directory",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L883-L895
|
train
|
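Usage is straightforward: with dir=None the call simply defers to tempfile.mkdtemp, otherwise it probes random names under dir until one is free. A small sketch, assuming tensor2tensor is installed:

from tensor2tensor.data_generators import generator_utils

tmp = generator_utils.make_tmp_dir(prefix="t2t_demo")  # defers to tempfile.mkdtemp
print(tmp)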
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
tfrecord_iterator_for_problem
|
def tfrecord_iterator_for_problem(problem, data_dir,
dataset_split=tf.estimator.ModeKeys.TRAIN):
"""Iterate over the records on disk for the Problem."""
filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split))
example_spec = problem.example_reading_spec()[0]
return tfrecord_iterator(filenames, example_spec=example_spec)
|
python
|
def tfrecord_iterator_for_problem(problem, data_dir,
dataset_split=tf.estimator.ModeKeys.TRAIN):
"""Iterate over the records on disk for the Problem."""
filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split))
example_spec = problem.example_reading_spec()[0]
return tfrecord_iterator(filenames, example_spec=example_spec)
|
[
"def",
"tfrecord_iterator_for_problem",
"(",
"problem",
",",
"data_dir",
",",
"dataset_split",
"=",
"tf",
".",
"estimator",
".",
"ModeKeys",
".",
"TRAIN",
")",
":",
"filenames",
"=",
"tf",
".",
"gfile",
".",
"Glob",
"(",
"problem",
".",
"filepattern",
"(",
"data_dir",
",",
"mode",
"=",
"dataset_split",
")",
")",
"example_spec",
"=",
"problem",
".",
"example_reading_spec",
"(",
")",
"[",
"0",
"]",
"return",
"tfrecord_iterator",
"(",
"filenames",
",",
"example_spec",
"=",
"example_spec",
")"
] |
Iterate over the records on disk for the Problem.
|
[
"Iterate",
"over",
"the",
"records",
"on",
"disk",
"for",
"the",
"Problem",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L898-L903
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
tfrecord_iterator
|
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
"""Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files.
"""
with tf.Graph().as_default():
dataset = tf.data.Dataset.from_tensor_slices(filenames)
def _load_records(filename):
return tf.data.TFRecordDataset(
filename,
compression_type=tf.constant("GZIP") if gzipped else None,
buffer_size=16 * 1000 * 1000)
dataset = dataset.flat_map(_load_records)
def _parse_example(ex_ser):
return tf.parse_single_example(ex_ser, example_spec)
if example_spec:
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
yield ex
except tf.errors.OutOfRangeError:
break
|
python
|
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
"""Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files.
"""
with tf.Graph().as_default():
dataset = tf.data.Dataset.from_tensor_slices(filenames)
def _load_records(filename):
return tf.data.TFRecordDataset(
filename,
compression_type=tf.constant("GZIP") if gzipped else None,
buffer_size=16 * 1000 * 1000)
dataset = dataset.flat_map(_load_records)
def _parse_example(ex_ser):
return tf.parse_single_example(ex_ser, example_spec)
if example_spec:
dataset = dataset.map(_parse_example, num_parallel_calls=32)
dataset = dataset.prefetch(100)
record_it = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
while True:
try:
ex = sess.run(record_it)
yield ex
except tf.errors.OutOfRangeError:
break
|
[
"def",
"tfrecord_iterator",
"(",
"filenames",
",",
"gzipped",
"=",
"False",
",",
"example_spec",
"=",
"None",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"filenames",
")",
"def",
"_load_records",
"(",
"filename",
")",
":",
"return",
"tf",
".",
"data",
".",
"TFRecordDataset",
"(",
"filename",
",",
"compression_type",
"=",
"tf",
".",
"constant",
"(",
"\"GZIP\"",
")",
"if",
"gzipped",
"else",
"None",
",",
"buffer_size",
"=",
"16",
"*",
"1000",
"*",
"1000",
")",
"dataset",
"=",
"dataset",
".",
"flat_map",
"(",
"_load_records",
")",
"def",
"_parse_example",
"(",
"ex_ser",
")",
":",
"return",
"tf",
".",
"parse_single_example",
"(",
"ex_ser",
",",
"example_spec",
")",
"if",
"example_spec",
":",
"dataset",
"=",
"dataset",
".",
"map",
"(",
"_parse_example",
",",
"num_parallel_calls",
"=",
"32",
")",
"dataset",
"=",
"dataset",
".",
"prefetch",
"(",
"100",
")",
"record_it",
"=",
"dataset",
".",
"make_one_shot_iterator",
"(",
")",
".",
"get_next",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"while",
"True",
":",
"try",
":",
"ex",
"=",
"sess",
".",
"run",
"(",
"record_it",
")",
"yield",
"ex",
"except",
"tf",
".",
"errors",
".",
"OutOfRangeError",
":",
"break"
] |
Yields records from TFRecord files.
Args:
filenames: list<str>, list of TFRecord filenames to read from.
gzipped: bool, whether the TFRecord files are gzip-encoded.
example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
if provided, will parse each record as a tensorflow.Example proto.
Yields:
Records (or parsed Examples, if example_spec is provided) from files.
|
[
"Yields",
"records",
"from",
"TFRecord",
"files",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L906-L943
|
train
|
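A typical use is to stream parsed Examples out of a shard. This sketch assumes a TF 1.x runtime (as the rest of the file does), a targets-only feature spec, and a hypothetical shard path:

import tensorflow as tf
from tensor2tensor.data_generators import generator_utils

spec = {"targets": tf.VarLenFeature(tf.int64)}
# "/tmp/data/my-problem-train-00000-of-00001" is a made-up shard name.
for ex in generator_utils.tfrecord_iterator(
    ["/tmp/data/my-problem-train-00000-of-00001"], example_spec=spec):
  print(ex["targets"])  # a SparseTensorValue under TF 1.x
  break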
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/generator_utils.py
|
random_deinterleave
|
def random_deinterleave(text, separator_symbol="X"):
"""Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings
"""
words = text.strip().split(" ")
n = len(words)
if n <= 1:
return text, ""
cut = [False] * n
cut[0] = True
num_cuts = int(math.exp(random.uniform(0, math.log(n))))
for _ in range(num_cuts):
    cut[random.randint(1, n - 1)] = True
out = [[], []]
part = random.randint(0, 1)
for i in range(n):
if cut[i]:
out[part].append(separator_symbol)
part = 1 - part
out[part].append(words[i])
return " ".join(out[0]), " ".join(out[1])
|
python
|
def random_deinterleave(text, separator_symbol="X"):
"""Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings
"""
words = text.strip().split(" ")
n = len(words)
if n <= 1:
return text, ""
cut = [False] * n
cut[0] = True
num_cuts = int(math.exp(random.uniform(0, math.log(n))))
for _ in range(num_cuts):
    cut[random.randint(1, n - 1)] = True
out = [[], []]
part = random.randint(0, 1)
for i in range(n):
if cut[i]:
out[part].append(separator_symbol)
part = 1 - part
out[part].append(words[i])
return " ".join(out[0]), " ".join(out[1])
|
[
"def",
"random_deinterleave",
"(",
"text",
",",
"separator_symbol",
"=",
"\"X\"",
")",
":",
"words",
"=",
"text",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\" \"",
")",
"n",
"=",
"len",
"(",
"words",
")",
"if",
"n",
"<=",
"1",
":",
"return",
"text",
",",
"\"\"",
"cut",
"=",
"[",
"False",
"]",
"*",
"n",
"cut",
"[",
"0",
"]",
"=",
"True",
"num_cuts",
"=",
"int",
"(",
"math",
".",
"exp",
"(",
"random",
".",
"uniform",
"(",
"0",
",",
"math",
".",
"log",
"(",
"n",
")",
")",
")",
")",
"for",
"_",
"in",
"range",
"(",
"num_cuts",
")",
":",
"cut",
"[",
"random",
".",
"randint",
"(",
"1",
",",
"n",
"-",
"1",
")",
"]",
"=",
"True",
"out",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"part",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"if",
"cut",
"[",
"i",
"]",
":",
"out",
"[",
"part",
"]",
".",
"append",
"(",
"separator_symbol",
")",
"part",
"=",
"1",
"-",
"part",
"out",
"[",
"part",
"]",
".",
"append",
"(",
"words",
"[",
"i",
"]",
")",
"return",
"\" \"",
".",
"join",
"(",
"out",
"[",
"0",
"]",
")",
",",
"\" \"",
".",
"join",
"(",
"out",
"[",
"1",
"]",
")"
] |
Create a fill-in-the-blanks training example from text.
Split on spaces, then cut into segments at random points. Alternate segments
are assigned to the two output strings. separator_symbol separates segments
within each of the outputs.
example:
text="The quick brown fox jumps over the lazy dog."
returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.")
The two outputs can also be reversed to yield an instance of the same problem.
Args:
text: a string
separator_symbol: a string
Returns:
a pair of strings
|
[
"Create",
"a",
"fill",
"-",
"in",
"-",
"the",
"-",
"blanks",
"training",
"example",
"from",
"text",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L946-L981
|
train
|
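Because the cut points are random, each call yields a different split; the docstring's example is one possible draw. A quick check, assuming tensor2tensor is installed:

from tensor2tensor.data_generators import generator_utils

a, b = generator_utils.random_deinterleave(
    "The quick brown fox jumps over the lazy dog.")
print(a)  # e.g. "X quick brown X the lazy X"
print(b)  # e.g. "The X fox jumps over X dog."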
tensorflow/tensor2tensor
|
tensor2tensor/models/neural_gpu.py
|
neural_gpu_body
|
def neural_gpu_body(inputs, hparams, name=None):
"""The core Neural GPU."""
with tf.variable_scope(name, "neural_gpu"):
def step(state, inp): # pylint: disable=missing-docstring
x = tf.nn.dropout(state, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
x = common_layers.conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state
return tf.foldl(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=inputs,
parallel_iterations=1,
swap_memory=True)
|
python
|
def neural_gpu_body(inputs, hparams, name=None):
"""The core Neural GPU."""
with tf.variable_scope(name, "neural_gpu"):
def step(state, inp): # pylint: disable=missing-docstring
x = tf.nn.dropout(state, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
x = common_layers.conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state
return tf.foldl(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=inputs,
parallel_iterations=1,
swap_memory=True)
|
[
"def",
"neural_gpu_body",
"(",
"inputs",
",",
"hparams",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"\"neural_gpu\"",
")",
":",
"def",
"step",
"(",
"state",
",",
"inp",
")",
":",
"# pylint: disable=missing-docstring",
"x",
"=",
"tf",
".",
"nn",
".",
"dropout",
"(",
"state",
",",
"1.0",
"-",
"hparams",
".",
"dropout",
")",
"for",
"layer",
"in",
"range",
"(",
"hparams",
".",
"num_hidden_layers",
")",
":",
"x",
"=",
"common_layers",
".",
"conv_gru",
"(",
"x",
",",
"(",
"hparams",
".",
"kernel_height",
",",
"hparams",
".",
"kernel_width",
")",
",",
"hparams",
".",
"hidden_size",
",",
"name",
"=",
"\"cgru_%d\"",
"%",
"layer",
")",
"# Padding input is zeroed-out in the modality, we check this by summing.",
"padding_inp",
"=",
"tf",
".",
"less",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"inp",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
"]",
")",
",",
"0.00001",
")",
"new_state",
"=",
"tf",
".",
"where",
"(",
"padding_inp",
",",
"state",
",",
"x",
")",
"# No-op where inp is padding.",
"return",
"new_state",
"return",
"tf",
".",
"foldl",
"(",
"step",
",",
"tf",
".",
"transpose",
"(",
"inputs",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
",",
"initializer",
"=",
"inputs",
",",
"parallel_iterations",
"=",
"1",
",",
"swap_memory",
"=",
"True",
")"
] |
The core Neural GPU.
|
[
"The",
"core",
"Neural",
"GPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/neural_gpu.py#L31-L52
|
train
|
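The core of neural_gpu_body is a fold of `step` over the time axis: tf.foldl threads the state through each time slice, and tf.where keeps the old state wherever the slice is all-zero padding. Below is a simplified numpy analog of that control flow; unlike the real code (whose initializer is the full input tensor), the state here is a single slice, and the conv-GRU stack is replaced by a toy update.

import numpy as np

def foldl(step, elems, initializer):
  # Minimal analog of tf.foldl: thread the state through the leading axis.
  state = initializer
  for inp in elems:
    state = step(state, inp)
  return state

def step(state, inp):
  padding = np.abs(inp).sum(axis=(1, 2)) < 1e-5       # (batch,) padding mask
  x = state + inp                                     # toy stand-in for the conv-GRU stack
  return np.where(padding[:, None, None], state, x)   # no-op on padding

inputs = np.random.rand(2, 4, 3, 5)   # time-major: (length, batch, width, depth)
out = foldl(step, inputs, initializer=np.zeros_like(inputs[0]))
print(out.shape)                      # (4, 3, 5)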
tensorflow/tensor2tensor
|
tensor2tensor/models/neural_gpu.py
|
diagonal_neural_gpu
|
def diagonal_neural_gpu(inputs, hparams, name=None):
"""Improved Neural GPU as in https://arxiv.org/abs/1702.08727."""
with tf.variable_scope(name, "diagonal_neural_gpu"):
def step(state_tup, inp):
"""Single step of the improved Neural GPU."""
state, _ = state_tup
x = state
for layer in range(hparams.num_hidden_layers):
x, new_loss = common_layers.diagonal_conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
dropout=hparams.dropout,
name="dcgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state, new_loss
final_state, losses = tf.scan(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=(inputs, tf.constant(0.0)),
parallel_iterations=1,
swap_memory=True)
return final_state[0, :, :, :, :], 2.0 * tf.reduce_mean(losses)
|
python
|
def diagonal_neural_gpu(inputs, hparams, name=None):
"""Improved Neural GPU as in https://arxiv.org/abs/1702.08727."""
with tf.variable_scope(name, "diagonal_neural_gpu"):
def step(state_tup, inp):
"""Single step of the improved Neural GPU."""
state, _ = state_tup
x = state
for layer in range(hparams.num_hidden_layers):
x, new_loss = common_layers.diagonal_conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
dropout=hparams.dropout,
name="dcgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state, new_loss
final_state, losses = tf.scan(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=(inputs, tf.constant(0.0)),
parallel_iterations=1,
swap_memory=True)
return final_state[0, :, :, :, :], 2.0 * tf.reduce_mean(losses)
|
[
"def",
"diagonal_neural_gpu",
"(",
"inputs",
",",
"hparams",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"\"diagonal_neural_gpu\"",
")",
":",
"def",
"step",
"(",
"state_tup",
",",
"inp",
")",
":",
"\"\"\"Single step of the improved Neural GPU.\"\"\"",
"state",
",",
"_",
"=",
"state_tup",
"x",
"=",
"state",
"for",
"layer",
"in",
"range",
"(",
"hparams",
".",
"num_hidden_layers",
")",
":",
"x",
",",
"new_loss",
"=",
"common_layers",
".",
"diagonal_conv_gru",
"(",
"x",
",",
"(",
"hparams",
".",
"kernel_height",
",",
"hparams",
".",
"kernel_width",
")",
",",
"hparams",
".",
"hidden_size",
",",
"dropout",
"=",
"hparams",
".",
"dropout",
",",
"name",
"=",
"\"dcgru_%d\"",
"%",
"layer",
")",
"# Padding input is zeroed-out in the modality, we check this by summing.",
"padding_inp",
"=",
"tf",
".",
"less",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"abs",
"(",
"inp",
")",
",",
"axis",
"=",
"[",
"1",
",",
"2",
"]",
")",
",",
"0.00001",
")",
"new_state",
"=",
"tf",
".",
"where",
"(",
"padding_inp",
",",
"state",
",",
"x",
")",
"# No-op where inp is padding.",
"return",
"new_state",
",",
"new_loss",
"final_state",
",",
"losses",
"=",
"tf",
".",
"scan",
"(",
"step",
",",
"tf",
".",
"transpose",
"(",
"inputs",
",",
"[",
"1",
",",
"0",
",",
"2",
",",
"3",
"]",
")",
",",
"initializer",
"=",
"(",
"inputs",
",",
"tf",
".",
"constant",
"(",
"0.0",
")",
")",
",",
"parallel_iterations",
"=",
"1",
",",
"swap_memory",
"=",
"True",
")",
"return",
"final_state",
"[",
"0",
",",
":",
",",
":",
",",
":",
",",
":",
"]",
",",
"2.0",
"*",
"tf",
".",
"reduce_mean",
"(",
"losses",
")"
] |
Improved Neural GPU as in https://arxiv.org/abs/1702.08727.
|
[
"Improved",
"Neural",
"GPU",
"as",
"in",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1702",
".",
"08727",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/neural_gpu.py#L62-L87
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
_reorder_shape
|
def _reorder_shape(input_shape, output=None): # pylint: disable=invalid-name
"""Helper to determine the shape of reorder output."""
if output is None:
return input_shape
return base.nested_map(output, lambda i: input_shape[i])
|
python
|
def _reorder_shape(input_shape, output=None): # pylint: disable=invalid-name
"""Helper to determine the shape of reorder output."""
if output is None:
return input_shape
return base.nested_map(output, lambda i: input_shape[i])
|
[
"def",
"_reorder_shape",
"(",
"input_shape",
",",
"output",
"=",
"None",
")",
":",
"# pylint: disable=invalid-name",
"if",
"output",
"is",
"None",
":",
"return",
"input_shape",
"return",
"base",
".",
"nested_map",
"(",
"output",
",",
"lambda",
"i",
":",
"input_shape",
"[",
"i",
"]",
")"
] |
Helper to determine the shape of reorder output.
|
[
"Helper",
"to",
"determine",
"the",
"shape",
"of",
"reorder",
"output",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L77-L81
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
Reorder
|
def Reorder(x, params, output=None, **kwargs):
"""Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output.
"""
del params, kwargs
if output is None:
return x
return base.nested_map(output, lambda i: x[i])
|
python
|
def Reorder(x, params, output=None, **kwargs):
"""Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output.
"""
del params, kwargs
if output is None:
return x
return base.nested_map(output, lambda i: x[i])
|
[
"def",
"Reorder",
"(",
"x",
",",
"params",
",",
"output",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"params",
",",
"kwargs",
"if",
"output",
"is",
"None",
":",
"return",
"x",
"return",
"base",
".",
"nested_map",
"(",
"output",
",",
"lambda",
"i",
":",
"x",
"[",
"i",
"]",
")"
] |
Reorder a tuple into another tuple.
For example, we can re-order (x, y) into (y, x) or even (y, (x, y), y).
The output argument specifies how to re-order, using integers that refer
to indices in the input tuple. For example, if
input = (x, y, z)
then
Reorder(input, output=(1, 0, 2)) = (y, x, z)
Reorder(input, output=(0, 0)) = (x, x)
Reorder(input, output=(0, (1, 1))) = (x, (y, y))
Reorder(input, output=((2, 0), (1, 1))) = ((z, x), (y, y))
By default (if no output is given) Reorder does nothing (Identity).
Args:
x: the input tuple to re-order.
params: layer parameters (unused).
output: the specification of the output tuple: a nested tuple of ints.
**kwargs: other arguments (unused).
Returns:
The re-ordered tuple with the same shape as output.
|
[
"Reorder",
"a",
"tuple",
"into",
"another",
"tuple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L85-L115
|
train
|
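Stripped of the layer machinery, Reorder is just recursive indexing into the input tuple. A dependency-free sketch of that logic, assuming base.nested_map behaves as the recursion below:

def nested_index(output, x):
  # Recursively replace each int i in `output` with x[i].
  if isinstance(output, (list, tuple)):
    return tuple(nested_index(o, x) for o in output)
  return x[output]

inp = ("x", "y", "z")
print(nested_index((1, 0, 2), inp))         # ('y', 'x', 'z')
print(nested_index((0, (1, 1)), inp))       # ('x', ('y', 'y'))
print(nested_index(((2, 0), (1, 1)), inp))  # (('z', 'x'), ('y', 'y'))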
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
_nested_op
|
def _nested_op(inputs, op): # pylint: disable=invalid-name
"""Helper: sum a list of arrays or nested arrays."""
# First the simple non-nested case.
if not isinstance(inputs[0], (list, tuple)):
return op(inputs)
# In the nested case, sum on each axis separately.
result_list = []
for i in range(len(inputs[0])):
result_list.append(_nested_op([x[i] for x in inputs], op=op))
if isinstance(inputs[0], list):
return result_list
return tuple(result_list)
|
python
|
def _nested_op(inputs, op): # pylint: disable=invalid-name
"""Helper: sum a list of arrays or nested arrays."""
# First the simple non-nested case.
if not isinstance(inputs[0], (list, tuple)):
return op(inputs)
# In the nested case, sum on each axis separately.
result_list = []
for i in range(len(inputs[0])):
result_list.append(_nested_op([x[i] for x in inputs], op=op))
if isinstance(inputs[0], list):
return result_list
return tuple(result_list)
|
[
"def",
"_nested_op",
"(",
"inputs",
",",
"op",
")",
":",
"# pylint: disable=invalid-name",
"# First the simple non-nested case.",
"if",
"not",
"isinstance",
"(",
"inputs",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"op",
"(",
"inputs",
")",
"# In the nested case, sum on each axis separately.",
"result_list",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inputs",
"[",
"0",
"]",
")",
")",
":",
"result_list",
".",
"append",
"(",
"_nested_op",
"(",
"[",
"x",
"[",
"i",
"]",
"for",
"x",
"in",
"inputs",
"]",
",",
"op",
"=",
"op",
")",
")",
"if",
"isinstance",
"(",
"inputs",
"[",
"0",
"]",
",",
"list",
")",
":",
"return",
"result_list",
"return",
"tuple",
"(",
"result_list",
")"
] |
Helper: sum a list of arrays or nested arrays.
|
[
"Helper",
":",
"sum",
"a",
"list",
"of",
"arrays",
"or",
"nested",
"arrays",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L134-L145
|
train
|
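The recursion is easiest to see with plain Python numbers in place of arrays. A self-contained sketch of the same elementwise reduction, specialized to summation:

def nested_sum(inputs):
  # Sum a list of (possibly nested) tuples elementwise, as _nested_op does.
  if not isinstance(inputs[0], (list, tuple)):
    return sum(inputs)
  return tuple(nested_sum([x[i] for x in inputs])
               for i in range(len(inputs[0])))

print(nested_sum([1, 2, 3]))                      # 6
print(nested_sum([(1, (2, 3)), (10, (20, 30))]))  # (11, (22, 33))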
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
GateBranches
|
def GateBranches(x, **unused_kwargs):
"""Implements a gating function on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1-gate) * candidate
  This gating equation is also known as the Highway Network gate.
Highway Networks: https://arxiv.org/abs/1505.00387
Args:
x: A tuple of (memory, gate, candidate)
Returns:
The result of applying gating.
"""
assert len(x) == 3, x
state, gate, candidate = x
return gate * state + (1.0 - gate) * candidate
|
python
|
def GateBranches(x, **unused_kwargs):
"""Implements a gating function on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1-gate) * candidate
  This gating equation is also known as the Highway Network gate.
Highway Networks: https://arxiv.org/abs/1505.00387
Args:
x: A tuple of (memory, gate, candidate)
Returns:
The result of applying gating.
"""
assert len(x) == 3, x
state, gate, candidate = x
return gate * state + (1.0 - gate) * candidate
|
[
"def",
"GateBranches",
"(",
"x",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"3",
",",
"x",
"state",
",",
"gate",
",",
"candidate",
"=",
"x",
"return",
"gate",
"*",
"state",
"+",
"(",
"1.0",
"-",
"gate",
")",
"*",
"candidate"
] |
Implements a gating function on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1-gate) * candidate
This gating equation is also known as the Highway Network gate.
Highway Networks: https://arxiv.org/abs/1505.00387
Args:
x: A tuple of (memory, gate, candidate)
Returns:
The result of applying gating.
|
[
"Implements",
"a",
"gating",
"function",
"on",
"a",
"(",
"memory",
"gate",
"candidate",
")",
"tuple",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L170-L186
|
train
|
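The gating equation is worth seeing with numbers: gate values of 1 keep the memory, 0 take the candidate, and anything in between interpolates. A numpy sketch:

import numpy as np

state = np.array([1.0, 1.0, 1.0])      # memory
gate = np.array([1.0, 0.5, 0.0])       # 1 keeps memory, 0 takes the candidate
candidate = np.array([9.0, 9.0, 9.0])

print(gate * state + (1.0 - gate) * candidate)  # [1. 5. 9.]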
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
_concatenate_shape
|
def _concatenate_shape(input_shape, axis=-1): # pylint: disable=invalid-name
"""Helper to determine the shape of Concatenate output."""
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape
|
python
|
def _concatenate_shape(input_shape, axis=-1): # pylint: disable=invalid-name
"""Helper to determine the shape of Concatenate output."""
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape
|
[
"def",
"_concatenate_shape",
"(",
"input_shape",
",",
"axis",
"=",
"-",
"1",
")",
":",
"# pylint: disable=invalid-name",
"ax",
"=",
"axis",
"%",
"len",
"(",
"input_shape",
"[",
"0",
"]",
")",
"concat_size",
"=",
"sum",
"(",
"shape",
"[",
"ax",
"]",
"for",
"shape",
"in",
"input_shape",
")",
"out_shape",
"=",
"input_shape",
"[",
"0",
"]",
"[",
":",
"ax",
"]",
"+",
"(",
"concat_size",
",",
")",
"+",
"input_shape",
"[",
"0",
"]",
"[",
"ax",
"+",
"1",
":",
"]",
"return",
"out_shape"
] |
Helper to determine the shape of Concatenate output.
|
[
"Helper",
"to",
"determine",
"the",
"shape",
"of",
"Concatenate",
"output",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L189-L194
|
train
|
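The arithmetic above is easy to sanity-check by hand: the concatenation axis sums across the input shapes, and every other dimension is taken from the first shape. A standalone copy of the same computation:

def concatenate_shape(input_shape, axis=-1):
  # Same arithmetic as the helper above.
  ax = axis % len(input_shape[0])
  concat_size = sum(shape[ax] for shape in input_shape)
  return input_shape[0][:ax] + (concat_size,) + input_shape[0][ax + 1:]

print(concatenate_shape([(8, 32, 128), (8, 32, 64)]))           # (8, 32, 192)
print(concatenate_shape([(8, 32, 128), (8, 16, 128)], axis=1))  # (8, 48, 128)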
tensorflow/tensor2tensor
|
tensor2tensor/trax/layers/combinators.py
|
Residual
|
def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output."""
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(Serial(*layers), shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
elif len(layers) == 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(layers[0], shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
else:
raise ValueError('Empty residual combinator.')
|
python
|
def Residual(*layers, **kwargs):
"""Constructs a residual version of layers, summing input to layers output."""
shortcut = kwargs.get('shortcut', Identity()) # pylint: disable=no-value-for-parameter
if len(layers) > 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(Serial(*layers), shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
elif len(layers) == 1:
return Serial(
Branch(), # pylint: disable=no-value-for-parameter
Parallel(layers[0], shortcut),
SumBranches() # pylint: disable=no-value-for-parameter
)
else:
raise ValueError('Empty residual combinator.')
|
[
"def",
"Residual",
"(",
"*",
"layers",
",",
"*",
"*",
"kwargs",
")",
":",
"shortcut",
"=",
"kwargs",
".",
"get",
"(",
"'shortcut'",
",",
"Identity",
"(",
")",
")",
"# pylint: disable=no-value-for-parameter",
"if",
"len",
"(",
"layers",
")",
">",
"1",
":",
"return",
"Serial",
"(",
"Branch",
"(",
")",
",",
"# pylint: disable=no-value-for-parameter",
"Parallel",
"(",
"Serial",
"(",
"*",
"layers",
")",
",",
"shortcut",
")",
",",
"SumBranches",
"(",
")",
"# pylint: disable=no-value-for-parameter",
")",
"elif",
"len",
"(",
"layers",
")",
"==",
"1",
":",
"return",
"Serial",
"(",
"Branch",
"(",
")",
",",
"# pylint: disable=no-value-for-parameter",
"Parallel",
"(",
"layers",
"[",
"0",
"]",
",",
"shortcut",
")",
",",
"SumBranches",
"(",
")",
"# pylint: disable=no-value-for-parameter",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Empty residual combinator.'",
")"
] |
Constructs a residual version of layers, summing input to layers output.
|
[
"Constructs",
"a",
"residual",
"version",
"of",
"layers",
"summing",
"input",
"to",
"layers",
"output",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/layers/combinators.py#L240-L256
|
train
|
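Functionally the combinator computes body(x) + shortcut(x), with the shortcut defaulting to the identity. A numpy sketch of that composition (not the trax layer API itself):

import numpy as np

def residual(body, x, shortcut=lambda v: v):
  # Branch the input, apply body and shortcut, sum the branches.
  return body(x) + shortcut(x)

x = np.ones(4)
print(residual(lambda v: 2.0 * v, x))  # body plus identity shortcut -> [3. 3. 3. 3.]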
tensorflow/tensor2tensor
|
tensor2tensor/rl/policy_learner.py
|
PolicyLearner.train
|
def train(
self,
env_fn,
hparams,
simulated,
save_continuously,
epoch,
sampling_temp=1.0,
num_env_steps=None,
env_step_multiplier=1,
eval_env_fn=None,
report_fn=None
):
"""Train."""
raise NotImplementedError()
|
python
|
def train(
self,
env_fn,
hparams,
simulated,
save_continuously,
epoch,
sampling_temp=1.0,
num_env_steps=None,
env_step_multiplier=1,
eval_env_fn=None,
report_fn=None
):
"""Train."""
raise NotImplementedError()
|
[
"def",
"train",
"(",
"self",
",",
"env_fn",
",",
"hparams",
",",
"simulated",
",",
"save_continuously",
",",
"epoch",
",",
"sampling_temp",
"=",
"1.0",
",",
"num_env_steps",
"=",
"None",
",",
"env_step_multiplier",
"=",
"1",
",",
"eval_env_fn",
"=",
"None",
",",
"report_fn",
"=",
"None",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
Train.
|
[
"Train",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/policy_learner.py#L34-L48
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
update_hparams_for_universal_transformer
|
def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
    hparams: default hparams (usually one of the standard hparams from
      the transformer model, like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
"""
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam("mix_with_transformer", None)
  # Number of vanilla transformer layers to be mixed in with the u-transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam("num_inrecurrence_layers", 1)
  # Type of recurrence:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
  # Number of steps (equivalent to the number of layers in a transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
  # Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", True)
if hparams.add_position_timing_signal:
hparams.pos = None
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
  # Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", True)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
  # Type of ffn layer to use; the default set below is "fc".
# Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "fc")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam("lstm_forget_bias", 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
  # If true, also add an ffn unit to the transition function when using gru/lstm.
hparams.add_hparam("add_ffn_unit_to_the_transition_function", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams
|
python
|
def update_hparams_for_universal_transformer(hparams):
"""Adds default hparams for all of the variants of the Universal Transformer.
Args:
    hparams: default hparams (usually one of the standard hparams from
      the transformer model, like "transformer_base")
Returns:
hparams with default values for Universal Transformers hyper-parameters
"""
hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops.
# If not None, mixes vanilla transformer with Universal Transformer.
# Options: None, "before_ut", and "after_ut".
hparams.add_hparam("mix_with_transformer", None)
  # Number of vanilla transformer layers to be mixed in with the u-transformer.
hparams.add_hparam("num_mixedin_layers", 2)
# Number of transformer layers within the recurrent block (default is 1).
hparams.add_hparam("num_inrecurrence_layers", 1)
  # Type of recurrence:
# basic, highway, skip, dwa, act, rnn, gru, lstm.
hparams.add_hparam("recurrence_type", "basic")
  # Number of steps (equivalent to the number of layers in a transformer).
hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers)
  # Add the positional embedding at each step (horizontal timing).
hparams.add_hparam("add_position_timing_signal", True)
if hparams.add_position_timing_signal:
hparams.pos = None
# Logic of position shifting when using timing signal:
# None, "random", "step"
hparams.add_hparam("position_start_index", None)
  # Add a step embedding at each step (vertical timing).
hparams.add_hparam("add_step_timing_signal", True)
# Either "learned" or "sinusoid"
hparams.add_hparam("step_timing_signal_type", "learned")
# Add or concat the timing signal (applied both on position and step timing).
# Options: "add" and "concat".
hparams.add_hparam("add_or_concat_timing_signal", "add")
# Add SRU at the beginning of each Universal Transformer step.
# This can be considered as a position timing signal
hparams.add_hparam("add_sru", False)
  # Type of ffn layer to use; the default set below is "fc".
# Options: "fc" and "sepconv".
hparams.add_hparam("transformer_ffn_type", "fc")
# Transform bias (in models with highway or skip connection).
hparams.add_hparam("transform_bias_init", -1.0)
hparams.add_hparam("couple_carry_transform_gates", True)
# Depth-wise attention (grid-transformer!) hparams:
# Adds depth embedding, if true.
hparams.add_hparam("depth_embedding", True)
# Learns attention weights for elements (instead of positions), if true.
hparams.add_hparam("dwa_elements", True)
# Type of ffn_layer used for gate in skip, highway, etc.
# "dense" or "dense_dropconnect".
# With dense_relu_dense, the bias/kernel initializations will not be applied.
hparams.add_hparam("gate_ffn_layer", "dense")
# LSTM forget bias for lstm style recurrence.
hparams.add_hparam("lstm_forget_bias", 1.0)
# Uses the memory at the last step as the final output, if true.
hparams.add_hparam("use_memory_as_final_state", False)
  # If true, also add an ffn unit to the transition function when using gru/lstm.
hparams.add_hparam("add_ffn_unit_to_the_transition_function", False)
# Type of act: basic/accumulated/global (instead of position-wise!)/random.
hparams.add_hparam("act_type", "basic")
# Max number of steps (forces halting at this step).
hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers)
hparams.add_hparam("act_halting_bias_init", 1.0)
hparams.add_hparam("act_epsilon", 0.01)
hparams.add_hparam("act_loss_weight", 0.01)
return hparams
|
[
"def",
"update_hparams_for_universal_transformer",
"(",
"hparams",
")",
":",
"hparams",
".",
"daisy_chain_variables",
"=",
"False",
"# Breaks multi-gpu in while loops.",
"# If not None, mixes vanilla transformer with Universal Transformer.",
"# Options: None, \"before_ut\", and \"after_ut\".",
"hparams",
".",
"add_hparam",
"(",
"\"mix_with_transformer\"",
",",
"None",
")",
"# Number of vanilla transformer layers used to be mixed with u-transofmer.",
"hparams",
".",
"add_hparam",
"(",
"\"num_mixedin_layers\"",
",",
"2",
")",
"# Number of transformer layers within the recurrent block (default is 1).",
"hparams",
".",
"add_hparam",
"(",
"\"num_inrecurrence_layers\"",
",",
"1",
")",
"# Type of recurrency:",
"# basic, highway, skip, dwa, act, rnn, gru, lstm.",
"hparams",
".",
"add_hparam",
"(",
"\"recurrence_type\"",
",",
"\"basic\"",
")",
"# Number of steps (which is equivalent to num layer in transformer).",
"hparams",
".",
"add_hparam",
"(",
"\"num_rec_steps\"",
",",
"hparams",
".",
"num_hidden_layers",
")",
"# Add the positional mebedding at each step(horisontal timing)",
"hparams",
".",
"add_hparam",
"(",
"\"add_position_timing_signal\"",
",",
"True",
")",
"if",
"hparams",
".",
"add_position_timing_signal",
":",
"hparams",
".",
"pos",
"=",
"None",
"# Logic of position shifting when using timing signal:",
"# None, \"random\", \"step\"",
"hparams",
".",
"add_hparam",
"(",
"\"position_start_index\"",
",",
"None",
")",
"# Add an step embedding at each step (vertical timing)",
"hparams",
".",
"add_hparam",
"(",
"\"add_step_timing_signal\"",
",",
"True",
")",
"# Either \"learned\" or \"sinusoid\"",
"hparams",
".",
"add_hparam",
"(",
"\"step_timing_signal_type\"",
",",
"\"learned\"",
")",
"# Add or concat the timing signal (applied both on position and step timing).",
"# Options: \"add\" and \"concat\".",
"hparams",
".",
"add_hparam",
"(",
"\"add_or_concat_timing_signal\"",
",",
"\"add\"",
")",
"# Add SRU at the beginning of each Universal Transformer step.",
"# This can be considered as a position timing signal",
"hparams",
".",
"add_hparam",
"(",
"\"add_sru\"",
",",
"False",
")",
"# Default ffn layer is separable convolution.",
"# Options: \"fc\" and \"sepconv\".",
"hparams",
".",
"add_hparam",
"(",
"\"transformer_ffn_type\"",
",",
"\"fc\"",
")",
"# Transform bias (in models with highway or skip connection).",
"hparams",
".",
"add_hparam",
"(",
"\"transform_bias_init\"",
",",
"-",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"couple_carry_transform_gates\"",
",",
"True",
")",
"# Depth-wise attention (grid-transformer!) hparams:",
"# Adds depth embedding, if true.",
"hparams",
".",
"add_hparam",
"(",
"\"depth_embedding\"",
",",
"True",
")",
"# Learns attention weights for elements (instead of positions), if true.",
"hparams",
".",
"add_hparam",
"(",
"\"dwa_elements\"",
",",
"True",
")",
"# Type of ffn_layer used for gate in skip, highway, etc.",
"# \"dense\" or \"dense_dropconnect\".",
"# With dense_relu_dense, the bias/kernel initializations will not be applied.",
"hparams",
".",
"add_hparam",
"(",
"\"gate_ffn_layer\"",
",",
"\"dense\"",
")",
"# LSTM forget bias for lstm style recurrence.",
"hparams",
".",
"add_hparam",
"(",
"\"lstm_forget_bias\"",
",",
"1.0",
")",
"# Uses the memory at the last step as the final output, if true.",
"hparams",
".",
"add_hparam",
"(",
"\"use_memory_as_final_state\"",
",",
"False",
")",
"# if also add a ffn unit to the transition function when using gru/lstm",
"hparams",
".",
"add_hparam",
"(",
"\"add_ffn_unit_to_the_transition_function\"",
",",
"False",
")",
"# Type of act: basic/accumulated/global (instead of position-wise!)/random.",
"hparams",
".",
"add_hparam",
"(",
"\"act_type\"",
",",
"\"basic\"",
")",
"# Max number of steps (forces halting at this step).",
"hparams",
".",
"add_hparam",
"(",
"\"act_max_steps\"",
",",
"2",
"*",
"hparams",
".",
"num_hidden_layers",
")",
"hparams",
".",
"add_hparam",
"(",
"\"act_halting_bias_init\"",
",",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"act_epsilon\"",
",",
"0.01",
")",
"hparams",
".",
"add_hparam",
"(",
"\"act_loss_weight\"",
",",
"0.01",
")",
"return",
"hparams"
] |
Adds default hparams for all of the variants of the Universal Transformer.
Args:
hparams: default hparams (usually one of the standard hparams from
the transformer model, like "transformer_base").
Returns:
hparams with default values for Universal Transformer hyper-parameters.
|
[
"Adds",
"default",
"hparams",
"for",
"all",
"of",
"the",
"variants",
"of",
"the",
"Universal",
"Transformer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L352-L436
|
train
|
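A minimal sketch of how the defaults above compose, e.g. act_max_steps being derived from num_hidden_layers. FakeHParams is a hypothetical stand-in for the real TensorFlow hparams object, assuming only the add_hparam behavior the calls above rely on:

class FakeHParams(object):
  """Hypothetical stand-in for the hparams object used above."""

  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)

  def add_hparam(self, name, value):
    # The real class rejects duplicate names; mirror that behavior here.
    if hasattr(self, name):
      raise ValueError("Hyper-parameter already set: %s" % name)
    setattr(self, name, value)

hp = FakeHParams(num_hidden_layers=6)
hp.add_hparam("act_max_steps", 2 * hp.num_hidden_layers)
assert hp.act_max_steps == 12  # halting is forced after 2x the layer count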
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
universal_transformer_base
|
def universal_transformer_base():
"""Base parameters for Universal Transformer."""
hparams = transformer.transformer_base()
# To have a similar capacity to the transformer_base with 6 layers,
# we need to increase the size of the UT's layer
# since, in fact, UT has a single layer repeating multiple times.
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams = update_hparams_for_universal_transformer(hparams)
return hparams
|
python
|
def universal_transformer_base():
"""Base parameters for Universal Transformer."""
hparams = transformer.transformer_base()
# To have a similar capacity to the transformer_base with 6 layers,
# we need to increase the size of the UT's layer
# since, in fact, UT has a single layer repeating multiple times.
hparams.hidden_size = 1024
hparams.filter_size = 4096
hparams.num_heads = 16
hparams.layer_prepostprocess_dropout = 0.3
hparams = update_hparams_for_universal_transformer(hparams)
return hparams
|
[
"def",
"universal_transformer_base",
"(",
")",
":",
"hparams",
"=",
"transformer",
".",
"transformer_base",
"(",
")",
"# To have a similar capacity to the transformer_base with 6 layers,",
"# we need to increase the size of the UT's layer",
"# since, in fact, UT has a single layer repeating multiple times.",
"hparams",
".",
"hidden_size",
"=",
"1024",
"hparams",
".",
"filter_size",
"=",
"4096",
"hparams",
".",
"num_heads",
"=",
"16",
"hparams",
".",
"layer_prepostprocess_dropout",
"=",
"0.3",
"hparams",
"=",
"update_hparams_for_universal_transformer",
"(",
"hparams",
")",
"return",
"hparams"
] |
Base parameters for Universal Transformer.
|
[
"Base",
"parameters",
"for",
"Universal",
"Transformer",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L440-L451
|
train
|
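A rough back-of-the-envelope sketch (my own approximation, not from the repository) of the capacity reasoning in the comment above: a transformer block has roughly 4*h*h attention parameters plus 2*h*f feed-forward parameters, so widening the single shared UT layer partially compensates for dropping from six distinct layers to one:

def approx_block_params(hidden_size, filter_size):
  attention = 4 * hidden_size * hidden_size  # Q, K, V and output projections
  feed_forward = 2 * hidden_size * filter_size
  return attention + feed_forward

base_total = 6 * approx_block_params(512, 2048)  # transformer_base: 6 layers
ut_shared = approx_block_params(1024, 4096)      # one shared, wider layer
print(base_total, ut_shared)  # roughly 18.9M vs 12.6M for the shared block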
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
adaptive_universal_transformer_multilayer_tpu
|
def adaptive_universal_transformer_multilayer_tpu():
"""Multi-layer config for adaptive Transformer on TPU."""
hparams = adaptive_universal_transformer_base_tpu()
hparams.num_inrecurrence_layers = 2
hparams.mix_with_transformer = "before_ut,after_ut"
hparams.num_mixedin_layers = 1
hparams.transformer_ffn_type = "sepconv"
# TODO(lukaszkaiser): the options below don't work on TPU yet, make them work.
# hparams.add_step_timing_signal = True
# hparams.add_sru = True
# hparams.self_attention_type = "dot_product_relative_v2"
# hparams.max_relative_position = 256
return hparams
|
python
|
def adaptive_universal_transformer_multilayer_tpu():
"""Multi-layer config for adaptive Transformer on TPU."""
hparams = adaptive_universal_transformer_base_tpu()
hparams.num_inrecurrence_layers = 2
hparams.mix_with_transformer = "before_ut,after_ut"
hparams.num_mixedin_layers = 1
hparams.transformer_ffn_type = "sepconv"
# TODO(lukaszkaiser): the options below don't work on TPU yet, make them work.
# hparams.add_step_timing_signal = True
# hparams.add_sru = True
# hparams.self_attention_type = "dot_product_relative_v2"
# hparams.max_relative_position = 256
return hparams
|
[
"def",
"adaptive_universal_transformer_multilayer_tpu",
"(",
")",
":",
"hparams",
"=",
"adaptive_universal_transformer_base_tpu",
"(",
")",
"hparams",
".",
"num_inrecurrence_layers",
"=",
"2",
"hparams",
".",
"mix_with_transformer",
"=",
"\"before_ut,after_ut\"",
"hparams",
".",
"num_mixedin_layers",
"=",
"1",
"hparams",
".",
"transformer_ffn_type",
"=",
"\"sepconv\"",
"# TODO(lukaszkaiser): the options below don't work on TPU yet, make them work.",
"# hparams.add_step_timing_signal = True",
"# hparams.add_sru = True",
"# hparams.self_attention_type = \"dot_product_relative_v2\"",
"# hparams.max_relative_position = 256",
"return",
"hparams"
] |
Multi-layer config for adaptive Transformer on TPU.
|
[
"Multi",
"-",
"layer",
"config",
"for",
"adaptive",
"Transformer",
"on",
"TPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L543-L555
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
adaptive_universal_transformer_multilayer_hard
|
def adaptive_universal_transformer_multilayer_hard():
"""Multi-layer config for adaptive Transformer with hard attention."""
hparams = adaptive_universal_transformer_multilayer_tpu()
hparams.batch_size = 256
hparams.hard_attention_k = 8
hparams.add_step_timing_signal = True
# hparams.add_sru = True # This is very slow on GPUs, does it help?
hparams.self_attention_type = "dot_product_relative_v2"
hparams.max_relative_position = 256
return hparams
|
python
|
def adaptive_universal_transformer_multilayer_hard():
"""Multi-layer config for adaptive Transformer with hard attention."""
hparams = adaptive_universal_transformer_multilayer_tpu()
hparams.batch_size = 256
hparams.hard_attention_k = 8
hparams.add_step_timing_signal = True
# hparams.add_sru = True # This is very slow on GPUs, does it help?
hparams.self_attention_type = "dot_product_relative_v2"
hparams.max_relative_position = 256
return hparams
|
[
"def",
"adaptive_universal_transformer_multilayer_hard",
"(",
")",
":",
"hparams",
"=",
"adaptive_universal_transformer_multilayer_tpu",
"(",
")",
"hparams",
".",
"batch_size",
"=",
"256",
"hparams",
".",
"hard_attention_k",
"=",
"8",
"hparams",
".",
"add_step_timing_signal",
"=",
"True",
"# hparams.add_sru = True # This is very slow on GPUs, does it help?",
"hparams",
".",
"self_attention_type",
"=",
"\"dot_product_relative_v2\"",
"hparams",
".",
"max_relative_position",
"=",
"256",
"return",
"hparams"
] |
Multi-layer config for adaptive Transformer with hard attention.
|
[
"Multi",
"-",
"layer",
"config",
"for",
"adaptive",
"Transformer",
"with",
"hard",
"attention",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L559-L568
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
universal_transformer_base_range
|
def universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("num_rec_steps", [6, 8, 10])
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
|
python
|
def universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("num_rec_steps", [6, 8, 10])
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
|
[
"def",
"universal_transformer_base_range",
"(",
"rhp",
")",
":",
"# After starting from base, set intervals for some parameters.",
"rhp",
".",
"set_discrete",
"(",
"\"num_rec_steps\"",
",",
"[",
"6",
",",
"8",
",",
"10",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"hidden_size\"",
",",
"[",
"1024",
",",
"2048",
",",
"4096",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"filter_size\"",
",",
"[",
"2048",
",",
"4096",
",",
"8192",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"num_heads\"",
",",
"[",
"8",
",",
"16",
",",
"32",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"transformer_ffn_type\"",
",",
"[",
"\"sepconv\"",
",",
"\"fc\"",
"]",
")",
"rhp",
".",
"set_float",
"(",
"\"learning_rate\"",
",",
"0.3",
",",
"3.0",
",",
"scale",
"=",
"rhp",
".",
"LOG_SCALE",
")",
"rhp",
".",
"set_float",
"(",
"\"weight_decay\"",
",",
"0.0",
",",
"2.0",
")"
] |
Range of hyperparameters.
|
[
"Range",
"of",
"hyperparameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L788-L797
|
train
|
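A minimal sketch of the ranged-hparams interface the function above drives, assuming set_discrete registers a categorical choice and set_float with LOG_SCALE means sampling uniformly in log space. FakeRangedHParams is a hypothetical stand-in for the real tuning object:

import math
import random

class FakeRangedHParams(object):
  LOG_SCALE = "log"

  def __init__(self):
    self.space = {}

  def set_discrete(self, name, values):
    self.space[name] = ("discrete", values)

  def set_float(self, name, low, high, scale=None):
    self.space[name] = ("float", low, high, scale)

  def sample(self):
    point = {}
    for name, spec in self.space.items():
      if spec[0] == "discrete":
        point[name] = random.choice(spec[1])
      else:
        _, low, high, scale = spec
        if scale == self.LOG_SCALE:  # uniform over the exponent
          point[name] = math.exp(random.uniform(math.log(low), math.log(high)))
        else:
          point[name] = random.uniform(low, high)
    return point

rhp = FakeRangedHParams()
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
print(rhp.sample())  # e.g. {'num_heads': 16, 'learning_rate': 0.87}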
tensorflow/tensor2tensor
|
tensor2tensor/models/research/universal_transformer.py
|
adaptive_universal_transformer_base_range
|
def adaptive_universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("act_max_steps", [8, 16, 32])
rhp.set_float("act_loss_weight", 0.0, 0.5)
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
|
python
|
def adaptive_universal_transformer_base_range(rhp):
"""Range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_discrete("act_max_steps", [8, 16, 32])
rhp.set_float("act_loss_weight", 0.0, 0.5)
rhp.set_discrete("hidden_size", [1024, 2048, 4096])
rhp.set_discrete("filter_size", [2048, 4096, 8192])
rhp.set_discrete("num_heads", [8, 16, 32])
rhp.set_discrete("transformer_ffn_type", ["sepconv", "fc"])
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
|
[
"def",
"adaptive_universal_transformer_base_range",
"(",
"rhp",
")",
":",
"# After starting from base, set intervals for some parameters.",
"rhp",
".",
"set_discrete",
"(",
"\"act_max_steps\"",
",",
"[",
"8",
",",
"16",
",",
"32",
"]",
")",
"rhp",
".",
"set_float",
"(",
"\"act_loss_weight\"",
",",
"0.0",
",",
"0.5",
")",
"rhp",
".",
"set_discrete",
"(",
"\"hidden_size\"",
",",
"[",
"1024",
",",
"2048",
",",
"4096",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"filter_size\"",
",",
"[",
"2048",
",",
"4096",
",",
"8192",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"num_heads\"",
",",
"[",
"8",
",",
"16",
",",
"32",
"]",
")",
"rhp",
".",
"set_discrete",
"(",
"\"transformer_ffn_type\"",
",",
"[",
"\"sepconv\"",
",",
"\"fc\"",
"]",
")",
"rhp",
".",
"set_float",
"(",
"\"learning_rate\"",
",",
"0.3",
",",
"3.0",
",",
"scale",
"=",
"rhp",
".",
"LOG_SCALE",
")",
"rhp",
".",
"set_float",
"(",
"\"weight_decay\"",
",",
"0.0",
",",
"2.0",
")"
] |
Range of hyperparameters.
|
[
"Range",
"of",
"hyperparameters",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/universal_transformer.py#L801-L811
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/neural_gpu.py
|
DiagonalGate
|
def DiagonalGate(x, params, **kwargs):
"""Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
del params
del kwargs
# x : [batch, 1, length, depth]
x = np.pad(
x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant', constant_values=0.0)
depth = x.shape[-1] // 3
assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
x.shape)
xs = [
x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],
x[:, :, 2:, 2 * depth:3 * depth]
]
return np.concatenate(xs, axis=3)
|
python
|
def DiagonalGate(x, params, **kwargs):
"""Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
del params
del kwargs
# x : [batch, 1, length, depth]
x = np.pad(
x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode='constant', constant_values=0.0)
depth = x.shape[-1] // 3
assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
x.shape)
xs = [
x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],
x[:, :, 2:, 2 * depth:3 * depth]
]
return np.concatenate(xs, axis=3)
|
[
"def",
"DiagonalGate",
"(",
"x",
",",
"params",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"params",
"del",
"kwargs",
"# x : [batch, 1, length, depth]",
"x",
"=",
"np",
".",
"pad",
"(",
"x",
",",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
")",
",",
"(",
"1",
",",
"1",
")",
",",
"(",
"0",
",",
"0",
")",
"]",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"0.0",
")",
"depth",
"=",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
"//",
"3",
"assert",
"3",
"*",
"depth",
"==",
"x",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"(",
"'Depth must be divisible by 3'",
",",
"depth",
",",
"x",
".",
"shape",
")",
"xs",
"=",
"[",
"x",
"[",
":",
",",
":",
",",
":",
"-",
"2",
",",
":",
"depth",
"]",
",",
"x",
"[",
":",
",",
":",
",",
"1",
":",
"-",
"1",
",",
"depth",
":",
"2",
"*",
"depth",
"]",
",",
"x",
"[",
":",
",",
":",
",",
"2",
":",
",",
"2",
"*",
"depth",
":",
"3",
"*",
"depth",
"]",
"]",
"return",
"np",
".",
"concatenate",
"(",
"xs",
",",
"axis",
"=",
"3",
")"
] |
Split channels in 3 parts. Shifts 1st and 3rd sections to left/right.
|
[
"Split",
"channels",
"in",
"3",
"parts",
".",
"Shifts",
"1st",
"and",
"3rd",
"sections",
"to",
"left",
"/",
"right",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/neural_gpu.py#L33-L47
|
train
|
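A plain-NumPy illustration of what the slicing above computes, on a toy input of shape [batch=1, 1, length=4, depth=3] so that each third of the channels is a single channel (the np in the record is trax's jax.numpy; onp here is ordinary NumPy):

import numpy as onp

x = onp.arange(12, dtype=onp.float32).reshape(1, 1, 4, 3)
padded = onp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)], mode="constant")
left = padded[:, :, :-2, 0:1]   # channel 0 at row i comes from position i-1
mid = padded[:, :, 1:-1, 1:2]   # channel 1 stays at position i
right = padded[:, :, 2:, 2:3]   # channel 2 at row i comes from position i+1
out = onp.concatenate([left, mid, right], axis=3)
print(out[0, 0])  # each row now mixes its left/right neighbors diagonally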
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/neural_gpu.py
|
ConvDiagonalGRU
|
def ConvDiagonalGRU(units, kernel_size=(3, 3)):
"""Build convolutional GRU with diagonal gating as in ImprovedNGPU."""
def BuildConv():
return layers.Conv(filters=units, kernel_size=kernel_size, padding='SAME')
return layers.GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform=DiagonalGate,
gate_nonlinearity=layers.HardSigmoid,
candidate_nonlinearity=layers.HardTanh)
|
python
|
def ConvDiagonalGRU(units, kernel_size=(3, 3)):
"""Build convolutional GRU with diagonal gating as in ImprovedNGPU."""
def BuildConv():
return layers.Conv(filters=units, kernel_size=kernel_size, padding='SAME')
return layers.GeneralGRUCell(
candidate_transform=BuildConv,
memory_transform=DiagonalGate,
gate_nonlinearity=layers.HardSigmoid,
candidate_nonlinearity=layers.HardTanh)
|
[
"def",
"ConvDiagonalGRU",
"(",
"units",
",",
"kernel_size",
"=",
"(",
"3",
",",
"3",
")",
")",
":",
"def",
"BuildConv",
"(",
")",
":",
"return",
"layers",
".",
"Conv",
"(",
"filters",
"=",
"units",
",",
"kernel_size",
"=",
"kernel_size",
",",
"padding",
"=",
"'SAME'",
")",
"return",
"layers",
".",
"GeneralGRUCell",
"(",
"candidate_transform",
"=",
"BuildConv",
",",
"memory_transform",
"=",
"DiagonalGate",
",",
"gate_nonlinearity",
"=",
"layers",
".",
"HardSigmoid",
",",
"candidate_nonlinearity",
"=",
"layers",
".",
"HardTanh",
")"
] |
Build convolutional GRU with diagonal gating as in ImprovedNGPU.
|
[
"Build",
"convolutional",
"GRU",
"with",
"diagonal",
"gating",
"as",
"in",
"ImprovedNGPU",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/neural_gpu.py#L50-L60
|
train
|
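A sketch of the gate arithmetic a GRU-style cell performs, assuming the standard update-gate formulation; the hard_sigmoid/hard_tanh below are generic piecewise-linear stand-ins for sigmoid and tanh, and their exact slopes and offsets in the library may differ:

import numpy as np

def hard_sigmoid(x):
  return np.clip(0.25 * x + 0.5, 0.0, 1.0)

def hard_tanh(x):
  return np.clip(x, -1.0, 1.0)

def gru_step(state, update_logits, candidate_logits):
  u = hard_sigmoid(update_logits)  # gate: how much old state to keep
  c = hard_tanh(candidate_logits)  # candidate: proposed new content
  return u * state + (1.0 - u) * c

print(gru_step(np.array([1.0, -1.0]),
               np.array([4.0, -4.0]),
               np.array([0.5, 0.5])))  # -> [1.0, 0.5]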
tensorflow/tensor2tensor
|
tensor2tensor/trax/models/neural_gpu.py
|
NeuralGPU
|
def NeuralGPU(feature_depth=96, steps=16, vocab_size=2):
"""Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
feature_depth: Number of memory channels
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
Returns:
A NeuralGPU Stax model.
"""
xs = []
xs.append(
layers.Embedding(feature_depth=feature_depth, vocab_size=vocab_size))
core = ConvDiagonalGRU(units=feature_depth)
xs.extend([core] * steps)
xs.append(layers.Dense(vocab_size))
xs.append(layers.LogSoftmax())
return layers.Serial(*xs)
|
python
|
def NeuralGPU(feature_depth=96, steps=16, vocab_size=2):
"""Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
feature_depth: Number of memory channels
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
Returns:
A NeuralGPU Stax model.
"""
xs = []
xs.append(
layers.Embedding(feature_depth=feature_depth, vocab_size=vocab_size))
core = ConvDiagonalGRU(units=feature_depth)
xs.extend([core] * steps)
xs.append(layers.Dense(vocab_size))
xs.append(layers.LogSoftmax())
return layers.Serial(*xs)
|
[
"def",
"NeuralGPU",
"(",
"feature_depth",
"=",
"96",
",",
"steps",
"=",
"16",
",",
"vocab_size",
"=",
"2",
")",
":",
"xs",
"=",
"[",
"]",
"xs",
".",
"append",
"(",
"layers",
".",
"Embedding",
"(",
"feature_depth",
"=",
"feature_depth",
",",
"vocab_size",
"=",
"vocab_size",
")",
")",
"core",
"=",
"ConvDiagonalGRU",
"(",
"units",
"=",
"feature_depth",
")",
"xs",
".",
"extend",
"(",
"[",
"core",
"]",
"*",
"steps",
")",
"xs",
".",
"append",
"(",
"layers",
".",
"Dense",
"(",
"vocab_size",
")",
")",
"xs",
".",
"append",
"(",
"layers",
".",
"LogSoftmax",
"(",
")",
")",
"return",
"layers",
".",
"Serial",
"(",
"*",
"xs",
")"
] |
Implementation of Neural GPU: https://arxiv.org/abs/1702.08727.
Args:
feature_depth: Number of memory channels
steps: Number of depthwise recurrence steps.
vocab_size: Vocabulary size.
Returns:
A NeuralGPU Stax model.
|
[
"Implementation",
"of",
"Neural",
"GPU",
":",
"https",
":",
"//",
"arxiv",
".",
"org",
"/",
"abs",
"/",
"1702",
".",
"08727",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/neural_gpu.py#L63-L82
|
train
|
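A small arithmetic note on the stack assembled above: with the defaults, Serial receives 1 embedding layer, 16 repetitions of the recurrent core, a Dense projection and a LogSoftmax, i.e. 19 sublayers in total (whether the 16 repeated entries of the same core object end up sharing parameters depends on the framework's handling of a repeated layer, which is not verified here):

steps = 16
num_sublayers = 1 + steps + 2  # Embedding + recurrence steps + Dense + LogSoftmax
assert num_sublayers == 19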
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
strip_ids
|
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
|
python
|
def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids
|
[
"def",
"strip_ids",
"(",
"ids",
",",
"ids_to_strip",
")",
":",
"ids",
"=",
"list",
"(",
"ids",
")",
"while",
"ids",
"and",
"ids",
"[",
"-",
"1",
"]",
"in",
"ids_to_strip",
":",
"ids",
".",
"pop",
"(",
")",
"return",
"ids"
] |
Strip ids_to_strip from the end of ids.
|
[
"Strip",
"ids_to_strip",
"from",
"the",
"end",
"ids",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L99-L104
|
train
|
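A standalone usage example, restating the function above so the snippet runs on its own: only trailing ids from ids_to_strip are removed; interior occurrences survive:

def strip_ids(ids, ids_to_strip):
  ids = list(ids)
  while ids and ids[-1] in ids_to_strip:
    ids.pop()
  return ids

print(strip_ids([5, 0, 7, 1, 0, 0], ids_to_strip=[0, 1]))  # -> [5, 0, 7]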
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
_escape_token
|
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
|
python
|
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
|
[
"def",
"_escape_token",
"(",
"token",
",",
"alphabet",
")",
":",
"if",
"not",
"isinstance",
"(",
"token",
",",
"six",
".",
"text_type",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected string type for token, got %s\"",
"%",
"type",
"(",
"token",
")",
")",
"token",
"=",
"token",
".",
"replace",
"(",
"u\"\\\\\"",
",",
"u\"\\\\\\\\\"",
")",
".",
"replace",
"(",
"u\"_\"",
",",
"u\"\\\\u\"",
")",
"ret",
"=",
"[",
"c",
"if",
"c",
"in",
"alphabet",
"and",
"c",
"!=",
"u\"\\n\"",
"else",
"r\"\\%d;\"",
"%",
"ord",
"(",
"c",
")",
"for",
"c",
"in",
"token",
"]",
"return",
"u\"\"",
".",
"join",
"(",
"ret",
")",
"+",
"\"_\""
] |
Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
|
[
"Escape",
"away",
"underscores",
"and",
"OOV",
"characters",
"and",
"append",
"_",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L400-L422
|
train
|
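A standalone rerun of the escaping scheme above (the six.text_type check is dropped for brevity), showing both the underscore rewrite and the \<ord>; escape for out-of-alphabet characters:

def escape_token(token, alphabet):
  token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
  ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
  return u"".join(ret) + "_"

alphabet = set("abcdefghijklmnopqrstuvwxyz") | {u"\\"}
print(escape_token(u"ab_c", alphabet))  # ab\uc_   ("_" became "\u")
print(escape_token(u"a!b", alphabet))   # a\33;b_  (ord("!") == 33)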
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TextEncoder.encode
|
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()]
|
python
|
def encode(self, s):
"""Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
"""
return [int(w) + self._num_reserved_ids for w in s.split()]
|
[
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"return",
"[",
"int",
"(",
"w",
")",
"+",
"self",
".",
"_num_reserved_ids",
"for",
"w",
"in",
"s",
".",
"split",
"(",
")",
"]"
] |
Transform a human-readable string into a sequence of int ids.
The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
num_reserved_ids) are reserved.
EOS is not appended.
Args:
s: human-readable string to be converted.
Returns:
ids: list of integers
|
[
"Transform",
"a",
"human",
"-",
"readable",
"string",
"into",
"a",
"sequence",
"of",
"int",
"ids",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L117-L131
|
train
|
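A concrete illustration of the shift performed above, assuming the usual default of two reserved ids (PAD and EOS) so user-visible ids start at 2:

num_reserved_ids = 2  # assumed default: PAD=0, EOS=1
s = "0 1 2"
ids = [int(w) + num_reserved_ids for w in s.split()]
print(ids)  # [2, 3, 4]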
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TextEncoder.decode
|
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids))
|
python
|
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return " ".join(self.decode_list(ids))
|
[
"def",
"decode",
"(",
"self",
",",
"ids",
",",
"strip_extraneous",
"=",
"False",
")",
":",
"if",
"strip_extraneous",
":",
"ids",
"=",
"strip_ids",
"(",
"ids",
",",
"list",
"(",
"range",
"(",
"self",
".",
"_num_reserved_ids",
"or",
"0",
")",
")",
")",
"return",
"\" \"",
".",
"join",
"(",
"self",
".",
"decode_list",
"(",
"ids",
")",
")"
] |
Transform a sequence of int ids into a human-readable string.
EOS is not expected in ids.
Args:
ids: list of integers to be converted.
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
s: human-readable string.
|
[
"Transform",
"a",
"sequence",
"of",
"int",
"ids",
"into",
"a",
"human",
"-",
"readable",
"string",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L133-L148
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TextEncoder.decode_list
|
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids]
|
python
|
def decode_list(self, ids):
"""Transform a sequence of int ids into a their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
"""
decoded_ids = []
for id_ in ids:
if 0 <= id_ < self._num_reserved_ids:
decoded_ids.append(RESERVED_TOKENS[int(id_)])
else:
decoded_ids.append(id_ - self._num_reserved_ids)
return [str(d) for d in decoded_ids]
|
[
"def",
"decode_list",
"(",
"self",
",",
"ids",
")",
":",
"decoded_ids",
"=",
"[",
"]",
"for",
"id_",
"in",
"ids",
":",
"if",
"0",
"<=",
"id_",
"<",
"self",
".",
"_num_reserved_ids",
":",
"decoded_ids",
".",
"append",
"(",
"RESERVED_TOKENS",
"[",
"int",
"(",
"id_",
")",
"]",
")",
"else",
":",
"decoded_ids",
".",
"append",
"(",
"id_",
"-",
"self",
".",
"_num_reserved_ids",
")",
"return",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"decoded_ids",
"]"
] |
Transform a sequence of int ids into their string versions.
This method supports transforming individual input/output ids to their
string versions so that sequence to/from text conversions can be visualized
in a human readable format.
Args:
ids: list of integers to be converted.
Returns:
strs: list of human-readable string.
|
[
"Transform",
"a",
"sequence",
"of",
"int",
"ids",
"into",
"a",
"their",
"string",
"versions",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L150-L169
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TokenTextEncoder.encode
|
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret
|
python
|
def encode(self, s):
"""Converts a space-separated string of tokens to a list of ids."""
sentence = s
tokens = sentence.strip().split()
if self._replace_oov is not None:
tokens = [t if t in self._token_to_id else self._replace_oov
for t in tokens]
ret = [self._token_to_id[tok] for tok in tokens]
return ret[::-1] if self._reverse else ret
|
[
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"sentence",
"=",
"s",
"tokens",
"=",
"sentence",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"self",
".",
"_replace_oov",
"is",
"not",
"None",
":",
"tokens",
"=",
"[",
"t",
"if",
"t",
"in",
"self",
".",
"_token_to_id",
"else",
"self",
".",
"_replace_oov",
"for",
"t",
"in",
"tokens",
"]",
"ret",
"=",
"[",
"self",
".",
"_token_to_id",
"[",
"tok",
"]",
"for",
"tok",
"in",
"tokens",
"]",
"return",
"ret",
"[",
":",
":",
"-",
"1",
"]",
"if",
"self",
".",
"_reverse",
"else",
"ret"
] |
Converts a space-separated string of tokens to a list of ids.
|
[
"Converts",
"a",
"space",
"-",
"separated",
"string",
"of",
"tokens",
"to",
"a",
"list",
"of",
"ids",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L314-L322
|
train
|
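A standalone rerun of the token-level encoding above with a toy vocabulary: OOV tokens are rewritten to the replacement token before lookup, and the result is reversed when the encoder was constructed with reversal enabled:

token_to_id = {"<UNK>": 0, "hello": 1, "world": 2}
replace_oov = "<UNK>"

def encode(s, reverse=False):
  tokens = s.strip().split()
  tokens = [t if t in token_to_id else replace_oov for t in tokens]
  ret = [token_to_id[tok] for tok in tokens]
  return ret[::-1] if reverse else ret

print(encode("hello bogus world"))                # [1, 0, 2]
print(encode("hello bogus world", reverse=True))  # [2, 0, 1]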
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TokenTextEncoder._init_vocab_from_file
|
def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with tf.gfile.Open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens=False)
|
python
|
def _init_vocab_from_file(self, filename):
"""Load vocab from a file.
Args:
filename: The file to load vocabulary from.
"""
with tf.gfile.Open(filename) as f:
tokens = [token.strip() for token in f.readlines()]
def token_gen():
for token in tokens:
yield token
self._init_vocab(token_gen(), add_reserved_tokens=False)
|
[
"def",
"_init_vocab_from_file",
"(",
"self",
",",
"filename",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"filename",
")",
"as",
"f",
":",
"tokens",
"=",
"[",
"token",
".",
"strip",
"(",
")",
"for",
"token",
"in",
"f",
".",
"readlines",
"(",
")",
"]",
"def",
"token_gen",
"(",
")",
":",
"for",
"token",
"in",
"tokens",
":",
"yield",
"token",
"self",
".",
"_init_vocab",
"(",
"token_gen",
"(",
")",
",",
"add_reserved_tokens",
"=",
"False",
")"
] |
Load vocab from a file.
Args:
filename: The file to load vocabulary from.
|
[
"Load",
"vocab",
"from",
"a",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L338-L351
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TokenTextEncoder._init_vocab_from_list
|
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen())
|
python
|
def _init_vocab_from_list(self, vocab_list):
"""Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
"""
def token_gen():
for token in vocab_list:
if token not in RESERVED_TOKENS:
yield token
self._init_vocab(token_gen())
|
[
"def",
"_init_vocab_from_list",
"(",
"self",
",",
"vocab_list",
")",
":",
"def",
"token_gen",
"(",
")",
":",
"for",
"token",
"in",
"vocab_list",
":",
"if",
"token",
"not",
"in",
"RESERVED_TOKENS",
":",
"yield",
"token",
"self",
".",
"_init_vocab",
"(",
"token_gen",
"(",
")",
")"
] |
Initialize tokens from a list of tokens.
It is ok if reserved tokens appear in the vocab list. They will be
removed. The set of tokens in vocab_list should be unique.
Args:
vocab_list: A list of tokens.
|
[
"Initialize",
"tokens",
"from",
"a",
"list",
"of",
"tokens",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L353-L367
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TokenTextEncoder._init_vocab
|
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token))
|
python
|
def _init_vocab(self, token_generator, add_reserved_tokens=True):
"""Initialize vocabulary with tokens from token_generator."""
self._id_to_token = {}
non_reserved_start_index = 0
if add_reserved_tokens:
self._id_to_token.update(enumerate(RESERVED_TOKENS))
non_reserved_start_index = len(RESERVED_TOKENS)
self._id_to_token.update(
enumerate(token_generator, start=non_reserved_start_index))
# _token_to_id is the reverse of _id_to_token
self._token_to_id = dict((v, k)
for k, v in six.iteritems(self._id_to_token))
|
[
"def",
"_init_vocab",
"(",
"self",
",",
"token_generator",
",",
"add_reserved_tokens",
"=",
"True",
")",
":",
"self",
".",
"_id_to_token",
"=",
"{",
"}",
"non_reserved_start_index",
"=",
"0",
"if",
"add_reserved_tokens",
":",
"self",
".",
"_id_to_token",
".",
"update",
"(",
"enumerate",
"(",
"RESERVED_TOKENS",
")",
")",
"non_reserved_start_index",
"=",
"len",
"(",
"RESERVED_TOKENS",
")",
"self",
".",
"_id_to_token",
".",
"update",
"(",
"enumerate",
"(",
"token_generator",
",",
"start",
"=",
"non_reserved_start_index",
")",
")",
"# _token_to_id is the reverse of _id_to_token",
"self",
".",
"_token_to_id",
"=",
"dict",
"(",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_id_to_token",
")",
")"
] |
Initialize vocabulary with tokens from token_generator.
|
[
"Initialize",
"vocabulary",
"with",
"tokens",
"from",
"token_generator",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L369-L384
|
train
|
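A sketch of the id layout the update calls above produce, assuming RESERVED_TOKENS is the usual two-element pad/EOS list (the exact token strings here are assumptions):

RESERVED_TOKENS = ["<pad>", "<EOS>"]  # assumed contents
id_to_token = {}
id_to_token.update(enumerate(RESERVED_TOKENS))
id_to_token.update(enumerate(["the", "cat"], start=len(RESERVED_TOKENS)))
token_to_id = {v: k for k, v in id_to_token.items()}
print(id_to_token)  # {0: '<pad>', 1: '<EOS>', 2: 'the', 3: 'cat'}
print(token_to_id)  # {'<pad>': 0, '<EOS>': 1, 'the': 2, 'cat': 3}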
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
TokenTextEncoder.store_to_file
|
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n")
|
python
|
def store_to_file(self, filename):
"""Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
"""
with tf.gfile.Open(filename, "w") as f:
for i in range(len(self._id_to_token)):
f.write(self._id_to_token[i] + "\n")
|
[
"def",
"store_to_file",
"(",
"self",
",",
"filename",
")",
":",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_id_to_token",
")",
")",
":",
"f",
".",
"write",
"(",
"self",
".",
"_id_to_token",
"[",
"i",
"]",
"+",
"\"\\n\"",
")"
] |
Write vocab file to disk.
Vocab files have one token per line. The file ends in a newline. Reserved
tokens are written to the vocab file as well.
Args:
filename: Full path of the file to store the vocab to.
|
[
"Write",
"vocab",
"file",
"to",
"disk",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L386-L397
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder.decode
|
def decode(self, ids, strip_extraneous=False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
|
python
|
def decode(self, ids, strip_extraneous=False):
"""Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
"""
if strip_extraneous:
ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
return unicode_to_native(
tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
|
[
"def",
"decode",
"(",
"self",
",",
"ids",
",",
"strip_extraneous",
"=",
"False",
")",
":",
"if",
"strip_extraneous",
":",
"ids",
"=",
"strip_ids",
"(",
"ids",
",",
"list",
"(",
"range",
"(",
"self",
".",
"_num_reserved_ids",
"or",
"0",
")",
")",
")",
"return",
"unicode_to_native",
"(",
"tokenizer",
".",
"decode",
"(",
"self",
".",
"_subtoken_ids_to_tokens",
"(",
"ids",
")",
")",
")"
] |
Converts a sequence of subtoken ids to a native string.
Args:
ids: a list of integers in the range [0, vocab_size)
strip_extraneous: bool, whether to strip off extraneous tokens
(EOS and PAD).
Returns:
a native string
|
[
"Converts",
"a",
"sequence",
"of",
"subtoken",
"ids",
"to",
"a",
"native",
"string",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L522-L536
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._tokens_to_subtoken_ids
|
def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret
|
python
|
def _tokens_to_subtoken_ids(self, tokens):
"""Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
"""
ret = []
for token in tokens:
ret.extend(self._token_to_subtoken_ids(token))
return ret
|
[
"def",
"_tokens_to_subtoken_ids",
"(",
"self",
",",
"tokens",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"ret",
".",
"extend",
"(",
"self",
".",
"_token_to_subtoken_ids",
"(",
"token",
")",
")",
"return",
"ret"
] |
Converts a list of tokens to a list of subtoken ids.
Args:
tokens: a list of strings.
Returns:
a list of integers in the range [0, vocab_size)
|
[
"Converts",
"a",
"list",
"of",
"tokens",
"to",
"a",
"list",
"of",
"subtoken",
"ids",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L546-L557
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._token_to_subtoken_ids
|
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret
|
python
|
def _token_to_subtoken_ids(self, token):
"""Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
"""
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret
|
[
"def",
"_token_to_subtoken_ids",
"(",
"self",
",",
"token",
")",
":",
"cache_location",
"=",
"hash",
"(",
"token",
")",
"%",
"self",
".",
"_cache_size",
"cache_key",
",",
"cache_value",
"=",
"self",
".",
"_cache",
"[",
"cache_location",
"]",
"if",
"cache_key",
"==",
"token",
":",
"return",
"cache_value",
"ret",
"=",
"self",
".",
"_escaped_token_to_subtoken_ids",
"(",
"_escape_token",
"(",
"token",
",",
"self",
".",
"_alphabet",
")",
")",
"self",
".",
"_cache",
"[",
"cache_location",
"]",
"=",
"(",
"token",
",",
"ret",
")",
"return",
"ret"
] |
Converts a token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
|
[
"Converts",
"token",
"to",
"a",
"list",
"of",
"subtoken",
"ids",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L559-L574
|
train
|
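The cache above is a fixed-size, direct-mapped table: each token hashes to exactly one slot, a hit returns the stored list, and a miss overwrites whatever occupied the slot. A self-contained rerun of that pattern:

cache_size = 8
cache = [(None, None)] * cache_size

def cached_lookup(token, compute):
  slot = hash(token) % cache_size
  key, value = cache[slot]
  if key == token:
    return value               # hit: reuse the stored result
  value = compute(token)
  cache[slot] = (token, value)  # miss: evict the previous occupant
  return value

print(cached_lookup("hello", lambda t: list(t)))  # computed and cached
print(cached_lookup("hello", lambda t: list(t)))  # served from the cache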
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._subtoken_ids_to_tokens
|
def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(unescaped)
return ret
|
python
|
def _subtoken_ids_to_tokens(self, subtokens):
"""Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
"""
concatenated = "".join(
[self._subtoken_id_to_subtoken_string(s) for s in subtokens])
split = concatenated.split("_")
ret = []
for t in split:
if t:
unescaped = _unescape_token(t + "_")
if unescaped:
ret.append(unescaped)
return ret
|
[
"def",
"_subtoken_ids_to_tokens",
"(",
"self",
",",
"subtokens",
")",
":",
"concatenated",
"=",
"\"\"",
".",
"join",
"(",
"[",
"self",
".",
"_subtoken_id_to_subtoken_string",
"(",
"s",
")",
"for",
"s",
"in",
"subtokens",
"]",
")",
"split",
"=",
"concatenated",
".",
"split",
"(",
"\"_\"",
")",
"ret",
"=",
"[",
"]",
"for",
"t",
"in",
"split",
":",
"if",
"t",
":",
"unescaped",
"=",
"_unescape_token",
"(",
"t",
"+",
"\"_\"",
")",
"if",
"unescaped",
":",
"ret",
".",
"append",
"(",
"unescaped",
")",
"return",
"ret"
] |
Converts a list of subtoken ids to a list of tokens.
Args:
subtokens: a list of integers in the range [0, vocab_size)
Returns:
a list of strings.
|
[
"Converts",
"a",
"list",
"of",
"subtoken",
"ids",
"to",
"a",
"list",
"of",
"tokens",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L576-L593
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._subtoken_id_to_subtoken_string
|
def _subtoken_id_to_subtoken_string(self, subtoken):
"""Converts a subtoken integer ID to a subtoken string."""
if 0 <= subtoken < self.vocab_size:
return self._all_subtoken_strings[subtoken]
return u""
|
python
|
def _subtoken_id_to_subtoken_string(self, subtoken):
"""Converts a subtoken integer ID to a subtoken string."""
if 0 <= subtoken < self.vocab_size:
return self._all_subtoken_strings[subtoken]
return u""
|
[
"def",
"_subtoken_id_to_subtoken_string",
"(",
"self",
",",
"subtoken",
")",
":",
"if",
"0",
"<=",
"subtoken",
"<",
"self",
".",
"vocab_size",
":",
"return",
"self",
".",
"_all_subtoken_strings",
"[",
"subtoken",
"]",
"return",
"u\"\""
] |
Converts a subtoken integer ID to a subtoken string.
|
[
"Converts",
"a",
"subtoken",
"integer",
"ID",
"to",
"a",
"subtoken",
"string",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L595-L599
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._escaped_token_to_subtoken_strings
|
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
|
python
|
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
|
[
"def",
"_escaped_token_to_subtoken_strings",
"(",
"self",
",",
"escaped_token",
")",
":",
"# NOTE: This algorithm is greedy; it won't necessarily produce the \"best\"",
"# list of subtokens.",
"ret",
"=",
"[",
"]",
"start",
"=",
"0",
"token_len",
"=",
"len",
"(",
"escaped_token",
")",
"while",
"start",
"<",
"token_len",
":",
"for",
"end",
"in",
"range",
"(",
"min",
"(",
"token_len",
",",
"start",
"+",
"self",
".",
"_max_subtoken_len",
")",
",",
"start",
",",
"-",
"1",
")",
":",
"subtoken",
"=",
"escaped_token",
"[",
"start",
":",
"end",
"]",
"if",
"subtoken",
"in",
"self",
".",
"_subtoken_string_to_id",
":",
"ret",
".",
"append",
"(",
"subtoken",
")",
"start",
"=",
"end",
"break",
"else",
":",
"# Did not break",
"# If there is no possible encoding of the escaped token then one of the",
"# characters in the token is not in the alphabet. This should be",
"# impossible and would be indicative of a bug.",
"assert",
"False",
",",
"\"Token substring not found in subtoken vocabulary.\"",
"return",
"ret"
] |
Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
|
[
"Converts",
"an",
"escaped",
"token",
"string",
"to",
"a",
"list",
"of",
"subtoken",
"strings",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L601-L629
|
train
|
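A standalone rerun of the greedy longest-match loop above with a toy subtoken vocabulary (max subtoken length 3): each position tries the longest window first and shrinks until a known subtoken matches, which is fast but, as the NOTE says, not guaranteed optimal:

vocab = {"hel", "lo", "l", "o", "_"}
max_subtoken_len = 3

def greedy_subtokens(escaped_token):
  ret, start = [], 0
  while start < len(escaped_token):
    for end in range(min(len(escaped_token), start + max_subtoken_len),
                     start, -1):
      piece = escaped_token[start:end]
      if piece in vocab:
        ret.append(piece)
        start = end
        break
    else:
      raise AssertionError("substring not in subtoken vocabulary")
  return ret

print(greedy_subtokens("hello_"))  # ['hel', 'lo', '_']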
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._escaped_token_to_subtoken_ids
|
def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
"""
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
]
|
python
|
def _escaped_token_to_subtoken_ids(self, escaped_token):
"""Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
"""
return [
self._subtoken_string_to_id[subtoken]
for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
]
|
[
"def",
"_escaped_token_to_subtoken_ids",
"(",
"self",
",",
"escaped_token",
")",
":",
"return",
"[",
"self",
".",
"_subtoken_string_to_id",
"[",
"subtoken",
"]",
"for",
"subtoken",
"in",
"self",
".",
"_escaped_token_to_subtoken_strings",
"(",
"escaped_token",
")",
"]"
] |
Converts an escaped token string to a list of subtoken IDs.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtoken IDs as integers.
|
[
"Converts",
"an",
"escaped",
"token",
"string",
"to",
"a",
"list",
"of",
"subtoken",
"IDs",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L631-L642
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder.build_from_generator
|
def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder
|
python
|
def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder
|
[
"def",
"build_from_generator",
"(",
"cls",
",",
"generator",
",",
"target_size",
",",
"max_subtoken_length",
"=",
"None",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"token_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"item",
"in",
"generator",
":",
"for",
"tok",
"in",
"tokenizer",
".",
"encode",
"(",
"native_to_unicode",
"(",
"item",
")",
")",
":",
"token_counts",
"[",
"tok",
"]",
"+=",
"1",
"encoder",
"=",
"cls",
".",
"build_to_target_size",
"(",
"target_size",
",",
"token_counts",
",",
"1",
",",
"1e3",
",",
"max_subtoken_length",
"=",
"max_subtoken_length",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"return",
"encoder"
] |
Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
|
[
"Builds",
"a",
"SubwordTextEncoder",
"from",
"the",
"generated",
"text",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L645-L674
|
train
|
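The first stage above is just token counting; a self-contained sketch of it, with whitespace splitting standing in for tokenizer.encode(native_to_unicode(...)). The second, vocab-building stage needs the real class and is left as a commented call:

import collections

token_counts = collections.defaultdict(int)
for line in ["the quick brown fox", "the lazy dog"]:
  for tok in line.split():  # stand-in for tokenizer.encode(...)
    token_counts[tok] += 1
print(dict(token_counts))  # {'the': 2, 'quick': 1, 'brown': 1, ...}

# encoder = SubwordTextEncoder.build_from_generator(lines, target_size=100)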
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder.build_to_target_size
|
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
|
python
|
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
tf.logging.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
|
[
"def",
"build_to_target_size",
"(",
"cls",
",",
"target_size",
",",
"token_counts",
",",
"min_val",
",",
"max_val",
",",
"max_subtoken_length",
"=",
"None",
",",
"reserved_tokens",
"=",
"None",
",",
"num_iterations",
"=",
"4",
")",
":",
"if",
"min_val",
">",
"max_val",
":",
"raise",
"ValueError",
"(",
"\"Lower bound for the minimum token count \"",
"\"is greater than the upper bound.\"",
")",
"if",
"target_size",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Target size must be positive.\"",
")",
"if",
"reserved_tokens",
"is",
"None",
":",
"reserved_tokens",
"=",
"RESERVED_TOKENS",
"def",
"bisect",
"(",
"min_val",
",",
"max_val",
")",
":",
"\"\"\"Bisection to find the right size.\"\"\"",
"present_count",
"=",
"(",
"max_val",
"+",
"min_val",
")",
"//",
"2",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Trying min_count %d\"",
"%",
"present_count",
")",
"subtokenizer",
"=",
"cls",
"(",
")",
"subtokenizer",
".",
"build_from_token_counts",
"(",
"token_counts",
",",
"present_count",
",",
"num_iterations",
",",
"max_subtoken_length",
"=",
"max_subtoken_length",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"# Being within 1% of the target size is ok.",
"is_ok",
"=",
"abs",
"(",
"subtokenizer",
".",
"vocab_size",
"-",
"target_size",
")",
"*",
"100",
"<",
"target_size",
"# If min_val == max_val, we can't do any better than this.",
"if",
"is_ok",
"or",
"min_val",
">=",
"max_val",
"or",
"present_count",
"<",
"2",
":",
"return",
"subtokenizer",
"if",
"subtokenizer",
".",
"vocab_size",
">",
"target_size",
":",
"other_subtokenizer",
"=",
"bisect",
"(",
"present_count",
"+",
"1",
",",
"max_val",
")",
"else",
":",
"other_subtokenizer",
"=",
"bisect",
"(",
"min_val",
",",
"present_count",
"-",
"1",
")",
"if",
"other_subtokenizer",
"is",
"None",
":",
"return",
"subtokenizer",
"if",
"(",
"abs",
"(",
"other_subtokenizer",
".",
"vocab_size",
"-",
"target_size",
")",
"<",
"abs",
"(",
"subtokenizer",
".",
"vocab_size",
"-",
"target_size",
")",
")",
":",
"return",
"other_subtokenizer",
"return",
"subtokenizer",
"return",
"bisect",
"(",
"min_val",
",",
"max_val",
")"
] |
Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
|
[
"Builds",
"a",
"SubwordTextEncoder",
"that",
"has",
"vocab_size",
"near",
"target_size",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L677-L748
|
train
|
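A minimal usage sketch for `build_to_target_size` above (not from the source: the
token counts are illustrative, and it assumes the method is exposed as a
classmethod, as its `cls` parameter suggests). Note the bisection accepts any
vocabulary within 1% of the target, since
`abs(vocab_size - target_size) * 100 < target_size` is exactly that test.

# Hypothetical token counts; real ones would come from a corpus.
token_counts = {u"hello": 120, u"world": 80, u"subword": 15}
encoder = SubwordTextEncoder.build_to_target_size(
    target_size=100, token_counts=token_counts, min_val=1, max_val=1000)
print(encoder.vocab_size)  # approximately 100
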
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder.build_from_token_counts
|
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
    ValueError: if `RESERVED_TOKENS` is not a prefix of `reserved_tokens`,
      since in that case it is not clear what the space is being reserved
      for, or when it will be filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size)
|
python
|
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
    ValueError: if `RESERVED_TOKENS` is not a prefix of `reserved_tokens`,
      since in that case it is not clear what the space is being reserved
      for, or when it will be filled in.
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens])
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
tf.logging.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
escaped_token = _escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
tf.logging.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings]
if reserved_tokens:
escaped_reserved_tokens = [
_escape_token(native_to_unicode(t), self._alphabet)
for t in reserved_tokens
]
new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
self._init_subtokens_from_list(new_subtoken_strings)
tf.logging.info("vocab_size = %d" % self.vocab_size)
|
[
"def",
"build_from_token_counts",
"(",
"self",
",",
"token_counts",
",",
"min_count",
",",
"num_iterations",
"=",
"4",
",",
"reserved_tokens",
"=",
"None",
",",
"max_subtoken_length",
"=",
"None",
")",
":",
"if",
"reserved_tokens",
"is",
"None",
":",
"reserved_tokens",
"=",
"RESERVED_TOKENS",
"else",
":",
"# There is not complete freedom in replacing RESERVED_TOKENS.",
"for",
"default",
",",
"proposed",
"in",
"zip",
"(",
"RESERVED_TOKENS",
",",
"reserved_tokens",
")",
":",
"if",
"default",
"!=",
"proposed",
":",
"raise",
"ValueError",
"(",
"\"RESERVED_TOKENS must be a prefix of \"",
"\"reserved_tokens.\"",
")",
"# Initialize the alphabet. Note, this must include reserved tokens or it can",
"# result in encoding failures.",
"alphabet_tokens",
"=",
"chain",
"(",
"six",
".",
"iterkeys",
"(",
"token_counts",
")",
",",
"[",
"native_to_unicode",
"(",
"t",
")",
"for",
"t",
"in",
"reserved_tokens",
"]",
")",
"self",
".",
"_init_alphabet_from_tokens",
"(",
"alphabet_tokens",
")",
"# Bootstrap the initial list of subtokens with the characters from the",
"# alphabet plus the escaping characters.",
"self",
".",
"_init_subtokens_from_list",
"(",
"list",
"(",
"self",
".",
"_alphabet",
")",
",",
"reserved_tokens",
"=",
"reserved_tokens",
")",
"# We build iteratively. On each iteration, we segment all the words,",
"# then count the resulting potential subtokens, keeping the ones",
"# with high enough counts for our new vocabulary.",
"if",
"min_count",
"<",
"1",
":",
"min_count",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"num_iterations",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Iteration {0}\"",
".",
"format",
"(",
"i",
")",
")",
"# Collect all substrings of the encoded token that break along current",
"# subtoken boundaries.",
"subtoken_counts",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"token",
",",
"count",
"in",
"six",
".",
"iteritems",
"(",
"token_counts",
")",
":",
"iter_start_time",
"=",
"time",
".",
"time",
"(",
")",
"escaped_token",
"=",
"_escape_token",
"(",
"token",
",",
"self",
".",
"_alphabet",
")",
"subtokens",
"=",
"self",
".",
"_escaped_token_to_subtoken_strings",
"(",
"escaped_token",
")",
"start",
"=",
"0",
"for",
"subtoken",
"in",
"subtokens",
":",
"last_position",
"=",
"len",
"(",
"escaped_token",
")",
"+",
"1",
"if",
"max_subtoken_length",
"is",
"not",
"None",
":",
"last_position",
"=",
"min",
"(",
"last_position",
",",
"start",
"+",
"max_subtoken_length",
")",
"for",
"end",
"in",
"range",
"(",
"start",
"+",
"1",
",",
"last_position",
")",
":",
"new_subtoken",
"=",
"escaped_token",
"[",
"start",
":",
"end",
"]",
"subtoken_counts",
"[",
"new_subtoken",
"]",
"+=",
"count",
"start",
"+=",
"len",
"(",
"subtoken",
")",
"iter_time_secs",
"=",
"time",
".",
"time",
"(",
")",
"-",
"iter_start_time",
"if",
"iter_time_secs",
">",
"0.1",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"u\"Processing token [{0}] took {1} seconds, consider \"",
"\"setting Text2TextProblem.max_subtoken_length to a \"",
"\"smaller value.\"",
".",
"format",
"(",
"token",
",",
"iter_time_secs",
")",
")",
"# Array of sets of candidate subtoken strings, by length.",
"len_to_subtoken_strings",
"=",
"[",
"]",
"for",
"subtoken_string",
",",
"count",
"in",
"six",
".",
"iteritems",
"(",
"subtoken_counts",
")",
":",
"lsub",
"=",
"len",
"(",
"subtoken_string",
")",
"if",
"count",
">=",
"min_count",
":",
"while",
"len",
"(",
"len_to_subtoken_strings",
")",
"<=",
"lsub",
":",
"len_to_subtoken_strings",
".",
"append",
"(",
"set",
"(",
")",
")",
"len_to_subtoken_strings",
"[",
"lsub",
"]",
".",
"add",
"(",
"subtoken_string",
")",
"# Consider the candidates longest to shortest, so that if we accept",
"# a longer subtoken string, we can decrement the counts of its prefixes.",
"new_subtoken_strings",
"=",
"[",
"]",
"for",
"lsub",
"in",
"range",
"(",
"len",
"(",
"len_to_subtoken_strings",
")",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"subtoken_strings",
"=",
"len_to_subtoken_strings",
"[",
"lsub",
"]",
"for",
"subtoken_string",
"in",
"subtoken_strings",
":",
"count",
"=",
"subtoken_counts",
"[",
"subtoken_string",
"]",
"if",
"count",
">=",
"min_count",
":",
"# Exclude alphabet tokens here, as they must be included later,",
"# explicitly, regardless of count.",
"if",
"subtoken_string",
"not",
"in",
"self",
".",
"_alphabet",
":",
"new_subtoken_strings",
".",
"append",
"(",
"(",
"count",
",",
"subtoken_string",
")",
")",
"for",
"l",
"in",
"range",
"(",
"1",
",",
"lsub",
")",
":",
"subtoken_counts",
"[",
"subtoken_string",
"[",
":",
"l",
"]",
"]",
"-=",
"count",
"# Include the alphabet explicitly to guarantee all strings are encodable.",
"new_subtoken_strings",
".",
"extend",
"(",
"(",
"subtoken_counts",
".",
"get",
"(",
"a",
",",
"0",
")",
",",
"a",
")",
"for",
"a",
"in",
"self",
".",
"_alphabet",
")",
"new_subtoken_strings",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"# Reinitialize to the candidate vocabulary.",
"new_subtoken_strings",
"=",
"[",
"subtoken",
"for",
"_",
",",
"subtoken",
"in",
"new_subtoken_strings",
"]",
"if",
"reserved_tokens",
":",
"escaped_reserved_tokens",
"=",
"[",
"_escape_token",
"(",
"native_to_unicode",
"(",
"t",
")",
",",
"self",
".",
"_alphabet",
")",
"for",
"t",
"in",
"reserved_tokens",
"]",
"new_subtoken_strings",
"=",
"escaped_reserved_tokens",
"+",
"new_subtoken_strings",
"self",
".",
"_init_subtokens_from_list",
"(",
"new_subtoken_strings",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"vocab_size = %d\"",
"%",
"self",
".",
"vocab_size",
")"
] |
Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
  ValueError: if `RESERVED_TOKENS` is not a prefix of `reserved_tokens`,
    since in that case it is not clear what the space is being reserved for,
    or when it will be filled in.
|
[
"Train",
"a",
"SubwordTextEncoder",
"based",
"on",
"a",
"dictionary",
"of",
"word",
"counts",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L750-L866
|
train
|
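A hedged usage sketch for `build_from_token_counts` (not from the source;
assumes a default-constructible SubwordTextEncoder, as used inside `bisect`
above, and illustrative counts):

enc = SubwordTextEncoder()
enc.build_from_token_counts(
    {u"low": 5, u"lower": 3, u"newest": 7},  # illustrative counts
    min_count=2, num_iterations=4)
print(enc.vocab_size)
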
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder.dump
|
def dump(self):
"""Debugging dump of the current subtoken vocabulary."""
subtoken_strings = [(i, s)
for s, i in six.iteritems(self._subtoken_string_to_id)]
print(u", ".join(u"{0} : '{1}'".format(i, s)
for i, s in sorted(subtoken_strings)))
|
python
|
def dump(self):
"""Debugging dump of the current subtoken vocabulary."""
subtoken_strings = [(i, s)
for s, i in six.iteritems(self._subtoken_string_to_id)]
print(u", ".join(u"{0} : '{1}'".format(i, s)
for i, s in sorted(subtoken_strings)))
|
[
"def",
"dump",
"(",
"self",
")",
":",
"subtoken_strings",
"=",
"[",
"(",
"i",
",",
"s",
")",
"for",
"s",
",",
"i",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_subtoken_string_to_id",
")",
"]",
"print",
"(",
"u\", \"",
".",
"join",
"(",
"u\"{0} : '{1}'\"",
".",
"format",
"(",
"i",
",",
"s",
")",
"for",
"i",
",",
"s",
"in",
"sorted",
"(",
"subtoken_strings",
")",
")",
")"
] |
Debugging dump of the current subtoken vocabulary.
|
[
"Debugging",
"dump",
"of",
"the",
"current",
"subtoken",
"vocabulary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L872-L877
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._init_subtokens_from_list
|
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
    ValueError: if the number of reserved tokens is neither 0 nor
      len(RESERVED_TOKENS). In this case, it is not clear what the space is
      being reserved for, or when it will be filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
|
python
|
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
    ValueError: if the number of reserved tokens is neither 0 nor
      len(RESERVED_TOKENS). In this case, it is not clear what the space is
      being reserved for, or when it will be filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
|
[
"def",
"_init_subtokens_from_list",
"(",
"self",
",",
"subtoken_strings",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"if",
"reserved_tokens",
"is",
"None",
":",
"reserved_tokens",
"=",
"[",
"]",
"if",
"reserved_tokens",
":",
"self",
".",
"_all_subtoken_strings",
"=",
"reserved_tokens",
"+",
"subtoken_strings",
"else",
":",
"self",
".",
"_all_subtoken_strings",
"=",
"subtoken_strings",
"# we remember the maximum length of any subtoken to avoid having to",
"# check arbitrarily long strings.",
"self",
".",
"_max_subtoken_len",
"=",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"subtoken_strings",
"]",
")",
"self",
".",
"_subtoken_string_to_id",
"=",
"{",
"s",
":",
"i",
"+",
"len",
"(",
"reserved_tokens",
")",
"for",
"i",
",",
"s",
"in",
"enumerate",
"(",
"subtoken_strings",
")",
"if",
"s",
"}",
"# Initialize the cache to empty.",
"self",
".",
"_cache_size",
"=",
"2",
"**",
"20",
"self",
".",
"_cache",
"=",
"[",
"(",
"None",
",",
"None",
")",
"]",
"*",
"self",
".",
"_cache_size"
] |
Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
  ValueError: if the number of reserved tokens is neither 0 nor
    len(RESERVED_TOKENS). In this case, it is not clear what the space is
    being reserved for, or when it will be filled in.
|
[
"Initialize",
"token",
"information",
"from",
"a",
"list",
"of",
"subtoken",
"strings",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L879-L910
|
train
|
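A standalone sketch of the id layout `_init_subtokens_from_list` produces:
reserved tokens occupy ids [0, len(reserved_tokens)) and subtoken ids are
shifted past them (the token names below are illustrative):

reserved = ["<pad>", "<EOS>"]
subtokens = ["a_", "b", "c"]
all_subtoken_strings = reserved + subtokens
subtoken_string_to_id = {
    s: i + len(reserved) for i, s in enumerate(subtokens) if s}
assert subtoken_string_to_id["a_"] == 2  # shifted past the two reserved ids
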
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._load_from_file_object
|
def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
"""
subtoken_strings = []
for line in f:
s = line.strip()
# Some vocab files wrap words in single quotes, but others don't
if ((s.startswith("'") and s.endswith("'")) or
(s.startswith("\"") and s.endswith("\""))):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings)
|
python
|
def _load_from_file_object(self, f):
"""Load from a file object.
Args:
f: File object to load vocabulary from
"""
subtoken_strings = []
for line in f:
s = line.strip()
# Some vocab files wrap words in single quotes, but others don't
if ((s.startswith("'") and s.endswith("'")) or
(s.startswith("\"") and s.endswith("\""))):
s = s[1:-1]
subtoken_strings.append(native_to_unicode(s))
self._init_subtokens_from_list(subtoken_strings)
self._init_alphabet_from_tokens(subtoken_strings)
|
[
"def",
"_load_from_file_object",
"(",
"self",
",",
"f",
")",
":",
"subtoken_strings",
"=",
"[",
"]",
"for",
"line",
"in",
"f",
":",
"s",
"=",
"line",
".",
"strip",
"(",
")",
"# Some vocab files wrap words in single quotes, but others don't",
"if",
"(",
"(",
"s",
".",
"startswith",
"(",
"\"'\"",
")",
"and",
"s",
".",
"endswith",
"(",
"\"'\"",
")",
")",
"or",
"(",
"s",
".",
"startswith",
"(",
"\"\\\"\"",
")",
"and",
"s",
".",
"endswith",
"(",
"\"\\\"\"",
")",
")",
")",
":",
"s",
"=",
"s",
"[",
"1",
":",
"-",
"1",
"]",
"subtoken_strings",
".",
"append",
"(",
"native_to_unicode",
"(",
"s",
")",
")",
"self",
".",
"_init_subtokens_from_list",
"(",
"subtoken_strings",
")",
"self",
".",
"_init_alphabet_from_tokens",
"(",
"subtoken_strings",
")"
] |
Load from a file object.
Args:
f: File object to load vocabulary from
|
[
"Load",
"from",
"a",
"file",
"object",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L919-L934
|
train
|
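A standalone sketch of the quote-stripping rule `_load_from_file_object`
applies to each vocab line (the inputs are illustrative):

for line in ["'hello_'", '"world"', "plain"]:
    s = line.strip()
    if ((s.startswith("'") and s.endswith("'")) or
        (s.startswith('"') and s.endswith('"'))):
        s = s[1:-1]
    print(s)  # -> hello_, world, plain
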
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
SubwordTextEncoder._load_from_file
|
def _load_from_file(self, filename):
"""Load from a vocab file."""
if not tf.gfile.Exists(filename):
raise ValueError("File %s not found" % filename)
with tf.gfile.Open(filename) as f:
self._load_from_file_object(f)
|
python
|
def _load_from_file(self, filename):
"""Load from a vocab file."""
if not tf.gfile.Exists(filename):
raise ValueError("File %s not found" % filename)
with tf.gfile.Open(filename) as f:
self._load_from_file_object(f)
|
[
"def",
"_load_from_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"filename",
")",
":",
"raise",
"ValueError",
"(",
"\"File %s not found\"",
"%",
"filename",
")",
"with",
"tf",
".",
"gfile",
".",
"Open",
"(",
"filename",
")",
"as",
"f",
":",
"self",
".",
"_load_from_file_object",
"(",
"f",
")"
] |
Load from a vocab file.
|
[
"Load",
"from",
"a",
"vocab",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L936-L941
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
ImageEncoder.encode
|
def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
    ids: numpy array of the image's pixel values, as returned by
      matplotlib's imread.
"""
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s)
|
python
|
def encode(self, s):
"""Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
    ids: numpy array of the image's pixel values, as returned by
      matplotlib's imread.
"""
try:
import matplotlib.image as im # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s)
|
[
"def",
"encode",
"(",
"self",
",",
"s",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"image",
"as",
"im",
"# pylint: disable=g-import-not-at-top",
"except",
"ImportError",
"as",
"e",
":",
"tf",
".",
"logging",
".",
"warning",
"(",
"\"Reading an image requires matplotlib to be installed: %s\"",
",",
"e",
")",
"raise",
"NotImplementedError",
"(",
"\"Image reading not implemented.\"",
")",
"return",
"im",
".",
"imread",
"(",
"s",
")"
] |
Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
  ids: numpy array of the image's pixel values, as returned by
    matplotlib's imread.
|
[
"Transform",
"a",
"string",
"with",
"a",
"filename",
"into",
"a",
"list",
"of",
"RGB",
"integers",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L965-L980
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
ImageEncoder.decode
|
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path
|
python
|
def decode(self, ids, strip_extraneous=False):
"""Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
_, tmp_file_path = tempfile.mkstemp("_decode.png")
if self._height is None or self._width is None:
size = int(math.sqrt(len(ids) / self._channels))
length = size * size * self._channels
else:
size = None
length = self._height * self._width * self._channels
if len(ids) != length:
raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x "
"channels (%d); %d != %d.\n Ids: %s"
% (len(ids), self._height, self._width, self._channels,
len(ids), length, " ".join([str(i) for i in ids])))
with tf.Graph().as_default():
raw = tf.constant(ids, dtype=tf.uint8)
if size is None:
img = tf.reshape(raw, [self._height, self._width, self._channels])
else:
img = tf.reshape(raw, [size, size, self._channels])
png = tf.image.encode_png(img)
op = tf.write_file(tmp_file_path, png)
with tf.Session() as sess:
sess.run(op)
return tmp_file_path
|
[
"def",
"decode",
"(",
"self",
",",
"ids",
",",
"strip_extraneous",
"=",
"False",
")",
":",
"del",
"strip_extraneous",
"_",
",",
"tmp_file_path",
"=",
"tempfile",
".",
"mkstemp",
"(",
"\"_decode.png\"",
")",
"if",
"self",
".",
"_height",
"is",
"None",
"or",
"self",
".",
"_width",
"is",
"None",
":",
"size",
"=",
"int",
"(",
"math",
".",
"sqrt",
"(",
"len",
"(",
"ids",
")",
"/",
"self",
".",
"_channels",
")",
")",
"length",
"=",
"size",
"*",
"size",
"*",
"self",
".",
"_channels",
"else",
":",
"size",
"=",
"None",
"length",
"=",
"self",
".",
"_height",
"*",
"self",
".",
"_width",
"*",
"self",
".",
"_channels",
"if",
"len",
"(",
"ids",
")",
"!=",
"length",
":",
"raise",
"ValueError",
"(",
"\"Length of ids (%d) must be height (%d) x width (%d) x \"",
"\"channels (%d); %d != %d.\\n Ids: %s\"",
"%",
"(",
"len",
"(",
"ids",
")",
",",
"self",
".",
"_height",
",",
"self",
".",
"_width",
",",
"self",
".",
"_channels",
",",
"len",
"(",
"ids",
")",
",",
"length",
",",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"ids",
"]",
")",
")",
")",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"raw",
"=",
"tf",
".",
"constant",
"(",
"ids",
",",
"dtype",
"=",
"tf",
".",
"uint8",
")",
"if",
"size",
"is",
"None",
":",
"img",
"=",
"tf",
".",
"reshape",
"(",
"raw",
",",
"[",
"self",
".",
"_height",
",",
"self",
".",
"_width",
",",
"self",
".",
"_channels",
"]",
")",
"else",
":",
"img",
"=",
"tf",
".",
"reshape",
"(",
"raw",
",",
"[",
"size",
",",
"size",
",",
"self",
".",
"_channels",
"]",
")",
"png",
"=",
"tf",
".",
"image",
".",
"encode_png",
"(",
"img",
")",
"op",
"=",
"tf",
".",
"write_file",
"(",
"tmp_file_path",
",",
"png",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"sess",
".",
"run",
"(",
"op",
")",
"return",
"tmp_file_path"
] |
Transform a sequence of int ids into an image file.
Args:
ids: list of integers to be converted.
strip_extraneous: unused
Returns:
Path to the temporary file where the image was saved.
Raises:
ValueError: if the ids are not of the appropriate size.
|
[
"Transform",
"a",
"sequence",
"of",
"int",
"ids",
"into",
"an",
"image",
"file",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L982-L1018
|
train
|
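When height and width are unknown, `decode` assumes a square image and infers
the side length from the flat id count; a quick check of that arithmetic
(values illustrative):

import math
ids_len, channels = 12288, 3  # e.g. a flattened 64x64 RGB image
size = int(math.sqrt(ids_len / channels))
assert size == 64 and size * size * channels == ids_len
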
tensorflow/tensor2tensor
|
tensor2tensor/data_generators/text_encoder.py
|
RealEncoder.decode
|
def decode(self, ids, strip_extraneous=False):
"""Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
  A string of space-separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
return " ".join([str(i) for i in ids])
|
python
|
def decode(self, ids, strip_extraneous=False):
"""Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
  A string of space-separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
"""
del strip_extraneous
return " ".join([str(i) for i in ids])
|
[
"def",
"decode",
"(",
"self",
",",
"ids",
",",
"strip_extraneous",
"=",
"False",
")",
":",
"del",
"strip_extraneous",
"return",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"ids",
"]",
")"
] |
Transform sequence of float values into string (float values).
Args:
ids: array of floats to be converted.
strip_extraneous: unused
Returns:
A string of space-separated float values.
Raises:
ValueError: if the ids are not of the appropriate size.
|
[
"Transform",
"sequence",
"of",
"float",
"values",
"into",
"string",
"(",
"float",
"values",
")",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/text_encoder.py#L1050-L1064
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
_pack_images
|
def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = onp.shape(images)
width, height, depth = shape[-3:]
images = onp.reshape(images, (-1, width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum(batch // rows, cols)
images = images[:rows * cols]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [rows * width, cols * height, depth])
return images
|
python
|
def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = onp.shape(images)
width, height, depth = shape[-3:]
images = onp.reshape(images, (-1, width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum(batch // rows, cols)
images = images[:rows * cols]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [rows * width, cols * height, depth])
return images
|
[
"def",
"_pack_images",
"(",
"images",
",",
"rows",
",",
"cols",
")",
":",
"shape",
"=",
"onp",
".",
"shape",
"(",
"images",
")",
"width",
",",
"height",
",",
"depth",
"=",
"shape",
"[",
"-",
"3",
":",
"]",
"images",
"=",
"onp",
".",
"reshape",
"(",
"images",
",",
"(",
"-",
"1",
",",
"width",
",",
"height",
",",
"depth",
")",
")",
"batch",
"=",
"onp",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
"rows",
"=",
"onp",
".",
"minimum",
"(",
"rows",
",",
"batch",
")",
"cols",
"=",
"onp",
".",
"minimum",
"(",
"batch",
"//",
"rows",
",",
"cols",
")",
"images",
"=",
"images",
"[",
":",
"rows",
"*",
"cols",
"]",
"images",
"=",
"onp",
".",
"reshape",
"(",
"images",
",",
"(",
"rows",
",",
"cols",
",",
"width",
",",
"height",
",",
"depth",
")",
")",
"images",
"=",
"onp",
".",
"transpose",
"(",
"images",
",",
"[",
"0",
",",
"2",
",",
"1",
",",
"3",
",",
"4",
"]",
")",
"images",
"=",
"onp",
".",
"reshape",
"(",
"images",
",",
"[",
"rows",
"*",
"width",
",",
"cols",
"*",
"height",
",",
"depth",
"]",
")",
"return",
"images"
] |
Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
|
[
"Helper",
"utility",
"to",
"make",
"a",
"tiled",
"field",
"of",
"images",
"from",
"numpy",
"arrays",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L49-L71
|
train
|
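A shape check for `_pack_images` (assumes the function is importable; the
batch is illustrative). Ten 8x8 RGB images packed into a 2x4 grid keep only
the first eight images and tile to [rows * W, cols * H, C]:

import numpy as onp
images = onp.zeros((10, 8, 8, 3))
tiled = _pack_images(images, rows=2, cols=4)  # truncates the 2 extra images
assert tiled.shape == (2 * 8, 4 * 8, 3)
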
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
markdownify_operative_config_str
|
def markdownify_operative_config_str(string):
"""Convert an operative config string to markdown format."""
# TODO(b/37527917): Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines)
|
python
|
def markdownify_operative_config_str(string):
"""Convert an operative config string to markdown format."""
# TODO(b/37527917): Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines)
|
[
"def",
"markdownify_operative_config_str",
"(",
"string",
")",
":",
"# TODO(b/37527917): Total hack below. Implement more principled formatting.",
"def",
"process",
"(",
"line",
")",
":",
"\"\"\"Convert a single line to markdown format.\"\"\"",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"return",
"' '",
"+",
"line",
"line",
"=",
"line",
"[",
"2",
":",
"]",
"if",
"line",
".",
"startswith",
"(",
"'===='",
")",
":",
"return",
"''",
"if",
"line",
".",
"startswith",
"(",
"'None'",
")",
":",
"return",
"' # None.'",
"if",
"line",
".",
"endswith",
"(",
"':'",
")",
":",
"return",
"'#### '",
"+",
"line",
"return",
"line",
"output_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"string",
".",
"splitlines",
"(",
")",
":",
"procd_line",
"=",
"process",
"(",
"line",
")",
"if",
"procd_line",
"is",
"not",
"None",
":",
"output_lines",
".",
"append",
"(",
"procd_line",
")",
"return",
"'\\n'",
".",
"join",
"(",
"output_lines",
")"
] |
Convert an operative config string to markdown format.
|
[
"Convert",
"an",
"operative",
"config",
"string",
"to",
"markdown",
"format",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L326-L350
|
train
|
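A small input/output sketch for `markdownify_operative_config_str` (the config
string is illustrative): `# ...:` header lines become `#### ` headings,
`# ====` separators are dropped, and plain lines are indented as code.

cfg = "# Parameters for train:\n# ====\ntrain.batch_size = 32"
print(markdownify_operative_config_str(cfg))
# #### Parameters for train:
#
#     train.batch_size = 32
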
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.close
|
def close(self):
"""Close SummaryWriter. Final!"""
if not self._closed:
self._event_writer.close()
self._closed = True
del self._event_writer
|
python
|
def close(self):
"""Close SummaryWriter. Final!"""
if not self._closed:
self._event_writer.close()
self._closed = True
del self._event_writer
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_closed",
":",
"self",
".",
"_event_writer",
".",
"close",
"(",
")",
"self",
".",
"_closed",
"=",
"True",
"del",
"self",
".",
"_event_writer"
] |
Close SummaryWriter. Final!
|
[
"Close",
"SummaryWriter",
".",
"Final!"
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L98-L103
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.scalar
|
def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
"""
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step)
|
python
|
def scalar(self, tag, value, step=None):
"""Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
"""
value = float(onp.array(value))
if step is None:
step = self._step
else:
self._step = step
summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
self.add_summary(summary, step)
|
[
"def",
"scalar",
"(",
"self",
",",
"tag",
",",
"value",
",",
"step",
"=",
"None",
")",
":",
"value",
"=",
"float",
"(",
"onp",
".",
"array",
"(",
"value",
")",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"simple_value",
"=",
"value",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")"
] |
Saves scalar value.
Args:
tag: str: label for this data
value: int/float: number to log
step: int: training step
|
[
"Saves",
"scalar",
"value",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L111-L125
|
train
|
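A hedged usage sketch for `SummaryWriter.scalar` (assumes the writer is
constructed with a log directory; the path and tag are illustrative):

writer = SummaryWriter("/tmp/jaxboard_demo")
writer.scalar("train/loss", 0.42, step=100)
writer.close()
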
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.image
|
def image(self, tag, image, step=None):
"""Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].
Args:
tag: str: label for this data
      image: ndarray: [H,W], [H,W,1], or [H,W,3]; saves the image in
        greyscale or color.
step: int: training step
"""
image = onp.array(image)
if step is None:
step = self._step
else:
self._step = step
if len(onp.shape(image)) == 2:
image = image[:, :, onp.newaxis]
if onp.shape(image)[-1] == 1:
image = onp.repeat(image, 3, axis=-1)
image_strio = io.BytesIO()
plt.imsave(image_strio, image, format='png')
image_summary = Summary.Image(
encoded_image_string=image_strio.getvalue(),
colorspace=3,
height=image.shape[0],
width=image.shape[1])
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
|
python
|
def image(self, tag, image, step=None):
"""Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].
Args:
tag: str: label for this data
      image: ndarray: [H,W], [H,W,1], or [H,W,3]; saves the image in
        greyscale or color.
step: int: training step
"""
image = onp.array(image)
if step is None:
step = self._step
else:
self._step = step
if len(onp.shape(image)) == 2:
image = image[:, :, onp.newaxis]
if onp.shape(image)[-1] == 1:
image = onp.repeat(image, 3, axis=-1)
image_strio = io.BytesIO()
plt.imsave(image_strio, image, format='png')
image_summary = Summary.Image(
encoded_image_string=image_strio.getvalue(),
colorspace=3,
height=image.shape[0],
width=image.shape[1])
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
|
[
"def",
"image",
"(",
"self",
",",
"tag",
",",
"image",
",",
"step",
"=",
"None",
")",
":",
"image",
"=",
"onp",
".",
"array",
"(",
"image",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"if",
"len",
"(",
"onp",
".",
"shape",
"(",
"image",
")",
")",
"==",
"2",
":",
"image",
"=",
"image",
"[",
":",
",",
":",
",",
"onp",
".",
"newaxis",
"]",
"if",
"onp",
".",
"shape",
"(",
"image",
")",
"[",
"-",
"1",
"]",
"==",
"1",
":",
"image",
"=",
"onp",
".",
"repeat",
"(",
"image",
",",
"3",
",",
"axis",
"=",
"-",
"1",
")",
"image_strio",
"=",
"io",
".",
"BytesIO",
"(",
")",
"plt",
".",
"imsave",
"(",
"image_strio",
",",
"image",
",",
"format",
"=",
"'png'",
")",
"image_summary",
"=",
"Summary",
".",
"Image",
"(",
"encoded_image_string",
"=",
"image_strio",
".",
"getvalue",
"(",
")",
",",
"colorspace",
"=",
"3",
",",
"height",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"width",
"=",
"image",
".",
"shape",
"[",
"1",
"]",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"image",
"=",
"image_summary",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")"
] |
Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].
Args:
tag: str: label for this data
  image: ndarray: [H,W], [H,W,1], or [H,W,3]; saves the image in greyscale
    or color.
step: int: training step
|
[
"Saves",
"RGB",
"image",
"summary",
"from",
"onp",
".",
"ndarray",
"[",
"H",
"W",
"]",
"[",
"H",
"W",
"1",
"]",
"or",
"[",
"H",
"W",
"3",
"]",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L127-L152
|
train
|
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.images
|
def images(self, tag, images, step=None, rows=None, cols=None):
"""Saves (rows, cols) tiled images from onp.ndarray.
    If either rows or cols isn't given, it is determined automatically from
    the size of the image batch; if neither is given, a long column of images
    is produced. This truncates the image batch rather than padding if it
    doesn't fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile
"""
images = onp.array(images)
if step is None:
step = self._step
else:
self._step = step
n_images = onp.shape(images)[0]
if rows is None and cols is None:
rows = 1
cols = n_images
elif rows is None:
rows = n_images // cols
elif cols is None:
cols = n_images // rows
tiled_images = _pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step)
|
python
|
def images(self, tag, images, step=None, rows=None, cols=None):
"""Saves (rows, cols) tiled images from onp.ndarray.
    If either rows or cols isn't given, it is determined automatically from
    the size of the image batch; if neither is given, a long column of images
    is produced. This truncates the image batch rather than padding if it
    doesn't fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile
"""
images = onp.array(images)
if step is None:
step = self._step
else:
self._step = step
n_images = onp.shape(images)[0]
if rows is None and cols is None:
rows = 1
cols = n_images
elif rows is None:
rows = n_images // cols
elif cols is None:
cols = n_images // rows
tiled_images = _pack_images(images, rows, cols)
self.image(tag, tiled_images, step=step)
|
[
"def",
"images",
"(",
"self",
",",
"tag",
",",
"images",
",",
"step",
"=",
"None",
",",
"rows",
"=",
"None",
",",
"cols",
"=",
"None",
")",
":",
"images",
"=",
"onp",
".",
"array",
"(",
"images",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"n_images",
"=",
"onp",
".",
"shape",
"(",
"images",
")",
"[",
"0",
"]",
"if",
"rows",
"is",
"None",
"and",
"cols",
"is",
"None",
":",
"rows",
"=",
"1",
"cols",
"=",
"n_images",
"elif",
"rows",
"is",
"None",
":",
"rows",
"=",
"n_images",
"//",
"cols",
"elif",
"cols",
"is",
"None",
":",
"cols",
"=",
"n_images",
"//",
"rows",
"tiled_images",
"=",
"_pack_images",
"(",
"images",
",",
"rows",
",",
"cols",
")",
"self",
".",
"image",
"(",
"tag",
",",
"tiled_images",
",",
"step",
"=",
"step",
")"
] |
Saves (rows, cols) tiled images from onp.ndarray.
If either rows or cols isn't given, it is determined automatically from the
size of the image batch; if neither is given, a long column of images is
produced. This truncates the image batch rather than padding if it doesn't
fill the final row.
Args:
tag: str: label for this data
images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
step: int: training step
rows: int: number of rows in tile
cols: int: number of columns in tile
|
[
"Saves",
"(",
"rows",
"cols",
")",
"tiled",
"images",
"from",
"onp",
".",
"ndarray",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L154-L183
|
train
|
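A standalone sketch of the rows/cols defaulting rule in `images` (mirrors the
branches above; the values are illustrative):

def infer_grid(n_images, rows=None, cols=None):
    if rows is None and cols is None:
        return 1, n_images  # one long strip
    if rows is None:
        return n_images // cols, cols
    if cols is None:
        return rows, n_images // rows
    return rows, cols

assert infer_grid(12, rows=3) == (3, 4)
assert infer_grid(5) == (1, 5)
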
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.plot
|
def plot(self, tag, mpl_plt, step=None, close_plot=True):
"""Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
"""
if step is None:
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
img_w, img_h = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(
encoded_image_string=image_buf.getvalue(),
colorspace=4, # RGBA
height=img_h,
width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close()
|
python
|
def plot(self, tag, mpl_plt, step=None, close_plot=True):
"""Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
"""
if step is None:
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
img_w, img_h = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(
encoded_image_string=image_buf.getvalue(),
colorspace=4, # RGBA
height=img_h,
width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close()
|
[
"def",
"plot",
"(",
"self",
",",
"tag",
",",
"mpl_plt",
",",
"step",
"=",
"None",
",",
"close_plot",
"=",
"True",
")",
":",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"fig",
"=",
"mpl_plt",
".",
"get_current_fig_manager",
"(",
")",
"img_w",
",",
"img_h",
"=",
"fig",
".",
"canvas",
".",
"get_width_height",
"(",
")",
"image_buf",
"=",
"io",
".",
"BytesIO",
"(",
")",
"mpl_plt",
".",
"savefig",
"(",
"image_buf",
",",
"format",
"=",
"'png'",
")",
"image_summary",
"=",
"Summary",
".",
"Image",
"(",
"encoded_image_string",
"=",
"image_buf",
".",
"getvalue",
"(",
")",
",",
"colorspace",
"=",
"4",
",",
"# RGBA",
"height",
"=",
"img_h",
",",
"width",
"=",
"img_w",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"image",
"=",
"image_summary",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")",
"if",
"close_plot",
":",
"mpl_plt",
".",
"close",
"(",
")"
] |
Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
|
[
"Saves",
"matplotlib",
"plot",
"output",
"to",
"summary",
"image",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L185-L210
|
train
|
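A minimal usage sketch for the SummaryWriter.plot method above, reusing the writer from the earlier sketch. Per the docstring, `mpl_plt` is the stateful pyplot module itself, so passing `plt` works; the Agg backend is an assumption to make savefig work without a display:

import matplotlib
matplotlib.use('Agg')  # headless backend so savefig needs no display
import matplotlib.pyplot as plt

plt.plot([0, 1, 2, 3], [0, 1, 4, 9])
plt.title('quadratic')
writer.plot('loss_curve', plt, step=100)  # close_plot=True closes the figure afterwards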
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.audio
|
def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
"""
audiodata = onp.array(audiodata)
if step is None:
step = self._step
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.')
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(sample_list),
encoded_audio_string=encoded_audio_bytes,
content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step)
|
python
|
def audio(self, tag, audiodata, step=None, sample_rate=44100):
"""Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
"""
audiodata = onp.array(audiodata)
if step is None:
step = self._step
else:
self._step = step
audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
if audiodata.ndim != 1:
raise ValueError('Audio data must be 1D.')
sample_list = (32767.0 * audiodata).astype(int).tolist()
wio = io.BytesIO()
wav_buf = wave.open(wio, 'wb')
wav_buf.setnchannels(1)
wav_buf.setsampwidth(2)
wav_buf.setframerate(sample_rate)
enc = b''.join([struct.pack('<h', v) for v in sample_list])
wav_buf.writeframes(enc)
wav_buf.close()
encoded_audio_bytes = wio.getvalue()
wio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=len(sample_list),
encoded_audio_string=encoded_audio_bytes,
content_type='audio/wav')
summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
self.add_summary(summary, step)
|
[
"def",
"audio",
"(",
"self",
",",
"tag",
",",
"audiodata",
",",
"step",
"=",
"None",
",",
"sample_rate",
"=",
"44100",
")",
":",
"audiodata",
"=",
"onp",
".",
"array",
"(",
"audiodata",
")",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"audiodata",
"=",
"onp",
".",
"clip",
"(",
"onp",
".",
"squeeze",
"(",
"audiodata",
")",
",",
"-",
"1",
",",
"1",
")",
"if",
"audiodata",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Audio data must be 1D.'",
")",
"sample_list",
"=",
"(",
"32767.0",
"*",
"audiodata",
")",
".",
"astype",
"(",
"int",
")",
".",
"tolist",
"(",
")",
"wio",
"=",
"io",
".",
"BytesIO",
"(",
")",
"wav_buf",
"=",
"wave",
".",
"open",
"(",
"wio",
",",
"'wb'",
")",
"wav_buf",
".",
"setnchannels",
"(",
"1",
")",
"wav_buf",
".",
"setsampwidth",
"(",
"2",
")",
"wav_buf",
".",
"setframerate",
"(",
"sample_rate",
")",
"enc",
"=",
"b''",
".",
"join",
"(",
"[",
"struct",
".",
"pack",
"(",
"'<h'",
",",
"v",
")",
"for",
"v",
"in",
"sample_list",
"]",
")",
"wav_buf",
".",
"writeframes",
"(",
"enc",
")",
"wav_buf",
".",
"close",
"(",
")",
"encoded_audio_bytes",
"=",
"wio",
".",
"getvalue",
"(",
")",
"wio",
".",
"close",
"(",
")",
"audio",
"=",
"Summary",
".",
"Audio",
"(",
"sample_rate",
"=",
"sample_rate",
",",
"num_channels",
"=",
"1",
",",
"length_frames",
"=",
"len",
"(",
"sample_list",
")",
",",
"encoded_audio_string",
"=",
"encoded_audio_bytes",
",",
"content_type",
"=",
"'audio/wav'",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"audio",
"=",
"audio",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")"
] |
Saves audio.
NB: single channel only right now.
Args:
tag: str: label for this data
audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
step: int: training step
sample_rate: sample rate of passed in audio buffer
|
[
"Saves",
"audio",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L212-L249
|
train
|
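A minimal usage sketch for the SummaryWriter.audio method above (single channel, per its docstring), again reusing the writer from the first sketch:

import numpy as onp

sample_rate = 44100
t = onp.linspace(0, 1, sample_rate, endpoint=False)
tone = 0.5 * onp.sin(2 * onp.pi * 440 * t)  # 440 Hz sine within (-1.0, 1.0)
writer.audio('a440', tone, step=101, sample_rate=sample_rate)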
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.histogram
|
def histogram(self, tag, values, bins, step=None):
"""Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
"""
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
# boundary logic
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step)
|
python
|
def histogram(self, tag, values, bins, step=None):
"""Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
"""
if step is None:
step = self._step
else:
self._step = step
values = onp.array(values)
bins = onp.array(bins)
values = onp.reshape(values, -1)
counts, limits = onp.histogram(values, bins=bins)
# boundary logic
cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
start, end = onp.searchsorted(
cum_counts, [0, cum_counts[-1] - 1], side='right')
start, end = int(start), int(end) + 1
counts = (
counts[start -
1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
limits = limits[start:end + 1]
sum_sq = values.dot(values)
histo = HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist())
summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
self.add_summary(summary, step)
|
[
"def",
"histogram",
"(",
"self",
",",
"tag",
",",
"values",
",",
"bins",
",",
"step",
"=",
"None",
")",
":",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"values",
"=",
"onp",
".",
"array",
"(",
"values",
")",
"bins",
"=",
"onp",
".",
"array",
"(",
"bins",
")",
"values",
"=",
"onp",
".",
"reshape",
"(",
"values",
",",
"-",
"1",
")",
"counts",
",",
"limits",
"=",
"onp",
".",
"histogram",
"(",
"values",
",",
"bins",
"=",
"bins",
")",
"# boundary logic",
"cum_counts",
"=",
"onp",
".",
"cumsum",
"(",
"onp",
".",
"greater",
"(",
"counts",
",",
"0",
",",
"dtype",
"=",
"onp",
".",
"int32",
")",
")",
"start",
",",
"end",
"=",
"onp",
".",
"searchsorted",
"(",
"cum_counts",
",",
"[",
"0",
",",
"cum_counts",
"[",
"-",
"1",
"]",
"-",
"1",
"]",
",",
"side",
"=",
"'right'",
")",
"start",
",",
"end",
"=",
"int",
"(",
"start",
")",
",",
"int",
"(",
"end",
")",
"+",
"1",
"counts",
"=",
"(",
"counts",
"[",
"start",
"-",
"1",
":",
"end",
"]",
"if",
"start",
">",
"0",
"else",
"onp",
".",
"concatenate",
"(",
"[",
"[",
"0",
"]",
",",
"counts",
"[",
":",
"end",
"]",
"]",
")",
")",
"limits",
"=",
"limits",
"[",
"start",
":",
"end",
"+",
"1",
"]",
"sum_sq",
"=",
"values",
".",
"dot",
"(",
"values",
")",
"histo",
"=",
"HistogramProto",
"(",
"min",
"=",
"values",
".",
"min",
"(",
")",
",",
"max",
"=",
"values",
".",
"max",
"(",
")",
",",
"num",
"=",
"len",
"(",
"values",
")",
",",
"sum",
"=",
"values",
".",
"sum",
"(",
")",
",",
"sum_squares",
"=",
"sum_sq",
",",
"bucket_limit",
"=",
"limits",
".",
"tolist",
"(",
")",
",",
"bucket",
"=",
"counts",
".",
"tolist",
"(",
")",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"histo",
"=",
"histo",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")"
] |
Saves histogram of values.
Args:
tag: str: label for this data
values: ndarray: will be flattened by this routine
bins: number of bins in histogram, or array of bins for onp.histogram
step: int: training step
|
[
"Saves",
"histogram",
"of",
"values",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L251-L287
|
train
|
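A minimal usage sketch for the SummaryWriter.histogram method above. Because `bins` is forwarded to onp.histogram, it may be an integer bin count or an explicit array of bin edges:

import numpy as onp

values = onp.random.randn(10000)  # any shape; the method flattens it
writer.histogram('weights', values, bins=50, step=102)  # bins: int or edge array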
tensorflow/tensor2tensor
|
tensor2tensor/trax/jaxboard.py
|
SummaryWriter.text
|
def text(self, tag, textdata, step=None):
"""Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
"""
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata) # convert lists, jax arrays, etc.
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step)
|
python
|
def text(self, tag, textdata, step=None):
"""Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
"""
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata) # convert lists, jax arrays, etc.
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step)
|
[
"def",
"text",
"(",
"self",
",",
"tag",
",",
"textdata",
",",
"step",
"=",
"None",
")",
":",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"_step",
"else",
":",
"self",
".",
"_step",
"=",
"step",
"smd",
"=",
"SummaryMetadata",
"(",
"plugin_data",
"=",
"SummaryMetadata",
".",
"PluginData",
"(",
"plugin_name",
"=",
"'text'",
")",
")",
"if",
"isinstance",
"(",
"textdata",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"values",
"=",
"[",
"textdata",
".",
"encode",
"(",
"encoding",
"=",
"'utf_8'",
")",
"]",
",",
"shape",
"=",
"(",
"1",
",",
")",
")",
"else",
":",
"textdata",
"=",
"onp",
".",
"array",
"(",
"textdata",
")",
"# convert lists, jax arrays, etc.",
"datashape",
"=",
"onp",
".",
"shape",
"(",
"textdata",
")",
"if",
"len",
"(",
"datashape",
")",
"==",
"1",
":",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"values",
"=",
"[",
"td",
".",
"encode",
"(",
"encoding",
"=",
"'utf_8'",
")",
"for",
"td",
"in",
"textdata",
"]",
",",
"shape",
"=",
"(",
"datashape",
"[",
"0",
"]",
",",
")",
")",
"elif",
"len",
"(",
"datashape",
")",
"==",
"2",
":",
"tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"values",
"=",
"[",
"td",
".",
"encode",
"(",
"encoding",
"=",
"'utf_8'",
")",
"for",
"td",
"in",
"onp",
".",
"reshape",
"(",
"textdata",
",",
"-",
"1",
")",
"]",
",",
"shape",
"=",
"(",
"datashape",
"[",
"0",
"]",
",",
"datashape",
"[",
"1",
"]",
")",
")",
"summary",
"=",
"Summary",
"(",
"value",
"=",
"[",
"Summary",
".",
"Value",
"(",
"tag",
"=",
"tag",
",",
"metadata",
"=",
"smd",
",",
"tensor",
"=",
"tensor",
")",
"]",
")",
"self",
".",
"add_summary",
"(",
"summary",
",",
"step",
")"
] |
Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
|
[
"Saves",
"a",
"text",
"summary",
"."
] |
272500b6efe353aeb638d2745ed56e519462ca31
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/jaxboard.py#L289-L322
|
train
|
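A minimal usage sketch for the SummaryWriter.text method above. Note that the implementation only assigns `tensor` for scalar, 1D, and 2D inputs; a higher-rank array would leave it unassigned and raise a NameError:

writer.text('notes', '**step 103** reached target accuracy', step=103)  # markdown is rendered by tensorboard
writer.text('samples', ['first sample', 'second sample'], step=103)  # 1D list of strings
writer.close()  # assumed close() flushes pending events, as in jaxboard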