repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
Phylliade/ikpy | scripts/hand_follow.py | follow_hand | def follow_hand(poppy, delta):
"""Tell the right hand to follow the left hand"""
right_arm_position = poppy.l_arm_chain.end_effector + delta
poppy.r_arm_chain.goto(right_arm_position, 0.5, wait=True) | python | def follow_hand(poppy, delta):
"""Tell the right hand to follow the left hand"""
right_arm_position = poppy.l_arm_chain.end_effector + delta
poppy.r_arm_chain.goto(right_arm_position, 0.5, wait=True) | [
"def",
"follow_hand",
"(",
"poppy",
",",
"delta",
")",
":",
"right_arm_position",
"=",
"poppy",
".",
"l_arm_chain",
".",
"end_effector",
"+",
"delta",
"poppy",
".",
"r_arm_chain",
".",
"goto",
"(",
"right_arm_position",
",",
"0.5",
",",
"wait",
"=",
"True",
... | Tell the right hand to follow the left hand | [
"Tell",
"the",
"right",
"hand",
"to",
"follow",
"the",
"left",
"hand"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/scripts/hand_follow.py#L27-L30 | train | 222,600 |
Phylliade/ikpy | src/ikpy/inverse_kinematics.py | inverse_kinematic_optimization | def inverse_kinematic_optimization(chain, target_frame, starting_nodes_angles, regularization_parameter=None, max_iter=None):
"""
Computes the inverse kinematic on the specified target with an optimization method
Parameters
----------
chain: ikpy.chain.Chain
The chain used for the Inverse kinematics.
target_frame: numpy.array
The desired target.
starting_nodes_angles: numpy.array
The initial pose of your chain.
regularization_parameter: float
The coefficient of the regularization.
max_iter: int
Maximum number of iterations for the optimisation algorithm.
"""
# Only get the position
target = target_frame[:3, 3]
if starting_nodes_angles is None:
raise ValueError("starting_nodes_angles must be specified")
# Compute squared distance to target
def optimize_target(x):
# y = np.append(starting_nodes_angles[:chain.first_active_joint], x)
y = chain.active_to_full(x, starting_nodes_angles)
squared_distance = np.linalg.norm(chain.forward_kinematics(y)[:3, -1] - target)
return squared_distance
# If a regularization is selected
if regularization_parameter is not None:
def optimize_total(x):
regularization = np.linalg.norm(x - starting_nodes_angles[chain.first_active_joint:])
return optimize_target(x) + regularization_parameter * regularization
else:
def optimize_total(x):
return optimize_target(x)
# Compute bounds
real_bounds = [link.bounds for link in chain.links]
# real_bounds = real_bounds[chain.first_active_joint:]
real_bounds = chain.active_from_full(real_bounds)
options = {}
# Manage iterations maximum
if max_iter is not None:
options["maxiter"] = max_iter
# Utilisation d'une optimisation L-BFGS-B
res = scipy.optimize.minimize(optimize_total, chain.active_from_full(starting_nodes_angles), method='L-BFGS-B', bounds=real_bounds, options=options)
logs.manager.info("Inverse kinematic optimisation OK, done in {} iterations".format(res.nit))
return chain.active_to_full(res.x, starting_nodes_angles) | python | def inverse_kinematic_optimization(chain, target_frame, starting_nodes_angles, regularization_parameter=None, max_iter=None):
"""
Computes the inverse kinematic on the specified target with an optimization method
Parameters
----------
chain: ikpy.chain.Chain
The chain used for the Inverse kinematics.
target_frame: numpy.array
The desired target.
starting_nodes_angles: numpy.array
The initial pose of your chain.
regularization_parameter: float
The coefficient of the regularization.
max_iter: int
Maximum number of iterations for the optimisation algorithm.
"""
# Only get the position
target = target_frame[:3, 3]
if starting_nodes_angles is None:
raise ValueError("starting_nodes_angles must be specified")
# Compute squared distance to target
def optimize_target(x):
# y = np.append(starting_nodes_angles[:chain.first_active_joint], x)
y = chain.active_to_full(x, starting_nodes_angles)
squared_distance = np.linalg.norm(chain.forward_kinematics(y)[:3, -1] - target)
return squared_distance
# If a regularization is selected
if regularization_parameter is not None:
def optimize_total(x):
regularization = np.linalg.norm(x - starting_nodes_angles[chain.first_active_joint:])
return optimize_target(x) + regularization_parameter * regularization
else:
def optimize_total(x):
return optimize_target(x)
# Compute bounds
real_bounds = [link.bounds for link in chain.links]
# real_bounds = real_bounds[chain.first_active_joint:]
real_bounds = chain.active_from_full(real_bounds)
options = {}
# Manage iterations maximum
if max_iter is not None:
options["maxiter"] = max_iter
# Utilisation d'une optimisation L-BFGS-B
res = scipy.optimize.minimize(optimize_total, chain.active_from_full(starting_nodes_angles), method='L-BFGS-B', bounds=real_bounds, options=options)
logs.manager.info("Inverse kinematic optimisation OK, done in {} iterations".format(res.nit))
return chain.active_to_full(res.x, starting_nodes_angles) | [
"def",
"inverse_kinematic_optimization",
"(",
"chain",
",",
"target_frame",
",",
"starting_nodes_angles",
",",
"regularization_parameter",
"=",
"None",
",",
"max_iter",
"=",
"None",
")",
":",
"# Only get the position",
"target",
"=",
"target_frame",
"[",
":",
"3",
"... | Computes the inverse kinematic on the specified target with an optimization method
Parameters
----------
chain: ikpy.chain.Chain
The chain used for the Inverse kinematics.
target_frame: numpy.array
The desired target.
starting_nodes_angles: numpy.array
The initial pose of your chain.
regularization_parameter: float
The coefficient of the regularization.
max_iter: int
Maximum number of iterations for the optimisation algorithm. | [
"Computes",
"the",
"inverse",
"kinematic",
"on",
"the",
"specified",
"target",
"with",
"an",
"optimization",
"method"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/inverse_kinematics.py#L7-L61 | train | 222,601 |
Phylliade/ikpy | src/ikpy/chain.py | Chain.forward_kinematics | def forward_kinematics(self, joints, full_kinematics=False):
"""Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix
"""
frame_matrix = np.eye(4)
if full_kinematics:
frame_matrixes = []
if len(self.links) != len(joints):
raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links)))
for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
# Compute iteratively the position
# NB : Use asarray to avoid old sympy problems
frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
if full_kinematics:
# rotation_axe = np.dot(frame_matrix, link.rotation)
frame_matrixes.append(frame_matrix)
# Return the matrix, or matrixes
if full_kinematics:
return frame_matrixes
else:
return frame_matrix | python | def forward_kinematics(self, joints, full_kinematics=False):
"""Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix
"""
frame_matrix = np.eye(4)
if full_kinematics:
frame_matrixes = []
if len(self.links) != len(joints):
raise ValueError("Your joints vector length is {} but you have {} links".format(len(joints), len(self.links)))
for index, (link, joint_angle) in enumerate(zip(self.links, joints)):
# Compute iteratively the position
# NB : Use asarray to avoid old sympy problems
frame_matrix = np.dot(frame_matrix, np.asarray(link.get_transformation_matrix(joint_angle)))
if full_kinematics:
# rotation_axe = np.dot(frame_matrix, link.rotation)
frame_matrixes.append(frame_matrix)
# Return the matrix, or matrixes
if full_kinematics:
return frame_matrixes
else:
return frame_matrix | [
"def",
"forward_kinematics",
"(",
"self",
",",
"joints",
",",
"full_kinematics",
"=",
"False",
")",
":",
"frame_matrix",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"if",
"full_kinematics",
":",
"frame_matrixes",
"=",
"[",
"]",
"if",
"len",
"(",
"self",
".",
... | Returns the transformation matrix of the forward kinematics
Parameters
----------
joints: list
The list of the positions of each joint. Note : Inactive joints must be in the list.
full_kinematics: bool
Return the transformation matrices of each joint
Returns
-------
frame_matrix:
The transformation matrix | [
"Returns",
"the",
"transformation",
"matrix",
"of",
"the",
"forward",
"kinematics"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/chain.py#L48-L83 | train | 222,602 |
Phylliade/ikpy | src/ikpy/chain.py | Chain.inverse_kinematics | def inverse_kinematics(self, target, initial_position=None, **kwargs):
"""Computes the inverse kinematic on the specified target
Parameters
----------
target: numpy.array
The frame target of the inverse kinematic, in meters. It must be 4x4 transformation matrix
initial_position: numpy.array
Optional : the initial position of each joint of the chain. Defaults to 0 for each joint
Returns
-------
The list of the positions of each joint according to the target. Note : Inactive joints are in the list.
"""
# Checks on input
target = np.array(target)
if target.shape != (4, 4):
raise ValueError("Your target must be a 4x4 transformation matrix")
if initial_position is None:
initial_position = [0] * len(self.links)
return ik.inverse_kinematic_optimization(self, target, starting_nodes_angles=initial_position, **kwargs) | python | def inverse_kinematics(self, target, initial_position=None, **kwargs):
"""Computes the inverse kinematic on the specified target
Parameters
----------
target: numpy.array
The frame target of the inverse kinematic, in meters. It must be 4x4 transformation matrix
initial_position: numpy.array
Optional : the initial position of each joint of the chain. Defaults to 0 for each joint
Returns
-------
The list of the positions of each joint according to the target. Note : Inactive joints are in the list.
"""
# Checks on input
target = np.array(target)
if target.shape != (4, 4):
raise ValueError("Your target must be a 4x4 transformation matrix")
if initial_position is None:
initial_position = [0] * len(self.links)
return ik.inverse_kinematic_optimization(self, target, starting_nodes_angles=initial_position, **kwargs) | [
"def",
"inverse_kinematics",
"(",
"self",
",",
"target",
",",
"initial_position",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Checks on input",
"target",
"=",
"np",
".",
"array",
"(",
"target",
")",
"if",
"target",
".",
"shape",
"!=",
"(",
"4",
... | Computes the inverse kinematic on the specified target
Parameters
----------
target: numpy.array
The frame target of the inverse kinematic, in meters. It must be 4x4 transformation matrix
initial_position: numpy.array
Optional : the initial position of each joint of the chain. Defaults to 0 for each joint
Returns
-------
The list of the positions of each joint according to the target. Note : Inactive joints are in the list. | [
"Computes",
"the",
"inverse",
"kinematic",
"on",
"the",
"specified",
"target"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/chain.py#L85-L107 | train | 222,603 |
Phylliade/ikpy | src/ikpy/chain.py | Chain.plot | def plot(self, joints, ax, target=None, show=False):
"""Plots the Chain using Matplotlib
Parameters
----------
joints: list
The list of the positions of each joint
ax: matplotlib.axes.Axes
A matplotlib axes
target: numpy.array
An optional target
show: bool
Display the axe. Defaults to False
"""
from . import plot_utils
if ax is None:
# If ax is not given, create one
ax = plot_utils.init_3d_figure()
plot_utils.plot_chain(self, joints, ax)
plot_utils.plot_basis(ax, self._length)
# Plot the goal position
if target is not None:
plot_utils.plot_target(target, ax)
if show:
plot_utils.show_figure() | python | def plot(self, joints, ax, target=None, show=False):
"""Plots the Chain using Matplotlib
Parameters
----------
joints: list
The list of the positions of each joint
ax: matplotlib.axes.Axes
A matplotlib axes
target: numpy.array
An optional target
show: bool
Display the axe. Defaults to False
"""
from . import plot_utils
if ax is None:
# If ax is not given, create one
ax = plot_utils.init_3d_figure()
plot_utils.plot_chain(self, joints, ax)
plot_utils.plot_basis(ax, self._length)
# Plot the goal position
if target is not None:
plot_utils.plot_target(target, ax)
if show:
plot_utils.show_figure() | [
"def",
"plot",
"(",
"self",
",",
"joints",
",",
"ax",
",",
"target",
"=",
"None",
",",
"show",
"=",
"False",
")",
":",
"from",
".",
"import",
"plot_utils",
"if",
"ax",
"is",
"None",
":",
"# If ax is not given, create one",
"ax",
"=",
"plot_utils",
".",
... | Plots the Chain using Matplotlib
Parameters
----------
joints: list
The list of the positions of each joint
ax: matplotlib.axes.Axes
A matplotlib axes
target: numpy.array
An optional target
show: bool
Display the axe. Defaults to False | [
"Plots",
"the",
"Chain",
"using",
"Matplotlib"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/chain.py#L109-L135 | train | 222,604 |
Phylliade/ikpy | src/ikpy/chain.py | Chain.from_urdf_file | def from_urdf_file(cls, urdf_file, base_elements=None, last_link_vector=None, base_element_type="link", active_links_mask=None, name="chain"):
"""Creates a chain from an URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
active_links_mask: list[bool]
"""
if base_elements is None:
base_elements = ["base_link"]
links = URDF_utils.get_urdf_parameters(urdf_file, base_elements=base_elements, last_link_vector=last_link_vector, base_element_type=base_element_type)
# Add an origin link at the beginning
return cls([link_lib.OriginLink()] + links, active_links_mask=active_links_mask, name=name) | python | def from_urdf_file(cls, urdf_file, base_elements=None, last_link_vector=None, base_element_type="link", active_links_mask=None, name="chain"):
"""Creates a chain from an URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
active_links_mask: list[bool]
"""
if base_elements is None:
base_elements = ["base_link"]
links = URDF_utils.get_urdf_parameters(urdf_file, base_elements=base_elements, last_link_vector=last_link_vector, base_element_type=base_element_type)
# Add an origin link at the beginning
return cls([link_lib.OriginLink()] + links, active_links_mask=active_links_mask, name=name) | [
"def",
"from_urdf_file",
"(",
"cls",
",",
"urdf_file",
",",
"base_elements",
"=",
"None",
",",
"last_link_vector",
"=",
"None",
",",
"base_element_type",
"=",
"\"link\"",
",",
"active_links_mask",
"=",
"None",
",",
"name",
"=",
"\"chain\"",
")",
":",
"if",
"... | Creates a chain from an URDF file
Parameters
----------
urdf_file: str
The path of the URDF file
base_elements: list of strings
List of the links beginning the chain
last_link_vector: numpy.array
Optional : The translation vector of the tip.
name: str
The name of the Chain
base_element_type: str
active_links_mask: list[bool] | [
"Creates",
"a",
"chain",
"from",
"an",
"URDF",
"file"
] | 60e36d6163136942bf520d952db17123c658d0b6 | https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/chain.py#L138-L159 | train | 222,605 |
blockchain-certificates/cert-issuer | cert_issuer/signer.py | check_internet_off | def check_internet_off(secrets_file_path):
"""If internet off and USB plugged in, returns true. Else, continues to wait..."""
while True:
if internet_on() is False and os.path.exists(secrets_file_path):
break
else:
print("Turn off your internet and plug in your USB to continue...")
time.sleep(10)
return True | python | def check_internet_off(secrets_file_path):
"""If internet off and USB plugged in, returns true. Else, continues to wait..."""
while True:
if internet_on() is False and os.path.exists(secrets_file_path):
break
else:
print("Turn off your internet and plug in your USB to continue...")
time.sleep(10)
return True | [
"def",
"check_internet_off",
"(",
"secrets_file_path",
")",
":",
"while",
"True",
":",
"if",
"internet_on",
"(",
")",
"is",
"False",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"secrets_file_path",
")",
":",
"break",
"else",
":",
"print",
"(",
"\"Turn of... | If internet off and USB plugged in, returns true. Else, continues to wait... | [
"If",
"internet",
"off",
"and",
"USB",
"plugged",
"in",
"returns",
"true",
".",
"Else",
"continues",
"to",
"wait",
"..."
] | e8a48e25472473b149bd411a9fd5f2ff0f8f100a | https://github.com/blockchain-certificates/cert-issuer/blob/e8a48e25472473b149bd411a9fd5f2ff0f8f100a/cert_issuer/signer.py#L66-L74 | train | 222,606 |
blockchain-certificates/cert-issuer | cert_issuer/signer.py | check_internet_on | def check_internet_on(secrets_file_path):
"""If internet on and USB unplugged, returns true. Else, continues to wait..."""
while True:
if internet_on() is True and not os.path.exists(secrets_file_path):
break
else:
print("Turn on your internet and unplug your USB to continue...")
time.sleep(10)
return True | python | def check_internet_on(secrets_file_path):
"""If internet on and USB unplugged, returns true. Else, continues to wait..."""
while True:
if internet_on() is True and not os.path.exists(secrets_file_path):
break
else:
print("Turn on your internet and unplug your USB to continue...")
time.sleep(10)
return True | [
"def",
"check_internet_on",
"(",
"secrets_file_path",
")",
":",
"while",
"True",
":",
"if",
"internet_on",
"(",
")",
"is",
"True",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"secrets_file_path",
")",
":",
"break",
"else",
":",
"print",
"(",
"\"... | If internet on and USB unplugged, returns true. Else, continues to wait... | [
"If",
"internet",
"on",
"and",
"USB",
"unplugged",
"returns",
"true",
".",
"Else",
"continues",
"to",
"wait",
"..."
] | e8a48e25472473b149bd411a9fd5f2ff0f8f100a | https://github.com/blockchain-certificates/cert-issuer/blob/e8a48e25472473b149bd411a9fd5f2ff0f8f100a/cert_issuer/signer.py#L77-L85 | train | 222,607 |
blockchain-certificates/cert-issuer | cert_issuer/blockchain_handlers/bitcoin/signer.py | verify_signature | def verify_signature(uid, signed_cert_file_name, issuing_address):
"""
Verify the certificate signature matches the expected. Double-check the uid field in the certificate and use
VerifyMessage to confirm that the signature in the certificate matches the issuing_address.
Raises error is verification fails.
Raises UnverifiedSignatureError if signature is invalid
:param uid:
:param signed_cert_file_name:
:param issuing_address:
:return:
"""
logging.info('verifying signature for certificate with uid=%s:', uid)
with open(signed_cert_file_name) as in_file:
signed_cert = in_file.read()
signed_cert_json = json.loads(signed_cert)
to_verify = uid
signature = signed_cert_json['signature']
verified = verify_message(issuing_address, to_verify, signature)
if not verified:
error_message = 'There was a problem with the signature for certificate uid={}'.format(uid)
raise UnverifiedSignatureError(error_message)
logging.info('verified signature') | python | def verify_signature(uid, signed_cert_file_name, issuing_address):
"""
Verify the certificate signature matches the expected. Double-check the uid field in the certificate and use
VerifyMessage to confirm that the signature in the certificate matches the issuing_address.
Raises error is verification fails.
Raises UnverifiedSignatureError if signature is invalid
:param uid:
:param signed_cert_file_name:
:param issuing_address:
:return:
"""
logging.info('verifying signature for certificate with uid=%s:', uid)
with open(signed_cert_file_name) as in_file:
signed_cert = in_file.read()
signed_cert_json = json.loads(signed_cert)
to_verify = uid
signature = signed_cert_json['signature']
verified = verify_message(issuing_address, to_verify, signature)
if not verified:
error_message = 'There was a problem with the signature for certificate uid={}'.format(uid)
raise UnverifiedSignatureError(error_message)
logging.info('verified signature') | [
"def",
"verify_signature",
"(",
"uid",
",",
"signed_cert_file_name",
",",
"issuing_address",
")",
":",
"logging",
".",
"info",
"(",
"'verifying signature for certificate with uid=%s:'",
",",
"uid",
")",
"with",
"open",
"(",
"signed_cert_file_name",
")",
"as",
"in_file... | Verify the certificate signature matches the expected. Double-check the uid field in the certificate and use
VerifyMessage to confirm that the signature in the certificate matches the issuing_address.
Raises error is verification fails.
Raises UnverifiedSignatureError if signature is invalid
:param uid:
:param signed_cert_file_name:
:param issuing_address:
:return: | [
"Verify",
"the",
"certificate",
"signature",
"matches",
"the",
"expected",
".",
"Double",
"-",
"check",
"the",
"uid",
"field",
"in",
"the",
"certificate",
"and",
"use",
"VerifyMessage",
"to",
"confirm",
"that",
"the",
"signature",
"in",
"the",
"certificate",
"... | e8a48e25472473b149bd411a9fd5f2ff0f8f100a | https://github.com/blockchain-certificates/cert-issuer/blob/e8a48e25472473b149bd411a9fd5f2ff0f8f100a/cert_issuer/blockchain_handlers/bitcoin/signer.py#L52-L78 | train | 222,608 |
blockchain-certificates/cert-issuer | cert_issuer/blockchain_handlers/ethereum/connectors.py | EtherscanBroadcaster.get_balance | def get_balance(self, address, api_token):
"""
returns the balance in wei
with some inspiration from PyWallet
"""
broadcast_url = self.base_url + '?module=account&action=balance'
broadcast_url += '&address=%s' % address
broadcast_url += '&tag=latest'
if api_token:
'&apikey=%s' % api_token
response = requests.get(broadcast_url)
if int(response.status_code) == 200:
balance = int(response.json().get('result', None))
logging.info('Balance check succeeded: %s', response.json())
return balance
raise BroadcastError(response.text) | python | def get_balance(self, address, api_token):
"""
returns the balance in wei
with some inspiration from PyWallet
"""
broadcast_url = self.base_url + '?module=account&action=balance'
broadcast_url += '&address=%s' % address
broadcast_url += '&tag=latest'
if api_token:
'&apikey=%s' % api_token
response = requests.get(broadcast_url)
if int(response.status_code) == 200:
balance = int(response.json().get('result', None))
logging.info('Balance check succeeded: %s', response.json())
return balance
raise BroadcastError(response.text) | [
"def",
"get_balance",
"(",
"self",
",",
"address",
",",
"api_token",
")",
":",
"broadcast_url",
"=",
"self",
".",
"base_url",
"+",
"'?module=account&action=balance'",
"broadcast_url",
"+=",
"'&address=%s'",
"%",
"address",
"broadcast_url",
"+=",
"'&tag=latest'",
"if... | returns the balance in wei
with some inspiration from PyWallet | [
"returns",
"the",
"balance",
"in",
"wei",
"with",
"some",
"inspiration",
"from",
"PyWallet"
] | e8a48e25472473b149bd411a9fd5f2ff0f8f100a | https://github.com/blockchain-certificates/cert-issuer/blob/e8a48e25472473b149bd411a9fd5f2ff0f8f100a/cert_issuer/blockchain_handlers/ethereum/connectors.py#L80-L95 | train | 222,609 |
blockchain-certificates/cert-issuer | cert_issuer/blockchain_handlers/ethereum/connectors.py | EtherscanBroadcaster.get_address_nonce | def get_address_nonce(self, address, api_token):
"""
Looks up the address nonce of this address
Neccesary for the transaction creation
"""
broadcast_url = self.base_url + '?module=proxy&action=eth_getTransactionCount'
broadcast_url += '&address=%s' % address
broadcast_url += '&tag=latest'
if api_token:
'&apikey=%s' % api_token
response = requests.get(broadcast_url, )
if int(response.status_code) == 200:
# the int(res, 0) transforms the hex nonce to int
nonce = int(response.json().get('result', None), 0)
logging.info('Nonce check went correct: %s', response.json())
return nonce
else:
logging.info('response error checking nonce')
raise BroadcastError('Error checking the nonce through the Etherscan API. Error msg: %s', response.text) | python | def get_address_nonce(self, address, api_token):
"""
Looks up the address nonce of this address
Neccesary for the transaction creation
"""
broadcast_url = self.base_url + '?module=proxy&action=eth_getTransactionCount'
broadcast_url += '&address=%s' % address
broadcast_url += '&tag=latest'
if api_token:
'&apikey=%s' % api_token
response = requests.get(broadcast_url, )
if int(response.status_code) == 200:
# the int(res, 0) transforms the hex nonce to int
nonce = int(response.json().get('result', None), 0)
logging.info('Nonce check went correct: %s', response.json())
return nonce
else:
logging.info('response error checking nonce')
raise BroadcastError('Error checking the nonce through the Etherscan API. Error msg: %s', response.text) | [
"def",
"get_address_nonce",
"(",
"self",
",",
"address",
",",
"api_token",
")",
":",
"broadcast_url",
"=",
"self",
".",
"base_url",
"+",
"'?module=proxy&action=eth_getTransactionCount'",
"broadcast_url",
"+=",
"'&address=%s'",
"%",
"address",
"broadcast_url",
"+=",
"'... | Looks up the address nonce of this address
Neccesary for the transaction creation | [
"Looks",
"up",
"the",
"address",
"nonce",
"of",
"this",
"address",
"Neccesary",
"for",
"the",
"transaction",
"creation"
] | e8a48e25472473b149bd411a9fd5f2ff0f8f100a | https://github.com/blockchain-certificates/cert-issuer/blob/e8a48e25472473b149bd411a9fd5f2ff0f8f100a/cert_issuer/blockchain_handlers/ethereum/connectors.py#L97-L115 | train | 222,610 |
tensorflow/mesh | mesh_tensorflow/tpu_variables.py | ReplicatedVariable._dense_var_to_tensor | def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# pylint: disable=protected-access
if _enclosing_tpu_context() is None:
if hasattr(self._primary_var, '_dense_var_to_tensor'):
return self._primary_var._dense_var_to_tensor(dtype, name, as_ref)
else:
return ops.convert_to_tensor(self._primary_var)
# pylint: enable=protected-access
if dtype is not None and dtype != self.dtype:
return NotImplemented
if as_ref:
return self.handle
else:
return self.read_value() | python | def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# pylint: disable=protected-access
if _enclosing_tpu_context() is None:
if hasattr(self._primary_var, '_dense_var_to_tensor'):
return self._primary_var._dense_var_to_tensor(dtype, name, as_ref)
else:
return ops.convert_to_tensor(self._primary_var)
# pylint: enable=protected-access
if dtype is not None and dtype != self.dtype:
return NotImplemented
if as_ref:
return self.handle
else:
return self.read_value() | [
"def",
"_dense_var_to_tensor",
"(",
"self",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
",",
"as_ref",
"=",
"False",
")",
":",
"# pylint: disable=protected-access",
"if",
"_enclosing_tpu_context",
"(",
")",
"is",
"None",
":",
"if",
"hasattr",
"(",
"... | Converts a variable to a tensor. | [
"Converts",
"a",
"variable",
"to",
"a",
"tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/tpu_variables.py#L183-L197 | train | 222,611 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/memory_estimator.py | MemoryEstimator._compute_layout_validator | def _compute_layout_validator(self):
"""Computes self._layout_validator."""
self._layout_validator = valid_layouts.LayoutValidator(self.mtf_graph,
self.mesh_shape) | python | def _compute_layout_validator(self):
"""Computes self._layout_validator."""
self._layout_validator = valid_layouts.LayoutValidator(self.mtf_graph,
self.mesh_shape) | [
"def",
"_compute_layout_validator",
"(",
"self",
")",
":",
"self",
".",
"_layout_validator",
"=",
"valid_layouts",
".",
"LayoutValidator",
"(",
"self",
".",
"mtf_graph",
",",
"self",
".",
"mesh_shape",
")"
] | Computes self._layout_validator. | [
"Computes",
"self",
".",
"_layout_validator",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/memory_estimator.py#L87-L90 | train | 222,612 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/memory_estimator.py | MemoryEstimator._compute_graph_interface | def _compute_graph_interface(self):
"""Computes self._graph_interface."""
self._graph_interface = graph_interface.GraphInterface(self.mtf_graph)
for mtf_output in self.mtf_outputs:
self._graph_interface.set_tensor_final(mtf_output.name) | python | def _compute_graph_interface(self):
"""Computes self._graph_interface."""
self._graph_interface = graph_interface.GraphInterface(self.mtf_graph)
for mtf_output in self.mtf_outputs:
self._graph_interface.set_tensor_final(mtf_output.name) | [
"def",
"_compute_graph_interface",
"(",
"self",
")",
":",
"self",
".",
"_graph_interface",
"=",
"graph_interface",
".",
"GraphInterface",
"(",
"self",
".",
"mtf_graph",
")",
"for",
"mtf_output",
"in",
"self",
".",
"mtf_outputs",
":",
"self",
".",
"_graph_interfa... | Computes self._graph_interface. | [
"Computes",
"self",
".",
"_graph_interface",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/memory_estimator.py#L92-L96 | train | 222,613 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer.py | make_layer_stack | def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
"""Configurable layer stack.
Args:
layers: a list of subclasses of TransformerLayer
num_layers: an integer
Returns:
a LayerStack
"""
return LayerStack([cls() for cls in layers] * num_layers) | python | def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
"""Configurable layer stack.
Args:
layers: a list of subclasses of TransformerLayer
num_layers: an integer
Returns:
a LayerStack
"""
return LayerStack([cls() for cls in layers] * num_layers) | [
"def",
"make_layer_stack",
"(",
"layers",
"=",
"gin",
".",
"REQUIRED",
",",
"num_layers",
"=",
"6",
")",
":",
"return",
"LayerStack",
"(",
"[",
"cls",
"(",
")",
"for",
"cls",
"in",
"layers",
"]",
"*",
"num_layers",
")"
] | Configurable layer stack.
Args:
layers: a list of subclasses of TransformerLayer
num_layers: an integer
Returns:
a LayerStack | [
"Configurable",
"layer",
"stack",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L946-L955 | train | 222,614 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer.py | make_bitransformer | def make_bitransformer(
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None):
"""Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: a integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer
"""
with gin.config_scope("encoder"):
encoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=None,
autoregressive=False,
name="encoder",
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("decoder"):
decoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=output_vocab_size,
output_vocab_size=output_vocab_size,
autoregressive=True,
name="decoder",
layout=layout,
mesh_shape=mesh_shape)
return Bitransformer(encoder, decoder) | python | def make_bitransformer(
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None):
"""Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: a integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer
"""
with gin.config_scope("encoder"):
encoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=None,
autoregressive=False,
name="encoder",
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("decoder"):
decoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=output_vocab_size,
output_vocab_size=output_vocab_size,
autoregressive=True,
name="decoder",
layout=layout,
mesh_shape=mesh_shape)
return Bitransformer(encoder, decoder) | [
"def",
"make_bitransformer",
"(",
"input_vocab_size",
"=",
"gin",
".",
"REQUIRED",
",",
"output_vocab_size",
"=",
"gin",
".",
"REQUIRED",
",",
"layout",
"=",
"None",
",",
"mesh_shape",
"=",
"None",
")",
":",
"with",
"gin",
".",
"config_scope",
"(",
"\"encode... | Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: a integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
Returns:
a Bitransformer | [
"Gin",
"-",
"configurable",
"bitransformer",
"constructor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L959-L1005 | train | 222,615 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer.py | Context.get_states | def get_states(self, n):
"""Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors
"""
return self.states[len(self.new_states):len(self.new_states) + n] | python | def get_states(self, n):
"""Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors
"""
return self.states[len(self.new_states):len(self.new_states) + n] | [
"def",
"get_states",
"(",
"self",
",",
"n",
")",
":",
"return",
"self",
".",
"states",
"[",
"len",
"(",
"self",
".",
"new_states",
")",
":",
"len",
"(",
"self",
".",
"new_states",
")",
"+",
"n",
"]"
] | Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors | [
"Get",
"the",
"next",
"n",
"recurrent",
"states",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L219-L229 | train | 222,616 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer.py | Context.get_constant_state | def get_constant_state(self):
"""Read state that was written in "first_part" mode.
Returns:
a structure
"""
ret = self.constant_states[self.next_constant_state]
self.next_constant_state += 1
return ret | python | def get_constant_state(self):
"""Read state that was written in "first_part" mode.
Returns:
a structure
"""
ret = self.constant_states[self.next_constant_state]
self.next_constant_state += 1
return ret | [
"def",
"get_constant_state",
"(",
"self",
")",
":",
"ret",
"=",
"self",
".",
"constant_states",
"[",
"self",
".",
"next_constant_state",
"]",
"self",
".",
"next_constant_state",
"+=",
"1",
"return",
"ret"
] | Read state that was written in "first_part" mode.
Returns:
a structure | [
"Read",
"state",
"that",
"was",
"written",
"in",
"first_part",
"mode",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L252-L260 | train | 222,617 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer.py | Context.nonpadding | def nonpadding(self):
"""Tensor with zeros in padding positions and ones elsewhere."""
if self.sequence_id is None:
return None
if self.sequence_id == 1:
return 1
else:
return mtf.cast(
mtf.not_equal(self.sequence_id, 0), self.activation_dtype) | python | def nonpadding(self):
"""Tensor with zeros in padding positions and ones elsewhere."""
if self.sequence_id is None:
return None
if self.sequence_id == 1:
return 1
else:
return mtf.cast(
mtf.not_equal(self.sequence_id, 0), self.activation_dtype) | [
"def",
"nonpadding",
"(",
"self",
")",
":",
"if",
"self",
".",
"sequence_id",
"is",
"None",
":",
"return",
"None",
"if",
"self",
".",
"sequence_id",
"==",
"1",
":",
"return",
"1",
"else",
":",
"return",
"mtf",
".",
"cast",
"(",
"mtf",
".",
"not_equal... | Tensor with zeros in padding positions and ones elsewhere. | [
"Tensor",
"with",
"zeros",
"in",
"padding",
"positions",
"and",
"ones",
"elsewhere",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L263-L271 | train | 222,618 |
tensorflow/mesh | mesh_tensorflow/transformer/metrics.py | sequence_accuracy | def sequence_accuracy(labels, outputs):
"""Compute the sequence-level accuracy.
A sequence is only considered correct if all of its entries were predicted
correctly.
Args:
labels: ground-truth labels, shape=(batch, packed_seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate.
"""
# A sequence is correct if all of the non-padded entries are correct
all_correct = tf.reduce_all(
tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0)), axis=-1
)
return tf.metrics.mean(all_correct) | python | def sequence_accuracy(labels, outputs):
"""Compute the sequence-level accuracy.
A sequence is only considered correct if all of its entries were predicted
correctly.
Args:
labels: ground-truth labels, shape=(batch, packed_seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate.
"""
# A sequence is correct if all of the non-padded entries are correct
all_correct = tf.reduce_all(
tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0)), axis=-1
)
return tf.metrics.mean(all_correct) | [
"def",
"sequence_accuracy",
"(",
"labels",
",",
"outputs",
")",
":",
"# A sequence is correct if all of the non-padded entries are correct",
"all_correct",
"=",
"tf",
".",
"reduce_all",
"(",
"tf",
".",
"logical_or",
"(",
"tf",
".",
"equal",
"(",
"labels",
",",
"outp... | Compute the sequence-level accuracy.
A sequence is only considered correct if all of its entries were predicted
correctly.
Args:
labels: ground-truth labels, shape=(batch, packed_seq_length)
outputs: predicted tokens, shape=(batch, seq_length)
Returns:
Two ops, one for getting the current average accuracy and another for
updating the running average estimate. | [
"Compute",
"the",
"sequence",
"-",
"level",
"accuracy",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/metrics.py#L46-L63 | train | 222,619 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_operation_input_names | def get_operation_input_names(self, operation_name):
"""Generates the names of all input tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an input tensor.
"""
for input_tensor in self._name_to_operation(operation_name).inputs:
yield input_tensor.name | python | def get_operation_input_names(self, operation_name):
"""Generates the names of all input tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an input tensor.
"""
for input_tensor in self._name_to_operation(operation_name).inputs:
yield input_tensor.name | [
"def",
"get_operation_input_names",
"(",
"self",
",",
"operation_name",
")",
":",
"for",
"input_tensor",
"in",
"self",
".",
"_name_to_operation",
"(",
"operation_name",
")",
".",
"inputs",
":",
"yield",
"input_tensor",
".",
"name"
] | Generates the names of all input tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an input tensor. | [
"Generates",
"the",
"names",
"of",
"all",
"input",
"tensors",
"of",
"an",
"operation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L93-L103 | train | 222,620 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_operation_output_names | def get_operation_output_names(self, operation_name):
"""Generates the names of all output tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an output tensor.
"""
for output_tensor in self._name_to_operation(operation_name).outputs:
yield output_tensor.name | python | def get_operation_output_names(self, operation_name):
"""Generates the names of all output tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an output tensor.
"""
for output_tensor in self._name_to_operation(operation_name).outputs:
yield output_tensor.name | [
"def",
"get_operation_output_names",
"(",
"self",
",",
"operation_name",
")",
":",
"for",
"output_tensor",
"in",
"self",
".",
"_name_to_operation",
"(",
"operation_name",
")",
".",
"outputs",
":",
"yield",
"output_tensor",
".",
"name"
] | Generates the names of all output tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an output tensor. | [
"Generates",
"the",
"names",
"of",
"all",
"output",
"tensors",
"of",
"an",
"operation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L105-L115 | train | 222,621 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_tensor_shape | def get_tensor_shape(self, tensor_name):
"""The tf.TensorShape of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.TensorShape
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tf.TensorShape(tensor.shape.to_integer_list)
else: # tf.Tensor
return tensor.shape | python | def get_tensor_shape(self, tensor_name):
"""The tf.TensorShape of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.TensorShape
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tf.TensorShape(tensor.shape.to_integer_list)
else: # tf.Tensor
return tensor.shape | [
"def",
"get_tensor_shape",
"(",
"self",
",",
"tensor_name",
")",
":",
"tensor",
"=",
"self",
".",
"_name_to_tensor",
"(",
"tensor_name",
")",
"if",
"isinstance",
"(",
"tensor",
",",
"mtf",
".",
"Tensor",
")",
":",
"return",
"tf",
".",
"TensorShape",
"(",
... | The tf.TensorShape of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.TensorShape | [
"The",
"tf",
".",
"TensorShape",
"of",
"a",
"tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L137-L151 | train | 222,622 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_tensor_num_entries | def get_tensor_num_entries(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The number of entries in a tensor.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the number of entries on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
shape = self.get_tensor_shape(tensor_name)
# We don't have to worry about divisiblity issues because Mesh TensorFlow
# only allows evenly divisible assignments.
num_entries = 1
for dim in shape.dims:
num_entries = num_entries * dim.value
if not partial_layout:
return num_entries
for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
if mtf_dimension_name not in partial_layout:
continue
mesh_dimension_name = partial_layout[mtf_dimension_name]
mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
num_entries = int(math.ceil(num_entries / mesh_dimension_size))
return num_entries | python | def get_tensor_num_entries(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The number of entries in a tensor.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the number of entries on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
shape = self.get_tensor_shape(tensor_name)
# We don't have to worry about divisiblity issues because Mesh TensorFlow
# only allows evenly divisible assignments.
num_entries = 1
for dim in shape.dims:
num_entries = num_entries * dim.value
if not partial_layout:
return num_entries
for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
if mtf_dimension_name not in partial_layout:
continue
mesh_dimension_name = partial_layout[mtf_dimension_name]
mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
num_entries = int(math.ceil(num_entries / mesh_dimension_size))
return num_entries | [
"def",
"get_tensor_num_entries",
"(",
"self",
",",
"tensor_name",
",",
"partial_layout",
"=",
"None",
",",
"mesh_dimension_to_size",
"=",
"None",
")",
":",
"shape",
"=",
"self",
".",
"get_tensor_shape",
"(",
"tensor_name",
")",
"# We don't have to worry about divisibl... | The number of entries in a tensor.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the number of entries on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer | [
"The",
"number",
"of",
"entries",
"in",
"a",
"tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L153-L187 | train | 222,623 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_tensor_size | def get_tensor_size(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The size of a tensor in bytes.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
return (self.get_tensor_dtype(tensor_name).size *
self.get_tensor_num_entries(tensor_name, partial_layout,
mesh_dimension_to_size)) | python | def get_tensor_size(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The size of a tensor in bytes.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
return (self.get_tensor_dtype(tensor_name).size *
self.get_tensor_num_entries(tensor_name, partial_layout,
mesh_dimension_to_size)) | [
"def",
"get_tensor_size",
"(",
"self",
",",
"tensor_name",
",",
"partial_layout",
"=",
"None",
",",
"mesh_dimension_to_size",
"=",
"None",
")",
":",
"return",
"(",
"self",
".",
"get_tensor_dtype",
"(",
"tensor_name",
")",
".",
"size",
"*",
"self",
".",
"get_... | The size of a tensor in bytes.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer | [
"The",
"size",
"of",
"a",
"tensor",
"in",
"bytes",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L189-L208 | train | 222,624 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_tensor_device | def get_tensor_device(self, tensor_name):
"""The device of a tensor.
Note that only tf tensors have device assignments.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a string or None, representing the device name.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, tf.Tensor):
return tensor.device
else: # mtf.Tensor
return None | python | def get_tensor_device(self, tensor_name):
"""The device of a tensor.
Note that only tf tensors have device assignments.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a string or None, representing the device name.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, tf.Tensor):
return tensor.device
else: # mtf.Tensor
return None | [
"def",
"get_tensor_device",
"(",
"self",
",",
"tensor_name",
")",
":",
"tensor",
"=",
"self",
".",
"_name_to_tensor",
"(",
"tensor_name",
")",
"if",
"isinstance",
"(",
"tensor",
",",
"tf",
".",
"Tensor",
")",
":",
"return",
"tensor",
".",
"device",
"else",... | The device of a tensor.
Note that only tf tensors have device assignments.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a string or None, representing the device name. | [
"The",
"device",
"of",
"a",
"tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L210-L225 | train | 222,625 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_operation_device | def get_operation_device(self, operation_name):
"""The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name.
"""
operation = self._name_to_operation(operation_name)
if isinstance(operation, tf.Operation):
return operation.device
else: # mtf.Operation
return None | python | def get_operation_device(self, operation_name):
"""The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name.
"""
operation = self._name_to_operation(operation_name)
if isinstance(operation, tf.Operation):
return operation.device
else: # mtf.Operation
return None | [
"def",
"get_operation_device",
"(",
"self",
",",
"operation_name",
")",
":",
"operation",
"=",
"self",
".",
"_name_to_operation",
"(",
"operation_name",
")",
"if",
"isinstance",
"(",
"operation",
",",
"tf",
".",
"Operation",
")",
":",
"return",
"operation",
".... | The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name. | [
"The",
"device",
"of",
"an",
"operation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L242-L257 | train | 222,626 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_tensor_mtf_dimension_names | def get_tensor_mtf_dimension_names(self, tensor_name):
"""The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else: # tf.Tensor
return [] | python | def get_tensor_mtf_dimension_names(self, tensor_name):
"""The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else: # tf.Tensor
return [] | [
"def",
"get_tensor_mtf_dimension_names",
"(",
"self",
",",
"tensor_name",
")",
":",
"tensor",
"=",
"self",
".",
"_name_to_tensor",
"(",
"tensor_name",
")",
"if",
"isinstance",
"(",
"tensor",
",",
"mtf",
".",
"Tensor",
")",
":",
"return",
"tensor",
".",
"shap... | The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions. | [
"The",
"Mesh",
"TensorFlow",
"dimensions",
"associated",
"with",
"a",
"tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L259-L272 | train | 222,627 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.get_operation_mtf_dimension_names | def get_operation_mtf_dimension_names(self, operation_name):
"""The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
"""
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names | python | def get_operation_mtf_dimension_names(self, operation_name):
"""The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
"""
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names | [
"def",
"get_operation_mtf_dimension_names",
"(",
"self",
",",
"operation_name",
")",
":",
"mtf_dimension_names",
"=",
"set",
"(",
")",
"for",
"tensor_name",
"in",
"self",
".",
"get_operation_input_names",
"(",
"operation_name",
")",
":",
"mtf_dimension_names",
".",
... | The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions. | [
"The",
"Mesh",
"TensorFlow",
"dimensions",
"associated",
"with",
"an",
"operation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L274-L290 | train | 222,628 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.set_tensor_final | def set_tensor_final(self, tensor_name):
"""Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
"""
tensor = self._name_to_tensor(tensor_name)
self._final_tensors.add(tensor) | python | def set_tensor_final(self, tensor_name):
"""Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
"""
tensor = self._name_to_tensor(tensor_name)
self._final_tensors.add(tensor) | [
"def",
"set_tensor_final",
"(",
"self",
",",
"tensor_name",
")",
":",
"tensor",
"=",
"self",
".",
"_name_to_tensor",
"(",
"tensor_name",
")",
"self",
".",
"_final_tensors",
".",
"add",
"(",
"tensor",
")"
] | Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph. | [
"Denotes",
"a",
"tensor",
"as",
"a",
"final",
"output",
"of",
"the",
"computation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L292-L299 | train | 222,629 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.is_tensor_final | def is_tensor_final(self, tensor_name):
"""Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output.
"""
tensor = self._name_to_tensor(tensor_name)
return tensor in self._final_tensors | python | def is_tensor_final(self, tensor_name):
"""Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output.
"""
tensor = self._name_to_tensor(tensor_name)
return tensor in self._final_tensors | [
"def",
"is_tensor_final",
"(",
"self",
",",
"tensor_name",
")",
":",
"tensor",
"=",
"self",
".",
"_name_to_tensor",
"(",
"tensor_name",
")",
"return",
"tensor",
"in",
"self",
".",
"_final_tensors"
] | Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output. | [
"Whether",
"a",
"tensor",
"is",
"a",
"final",
"output",
"of",
"the",
"computation",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L301-L311 | train | 222,630 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.compute_cost_graph | def compute_cost_graph(self, devices=None):
"""Computes a CostGraphDef protobuf based on this graph.
Defined in tensorflow/core/framework/cost_graph.proto.
Args:
devices: optional [string], the names of devices to consider. If
specified, any tensor on a device not listed is given a size of zero.
Any device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.
Returns:
a CostGraphDef protobuf with a Node for every operation in the graph, each
of which is populated with size/dtype information for its inputs and
outputs (which match the input/output order of the operation).
"""
cost_graph_def = cost_graph_pb2.CostGraphDef()
for i, operation_name in enumerate(self.get_all_operation_names()):
node = cost_graph_def.node.add(
name=operation_name,
device=self.get_operation_device(operation_name),
id=i)
for input_name in self.get_operation_input_names(operation_name):
id1, id2 = self._tensor_name_to_ids[input_name]
node.input_info.add(preceding_node=id1, preceding_port=id2)
for output_name in self.get_operation_output_names(operation_name):
tensor_device = self.get_tensor_device(output_name)
# devices = [] is not the same as None, and tensor_device = '' is also
# not the same as None.
if devices is None or tensor_device is None or tensor_device in devices:
node.output_info.add(
size=self.get_tensor_num_entries(output_name),
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
shape=self.get_tensor_shape(output_name).as_proto(),
)
else:
node.output_info.add(
size=0,
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
)
# NOTE(joshuawang): Unfortunately, the CostGraphDef protobuf has final
# operations, not tensors. As a result, we have to declare any operation
# that outputs a final tensor as final, which may expand the final set
# of tensors to keep in memory. This issue also arises in the scheduler
# code we will interface with.
if self.is_tensor_final(output_name):
node.is_final = True
return cost_graph_def | python | def compute_cost_graph(self, devices=None):
"""Computes a CostGraphDef protobuf based on this graph.
Defined in tensorflow/core/framework/cost_graph.proto.
Args:
devices: optional [string], the names of devices to consider. If
specified, any tensor on a device not listed is given a size of zero.
Any device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.
Returns:
a CostGraphDef protobuf with a Node for every operation in the graph, each
of which is populated with size/dtype information for its inputs and
outputs (which match the input/output order of the operation).
"""
cost_graph_def = cost_graph_pb2.CostGraphDef()
for i, operation_name in enumerate(self.get_all_operation_names()):
node = cost_graph_def.node.add(
name=operation_name,
device=self.get_operation_device(operation_name),
id=i)
for input_name in self.get_operation_input_names(operation_name):
id1, id2 = self._tensor_name_to_ids[input_name]
node.input_info.add(preceding_node=id1, preceding_port=id2)
for output_name in self.get_operation_output_names(operation_name):
tensor_device = self.get_tensor_device(output_name)
# devices = [] is not the same as None, and tensor_device = '' is also
# not the same as None.
if devices is None or tensor_device is None or tensor_device in devices:
node.output_info.add(
size=self.get_tensor_num_entries(output_name),
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
shape=self.get_tensor_shape(output_name).as_proto(),
)
else:
node.output_info.add(
size=0,
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
)
# NOTE(joshuawang): Unfortunately, the CostGraphDef protobuf has final
# operations, not tensors. As a result, we have to declare any operation
# that outputs a final tensor as final, which may expand the final set
# of tensors to keep in memory. This issue also arises in the scheduler
# code we will interface with.
if self.is_tensor_final(output_name):
node.is_final = True
return cost_graph_def | [
"def",
"compute_cost_graph",
"(",
"self",
",",
"devices",
"=",
"None",
")",
":",
"cost_graph_def",
"=",
"cost_graph_pb2",
".",
"CostGraphDef",
"(",
")",
"for",
"i",
",",
"operation_name",
"in",
"enumerate",
"(",
"self",
".",
"get_all_operation_names",
"(",
")"... | Computes a CostGraphDef protobuf based on this graph.
Defined in tensorflow/core/framework/cost_graph.proto.
Args:
devices: optional [string], the names of devices to consider. If
specified, any tensor on a device not listed is given a size of zero.
Any device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.
Returns:
a CostGraphDef protobuf with a Node for every operation in the graph, each
of which is populated with size/dtype information for its inputs and
outputs (which match the input/output order of the operation). | [
"Computes",
"a",
"CostGraphDef",
"protobuf",
"based",
"on",
"this",
"graph",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L313-L365 | train | 222,631 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface.compute_memory_contents_under_schedule | def compute_memory_contents_under_schedule(self, schedule):
"""The in-memory tensors present when executing each operation in schedule.
Simulates running operations in the order given by a schedule. Keeps track
of the tensors in memory at every point in time, and outputs a list (one
entry for each point in time) of all sets of all memory contents (i.e. a
frozenset of strings) ever seen in this execution.
It is assumed (but not checked) that schedule is a valid topological sort of
the operations in this graph.
Args:
schedule: A list of integer ids; the order to run operations in.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
get_all_operation_names()).
"""
out_degree = self._compute_initial_out_degree()
curr_memory_contents = set()
memory_contents_for_each_operation = []
for operation_id in schedule:
operation_name = self._operations[operation_id].name
# Allocate new memory to perform the computation at this node.
for output_name in self.get_operation_output_names(operation_name):
curr_memory_contents.add(output_name)
memory_contents_for_each_operation.append(frozenset(curr_memory_contents))
# Free any tensors which are no longer needed.
for output_name in self.get_operation_output_names(operation_name):
if out_degree[output_name] == 0:
curr_memory_contents.remove(output_name)
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] -= 1
if out_degree[input_name] == 0:
curr_memory_contents.remove(input_name)
return memory_contents_for_each_operation | python | def compute_memory_contents_under_schedule(self, schedule):
"""The in-memory tensors present when executing each operation in schedule.
Simulates running operations in the order given by a schedule. Keeps track
of the tensors in memory at every point in time, and outputs a list (one
entry for each point in time) of all sets of all memory contents (i.e. a
frozenset of strings) ever seen in this execution.
It is assumed (but not checked) that schedule is a valid topological sort of
the operations in this graph.
Args:
schedule: A list of integer ids; the order to run operations in.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
get_all_operation_names()).
"""
out_degree = self._compute_initial_out_degree()
curr_memory_contents = set()
memory_contents_for_each_operation = []
for operation_id in schedule:
operation_name = self._operations[operation_id].name
# Allocate new memory to perform the computation at this node.
for output_name in self.get_operation_output_names(operation_name):
curr_memory_contents.add(output_name)
memory_contents_for_each_operation.append(frozenset(curr_memory_contents))
# Free any tensors which are no longer needed.
for output_name in self.get_operation_output_names(operation_name):
if out_degree[output_name] == 0:
curr_memory_contents.remove(output_name)
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] -= 1
if out_degree[input_name] == 0:
curr_memory_contents.remove(input_name)
return memory_contents_for_each_operation | [
"def",
"compute_memory_contents_under_schedule",
"(",
"self",
",",
"schedule",
")",
":",
"out_degree",
"=",
"self",
".",
"_compute_initial_out_degree",
"(",
")",
"curr_memory_contents",
"=",
"set",
"(",
")",
"memory_contents_for_each_operation",
"=",
"[",
"]",
"for",
... | The in-memory tensors present when executing each operation in schedule.
Simulates running operations in the order given by a schedule. Keeps track
of the tensors in memory at every point in time, and outputs a list (one
entry for each point in time) of all sets of all memory contents (i.e. a
frozenset of strings) ever seen in this execution.
It is assumed (but not checked) that schedule is a valid topological sort of
the operations in this graph.
Args:
schedule: A list of integer ids; the order to run operations in.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
get_all_operation_names()). | [
"The",
"in",
"-",
"memory",
"tensors",
"present",
"when",
"executing",
"each",
"operation",
"in",
"schedule",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L367-L407 | train | 222,632 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface._initialize_operations | def _initialize_operations(self):
"""Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
"""
if isinstance(self._graph, tf.Graph):
return self._graph.get_operations()
elif isinstance(self._graph, mtf.Graph):
return self._graph.operations
else:
raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'
.format(type(self._graph))) | python | def _initialize_operations(self):
"""Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
"""
if isinstance(self._graph, tf.Graph):
return self._graph.get_operations()
elif isinstance(self._graph, mtf.Graph):
return self._graph.operations
else:
raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'
.format(type(self._graph))) | [
"def",
"_initialize_operations",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"_graph",
",",
"tf",
".",
"Graph",
")",
":",
"return",
"self",
".",
"_graph",
".",
"get_operations",
"(",
")",
"elif",
"isinstance",
"(",
"self",
".",
"_graph",... | Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation) | [
"Initializer",
"for",
"_operations",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L409-L424 | train | 222,633 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface._initialize_operation_name_to_id | def _initialize_operation_name_to_id(self):
"""Initializer for _operation_name_to_id.
Returns:
a {string: int}, mapping operation names to their index in _operations.
"""
operation_name_to_id = {}
for i, operation in enumerate(self._operations):
operation_name_to_id[operation.name] = i
return operation_name_to_id | python | def _initialize_operation_name_to_id(self):
"""Initializer for _operation_name_to_id.
Returns:
a {string: int}, mapping operation names to their index in _operations.
"""
operation_name_to_id = {}
for i, operation in enumerate(self._operations):
operation_name_to_id[operation.name] = i
return operation_name_to_id | [
"def",
"_initialize_operation_name_to_id",
"(",
"self",
")",
":",
"operation_name_to_id",
"=",
"{",
"}",
"for",
"i",
",",
"operation",
"in",
"enumerate",
"(",
"self",
".",
"_operations",
")",
":",
"operation_name_to_id",
"[",
"operation",
".",
"name",
"]",
"="... | Initializer for _operation_name_to_id.
Returns:
a {string: int}, mapping operation names to their index in _operations. | [
"Initializer",
"for",
"_operation_name_to_id",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L426-L435 | train | 222,634 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface._initialize_tensor_name_to_ids | def _initialize_tensor_name_to_ids(self):
"""Initializer for _tensor_name_to_ids.
Returns:
a {string: (int, int)}, mapping the name of tensor T to the index of T's
operation in _operations and T's index in T's operation's outputs.
"""
tensor_name_to_ids = {}
for i, operation in enumerate(self._operations):
for j, tensor in enumerate(operation.outputs):
tensor_name_to_ids[tensor.name] = (i, j)
return tensor_name_to_ids | python | def _initialize_tensor_name_to_ids(self):
"""Initializer for _tensor_name_to_ids.
Returns:
a {string: (int, int)}, mapping the name of tensor T to the index of T's
operation in _operations and T's index in T's operation's outputs.
"""
tensor_name_to_ids = {}
for i, operation in enumerate(self._operations):
for j, tensor in enumerate(operation.outputs):
tensor_name_to_ids[tensor.name] = (i, j)
return tensor_name_to_ids | [
"def",
"_initialize_tensor_name_to_ids",
"(",
"self",
")",
":",
"tensor_name_to_ids",
"=",
"{",
"}",
"for",
"i",
",",
"operation",
"in",
"enumerate",
"(",
"self",
".",
"_operations",
")",
":",
"for",
"j",
",",
"tensor",
"in",
"enumerate",
"(",
"operation",
... | Initializer for _tensor_name_to_ids.
Returns:
a {string: (int, int)}, mapping the name of tensor T to the index of T's
operation in _operations and T's index in T's operation's outputs. | [
"Initializer",
"for",
"_tensor_name_to_ids",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L437-L448 | train | 222,635 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface._name_to_tensor | def _name_to_tensor(self, tensor_name):
"""The tensor with the given name.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a tf.Tensor or mtf.Tensor
"""
id1, id2 = self._tensor_name_to_ids[tensor_name]
return self._operations[id1].outputs[id2] | python | def _name_to_tensor(self, tensor_name):
"""The tensor with the given name.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a tf.Tensor or mtf.Tensor
"""
id1, id2 = self._tensor_name_to_ids[tensor_name]
return self._operations[id1].outputs[id2] | [
"def",
"_name_to_tensor",
"(",
"self",
",",
"tensor_name",
")",
":",
"id1",
",",
"id2",
"=",
"self",
".",
"_tensor_name_to_ids",
"[",
"tensor_name",
"]",
"return",
"self",
".",
"_operations",
"[",
"id1",
"]",
".",
"outputs",
"[",
"id2",
"]"
] | The tensor with the given name.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a tf.Tensor or mtf.Tensor | [
"The",
"tensor",
"with",
"the",
"given",
"name",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L471-L481 | train | 222,636 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/graph_interface.py | GraphInterface._compute_initial_out_degree | def _compute_initial_out_degree(self):
"""The number of operations which use each tensor as input.
Returns:
a {string, int} mapping tensor name to the number of operations which use
it as input, or one plus that quantity if the tensor is final.
"""
out_degree = collections.defaultdict(int)
# Pretend that final tensors have an additional degree so they are not
# freed.
for tensor_name in self.get_all_tensor_names():
if self.is_tensor_final(tensor_name):
out_degree[tensor_name] = 1
for operation_name in self.get_all_operation_names():
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] += 1
return out_degree | python | def _compute_initial_out_degree(self):
"""The number of operations which use each tensor as input.
Returns:
a {string, int} mapping tensor name to the number of operations which use
it as input, or one plus that quantity if the tensor is final.
"""
out_degree = collections.defaultdict(int)
# Pretend that final tensors have an additional degree so they are not
# freed.
for tensor_name in self.get_all_tensor_names():
if self.is_tensor_final(tensor_name):
out_degree[tensor_name] = 1
for operation_name in self.get_all_operation_names():
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] += 1
return out_degree | [
"def",
"_compute_initial_out_degree",
"(",
"self",
")",
":",
"out_degree",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"# Pretend that final tensors have an additional degree so they are not",
"# freed.",
"for",
"tensor_name",
"in",
"self",
".",
"get_all_tensor... | The number of operations which use each tensor as input.
Returns:
a {string, int} mapping tensor name to the number of operations which use
it as input, or one plus that quantity if the tensor is final. | [
"The",
"number",
"of",
"operations",
"which",
"use",
"each",
"tensor",
"as",
"input",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/graph_interface.py#L483-L502 | train | 222,637 |
tensorflow/mesh | mesh_tensorflow/layers.py | layer_norm | def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
"""Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias | python | def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
"""Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias | [
"def",
"layer_norm",
"(",
"x",
",",
"dim",
",",
"epsilon",
"=",
"1e-6",
",",
"name",
"=",
"\"layer_prepostprocess\"",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
"+",
"\"/layer_norm\"",
")",
":",
"scale",
"=",
"mtf",
".",
"get_variable",
... | Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x. | [
"Layer",
"normalization",
"over",
"dimension",
"dim",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L85-L114 | train | 222,638 |
tensorflow/mesh | mesh_tensorflow/layers.py | softmax_cross_entropy_with_logits | def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
"""Per-example softmax loss.
if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the
partition function. Example value: z_loss=1e-4. Two uses of z_loss are:
- To keep the logits from drifting too far from zero, which can cause
unacceptable roundoff errors in bfloat16.
- To encourage the logits to be normalized log-probabilities.
Args:
logits: a mtf.Tensor whose shape contains vocab_dim
targets: a mtf.Tensor with the same shape as logits
vocab_dim: a mtf.Dimension
z_loss: a float
Returns:
a mtf.Tensor whose shape is equal to logits.shape - vocab_dim
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
if vocab_dim not in logits.shape.dims:
raise ValueError("vocab_dim must be in logits.shape.dims")
log_z = mtf.reduce_logsumexp(logits, vocab_dim)
log_softmax = logits - log_z
loss = mtf.negative(
mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
if z_loss != 0:
loss += z_loss * mtf.square(log_z)
return loss | python | def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
"""Per-example softmax loss.
if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the
partition function. Example value: z_loss=1e-4. Two uses of z_loss are:
- To keep the logits from drifting too far from zero, which can cause
unacceptable roundoff errors in bfloat16.
- To encourage the logits to be normalized log-probabilities.
Args:
logits: a mtf.Tensor whose shape contains vocab_dim
targets: a mtf.Tensor with the same shape as logits
vocab_dim: a mtf.Dimension
z_loss: a float
Returns:
a mtf.Tensor whose shape is equal to logits.shape - vocab_dim
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
if vocab_dim not in logits.shape.dims:
raise ValueError("vocab_dim must be in logits.shape.dims")
log_z = mtf.reduce_logsumexp(logits, vocab_dim)
log_softmax = logits - log_z
loss = mtf.negative(
mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
if z_loss != 0:
loss += z_loss * mtf.square(log_z)
return loss | [
"def",
"softmax_cross_entropy_with_logits",
"(",
"logits",
",",
"targets",
",",
"vocab_dim",
",",
"z_loss",
"=",
"0.0",
")",
":",
"if",
"logits",
".",
"shape",
"!=",
"targets",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"logits shape must equal targets shape... | Per-example softmax loss.
if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the
partition function. Example value: z_loss=1e-4. Two uses of z_loss are:
- To keep the logits from drifting too far from zero, which can cause
unacceptable roundoff errors in bfloat16.
- To encourage the logits to be normalized log-probabilities.
Args:
logits: a mtf.Tensor whose shape contains vocab_dim
targets: a mtf.Tensor with the same shape as logits
vocab_dim: a mtf.Dimension
z_loss: a float
Returns:
a mtf.Tensor whose shape is equal to logits.shape - vocab_dim
Raises:
ValueError: if the shapes do not match. | [
"Per",
"-",
"example",
"softmax",
"loss",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L187-L220 | train | 222,639 |
tensorflow/mesh | mesh_tensorflow/layers.py | sigmoid_cross_entropy_with_logits | def sigmoid_cross_entropy_with_logits(logits, targets):
"""Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
x = logits
z = targets
return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x))) | python | def sigmoid_cross_entropy_with_logits(logits, targets):
"""Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
x = logits
z = targets
return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x))) | [
"def",
"sigmoid_cross_entropy_with_logits",
"(",
"logits",
",",
"targets",
")",
":",
"if",
"logits",
".",
"shape",
"!=",
"targets",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"logits shape must equal targets shape\"",
"\"logits=%s targets=%s\"",
"%",
"(",
"logit... | Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match. | [
"Sigmoid",
"cross",
"-",
"entropy",
"loss",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L223-L242 | train | 222,640 |
tensorflow/mesh | mesh_tensorflow/layers.py | dense_relu_dense | def dense_relu_dense(x,
hidden_channels,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32, name=None):
"""Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string
Returns:
a mtf.Tensor with the same shape as x.
"""
with tf.variable_scope(name, default_name="dense_relu_dense"):
io_channels = x.shape.dims[-1]
h = dense(x, hidden_channels,
use_bias=False, activation=mtf.relu,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="wi")
if dropout != 0.0:
h = mtf.dropout(h, 1.0 - dropout,
noise_shape=h.shape - dropout_broadcast_dims)
return dense(h, io_channels, use_bias=False, activation=None,
master_dtype=master_dtype, slice_dtype=slice_dtype,
name="wo") | python | def dense_relu_dense(x,
hidden_channels,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32, name=None):
"""Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string
Returns:
a mtf.Tensor with the same shape as x.
"""
with tf.variable_scope(name, default_name="dense_relu_dense"):
io_channels = x.shape.dims[-1]
h = dense(x, hidden_channels,
use_bias=False, activation=mtf.relu,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="wi")
if dropout != 0.0:
h = mtf.dropout(h, 1.0 - dropout,
noise_shape=h.shape - dropout_broadcast_dims)
return dense(h, io_channels, use_bias=False, activation=None,
master_dtype=master_dtype, slice_dtype=slice_dtype,
name="wo") | [
"def",
"dense_relu_dense",
"(",
"x",
",",
"hidden_channels",
",",
"dropout",
"=",
"0.0",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"master_dtype",
"=",
"tf",
".",
"float32",
",",
"slice_dtype",
"=",
"tf",
".",
"float32",
",",
"name",
"=",
"None",
")... | Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string
Returns:
a mtf.Tensor with the same shape as x. | [
"Hidden",
"layer",
"with",
"ReLU",
"activation",
"followed",
"by",
"linear",
"projection",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L251-L283 | train | 222,641 |
tensorflow/mesh | mesh_tensorflow/layers.py | local_1d_halo_exchange | def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 1D attention."""
if num_w_blocks is not None:
if mask_right:
k = mtf.left_halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.left_halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
k = mtf.halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
if mask_right:
k = mtf.pad(k, [w_dim, None], w_dim.name)
v = mtf.pad(v, [w_dim, None], w_dim.name)
else:
k = mtf.pad(k, [w_dim, w_dim], w_dim.name)
v = mtf.pad(v, [w_dim, w_dim], w_dim.name)
return k, v | python | def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 1D attention."""
if num_w_blocks is not None:
if mask_right:
k = mtf.left_halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.left_halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
k = mtf.halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
if mask_right:
k = mtf.pad(k, [w_dim, None], w_dim.name)
v = mtf.pad(v, [w_dim, None], w_dim.name)
else:
k = mtf.pad(k, [w_dim, w_dim], w_dim.name)
v = mtf.pad(v, [w_dim, w_dim], w_dim.name)
return k, v | [
"def",
"local_1d_halo_exchange",
"(",
"k",
",",
"v",
",",
"num_w_blocks",
",",
"w_dim",
",",
"mask_right",
")",
":",
"if",
"num_w_blocks",
"is",
"not",
"None",
":",
"if",
"mask_right",
":",
"k",
"=",
"mtf",
".",
"left_halo_exchange",
"(",
"k",
",",
"num_... | Halo exchange for keys and values for Local 1D attention. | [
"Halo",
"exchange",
"for",
"keys",
"and",
"values",
"for",
"Local",
"1D",
"attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L286-L302 | train | 222,642 |
tensorflow/mesh | mesh_tensorflow/layers.py | local_2d_halo_exchange | def local_2d_halo_exchange(k, v, num_h_blocks, h_dim,
num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 2D attention."""
for blocks_dim, block_size_dim, halo_size in [
(num_h_blocks, h_dim, h_dim.size),
(num_w_blocks, w_dim, w_dim.size)]:
# shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]
if halo_size > 0:
if blocks_dim is not None:
if mask_right:
k = mtf.left_halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.left_halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
k = mtf.halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
if mask_right:
k = mtf.pad(k, [halo_size, None], block_size_dim.name)
v = mtf.pad(v, [halo_size, None], block_size_dim.name)
else:
k = mtf.pad(k, [halo_size, halo_size], block_size_dim.name)
v = mtf.pad(v, [halo_size, halo_size], block_size_dim.name)
return k, v | python | def local_2d_halo_exchange(k, v, num_h_blocks, h_dim,
num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 2D attention."""
for blocks_dim, block_size_dim, halo_size in [
(num_h_blocks, h_dim, h_dim.size),
(num_w_blocks, w_dim, w_dim.size)]:
# shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]
if halo_size > 0:
if blocks_dim is not None:
if mask_right:
k = mtf.left_halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.left_halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
k = mtf.halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
if mask_right:
k = mtf.pad(k, [halo_size, None], block_size_dim.name)
v = mtf.pad(v, [halo_size, None], block_size_dim.name)
else:
k = mtf.pad(k, [halo_size, halo_size], block_size_dim.name)
v = mtf.pad(v, [halo_size, halo_size], block_size_dim.name)
return k, v | [
"def",
"local_2d_halo_exchange",
"(",
"k",
",",
"v",
",",
"num_h_blocks",
",",
"h_dim",
",",
"num_w_blocks",
",",
"w_dim",
",",
"mask_right",
")",
":",
"for",
"blocks_dim",
",",
"block_size_dim",
",",
"halo_size",
"in",
"[",
"(",
"num_h_blocks",
",",
"h_dim"... | Halo exchange for keys and values for Local 2D attention. | [
"Halo",
"exchange",
"for",
"keys",
"and",
"values",
"for",
"Local",
"2D",
"attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L535-L557 | train | 222,643 |
tensorflow/mesh | mesh_tensorflow/layers.py | local_2d_self_attention_spatial_blocks | def local_2d_self_attention_spatial_blocks(query_antecedent,
kv_channels,
heads,
memory_h_dim=None,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention", values=[query_antecedent]):
h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
"memory_" + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
"memory_" + w_dim.name)
memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq],
mtf.Shape([
batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
kv_channels
]))
k = mtf.einsum([memory_antecedent, wk],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
memory_h_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo],
mtf.Shape(
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels])) | python | def local_2d_self_attention_spatial_blocks(query_antecedent,
kv_channels,
heads,
memory_h_dim=None,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention", values=[query_antecedent]):
h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
"memory_" + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
"memory_" + w_dim.name)
memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq],
mtf.Shape([
batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
kv_channels
]))
k = mtf.einsum([memory_antecedent, wk],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
memory_h_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo],
mtf.Shape(
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels])) | [
"def",
"local_2d_self_attention_spatial_blocks",
"(",
"query_antecedent",
",",
"kv_channels",
",",
"heads",
",",
"memory_h_dim",
"=",
"None",
",",
"memory_w_dim",
"=",
"None",
",",
"mask_right",
"=",
"False",
",",
"master_dtype",
"=",
"tf",
".",
"float32",
",",
... | Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match. | [
"Attention",
"to",
"the",
"source",
"position",
"and",
"a",
"neighborhood",
"to",
"the",
"left",
"or",
"right",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L560-L642 | train | 222,644 |
tensorflow/mesh | mesh_tensorflow/layers.py | multihead_attention_vars | def multihead_attention_vars(
mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, activation_dtype):
"""Deprecated version of multihead_attention_params with combine=True."""
return multihead_attention_params(
mesh, heads, io_channels, kv_channels,
mtf.VariableDType(master_dtype, slice_dtype, activation_dtype),
combine=True) | python | def multihead_attention_vars(
mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, activation_dtype):
"""Deprecated version of multihead_attention_params with combine=True."""
return multihead_attention_params(
mesh, heads, io_channels, kv_channels,
mtf.VariableDType(master_dtype, slice_dtype, activation_dtype),
combine=True) | [
"def",
"multihead_attention_vars",
"(",
"mesh",
",",
"heads",
",",
"io_channels",
",",
"kv_channels",
",",
"master_dtype",
",",
"slice_dtype",
",",
"activation_dtype",
")",
":",
"return",
"multihead_attention_params",
"(",
"mesh",
",",
"heads",
",",
"io_channels",
... | Deprecated version of multihead_attention_params with combine=True. | [
"Deprecated",
"version",
"of",
"multihead_attention_params",
"with",
"combine",
"=",
"True",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L650-L657 | train | 222,645 |
tensorflow/mesh | mesh_tensorflow/layers.py | multihead_attention_params | def multihead_attention_params(mesh, heads, io_channels, kv_channels,
variable_dtype, combine=False):
"""Create Parameters for Multihead Attention.
If the combine flag is set to True, then we create only one variable
which stacks together all of the parameters. Otherwise, we create four
separate variables.
Args:
mesh: a Mesh
heads: a Dimension
io_channels: a Dimension
kv_channels: a Dimension
variable_dtype: a mtf.VariableDType
combine: a boolean
Returns:
wq: a Tensor with shape [heads, io_channels, kv_channels]
wk: a Tensor with shape [heads, io_channels, kv_channels]
wv: a Tensor with shape [heads, io_channels, kv_channels]
wo: a Tensor with shape [heads, io_channels, kv_channels]
"""
qkvo = mtf.Dimension("qkvo", 4)
qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
v_stddev = io_channels.size ** -0.5
# TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5
# verify that this still works and change it.
o_stddev = (io_channels.size * heads.size) ** -0.5
if combine:
def qkvo_initializer(shape,
dtype=None,
partition_info=None,
verify_shape=None):
del partition_info, verify_shape
return tf.random_normal(shape, dtype=dtype) * tf.reshape(
tf.cast([qk_stddev, qk_stddev, v_stddev, o_stddev],
dtype or tf.float32), [4, 1, 1, 1])
var = mtf.get_variable(
mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
initializer=qkvo_initializer, dtype=variable_dtype)
return mtf.unstack(var, qkvo)
else:
return [mtf.get_variable(
mesh, name, mtf.Shape([heads, io_channels, kv_channels]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=variable_dtype) for name, stddev in zip(
["q", "k", "v", "o"],
[qk_stddev, qk_stddev, v_stddev, o_stddev])] | python | def multihead_attention_params(mesh, heads, io_channels, kv_channels,
variable_dtype, combine=False):
"""Create Parameters for Multihead Attention.
If the combine flag is set to True, then we create only one variable
which stacks together all of the parameters. Otherwise, we create four
separate variables.
Args:
mesh: a Mesh
heads: a Dimension
io_channels: a Dimension
kv_channels: a Dimension
variable_dtype: a mtf.VariableDType
combine: a boolean
Returns:
wq: a Tensor with shape [heads, io_channels, kv_channels]
wk: a Tensor with shape [heads, io_channels, kv_channels]
wv: a Tensor with shape [heads, io_channels, kv_channels]
wo: a Tensor with shape [heads, io_channels, kv_channels]
"""
qkvo = mtf.Dimension("qkvo", 4)
qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
v_stddev = io_channels.size ** -0.5
# TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5
# verify that this still works and change it.
o_stddev = (io_channels.size * heads.size) ** -0.5
if combine:
def qkvo_initializer(shape,
dtype=None,
partition_info=None,
verify_shape=None):
del partition_info, verify_shape
return tf.random_normal(shape, dtype=dtype) * tf.reshape(
tf.cast([qk_stddev, qk_stddev, v_stddev, o_stddev],
dtype or tf.float32), [4, 1, 1, 1])
var = mtf.get_variable(
mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
initializer=qkvo_initializer, dtype=variable_dtype)
return mtf.unstack(var, qkvo)
else:
return [mtf.get_variable(
mesh, name, mtf.Shape([heads, io_channels, kv_channels]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=variable_dtype) for name, stddev in zip(
["q", "k", "v", "o"],
[qk_stddev, qk_stddev, v_stddev, o_stddev])] | [
"def",
"multihead_attention_params",
"(",
"mesh",
",",
"heads",
",",
"io_channels",
",",
"kv_channels",
",",
"variable_dtype",
",",
"combine",
"=",
"False",
")",
":",
"qkvo",
"=",
"mtf",
".",
"Dimension",
"(",
"\"qkvo\"",
",",
"4",
")",
"qk_stddev",
"=",
"... | Create Parameters for Multihead Attention.
If the combine flag is set to True, then we create only one variable
which stacks together all of the parameters. Otherwise, we create four
separate variables.
Args:
mesh: a Mesh
heads: a Dimension
io_channels: a Dimension
kv_channels: a Dimension
variable_dtype: a mtf.VariableDType
combine: a boolean
Returns:
wq: a Tensor with shape [heads, io_channels, kv_channels]
wk: a Tensor with shape [heads, io_channels, kv_channels]
wv: a Tensor with shape [heads, io_channels, kv_channels]
wo: a Tensor with shape [heads, io_channels, kv_channels] | [
"Create",
"Parameters",
"for",
"Multihead",
"Attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L660-L707 | train | 222,646 |
tensorflow/mesh | mesh_tensorflow/layers.py | attention_mask_ignore_padding | def attention_mask_ignore_padding(inputs, dtype=tf.float32):
"""Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim]
"""
inputs = rename_length_to_memory_length(inputs)
return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9 | python | def attention_mask_ignore_padding(inputs, dtype=tf.float32):
"""Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim]
"""
inputs = rename_length_to_memory_length(inputs)
return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9 | [
"def",
"attention_mask_ignore_padding",
"(",
"inputs",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"inputs",
"=",
"rename_length_to_memory_length",
"(",
"inputs",
")",
"return",
"mtf",
".",
"cast",
"(",
"mtf",
".",
"equal",
"(",
"inputs",
",",
"0",
... | Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim] | [
"Bias",
"for",
"encoder",
"-",
"decoder",
"attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L918-L929 | train | 222,647 |
tensorflow/mesh | mesh_tensorflow/layers.py | attention_mask_autoregressive | def attention_mask_autoregressive(query_pos, dtype=tf.float32):
"""Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9 | python | def attention_mask_autoregressive(query_pos, dtype=tf.float32):
"""Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9 | [
"def",
"attention_mask_autoregressive",
"(",
"query_pos",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"memory_pos",
"=",
"rename_length_to_memory_length",
"(",
"query_pos",
")",
"return",
"mtf",
".",
"cast",
"(",
"mtf",
".",
"less",
"(",
"query_pos",
",... | Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim] | [
"Bias",
"for",
"self",
"-",
"attention",
"where",
"attention",
"to",
"the",
"right",
"is",
"disallowed",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L932-L943 | train | 222,648 |
tensorflow/mesh | mesh_tensorflow/layers.py | attention_mask_same_segment | def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
"""Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9 | python | def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
"""Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9 | [
"def",
"attention_mask_same_segment",
"(",
"query_segment",
",",
"memory_segment",
"=",
"None",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"memory_segment",
"=",
"rename_length_to_memory_length",
"(",
"memory_segment",
"or",
"query_segment",
")",
"return",
"... | Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim] | [
"Bias",
"for",
"attention",
"where",
"attention",
"between",
"segments",
"is",
"disallowed",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L946-L960 | train | 222,649 |
tensorflow/mesh | mesh_tensorflow/layers.py | multiplicative_jitter | def multiplicative_jitter(x, epsilon=1e-2):
"""Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
"""
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype) | python | def multiplicative_jitter(x, epsilon=1e-2):
"""Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
"""
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype) | [
"def",
"multiplicative_jitter",
"(",
"x",
",",
"epsilon",
"=",
"1e-2",
")",
":",
"if",
"epsilon",
"==",
"0",
":",
"return",
"x",
"return",
"x",
"*",
"mtf",
".",
"random_uniform",
"(",
"x",
".",
"mesh",
",",
"x",
".",
"shape",
",",
"minval",
"=",
"1... | Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x. | [
"Multiply",
"values",
"by",
"a",
"random",
"number",
"between",
"1",
"-",
"epsilon",
"and",
"1",
"+",
"epsilon",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L1029-L1045 | train | 222,650 |
tensorflow/mesh | mesh_tensorflow/layers.py | multihead_self_attention_memory_compressed | def multihead_self_attention_memory_compressed(x,
mask_right,
compression_factor,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name="multihead_attention"):
"""Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = x.shape.dims[:-2]
length, io_channels = x.shape.dims[-2:]
with tf.variable_scope(name,
default_name="compressed_attention",
values=[x]):
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
memory_antecedent = compress_mean(x, length, compression_factor)
memory_antecedent = rename_length_to_memory_length(memory_antecedent)
memory_length = memory_antecedent.shape.dims[-2]
q = mtf.einsum(
[x, wq],
mtf.Shape(batch_dims + [heads, length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
if mask_right:
query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
memory_pos = (
mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
+ (compression_factor - 1))
mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
else:
mask = None
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
return mtf.einsum(
[o, wo], mtf.Shape(batch_dims + [length, io_channels])) | python | def multihead_self_attention_memory_compressed(x,
mask_right,
compression_factor,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name="multihead_attention"):
"""Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = x.shape.dims[:-2]
length, io_channels = x.shape.dims[-2:]
with tf.variable_scope(name,
default_name="compressed_attention",
values=[x]):
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
memory_antecedent = compress_mean(x, length, compression_factor)
memory_antecedent = rename_length_to_memory_length(memory_antecedent)
memory_length = memory_antecedent.shape.dims[-2]
q = mtf.einsum(
[x, wq],
mtf.Shape(batch_dims + [heads, length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
if mask_right:
query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
memory_pos = (
mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
+ (compression_factor - 1))
mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
else:
mask = None
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
return mtf.einsum(
[o, wo], mtf.Shape(batch_dims + [length, io_channels])) | [
"def",
"multihead_self_attention_memory_compressed",
"(",
"x",
",",
"mask_right",
",",
"compression_factor",
",",
"kv_channels",
",",
"heads",
",",
"dropout",
"=",
"0.0",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"master_dtype",
"=",
"tf",
".",
"float32",
"... | Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match. | [
"Memory",
"-",
"compressed",
"self",
"-",
"attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L1048-L1113 | train | 222,651 |
tensorflow/mesh | mesh_tensorflow/layers.py | compress_mean | def compress_mean(x, dim, compression_factor):
"""Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
"""
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x | python | def compress_mean(x, dim, compression_factor):
"""Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
"""
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x | [
"def",
"compress_mean",
"(",
"x",
",",
"dim",
",",
"compression_factor",
")",
":",
"dims",
"=",
"x",
".",
"shape",
".",
"dims",
"pos",
"=",
"dims",
".",
"index",
"(",
"dim",
")",
"compressed_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"dim",
".",
"name"... | Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor | [
"Compress",
"by",
"taking",
"group",
"means",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L1116-L1136 | train | 222,652 |
tensorflow/mesh | mesh_tensorflow/layers.py | embedding | def embedding(indices, vocab_dim, output_dim, variable_dtype, name="embedding"):
"""Embedding layer."""
weights = embedding_weights(
indices.mesh, vocab_dim, output_dim, variable_dtype, name)
return mtf.gather(weights, indices, vocab_dim) | python | def embedding(indices, vocab_dim, output_dim, variable_dtype, name="embedding"):
"""Embedding layer."""
weights = embedding_weights(
indices.mesh, vocab_dim, output_dim, variable_dtype, name)
return mtf.gather(weights, indices, vocab_dim) | [
"def",
"embedding",
"(",
"indices",
",",
"vocab_dim",
",",
"output_dim",
",",
"variable_dtype",
",",
"name",
"=",
"\"embedding\"",
")",
":",
"weights",
"=",
"embedding_weights",
"(",
"indices",
".",
"mesh",
",",
"vocab_dim",
",",
"output_dim",
",",
"variable_d... | Embedding layer. | [
"Embedding",
"layer",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/layers.py#L1146-L1150 | train | 222,653 |
tensorflow/mesh | mesh_tensorflow/transformer/transformer_layers.py | attention_params | def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
recurrent state is smaller
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model_dim,
memory_input_dim=context.model_dim,
output_dim=context.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv) | python | def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
recurrent state is smaller
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model_dim,
memory_input_dim=context.model_dim,
output_dim=context.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv) | [
"def",
"attention_params",
"(",
"context",
",",
"kv_dim",
",",
"num_heads",
",",
"num_memory_heads",
"=",
"0",
",",
"shared_kv",
"=",
"False",
")",
":",
"if",
"num_heads",
"==",
"1",
":",
"query_heads_dims",
"=",
"None",
"memory_heads_dims",
"=",
"None",
"el... | Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
If num_memory_heads==1, then there is only a single write-head, and multiple
read-heads. This leads to faster incremental decoding, since the
recurrent state is smaller
If num_memory_heads > 1, then num_memory_heads indicates the number of
write-heads. A fraction of the read-heads read each write-head.
num_memory_heads must divide num_heads. This behavior has not yet been tested.
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
Returns:
an attention.AttentionParams object | [
"Attention",
"Parameters",
"for",
"Transformer",
"Layers",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer_layers.py#L62-L116 | train | 222,654 |
tensorflow/mesh | mesh_tensorflow/transformer/metric_utils.py | get_metric_fns | def get_metric_fns(metric_names, labels, outputs):
"""Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
"""
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns | python | def get_metric_fns(metric_names, labels, outputs):
"""Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name.
"""
metric_fns = {}
for metric_name in metric_names:
metric_fn_name = metric_name.split("/")[-1]
if hasattr(metrics, metric_fn_name):
metric_fn = getattr(metrics, metric_fn_name)
metric_fns[metric_name] = metric_fn(labels, outputs)
else:
raise ValueError("Metric {} is not implemented".format(metric_fn_name))
return metric_fns | [
"def",
"get_metric_fns",
"(",
"metric_names",
",",
"labels",
",",
"outputs",
")",
":",
"metric_fns",
"=",
"{",
"}",
"for",
"metric_name",
"in",
"metric_names",
":",
"metric_fn_name",
"=",
"metric_name",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
... | Generate a dictionary of metric name to metric function.
Args:
metric_names: list of strings in the format "prefix/metric_function_name".
metric_function_name should refer to a function name in metrics.py. The
prefix will be included in the key in the returned dict.
labels: a tensor where batch is the first dimension.
outputs: a tensor of model predictions, same dimensionality as labels.
Returns:
metric_fns: dict of metric functions keyed by their name. | [
"Generate",
"a",
"dictionary",
"of",
"metric",
"name",
"to",
"metric",
"function",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/metric_utils.py#L28-L50 | train | 222,655 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/scheduler.py | minimize_peak_memory | def minimize_peak_memory(graph, scheduler_alg):
"""Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule.
"""
if scheduler_alg == 'NAIVE':
return _minimize_peak_memory_naive(graph)
elif scheduler_alg == 'LIST':
return _minimize_peak_memory_list(graph)
else:
raise NotImplementedError('{} is not a scheduler algorithm. It should be '
'one of NAIVE or LIST.'
.format(scheduler_alg)) | python | def minimize_peak_memory(graph, scheduler_alg):
"""Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule.
"""
if scheduler_alg == 'NAIVE':
return _minimize_peak_memory_naive(graph)
elif scheduler_alg == 'LIST':
return _minimize_peak_memory_list(graph)
else:
raise NotImplementedError('{} is not a scheduler algorithm. It should be '
'one of NAIVE or LIST.'
.format(scheduler_alg)) | [
"def",
"minimize_peak_memory",
"(",
"graph",
",",
"scheduler_alg",
")",
":",
"if",
"scheduler_alg",
"==",
"'NAIVE'",
":",
"return",
"_minimize_peak_memory_naive",
"(",
"graph",
")",
"elif",
"scheduler_alg",
"==",
"'LIST'",
":",
"return",
"_minimize_peak_memory_list",
... | Computes a schedule to minimize peak memory.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
scheduler_alg: a string, one of 'NAIVE' or 'LIST'
Returns:
an iterable of integers representing the schedule. | [
"Computes",
"a",
"schedule",
"to",
"minimize",
"peak",
"memory",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/scheduler.py#L35-L52 | train | 222,656 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/scheduler.py | _minimize_peak_memory_list | def _minimize_peak_memory_list(graph):
"""Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule.
"""
schedule = []
bytes_freed = {} # {operation_name: bytes freed}
users_of = collections.defaultdict(set) # {tensor_name: set(operation_name)}
in_degree = collections.defaultdict(int) # {operation_name: in degree}
operation_id = {} # {operation_name: id}
# We want an updatable priority queue, so we use the following workaround:
# docs.python.org/2/library/heapq.html#priority-queue-implementation-notes
priority_queue = [] # (negative bytes freed, operation name)
# Set up the (greedy) topological sort.
for i, operation_name in enumerate(graph.get_all_operation_names()):
operation_id[operation_name] = i
for input_name in graph.get_operation_input_names(operation_name):
# Note that in _HybridGraphInterface, an operation may use a tensor twice,
# but we deduplicate (with respect to in_degree) so that we can later use
# users_of to decrement in_degree.
if operation_name in users_of[input_name]:
continue
users_of[input_name].add(operation_name)
in_degree[operation_name] += 1
for operation_name in graph.get_all_operation_names():
bytes_freed[operation_name] = 0
# For each input, this operation frees memory if it is the final consumer.
for input_name in graph.get_operation_input_names(operation_name):
if len(users_of[input_name]) == 1 and not graph.is_tensor_final(
input_name):
bytes_freed[operation_name] += graph.get_tensor_size(input_name)
# For each output, this operation will require additional bytes of memory
# (hence negative bytes freed).
for output_name in graph.get_operation_output_names(operation_name):
# If the output is used (or is final), then it eats memory.
if users_of[output_name] or graph.is_tensor_final(output_name):
bytes_freed[operation_name] -= graph.get_tensor_size(output_name)
for operation_name in graph.get_all_operation_names():
if in_degree[operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[operation_name], operation_name))
# Do the (greedy) topological sort.
while priority_queue:
neg_bytes_freed, operation_name = heapq.heappop(priority_queue)
if bytes_freed[operation_name] != -neg_bytes_freed:
continue
schedule.append(operation_id[operation_name])
bytes_freed[operation_name] = None
for output_name in graph.get_operation_output_names(operation_name):
for other_operation_name in users_of[output_name]:
in_degree[other_operation_name] -= 1
if in_degree[other_operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[other_operation_name],
other_operation_name))
for input_name in graph.get_operation_input_names(operation_name):
if operation_name not in users_of[input_name]:
# Used twice by this operation and hence already removed.
continue
users_of[input_name].remove(operation_name)
if len(users_of[input_name]) != 1 or graph.is_tensor_final(output_name):
continue
(other_operation_name,) = users_of[input_name]
bytes_freed[other_operation_name] += graph.get_tensor_size(
input_name)
if in_degree[other_operation_name] > 0:
continue
# Push another copy into the priority queue with our updated value.
# The original copy will be ignored since it does not match bytes_freed.
heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],
other_operation_name))
return schedule | python | def _minimize_peak_memory_list(graph):
"""Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule.
"""
schedule = []
bytes_freed = {} # {operation_name: bytes freed}
users_of = collections.defaultdict(set) # {tensor_name: set(operation_name)}
in_degree = collections.defaultdict(int) # {operation_name: in degree}
operation_id = {} # {operation_name: id}
# We want an updatable priority queue, so we use the following workaround:
# docs.python.org/2/library/heapq.html#priority-queue-implementation-notes
priority_queue = [] # (negative bytes freed, operation name)
# Set up the (greedy) topological sort.
for i, operation_name in enumerate(graph.get_all_operation_names()):
operation_id[operation_name] = i
for input_name in graph.get_operation_input_names(operation_name):
# Note that in _HybridGraphInterface, an operation may use a tensor twice,
# but we deduplicate (with respect to in_degree) so that we can later use
# users_of to decrement in_degree.
if operation_name in users_of[input_name]:
continue
users_of[input_name].add(operation_name)
in_degree[operation_name] += 1
for operation_name in graph.get_all_operation_names():
bytes_freed[operation_name] = 0
# For each input, this operation frees memory if it is the final consumer.
for input_name in graph.get_operation_input_names(operation_name):
if len(users_of[input_name]) == 1 and not graph.is_tensor_final(
input_name):
bytes_freed[operation_name] += graph.get_tensor_size(input_name)
# For each output, this operation will require additional bytes of memory
# (hence negative bytes freed).
for output_name in graph.get_operation_output_names(operation_name):
# If the output is used (or is final), then it eats memory.
if users_of[output_name] or graph.is_tensor_final(output_name):
bytes_freed[operation_name] -= graph.get_tensor_size(output_name)
for operation_name in graph.get_all_operation_names():
if in_degree[operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[operation_name], operation_name))
# Do the (greedy) topological sort.
while priority_queue:
neg_bytes_freed, operation_name = heapq.heappop(priority_queue)
if bytes_freed[operation_name] != -neg_bytes_freed:
continue
schedule.append(operation_id[operation_name])
bytes_freed[operation_name] = None
for output_name in graph.get_operation_output_names(operation_name):
for other_operation_name in users_of[output_name]:
in_degree[other_operation_name] -= 1
if in_degree[other_operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[other_operation_name],
other_operation_name))
for input_name in graph.get_operation_input_names(operation_name):
if operation_name not in users_of[input_name]:
# Used twice by this operation and hence already removed.
continue
users_of[input_name].remove(operation_name)
if len(users_of[input_name]) != 1 or graph.is_tensor_final(output_name):
continue
(other_operation_name,) = users_of[input_name]
bytes_freed[other_operation_name] += graph.get_tensor_size(
input_name)
if in_degree[other_operation_name] > 0:
continue
# Push another copy into the priority queue with our updated value.
# The original copy will be ignored since it does not match bytes_freed.
heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],
other_operation_name))
return schedule | [
"def",
"_minimize_peak_memory_list",
"(",
"graph",
")",
":",
"schedule",
"=",
"[",
"]",
"bytes_freed",
"=",
"{",
"}",
"# {operation_name: bytes freed}",
"users_of",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"# {tensor_name: set(operation_name)}",
"in_de... | Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule. | [
"Computes",
"schedule",
"according",
"to",
"the",
"greedy",
"list",
"heuristic",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/scheduler.py#L67-L154 | train | 222,657 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout.py | layout | def layout(mtf_graph, mesh_shape, mtf_outputs=()):
"""Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
"""
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve()) | python | def layout(mtf_graph, mesh_shape, mtf_outputs=()):
"""Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules
"""
mesh_shape = mtf.convert_to_shape(mesh_shape)
estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape,
mtf_outputs)
optimizer = layout_optimizer.LayoutOptimizer(estimator)
return mtf.convert_to_layout_rules(optimizer.solve()) | [
"def",
"layout",
"(",
"mtf_graph",
",",
"mesh_shape",
",",
"mtf_outputs",
"=",
"(",
")",
")",
":",
"mesh_shape",
"=",
"mtf",
".",
"convert_to_shape",
"(",
"mesh_shape",
")",
"estimator",
"=",
"memory_estimator",
".",
"MemoryEstimator",
"(",
"mtf_graph",
",",
... | Compute layout rules based on a computational graph and mesh shape.
Args:
mtf_graph: a mtf.Graph.
mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension.
mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs
of the computation.
Returns:
a mtf.LayoutRules | [
"Compute",
"layout",
"rules",
"based",
"on",
"a",
"computational",
"graph",
"and",
"mesh",
"shape",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout.py#L47-L63 | train | 222,658 |
tensorflow/mesh | mesh_tensorflow/optimize.py | Optimizer.apply_grads | def apply_grads(self, grads, variables):
"""Apply gradients to variables.
Call this function externally instead of apply_grad(). This causes the
operations to be combined, which is necessary for stacking variables
see mtf.rewrite_stack_variables().
Args:
grads: a list of Tensor
variables: a list of Variables
Returns:
a list of Operations
"""
ops = []
for grad, var in zip(grads, variables):
ops.extend(self.apply_grad(grad, var))
if not ops:
return ops
return variables[0].graph.combine_assignments(ops) | python | def apply_grads(self, grads, variables):
"""Apply gradients to variables.
Call this function externally instead of apply_grad(). This causes the
operations to be combined, which is necessary for stacking variables
see mtf.rewrite_stack_variables().
Args:
grads: a list of Tensor
variables: a list of Variables
Returns:
a list of Operations
"""
ops = []
for grad, var in zip(grads, variables):
ops.extend(self.apply_grad(grad, var))
if not ops:
return ops
return variables[0].graph.combine_assignments(ops) | [
"def",
"apply_grads",
"(",
"self",
",",
"grads",
",",
"variables",
")",
":",
"ops",
"=",
"[",
"]",
"for",
"grad",
",",
"var",
"in",
"zip",
"(",
"grads",
",",
"variables",
")",
":",
"ops",
".",
"extend",
"(",
"self",
".",
"apply_grad",
"(",
"grad",
... | Apply gradients to variables.
Call this function externally instead of apply_grad(). This causes the
operations to be combined, which is necessary for stacking variables
see mtf.rewrite_stack_variables().
Args:
grads: a list of Tensor
variables: a list of Variables
Returns:
a list of Operations | [
"Apply",
"gradients",
"to",
"variables",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/optimize.py#L39-L57 | train | 222,659 |
tensorflow/mesh | mesh_tensorflow/optimize.py | AdafactorOptimizer._factored_dims | def _factored_dims(self, shape):
"""Should we use a factored second moment estimator.
Based on the shape of the variable.
If we factor the accumulator, then this function returns a list of two
mtf.Dimensions to reduce over. We always pick the two largest dimensions.
If there are not two dimensions of size >= min_dim_size_to_factor, then we
do not factor.
Args:
shape: a Shape
Returns:
either a list of 2 Dimensions or None
"""
if not self._factored or shape.ndims < 2:
return None
sorted_dims = sorted(shape.dims, key=lambda d: -d.size)
if sorted_dims[1].size < self._min_dim_size_to_factor:
return None
return sorted_dims[:2] | python | def _factored_dims(self, shape):
"""Should we use a factored second moment estimator.
Based on the shape of the variable.
If we factor the accumulator, then this function returns a list of two
mtf.Dimensions to reduce over. We always pick the two largest dimensions.
If there are not two dimensions of size >= min_dim_size_to_factor, then we
do not factor.
Args:
shape: a Shape
Returns:
either a list of 2 Dimensions or None
"""
if not self._factored or shape.ndims < 2:
return None
sorted_dims = sorted(shape.dims, key=lambda d: -d.size)
if sorted_dims[1].size < self._min_dim_size_to_factor:
return None
return sorted_dims[:2] | [
"def",
"_factored_dims",
"(",
"self",
",",
"shape",
")",
":",
"if",
"not",
"self",
".",
"_factored",
"or",
"shape",
".",
"ndims",
"<",
"2",
":",
"return",
"None",
"sorted_dims",
"=",
"sorted",
"(",
"shape",
".",
"dims",
",",
"key",
"=",
"lambda",
"d"... | Should we use a factored second moment estimator.
Based on the shape of the variable.
If we factor the accumulator, then this function returns a list of two
mtf.Dimensions to reduce over. We always pick the two largest dimensions.
If there are not two dimensions of size >= min_dim_size_to_factor, then we
do not factor.
Args:
shape: a Shape
Returns:
either a list of 2 Dimensions or None | [
"Should",
"we",
"use",
"a",
"factored",
"second",
"moment",
"estimator",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/optimize.py#L139-L158 | train | 222,660 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/valid_layouts.py | LayoutValidator.is_valid_assignment | def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
"""Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid.
"""
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0)) | python | def is_valid_assignment(self, mtf_dimension_name, mesh_dimension_name):
"""Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid.
"""
return ((mtf_dimension_name in self._splittable_mtf_dimension_names) and
(self._mtf_dimension_name_to_size_gcd[mtf_dimension_name] %
self._mesh_dimension_name_to_size[mesh_dimension_name] == 0)) | [
"def",
"is_valid_assignment",
"(",
"self",
",",
"mtf_dimension_name",
",",
"mesh_dimension_name",
")",
":",
"return",
"(",
"(",
"mtf_dimension_name",
"in",
"self",
".",
"_splittable_mtf_dimension_names",
")",
"and",
"(",
"self",
".",
"_mtf_dimension_name_to_size_gcd",
... | Whether this MTF dimension may be assigned to this mesh dimension.
Args:
mtf_dimension_name: string, the name of a Mesh TensorFlow dimension.
mesh_dimension_name: string, the name of a mesh dimension.
Returns:
A boolean indicating whether the assignment is valid. | [
"Whether",
"this",
"MTF",
"dimension",
"may",
"be",
"assigned",
"to",
"this",
"mesh",
"dimension",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/valid_layouts.py#L83-L95 | train | 222,661 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/valid_layouts.py | LayoutValidator._initialize_splittable_dimensions | def _initialize_splittable_dimensions(self, mtf_graph):
"""Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout.
"""
all_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names | python | def _initialize_splittable_dimensions(self, mtf_graph):
"""Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout.
"""
all_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
all_mtf_dimension_names.add(mtf_dimension.name)
unsplittable_mtf_dimension_names = set() # set(string)
for mtf_operation in mtf_graph.operations:
unsplittable_mtf_dimension_names.update(mtf_operation.unsplittable_dims)
return all_mtf_dimension_names - unsplittable_mtf_dimension_names | [
"def",
"_initialize_splittable_dimensions",
"(",
"self",
",",
"mtf_graph",
")",
":",
"all_mtf_dimension_names",
"=",
"set",
"(",
")",
"# set(string)",
"for",
"mtf_operation",
"in",
"mtf_graph",
".",
"operations",
":",
"for",
"mtf_tensor",
"in",
"mtf_operation",
".",... | Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout. | [
"Initializer",
"for",
"self",
".",
"_splittable_mtf_dimension_names",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/valid_layouts.py#L97-L118 | train | 222,662 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/valid_layouts.py | LayoutValidator._initialize_mtf_dimension_name_to_size_gcd | def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
"""Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x.
"""
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(
mtf_dimension_name_to_size_gcd.get(mtf_dimension.name,
mtf_dimension.size),
mtf_dimension.size)
return mtf_dimension_name_to_size_gcd | python | def _initialize_mtf_dimension_name_to_size_gcd(self, mtf_graph):
"""Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x.
"""
mtf_dimension_name_to_size_gcd = {}
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
mtf_dimension_name_to_size_gcd[mtf_dimension.name] = fractions.gcd(
mtf_dimension_name_to_size_gcd.get(mtf_dimension.name,
mtf_dimension.size),
mtf_dimension.size)
return mtf_dimension_name_to_size_gcd | [
"def",
"_initialize_mtf_dimension_name_to_size_gcd",
"(",
"self",
",",
"mtf_graph",
")",
":",
"mtf_dimension_name_to_size_gcd",
"=",
"{",
"}",
"for",
"mtf_operation",
"in",
"mtf_graph",
".",
"operations",
":",
"for",
"mtf_tensor",
"in",
"mtf_operation",
".",
"outputs"... | Initializer for self._mtf_dimension_name_to_size_gcd.
Args:
mtf_graph: an mtf.Graph.
Returns:
A {string: int}, mapping the name of an MTF dimension to the greatest
common divisor of all the sizes it has. All these sizes being evenly
divisible by some x is equivalent to the GCD being divisible by x. | [
"Initializer",
"for",
"self",
".",
"_mtf_dimension_name_to_size_gcd",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/valid_layouts.py#L120-L140 | train | 222,663 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/valid_layouts.py | LayoutValidator._initialize_mesh_dimension_name_to_size | def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
"""Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes.
"""
mesh_dimension_name_to_size = {} # {string: int}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size | python | def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
"""Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes.
"""
mesh_dimension_name_to_size = {} # {string: int}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size | [
"def",
"_initialize_mesh_dimension_name_to_size",
"(",
"self",
",",
"mesh_shape",
")",
":",
"mesh_dimension_name_to_size",
"=",
"{",
"}",
"# {string: int}",
"for",
"mesh_dimension",
"in",
"mesh_shape",
".",
"dims",
":",
"mesh_dimension_name_to_size",
"[",
"mesh_dimension"... | Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes. | [
"Initializer",
"for",
"self",
".",
"_mesh_dimension_name_to_size",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/valid_layouts.py#L142-L154 | train | 222,664 |
tensorflow/mesh | mesh_tensorflow/placement_mesh_impl.py | allconcat_ring | def allconcat_ring(xs, devices, concat_axis):
"""Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors
"""
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = [[xs[target] if target == source else None for source in xrange(n)]
for target in xrange(n)]
for distance in xrange(1, n // 2 + 1):
for target in xrange(n):
source = (target + distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target + 1) % n][source])
source = (target - distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target - 1) % n][source])
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n) | python | def allconcat_ring(xs, devices, concat_axis):
"""Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors
"""
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = [[xs[target] if target == source else None for source in xrange(n)]
for target in xrange(n)]
for distance in xrange(1, n // 2 + 1):
for target in xrange(n):
source = (target + distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target + 1) % n][source])
source = (target - distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target - 1) % n][source])
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n) | [
"def",
"allconcat_ring",
"(",
"xs",
",",
"devices",
",",
"concat_axis",
")",
":",
"n",
"=",
"len",
"(",
"xs",
")",
"if",
"n",
"==",
"1",
":",
"return",
"xs",
"# [target, source]",
"parts",
"=",
"[",
"[",
"xs",
"[",
"target",
"]",
"if",
"target",
"=... | Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors | [
"Concatenate",
"all",
"Tensors",
"everywhere",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L462-L491 | train | 222,665 |
tensorflow/mesh | mesh_tensorflow/placement_mesh_impl.py | PlacementMeshImpl.Print | def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
"""
tf.logging.info("PlacementMeshImpl::Print")
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices) | python | def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor
"""
tf.logging.info("PlacementMeshImpl::Print")
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices) | [
"def",
"Print",
"(",
"self",
",",
"x",
",",
"data",
",",
"message",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=invalid-name",
"tf",
".",
"logging",
".",
"info",
"(",
"\"PlacementMeshImpl::Print\"",
")",
"new_slices",
"=",
"x",
".",
"tensor_list",
... | call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
**kwargs: keyword arguments to tf.print
Returns:
a LaidOutTensor | [
"call",
"tf",
".",
"Print",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L185-L202 | train | 222,666 |
tensorflow/mesh | mesh_tensorflow/placement_mesh_impl.py | PlacementMeshImpl.alltoall | def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(
alltoall_ring, split_axis=split_axis, concat_axis=concat_axis)) | python | def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(
alltoall_ring, split_axis=split_axis, concat_axis=concat_axis)) | [
"def",
"alltoall",
"(",
"self",
",",
"x",
",",
"mesh_axis",
",",
"split_axis",
",",
"concat_axis",
")",
":",
"return",
"self",
".",
"_collective_with_groups",
"(",
"x",
",",
"[",
"mesh_axis",
"]",
",",
"functools",
".",
"partial",
"(",
"alltoall_ring",
","... | Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor | [
"Grouped",
"alltoall",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L232-L246 | train | 222,667 |
tensorflow/mesh | mesh_tensorflow/placement_mesh_impl.py | PlacementMeshImpl.import_tf_tensor | def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.LaidOutTensor(self.make_slices(tf_x, x.shape)) | python | def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.LaidOutTensor(self.make_slices(tf_x, x.shape)) | [
"def",
"import_tf_tensor",
"(",
"self",
",",
"x",
",",
"tf_x",
")",
":",
"return",
"self",
".",
"LaidOutTensor",
"(",
"self",
".",
"make_slices",
"(",
"tf_x",
",",
"x",
".",
"shape",
")",
")"
] | Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor | [
"Import",
"a",
"tf",
".",
"Tensor",
"producing",
"a",
"LaidOutTensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L350-L359 | train | 222,668 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | attention | def attention(q,
k,
v,
memory_length_dim,
key_dim,
value_dim,
mask=None,
dropout_rate=0.0,
dropout_broadcast_dims=None,
extra_logit=None):
"""Dot-product attention - doesn't use positional dimensions.
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
mask: mask Tensor (see attention_mask())
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
Returns:
Tensor with shape q.shape - key_dim + value_dim
"""
logits = mtf.einsum([q, k], reduced_dims=[key_dim])
if mask is not None:
logits += mask
weights = mtf.softmax(logits, memory_length_dim, extra_logit=extra_logit)
if dropout_rate != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
outputs_shape = q.shape - key_dim + value_dim
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs | python | def attention(q,
k,
v,
memory_length_dim,
key_dim,
value_dim,
mask=None,
dropout_rate=0.0,
dropout_broadcast_dims=None,
extra_logit=None):
"""Dot-product attention - doesn't use positional dimensions.
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
mask: mask Tensor (see attention_mask())
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
Returns:
Tensor with shape q.shape - key_dim + value_dim
"""
logits = mtf.einsum([q, k], reduced_dims=[key_dim])
if mask is not None:
logits += mask
weights = mtf.softmax(logits, memory_length_dim, extra_logit=extra_logit)
if dropout_rate != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
outputs_shape = q.shape - key_dim + value_dim
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs | [
"def",
"attention",
"(",
"q",
",",
"k",
",",
"v",
",",
"memory_length_dim",
",",
"key_dim",
",",
"value_dim",
",",
"mask",
"=",
"None",
",",
"dropout_rate",
"=",
"0.0",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"extra_logit",
"=",
"None",
")",
":"... | Dot-product attention - doesn't use positional dimensions.
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
mask: mask Tensor (see attention_mask())
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
Returns:
Tensor with shape q.shape - key_dim + value_dim | [
"Dot",
"-",
"product",
"attention",
"-",
"doesn",
"t",
"use",
"positional",
"dimensions",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L27-L76 | train | 222,669 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | attention_params_simple | def attention_params_simple(
mesh, io_dim, kv_dim, heads_dim, variable_dtype):
"""Common case attention parameters.
Args:
mesh: a Mesh
io_dim: a Dimension (channels dimension of inputs and outputs)
kv_dim: a Dimension (channels in keys and values)
heads_dim: a Dimension (number of attention "heads")
variable_dtype: a mtf.VariableDType
Returns:
an AttentionParams
"""
return AttentionParams(
mesh,
query_input_dim=io_dim,
memory_input_dim=io_dim,
output_dim=io_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=[heads_dim],
memory_heads_dims=[heads_dim],
variable_dtype=variable_dtype) | python | def attention_params_simple(
mesh, io_dim, kv_dim, heads_dim, variable_dtype):
"""Common case attention parameters.
Args:
mesh: a Mesh
io_dim: a Dimension (channels dimension of inputs and outputs)
kv_dim: a Dimension (channels in keys and values)
heads_dim: a Dimension (number of attention "heads")
variable_dtype: a mtf.VariableDType
Returns:
an AttentionParams
"""
return AttentionParams(
mesh,
query_input_dim=io_dim,
memory_input_dim=io_dim,
output_dim=io_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=[heads_dim],
memory_heads_dims=[heads_dim],
variable_dtype=variable_dtype) | [
"def",
"attention_params_simple",
"(",
"mesh",
",",
"io_dim",
",",
"kv_dim",
",",
"heads_dim",
",",
"variable_dtype",
")",
":",
"return",
"AttentionParams",
"(",
"mesh",
",",
"query_input_dim",
"=",
"io_dim",
",",
"memory_input_dim",
"=",
"io_dim",
",",
"output_... | Common case attention parameters.
Args:
mesh: a Mesh
io_dim: a Dimension (channels dimension of inputs and outputs)
kv_dim: a Dimension (channels in keys and values)
heads_dim: a Dimension (number of attention "heads")
variable_dtype: a mtf.VariableDType
Returns:
an AttentionParams | [
"Common",
"case",
"attention",
"parameters",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L264-L286 | train | 222,670 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | local_attention_1d | def local_attention_1d(q,
k,
v,
length_dim,
key_dim,
value_dim,
autoregressive=True,
length_dim_num_splits=1,
radius=128,
sequence_id=1,
attention_kwargs=None):
"""Attention to the a neighborood around the source.
If autoregressive, then query position p can only see memory positions
in the range (p - radius, p].
If not autoregressive, then query position p can only see memory positions
in the range (p - window_size, p + radius].
Args:
q: a Tensor containing length_dim
k: a Tensor containing length_dim
v: an optional Tensor containing length_dim. If none then uses v=k.
length_dim: a Dimension
key_dim: a Dimension (the channels dimension of q and k)
value_dim: a Dimension (the channels dimension of v)
autoregressive: a boolean
length_dim_num_splits: an optional integer indicating how many ways the
length dimension is split
radius: an integer
sequence_id: a Tensor or an integer
attention_kwargs: optional keyword arguments for attention()
Returns:
a Tensor with the shape x.shape - key_dim + value_dim
Raises:
ValueError: if channels or depth don't match.
"""
# Choose a suitable block size.
# We choose the greatest divisor of length_per_split less than or equal
# to max(window_size, 128)
length_per_split = length_dim.size // length_dim_num_splits
block_length = max(radius, 128)
while length_per_split % block_length != 0:
block_length -= 1
query_block_length = mtf.Dimension("query_block_length", block_length)
memory_block_length = mtf.Dimension("memory_block_length", block_length)
# The num_blocks dimension gets the same name as the length dimension,
# so it will be split in the same way.
num_blocks = mtf.Dimension(length_dim.name, length_dim.size // block_length)
def _reshape_query(x):
return mtf.replace_dimensions(
x, length_dim, [num_blocks, query_block_length])
def _reshape_memory(x):
x = mtf.replace_dimensions(
x, length_dim, [num_blocks, memory_block_length])
return (mtf.left_halo_exchange if autoregressive else mtf.halo_exchange)(
x, num_blocks, memory_block_length, radius)
q = _reshape_query(q)
k = _reshape_memory(k)
if v:
v = _reshape_memory(v)
else:
v = k
if sequence_id is None:
sequence_id = 1
if (not isinstance(sequence_id, mtf.Tensor) or
length_dim not in sequence_id.shape.dims):
sequence_id += mtf.zeros(q.mesh, [length_dim], tf.int32)
q_sequence_id = _reshape_query(sequence_id)
m_sequence_id = _reshape_memory(sequence_id)
pos = mtf.range(q.mesh, length_dim, dtype=tf.int32)
q_pos = _reshape_query(pos)
m_pos = _reshape_memory(pos)
padded_memory_block_length = mtf.Dimension(
"memory_block_length",
(1 if autoregressive else 2) * radius + block_length)
relative_position = m_pos - q_pos
illegal = mtf.not_equal(q_sequence_id, m_sequence_id)
illegal = mtf.logical_or(illegal, mtf.less_equal(relative_position, -radius))
illegal = mtf.logical_or(illegal, mtf.greater(
relative_position, 0 if autoregressive else radius))
mask = mtf.cast(illegal, q.dtype) * -1e9
o = attention(q, k, v, padded_memory_block_length,
key_dim, value_dim, mask, **attention_kwargs)
return mtf.replace_dimensions(o, [num_blocks, query_block_length], length_dim) | python | def local_attention_1d(q,
k,
v,
length_dim,
key_dim,
value_dim,
autoregressive=True,
length_dim_num_splits=1,
radius=128,
sequence_id=1,
attention_kwargs=None):
"""Attention to the a neighborood around the source.
If autoregressive, then query position p can only see memory positions
in the range (p - radius, p].
If not autoregressive, then query position p can only see memory positions
in the range (p - window_size, p + radius].
Args:
q: a Tensor containing length_dim
k: a Tensor containing length_dim
v: an optional Tensor containing length_dim. If none then uses v=k.
length_dim: a Dimension
key_dim: a Dimension (the channels dimension of q and k)
value_dim: a Dimension (the channels dimension of v)
autoregressive: a boolean
length_dim_num_splits: an optional integer indicating how many ways the
length dimension is split
radius: an integer
sequence_id: a Tensor or an integer
attention_kwargs: optional keyword arguments for attention()
Returns:
a Tensor with the shape x.shape - key_dim + value_dim
Raises:
ValueError: if channels or depth don't match.
"""
# Choose a suitable block size.
# We choose the greatest divisor of length_per_split less than or equal
# to max(window_size, 128)
length_per_split = length_dim.size // length_dim_num_splits
block_length = max(radius, 128)
while length_per_split % block_length != 0:
block_length -= 1
query_block_length = mtf.Dimension("query_block_length", block_length)
memory_block_length = mtf.Dimension("memory_block_length", block_length)
# The num_blocks dimension gets the same name as the length dimension,
# so it will be split in the same way.
num_blocks = mtf.Dimension(length_dim.name, length_dim.size // block_length)
def _reshape_query(x):
return mtf.replace_dimensions(
x, length_dim, [num_blocks, query_block_length])
def _reshape_memory(x):
x = mtf.replace_dimensions(
x, length_dim, [num_blocks, memory_block_length])
return (mtf.left_halo_exchange if autoregressive else mtf.halo_exchange)(
x, num_blocks, memory_block_length, radius)
q = _reshape_query(q)
k = _reshape_memory(k)
if v:
v = _reshape_memory(v)
else:
v = k
if sequence_id is None:
sequence_id = 1
if (not isinstance(sequence_id, mtf.Tensor) or
length_dim not in sequence_id.shape.dims):
sequence_id += mtf.zeros(q.mesh, [length_dim], tf.int32)
q_sequence_id = _reshape_query(sequence_id)
m_sequence_id = _reshape_memory(sequence_id)
pos = mtf.range(q.mesh, length_dim, dtype=tf.int32)
q_pos = _reshape_query(pos)
m_pos = _reshape_memory(pos)
padded_memory_block_length = mtf.Dimension(
"memory_block_length",
(1 if autoregressive else 2) * radius + block_length)
relative_position = m_pos - q_pos
illegal = mtf.not_equal(q_sequence_id, m_sequence_id)
illegal = mtf.logical_or(illegal, mtf.less_equal(relative_position, -radius))
illegal = mtf.logical_or(illegal, mtf.greater(
relative_position, 0 if autoregressive else radius))
mask = mtf.cast(illegal, q.dtype) * -1e9
o = attention(q, k, v, padded_memory_block_length,
key_dim, value_dim, mask, **attention_kwargs)
return mtf.replace_dimensions(o, [num_blocks, query_block_length], length_dim) | [
"def",
"local_attention_1d",
"(",
"q",
",",
"k",
",",
"v",
",",
"length_dim",
",",
"key_dim",
",",
"value_dim",
",",
"autoregressive",
"=",
"True",
",",
"length_dim_num_splits",
"=",
"1",
",",
"radius",
"=",
"128",
",",
"sequence_id",
"=",
"1",
",",
"att... | Attention to the a neighborood around the source.
If autoregressive, then query position p can only see memory positions
in the range (p - radius, p].
If not autoregressive, then query position p can only see memory positions
in the range (p - window_size, p + radius].
Args:
q: a Tensor containing length_dim
k: a Tensor containing length_dim
v: an optional Tensor containing length_dim. If none then uses v=k.
length_dim: a Dimension
key_dim: a Dimension (the channels dimension of q and k)
value_dim: a Dimension (the channels dimension of v)
autoregressive: a boolean
length_dim_num_splits: an optional integer indicating how many ways the
length dimension is split
radius: an integer
sequence_id: a Tensor or an integer
attention_kwargs: optional keyword arguments for attention()
Returns:
a Tensor with the shape x.shape - key_dim + value_dim
Raises:
ValueError: if channels or depth don't match. | [
"Attention",
"to",
"the",
"a",
"neighborood",
"around",
"the",
"source",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L289-L377 | train | 222,671 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | AttentionParams.compute_q | def compute_q(self, query_antecedent):
"""Compute query Tensor q.
Args:
query_antecedent: a Tensor with dimensions
{query_input_dim} + other_dims
Returns:
a Tensor with dimensions
query_heads_dims + {key_dim} + other_dims
"""
ret = mtf.einsum(
[query_antecedent, self.wq], reduced_dims=[self.query_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)
return ret | python | def compute_q(self, query_antecedent):
"""Compute query Tensor q.
Args:
query_antecedent: a Tensor with dimensions
{query_input_dim} + other_dims
Returns:
a Tensor with dimensions
query_heads_dims + {key_dim} + other_dims
"""
ret = mtf.einsum(
[query_antecedent, self.wq], reduced_dims=[self.query_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)
return ret | [
"def",
"compute_q",
"(",
"self",
",",
"query_antecedent",
")",
":",
"ret",
"=",
"mtf",
".",
"einsum",
"(",
"[",
"query_antecedent",
",",
"self",
".",
"wq",
"]",
",",
"reduced_dims",
"=",
"[",
"self",
".",
"query_input_dim",
"]",
")",
"if",
"self",
".",... | Compute query Tensor q.
Args:
query_antecedent: a Tensor with dimensions
{query_input_dim} + other_dims
Returns:
a Tensor with dimensions
query_heads_dims + {key_dim} + other_dims | [
"Compute",
"query",
"Tensor",
"q",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L153-L167 | train | 222,672 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | AttentionParams.compute_k | def compute_k(self, memory_antecedent):
"""Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret | python | def compute_k(self, memory_antecedent):
"""Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret | [
"def",
"compute_k",
"(",
"self",
",",
"memory_antecedent",
")",
":",
"if",
"self",
".",
"shared_kv",
":",
"raise",
"ValueError",
"(",
"\"compute_k cannot be called with shared_kv\"",
")",
"ret",
"=",
"mtf",
".",
"einsum",
"(",
"[",
"memory_antecedent",
",",
"sel... | Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims | [
"Compute",
"key",
"Tensor",
"k",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L187-L203 | train | 222,673 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | AttentionParams.compute_v | def compute_v(self, memory_antecedent):
"""Compute value Tensor v.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {value_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_v cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims)
return ret | python | def compute_v(self, memory_antecedent):
"""Compute value Tensor v.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {value_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_v cannot be called with shared_kv")
ret = mtf.einsum(
[memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims)
return ret | [
"def",
"compute_v",
"(",
"self",
",",
"memory_antecedent",
")",
":",
"if",
"self",
".",
"shared_kv",
":",
"raise",
"ValueError",
"(",
"\"compute_v cannot be called with shared_kv\"",
")",
"ret",
"=",
"mtf",
".",
"einsum",
"(",
"[",
"memory_antecedent",
",",
"sel... | Compute value Tensor v.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {value_dim} + other_dims | [
"Compute",
"value",
"Tensor",
"v",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L205-L221 | train | 222,674 |
tensorflow/mesh | mesh_tensorflow/transformer/attention.py | AttentionParams.compute_output | def compute_output(self, o, output_shape=None):
"""Compute output of multihead attention.
Args:
o: a Tensor with dimensions
query_heads_dims + {value_dim} + other_dims
output_shape: an optional Shape
Returns:
a Tensor with shape:
{output_dim} + other_dims
"""
if self.combine_dims:
o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims)
o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0])
reduced_dims = [self.wo.shape.dims[0]]
else:
reduced_dims = self.o_dims
return mtf.einsum(
[o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims) | python | def compute_output(self, o, output_shape=None):
"""Compute output of multihead attention.
Args:
o: a Tensor with dimensions
query_heads_dims + {value_dim} + other_dims
output_shape: an optional Shape
Returns:
a Tensor with shape:
{output_dim} + other_dims
"""
if self.combine_dims:
o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims)
o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[0])
reduced_dims = [self.wo.shape.dims[0]]
else:
reduced_dims = self.o_dims
return mtf.einsum(
[o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims) | [
"def",
"compute_output",
"(",
"self",
",",
"o",
",",
"output_shape",
"=",
"None",
")",
":",
"if",
"self",
".",
"combine_dims",
":",
"o",
"=",
"mtf",
".",
"transpose",
"(",
"o",
",",
"o",
".",
"shape",
"-",
"self",
".",
"o_dims",
"+",
"self",
".",
... | Compute output of multihead attention.
Args:
o: a Tensor with dimensions
query_heads_dims + {value_dim} + other_dims
output_shape: an optional Shape
Returns:
a Tensor with shape:
{output_dim} + other_dims | [
"Compute",
"output",
"of",
"multihead",
"attention",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/attention.py#L223-L241 | train | 222,675 |
tensorflow/mesh | mesh_tensorflow/transformer/t2t_vocabulary.py | T2tVocabulary.encode_tf | def encode_tf(self, s):
"""Encode a tf.Scalar string to a tf.Tensor.
This will be necessary for on-the-fly tokenization.
Args:
s: a tf.Scalar with dtype tf.string
Returns:
a 1d tf.Tensor with dtype tf.int32
"""
ids = subword_text_encoder_ops.subword_text_encoder_encode(
s, self._filepath)
# the c++ op apppends 1=EOS - drop it.
return ids[:-1] | python | def encode_tf(self, s):
"""Encode a tf.Scalar string to a tf.Tensor.
This will be necessary for on-the-fly tokenization.
Args:
s: a tf.Scalar with dtype tf.string
Returns:
a 1d tf.Tensor with dtype tf.int32
"""
ids = subword_text_encoder_ops.subword_text_encoder_encode(
s, self._filepath)
# the c++ op apppends 1=EOS - drop it.
return ids[:-1] | [
"def",
"encode_tf",
"(",
"self",
",",
"s",
")",
":",
"ids",
"=",
"subword_text_encoder_ops",
".",
"subword_text_encoder_encode",
"(",
"s",
",",
"self",
".",
"_filepath",
")",
"# the c++ op apppends 1=EOS - drop it.",
"return",
"ids",
"[",
":",
"-",
"1",
"]"
] | Encode a tf.Scalar string to a tf.Tensor.
This will be necessary for on-the-fly tokenization.
Args:
s: a tf.Scalar with dtype tf.string
Returns:
a 1d tf.Tensor with dtype tf.int32 | [
"Encode",
"a",
"tf",
".",
"Scalar",
"string",
"to",
"a",
"tf",
".",
"Tensor",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/t2t_vocabulary.py#L79-L92 | train | 222,676 |
tensorflow/mesh | mesh_tensorflow/transformer/model_builder.py | simple_layer_stack | def simple_layer_stack(include_encdec_attention,
num_layers=6,
d_ff=2048,
num_heads=8,
d_kv=128,
dropout_rate=0.1):
"""Create a layer stack.
Args:
include_encdec_attention: a boolean
num_layers: an integer
d_ff: an integer
num_heads: an integer
d_kv: an integer
dropout_rate: a float
Returns:
a LayerStack
"""
ret = []
for _ in xrange(num_layers):
ret.append(
transformer_layers.SelfAttention(
num_heads=num_heads,
key_value_size=d_kv,
attention_kwargs={"dropout_rate": dropout_rate}))
if include_encdec_attention:
ret.append(
transformer_layers.EncDecAttention(
num_heads=num_heads,
key_value_size=d_kv,
attention_kwargs={"dropout_rate": dropout_rate}))
ret.append(
transformer_layers.DenseReluDense(
hidden_size=d_ff,
dropout_rate=dropout_rate))
return transformer.LayerStack(ret) | python | def simple_layer_stack(include_encdec_attention,
num_layers=6,
d_ff=2048,
num_heads=8,
d_kv=128,
dropout_rate=0.1):
"""Create a layer stack.
Args:
include_encdec_attention: a boolean
num_layers: an integer
d_ff: an integer
num_heads: an integer
d_kv: an integer
dropout_rate: a float
Returns:
a LayerStack
"""
ret = []
for _ in xrange(num_layers):
ret.append(
transformer_layers.SelfAttention(
num_heads=num_heads,
key_value_size=d_kv,
attention_kwargs={"dropout_rate": dropout_rate}))
if include_encdec_attention:
ret.append(
transformer_layers.EncDecAttention(
num_heads=num_heads,
key_value_size=d_kv,
attention_kwargs={"dropout_rate": dropout_rate}))
ret.append(
transformer_layers.DenseReluDense(
hidden_size=d_ff,
dropout_rate=dropout_rate))
return transformer.LayerStack(ret) | [
"def",
"simple_layer_stack",
"(",
"include_encdec_attention",
",",
"num_layers",
"=",
"6",
",",
"d_ff",
"=",
"2048",
",",
"num_heads",
"=",
"8",
",",
"d_kv",
"=",
"128",
",",
"dropout_rate",
"=",
"0.1",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"_",
"in",... | Create a layer stack.
Args:
include_encdec_attention: a boolean
num_layers: an integer
d_ff: an integer
num_heads: an integer
d_kv: an integer
dropout_rate: a float
Returns:
a LayerStack | [
"Create",
"a",
"layer",
"stack",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/model_builder.py#L30-L66 | train | 222,677 |
tensorflow/mesh | examples/toy_model_tpu.py | toy_model | def toy_model(features, mesh):
"""A toy model implemented by mesh tensorlfow."""
batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
io_dim = mtf.Dimension('io', FLAGS.io_size)
master_dtype = tf.as_dtype(FLAGS.master_dtype)
slice_dtype = tf.as_dtype(FLAGS.slice_dtype)
activation_dtype = tf.as_dtype(FLAGS.activation_dtype)
x = mtf.import_tf_tensor(mesh, features, mtf.Shape([batch_dim, io_dim]))
x = mtf.cast(x, activation_dtype)
h = x
for lnum in xrange(1, FLAGS.num_hidden_layers + 2):
if lnum + 1 == FLAGS.num_hidden_layers + 2:
# output layer
dim = io_dim
elif lnum % 2 == 0:
dim = mtf.Dimension('hidden_even', FLAGS.hidden_size)
else:
dim = mtf.Dimension('hidden_odd', FLAGS.hidden_size)
h = mtf.layers.dense(
h, dim,
use_bias=False,
master_dtype=master_dtype,
slice_dtype=slice_dtype,
name='layer_%d' % lnum)
y = h
g = tf.train.get_global_step()
if FLAGS.step_with_nan >= 0:
# Trigger NaN in the forward pass, this is used for testing whether
# MeshTensorFlow can handle occasional NaN value.
y += mtf.import_tf_tensor(
mesh,
tf.divide(
0.0,
tf.cond(tf.equal(g, FLAGS.step_with_nan), lambda: 0., lambda: 1.)),
mtf.Shape([]))
loss = mtf.reduce_mean(mtf.square(y - x))
return y, loss | python | def toy_model(features, mesh):
"""A toy model implemented by mesh tensorlfow."""
batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
io_dim = mtf.Dimension('io', FLAGS.io_size)
master_dtype = tf.as_dtype(FLAGS.master_dtype)
slice_dtype = tf.as_dtype(FLAGS.slice_dtype)
activation_dtype = tf.as_dtype(FLAGS.activation_dtype)
x = mtf.import_tf_tensor(mesh, features, mtf.Shape([batch_dim, io_dim]))
x = mtf.cast(x, activation_dtype)
h = x
for lnum in xrange(1, FLAGS.num_hidden_layers + 2):
if lnum + 1 == FLAGS.num_hidden_layers + 2:
# output layer
dim = io_dim
elif lnum % 2 == 0:
dim = mtf.Dimension('hidden_even', FLAGS.hidden_size)
else:
dim = mtf.Dimension('hidden_odd', FLAGS.hidden_size)
h = mtf.layers.dense(
h, dim,
use_bias=False,
master_dtype=master_dtype,
slice_dtype=slice_dtype,
name='layer_%d' % lnum)
y = h
g = tf.train.get_global_step()
if FLAGS.step_with_nan >= 0:
# Trigger NaN in the forward pass, this is used for testing whether
# MeshTensorFlow can handle occasional NaN value.
y += mtf.import_tf_tensor(
mesh,
tf.divide(
0.0,
tf.cond(tf.equal(g, FLAGS.step_with_nan), lambda: 0., lambda: 1.)),
mtf.Shape([]))
loss = mtf.reduce_mean(mtf.square(y - x))
return y, loss | [
"def",
"toy_model",
"(",
"features",
",",
"mesh",
")",
":",
"batch_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"'batch'",
",",
"FLAGS",
".",
"batch_size",
")",
"io_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"'io'",
",",
"FLAGS",
".",
"io_size",
")",
"master... | A toy model implemented by mesh tensorlfow. | [
"A",
"toy",
"model",
"implemented",
"by",
"mesh",
"tensorlfow",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/toy_model_tpu.py#L103-L142 | train | 222,678 |
def run_toy_model_tpu():
  """Run a toy model on TPU."""
  cluster = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  iterations = FLAGS.iterations
  mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
  run_config = tpu_config.RunConfig(
      cluster=cluster,
      model_dir=FLAGS.model_dir,
      # Checkpointing is driven explicitly below, so both built-in savers
      # are disabled here.
      save_checkpoints_steps=None,
      save_checkpoints_secs=None,
      log_step_count_steps=iterations,
      save_summary_steps=iterations,
      tpu_config=tpu_config.TPUConfig(
          num_shards=mesh_shape.size,
          iterations_per_loop=iterations,
          num_cores_per_replica=1,
          per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST))
  estimator = tpu_estimator.TPUEstimator(
      use_tpu=True,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.batch_size)

  current_step = estimator_lib._load_global_step_from_checkpoint_dir(FLAGS.model_dir)  # pylint: disable=protected-access,line-too-long
  logging.info('Current step %d', current_step)

  if FLAGS.steps_per_checkpoint == 0:
    # One uninterrupted training run with no periodic evaluation.
    estimator.train(input_fn=ToyModelInput(), max_steps=FLAGS.train_steps)
    return
  # Otherwise alternate: train up to the next checkpoint, then evaluate.
  while current_step < FLAGS.train_steps:
    next_checkpoint = min(current_step + FLAGS.steps_per_checkpoint,
                          FLAGS.train_steps)
    estimator.train(input_fn=ToyModelInput(), max_steps=next_checkpoint)
    current_step = next_checkpoint
    logging.info('Starting to evaluate.')
    eval_results = estimator.evaluate(
        input_fn=ToyModelInput(),
        steps=156)  # since we have 10000 examples and batch_size = 64 per host
    logging.info('Eval results: %s', eval_results)
"def",
"run_toy_model_tpu",
"(",
")",
":",
"tpu_cluster_resolver",
"=",
"tf",
".",
"contrib",
".",
"cluster_resolver",
".",
"TPUClusterResolver",
"(",
"FLAGS",
".",
"tpu",
",",
"zone",
"=",
"FLAGS",
".",
"tpu_zone",
",",
"project",
"=",
"FLAGS",
".",
"gcp_pr... | Run a toy model on TPU. | [
"Run",
"a",
"toy",
"model",
"on",
"TPU",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/toy_model_tpu.py#L243-L282 | train | 222,679 |
def mnist_model(image, labels, mesh):
  """The model.

  Args:
    image: tf.Tensor with shape [batch, 28*28]
    labels: a tf.Tensor with shape [batch] and dtype tf.int32
    mesh: a mtf.Mesh

  Returns:
    logits: a mtf.Tensor with shape [batch, 10]
    loss: a mtf.Tensor with shape []
  """
  batch = mtf.Dimension("batch", FLAGS.batch_size)
  row_blocks = mtf.Dimension("row_blocks", 4)
  col_blocks = mtf.Dimension("col_blocks", 4)
  rows = mtf.Dimension("rows_size", 7)
  cols = mtf.Dimension("cols_size", 7)
  classes = mtf.Dimension("classes", 10)
  channel = mtf.Dimension("one_channel", 1)

  # Lay out each 28x28 image as a 4x4 grid of 7x7 blocks, then bring the
  # block dimensions next to each other.
  x = mtf.import_tf_tensor(
      mesh, tf.reshape(image, [FLAGS.batch_size, 4, 7, 4, 7, 1]),
      mtf.Shape([batch, row_blocks, rows, col_blocks, cols, channel]))
  x = mtf.transpose(x, [batch, row_blocks, col_blocks, rows, cols, channel])

  # add some convolutional layers to demonstrate that convolution works.
  fh = mtf.Dimension("fh", 9)
  fw = mtf.Dimension("fw", 9)
  filters1 = mtf.Dimension("filters1", 16)
  filters2 = mtf.Dimension("filters2", 16)
  kernel1 = mtf.get_variable(mesh, "kernel1", [fh, fw, channel, filters1])
  kernel2 = mtf.get_variable(mesh, "kernel2", [fh, fw, filters1, filters2])

  f1 = mtf.relu(mtf.conv2d_with_blocks(
      x, kernel1, strides=[1, 1, 1, 1], padding="SAME",
      h_blocks_dim=row_blocks, w_blocks_dim=col_blocks))
  f2 = mtf.relu(mtf.conv2d_with_blocks(
      f1, kernel2, strides=[1, 1, 1, 1], padding="SAME",
      h_blocks_dim=row_blocks, w_blocks_dim=col_blocks))
  x = mtf.reduce_mean(f2, reduced_dim=filters2)

  # add some fully-connected dense layers.
  hidden1 = mtf.Dimension("hidden1", FLAGS.hidden_size)
  hidden2 = mtf.Dimension("hidden2", FLAGS.hidden_size)
  h1 = mtf.layers.dense(
      x, hidden1,
      reduced_dims=x.shape.dims[-4:],
      activation=mtf.relu, name="hidden1")
  h2 = mtf.layers.dense(h1, hidden2, activation=mtf.relu, name="hidden2")
  logits = mtf.layers.dense(h2, classes, name="logits")

  if labels is None:
    loss = None
  else:
    labels = mtf.import_tf_tensor(
        mesh, tf.reshape(labels, [FLAGS.batch_size]), mtf.Shape([batch]))
    loss = mtf.layers.softmax_cross_entropy_with_logits(
        logits, mtf.one_hot(labels, classes), classes)
    loss = mtf.reduce_mean(loss)
  return logits, loss
"def",
"mnist_model",
"(",
"image",
",",
"labels",
",",
"mesh",
")",
":",
"batch_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"\"batch\"",
",",
"FLAGS",
".",
"batch_size",
")",
"row_blocks_dim",
"=",
"mtf",
".",
"Dimension",
"(",
"\"row_blocks\"",
",",
"4",
... | The model.
Args:
image: tf.Tensor with shape [batch, 28*28]
labels: a tf.Tensor with shape [batch] and dtype tf.int32
mesh: a mtf.Mesh
Returns:
logits: a mtf.Tensor with shape [batch, 10]
loss: a mtf.Tensor with shape [] | [
"The",
"model",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist.py#L50-L118 | train | 222,680 |
def run_mnist():
  """Run MNIST training and eval loop."""
  mnist_classifier = tf.estimator.Estimator(
      model_fn=model_fn, model_dir=FLAGS.model_dir)

  def train_input_fn():
    """Prepare data for training."""
    # Shuffle the entire epoch at once: larger buffers give better
    # randomness, and MNIST is small enough to afford the memory.
    ds = dataset.train(FLAGS.data_dir)
    ds = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)
    # Repeat so one call to train() covers `epochs_between_evals` epochs.
    return ds.repeat(FLAGS.epochs_between_evals)

  def eval_input_fn():
    ds = dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size)
    return ds.make_one_shot_iterator().get_next()

  # Train and evaluate model.
  for _ in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):
    mnist_classifier.train(input_fn=train_input_fn, hooks=None)
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print("\nEvaluation results:\n\t%s\n" % eval_results)
"def",
"run_mnist",
"(",
")",
":",
"mnist_classifier",
"=",
"tf",
".",
"estimator",
".",
"Estimator",
"(",
"model_fn",
"=",
"model_fn",
",",
"model_dir",
"=",
"FLAGS",
".",
"model_dir",
")",
"# Set up training and evaluation input functions.",
"def",
"train_input_fn... | Run MNIST training and eval loop. | [
"Run",
"MNIST",
"training",
"and",
"eval",
"loop",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist.py#L207-L236 | train | 222,681 |
def call(self, context, x, losses=None):
  """Call the layer."""
  # Note: the `losses` argument is not used in this body; the auxiliary
  # loss is appended to context.losses instead.
  original_shape = x.shape
  has_length_dim = context.length_dim in original_shape.dims
  if not has_length_dim:
    # Insert a dummy length dimension of size 1 just before the last
    # (model) dimension so the MoE layer sees the shape it expects.
    x = mtf.reshape(
        x,
        mtf.Shape(original_shape.dims[:-1] +
                  [mtf.Dimension("length", 1)] +
                  original_shape.dims[-1:]))
  y, aux_loss = transformer_moe_layer_v2(
      x,
      context.model_dim,
      self._hparams,
      context.train,
      context.variable_dtype,
      layout=context.layout,
      mesh_shape=context.mesh_shape,
      nonpadding=context.nonpadding)
  if context.losses is not None:
    context.losses.append(aux_loss)
  if not has_length_dim:
    # Drop the dummy length dimension again.
    y = mtf.reshape(y, original_shape)
  return y
"def",
"call",
"(",
"self",
",",
"context",
",",
"x",
",",
"losses",
"=",
"None",
")",
":",
"has_length_dim",
"=",
"context",
".",
"length_dim",
"in",
"x",
".",
"shape",
".",
"dims",
"if",
"not",
"has_length_dim",
":",
"x_shape",
"=",
"x",
".",
"shap... | Call the layer. | [
"Call",
"the",
"layer",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/moe.py#L123-L145 | train | 222,682 |
def print_solution(model, solver):
  """Prints the solution associated with solver.

  If solver has already had Solve() called on it, prints the solution. This
  includes each variable and its assignment, along with the objective function
  and its optimal value.

  If solver has not had Solve() called on it, or there is no feasible solution,
  this will probably crash.

  Args:
    model: A pywrapcp.CpModel object.
    solver: A pywrapcp.CpSolver object.

  Returns:
    Nothing, but prints the solution associated with solver.
  """
  model_proto = model.Proto()
  response_proto = solver.ResponseProto()

  # Map variable index -> objective coefficient, and detect maximization
  # (signalled by a negative scaling factor in the proto).
  objective_coefficients = {}
  maximization = False
  if model_proto.HasField('objective'):
    objective = model_proto.objective
    for var_index, coeff in zip(objective.vars, objective.coeffs):
      objective_coefficients[var_index] = coeff
    if objective.scaling_factor < 0.0:
      maximization = True

  assignment_lines = []
  objective_terms = []
  for var_index in range(len(model_proto.variables)):
    variable_name = model_proto.variables[var_index].name
    # Unnamed variables are internal; skip them entirely.
    if not variable_name:
      continue
    if var_index in objective_coefficients:
      coefficient = objective_coefficients[var_index]
      if coefficient:
        if maximization:
          coefficient *= -1
        if coefficient < 0:
          objective_terms.append(' - {} * {}'.format(-coefficient,
                                                     variable_name))
        elif coefficient > 0:
          objective_terms.append(' + {} * {}'.format(coefficient,
                                                     variable_name))
    assignment_lines.append(' {} = {}\n'.format(
        variable_name, response_proto.solution[var_index]))
  print(''.join(assignment_lines), end='')
  # Strip the leading '+' if it exists.
  if objective_terms and objective_terms[0][1] == '+':
    objective_terms[0] = objective_terms[0][2:]
  print('{}:{}'.format('Maximize' if maximization else 'Minimize',
                       ''.join(objective_terms)))
  print('Objective value: {}\n'.format(solver.ObjectiveValue()))
"def",
"print_solution",
"(",
"model",
",",
"solver",
")",
":",
"model_proto",
"=",
"model",
".",
"Proto",
"(",
")",
"response_proto",
"=",
"solver",
".",
"ResponseProto",
"(",
")",
"variables_in_objective_map",
"=",
"{",
"}",
"maximization",
"=",
"False",
"... | Prints the solution associated with solver.
If solver has already had Solve() called on it, prints the solution. This
includes each variable and its assignment, along with the objective function
and its optimal value.
If solver has not had Solve() called on it, or there is no feasible solution,
this will probably crash.
Args:
model: A pywrapcp.CpModel object.
solver: A pywrapcp.CpSolver object.
Returns:
Nothing, but prints the solution associated with solver. | [
"Prints",
"the",
"solution",
"associated",
"with",
"solver",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/print_cp_model_solution.py#L32-L84 | train | 222,683 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout_optimizer.py | _local_var_name | def _local_var_name(splittable_dimensions, assignment):
"""Name for a local variable.
Args:
splittable_dimensions: frozenset of names of splittable dimensions.
assignment: dict from names of splittable dimensions to names of mesh
dimensions.
Returns:
A string, the variable name.
"""
assignment_string = []
for splittable in sorted(splittable_dimensions):
if splittable in assignment:
assignment_string.append("{}:{}".format(splittable,
assignment[splittable]))
else:
assignment_string.append("{}".format(splittable))
return "y_(" + ",".join(assignment_string) + ")" | python | def _local_var_name(splittable_dimensions, assignment):
"""Name for a local variable.
Args:
splittable_dimensions: frozenset of names of splittable dimensions.
assignment: dict from names of splittable dimensions to names of mesh
dimensions.
Returns:
A string, the variable name.
"""
assignment_string = []
for splittable in sorted(splittable_dimensions):
if splittable in assignment:
assignment_string.append("{}:{}".format(splittable,
assignment[splittable]))
else:
assignment_string.append("{}".format(splittable))
return "y_(" + ",".join(assignment_string) + ")" | [
"def",
"_local_var_name",
"(",
"splittable_dimensions",
",",
"assignment",
")",
":",
"assignment_string",
"=",
"[",
"]",
"for",
"splittable",
"in",
"sorted",
"(",
"splittable_dimensions",
")",
":",
"if",
"splittable",
"in",
"assignment",
":",
"assignment_string",
... | Name for a local variable.
Args:
splittable_dimensions: frozenset of names of splittable dimensions.
assignment: dict from names of splittable dimensions to names of mesh
dimensions.
Returns:
A string, the variable name. | [
"Name",
"for",
"a",
"local",
"variable",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L383-L401 | train | 222,684 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout_optimizer.py | _generate_assignments | def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):
"""Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None.
"""
assignments = []
for assignment_size in six.moves.xrange(
1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):
for s_dims_chosen in itertools.combinations(splittable_dimensions,
assignment_size):
for m_dims_chosen in itertools.permutations(mesh_dimension_to_size,
assignment_size):
assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))
return assignments | python | def _generate_assignments(splittable_dimensions, mesh_dimension_to_size):
"""Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None.
"""
assignments = []
for assignment_size in six.moves.xrange(
1 + min(len(splittable_dimensions), len(mesh_dimension_to_size))):
for s_dims_chosen in itertools.combinations(splittable_dimensions,
assignment_size):
for m_dims_chosen in itertools.permutations(mesh_dimension_to_size,
assignment_size):
assignments.append(dict(zip(s_dims_chosen, m_dims_chosen)))
return assignments | [
"def",
"_generate_assignments",
"(",
"splittable_dimensions",
",",
"mesh_dimension_to_size",
")",
":",
"assignments",
"=",
"[",
"]",
"for",
"assignment_size",
"in",
"six",
".",
"moves",
".",
"xrange",
"(",
"1",
"+",
"min",
"(",
"len",
"(",
"splittable_dimensions... | Generates all ways to map splittable dimensions to mesh dimensions.
Args:
splittable_dimensions: a frozenset of the names of splittable dimensions.
mesh_dimension_to_size: a dictionary from mesh dimension name to size.
Returns:
A list of the valid assignments. Each assignment is a dict keyed by every
splittable dimension, whose value is either a mesh dimension or None. | [
"Generates",
"all",
"ways",
"to",
"map",
"splittable",
"dimensions",
"to",
"mesh",
"dimensions",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L404-L423 | train | 222,685 |
def _preprocess_input(self):
  """Computing useful input data structures to ease IP construction."""
  splittable = self._layout_validator.splittable_mtf_dimension_names
  # {operation name: frozenset of the splittable MTF dimension names that
  # the operation uses}.
  self._operation_name_to_mtf_dimension_set = {
      operation_name: frozenset(
          set(self._graph.get_operation_mtf_dimension_names(operation_name))
          .intersection(splittable))
      for operation_name in self._graph.get_all_operation_names()
  }
  # {tensor name: frozenset of the splittable MTF dimension names that the
  # tensor uses}.
  self._tensor_name_to_mtf_dimension_set = {
      tensor_name: frozenset(
          set(self._graph.get_tensor_mtf_dimension_names(tensor_name))
          .intersection(splittable))
      for tensor_name in self._graph.get_all_tensor_names()
  }
  self._operation_mtf_dimension_sets = set(
      self._operation_name_to_mtf_dimension_set.values())
  self._mtf_dimension_sets = self._operation_mtf_dimension_sets | set(
      self._tensor_name_to_mtf_dimension_set.values())
  # Precompute the candidate assignments for each distinct MTF dimension
  # set; indexed by MTF dimension set.
  self._assignments = {
      mtf_dimension_set: _generate_assignments(
          mtf_dimension_set,
          self._layout_validator.mesh_dimension_name_to_size)
      for mtf_dimension_set in self._mtf_dimension_sets
  }
"def",
"_preprocess_input",
"(",
"self",
")",
":",
"# Compute the sets of MTF dimensions used in operations/tensors.",
"# a {string: frozenset(string)}, mapping operation name to MTF dimension",
"# names.",
"self",
".",
"_operation_name_to_mtf_dimension_set",
"=",
"{",
"}",
"# a {strin... | Computing useful input data structures to ease IP construction. | [
"Computing",
"useful",
"input",
"data",
"structures",
"to",
"ease",
"IP",
"construction",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L123-L152 | train | 222,686 |
def _initialize_variables(self):
  """Initializing the variables of the IP."""
  validator = self._layout_validator
  # One boolean variable per (MTF dimension, mesh dimension) pair.
  self._global_vars = {}
  for mtf_dim_name in validator.splittable_mtf_dimension_names:
    for mesh_dim_name in validator.mesh_dimension_name_to_size:
      var_name = _global_var_name(mtf_dim_name, mesh_dim_name)
      self._global_vars[(mtf_dim_name, mesh_dim_name)] = (
          self._model.NewBoolVar(var_name))
  # One boolean variable per candidate assignment, indexed first by MTF
  # dimension set and then by assignment name.
  self._local_vars = {}
  for mtf_dimension_set in self._mtf_dimension_sets:
    per_assignment = {}
    for assignment in self._assignments[mtf_dimension_set]:
      # TODO(joshuawang): Avoid hash collision no matter what dimension names
      # are; don't hash by this local var name, swap to using a tuple encoding
      # of the full assignment instead.
      var_name = _local_var_name(mtf_dimension_set, assignment)
      per_assignment[var_name] = self._model.NewBoolVar(var_name)
    self._local_vars[mtf_dimension_set] = per_assignment
  # Memory variable. A crude upper bound: the total size of all tensors on
  # the canonical device under the empty assignment.
  # NOTE(joshuawang): This bound could be improved by factoring in the
  # schedule.
  memory_upper_bound = 0
  for tensor_name in self._graph.get_all_tensor_names():
    if self._graph.is_tensor_on_canonical_device(tensor_name):
      memory_upper_bound += int(self._graph.get_tensor_size(tensor_name))
  self._memory_var = self._model.NewIntVar(0, memory_upper_bound, "z")
"def",
"_initialize_variables",
"(",
"self",
")",
":",
"# Initialize global variables.",
"self",
".",
"_global_vars",
"=",
"{",
"}",
"# Indexed by (MTF dimension, mesh dimension)",
"for",
"mtf_dimension_name",
"in",
"(",
"self",
".",
"_layout_validator",
".",
"splittable_... | Initializing the variables of the IP. | [
"Initializing",
"the",
"variables",
"of",
"the",
"IP",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L154-L187 | train | 222,687 |
def _add_constraints(self):
  """Adding constraints to the IP."""
  validator = self._layout_validator
  mesh_dims = validator.mesh_dimension_name_to_size
  # Operation constraints: within one operation's dimension set, at most one
  # MTF dimension may be split along any given mesh dimension.
  for mesh_dim_name in mesh_dims:
    for mtf_dimension_set in self._operation_mtf_dimension_sets:
      self._model.Add(
          sum(self._global_vars[(mtf_dim_name, mesh_dim_name)]
              for mtf_dim_name in mtf_dimension_set) <= 1)
  # Global constraints: each MTF dimension is assigned to at most one mesh
  # dimension.
  for mtf_dim_name in validator.splittable_mtf_dimension_names:
    self._model.Add(
        sum(self._global_vars[(mtf_dim_name, mesh_dim_name)]
            for mesh_dim_name in mesh_dims) <= 1)
  # Divisibility constraints: forbid pairs the validator rejects.
  for mtf_dim_name in validator.splittable_mtf_dimension_names:
    for mesh_dim_name in mesh_dims:
      if not validator.is_valid_assignment(mtf_dim_name, mesh_dim_name):
        self._model.Add(
            self._global_vars[(mtf_dim_name, mesh_dim_name)] == 0)
  # Local constraints: each MTF dimension set chooses exactly one candidate
  # assignment.
  for mtf_dimension_set in self._mtf_dimension_sets:
    self._model.Add(
        sum(self._local_vars[mtf_dimension_set][_local_var_name(
            mtf_dimension_set, assignment)]
            for assignment in self._assignments[mtf_dimension_set]) == 1)
  # Local-to-global constraints: choosing a local assignment implies its
  # global variables; a global variable outside the assignment rules that
  # local choice out.
  for mtf_dimension_set in self._mtf_dimension_sets:
    for assignment in self._assignments[mtf_dimension_set]:
      local_var = self._local_vars[mtf_dimension_set][
          _local_var_name(mtf_dimension_set, assignment)]
      for mtf_dim_name in mtf_dimension_set:
        if mtf_dim_name in assignment:
          mesh_dim_name = assignment[mtf_dim_name]
          self._model.AddImplication(
              local_var, self._global_vars[(mtf_dim_name, mesh_dim_name)])
        else:
          for mesh_dim_name in mesh_dims:
            self._model.AddImplication(
                self._global_vars[(mtf_dim_name, mesh_dim_name)],
                local_var.Not())
  # Memory constraints: for each group of tensor names produced by
  # _get_memory_contents(), the summed assignment-dependent tensor sizes
  # must not exceed the memory variable.
  tensor_memory_sum = {}
  for tensor_name in self._graph.get_all_tensor_names():
    tensor_memory_sum[tensor_name] = 0
    mtf_dimension_set = self._tensor_name_to_mtf_dimension_set[tensor_name]
    if not self._graph.is_tensor_on_canonical_device(tensor_name):
      continue
    for assignment in self._assignments[mtf_dimension_set]:
      size_under_assignment = self._graph.get_tensor_size(
          tensor_name, assignment, mesh_dims)
      var_name = _local_var_name(mtf_dimension_set, assignment)
      tensor_memory_sum[tensor_name] += (
          size_under_assignment *
          self._local_vars[mtf_dimension_set][var_name])
  for tensor_names in self._get_memory_contents():
    self._model.Add(
        sum(tensor_memory_sum[tensor_name]
            for tensor_name in tensor_names) <= self._memory_var)
"""Adding constraints to the IP."""
# Add operation constraints.
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
for mtf_dimension_set in self._operation_mtf_dimension_sets:
self._model.Add(
sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]
for mtf_dimension_name in mtf_dimension_set) <= 1)
# Add global constraints.
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
self._model.Add(
sum(self._global_vars[(mtf_dimension_name, mesh_dimension_name)]
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size)) <= 1)
# Add divisibility constraints.
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
if not self._layout_validator.is_valid_assignment(mtf_dimension_name,
mesh_dimension_name):
self._model.Add(self._global_vars[(mtf_dimension_name,
mesh_dimension_name)] == 0)
# Add local constraints.
for mtf_dimension_set in self._mtf_dimension_sets:
self._model.Add(
sum(self._local_vars[mtf_dimension_set][_local_var_name(
mtf_dimension_set, assignment)]
for assignment in self._assignments[mtf_dimension_set]) == 1)
# Add local-to-global constraints.
for mtf_dimension_set in self._mtf_dimension_sets:
for assignment in self._assignments[mtf_dimension_set]:
name = _local_var_name(mtf_dimension_set, assignment)
for mtf_dimension_name in mtf_dimension_set:
if mtf_dimension_name in assignment:
mesh_dimension_name = assignment[mtf_dimension_name]
self._model.AddImplication(
self._local_vars[mtf_dimension_set][name],
self._global_vars[(mtf_dimension_name, mesh_dimension_name)])
else:
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
self._model.AddImplication(
self._global_vars[(mtf_dimension_name, mesh_dimension_name)],
self._local_vars[mtf_dimension_set][name].Not())
# Add memory constraints.
tensor_memory_sum = {}
for tensor_name in self._graph.get_all_tensor_names():
tensor_memory_sum[tensor_name] = 0
mtf_dimension_set = self._tensor_name_to_mtf_dimension_set[tensor_name]
if not self._graph.is_tensor_on_canonical_device(tensor_name):
continue
for assignment in self._assignments[mtf_dimension_set]:
size_under_assignment = self._graph.get_tensor_size(
tensor_name, assignment,
self._layout_validator.mesh_dimension_name_to_size)
name = _local_var_name(mtf_dimension_set, assignment)
tensor_memory_sum[tensor_name] += (
size_under_assignment * self._local_vars[mtf_dimension_set][name])
for tensor_names in self._get_memory_contents():
self._model.Add(
sum(tensor_memory_sum[tensor_name]
for tensor_name in tensor_names) <= self._memory_var) | [
"def",
"_add_constraints",
"(",
"self",
")",
":",
"# Add operation constraints.",
"for",
"mesh_dimension_name",
"in",
"(",
"self",
".",
"_layout_validator",
".",
"mesh_dimension_name_to_size",
")",
":",
"for",
"mtf_dimension_set",
"in",
"self",
".",
"_operation_mtf_dime... | Adding constraints to the IP. | [
"Adding",
"constraints",
"to",
"the",
"IP",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L189-L262 | train | 222,688 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout_optimizer.py | LayoutOptimizer._get_memory_contents | def _get_memory_contents(self):
"""Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
GetAllOperationNames()).
"""
if self._memory_contents is not None:
return self._memory_contents
schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
self._memory_contents = self._graph.compute_memory_contents_under_schedule(
schedule)
return self._memory_contents | python | def _get_memory_contents(self):
"""Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
GetAllOperationNames()).
"""
if self._memory_contents is not None:
return self._memory_contents
schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
self._memory_contents = self._graph.compute_memory_contents_under_schedule(
schedule)
return self._memory_contents | [
"def",
"_get_memory_contents",
"(",
"self",
")",
":",
"if",
"self",
".",
"_memory_contents",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_memory_contents",
"schedule",
"=",
"scheduler",
".",
"minimize_peak_memory",
"(",
"self",
".",
"_graph",
",",
"self"... | Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
GetAllOperationNames()). | [
"Runs",
"the",
"scheduler",
"to",
"determine",
"memory",
"contents",
"at",
"every",
"point",
"in",
"time",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L268-L283 | train | 222,689 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout_optimizer.py | LayoutOptimizer.solve | def solve(self, print_solution=False):
"""Solves the current integer program and returns the computed layout.
Args:
print_solution: An optional boolean indicating whether to print the full
solution in human-readable format.
Returns:
The computed layout (as a string).
Raises:
SolverError: the internal solver could not find a solution, or the
solution found is infeasible.
"""
# Solve and see how well the solver did.
self._cp_solver = cp_model.CpSolver()
status = self._cp_solver.Solve(self._model)
if status != cp_model.OPTIMAL:
if status == cp_model.FEASIBLE:
logging.warning("A potentially suboptimal solution was found.")
else:
logging.error("Solver returned status %d.", status)
raise SolverError("The solver could not solve the problem and returned "
"status {}.".format(status))
# TODO(joshuawang): Verify the solver's solution.
if print_solution:
print_cp_model_solution.print_solution(self._model, self._cp_solver)
# Reconstruct layout from solution.
layout = []
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name,
mesh_dimension_name)])
if value: # Value is integer.
layout.append(mtf_dimension_name + ":" + mesh_dimension_name)
layout.sort()
return ";".join(layout) | python | def solve(self, print_solution=False):
"""Solves the current integer program and returns the computed layout.
Args:
print_solution: An optional boolean indicating whether to print the full
solution in human-readable format.
Returns:
The computed layout (as a string).
Raises:
SolverError: the internal solver could not find a solution, or the
solution found is infeasible.
"""
# Solve and see how well the solver did.
self._cp_solver = cp_model.CpSolver()
status = self._cp_solver.Solve(self._model)
if status != cp_model.OPTIMAL:
if status == cp_model.FEASIBLE:
logging.warning("A potentially suboptimal solution was found.")
else:
logging.error("Solver returned status %d.", status)
raise SolverError("The solver could not solve the problem and returned "
"status {}.".format(status))
# TODO(joshuawang): Verify the solver's solution.
if print_solution:
print_cp_model_solution.print_solution(self._model, self._cp_solver)
# Reconstruct layout from solution.
layout = []
for mtf_dimension_name in (
self._layout_validator.splittable_mtf_dimension_names):
for mesh_dimension_name in (
self._layout_validator.mesh_dimension_name_to_size):
value = self._cp_solver.Value(self._global_vars[(mtf_dimension_name,
mesh_dimension_name)])
if value: # Value is integer.
layout.append(mtf_dimension_name + ":" + mesh_dimension_name)
layout.sort()
return ";".join(layout) | [
"def",
"solve",
"(",
"self",
",",
"print_solution",
"=",
"False",
")",
":",
"# Solve and see how well the solver did.",
"self",
".",
"_cp_solver",
"=",
"cp_model",
".",
"CpSolver",
"(",
")",
"status",
"=",
"self",
".",
"_cp_solver",
".",
"Solve",
"(",
"self",
... | Solves the current integer program and returns the computed layout.
Args:
print_solution: An optional boolean indicating whether to print the full
solution in human-readable format.
Returns:
The computed layout (as a string).
Raises:
SolverError: the internal solver could not find a solution, or the
solution found is infeasible. | [
"Solves",
"the",
"current",
"integer",
"program",
"and",
"returns",
"the",
"computed",
"layout",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L285-L326 | train | 222,690 |
tensorflow/mesh | mesh_tensorflow/auto_mtf/layout_optimizer.py | LayoutOptimizer.evaluate_layout | def evaluate_layout(self, layout):
"""The current objective value for the given layout.
TODO(joshuawang): The current function does not check that the given
layout is valid.
Args:
layout: a string, representing a layout to evaluate (e.g.
"d_ff:m1;heads:m2").
Returns:
A float, the objective value.
"""
layout_dict = {}
if layout:
for pair in layout.split(";"):
mtf_dimension_name, mesh_dimension_name = pair.split(":", 1)
if (mtf_dimension_name in
self._layout_validator.splittable_mtf_dimension_names):
layout_dict[mtf_dimension_name] = mesh_dimension_name
else:
logging.warning("Skipping unsplittable dimension %s.",
mtf_dimension_name)
tensor_memory = {} # {string: float}, size of each tensor under our layout
for tensor_name in self._graph.get_all_tensor_names():
if self._graph.is_tensor_on_canonical_device(tensor_name):
tensor_memory[tensor_name] = self._graph.get_tensor_size(
tensor_name, layout_dict,
self._layout_validator.mesh_dimension_name_to_size)
else:
tensor_memory[tensor_name] = 0.0
peak_memory_usage = 0.0
for tensor_names in self._get_memory_contents():
memory_usage = 0.0
for tensor_name in tensor_names:
memory_usage += tensor_memory[tensor_name]
peak_memory_usage = max(peak_memory_usage, memory_usage)
return peak_memory_usage | python | def evaluate_layout(self, layout):
"""The current objective value for the given layout.
TODO(joshuawang): The current function does not check that the given
layout is valid.
Args:
layout: a string, representing a layout to evaluate (e.g.
"d_ff:m1;heads:m2").
Returns:
A float, the objective value.
"""
layout_dict = {}
if layout:
for pair in layout.split(";"):
mtf_dimension_name, mesh_dimension_name = pair.split(":", 1)
if (mtf_dimension_name in
self._layout_validator.splittable_mtf_dimension_names):
layout_dict[mtf_dimension_name] = mesh_dimension_name
else:
logging.warning("Skipping unsplittable dimension %s.",
mtf_dimension_name)
tensor_memory = {} # {string: float}, size of each tensor under our layout
for tensor_name in self._graph.get_all_tensor_names():
if self._graph.is_tensor_on_canonical_device(tensor_name):
tensor_memory[tensor_name] = self._graph.get_tensor_size(
tensor_name, layout_dict,
self._layout_validator.mesh_dimension_name_to_size)
else:
tensor_memory[tensor_name] = 0.0
peak_memory_usage = 0.0
for tensor_names in self._get_memory_contents():
memory_usage = 0.0
for tensor_name in tensor_names:
memory_usage += tensor_memory[tensor_name]
peak_memory_usage = max(peak_memory_usage, memory_usage)
return peak_memory_usage | [
"def",
"evaluate_layout",
"(",
"self",
",",
"layout",
")",
":",
"layout_dict",
"=",
"{",
"}",
"if",
"layout",
":",
"for",
"pair",
"in",
"layout",
".",
"split",
"(",
"\";\"",
")",
":",
"mtf_dimension_name",
",",
"mesh_dimension_name",
"=",
"pair",
".",
"s... | The current objective value for the given layout.
TODO(joshuawang): The current function does not check that the given
layout is valid.
Args:
layout: a string, representing a layout to evaluate (e.g.
"d_ff:m1;heads:m2").
Returns:
A float, the objective value. | [
"The",
"current",
"objective",
"value",
"for",
"the",
"given",
"layout",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout_optimizer.py#L328-L367 | train | 222,691 |
tensorflow/mesh | mesh_tensorflow/utils.py | BalancedVariablePlacer.device_function | def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device | python | def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device | [
"def",
"device_function",
"(",
"self",
",",
"var",
")",
":",
"if",
"var",
".",
"type",
"not",
"in",
"(",
"'Variable'",
",",
"'VariableV2'",
",",
"'VarHandleOp'",
")",
":",
"tf",
".",
"logging",
".",
"debug",
"(",
"'Place {} on last device: {}.'",
".",
"for... | Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var. | [
"Choose",
"a",
"device",
"for",
"the",
"input",
"variable",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/utils.py#L45-L70 | train | 222,692 |
tensorflow/mesh | mesh_tensorflow/beam_search.py | greedy_decode | def greedy_decode(logits_fn,
initial_ids,
temperature=0.0,
initial_states=None,
eos_id=EOS_ID,
forced_ids=None,
use_tpu=True):
"""Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
Shoud take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length]
"""
length_dim = initial_ids.shape.dims[-1]
mesh = initial_ids.mesh
num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
def cond_fn(step_num, prev_ids, *unused_states):
"""Should we run another loop iteration."""
overflow = mtf.equal(step_num, num_steps)
has_eos = mtf.reduce_any(
mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)
all_has_eos = mtf.reduce_all(has_eos)
return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))
def body_fn(step_num, ids, *states):
"""Body function for greedy decoding.
Args:
step_num: a mtf.Tensor
ids: a mtf.Tensor
*states: additional mtf.Tensors
Returns:
new_step_num, new_ids, *new_states
"""
logits, new_states = logits_fn(step_num, ids, states)
vocab_dim = logits.shape.dims[-1]
new_ids = mtf.sample_with_temperature(
logits, vocab_dim, temperature)
if forced_ids is not None:
# force the new ids to equal the partial targets where specified
# (positions where partial_targets contain nonzero values)
forced = mtf.gather(forced_ids, step_num, length_dim)
new_ids = forced + new_ids * mtf.to_int32(mtf.equal(forced, 0))
ids += new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32)
new_step_num = step_num + 1
return [new_step_num, ids] + new_states
initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
while_loop_inputs = [initial_step_num, initial_ids] + initial_states
final_step_num, mtf_samples = mtf.while_loop(
cond_fn, body_fn, while_loop_inputs,
num_loop_vars=None if use_tpu else 2)[:2]
mtf_samples = mtf.Print(mtf_samples, [final_step_num], "output_length")
return mtf_samples | python | def greedy_decode(logits_fn,
initial_ids,
temperature=0.0,
initial_states=None,
eos_id=EOS_ID,
forced_ids=None,
use_tpu=True):
"""Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
Shoud take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length]
"""
length_dim = initial_ids.shape.dims[-1]
mesh = initial_ids.mesh
num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
def cond_fn(step_num, prev_ids, *unused_states):
"""Should we run another loop iteration."""
overflow = mtf.equal(step_num, num_steps)
has_eos = mtf.reduce_any(
mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)
all_has_eos = mtf.reduce_all(has_eos)
return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))
def body_fn(step_num, ids, *states):
"""Body function for greedy decoding.
Args:
step_num: a mtf.Tensor
ids: a mtf.Tensor
*states: additional mtf.Tensors
Returns:
new_step_num, new_ids, *new_states
"""
logits, new_states = logits_fn(step_num, ids, states)
vocab_dim = logits.shape.dims[-1]
new_ids = mtf.sample_with_temperature(
logits, vocab_dim, temperature)
if forced_ids is not None:
# force the new ids to equal the partial targets where specified
# (positions where partial_targets contain nonzero values)
forced = mtf.gather(forced_ids, step_num, length_dim)
new_ids = forced + new_ids * mtf.to_int32(mtf.equal(forced, 0))
ids += new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32)
new_step_num = step_num + 1
return [new_step_num, ids] + new_states
initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
while_loop_inputs = [initial_step_num, initial_ids] + initial_states
final_step_num, mtf_samples = mtf.while_loop(
cond_fn, body_fn, while_loop_inputs,
num_loop_vars=None if use_tpu else 2)[:2]
mtf_samples = mtf.Print(mtf_samples, [final_step_num], "output_length")
return mtf_samples | [
"def",
"greedy_decode",
"(",
"logits_fn",
",",
"initial_ids",
",",
"temperature",
"=",
"0.0",
",",
"initial_states",
"=",
"None",
",",
"eos_id",
"=",
"EOS_ID",
",",
"forced_ids",
"=",
"None",
",",
"use_tpu",
"=",
"True",
")",
":",
"length_dim",
"=",
"initi... | Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
Shoud take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length] | [
"Greedy",
"decoding",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/beam_search.py#L577-L642 | train | 222,693 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | pack_and_batch | def pack_and_batch(dataset, batch_size, length, pack=True):
"""Create a tf.data.Dataset which emits training batches.
The input dataset emits feature-dictionaries where each feature is a vector
of integers ending in EOS=1
The tensors in the returned tf.data.Dataset have shape
[batch_size, length]. Zeros indicate padding.
length indicates the length of the emitted examples. Examples with
inputs/targets longer than length get truncated.
TODO(noam): for text2self problems, we should just chop too-long
sequences into multiple parts and train on all of them.
If pack=False, then each emitted example will contain one
example emitted by load_internal().
If pack=True, then multiple examples emitted by load_internal() are
concatenated to form one combined example with the given length.
See comments in the function pack_dataset().
batch_size indicates the number of (combined) examples per batch,
across all cores.
Args:
dataset: a tf.data.Dataset
batch_size: an integer
length: an integer
pack: a boolean
Returns:
a tf.data.Dataset where all features have fixed shape [batch, length].
"""
if pack:
dataset = pack_dataset(dataset, length=length)
# Pad/trim length of each example to length
dataset = dataset.map(
functools.partial(trim_and_pad_all_features, length=length),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.batch(batch_size, drop_remainder=False)
# Pad batch size of each batch to batch_size
dataset = dataset.map(
functools.partial(trim_and_pad_all_features, length=batch_size),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# Remind TensorFlow of the shape
dataset = dataset.map(
lambda x: {k: tf.reshape(v, (batch_size, length)) for k, v in x.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.prefetch(100)
return dataset | python | def pack_and_batch(dataset, batch_size, length, pack=True):
"""Create a tf.data.Dataset which emits training batches.
The input dataset emits feature-dictionaries where each feature is a vector
of integers ending in EOS=1
The tensors in the returned tf.data.Dataset have shape
[batch_size, length]. Zeros indicate padding.
length indicates the length of the emitted examples. Examples with
inputs/targets longer than length get truncated.
TODO(noam): for text2self problems, we should just chop too-long
sequences into multiple parts and train on all of them.
If pack=False, then each emitted example will contain one
example emitted by load_internal().
If pack=True, then multiple examples emitted by load_internal() are
concatenated to form one combined example with the given length.
See comments in the function pack_dataset().
batch_size indicates the number of (combined) examples per batch,
across all cores.
Args:
dataset: a tf.data.Dataset
batch_size: an integer
length: an integer
pack: a boolean
Returns:
a tf.data.Dataset where all features have fixed shape [batch, length].
"""
if pack:
dataset = pack_dataset(dataset, length=length)
# Pad/trim length of each example to length
dataset = dataset.map(
functools.partial(trim_and_pad_all_features, length=length),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.batch(batch_size, drop_remainder=False)
# Pad batch size of each batch to batch_size
dataset = dataset.map(
functools.partial(trim_and_pad_all_features, length=batch_size),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
# Remind TensorFlow of the shape
dataset = dataset.map(
lambda x: {k: tf.reshape(v, (batch_size, length)) for k, v in x.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset = dataset.prefetch(100)
return dataset | [
"def",
"pack_and_batch",
"(",
"dataset",
",",
"batch_size",
",",
"length",
",",
"pack",
"=",
"True",
")",
":",
"if",
"pack",
":",
"dataset",
"=",
"pack_dataset",
"(",
"dataset",
",",
"length",
"=",
"length",
")",
"# Pad/trim length of each example to length",
... | Create a tf.data.Dataset which emits training batches.
The input dataset emits feature-dictionaries where each feature is a vector
of integers ending in EOS=1
The tensors in the returned tf.data.Dataset have shape
[batch_size, length]. Zeros indicate padding.
length indicates the length of the emitted examples. Examples with
inputs/targets longer than length get truncated.
TODO(noam): for text2self problems, we should just chop too-long
sequences into multiple parts and train on all of them.
If pack=False, then each emitted example will contain one
example emitted by load_internal().
If pack=True, then multiple examples emitted by load_internal() are
concatenated to form one combined example with the given length.
See comments in the function pack_dataset().
batch_size indicates the number of (combined) examples per batch,
across all cores.
Args:
dataset: a tf.data.Dataset
batch_size: an integer
length: an integer
pack: a boolean
Returns:
a tf.data.Dataset where all features have fixed shape [batch, length]. | [
"Create",
"a",
"tf",
".",
"data",
".",
"Dataset",
"which",
"emits",
"training",
"batches",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L98-L150 | train | 222,694 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | encode_dataset | def encode_dataset(dataset, vocabulary):
"""Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
"""
def encode(features):
return {k: vocabulary.encode_tf(v) for k, v in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE) | python | def encode_dataset(dataset, vocabulary):
"""Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
"""
def encode(features):
return {k: vocabulary.encode_tf(v) for k, v in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE) | [
"def",
"encode_dataset",
"(",
"dataset",
",",
"vocabulary",
")",
":",
"def",
"encode",
"(",
"features",
")",
":",
"return",
"{",
"k",
":",
"vocabulary",
".",
"encode_tf",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"features",
".",
"items",
"(",
")",
... | Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1 | [
"Encode",
"from",
"strings",
"to",
"token",
"ids",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L153-L164 | train | 222,695 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | packed_parallel_tsv_dataset | def packed_parallel_tsv_dataset(filenames=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
shuffle_buffer_size=10000,
eos_id=1):
"""Reads parallel tab-separated text file. One example per line."""
dataset = tf.data.TextLineDataset(filenames)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(shuffle_buffer_size)
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.to_int64(inputs_enc), [eos_id]], 0)
targets_enc = tf.concat([tf.to_int64(targets_enc), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(_parse_fn)
dataset = dataset.map(_encode_fn)
return pack_and_batch(dataset, batch_size, sequence_length) | python | def packed_parallel_tsv_dataset(filenames=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=gin.REQUIRED,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
shuffle_buffer_size=10000,
eos_id=1):
"""Reads parallel tab-separated text file. One example per line."""
dataset = tf.data.TextLineDataset(filenames)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(shuffle_buffer_size)
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.to_int64(inputs_enc), [eos_id]], 0)
targets_enc = tf.concat([tf.to_int64(targets_enc), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(_parse_fn)
dataset = dataset.map(_encode_fn)
return pack_and_batch(dataset, batch_size, sequence_length) | [
"def",
"packed_parallel_tsv_dataset",
"(",
"filenames",
"=",
"gin",
".",
"REQUIRED",
",",
"dataset_split",
"=",
"gin",
".",
"REQUIRED",
",",
"batch_size",
"=",
"gin",
".",
"REQUIRED",
",",
"sequence_length",
"=",
"gin",
".",
"REQUIRED",
",",
"vocabulary",
"=",... | Reads parallel tab-separated text file. One example per line. | [
"Reads",
"parallel",
"tab",
"-",
"separated",
"text",
"file",
".",
"One",
"example",
"per",
"line",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L213-L250 | train | 222,696 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | supervised_to_dict | def supervised_to_dict(dataset, text2self):
"""Turns a supervised dataset into a dataset with a feature dictionary.
if text2self, then the features dictionary contains a "targets" key.
else, the features dictionary contains "inputs" and "targets" keys.
Args:
dataset: a tf.data.Dataset
text2self: a boolean
Returns:
a tf.data.Dataset
"""
def my_fn(inputs, targets):
if text2self:
return {"targets": targets}
else:
return {"inputs": inputs, "targets": targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | python | def supervised_to_dict(dataset, text2self):
"""Turns a supervised dataset into a dataset with a feature dictionary.
if text2self, then the features dictionary contains a "targets" key.
else, the features dictionary contains "inputs" and "targets" keys.
Args:
dataset: a tf.data.Dataset
text2self: a boolean
Returns:
a tf.data.Dataset
"""
def my_fn(inputs, targets):
if text2self:
return {"targets": targets}
else:
return {"inputs": inputs, "targets": targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | [
"def",
"supervised_to_dict",
"(",
"dataset",
",",
"text2self",
")",
":",
"def",
"my_fn",
"(",
"inputs",
",",
"targets",
")",
":",
"if",
"text2self",
":",
"return",
"{",
"\"targets\"",
":",
"targets",
"}",
"else",
":",
"return",
"{",
"\"inputs\"",
":",
"i... | Turns a supervised dataset into a dataset with a feature dictionary.
if text2self, then the features dictionary contains a "targets" key.
else, the features dictionary contains "inputs" and "targets" keys.
Args:
dataset: a tf.data.Dataset
text2self: a boolean
Returns:
a tf.data.Dataset | [
"Turns",
"a",
"supervised",
"dataset",
"into",
"a",
"dataset",
"with",
"a",
"feature",
"dictionary",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L291-L308 | train | 222,697 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | encode_all_features | def encode_all_features(dataset, vocabulary):
"""Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset
"""
def my_fn(features):
ret = {}
for k, v in features.items():
v = vocabulary.encode_tf(v)
v = tf.concat([tf.to_int64(v), [1]], 0)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | python | def encode_all_features(dataset, vocabulary):
"""Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset
"""
def my_fn(features):
ret = {}
for k, v in features.items():
v = vocabulary.encode_tf(v)
v = tf.concat([tf.to_int64(v), [1]], 0)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) | [
"def",
"encode_all_features",
"(",
"dataset",
",",
"vocabulary",
")",
":",
"def",
"my_fn",
"(",
"features",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"features",
".",
"items",
"(",
")",
":",
"v",
"=",
"vocabulary",
".",
"encode_tf",... | Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset | [
"Encode",
"all",
"features",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L311-L327 | train | 222,698 |
tensorflow/mesh | mesh_tensorflow/transformer/dataset.py | pretokenized_tfrecord_dataset | def pretokenized_tfrecord_dataset(filenames,
text2self,
eos_included,
repeat,
batch_size,
sequence_length):
"""Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches
"""
dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024)
if repeat:
dataset = dataset.repeat()
keys = ["targets"] if text2self else ["inputs", "targets"]
def decode_example(serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
data_fields = {}
data_items_to_decoders = {}
for k in keys:
data_fields[k] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
if not eos_included:
decoded = [tf.concat([v, [1]], 0) for v in decoded]
return dict(zip(decode_items, decoded))
dataset = dataset.map(decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_and_batch(dataset, batch_size, sequence_length) | python | def pretokenized_tfrecord_dataset(filenames,
text2self,
eos_included,
repeat,
batch_size,
sequence_length):
"""Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches
"""
dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024)
if repeat:
dataset = dataset.repeat()
keys = ["targets"] if text2self else ["inputs", "targets"]
def decode_example(serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
data_fields = {}
data_items_to_decoders = {}
for k in keys:
data_fields[k] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
if not eos_included:
decoded = [tf.concat([v, [1]], 0) for v in decoded]
return dict(zip(decode_items, decoded))
dataset = dataset.map(decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_and_batch(dataset, batch_size, sequence_length) | [
"def",
"pretokenized_tfrecord_dataset",
"(",
"filenames",
",",
"text2self",
",",
"eos_included",
",",
"repeat",
",",
"batch_size",
",",
"sequence_length",
")",
":",
"dataset",
"=",
"tf",
".",
"data",
".",
"TFRecordDataset",
"(",
"filenames",
",",
"buffer_size",
... | Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches | [
"Reads",
"tensor2tensor",
"-",
"style",
"data",
"files",
"."
] | 3921196e5e43302e820da0a87329f25d7e2a3016 | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/dataset.py#L330-L376 | train | 222,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.