repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
diffmimic | diffmimic-main/diffmimic/utils/io.py | import brax
import jax.numpy as jnp
from brax import QP
def deserialize_qp(nparray) -> brax.QP:
    """Rebuild a brax QP state from a flat trajectory array.

    The trailing axis packs, per body, 13 scalars grouped field-by-field
    across all bodies: pos (3), rot (4), vel (3), ang (3). Any leading axes
    are treated as batch dimensions and preserved.
    """
    n_body = nparray.shape[-1] // 13  # pos (,3) rot (,4) vel (,3) ang (,3)
    lead = nparray.shape[:-1]
    bounds = [n_body * k for k in (0, 3, 7, 10, 13)]

    def section(idx, width):
        # Slice one field's flat span and restore the (num_bodies, width) layout.
        flat = nparray[..., bounds[idx]:bounds[idx + 1]]
        return jnp.reshape(flat, lead + (n_body, width))

    return QP(pos=section(0, 3), rot=section(1, 4),
              vel=section(2, 3), ang=section(3, 3))
def serialize_qp(qp) -> jnp.array:
    """Flatten a QP state into a single trajectory array.

    Concatenates pos, rot, vel and ang (each flattened over its last two
    axes) along the trailing dimension, preserving any batch dimensions.
    """
    lead = qp.pos.shape[:-2]
    fields = (qp.pos, qp.rot, qp.vel, qp.ang)
    flat = [field.reshape(lead + (-1,)) for field in fields]
    return jnp.concatenate(flat, axis=-1)
| 1,191 | 33.057143 | 86 | py |
diffmimic | diffmimic-main/data/tools/amass_converter.py | import os
import numpy as np
from diffmimic.mimic_envs.system_configs import *
from brax import math
from brax.physics import bodies
from brax.physics.base import QP, vec_to_arr
from data.tools.rotation_utils.conversions import *
from data.tools.joint_utils import *
from data.tools.rotation_utils.quaternion import *
from scipy.interpolate import CubicSpline
from scipy.spatial.transform import Rotation, RotationSpline
from scipy.ndimage import gaussian_filter1d
import matplotlib.pyplot as plt
CFG_SMPL = process_system_cfg(get_system_cfg('smpl'))
def convert_to_qp(ase_poses, ase_vel, ase_ang, pelvis_trans):
    """Build one brax QP state per frame from pose/velocity arrays.

    Args:
        ase_poses: per-frame rotations; index 0 is the root, the rest are joint
            rotations as quaternions (real part first — inferred from the
            normalization at the root; TODO confirm against the caller).
        ase_vel: per-frame linear velocities, same leading layout.
        ase_ang: per-frame angular velocities, same leading layout.
        pelvis_trans: per-frame root translation, shape (T, 3).

    Returns:
        A list of brax QP states, one per input frame.

    NOTE(review): the body mirrors brax's default_qp construction — joints are
    sorted by kinematic depth and world transforms are accumulated parent to
    child via jp.scan.
    """
    qp_list = []
    for i in range(ase_poses.shape[0]):
        qp = QP.zero(shape=(len(CFG_SMPL.bodies),))
        body = bodies.Body(CFG_SMPL)
        # set any default qps from the config
        # Assign each joint its DoF span (beg, end) within the flat DoF vector.
        joint_idxs = []
        j_idx = 0
        for j in CFG_SMPL.joints:
            beg = joint_idxs[-1][1][1] if joint_idxs else 0
            dof = len(j.angle_limit)
            joint_idxs.append((j, (beg, beg + dof), j_idx))
            j_idx += 1
        # Map child -> parent and compute each child's depth in the tree.
        lineage = {j.child: j.parent for j in CFG_SMPL.joints}
        depth = {}
        for child, parent in lineage.items():
            depth[child] = 1
            while parent in lineage:
                parent = lineage[parent]
                depth[child] += 1
        # Process joints shallow-first so parents are set before children.
        joint_idxs = sorted(joint_idxs, key=lambda x: depth.get(x[0].parent, 0))
        joint = [j for j, _, _ in joint_idxs]
        joint_order = [i for _, _, i in joint_idxs]
        # update qp in depth order
        joint_body = jp.array([
            (body.index[j.parent], body.index[j.child]) for j in joint
        ])
        joint_off = jp.array([(vec_to_arr(j.parent_offset),
                               vec_to_arr(j.child_offset)) for j in joint])
        local_rot = ase_poses[i][1:]
        world_vel = ase_vel[i][1:]
        world_ang = ase_ang[i][1:]

        def init(qp):
            # Seed the root body (index 0) with translation, orientation and
            # velocities; an extra -90 deg yaw aligns AMASS with the humanoid
            # frame (presumably — TODO confirm convention).
            pos = jp.index_update(qp.pos, 0, pelvis_trans[i])
            rot = ase_poses[i][0] / jp.norm(ase_poses[i][0])  # important
            rot = math.quat_mul(math.euler_to_quat(np.array([0., -90, 0.])), rot)
            rot = jp.index_update(qp.rot, 0, rot)
            vel = jp.index_update(qp.vel, 0, ase_vel[i][0])
            ang = jp.index_update(qp.ang, 0, ase_ang[i][0])
            qp = qp.replace(pos=pos, rot=rot, vel=vel, ang=ang)
            return qp

        qp = init(qp)
        # Reorder per-joint data into depth order.
        amp_rot = local_rot[joint_order]
        world_vel = world_vel[joint_order]
        world_ang = world_ang[joint_order]
        num_joint_dof = sum(len(j.angle_limit) for j in CFG_SMPL.joints)
        num_joints = len(CFG_SMPL.joints)
        # 'takes' scatters the flat DoF vector into per-joint 3-vectors,
        # padding missing axes with index num_joint_dof (a sentinel zero).
        takes = []
        for j, (beg, end), _ in joint_idxs:
            arr = list(range(beg, end))
            arr.extend([num_joint_dof] * (3 - len(arr)))
            takes.extend(arr)
        takes = jp.array(takes, dtype=int)

        def to_dof(a):
            # Pack per-joint (num_joints, 3) angles into the flat DoF vector.
            b = np.zeros([num_joint_dof])
            for idx, (j, (beg, end), _) in enumerate(joint_idxs):
                b[beg:end] = a[idx, :end - beg]
            return b

        def to_3dof(a):
            # Expand the flat DoF vector back to (num_joints, 3), zero-padded.
            a = jp.concatenate([a, jp.array([0.0])])
            a = jp.take(a, takes)
            a = jp.reshape(a, (num_joints, 3))
            return a

        # build local rot and ang per joint
        joint_rot = jp.array(
            [math.euler_to_quat(vec_to_arr(j.rotation)) for j in joint])
        joint_ref = jp.array(
            [math.euler_to_quat(vec_to_arr(j.reference_rotation)) for j in joint])

        def local_rot_ang(_, x):
            # Compose per-axis euler angles into a local quaternion (forward).
            angles, vels, rot, ref = x
            axes = jp.vmap(math.rotate, [True, False])(jp.eye(3), rot)
            ang = jp.dot(axes.T, vels).T
            rot = ref
            for axis, angle in zip(axes, angles):
                # these are euler intrinsic rotations, so the axes are rotated too:
                axis = math.rotate(axis, rot)
                next_rot = math.quat_rot_axis(axis, angle)
                rot = math.quat_mul(next_rot, rot)
            return (), (rot, ang)

        def local_rot_ang_inv(_, x):
            # Same composition but with the joint frame inverted.
            angles, vels, rot, ref = x
            axes = jp.vmap(math.rotate, [True, False])(jp.eye(3), math.quat_inv(rot))
            ang = jp.dot(axes.T, vels).T
            rot = ref
            for axis, angle in zip(axes, angles):
                # these are euler intrinsic rotations, so the axes are rotated too:
                axis = math.rotate(axis, rot)
                next_rot = math.quat_rot_axis(axis, angle)
                rot = math.quat_mul(next_rot, rot)
            return (), (rot, ang)

        # Round-trip: quat -> euler -> joint-frame quats -> euler -> clamp to
        # each joint's actual DoF count -> final local quats.
        amp_rot = quaternion_to_euler(amp_rot)
        xs = (amp_rot, world_ang, joint_rot, joint_ref)
        _, (amp_rot, _) = jp.scan(local_rot_ang_inv, (), xs, len(joint))
        amp_rot = quaternion_to_euler(amp_rot)
        amp_rot = to_3dof(to_dof(amp_rot))
        xs = (amp_rot, world_ang, joint_rot, joint_ref)
        _, (amp_rot, _) = jp.scan(local_rot_ang, (), xs, len(joint))

        def set_qp(carry, x):
            # Propagate parent world transform to the child body.
            qp, = carry
            (body_p, body_c), (off_p, off_c), local_rot, world_ang, world_vel = x
            local_rot = local_rot / jp.norm(local_rot)  # important
            world_rot = math.quat_mul(qp.rot[body_p], local_rot)
            world_rot = world_rot / jp.norm(world_rot)  # important
            local_pos = off_p - math.rotate(off_c, local_rot)
            world_pos = qp.pos[body_p] + math.rotate(local_pos, qp.rot[body_p])
            world_vel = qp.vel[body_p] + math.rotate(local_pos, math.euler_to_quat(qp.ang[body_p]))
            pos = jp.index_update(qp.pos, body_c, world_pos)
            rot = jp.index_update(qp.rot, body_c, world_rot)
            vel = jp.index_update(qp.vel, body_c, world_vel)
            ang = jp.index_update(qp.ang, body_c, world_ang)
            qp = qp.replace(pos=pos, rot=rot, vel=vel, ang=ang)
            return (qp,), ()

        xs = (joint_body, joint_off, amp_rot, world_ang, world_vel)
        (qp,), () = jp.scan(set_qp, (qp,), xs, len(joint))
        # any trees that have no body qp overrides in the config are moved above
        # the xy plane. this convenience operation may be removed in the future.
        fixed = {j.child for j in joint}
        root_idx = {
            b.name: [i]
            for i, b in enumerate(CFG_SMPL.bodies)
            if b.name not in fixed
        }
        for j in joint:
            parent = j.parent
            while parent in lineage:
                parent = lineage[parent]
            if parent in root_idx:
                root_idx[parent].append(body.index[j.child])
        for children in root_idx.values():
            zs = jp.array([
                bodies.min_z(jp.take(qp, c), CFG_SMPL.bodies[c]) for c in children
            ])
            min_z = min(jp.amin(zs), 0)
            children = jp.array(children)
            pos = jp.take(qp.pos, children) - min_z * jp.array([0., 0., 1.])
            pos = jp.index_update(qp.pos, children, pos)
            qp = qp.replace(pos=pos)
        qp_list.append(qp)
    return qp_list
def convert_to_states(qp_list):
    """Flatten a sequence of QP states into a (T, D) trajectory array.

    Each frame concatenates the flattened pos, rot, vel and ang fields,
    matching the layout expected by deserialize_qp.
    """
    frames = [
        np.concatenate(
            [qp.pos.reshape(-1), qp.rot.reshape(-1),
             qp.vel.reshape(-1), qp.ang.reshape(-1)],
            axis=-1)
        for qp in qp_list
    ]
    return np.stack(frames, axis=0)
def convert(x):
    """Remap an array between coordinate conventions, by length.

    * length 3 (position): swap the y/z components, negate the new y.
    * length 1 (yaw angle): build a quaternion from the negated angle.
    * length 4 (quaternion): swap components 2/3, negate the new index 2.
    """
    x = np.array(x)
    n = x.shape[0]
    if n == 3:
        out = x[[0, 2, 1]]
        out[1] = -out[1]
        return out
    if n == 1:
        return euler_to_quaternion(np.array([0, -1 * x[0], 0]))
    out = x[[0, 1, 3, 2]]
    out[2] = -out[2]
    return out
def interpolate(y, dt, target_dt, gt=None):
    """Resample y (sampled every dt seconds) onto a target_dt grid.

    Fits a cubic spline through the samples and returns the resampled values
    together with a Gaussian-smoothed analytic derivative. When gt is given,
    the raw and smoothed derivatives are plotted against it for inspection.
    """
    t_src = np.arange(y.shape[0]) * dt
    n_out = int(y.shape[0] * dt / target_dt)
    t_out = np.arange(n_out) * target_dt
    spline = CubicSpline(t_src, y)
    deriv = spline.derivative()(t_out)
    deriv_smooth = gaussian_filter1d(deriv, sigma=2 * dt / target_dt, axis=0)
    if gt is not None:
        plt.plot(t_src, gt)
        plt.plot(t_out, deriv, 'x')
        plt.plot(t_out, deriv_smooth, '--')
        plt.show()
    return spline(t_out), deriv_smooth
def _compute_angular_velocity(r, time_delta: float):
    """Finite-difference angular velocity from a quaternion sequence.

    r holds per-frame quaternions with time along the first axis; the layout
    follows the project's quat utils (scalar-last per the caller's comment —
    TODO confirm). The final frame keeps the identity difference, i.e. zero
    velocity.
    """
    delta_q = quat_identity_like(r)
    # q_{t+1} * inverse(q_t) is the rotation carrying frame t to frame t+1.
    delta_q[:-1, :] = quat_mul_norm(r[1:, :], quat_inverse(r[:-1, :]))
    angle, axis = quat_angle_axis(delta_q)
    return axis * angle[..., None] / time_delta
def get_ang_vel(rot, dt, target_dt, gt=None):
    """Resample a scalar-first quaternion track and return smoothed angular velocity.

    rot: (T, 4) quaternions with real part first; resampled with a
    RotationSpline, differentiated, then Gaussian-smoothed along time.
    """
    xyzw = rot[..., [1, 2, 3, 0]]  # scipy expects scalar-last quaternions
    t_src = np.arange(xyzw.shape[0]) * dt
    t_out = np.arange(int(xyzw.shape[0] * dt / target_dt)) * target_dt
    spline = RotationSpline(t_src, Rotation.from_quat(xyzw))
    resampled = spline(t_out, 0).as_quat()  # [x,y,z,w]
    ang = _compute_angular_velocity(resampled, target_dt)
    ang_smoothed = gaussian_filter1d(ang, sigma=2 * dt / target_dt, axis=0, mode="nearest")
    if gt is not None:
        plt.plot(t_src, gt, 'x')
        plt.plot(t_out, ang_smoothed, '-')
        plt.show()
    return ang_smoothed
def get_rot(rot, dt, target_dt):
    """Resample a scalar-first quaternion track to target_dt.

    Converts to scipy's scalar-last layout, fits a RotationSpline, samples it
    on the target grid, and converts back to scalar-first.
    """
    xyzw = rot[..., [1, 2, 3, 0]]  # scipy uses scalar-last ordering
    nframe = xyzw.shape[0]
    t_src = np.arange(nframe) * dt
    t_out = np.arange(int(nframe * dt / target_dt)) * target_dt
    spline = RotationSpline(t_src, Rotation.from_quat(xyzw))
    out = spline(t_out, 0).as_quat()
    return out[:, [3, 0, 1, 2]]  # back to scalar-first
if __name__ == '__main__':
    # Convert selected AMASS mocap clips into humanoid state trajectories and
    # save them as .npy demos for diffmimic.
    for fps in ['30']:
        for fname in [
            # 'KIT/10/WalkingStraightBackwards07_stageii.npz',
            # 'KIT/200/KickHuefthoch05_stageii.npz',
            'CMU/75/75_09_stageii.npz'
        ]:
            in_file = '/PATH/TO/MOTION/{}'.format(fname)
            action = os.path.basename(fname).split('.')[0]
            ase_motion = np.load(in_file)
            for k in ase_motion.files:
                print(k)
            # Root orientation + body pose as axis-angle, reshaped to (T, J, 3).
            ase_poses = np.concatenate([ase_motion['root_orient'], ase_motion['pose_body']], -1)
            ase_poses = ase_poses.reshape([ase_poses.shape[0], -1, 3])
            print(ase_poses.shape)
            # Reorder SMPL joints to the humanoid skeleton, then re-express
            # rotations as quaternions via a ZXY->XYZ euler round trip
            # (axis remapping — presumably aligning SMPL to the brax frame).
            ase_poses = ase_poses[:, SMPL2HUMANOID]
            ase_poses = axis_angle_to_matrix(torch.from_numpy(ase_poses)).float()
            ase_poses = matrix_to_euler_angles(ase_poses, "ZXY")
            ase_poses = matrix_to_quaternion(euler_angles_to_matrix(ase_poses, 'XYZ')).numpy()
            # Swap/negate translation axes to match the target convention.
            pelvis_trans = ase_motion['trans']
            pelvis_trans = pelvis_trans[:, [1,0,2]]
            pelvis_trans[:, 0] *= -1
            print(ase_motion['mocap_time_length'])
            dt = ase_motion['mocap_time_length'] / ase_poses.shape[0]
            print(dt)
            # Velocities start at zero; real values are filled in after resampling.
            ase_ang = np.zeros_like(ase_poses)[..., :-1]
            ase_vel = np.zeros_like(ase_poses)[..., :-1]
            target_dt = {
                'orig': dt,
                '16': 0.0625,
                '30': 0.0333
            }[fps]
            # First pass (zero translation) only to obtain absolute body
            # rotations and the root height for the ground offset below.
            _qp_list = convert_to_qp(ase_poses, ase_vel, ase_ang, pelvis_trans * 0.)
            abs_poses = np.stack([qp.rot for qp in _qp_list], axis=0)
            abs_trans = np.stack([qp.pos[0] for qp in _qp_list], axis=0)
            # Resample rotations and derive smoothed angular velocities per joint.
            ase_poses_interp = np.stack([get_rot(ase_poses[:, i, :], dt, target_dt) for i in range(ase_poses.shape[1])],
                                        axis=1)
            ase_ang_interp = np.stack(
                [get_ang_vel(abs_poses[:, i, :], dt, target_dt) for i in range(ase_poses.shape[1])], axis=1)
            # Shift the pelvis so the first frame sits at the reconstructed
            # root height (minus a small clearance). NOTE(review): the offset is
            # computed from z only but added to all three coordinates — confirm
            # this is intended.
            offset = abs_trans[0, 2] - pelvis_trans[0, 2]
            print(offset)
            pelvis_trans -= 0.05
            pelvis_trans += offset
            pelvis_trans_interp, pelvis_trans_vel_interp = interpolate(pelvis_trans, dt, target_dt)
            ase_vel_interp = np.zeros_like(ase_ang_interp)
            ase_vel_interp[:, 0] = pelvis_trans_vel_interp
            # Second pass with full translations and velocities.
            qp_list = convert_to_qp(ase_poses_interp, ase_vel_interp, ase_ang_interp, pelvis_trans_interp)
            demo_traj = convert_to_states(qp_list)
            # Keep a fixed 60-frame window of the clip.
            demo_traj = demo_traj[60:120]
            print(action, demo_traj.shape[0])
            with open('../demo_amass/{}.npy'.format(action), 'wb') as f:
                np.save(f, demo_traj)
| 12,299 | 38.423077 | 120 | py |
diffmimic | diffmimic-main/data/tools/aist_converter.py | import os
import numpy as np
from diffmimic.mimic_envs.system_configs import *
from brax import math
from brax.physics import bodies
from brax.physics.base import QP, vec_to_arr
from data.tools.rotation_utils.conversions import *
from data.tools.joint_utils import *
from data.tools.rotation_utils.quaternion import *
from scipy.interpolate import CubicSpline
from scipy.spatial.transform import Rotation, RotationSpline
from scipy.ndimage import gaussian_filter1d
import matplotlib.pyplot as plt
import pickle
CFG_SMPL = process_system_cfg(get_system_cfg('smpl'))
def convert_to_qp(ase_poses, ase_vel, ase_ang, pelvis_trans):
    """Build one brax QP state per frame from pose/velocity arrays (AIST++).

    Args:
        ase_poses: per-frame rotations; index 0 is the root, the rest are joint
            rotations as quaternions (real part first — inferred from the
            normalization at the root; TODO confirm against the caller).
        ase_vel: per-frame linear velocities, same leading layout.
        ase_ang: per-frame angular velocities, same leading layout.
        pelvis_trans: per-frame root translation, shape (T, 3).

    Returns:
        A list of brax QP states, one per input frame.

    NOTE(review): identical to the AMASS converter's version except the root
    is not pre-rotated by -90 deg yaw.
    """
    qp_list = []
    for i in range(ase_poses.shape[0]):
        qp = QP.zero(shape=(len(CFG_SMPL.bodies),))
        body = bodies.Body(CFG_SMPL)
        # set any default qps from the config
        # Assign each joint its DoF span (beg, end) within the flat DoF vector.
        joint_idxs = []
        j_idx = 0
        for j in CFG_SMPL.joints:
            beg = joint_idxs[-1][1][1] if joint_idxs else 0
            dof = len(j.angle_limit)
            joint_idxs.append((j, (beg, beg + dof), j_idx))
            j_idx += 1
        # Map child -> parent and compute each child's depth in the tree.
        lineage = {j.child: j.parent for j in CFG_SMPL.joints}
        depth = {}
        for child, parent in lineage.items():
            depth[child] = 1
            while parent in lineage:
                parent = lineage[parent]
                depth[child] += 1
        # Process joints shallow-first so parents are set before children.
        joint_idxs = sorted(joint_idxs, key=lambda x: depth.get(x[0].parent, 0))
        joint = [j for j, _, _ in joint_idxs]
        joint_order = [i for _, _, i in joint_idxs]
        # update qp in depth order
        joint_body = jp.array([
            (body.index[j.parent], body.index[j.child]) for j in joint
        ])
        joint_off = jp.array([(vec_to_arr(j.parent_offset),
                               vec_to_arr(j.child_offset)) for j in joint])
        local_rot = ase_poses[i][1:]
        world_vel = ase_vel[i][1:]
        world_ang = ase_ang[i][1:]

        def init(qp):
            # Seed the root body (index 0) with translation, orientation and
            # velocities.
            pos = jp.index_update(qp.pos, 0, pelvis_trans[i])
            rot = ase_poses[i][0] / jp.norm(ase_poses[i][0])  # important
            rot = jp.index_update(qp.rot, 0, rot)
            vel = jp.index_update(qp.vel, 0, ase_vel[i][0])
            ang = jp.index_update(qp.ang, 0, ase_ang[i][0])
            qp = qp.replace(pos=pos, rot=rot, vel=vel, ang=ang)
            return qp

        qp = init(qp)
        # Reorder per-joint data into depth order.
        amp_rot = local_rot[joint_order]
        world_vel = world_vel[joint_order]
        world_ang = world_ang[joint_order]
        num_joint_dof = sum(len(j.angle_limit) for j in CFG_SMPL.joints)
        num_joints = len(CFG_SMPL.joints)
        # 'takes' scatters the flat DoF vector into per-joint 3-vectors,
        # padding missing axes with index num_joint_dof (a sentinel zero).
        takes = []
        for j, (beg, end), _ in joint_idxs:
            arr = list(range(beg, end))
            arr.extend([num_joint_dof] * (3 - len(arr)))
            takes.extend(arr)
        takes = jp.array(takes, dtype=int)

        def to_dof(a):
            # Pack per-joint (num_joints, 3) angles into the flat DoF vector.
            b = np.zeros([num_joint_dof])
            for idx, (j, (beg, end), _) in enumerate(joint_idxs):
                b[beg:end] = a[idx, :end - beg]
            return b

        def to_3dof(a):
            # Expand the flat DoF vector back to (num_joints, 3), zero-padded.
            a = jp.concatenate([a, jp.array([0.0])])
            a = jp.take(a, takes)
            a = jp.reshape(a, (num_joints, 3))
            return a

        # build local rot and ang per joint
        joint_rot = jp.array(
            [math.euler_to_quat(vec_to_arr(j.rotation)) for j in joint])
        joint_ref = jp.array(
            [math.euler_to_quat(vec_to_arr(j.reference_rotation)) for j in joint])

        def local_rot_ang(_, x):
            # Compose per-axis euler angles into a local quaternion (forward).
            angles, vels, rot, ref = x
            axes = jp.vmap(math.rotate, [True, False])(jp.eye(3), rot)
            ang = jp.dot(axes.T, vels).T
            rot = ref
            for axis, angle in zip(axes, angles):
                # these are euler intrinsic rotations, so the axes are rotated too:
                axis = math.rotate(axis, rot)
                next_rot = math.quat_rot_axis(axis, angle)
                rot = math.quat_mul(next_rot, rot)
            return (), (rot, ang)

        def local_rot_ang_inv(_, x):
            # Same composition but with the joint frame inverted.
            angles, vels, rot, ref = x
            axes = jp.vmap(math.rotate, [True, False])(jp.eye(3), math.quat_inv(rot))
            ang = jp.dot(axes.T, vels).T
            rot = ref
            for axis, angle in zip(axes, angles):
                # these are euler intrinsic rotations, so the axes are rotated too:
                axis = math.rotate(axis, rot)
                next_rot = math.quat_rot_axis(axis, angle)
                rot = math.quat_mul(next_rot, rot)
            return (), (rot, ang)

        # Round-trip: quat -> euler -> joint-frame quats -> euler -> clamp to
        # each joint's actual DoF count -> final local quats.
        amp_rot = quaternion_to_euler(amp_rot)
        xs = (amp_rot, world_ang, joint_rot, joint_ref)
        _, (amp_rot, _) = jp.scan(local_rot_ang_inv, (), xs, len(joint))
        amp_rot = quaternion_to_euler(amp_rot)
        amp_rot = to_3dof(to_dof(amp_rot))
        xs = (amp_rot, world_ang, joint_rot, joint_ref)
        _, (amp_rot, _) = jp.scan(local_rot_ang, (), xs, len(joint))

        def set_qp(carry, x):
            # Propagate parent world transform to the child body.
            qp, = carry
            (body_p, body_c), (off_p, off_c), local_rot, world_ang, world_vel = x
            local_rot = local_rot / jp.norm(local_rot)  # important
            world_rot = math.quat_mul(qp.rot[body_p], local_rot)
            world_rot = world_rot / jp.norm(world_rot)  # important
            local_pos = off_p - math.rotate(off_c, local_rot)
            world_pos = qp.pos[body_p] + math.rotate(local_pos, qp.rot[body_p])
            world_vel = qp.vel[body_p] + math.rotate(local_pos, math.euler_to_quat(qp.ang[body_p]))
            pos = jp.index_update(qp.pos, body_c, world_pos)
            rot = jp.index_update(qp.rot, body_c, world_rot)
            vel = jp.index_update(qp.vel, body_c, world_vel)
            ang = jp.index_update(qp.ang, body_c, world_ang)
            qp = qp.replace(pos=pos, rot=rot, vel=vel, ang=ang)
            return (qp,), ()

        xs = (joint_body, joint_off, amp_rot, world_ang, world_vel)
        (qp,), () = jp.scan(set_qp, (qp,), xs, len(joint))
        # any trees that have no body qp overrides in the config are moved above
        # the xy plane. this convenience operation may be removed in the future.
        fixed = {j.child for j in joint}
        root_idx = {
            b.name: [i]
            for i, b in enumerate(CFG_SMPL.bodies)
            if b.name not in fixed
        }
        for j in joint:
            parent = j.parent
            while parent in lineage:
                parent = lineage[parent]
            if parent in root_idx:
                root_idx[parent].append(body.index[j.child])
        for children in root_idx.values():
            zs = jp.array([
                bodies.min_z(jp.take(qp, c), CFG_SMPL.bodies[c]) for c in children
            ])
            min_z = min(jp.amin(zs), 0)
            children = jp.array(children)
            pos = jp.take(qp.pos, children) - min_z * jp.array([0., 0., 1.])
            pos = jp.index_update(qp.pos, children, pos)
            qp = qp.replace(pos=pos)
        qp_list.append(qp)
    return qp_list
def convert_to_states(qp_list):
    """Flatten a sequence of QP states into a (T, D) trajectory array.

    Each frame concatenates the flattened pos, rot, vel and ang fields,
    matching the layout expected by deserialize_qp.
    """
    frames = [
        np.concatenate(
            [qp.pos.reshape(-1), qp.rot.reshape(-1),
             qp.vel.reshape(-1), qp.ang.reshape(-1)],
            axis=-1)
        for qp in qp_list
    ]
    return np.stack(frames, axis=0)
def convert(x):
    """Remap an array between coordinate conventions, by length.

    * length 3 (position): swap the y/z components, negate the new y.
    * length 1 (yaw angle): build a quaternion from the negated angle.
    * length 4 (quaternion): swap components 2/3, negate the new index 2.
    """
    x = np.array(x)
    n = x.shape[0]
    if n == 3:
        out = x[[0, 2, 1]]
        out[1] = -out[1]
        return out
    if n == 1:
        return euler_to_quaternion(np.array([0, -1 * x[0], 0]))
    out = x[[0, 1, 3, 2]]
    out[2] = -out[2]
    return out
def interpolate(y, dt, target_dt, gt=None):
    """Resample y (sampled every dt seconds) onto a target_dt grid.

    Fits a cubic spline through the samples and returns the resampled values
    together with a Gaussian-smoothed analytic derivative. When gt is given,
    the raw and smoothed derivatives are plotted against it for inspection.
    """
    t_src = np.arange(y.shape[0]) * dt
    n_out = int(y.shape[0] * dt / target_dt)
    t_out = np.arange(n_out) * target_dt
    spline = CubicSpline(t_src, y)
    deriv = spline.derivative()(t_out)
    deriv_smooth = gaussian_filter1d(deriv, sigma=2 * dt / target_dt, axis=0)
    if gt is not None:
        plt.plot(t_src, gt)
        plt.plot(t_out, deriv, 'x')
        plt.plot(t_out, deriv_smooth, '--')
        plt.show()
    return spline(t_out), deriv_smooth
def _compute_angular_velocity(r, time_delta: float):
    """Finite-difference angular velocity from a quaternion sequence.

    r holds per-frame quaternions with time along the first axis; the layout
    follows the project's quat utils (scalar-last per the caller's comment —
    TODO confirm). The final frame keeps the identity difference, i.e. zero
    velocity.
    """
    delta_q = quat_identity_like(r)
    # q_{t+1} * inverse(q_t) is the rotation carrying frame t to frame t+1.
    delta_q[:-1, :] = quat_mul_norm(r[1:, :], quat_inverse(r[:-1, :]))
    angle, axis = quat_angle_axis(delta_q)
    return axis * angle[..., None] / time_delta
def get_ang_vel(rot, dt, target_dt, gt=None):
    """Resample a scalar-first quaternion track and return smoothed angular velocity.

    rot: (T, 4) quaternions with real part first; resampled with a
    RotationSpline, differentiated, then Gaussian-smoothed along time.
    """
    xyzw = rot[..., [1, 2, 3, 0]]  # scipy expects scalar-last quaternions
    t_src = np.arange(xyzw.shape[0]) * dt
    t_out = np.arange(int(xyzw.shape[0] * dt / target_dt)) * target_dt
    spline = RotationSpline(t_src, Rotation.from_quat(xyzw))
    resampled = spline(t_out, 0).as_quat()  # [x,y,z,w]
    ang = _compute_angular_velocity(resampled, target_dt)
    ang_smoothed = gaussian_filter1d(ang, sigma=2 * dt / target_dt, axis=0, mode="nearest")
    if gt is not None:
        plt.plot(t_src, gt, 'x')
        plt.plot(t_out, ang_smoothed, '-')
        plt.show()
    return ang_smoothed
def get_rot(rot, dt, target_dt):
    """Resample a scalar-first quaternion track to target_dt.

    Converts to scipy's scalar-last layout, fits a RotationSpline, samples it
    on the target grid, and converts back to scalar-first.
    """
    xyzw = rot[..., [1, 2, 3, 0]]  # scipy uses scalar-last ordering
    nframe = xyzw.shape[0]
    t_src = np.arange(nframe) * dt
    t_out = np.arange(int(nframe * dt / target_dt)) * target_dt
    spline = RotationSpline(t_src, Rotation.from_quat(xyzw))
    out = spline(t_out, 0).as_quat()
    return out[:, [3, 0, 1, 2]]  # back to scalar-first
if __name__ == '__main__':
    # Convert AIST++ SMPL dance clips into humanoid state trajectories and
    # save them as .npy demos for diffmimic.
    for fps in ['30']:
        for smpl_fn in [
            'gBR_sBM_cAll_d04_mBR0_ch01.pkl',
            'gBR_sFM_cAll_d04_mBR1_ch02.pkl'
        ]:
            in_file = '/PATH/TO/MOTION/{}'.format(smpl_fn)
            with open(in_file, 'rb') as f:
                data = pickle.load(f)
            print(data.keys())
            ase_poses = data['smpl_poses'].reshape(-1, 24, 3)  # (N, 24, 3)
            pelvis_trans = data['smpl_trans'].reshape(-1, 3)  # (N, 24, 3)
            action = os.path.basename(smpl_fn).split('.')[0]
            # Reorder SMPL joints to the humanoid skeleton, then re-express
            # rotations as quaternions via a ZXY->XYZ euler round trip
            # (axis remapping — presumably aligning SMPL to the brax frame).
            ase_poses = ase_poses[:, SMPL2HUMANOID]
            ase_poses = axis_angle_to_matrix(torch.from_numpy(ase_poses)).float()
            ase_poses = matrix_to_euler_angles(ase_poses, "ZXY")
            ase_poses = matrix_to_quaternion(euler_angles_to_matrix(ase_poses, 'XYZ')).numpy()
            # Cycle translation axes to match the target convention.
            pelvis_trans = pelvis_trans[:, [2, 0, 1]]
            dt = 0.016667  # AIST++ clips are 60 fps
            # Velocities start at zero; real values are filled in after resampling.
            ase_ang = np.zeros_like(ase_poses)[..., :-1]
            ase_vel = np.zeros_like(ase_poses)[..., :-1]
            target_dt = {
                'orig': dt,
                '16': 0.0625,
                '30': 0.0333
            }[fps]
            # First pass (zero translation) only to obtain absolute body
            # rotations and the root height for the ground offset below.
            _qp_list = convert_to_qp(ase_poses, ase_vel, ase_ang, pelvis_trans*0.)
            abs_poses = np.stack([qp.rot for qp in _qp_list], axis=0)
            abs_trans = np.stack([qp.pos[0] for qp in _qp_list], axis=0)
            # Resample rotations and derive smoothed angular velocities per joint.
            ase_poses_interp = np.stack([get_rot(ase_poses[:, i, :], dt, target_dt) for i in range(ase_poses.shape[1])],
                                        axis=1)
            ase_ang_interp = np.stack(
                [get_ang_vel(abs_poses[:, i, :], dt, target_dt) for i in range(ase_poses.shape[1])], axis=1)
            # Undo the SMPL scaling, then shift so the first frame matches the
            # reconstructed root height minus a small clearance.
            # NOTE(review): the offset derives from z only but is added to all
            # three coordinates — confirm this is intended.
            trans_scale = 1. / data['smpl_scaling'][0]
            pelvis_trans *= trans_scale
            trans_offset = abs_trans[0,2] - pelvis_trans[0, 2]
            print(trans_scale)
            trans_offset -= 0.05
            pelvis_trans += trans_offset  # scaling
            pelvis_trans_interp, pelvis_trans_vel_interp = interpolate(pelvis_trans, dt, target_dt)
            ase_vel_interp = np.zeros_like(ase_ang_interp)
            ase_vel_interp[:, 0] = pelvis_trans_vel_interp
            # Second pass with full translations and velocities.
            qp_list = convert_to_qp(ase_poses_interp, ase_vel_interp, ase_ang_interp, pelvis_trans_interp)
            demo_traj = convert_to_states(qp_list)
            print(action, demo_traj.shape[0])
            with open('../demo_aist/{}.npy'.format(action), 'wb') as f:
                np.save(f, demo_traj)
| 12,007 | 37.860841 | 120 | py |
diffmimic | diffmimic-main/data/tools/rotation_utils/conversions.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.nn.functional as F
def quaternion_to_euler(x, axis='XYZ'):
    """Convert numpy quaternions (real part first) to Euler angles in radians.

    Round-trips through torch: quaternion -> rotation matrix -> Euler angles
    in the given convention.
    """
    mat = quaternion_to_matrix(torch.from_numpy(x))
    return matrix_to_euler_angles(mat, axis).numpy()
def euler_to_quaternion(x, axis='XYZ'):
    """Convert numpy Euler angles (radians) to real-part-first quaternions.

    Round-trips through torch: Euler angles in the given convention ->
    rotation matrix -> quaternion.
    """
    mat = euler_angles_to_matrix(torch.from_numpy(x), axis)
    return matrix_to_quaternion(mat).numpy()
"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as
R = [
[Rxx, Rxy, Rxz],
[Ryx, Ryy, Ryz],
[Rzx, Rzy, Rzz],
] # (3, 3)
This matrix can be applied to column vectors by post multiplication
by the points e.g.
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
transformed_points = R * points
To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:
e.g.
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * R.transpose(1, 0)
"""
def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to rotation matrices.
    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).
    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # 2 / |q|^2 normalizes implicitly, so non-unit quaternions still work.
    s = 2.0 / (quaternions * quaternions).sum(-1)
    entries = (
        1 - s * (y * y + z * z),
        s * (x * y - z * w),
        s * (x * z + y * w),
        s * (x * y + z * w),
        1 - s * (x * x + z * z),
        s * (y * z - x * w),
        s * (x * z - y * w),
        s * (y * z + x * w),
        1 - s * (x * x + y * y),
    )
    flat = torch.stack(entries, -1)
    return flat.reshape(quaternions.shape[:-1] + (3, 3))
def _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
"""
Return a tensor where each element has the absolute value taken from the,
corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to quaternions.
    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).
    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )
    # |q_r|, |q_i|, |q_j|, |q_k| up to sign, from the trace identities.
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )
    # we produce the desired quaternion multiplied by each of r, i, j, k
    quat_by_rijk = torch.stack(
        [
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )
    # We floor here at 0.1 but the exact level is not important; if q_abs is small,
    # the candidate won't be picked.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
    # forall i; we pick the best-conditioned one (with the largest denominator)
    return quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :  # pyre-ignore[16]
    ].reshape(batch_dim + (4,))
def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
"""
Return the rotation matrices for one of the rotations about an axis
of which Euler angles describe, for each value of the angle given.
Args:
axis: Axis label "X" or "Y or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
elif axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
elif axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
else:
raise ValueError("letter must be either X, Y or Z.")
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotations given as Euler angles in radians to rotation matrices.
    Args:
        euler_angles: Euler angles in radians as tensor of shape (..., 3).
        convention: Convention string of three uppercase letters from
            {"X", "Y", and "Z"}.
    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    # Validation order is significant: angles first, then the convention.
    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
        raise ValueError("Invalid input euler angles.")
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    angles = torch.unbind(euler_angles, -1)
    r1, r2, r3 = (_axis_angle_rotation(c, e) for c, e in zip(convention, angles))
    return torch.matmul(torch.matmul(r1, r2), r3)
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
) -> torch.Tensor:
"""
Extract the first or third Euler angle from the two members of
the matrix which are positive constant times its sine and cosine.
Args:
axis: Axis label "X" or "Y or "Z" for the angle we are finding.
other_axis: Axis label "X" or "Y or "Z" for the middle axis in the
convention.
data: Rotation matrices as tensor of shape (..., 3, 3).
horizontal: Whether we are looking for the angle for the third axis,
which means the relevant entries are in the same row of the
rotation matrix. If not, they are in the same column.
tait_bryan: Whether the first and third axes in the convention differ.
Returns:
Euler Angles in radians for each matrix in data as a tensor
of shape (...).
"""
i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
if horizontal:
i2, i1 = i1, i2
even = (axis + other_axis) in ["XY", "YZ", "ZX"]
if horizontal == even:
return torch.atan2(data[..., i1], data[..., i2])
if tait_bryan:
return torch.atan2(-data[..., i2], data[..., i1])
return torch.atan2(data[..., i2], -data[..., i1])
def _index_from_letter(letter: str) -> int:
if letter == "X":
return 0
if letter == "Y":
return 1
if letter == "Z":
return 2
raise ValueError("letter must be either X, Y or Z.")
def matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to Euler angles in radians.
    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).
        convention: Convention string of three uppercase letters.
    Returns:
        Euler angles in radians as tensor of shape (..., 3).
    """
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    i0 = _index_from_letter(convention[0])
    i2 = _index_from_letter(convention[2])
    # Tait-Bryan conventions (distinct first/third axes) read the middle angle
    # from a sine entry; proper Euler conventions read it from a cosine entry.
    tait_bryan = i0 != i2
    if tait_bryan:
        central_angle = torch.asin(
            matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
        )
    else:
        central_angle = torch.acos(matrix[..., i0, i0])
    # First angle comes from a column of the matrix, third from a row.
    o = (
        _angle_from_tan(
            convention[0], convention[1], matrix[..., i2], False, tait_bryan
        ),
        central_angle,
        _angle_from_tan(
            convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
        ),
    )
    return torch.stack(o, -1)
def random_quaternions(
    n: int, dtype: Optional[torch.dtype] = None, device: Optional[int] = None
) -> torch.Tensor:
    """
    Generate random quaternions representing rotations,
    i.e. versors with nonnegative real part.
    Args:
        n: Number of quaternions in a batch to return.
        dtype: Type to return.
        device: Desired device of returned tensor. Default:
            uses the current device for the default tensor type.
    Returns:
        Quaternions as tensor of shape (N, 4).
    """
    if isinstance(device, str):
        device = torch.device(device)
    raw = torch.randn((n, 4), dtype=dtype, device=device)
    norm_sq = (raw * raw).sum(1)
    # Dividing by a norm carrying the first component's sign both normalizes
    # and forces a nonnegative real part.
    return raw / _copysign(torch.sqrt(norm_sq), raw[:, 0])[:, None]
def random_rotations(
    n: int, dtype: Optional[torch.dtype] = None, device: Optional[int] = None
) -> torch.Tensor:
    """
    Generate random rotations as 3x3 rotation matrices.
    Args:
        n: Number of rotation matrices in a batch to return.
        dtype: Type to return.
        device: Device of returned tensor. Default: if None,
            uses the current device for the default tensor type.
    Returns:
        Rotation matrices as tensor of shape (n, 3, 3).
    """
    quats = random_quaternions(n, dtype=dtype, device=device)
    return quaternion_to_matrix(quats)
def random_rotation(
    dtype: Optional[torch.dtype] = None, device: Optional[int] = None
) -> torch.Tensor:
    """
    Generate a single random 3x3 rotation matrix.
    Args:
        dtype: Type to return
        device: Device of returned tensor. Default: if None,
            uses the current device for the default tensor type
    Returns:
        Rotation matrix as tensor of shape (3, 3).
    """
    batch = random_rotations(1, dtype, device)
    return batch[0]
def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert unit quaternions to standard form: flip any quaternion whose real
    (first) component is negative, leaving the rotation unchanged.
    Args:
        quaternions: Quaternions with real part first,
            as tensor of shape (..., 4).
    Returns:
        Standardized quaternions as tensor of shape (..., 4).
    """
    negative_real = quaternions[..., 0:1] < 0
    return torch.where(negative_real, -quaternions, quaternions)
def quaternion_raw_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Hamilton product of two quaternion tensors (real part first), without
    re-standardizing the sign of the result.
    Usual torch rules for broadcasting apply.

    Args:
        a: Quaternions as tensor of shape (..., 4), real part first.
        b: Quaternions as tensor of shape (..., 4), real part first.

    Returns:
        The product of a and b, a tensor of quaternions shape (..., 4).
    """
    w1, x1, y1, z1 = torch.unbind(a, -1)
    w2, x2, y2, z2 = torch.unbind(b, -1)
    return torch.stack(
        (
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        ),
        -1,
    )
def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Compose two rotations given as quaternions: multiply them and return
    the standardized versor (non-negative real part) of the product.
    Usual torch rules for broadcasting apply.

    Args:
        a: Quaternions as tensor of shape (..., 4), real part first.
        b: Quaternions as tensor of shape (..., 4), real part first.

    Returns:
        The product of a and b, a tensor of quaternions of shape (..., 4).
    """
    product = quaternion_raw_multiply(a, b)
    return standardize_quaternion(product)
def quaternion_invert(quaternion: torch.Tensor) -> torch.Tensor:
    """
    Invert unit quaternions by conjugation: keep the real part and negate
    the imaginary part.  Inputs must be versors (unit quaternions), for
    which the conjugate equals the inverse.

    Args:
        quaternion: Quaternions as tensor of shape (..., 4), with real part
            first.

    Returns:
        The inverse, a tensor of quaternions of shape (..., 4).
    """
    flip = torch.tensor([1, -1, -1, -1], device=quaternion.device)
    return flip * quaternion
def quaternion_apply(quaternion: torch.Tensor, point: torch.Tensor) -> torch.Tensor:
    """
    Rotate 3D points by unit quaternions via the sandwich product
    q * p * q^{-1}, where p is the point promoted to a pure quaternion.
    Usual torch rules for broadcasting apply.

    Args:
        quaternion: Tensor of quaternions, real part first, of shape (..., 4).
        point: Tensor of 3D points of shape (..., 3).

    Returns:
        Tensor of rotated points of shape (..., 3).
    """
    if point.size(-1) != 3:
        raise ValueError(f"Points are not in 3D, {point.shape}.")
    # Promote the point to a pure quaternion (zero real part).
    zeros = point.new_zeros(point.shape[:-1] + (1,))
    pure = torch.cat((zeros, point), -1)
    rotated = quaternion_raw_multiply(
        quaternion_raw_multiply(quaternion, pure),
        quaternion_invert(quaternion),
    )
    # Drop the (zero) real component of the resulting pure quaternion.
    return rotated[..., 1:]
def axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as axis/angle to rotation matrices, going
    through the quaternion representation.

    Args:
        axis_angle: Rotations given as a vector in axis angle form,
            as a tensor of shape (..., 3), where the magnitude is
            the angle turned anticlockwise in radians around the
            vector's direction.

    Returns:
        Rotation matrices as tensor of shape (..., 3, 3).
    """
    quaternions = axis_angle_to_quaternion(axis_angle)
    return quaternion_to_matrix(quaternions)
def matrix_to_axis_angle(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as rotation matrices to axis/angle form,
    going through the quaternion representation.

    Args:
        matrix: Rotation matrices as tensor of shape (..., 3, 3).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
        of shape (..., 3), where the magnitude is the angle
        turned anticlockwise in radians around the vector's
        direction.
    """
    quaternions = matrix_to_quaternion(matrix)
    return quaternion_to_axis_angle(quaternions)
def axis_angle_to_quaternion(axis_angle: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as axis/angle to quaternions.

    Args:
        axis_angle: Rotations given as a vector in axis angle form,
            as a tensor of shape (..., 3), where the magnitude is
            the angle turned anticlockwise in radians around the
            vector's direction.

    Returns:
        quaternions with real part first, as tensor of shape (..., 4).
    """
    angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
    half_angles = angles * 0.5
    eps = 1e-6
    # sin(x/2)/x is 0/0 at x == 0; for tiny angles substitute the Taylor
    # expansion sin(x/2)/x ~= 1/2 - x^2/48 to stay numerically stable.
    small = angles.abs() < eps
    taylor = 0.5 - (angles * angles) / 48
    safe_angles = torch.where(small, torch.ones_like(angles), angles)
    exact = torch.sin(half_angles) / safe_angles
    sin_half_over_angle = torch.where(small, taylor, exact)
    return torch.cat(
        [torch.cos(half_angles), axis_angle * sin_half_over_angle], dim=-1
    )
def quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions to axis/angle.

    Args:
        quaternions: quaternions with real part first,
            as tensor of shape (..., 4).

    Returns:
        Rotations given as a vector in axis angle form, as a tensor
        of shape (..., 3), where the magnitude is the angle
        turned anticlockwise in radians around the vector's
        direction.
    """
    norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
    half_angles = torch.atan2(norms, quaternions[..., :1])
    angles = 2 * half_angles
    eps = 1e-6
    # sin(x/2)/x -> 1/2 as x -> 0; near zero use the Taylor expansion
    # 1/2 - x^2/48 to avoid dividing by a vanishing angle.
    small = angles.abs() < eps
    taylor = 0.5 - (angles * angles) / 48
    safe_angles = torch.where(small, torch.ones_like(angles), angles)
    exact = torch.sin(half_angles) / safe_angles
    sin_half_over_angle = torch.where(small, taylor, exact)
    return quaternions[..., 1:] / sin_half_over_angle
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
    """
    Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
    using Gram--Schmidt orthogonalization per Section B of [1].

    Args:
        d6: 6D rotation representation, of size (*, 6)

    Returns:
        batch of rotation matrices of size (*, 3, 3)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    first_raw, second_raw = d6[..., :3], d6[..., 3:]
    row0 = F.normalize(first_raw, dim=-1)
    # Remove the component of the second vector along row0, then normalize,
    # giving an orthonormal pair; the third row is their cross product.
    projected = second_raw - (row0 * second_raw).sum(-1, keepdim=True) * row0
    row1 = F.normalize(projected, dim=-1)
    row2 = torch.cross(row0, row1, dim=-1)
    return torch.stack((row0, row1, row2), dim=-2)
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
    """
    Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
    by dropping the last row. Note that 6D representation is not unique.

    Args:
        matrix: batch of rotation matrices of size (*, 3, 3)

    Returns:
        6D rotation representation, of size (*, 6)

    [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
    On the Continuity of Rotation Representations in Neural Networks.
    IEEE Conference on Computer Vision and Pattern Recognition, 2019.
    Retrieved from http://arxiv.org/abs/1812.07035
    """
    batch_dims = matrix.size()[:-2]
    # The first two rows fully determine the rotation (third is their cross).
    first_two_rows = matrix[..., :2, :].clone()
    return first_two_rows.reshape(batch_dims + (6,))
anime-face-detector | anime-face-detector-main/demo_gradio.py | import argparse
import functools
import pathlib
import cv2
import gradio as gr
import numpy as np
import PIL.Image
import torch
import anime_face_detector
def detect(img, face_score_threshold: float, landmark_score_threshold: float,
           detector: anime_face_detector.LandmarkDetector) -> PIL.Image.Image:
    """Run the face/landmark detector on an uploaded image file and return
    an annotated PIL image: green boxes around faces scoring above
    ``face_score_threshold``, red dots for confident landmarks and yellow
    dots for landmarks below ``landmark_score_threshold``."""
    frame = cv2.imread(img.name)
    detections = detector(frame)
    annotated = frame.copy()
    for detection in detections:
        bbox = detection['bbox']
        bbox, box_score = bbox[:4], bbox[4]
        if box_score < face_score_threshold:
            continue
        bbox = np.round(bbox).astype(int)
        # Line thickness scales with the face size (minimum of 2 px).
        thickness = max(2, int(3 * (bbox[2:] - bbox[:2]).max() / 256))
        cv2.rectangle(annotated, tuple(bbox[:2]), tuple(bbox[2:]), (0, 255, 0),
                      thickness)
        for *point, point_score in detection['keypoints']:
            if point_score < landmark_score_threshold:
                color = (0, 255, 255)
            else:
                color = (0, 0, 255)
            point = np.round(point).astype(int)
            cv2.circle(annotated, tuple(point), thickness, color, cv2.FILLED)
    # OpenCV works in BGR; convert to RGB before handing the image to PIL.
    annotated = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    return PIL.Image.fromarray(annotated)
def main():
    """Parse CLI options, download a sample image if needed, build the
    anime-face detector and launch the Gradio demo UI."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--detector',
                        type=str,
                        default='yolov3',
                        choices=['yolov3', 'faster-rcnn'])
    parser.add_argument('--device',
                        type=str,
                        default='cuda:0',
                        choices=['cuda:0', 'cpu'])
    parser.add_argument('--face-score-threshold', type=float, default=0.5)
    parser.add_argument('--landmark-score-threshold', type=float, default=0.3)
    parser.add_argument('--score-slider-step', type=float, default=0.05)
    parser.add_argument('--port', type=int)
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--share', action='store_true')
    parser.add_argument('--live', action='store_true')
    args = parser.parse_args()
    # Fetch a bundled example image on first run so the demo has a sample.
    sample_path = pathlib.Path('input.jpg')
    if not sample_path.exists():
        torch.hub.download_url_to_file(
            'https://raw.githubusercontent.com/hysts/anime-face-detector/main/assets/input.jpg',
            sample_path.as_posix())
    detector = anime_face_detector.create_detector(args.detector,
                                                   device=args.device)
    # Bind the detector so Gradio only passes (image, thresholds) to detect;
    # update_wrapper keeps detect's metadata for the UI.
    func = functools.partial(detect, detector=detector)
    func = functools.update_wrapper(func, detect)
    title = 'hysts/anime-face-detector'
    description = 'Demo for hysts/anime-face-detector. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below.'
    article = "<a href='https://github.com/hysts/anime-face-detector'>GitHub Repo</a>"
    gr.Interface(
        func,
        [
            gr.inputs.Image(type='file', label='Input'),
            gr.inputs.Slider(0,
                             1,
                             step=args.score_slider_step,
                             default=args.face_score_threshold,
                             label='Face Score Threshold'),
            gr.inputs.Slider(0,
                             1,
                             step=args.score_slider_step,
                             default=args.landmark_score_threshold,
                             label='Landmark Score Threshold'),
        ],
        gr.outputs.Image(type='pil', label='Output'),
        server_port=args.port,
        title=title,
        description=description,
        article=article,
        examples=[
            [
                sample_path.as_posix(),
                args.face_score_threshold,
                args.landmark_score_threshold,
            ],
        ],
        enable_queue=True,
        live=args.live,
    ).launch(debug=args.debug, share=args.share)
# Launch the Gradio demo when executed as a script.
if __name__ == '__main__':
    main()
| 3,910 | 33.610619 | 165 | py |
anime-face-detector | anime-face-detector-main/anime_face_detector/detector.py | from __future__ import annotations
import pathlib
import warnings
from typing import Optional, Union
import cv2
import mmcv
import numpy as np
import torch.nn as nn
from mmdet.apis import inference_detector, init_detector
from mmpose.apis import inference_top_down_pose_model, init_pose_model
from mmpose.datasets import DatasetInfo
class LandmarkDetector:
    """Two-stage anime-face landmark detector.

    An optional mmdet face detector proposes bounding boxes, which are
    enlarged by ``box_scale_factor`` and fed to an mmpose top-down model
    that predicts the facial landmarks inside each box.
    """
    def __init__(
            self,
            landmark_detector_config_or_path: Union[mmcv.Config, str,
                                                    pathlib.Path],
            landmark_detector_checkpoint_path: Union[str, pathlib.Path],
            face_detector_config_or_path: Optional[Union[mmcv.Config, str,
                                                         pathlib.Path]] = None,
            face_detector_checkpoint_path: Optional[Union[
                str, pathlib.Path]] = None,
            device: str = 'cuda:0',
            flip_test: bool = True,
            box_scale_factor: float = 1.1):
        landmark_config = self._load_config(landmark_detector_config_or_path)
        self.dataset_info = DatasetInfo(
            landmark_config.dataset_info) # type: ignore
        face_detector_config = self._load_config(face_detector_config_or_path)
        self.landmark_detector = self._init_pose_model(
            landmark_config, landmark_detector_checkpoint_path, device,
            flip_test)
        # May be None: then __call__ expects boxes or uses the whole image.
        self.face_detector = self._init_face_detector(
            face_detector_config, face_detector_checkpoint_path, device)
        self.box_scale_factor = box_scale_factor
    @staticmethod
    def _load_config(
            config_or_path: Optional[Union[mmcv.Config, str, pathlib.Path]]
    ) -> Optional[mmcv.Config]:
        """Accept an mmcv.Config, a config-file path, or None, and return
        an mmcv.Config (or None)."""
        if config_or_path is None or isinstance(config_or_path, mmcv.Config):
            return config_or_path
        return mmcv.Config.fromfile(config_or_path)
    @staticmethod
    def _init_pose_model(config: mmcv.Config,
                         checkpoint_path: Union[str, pathlib.Path],
                         device: str, flip_test: bool) -> nn.Module:
        """Build the mmpose landmark model and set its flip-test flag."""
        if isinstance(checkpoint_path, pathlib.Path):
            checkpoint_path = checkpoint_path.as_posix()
        model = init_pose_model(config, checkpoint_path, device=device)
        model.cfg.model.test_cfg.flip_test = flip_test
        return model
    @staticmethod
    def _init_face_detector(config: Optional[mmcv.Config],
                            checkpoint_path: Optional[Union[str,
                                                            pathlib.Path]],
                            device: str) -> Optional[nn.Module]:
        """Build the mmdet face detector, or return None if no config is
        given (landmark-only mode)."""
        if config is not None:
            if isinstance(checkpoint_path, pathlib.Path):
                checkpoint_path = checkpoint_path.as_posix()
            model = init_detector(config, checkpoint_path, device=device)
        else:
            model = None
        return model
    def _detect_faces(self, image: np.ndarray) -> list[np.ndarray]:
        """Run the face detector and return scaled candidate boxes."""
        # predicted boxes using mmdet model have the format of
        # [x0, y0, x1, y1, score]
        boxes = inference_detector(self.face_detector, image)[0]
        # scale boxes by `self.box_scale_factor`
        boxes = self._update_pred_box(boxes)
        return boxes
    def _update_pred_box(self, pred_boxes: np.ndarray) -> list[np.ndarray]:
        """Grow each box about its center by ``self.box_scale_factor``,
        keeping the detection score (5th element) untouched."""
        boxes = []
        for pred_box in pred_boxes:
            box = pred_box[:4]
            size = box[2:] - box[:2] + 1
            new_size = size * self.box_scale_factor
            center = (box[:2] + box[2:]) / 2
            tl = center - new_size / 2
            br = tl + new_size
            pred_box[:4] = np.concatenate([tl, br])
            boxes.append(pred_box)
        return boxes
    def _detect_landmarks(
            self, image: np.ndarray,
            boxes: list[dict[str, np.ndarray]]) -> list[dict[str, np.ndarray]]:
        """Run the top-down pose model on the given face boxes."""
        preds, _ = inference_top_down_pose_model(
            self.landmark_detector,
            image,
            boxes,
            format='xyxy',
            dataset_info=self.dataset_info,
            return_heatmap=False)
        return preds
    @staticmethod
    def _load_image(
            image_or_path: Union[np.ndarray, str, pathlib.Path]) -> np.ndarray:
        """Return a BGR image array from an array or a file path."""
        if isinstance(image_or_path, np.ndarray):
            image = image_or_path
        elif isinstance(image_or_path, str):
            image = cv2.imread(image_or_path)
        elif isinstance(image_or_path, pathlib.Path):
            image = cv2.imread(image_or_path.as_posix())
        else:
            raise ValueError
        return image
    def __call__(
            self,
            image_or_path: Union[np.ndarray, str, pathlib.Path],
            boxes: Optional[list[np.ndarray]] = None
    ) -> list[dict[str, np.ndarray]]:
        """Detect face landmarks.
        Args:
            image_or_path: An image with BGR channel order or an image path.
            boxes: A list of bounding boxes for faces. Each bounding box
                should be of the form [x0, y0, x1, y1, [score]].
        Returns: A list of detection results. Each detection result has
            bounding box of the form [x0, y0, x1, y1, [score]], and landmarks
            of the form [x, y, score].
        """
        image = self._load_image(image_or_path)
        if boxes is None:
            if self.face_detector is not None:
                boxes = self._detect_faces(image)
            else:
                warnings.warn(
                    'Neither the face detector nor the bounding box is '
                    'specified. So the entire image is treated as the face '
                    'region.')
                h, w = image.shape[:2]
                boxes = [np.array([0, 0, w - 1, h - 1, 1])]
        box_list = [{'bbox': box} for box in boxes]
        return self._detect_landmarks(image, box_list)
| 5,849 | 38.527027 | 79 | py |
anime-face-detector | anime-face-detector-main/anime_face_detector/__init__.py | import pathlib
import torch
from .detector import LandmarkDetector
def get_config_path(model_name: str) -> pathlib.Path:
    """Return the path of the bundled config file for *model_name*.

    Face-detector configs ('faster-rcnn', 'yolov3') live under
    configs/mmdet; the landmark model ('hrnetv2') under configs/mmpose.
    """
    assert model_name in ['faster-rcnn', 'yolov3', 'hrnetv2']
    package_root = pathlib.Path(__file__).parent.resolve()
    subdir = 'mmdet' if model_name in ['faster-rcnn', 'yolov3'] else 'mmpose'
    return package_root / 'configs' / subdir / f'{model_name}.py'
def get_checkpoint_path(model_name: str) -> pathlib.Path:
    """Return the local checkpoint path for *model_name*, downloading the
    released weights into the torch hub cache on first use."""
    assert model_name in ['faster-rcnn', 'yolov3', 'hrnetv2']
    prefix = 'mmdet' if model_name in ['faster-rcnn', 'yolov3'] else 'mmpose'
    file_name = f'{prefix}_anime-face_{model_name}.pth'
    checkpoint_dir = pathlib.Path(torch.hub.get_dir()) / 'checkpoints'
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    checkpoint_path = checkpoint_dir / file_name
    if not checkpoint_path.exists():
        url = f'https://github.com/hysts/anime-face-detector/releases/download/v0.0.1/{file_name}'
        torch.hub.download_url_to_file(url, checkpoint_path.as_posix())
    return checkpoint_path
def create_detector(face_detector_name: str = 'yolov3',
                    landmark_model_name='hrnetv2',
                    device: str = 'cuda:0',
                    flip_test: bool = True,
                    box_scale_factor: float = 1.1) -> LandmarkDetector:
    """Build a LandmarkDetector from the bundled configs and released
    checkpoints for the chosen face detector and landmark model."""
    assert face_detector_name in ['yolov3', 'faster-rcnn']
    assert landmark_model_name in ['hrnetv2']
    # Resolve configs first, then checkpoints (the latter may download).
    face_config = get_config_path(face_detector_name)
    landmark_config = get_config_path(landmark_model_name)
    face_checkpoint = get_checkpoint_path(face_detector_name)
    landmark_checkpoint = get_checkpoint_path(landmark_model_name)
    return LandmarkDetector(landmark_config,
                            landmark_checkpoint,
                            face_config,
                            face_checkpoint,
                            device=device,
                            flip_test=flip_test,
                            box_scale_factor=box_scale_factor)
| 2,163 | 38.345455 | 98 | py |
# mmdet inference config: Faster R-CNN (ResNet-50 + FPN) detecting a single
# class (anime face).  Only test-time settings are defined here.
model = dict(type='FasterRCNN',
             backbone=dict(type='ResNet',
                           depth=50,
                           num_stages=4,
                           out_indices=(0, 1, 2, 3),
                           frozen_stages=1,
                           norm_cfg=dict(type='BN', requires_grad=True),
                           norm_eval=True,
                           style='pytorch'),
             neck=dict(type='FPN',
                       in_channels=[256, 512, 1024, 2048],
                       out_channels=256,
                       num_outs=5),
             rpn_head=dict(type='RPNHead',
                           in_channels=256,
                           feat_channels=256,
                           anchor_generator=dict(type='AnchorGenerator',
                                                 scales=[8],
                                                 ratios=[0.5, 1.0, 2.0],
                                                 strides=[4, 8, 16, 32, 64]),
                           bbox_coder=dict(type='DeltaXYWHBBoxCoder',
                                           target_means=[0.0, 0.0, 0.0, 0.0],
                                           target_stds=[1.0, 1.0, 1.0, 1.0])),
             roi_head=dict(
                 type='StandardRoIHead',
                 bbox_roi_extractor=dict(type='SingleRoIExtractor',
                                         roi_layer=dict(type='RoIAlign',
                                                        output_size=7,
                                                        sampling_ratio=0),
                                         out_channels=256,
                                         featmap_strides=[4, 8, 16, 32]),
                 bbox_head=dict(type='Shared2FCBBoxHead',
                                in_channels=256,
                                fc_out_channels=1024,
                                roi_feat_size=7,
                                num_classes=1,
                                bbox_coder=dict(
                                    type='DeltaXYWHBBoxCoder',
                                    target_means=[0.0, 0.0, 0.0, 0.0],
                                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                                reg_class_agnostic=False)),
             test_cfg=dict(rpn=dict(nms_pre=1000,
                                    max_per_img=1000,
                                    nms=dict(type='nms', iou_threshold=0.7),
                                    min_bbox_size=0),
                           rcnn=dict(score_thr=0.05,
                                     nms=dict(type='nms', iou_threshold=0.5),
                                     max_per_img=100)))
# Test-time preprocessing: resize to fit (1333, 800), normalize with
# ImageNet statistics, pad to a multiple of 32, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='MultiScaleFlipAug',
         img_scale=(1333, 800),
         flip=False,
         transforms=[
             dict(type='Resize', keep_ratio=True),
             dict(type='RandomFlip'),
             dict(type='Normalize',
                  mean=[123.675, 116.28, 103.53],
                  std=[58.395, 57.12, 57.375],
                  to_rgb=True),
             dict(type='Pad', size_divisor=32),
             dict(type='ImageToTensor', keys=['img']),
             dict(type='Collect', keys=['img'])
         ])
]
data = dict(test=dict(pipeline=test_pipeline))
| 3,338 | 48.835821 | 78 | py |
VectorQuantizedVAE | VectorQuantizedVAE-master/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, RelaxedOneHotCategorical
import math
class VQEmbeddingEMA(nn.Module):
    """Vector-quantization layer with an exponential-moving-average
    codebook update (VQ-VAE, van den Oord et al. 2017, Appendix A.1).

    Holds `latent_dim` independent codebooks, each with `num_embeddings`
    vectors of size `embedding_dim`.  The codebook lives in buffers (not
    parameters) because it is updated by EMA, not by the optimizer.
    """
    def __init__(self, latent_dim, num_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5):
        super(VQEmbeddingEMA, self).__init__()
        self.commitment_cost = commitment_cost
        self.decay = decay
        self.epsilon = epsilon
        embedding = torch.zeros(latent_dim, num_embeddings, embedding_dim)
        embedding.uniform_(-1/num_embeddings, 1/num_embeddings)
        self.register_buffer("embedding", embedding)
        self.register_buffer("ema_count", torch.zeros(latent_dim, num_embeddings))
        self.register_buffer("ema_weight", self.embedding.clone())
    def forward(self, x):
        """Quantize x of shape (B, N*D, H, W).

        Returns (quantized, commitment_loss, total_perplexity), where
        quantized has the same shape as x and carries straight-through
        gradients.
        """
        B, C, H, W = x.size()
        N, M, D = self.embedding.size()
        assert C == N * D
        x = x.view(B, N, D, H, W).permute(1, 0, 3, 4, 2)
        x_flat = x.detach().reshape(N, -1, D)
        # Squared L2 distances via ||e||^2 + ||x||^2 - 2 x.e (baddbmm).
        distances = torch.baddbmm(torch.sum(self.embedding ** 2, dim=2).unsqueeze(1) +
                                  torch.sum(x_flat ** 2, dim=2, keepdim=True),
                                  x_flat, self.embedding.transpose(1, 2),
                                  alpha=-2.0, beta=1.0)
        indices = torch.argmin(distances, dim=-1)
        encodings = F.one_hot(indices, M).float()
        quantized = torch.gather(self.embedding, 1, indices.unsqueeze(-1).expand(-1, -1, D))
        quantized = quantized.view_as(x)
        if self.training:
            # EMA update of cluster sizes, with Laplace smoothing to keep
            # unused codes from collapsing to zero count.
            self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=1)
            n = torch.sum(self.ema_count, dim=-1, keepdim=True)
            self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n
            # EMA update of summed assigned vectors; codebook = mean.
            dw = torch.bmm(encodings.transpose(1, 2), x_flat)
            self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw
            self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)
        # Commitment loss pulls encoder outputs toward their codes.
        e_latent_loss = F.mse_loss(x, quantized.detach())
        loss = self.commitment_cost * e_latent_loss
        # Straight-through estimator: identity gradient w.r.t. x.
        quantized = x + (quantized - x).detach()
        avg_probs = torch.mean(encodings, dim=1)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10), dim=-1))
        return quantized.permute(1, 0, 4, 2, 3).reshape(B, C, H, W), loss, perplexity.sum()
class VQEmbeddingGSSoft(nn.Module):
    """Soft vector-quantization layer using a relaxed (Gumbel-Softmax)
    categorical over codebook entries; the codebook is a learnable
    parameter trained by gradient descent.
    """
    def __init__(self, latent_dim, num_embeddings, embedding_dim):
        super(VQEmbeddingGSSoft, self).__init__()
        self.embedding = nn.Parameter(torch.Tensor(latent_dim, num_embeddings, embedding_dim))
        nn.init.uniform_(self.embedding, -1/num_embeddings, 1/num_embeddings)
    def forward(self, x):
        """Quantize x of shape (B, N*D, H, W).

        Returns (quantized, KL, total_perplexity).  Sampling is relaxed
        (differentiable) during training and hard argmax at eval time.
        """
        B, C, H, W = x.size()
        N, M, D = self.embedding.size()
        assert C == N * D
        x = x.view(B, N, D, H, W).permute(1, 0, 3, 4, 2)
        x_flat = x.reshape(N, -1, D)
        # Squared L2 distances via ||e||^2 + ||x||^2 - 2 x.e (baddbmm).
        distances = torch.baddbmm(torch.sum(self.embedding ** 2, dim=2).unsqueeze(1) +
                                  torch.sum(x_flat ** 2, dim=2, keepdim=True),
                                  x_flat, self.embedding.transpose(1, 2),
                                  alpha=-2.0, beta=1.0)
        distances = distances.view(N, B, H, W, M)
        # Closer codes get higher logits; temperature fixed at 0.5.
        dist = RelaxedOneHotCategorical(0.5, logits=-distances)
        if self.training:
            samples = dist.rsample().view(N, -1, M)
        else:
            samples = torch.argmax(dist.probs, dim=-1)
            samples = F.one_hot(samples, M).float()
            samples = samples.view(N, -1, M)
        quantized = torch.bmm(samples, self.embedding)
        quantized = quantized.view_as(x)
        # KL to the uniform prior over M codes: sum p * (log p + log M),
        # with 0 * log 0 := 0.
        KL = dist.probs * (dist.logits + math.log(M))
        KL[(dist.probs == 0).expand_as(KL)] = 0
        KL = KL.sum(dim=(0, 2, 3, 4)).mean()
        avg_probs = torch.mean(samples, dim=1)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10), dim=-1))
        return quantized.permute(1, 0, 4, 2, 3).reshape(B, C, H, W), KL, perplexity.sum()
class Residual(nn.Module):
    """Pre-activation residual block: x + (ReLU-Conv3x3-BN-ReLU-Conv1x1-BN)(x)."""

    def __init__(self, channels):
        super(Residual, self).__init__()
        self.block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
        )

    def forward(self, x):
        residual = self.block(x)
        return x + residual
class Encoder(nn.Module):
    """Downsample an RGB image by 4x (two stride-2 convs) and project to
    latent_dim * embedding_dim channels for the quantizer."""

    def __init__(self, channels, latent_dim, embedding_dim):
        super(Encoder, self).__init__()
        layers = [
            nn.Conv2d(3, channels, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels),
            Residual(channels),
            Residual(channels),
            nn.Conv2d(channels, latent_dim * embedding_dim, 1),
        ]
        self.encoder = nn.Sequential(*layers)

    def forward(self, x):
        features = self.encoder(x)
        return features
class Decoder(nn.Module):
    """Upsample quantized latents by 4x (two stride-2 transposed convs)
    and emit a per-pixel Categorical over 256 intensity levels for each
    of the 3 color channels."""

    def __init__(self, channels, latent_dim, embedding_dim):
        super(Decoder, self).__init__()
        layers = [
            nn.Conv2d(latent_dim * embedding_dim, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
            Residual(channels),
            Residual(channels),
            nn.ConvTranspose2d(channels, channels, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.ConvTranspose2d(channels, channels, 4, 2, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(True),
            nn.Conv2d(channels, 3 * 256, 1),
        ]
        self.decoder = nn.Sequential(*layers)

    def forward(self, x):
        logits = self.decoder(x)
        batch, _, height, width = logits.size()
        # (B, 3*256, H, W) -> (B, 3, H, W, 256): last axis indexes levels.
        logits = logits.view(batch, 3, 256, height, width).permute(0, 1, 3, 4, 2)
        return Categorical(logits=logits)
class VQVAE(nn.Module):
    """VQ-VAE with an EMA-updated codebook: encoder -> quantizer -> decoder."""

    def __init__(self, channels, latent_dim, num_embeddings, embedding_dim):
        super(VQVAE, self).__init__()
        self.encoder = Encoder(channels, latent_dim, embedding_dim)
        self.codebook = VQEmbeddingEMA(latent_dim, num_embeddings, embedding_dim)
        self.decoder = Decoder(channels, latent_dim, embedding_dim)

    def forward(self, x):
        features = self.encoder(x)
        quantized, loss, perplexity = self.codebook(features)
        dist = self.decoder(quantized)
        return dist, loss, perplexity
class GSSOFT(nn.Module):
    """VAE with soft (Gumbel-Softmax) vector quantization: encoder ->
    relaxed quantizer -> decoder."""

    def __init__(self, channels, latent_dim, num_embeddings, embedding_dim):
        super(GSSOFT, self).__init__()
        self.encoder = Encoder(channels, latent_dim, embedding_dim)
        self.codebook = VQEmbeddingGSSoft(latent_dim, num_embeddings, embedding_dim)
        self.decoder = Decoder(channels, latent_dim, embedding_dim)

    def forward(self, x):
        features = self.encoder(x)
        quantized, KL, perplexity = self.codebook(features)
        dist = self.decoder(quantized)
        return dist, KL, perplexity
| 7,126 | 36.909574 | 115 | py |
VectorQuantizedVAE | VectorQuantizedVAE-master/train.py | import argparse
from pathlib import Path
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms, utils
from model import VQVAE, GSSOFT
def save_checkpoint(model, optimizer, step, checkpoint_dir):
    """Serialize model/optimizer state and the global step to
    ``<checkpoint_dir>/model.ckpt-<step>.pt`` and log the path."""
    checkpoint_path = checkpoint_dir / "model.ckpt-{}.pt".format(step)
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "step": step,
        },
        checkpoint_path,
    )
    print("Saved checkpoint: {}".format(checkpoint_path))
def shift(x):
    """Center a [0, 1]-valued tensor to [-0.5, 0.5] (dataset transform)."""
    return x - 0.5
def train_gssoft(args):
    """Train the GSSOFT (Gumbel-Softmax VQ) model on CIFAR-10.

    Logs per-epoch train/test metrics (log-likelihood, KL, ELBO,
    bits-per-dim, codebook perplexity) to TensorBoard and saves a
    checkpoint every 25000 steps.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = GSSOFT(args.channels, args.latent_dim, args.num_embeddings, args.embedding_dim)
    model.to(device)
    # Run name encodes the hyperparameters; doubles as the checkpoint dir.
    model_name = "{}_C_{}_N_{}_M_{}_D_{}".format(args.model, args.channels, args.latent_dim,
                                                 args.num_embeddings, args.embedding_dim)
    checkpoint_dir = Path(model_name)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=Path("runs") / model_name)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    if args.resume is not None:
        print("Resume checkpoint from: {}:".format(args.resume))
        checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        global_step = checkpoint["step"]
    else:
        global_step = 0
    # Inputs are shifted to [-0.5, 0.5]; integer pixel targets are
    # recovered later as (images + 0.5) * 255.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(shift)
    ])
    training_dataset = datasets.CIFAR10("./CIFAR10", train=True, download=True,
                                        transform=transform)
    test_dataset = datasets.CIFAR10("./CIFAR10", train=False, download=True,
                                    transform=transform)
    training_dataloader = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True,
                                     num_workers=args.num_workers, pin_memory=True)
    test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=True, drop_last=True,
                                 num_workers=args.num_workers, pin_memory=True)
    num_epochs = args.num_training_steps // len(training_dataloader) + 1
    start_epoch = global_step // len(training_dataloader) + 1
    # N = number of observed dimensions per image (3 x 32 x 32 pixels).
    N = 3 * 32 * 32
    for epoch in range(start_epoch, num_epochs + 1):
        model.train()
        average_logp = average_KL = average_elbo = average_bpd = average_perplexity = 0
        for i, (images, _) in enumerate(tqdm(training_dataloader), 1):
            images = images.to(device)
            dist, KL, perplexity = model(images)
            targets = (images + 0.5) * 255
            targets = targets.long()
            logp = dist.log_prob(targets).sum((1, 2, 3)).mean()
            # The training loss is exactly the (per-dimension) negative ELBO.
            loss = (KL - logp) / N
            elbo = (KL - logp) / N
            bpd = elbo / np.log(2)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1
            if global_step % 25000 == 0:
                save_checkpoint(model, optimizer, global_step, checkpoint_dir)
            # Running means over the epoch (incremental average update).
            average_logp += (logp.item() - average_logp) / i
            average_KL += (KL.item() - average_KL) / i
            average_elbo += (elbo.item() - average_elbo) / i
            average_bpd += (bpd.item() - average_bpd) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
        writer.add_scalar("logp/train", average_logp, epoch)
        writer.add_scalar("kl/train", average_KL, epoch)
        writer.add_scalar("elbo/train", average_elbo, epoch)
        writer.add_scalar("bpd/train", average_bpd, epoch)
        writer.add_scalar("perplexity/train", average_perplexity, epoch)
        model.eval()
        average_logp = average_KL = average_elbo = average_bpd = average_perplexity = 0
        for i, (images, _) in enumerate(test_dataloader, 1):
            images = images.to(device)
            with torch.no_grad():
                dist, KL, perplexity = model(images)
            targets = (images + 0.5) * 255
            targets = targets.long()
            logp = dist.log_prob(targets).sum((1, 2, 3)).mean()
            elbo = (KL - logp) / N
            bpd = elbo / np.log(2)
            average_logp += (logp.item() - average_logp) / i
            average_KL += (KL.item() - average_KL) / i
            average_elbo += (elbo.item() - average_elbo) / i
            average_bpd += (bpd.item() - average_bpd) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
        writer.add_scalar("logp/test", average_logp, epoch)
        writer.add_scalar("kl/test", average_KL, epoch)
        writer.add_scalar("elbo/test", average_elbo, epoch)
        writer.add_scalar("bpd/test", average_bpd, epoch)
        writer.add_scalar("perplexity/test", average_perplexity, epoch)
        # Log argmax reconstructions of the last test batch as an image grid.
        samples = torch.argmax(dist.logits, dim=-1)
        grid = utils.make_grid(samples.float() / 255)
        writer.add_image("reconstructions", grid, epoch)
        print("epoch:{}, logp:{:.3E}, KL:{:.3E}, elbo:{:.3f}, bpd:{:.3f}, perplexity:{:.3f}"
              .format(epoch, average_logp, average_KL, average_elbo, average_bpd, average_perplexity))
def train_vqvae(args):
    """Train the VQ-VAE (EMA codebook) model on CIFAR-10.

    Mirrors train_gssoft, but the loss is reconstruction NLL plus the
    commitment (VQ) loss, and the reported KL is the constant
    log-uniform code cost latent_dim * 8 * 8 * log(num_embeddings).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = VQVAE(args.channels, args.latent_dim, args.num_embeddings, args.embedding_dim)
    model.to(device)
    # Run name encodes the hyperparameters; doubles as the checkpoint dir.
    model_name = "{}_C_{}_N_{}_M_{}_D_{}".format(args.model, args.channels, args.latent_dim,
                                                 args.num_embeddings, args.embedding_dim)
    checkpoint_dir = Path(model_name)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=Path("runs") / model_name)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    if args.resume is not None:
        print("Resume checkpoint from: {}:".format(args.resume))
        checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        global_step = checkpoint["step"]
    else:
        global_step = 0
    # Inputs are shifted to [-0.5, 0.5]; integer pixel targets are
    # recovered later as (images + 0.5) * 255.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(shift)
    ])
    training_dataset = datasets.CIFAR10("./CIFAR10", train=True, download=True,
                                        transform=transform)
    test_dataset = datasets.CIFAR10("./CIFAR10", train=False, download=True,
                                    transform=transform)
    training_dataloader = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True,
                                     num_workers=args.num_workers, pin_memory=True)
    test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=True, drop_last=True,
                                 num_workers=args.num_workers, pin_memory=True)
    num_epochs = args.num_training_steps // len(training_dataloader) + 1
    start_epoch = global_step // len(training_dataloader) + 1
    # N = observed dimensions per image; KL is the fixed cost of the
    # 8x8 grid of discrete codes under a uniform prior.
    N = 3 * 32 * 32
    KL = args.latent_dim * 8 * 8 * np.log(args.num_embeddings)
    for epoch in range(start_epoch, num_epochs + 1):
        model.train()
        average_logp = average_vq_loss = average_elbo = average_bpd = average_perplexity = 0
        for i, (images, _) in enumerate(tqdm(training_dataloader), 1):
            images = images.to(device)
            dist, vq_loss, perplexity = model(images)
            targets = (images + 0.5) * 255
            targets = targets.long()
            logp = dist.log_prob(targets).sum((1, 2, 3)).mean()
            # Optimized loss: per-dim reconstruction NLL + commitment loss.
            loss = - logp / N + vq_loss
            elbo = (KL - logp) / N
            bpd = elbo / np.log(2)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1
            if global_step % 25000 == 0:
                save_checkpoint(model, optimizer, global_step, checkpoint_dir)
            # Running means over the epoch (incremental average update).
            average_logp += (logp.item() - average_logp) / i
            average_vq_loss += (vq_loss.item() - average_vq_loss) / i
            average_elbo += (elbo.item() - average_elbo) / i
            average_bpd += (bpd.item() - average_bpd) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
        writer.add_scalar("logp/train", average_logp, epoch)
        writer.add_scalar("kl/train", KL, epoch)
        writer.add_scalar("vqloss/train", average_vq_loss, epoch)
        writer.add_scalar("elbo/train", average_elbo, epoch)
        writer.add_scalar("bpd/train", average_bpd, epoch)
        writer.add_scalar("perplexity/train", average_perplexity, epoch)
        model.eval()
        average_logp = average_vq_loss = average_elbo = average_bpd = average_perplexity = 0
        for i, (images, _) in enumerate(test_dataloader, 1):
            images = images.to(device)
            with torch.no_grad():
                dist, vq_loss, perplexity = model(images)
            targets = (images + 0.5) * 255
            targets = targets.long()
            logp = dist.log_prob(targets).sum((1, 2, 3)).mean()
            elbo = (KL - logp) / N
            bpd = elbo / np.log(2)
            average_logp += (logp.item() - average_logp) / i
            average_vq_loss += (vq_loss.item() - average_vq_loss) / i
            average_elbo += (elbo.item() - average_elbo) / i
            average_bpd += (bpd.item() - average_bpd) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
        writer.add_scalar("logp/test", average_logp, epoch)
        writer.add_scalar("kl/test", KL, epoch)
        writer.add_scalar("vqloss/test", average_vq_loss, epoch)
        writer.add_scalar("elbo/test", average_elbo, epoch)
        writer.add_scalar("bpd/test", average_bpd, epoch)
        writer.add_scalar("perplexity/test", average_perplexity, epoch)
        # Log argmax reconstructions of the last test batch as an image grid.
        samples = torch.argmax(dist.logits, dim=-1)
        grid = utils.make_grid(samples.float() / 255)
        writer.add_image("reconstructions", grid, epoch)
        print("epoch:{}, logp:{:.3E}, vq loss:{:.3E}, elbo:{:.3f}, bpd:{:.3f}, perplexity:{:.3f}"
              .format(epoch, average_logp, average_vq_loss, average_elbo, average_bpd, average_perplexity))
# CLI entry point: parse hyperparameters and dispatch to the chosen trainer.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-workers", type=int, default=4, help="Number of dataloader workers.")
    parser.add_argument("--resume", type=str, default=None, help="Checkpoint path to resume.")
    parser.add_argument("--model", choices=["VQVAE", "GSSOFT"], help="Select model to train (either VQVAE or GSSOFT)")
    parser.add_argument("--channels", type=int, default=256, help="Number of channels in conv layers.")
    parser.add_argument("--latent-dim", type=int, default=8, help="Dimension of categorical latents.")
    parser.add_argument("--num-embeddings", type=int, default=128, help="Number of codebook embeddings size.")
    parser.add_argument("--embedding-dim", type=int, default=32, help="Dimension of codebook embeddings.")
    parser.add_argument("--learning-rate", type=float, default=5e-4, help="Learning rate.")
    parser.add_argument("--batch-size", type=int, default=128, help="Batch size.")
    parser.add_argument("--num-training-steps", type=int, default=250000, help="Number of training steps.")
    args = parser.parse_args()
    if args.model == "VQVAE":
        train_vqvae(args)
    if args.model == "GSSOFT":
        train_gssoft(args)
| 11,846 | 41.310714 | 118 | py |
psal | psal-master/test.py | import torch
from PIL import Image
from time import time
from torchvision import transforms
from torchvision.utils import save_image
from psal import PSAttention
# Patch-size constants for the PatchMatch attention (HPSIZE is unused below).
PSIZE = 7
HPSIZE = PSIZE//2
tf = transforms.ToTensor()
# NOTE(review): imgb is assigned img1 and imga is assigned img2 — looks
# swapped relative to the variable names; confirm which image is the query.
imgb = "data/img1_512.png"
imga = "data/img2_512.png"
s = 512
# Load both images as (1, C, H, W) CUDA tensors in [0, 1].
a = tf(Image.open(imga).resize((s,s))).to("cuda").unsqueeze(0)
b = tf(Image.open(imgb).resize((s,s))).to("cuda").unsqueeze(0)
attention = PSAttention(n_iters=10, patch_size=7, aggregation=False)
if __name__ == '__main__':
    # Smoke test: reconstruct image `a` from image `b` via patch attention
    # (query = a, key = value = b) and report the reconstruction MSE.
    start_time = time()
    reconstruction = attention(a, b, b)
    loss = torch.mean((reconstruction - a)**2)
    print("Success")
    print(f"Reconstruction loss: {loss.item():0.05f} in {(time() - start_time)*10:0.02f} ms")
    print("Saving reconstruction to: output.png")
    start_time = time()
    save_image(reconstruction.clone().detach(), f"output.png")
| 874 | 24 | 93 | py |
psal | psal-master/setup.py | from setuptools import setup, find_packages
from torch.utils import cpp_extension
# Build configuration: installs the pure-Python attention module and compiles
# the two CUDA extensions (patchmatch and its masked variant) from source.
setup(name='psal',
      package_dir={"psal": "src"},
      py_modules=["psal.psal_attention"],
      ext_modules=[
          cpp_extension.CUDAExtension('psal.patchmatch', ['src/patchmatch.cu']),
          cpp_extension.CUDAExtension('psal.patchmatch_masked', ['src/patchmatch_masked.cu'])
      ],
      cmdclass={
          'build_ext': cpp_extension.BuildExtension
      }
      )
| 462 | 29.866667 | 93 | py |
psal | psal-master/src/psal_attention.py | import torch
from torch.autograd import Function
from torch.nn.functional import pad, unfold, conv2d
from .patchmatch import backward, patchmatch
from .patchmatch_masked import backward_masked, patchmatch_masked
class PatchMatch(Function):
    """Autograd wrapper around the CUDA ``patchmatch`` extension.

    Forward computes, for each pixel of ``a``, matching coordinates in ``b``
    (``shift_map``) together with the corresponding patch costs (``cost_map``).
    Backward propagates the cost-map gradient to both inputs via the custom
    CUDA ``backward`` kernel; the discrete shift map receives no gradient.
    """
    @staticmethod
    def forward(ctx, a, b, patch_size=3, n_iters=10):
        shift_map, cost_map = patchmatch(a, b, patch_size, n_iters=n_iters)
        # Make sure the kernel has finished before the results are saved.
        torch.cuda.synchronize()
        ctx.save_for_backward(a, b, shift_map, torch.tensor(patch_size))
        # int64 so callers can use the map directly for advanced indexing.
        shift_map = shift_map.type(torch.int64)
        return shift_map, cost_map
    @staticmethod
    def backward(ctx, shift_map_grad, cost_map_grad):
        # shift_map_grad is ignored: the shift map is discrete.
        a, b, shift_map, patch_size = ctx.saved_tensors
        grad_a, grad_b = backward(a, b, shift_map, cost_map_grad, patch_size)
        torch.cuda.synchronize()
        # No gradients for patch_size / n_iters.
        return grad_a, grad_b, None, None
class PSAttention(torch.nn.Module):
    """Patch-similarity attention driven by PatchMatch correspondences.

    For every query position, candidate matches in the key image are found
    with :class:`PatchMatch`; the value image is then reconstructed either
    from the central pixel of each match or — with ``aggregation=True`` — by
    softmax-weighting all ``patch_size**2`` overlapping patch votes.

    :param patch_size: Side length of the square patching window.
    :param n_iters: Number of PatchMatch iterations.
    :param T: Softmax temperature applied to the negated match costs.
    :param aggregation: If True, aggregate over all overlapping patch votes
        instead of using only the central pixel of each match.
    """
    def __init__(self, patch_size=3, n_iters=5, T=1.0, aggregation=False):
        super().__init__()
        self.patch_size = patch_size
        self.n_iters = n_iters
        self.T = T
        self.aggregation = aggregation
    def attention(self, q, k, v):
        """Reconstruct a single (un-batched) value image ``v`` from matches of ``q`` in ``k``."""
        shift_map, cost_map = PatchMatch.apply(q, k, self.patch_size, self.n_iters)
        if not self.aggregation:
            # Simple reconstruction using the central pixel
            cost_map = torch.softmax(-cost_map/self.T, dim=0)
            reconstruction = torch.sum(cost_map[None, None, :, :, :] * v[:,shift_map[0], shift_map[1]], dim=2)
        else:
            reconstruction = self.aggregate(v, shift_map, cost_map)
        return reconstruction
    def aggregate(self, v, shift_map, cost_map):
        """Softmax-aggregate every overlapping patch vote for each pixel of the output."""
        p = self.patch_size//2
        K, H, W = cost_map.shape
        padded_cost_map = pad(cost_map, (p,p,p,p), value=10)
        padded_shift_map = pad(shift_map, (p,p,p,p), value=10) # Add a padding value in the valid region
        # Allocate scratch tensors on the same device as the inputs instead of
        # hard-coding "cuda", so the module is not tied to one device.
        all_cost_map = torch.zeros((self.patch_size*self.patch_size*K, H, W), device=cost_map.device)
        all_shift_map = torch.zeros((2, self.patch_size*self.patch_size*K, H, W), dtype=torch.int64, device=shift_map.device)
        idx = 0
        # Gather, for every pixel, the K candidates of each of the
        # patch_size*patch_size patches that overlap it.
        for di in range(self.patch_size):
            for dj in range(self.patch_size):
                start_i, start_j = di, dj
                end_i, end_j = H + di, W + dj
                pi, pj = p - di, p - dj # Relative to the patch position (reference is central pixel)
                all_cost_map[K*idx:K*(idx+1)] = padded_cost_map[:,start_i:end_i, start_j:end_j]
                all_shift_map[0, K*idx:K*(idx+1)] = padded_shift_map[0,:,start_i:end_i,start_j:end_j] + pi
                all_shift_map[1, K*idx:K*(idx+1)] = padded_shift_map[1,:,start_i:end_i,start_j:end_j] + pj
                idx += 1
        all_cost_map = torch.softmax(-all_cost_map / self.T, dim=0)
        # Clamp shifted coordinates back into the valid index range of v.
        all_shift_map[0] = torch.clamp(all_shift_map[0], 0, v.shape[1]-1)
        all_shift_map[1] = torch.clamp(all_shift_map[1], 0, v.shape[2]-1)
        return torch.sum(all_cost_map[None, None, :, :, :] * v[:,all_shift_map[0], all_shift_map[1]], dim=2)
    def forward(self, q, k, v):
        """Apply patch attention per batch element; returns (B, C_v, H_q, W_q)."""
        assert(q.shape[0] == k.shape[0]) # Same batch size
        assert(q.shape[1] == k.shape[1]) # Same number of channels (Q/K)
        assert(k.shape[2] == v.shape[2]) # Same spatial dimensions (K/V)
        assert(k.shape[3] == v.shape[3])
        output = torch.zeros(q.shape[0], v.shape[1], q.shape[2], q.shape[3], device=q.device)
        # Process batch elements one by one
        for i, (qi, ki, vi) in enumerate(zip(q, k, v)):
            output[i] = self.attention(qi, ki, vi)
        return output
class PatchMatchMasked(Function):
    """Autograd wrapper around the CUDA ``patchmatch_masked`` extension.

    Like :class:`PatchMatch`, but matching is restricted by the second input
    (a mask), and backward returns a gradient only for the first input.
    """
    @staticmethod
    def forward(ctx, a, b, patch_size=3, n_iters=10):
        shift_map, cost_map = patchmatch_masked(a, b, patch_size=patch_size, n_iters=n_iters)
        # Make sure the kernel has finished before the results are saved.
        torch.cuda.synchronize()
        ctx.save_for_backward(a, b, shift_map, torch.tensor(patch_size))
        # int64 so callers can use the map directly for advanced indexing.
        shift_map = shift_map.type(torch.int64)
        return shift_map, cost_map
    @staticmethod
    def backward(ctx, shift_map_grad, cost_map_grad):
        # shift_map_grad is ignored: the shift map is discrete.
        a, b, shift_map, patch_size = ctx.saved_tensors
        grad_a = backward_masked(a, b, shift_map, cost_map_grad, patch_size)
        torch.cuda.synchronize()
        # No gradient for the mask, patch_size or n_iters.
        return grad_a, None, None, None
class PSAttentionMasked(torch.nn.Module):
    """Masked variant of :class:`PSAttention`.

    Matching is restricted by a mask through :class:`PatchMatchMasked`, so the
    image can only attend to its own unmasked regions (e.g. for inpainting).

    :param patch_size: Side length of the square patching window.
    :param n_iters: Number of PatchMatch iterations.
    :param T: Softmax temperature (default for the aggregation path).
    :param aggregation: If True, aggregate over all overlapping patch votes
        instead of using only the central pixel of each match.
    """
    def __init__(self, patch_size=3, n_iters=5, T=1.0, aggregation=False):
        super().__init__()
        self.patch_size = patch_size
        self.n_iters = n_iters
        self.T = T
        self.aggregation = aggregation
    def attention(self, x, mask, v, T=1.0):
        """Reconstruct a single (un-batched) image from its own masked matches."""
        shift_map, cost_map = PatchMatchMasked.apply(x, mask, self.patch_size, self.n_iters)
        if not self.aggregation:
            # Simple reconstruction using the central pixel
            # NOTE(review): uses the per-call T here but self.T in aggregate()
            # below — confirm whether this asymmetry is intended.
            cost_map = torch.softmax(-cost_map/T, dim=0)
            reconstruction = torch.sum(cost_map[None, None, :, :, :] * v[:,shift_map[0], shift_map[1]], dim=2)
        else:
            # NOTE(review): aggregates from x (not v), unlike PSAttention which
            # aggregates from v — confirm this is deliberate.
            reconstruction = self.aggregate(x, shift_map, cost_map)
        return reconstruction
    def aggregate(self, v, shift_map, cost_map):
        """Softmax-aggregate every overlapping patch vote for each pixel of the output."""
        p = self.patch_size//2
        K, H, W = cost_map.shape
        padded_cost_map = pad(cost_map, (p,p,p,p), value=10)
        padded_shift_map = pad(shift_map, (p,p,p,p), value=10) # Add a padding value in the valid region
        # Allocate scratch tensors on the same device as the inputs instead of
        # hard-coding "cuda", so the module is not tied to one device.
        all_cost_map = torch.zeros((self.patch_size*self.patch_size*K, H, W), device=cost_map.device)
        all_shift_map = torch.zeros((2, self.patch_size*self.patch_size*K, H, W), dtype=torch.int64, device=shift_map.device)
        idx = 0
        # Gather, for every pixel, the K candidates of each of the
        # patch_size*patch_size patches that overlap it.
        for di in range(self.patch_size):
            for dj in range(self.patch_size):
                start_i, start_j = di, dj
                end_i, end_j = H + di, W + dj
                pi, pj = p - di, p - dj # Relative to the patch position (reference is central pixel)
                all_cost_map[K*idx:K*(idx+1)] = padded_cost_map[:,start_i:end_i, start_j:end_j]
                all_shift_map[0, K*idx:K*(idx+1)] = padded_shift_map[0,:,start_i:end_i,start_j:end_j] + pi
                all_shift_map[1, K*idx:K*(idx+1)] = padded_shift_map[1,:,start_i:end_i,start_j:end_j] + pj
                idx += 1
        all_cost_map = torch.softmax(-all_cost_map / self.T, dim=0)
        # Clamp shifted coordinates back into the valid index range of v.
        all_shift_map[0] = torch.clamp(all_shift_map[0], 0, v.shape[1]-1)
        all_shift_map[1] = torch.clamp(all_shift_map[1], 0, v.shape[2]-1)
        return torch.sum(all_cost_map[None, None, :, :, :] * v[:,all_shift_map[0], all_shift_map[1]], dim=2)
    def forward(self, x, mask, v=None, T=1.0):
        """Apply masked patch attention per batch element; v defaults to x."""
        if v is None:
            v = x
        output = torch.zeros(x.shape[0], v.shape[1], x.shape[2], x.shape[3], device=x.device)
        # Process batch elements one by one
        for i, (xi, maski, vi) in enumerate(zip(x, mask, v)):
            output[i] = self.attention(xi, maski, vi, T=T)
        return output
| 6,987 | 43.227848 | 115 | py |
swissbert | swissbert-master/evaluation/swissner/run_ner.py | # Adapted from https://github.com/huggingface/transformers/blob/6f79d264422245d88c7a34032c1a8254a0c65752/examples/pytorch/token-classification/run_ner.py
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for token classification.
"""
# You can also adapt this script on your own token classification task and datasets. Pointers for this are left as
# comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import ClassLabel, disable_caching
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorForTokenClassification,
HfArgumentParser,
PretrainedConfig,
PreTrainedTokenizerFast,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
from utils import custom_load_dataset as load_dataset
disable_caching()
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.27.1")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Only required field: everything else falls back to this path/identifier.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    # Needed when fine-tuning a checkpoint whose classification head was sized
    # for a different label set.
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."})
    # Either a hub dataset (dataset_name [+ dataset_config_name]) or local
    # csv/json files (train_file/validation_file/test_file) must be provided;
    # __post_init__ enforces this.
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a csv or JSON file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."},
    )
    text_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."}
    )
    label_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If set, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to model maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to put the label for one word on all tokens of generated by that word or just on the "
                "one (in which case the other tokens will have a padding index)."
            )
        },
    )
    return_entity_level_metrics: bool = field(
        default=False,
        metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."},
    )
    def __post_init__(self):
        # Validate the data-source arguments and normalize the task name.
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        self.task_name = self.task_name.lower()
def main():
    """Run the full token-classification pipeline: parse arguments, load the
    dataset and model (with SwissBERT/X-MOD language-adapter handling),
    tokenize and align labels, then train / evaluate / predict as requested.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_ner", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
        features = raw_datasets["train"].features
    else:
        column_names = raw_datasets["validation"].column_names
        features = raw_datasets["validation"].features
    if data_args.text_column_name is not None:
        text_column_name = data_args.text_column_name
    elif "tokens" in column_names:
        text_column_name = "tokens"
    else:
        text_column_name = column_names[0]
    if data_args.label_column_name is not None:
        label_column_name = data_args.label_column_name
    elif f"{data_args.task_name}_tags" in column_names:
        label_column_name = f"{data_args.task_name}_tags"
    else:
        label_column_name = column_names[1]
    # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
    # unique labels.
    def get_label_list(labels):
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
    # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
    # Otherwise, we have to get the list of labels manually.
    labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
    if labels_are_int:
        label_list = features[label_column_name].feature.names
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(raw_datasets["train"][label_column_name])
        label_to_id = {l: i for i, l in enumerate(label_list)}
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path
    # Detect adapter-based models from the checkpoint directory name.
    is_swissbert = "swissbert" in Path(model_args.model_name_or_path).name
    is_xmod = "xmod" in Path(model_args.model_name_or_path).name
    if config.model_type in {"bloom", "gpt2", "roberta"}:
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=True,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
            add_prefix_space=True,
        )
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            tokenizer_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=True,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    if is_swissbert:
        # Map dataset language codes to SwissBERT adapter indices and select
        # the matching language adapter (Romansh falls back to rm_CH).
        adapter_map = {
            "de": 0,
            "de_CH": 0,
            "fr": 1,
            "fr_CH": 1,
            "it": 2,
            "it_CH": 2,
        }
        if data_args.dataset_config_name == "de":
            model.set_default_language("de_CH")
        elif data_args.dataset_config_name == "fr":
            model.set_default_language("fr_CH")
        elif data_args.dataset_config_name == "it":
            model.set_default_language("it_CH")
        elif data_args.dataset_config_name == "rm":
            model.set_default_language("rm_CH")
        logger.info(f"Setting default language to {model.config.default_language}")
    elif is_xmod:
        # X-MOD adapter indices; Romansh has no adapter and reuses Italian.
        adapter_map = {
            "de": 8,
            "de_CH": 8,
            "fr": 7,
            "fr_CH": 7,
            "it": 25,
            "it_CH": 25,
            "en": 0,
            "es": 12,
            "nl": 21,
            "pl": 19,
            "pt": 16,
            "ru": 3,
        }
        if data_args.dataset_config_name == "de":
            model.set_default_language("de_DE")
        elif data_args.dataset_config_name == "fr":
            model.set_default_language("fr_XX")
        elif data_args.dataset_config_name == "it":
            model.set_default_language("it_IT")
        elif data_args.dataset_config_name == "rm":
            model.set_default_language("it_IT")
        logger.info(f"Setting default language to {model.config.default_language}")
    if is_swissbert or is_xmod:
        # Keep the pretrained language adapters fixed during fine-tuning.
        logger.info("Freezing adapters")
        for layer in model.roberta.encoder.layer:
            if layer.output.adapter_layer_norm is not None:
                for parameter in layer.output.adapter_layer_norm.parameters():
                    parameter.requires_grad = False
            for parameter in layer.output.adapter_modules.parameters():
                parameter.requires_grad = False
    # Tokenizer check: this script requires a fast tokenizer.
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. Checkout the big table of models at"
            " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet"
            " this requirement"
        )
    # Model has labels -> use them.
    if model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id:
        if list(sorted(model.config.label2id.keys())) == list(sorted(label_list)):
            # Reorganize `label_list` to match the ordering of the model.
            if labels_are_int:
                label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)}
                label_list = [model.config.id2label[i] for i in range(num_labels)]
            else:
                label_list = [model.config.id2label[i] for i in range(num_labels)]
                label_to_id = {l: i for i, l in enumerate(label_list)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: ",
                f"model labels: {list(sorted(model.config.label2id.keys()))}, dataset labels:"
                f" {list(sorted(label_list))}.\nIgnoring the model labels as a result.",
            )
    # Set the correspondences label/ID inside the model config
    model.config.label2id = {l: i for i, l in enumerate(label_list)}
    model.config.id2label = {i: l for i, l in enumerate(label_list)}
    # Map that sends B-Xxx label to its I-Xxx counterpart
    b_to_i_label = []
    for idx, label in enumerate(label_list):
        if label.startswith("B-") and label.replace("B-", "I-") in label_list:
            b_to_i_label.append(label_list.index(label.replace("B-", "I-")))
        else:
            b_to_i_label.append(idx)
    # Preprocessing the dataset
    # Padding strategy
    padding = "max_length" if data_args.pad_to_max_length else False
    # Tokenize all texts and align the labels with them.
    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples[text_column_name],
            padding=padding,
            truncation=True,
            max_length=data_args.max_seq_length,
            # We use this argument because the texts in our dataset are lists of words (with a label for each word).
            is_split_into_words=True,
        )
        labels = []
        for i, label in enumerate(examples[label_column_name]):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            previous_word_idx = None
            label_ids = []
            for word_idx in word_ids:
                # Special tokens have a word id that is None. We set the label to -100 so they are automatically
                # ignored in the loss function.
                if word_idx is None:
                    label_ids.append(-100)
                # We set the label for the first token of each word.
                elif word_idx != previous_word_idx:
                    label_ids.append(label_to_id[label[word_idx]])
                # For the other tokens in a word, we set the label to either the current label or -100, depending on
                # the label_all_tokens flag.
                else:
                    if data_args.label_all_tokens:
                        label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
                    else:
                        label_ids.append(-100)
                previous_word_idx = word_idx
            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs
    # Attach per-example adapter indices (lang_ids) for adapter-based models.
    def _add_language_ids(dataset):
        if "lang" not in dataset[0]:
            return dataset
        def preprocess_function(examples):
            examples["lang_ids"] = [adapter_map[language] for language in examples["lang"]]
            return examples
        dataset = dataset.map(preprocess_function, batched=True)
        return dataset
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
            if is_swissbert or is_xmod:
                train_dataset = _add_language_ids(train_dataset)
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
            if is_swissbert or is_xmod:
                eval_dataset = _add_language_ids(eval_dataset)
    if training_args.do_predict:
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                tokenize_and_align_labels,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
            if is_swissbert or is_xmod:
                predict_dataset = _add_language_ids(predict_dataset)
    # Data collator
    data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    # Metrics
    metric = evaluate.load("seqeval")
    def compute_metrics(p):
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)
        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Unpack nested dictionaries
            final_results = {}
            for key, value in results.items():
                if isinstance(value, dict):
                    for n, v in value.items():
                        final_results[f"{key}_{n}"] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {
                "precision": results["overall_precision"],
                "recall": results["overall_recall"],
                "f1": results["overall_f1"],
                "accuracy": results["overall_accuracy"],
            }
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()  # Saves the tokenizer too for easy upload
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Predict
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        predictions = np.argmax(predictions, axis=2)
        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        # Save predictions
        output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predictions_file, "w") as writer:
                for prediction in true_predictions:
                    writer.write(" ".join(prediction) + "\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Per-process entry point for TPU training via xla_spawn (``index`` is
    the spawned process index and is intentionally unused)."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 29,543 | 40.32028 | 153 | py |
swissbert | swissbert-master/evaluation/romansh_alignment/utils.py | from typing import List, Union, Tuple
import numpy as np
import torch
from nltk import Alignment
from transformers import BatchEncoding
class AlignmentLevel(str):
    # String-constant namespace: the granularity an alignment refers to.
    WORD = "word"
    TOKEN = "token"
class WordToTokenStrategy(str):
    # String-constant namespace: how a word-level link is projected onto the
    # subword tokens of that word.
    ALL_TOKENS = "all-tokens"
    FIRST_TOKEN = "first-token"
class TokenToWordStrategy(str):
    """String constants for collapsing token-level links back to word level."""
    ANY_TOKEN, ALL_TOKENS, FIRST_TOKEN = "any-token", "all-tokens", "first-token"
class LayerAggregation(str):
    """String constants selecting how model hidden layers are combined."""
    SINGLE, AVERAGE = "single", "average"
class WordAlignment(Alignment):
    """An ``nltk.Alignment`` whose index pairs refer to words (not subword tokens)."""
    @classmethod
    def fromstring(cls, s) -> 'WordAlignment':
        """Parse a Pharaoh-format string (e.g. ``"0-0 1-2"``) into a word alignment."""
        parsed = Alignment.fromstring(s)
        return WordAlignment(parsed)
    @classmethod
    def from_labels(cls, alignment_labels: np.ndarray) -> 'WordAlignment':
        """Build an alignment from a 2-D 0/1 label matrix: one pair per nonzero cell."""
        pairs = set(map(tuple, zip(*alignment_labels.nonzero())))
        return WordAlignment(pairs)
    @property
    def level(self):
        return AlignmentLevel.WORD
def subword_to_word_map(sentence: str, encoding: "BatchEncoding") -> List[int]:
    """Map each subword token in ``encoding`` to the index of the whitespace-separated
    word of ``sentence`` that contains it.

    Returns a list with one entry per token: the containing word's index, or
    ``None`` for special tokens whose character offsets are ``(0, 0)``.

    Raises:
        NotImplementedError: if ``encoding`` holds more than one sequence.
        ValueError: if a token does not fall within a single word, or the
            resulting map is inconsistent with the words of ``sentence``.
    """
    if encoding.input_ids.shape[0] != 1:
        raise NotImplementedError("Only batch size 1 is supported")
    to_word_map = []
    # Character offsets (start, stop) of each whitespace-separated word.
    word_offsets: List[Tuple[int, int]] = []
    for i, char in enumerate(sentence):
        if i == 0 or sentence[i - 1] == " ":
            word_offsets.append((i, i + 1))
        elif char != " ":
            word_offsets[-1] = (word_offsets[-1][0], i + 1)
    # Provided encoding.offsets is not accurate for custom vocabularies, need to correct
    subword_offsets: List[Tuple[int, int]] = []
    for i, (token_start, token_stop) in enumerate(encoding[0].offsets):
        # A leading "▁" marks a word-initial piece; if the reported span includes the
        # marker character, shift the start past it so only real characters remain.
        if encoding[0].tokens[i].startswith("▁") and (token_stop - token_start) == len(encoding[0].tokens[i]):
            token_start += 1
        subword_offsets.append((token_start, token_stop))
    for token_start, token_stop in subword_offsets:
        if (token_start, token_stop) == (0, 0):
            # Special tokens (e.g. <s>, </s>) map to no word.
            to_word_map.append(None)
            continue
        for i, (word_start, word_stop) in enumerate(word_offsets):
            if token_start >= word_start and token_stop <= word_stop:
                word_index = i
                break
        else:
            # Previously a token contained in no single word silently reused the
            # preceding token's word index (or raised NameError for the first token).
            raise ValueError(
                f"Token span ({token_start}, {token_stop}) does not fall within a single word: {sentence}"
            )
        to_word_map.append(word_index)
    words = sentence.split()
    try:
        assert len(set([i for i in to_word_map if i is not None])) == len(words)
        # NOTE: the former `assert cond, print(...)` evaluated print() eagerly on
        # every call (side effect, None message); details go into the ValueError below.
        assert max([i for i in to_word_map if i is not None]) == len(words) - 1
        assert len(to_word_map) == encoding.input_ids.size(-1)
    except AssertionError:
        raise ValueError(f"Failed to map subwords to words: {sentence} {to_word_map} {words}")
    return to_word_map
def alignment_error_rate(
    references: List["Alignment"],
    hypotheses: List["Alignment"],
):
    """Compute the micro-averaged alignment error rate (AER); lower is better.

    AER = 1 - 2 * |ref ∩ hyp| / (|ref| + |hyp|). Does not account for
    "possible" links, so sure and possible links are treated identically.

    Returns 0.0 when neither side contains any links at all (previously this
    raised ZeroDivisionError).
    """
    assert len(references) == len(hypotheses)
    num_reference_pairs = 0
    num_hypothesis_pairs = 0
    num_pairs_in_intersection = 0
    for reference, hypothesis in zip(references, hypotheses):
        assert type(reference) is type(hypothesis)
        num_reference_pairs += len(reference)
        num_hypothesis_pairs += len(hypothesis)
        num_pairs_in_intersection += len(reference & hypothesis)
    total_pairs = num_hypothesis_pairs + num_reference_pairs
    if total_pairs == 0:
        # No links expected and none proposed: perfect agreement, zero error.
        return 0.0
    aer = 1.0 - 2 * num_pairs_in_intersection / float(total_pairs)
    return aer
def alignment_f1_score(
    references: List["Alignment"],
    hypotheses: List["Alignment"],
) -> Tuple[float, float, float]:
    """Compute micro-averaged (precision, recall, F1) over parallel alignment lists.

    Does not account for "possible" links. Degenerate cases return 0.0 instead
    of raising ZeroDivisionError: empty hypotheses give precision 0.0, empty
    references give recall 0.0, and F1 is 0.0 whenever precision + recall is 0.
    """
    assert len(references) == len(hypotheses)
    num_reference_pairs = 0
    num_hypothesis_pairs = 0
    num_pairs_in_intersection = 0
    for reference, hypothesis in zip(references, hypotheses):
        assert type(reference) is type(hypothesis)
        num_reference_pairs += len(reference)
        num_hypothesis_pairs += len(hypothesis)
        num_pairs_in_intersection += len(reference & hypothesis)
    precision = num_pairs_in_intersection / float(num_hypothesis_pairs) if num_hypothesis_pairs else 0.0
    recall = num_pairs_in_intersection / float(num_reference_pairs) if num_reference_pairs else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1
def bert_score(
query_embeddings: np.ndarray,
document_embeddings: Union[List[np.ndarray], np.ndarray],
device: str = "cpu",
):
"""
Adapted from https://github.com/Tiiiger/bert_score/blob/cb582ed5c88b02230b8f101173fd959b68023dc6/bert_score/utils.py#L469
"""
assert document_embeddings[0].shape[-1] == query_embeddings.shape[-1]
if isinstance(document_embeddings, list):
# Pad document_embeddings to the same length with zeros
max_length = max(len(embeddings) for embeddings in document_embeddings)
document_embeddings = [np.pad(embeddings, ((0, max_length - len(embeddings)), (0, 0)), 'constant') for embeddings in document_embeddings]
document_embeddings = np.array(document_embeddings)
with torch.no_grad():
ref_embedding = torch.from_numpy(query_embeddings).unsqueeze(0).repeat(len(document_embeddings), 1, 1).to(device)
hyp_embedding = torch.from_numpy(document_embeddings).to(device)
ref_masks = (ref_embedding != 0).all(-1)
hyp_masks = (hyp_embedding != 0).all(-1)
# Avoid NaN
ref_embedding[~ref_masks] = 1
hyp_embedding[~hyp_masks] = 1
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
batch_size = ref_embedding.size(0)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())
masks = masks.expand(batch_size, -1, -1).contiguous().view_as(sim)
masks = masks.float().to(sim.device)
sim = sim * masks
word_precision = sim.max(dim=2)[0]
word_recall = sim.max(dim=1)[0]
P = word_precision.sum(dim=1) / hyp_masks.sum(dim=1)
R = word_recall.sum(dim=1) / ref_masks.sum(dim=1)
F = 2 * P * R / (P + R)
return F.cpu().numpy()
| 6,082 | 36.549383 | 145 | py |
swissbert | swissbert-master/evaluation/romansh_alignment/word_aligners/simalign_aligner.py | from collections import Counter
from typing import Tuple, List, Union
import torch
from networkx.algorithms.bipartite import from_biadjacency_matrix
from scipy.sparse import csr_matrix
from tqdm import tqdm
from evaluation.romansh_alignment.utils import LayerAggregation, subword_to_word_map, AlignmentLevel, WordAlignment
from evaluation.romansh_alignment.word_aligners import WordAligner, AlignerOutput
class SimalignMethod(str):
    """String constants naming the SimAlign link-extraction methods."""
    ARGMAX, ITERMAX, MATCH = "argmax", "itermax", "match"
class SimalignAligner(WordAligner):
    """
    https://www.aclweb.org/anthology/2020.findings-emnlp.147.pdf – Argmax only
    Average the subword embeddings to obtain word embeddings
    """
    def __init__(self,
                 tokenizer,
                 model,
                 tokenizer_args: dict = None,
                 model_args: dict = None,
                 layer: int = -1,
                 method: Union[str, SimalignMethod] = SimalignMethod.ARGMAX,
                 aggregation: Union[str, LayerAggregation] = LayerAggregation.SINGLE,
                 ):
        # "itermax" exists in SimAlign but is not implemented here; reject it up front.
        if method == SimalignMethod.ITERMAX:
            raise NotImplementedError
        self.method = method
        self.aggregation = aggregation
        self.tokenizer = tokenizer
        self.tokenizer_args = tokenizer_args or {
            "return_tensors": "pt",
        }
        self.model = model
        self.model.eval()
        self.model_args = model_args or {
            "output_hidden_states": True,
            "return_dict": True,
        }
        # Index of the hidden layer to use when aggregation == LayerAggregation.SINGLE.
        self.layer = layer
    def align(self,
              src_sentences: List[str],
              tgt_sentences: List[str],
              src_lang_id: int = None,
              tgt_lang_id: int = None,
              ) -> AlignerOutput:
        """Word-align each source sentence to the target sentence at the same index.

        Each sentence is embedded word-by-word, a pairwise similarity matrix is
        built, and links are extracted with the configured method:
        "argmax" keeps mutual best matches; "match" uses maximum-weight
        bipartite matching.
        """
        assert len(src_sentences) == len(tgt_sentences)
        alignments = []
        all_src_words = []
        all_tgt_words = []
        for i in tqdm(list(range(len(src_sentences)))):
            src_sentence = src_sentences[i]
            all_src_words.append(src_sentence.split())
            src_embeddings = self._encode_sentence(src_sentence, src_lang_id)
            tgt_sentence = tgt_sentences[i]
            all_tgt_words.append(tgt_sentence.split())
            tgt_embeddings = self._encode_sentence(tgt_sentence, tgt_lang_id)
            similarity_matrix = self._get_similarity_matrix(src_embeddings, tgt_embeddings)
            if self.method == SimalignMethod.ARGMAX:
                forward, backward = self._get_alignment_matrix(similarity_matrix)
                # Intersection keeps only links that are best matches in both directions.
                aligns = forward & backward
            elif self.method == SimalignMethod.MATCH:
                aligns = self._get_max_weight_match(similarity_matrix)
            else:
                raise ValueError(f"Unknown method: {self.method}")
            word_alignment = WordAlignment.from_labels(aligns[0].cpu().detach().numpy())
            alignments.append(word_alignment)
        return AlignerOutput(
            alignments=alignments,
            src_tokens=all_src_words,
            tgt_tokens=all_tgt_words,
            level=AlignmentLevel.WORD,
        )
    @torch.no_grad()
    def _encode_sentence(self, sentence: str, lang_id: int = None) -> torch.Tensor:
        """
        Encode a sentence and return the word embeddings, averaged across the subwords of a word
        """
        words = sentence.split()
        encoding = self.tokenizer(sentence, **self.tokenizer_args).to(self.model.device)
        to_word_map = subword_to_word_map(sentence, encoding)
        model_args = self.model_args.copy()
        if lang_id is not None:
            # lang_ids selects the language adapter (X-MOD-style models).
            model_args["lang_ids"] = torch.tensor([lang_id], device=self.model.device)
        output = self.model(**encoding, **model_args)
        if self.aggregation == LayerAggregation.SINGLE:
            hidden_states = output.hidden_states[self.layer]
        elif self.aggregation == LayerAggregation.AVERAGE:
            hidden_states = torch.stack(output.hidden_states, dim=0).mean(dim=0)
        else:
            raise ValueError(f"Unknown aggregation: {self.aggregation}")
        # Sum the hidden states of all subwords belonging to each word ...
        word_embeddings = torch.zeros((1, len(words), hidden_states.size(-1)), device=self.model.device)
        for token_id, word_id in enumerate(to_word_map):
            if word_id is None:
                continue
            word_embeddings[0, word_id] += hidden_states[0, token_id]
        # ... then divide each word by its subword count to obtain the average.
        subwords_counter = Counter(to_word_map)
        for word_id, count in subwords_counter.items():
            if word_id is None:
                continue
            word_embeddings[0, word_id] /= count
        return word_embeddings
    @staticmethod
    @torch.no_grad()
    def _get_similarity_matrix(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        """
        Initial source: https://stackoverflow.com/a/58144658/3902795
        Added a batch dimension
        """
        eps = 1e-8
        # Initial dim: batch x seq_len x embedding_size
        a_n, b_n = a.norm(dim=2)[..., None], b.norm(dim=2)[..., None]  # Same dim
        a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))  # Same dim
        b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))
        pairwise_cosine_similarity = torch.matmul(a_norm, b_norm.transpose(1, 2))  # batch x seq_len_1 x seq_len_2
        # Rescale cosine similarity from [-1, 1] to [0, 1].
        return (pairwise_cosine_similarity + 1.0) / 2.0  # Same dim
    @staticmethod
    @torch.no_grad()
    def _get_alignment_matrix(similarity_matrix: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return boolean one-hot argmax matrices, both batch x seq_len_1 x seq_len_2."""
        batch_size, seq_len_1, seq_len_2 = similarity_matrix.shape
        forward_base = torch.eye(seq_len_2, dtype=torch.bool, device=similarity_matrix.device)
        backward_base = torch.eye(seq_len_1, dtype=torch.bool, device=similarity_matrix.device)
        # Indexing an identity matrix with argmax yields a one-hot row per position.
        forward = forward_base[similarity_matrix.argmax(dim=2)]  # batch x seq_len_1 x seq_len_2
        backward = backward_base[similarity_matrix.argmax(dim=1)]  # batch x seq_len_2 x seq_len_1
        return forward, backward.transpose(1, 2)
    @staticmethod
    @torch.no_grad()
    def _get_max_weight_match(similarity_matrix: torch.Tensor) -> torch.Tensor:
        """Extract links via maximum-weight bipartite matching (SimAlign "match")."""
        try:
            import networkx as nx
        except ImportError:
            raise ValueError("networkx must be installed to use match algorithm.")
        similarity_matrix = similarity_matrix.squeeze(0)
        def permute(edge):
            # Map a bipartite-graph edge back to (source index, target index):
            # target nodes are offset by the number of source nodes.
            if edge[0] < similarity_matrix.shape[0]:
                return edge[0], edge[1] - similarity_matrix.shape[0]
            else:
                return edge[1], edge[0] - similarity_matrix.shape[0]
        G = from_biadjacency_matrix(csr_matrix(similarity_matrix.cpu().numpy()))
        matching = nx.max_weight_matching(G, maxcardinality=True)
        matching = [permute(x) for x in matching]
        matching = sorted(matching, key=lambda x: x[0])
        res_matrix = torch.zeros_like(similarity_matrix, device=similarity_matrix.device)
        for edge in matching:
            res_matrix[edge[0], edge[1]] = 1
        # Restore the batch dimension removed by squeeze(0) above.
        return res_matrix.unsqueeze(0)
    def __str__(self):
        return f"SimalignAligner({self.model.name_or_path.replace('/', '_')})"
| 7,157 | 39.902857 | 115 | py |
swissbert | swissbert-master/evaluation/romansh_alignment/encoders/hf.py | import math
from typing import Union
import numpy as np
import torch
from transformers import PreTrainedModel
from evaluation.romansh_alignment.encoders import SentenceEncoder
from evaluation.romansh_alignment.utils import LayerAggregation
class HuggingfaceEncoder(SentenceEncoder):
    """Sentence/token encoder backed by a Hugging Face transformer.

    Inputs longer than the model's maximum sequence length are split into
    50%-overlapping windows; the window outputs are superimposed and the
    overlapping regions averaged.
    """
    def __init__(self,
                 model: PreTrainedModel,
                 tokenizer,
                 aggregation: Union[str, LayerAggregation] = LayerAggregation.SINGLE,
                 ):
        self.model = model.eval()
        if torch.cuda.is_available():
            self.model = self.model.cuda()
        self.tokenizer = tokenizer
        # How hidden layers are combined: last layer only, or the mean of all layers.
        self.aggregation = aggregation
    def _embed_tokens(self, text: str, lang_id: int = None) -> np.ndarray:
        """Return one embedding per token, shape (seq_len, hidden_size).

        Padding positions are zeroed via the attention mask.
        """
        inputs = self.tokenizer(text, return_tensors='pt').to(self.model.device)
        # If sequence length is longer than maximum of model, split inputs into overlapping chunks and superimpose the hidden states
        max_length = self.model.config.max_position_embeddings - 2
        # An even window size guarantees the 50%-overlap stride divides cleanly.
        if max_length % 2 != 0:
            max_length -= 1
        # Make sure that input is padded to a multiple of max_length
        multiple = math.ceil(inputs['input_ids'].shape[1] / max_length) * max_length
        inputs = self.tokenizer(
            text,
            return_tensors='pt',
            padding="max_length",
            max_length=multiple,
        ).to(self.model.device)
        assert inputs['input_ids'].shape[1] == multiple
        assert inputs['input_ids'].shape[1] % max_length == 0
        chunks = self._chunk_inputs(inputs, max_length=max_length)
        chunk_outputs = []
        for chunk in chunks:
            with torch.no_grad():
                model_args = {
                    "output_hidden_states": True,
                    "return_dict": True,
                }
                if lang_id is not None:
                    model_args["lang_ids"] = torch.tensor([lang_id]).to(self.model.device)
                chunk_output = self.model(**chunk, **model_args)
                if self.aggregation == LayerAggregation.SINGLE:
                    chunk_output = chunk_output.last_hidden_state
                elif self.aggregation == LayerAggregation.AVERAGE:
                    chunk_output = torch.stack(chunk_output.hidden_states, dim=0).mean(dim=0)
                else:
                    raise ValueError(f"Invalid aggregation: {self.aggregation}")
                assert chunk_output.shape[1] == max_length
                # Zero out embeddings of padding positions.
                chunk_output = chunk_output * chunk['attention_mask'].unsqueeze(-1)
                chunk_output = chunk_output.cpu().numpy()[0]
                chunk_outputs.append(chunk_output)
        embeddings = self._merge_outputs(inputs['input_ids'].shape[1], chunk_outputs)
        return embeddings
    def _chunk_inputs(self, inputs, max_length: int):
        """
        Split inputs into chunks of length `max_length` that overlap by 50%.
        """
        if inputs['input_ids'].shape[1] > max_length:
            chunks = []
            for i in range(0, inputs['input_ids'].shape[1], max_length // 2):
                chunk = {}
                for key, value in inputs.items():
                    chunk[key] = value[:, i:(i + max_length)]
                # Drop the trailing partial window at the end of the sequence.
                if chunk['input_ids'].shape[1] < max_length:
                    continue
                chunks.append(chunk)
        else:
            chunks = [inputs]
        return chunks
    def _merge_outputs(self, seq_len: int, chunk_outputs) -> np.ndarray:
        """Superimpose the 50%-overlapping chunk outputs and average the overlaps."""
        embeddings = np.zeros((seq_len, chunk_outputs[0].shape[1]))
        chunk_length = chunk_outputs[0].shape[0]
        # With a stride of chunk_length/2 there are (2*seq_len/chunk_length - 1) chunks.
        assert len(chunk_outputs) * chunk_length == 2 * seq_len - chunk_length
        for i, chunk_output in enumerate(chunk_outputs):
            embeddings[(i * chunk_length // 2):(i * chunk_length // 2 + chunk_length)] += chunk_output
        # Average all overlapping parts
        # (the first and last chunk_length/2 positions are covered only once).
        embeddings[chunk_length // 2:-chunk_length // 2] /= 2
        return embeddings
    def _embed_sentence(self, text: str, lang_id: int = None) -> np.ndarray:
        # NOTE(review): calls self.embed_tokens — presumably a public wrapper around
        # _embed_tokens defined on SentenceEncoder; confirm in the base class.
        token_embeddings = self.embed_tokens(text, lang_id)
        # Mean over token rows whose sum is nonzero (zero rows are padding).
        sentence_embeddings = np.mean(token_embeddings[token_embeddings.sum(axis=1) != 0], axis=0)
        return sentence_embeddings
    def __str__(self):
        return f"HuggingfaceEncoder({self.model.name_or_path.replace('/', '_')}, aggregation={self.aggregation})"
| 4,411 | 42.683168 | 132 | py |
swissbert | swissbert-master/pretraining/fairseq_additions/models/swissbert/hub_interface.py | from fairseq import utils
from fairseq.models.xmod import XMODHubInterface
class SwissBERTHubInterface(XMODHubInterface):
    """fairseq hub interface for SwissBERT; forwards extra kwargs (e.g. lang_id)
    to the model so the right language adapter is used."""
    def fill_mask(self, masked_input: str, topk: int = 5, **kwargs):
        """
        Source: https://github.com/facebookresearch/fairseq/blob/58cc6cca18f15e6d56e3f60c959fe4f878960a60/fairseq/models/roberta/hub_interface.py#L156
        Added **kwargs to allow passing lang_id

        Returns a list of (filled sentence, probability, predicted token) tuples
        for the topk predictions at the single <mask> position.
        """
        masked_token = "<mask>"
        assert (
            masked_token in masked_input and masked_input.count(masked_token) == 1
        ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
            masked_token
        )
        # BPE-encode the text around the mask and re-insert the literal mask token.
        text_spans = masked_input.split(masked_token)
        text_spans_bpe = (
            (" {0} ".format(masked_token))
            .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
            .strip()
        )
        tokens = self.task.source_dictionary.encode_line(
            "<s> " + text_spans_bpe + " </s>",
            append_eos=False,
            add_if_not_exist=False,
        )
        masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False)
        if tokens.dim() == 1:
            tokens = tokens.unsqueeze(0)
        with utils.model_eval(self.model):
            features, extra = self.model(
                tokens.long().to(device=self.device),
                features_only=False,
                return_all_hiddens=False,
                **kwargs,
            )
        # Vocabulary distribution at the masked position.
        logits = features[0, masked_index, :].squeeze()
        prob = logits.softmax(dim=0)
        values, index = prob.topk(k=topk, dim=0)
        topk_predicted_token_bpe = self.task.source_dictionary.string(index)
        topk_filled_outputs = []
        for index, predicted_token_bpe in enumerate(
            topk_predicted_token_bpe.split(" ")
        ):
            predicted_token = self.bpe.decode(predicted_token_bpe)
            # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306
            if predicted_token_bpe.startswith("\u2581"):
                predicted_token = " " + predicted_token
            if " {0}".format(masked_token) in masked_input:
                topk_filled_outputs.append(
                    (
                        masked_input.replace(
                            " {0}".format(masked_token), predicted_token
                        ),
                        values[index].item(),
                        predicted_token,
                    )
                )
            else:
                topk_filled_outputs.append(
                    (
                        masked_input.replace(masked_token, predicted_token),
                        values[index].item(),
                        predicted_token,
                    )
                )
        return topk_filled_outputs
| 2,844 | 37.445946 | 150 | py |
swissbert | swissbert-master/pretraining/fairseq_additions/models/swissbert/model.py | import logging
from argparse import Namespace
from pathlib import Path
from typing import Optional
import torch
from fairseq.models import register_model_architecture, register_model
from fairseq.models.roberta import base_architecture
from fairseq.models.xmod import XMODModel
from omegaconf import DictConfig
from fairseq_additions.models.swissbert.hub_interface import SwissBERTHubInterface
@register_model("swissbert")
class SwissBERTModel(XMODModel):
    """X-MOD model variant used to continue pre-training SwissBERT: adds/initializes
    new language adapters, prunes unused ones, and freezes shared components."""
    @classmethod
    def from_pretrained(
        cls,
        model_name_or_path,
        checkpoint_file="model.pt",
        data_name_or_path=".",
        bpe="sentencepiece",
        **kwargs,
    ):
        """Load a checkpoint and wrap it in a SwissBERTHubInterface."""
        from fairseq import hub_utils
        x = hub_utils.from_pretrained(
            model_name_or_path,
            checkpoint_file,
            data_name_or_path,
            archive_map=cls.hub_models(),
            bpe=bpe,
            load_checkpoint_heads=True,
            **kwargs,
        )
        return SwissBERTHubInterface(x["args"], x["task"], x["models"][0])
    def load_state_dict(
        self,
        state_dict,
        strict=True,
        model_cfg: Optional[DictConfig] = None,
        args: Optional[Namespace] = None,
    ):
        """
        After loading a pre-trained XMOD model, initialize new language adapters, prune unneeded language adapters, and freeze some components
        """
        logging.info("Disabling strict loading from pre-trained XMOD model")
        if getattr(self.args, "train_new_embeddings", False):
            # Rename weights that depend on vocab size to avoid size mismatch
            # (the *_old tensors are consumed later by _initialize_new_embeddings).
            state_dict["encoder.sentence_encoder.embed_tokens.weight_old"] = state_dict["encoder.sentence_encoder.embed_tokens.weight"]
            del state_dict["encoder.sentence_encoder.embed_tokens.weight"]
            state_dict["encoder.lm_head.weight_old"] = state_dict["encoder.lm_head.weight"]
            del state_dict["encoder.lm_head.weight"]
            state_dict["encoder.lm_head.bias_old"] = state_dict["encoder.lm_head.bias"]
            del state_dict["encoder.lm_head.bias"]
        super().load_state_dict(state_dict, strict=False, model_cfg=model_cfg, args=args)
        self._init_languages()
        if getattr(self.args, "prune_languages", False):
            self._prune_languages()
        self._freeze_shared_components()
        if getattr(self.args, "train_new_embeddings", False):
            self._initialize_new_embeddings(state_dict)
    def _init_languages(self):
        """Initialize new language adapters from (averages of) existing ones."""
        # Format of argument: de_DE->de_CH,fr_XX->fr_CH,de_DE+fr_XX->rm_CH
        if getattr(self.args, "init_languages", None) is None:
            return
        for mapping in self.args.init_languages.split(","):
            sources, target = mapping.split("->")
            sources = sources.split("+")
            logging.info(f"Initializing language adapter for {target} with language adapters for {sources}")
            for source in sources:
                assert source in self.model_languages, f"Source language {source} not in model languages"
            assert target in self.train_languages, f"Target language {target} not in train_languages"
            # Average state dicts of source adapters
            for k, v in self.encoder.sentence_encoder.layers._modules.items():
                average_state_dict = v.adapter_modules[sources[0]].state_dict()
                for source in sources[1:]:
                    for k_, v_ in v.adapter_modules[source].state_dict().items():
                        average_state_dict[k_] += v_
                for k_ in average_state_dict:
                    average_state_dict[k_] /= len(sources)
                v.adapter_modules[target].load_state_dict(average_state_dict)
    def _prune_languages(self):
        """
        Remove language adapters that are not trained
        """
        for i, (k, v) in enumerate(self.encoder.sentence_encoder.layers._modules.items()):
            for lang in self.model_languages:
                if lang not in self.train_languages:
                    # Log once (for the first layer) per removed language.
                    if i == 0:
                        logging.info(f"Removing language adapter for {lang}")
                    del v.adapter_modules[lang]
    def _freeze_shared_components(self):
        """
        Freeze everything except language adapters
        """
        logging.info("❄Freezing everything except language adapters❄")
        for parameter in self.parameters():
            parameter.requires_grad = False
        for k, v in self.encoder.sentence_encoder.layers._modules.items():
            if hasattr(v, "adapter_layer_norm"):
                for parameter in v.adapter_layer_norm.parameters():
                    parameter.requires_grad = True
            for parameter in v.adapter_modules.parameters():
                parameter.requires_grad = True
    def _initialize_new_embeddings(self, pretrained_state_dict):
        """
        Initialize the new embeddings with the pre-trained embeddings of identical subwords
        Unfreeze the embeddings, including the positional embeddings
        """
        device = self.encoder.sentence_encoder.embed_tokens.weight.device
        dtype = self.encoder.sentence_encoder.embed_tokens.weight.dtype
        from transformers import XLMRobertaTokenizer
        logging.info("Initializing new embeddings with pre-trained embeddings")
        old_word_embeddings = pretrained_state_dict["encoder.sentence_encoder.embed_tokens.weight_old"]
        old_vocab_path = Path(self.args.old_vocab_path)
        assert old_vocab_path.exists()
        old_vocab = XLMRobertaTokenizer(old_vocab_path).get_vocab()
        logging.info("Pre-trained vocabulary size: %d", len(old_vocab))
        assert len(old_vocab) == old_word_embeddings.shape[0]
        new_word_embeddings = self.encoder.sentence_encoder.embed_tokens.weight
        new_vocab_path = Path(self.args.new_vocab_path)
        assert new_vocab_path.exists()
        new_vocab = XLMRobertaTokenizer(new_vocab_path).get_vocab()
        logging.info("New vocabulary size: %d", len(new_vocab))
        assert len(new_vocab) == new_word_embeddings.shape[0]
        with torch.no_grad():
            # Copy the pre-trained vector for every subword present in both vocabs.
            num_overlap = 0
            for subword, new_index in new_vocab.items():
                if subword in old_vocab:
                    old_index = old_vocab[subword]
                    new_word_embeddings[new_index] = old_word_embeddings[old_index]
                    num_overlap += 1
            logging.info("Number of overlapping subwords: %d", num_overlap)
        # Tie LM head again
        if not self.args.untie_weights_roberta:
            self.encoder.lm_head.weight = new_word_embeddings
        logging.info("Unfreezing embeddings")
        for parameter in self.encoder.sentence_encoder.embed_tokens.parameters():
            parameter.requires_grad = True
        for parameter in self.encoder.sentence_encoder.embed_positions.parameters():
            parameter.requires_grad = True
        for parameter in self.encoder.lm_head.parameters():
            parameter.requires_grad = True
        self.to(device=device, dtype=dtype)
@register_model_architecture("swissbert", "swissbert_base")
def swissbert_base(args):
    """Default architecture for SwissBERT: X-MOD-style adapter configuration plus
    the full list of language adapters (X-MOD's 81 plus four Swiss additions)."""
    # Adapter configuration (bottleneck adapters after each layer, shared layer norm).
    args.ffn_modules = getattr(args, "ffn_modules", False)
    args.adapter_modules = getattr(args, "adapter_modules", True)
    args.adapter_layer_norm = getattr(args, "adapter_layer_norm", False)
    args.adapter_reuse_layer_norm = getattr(args, "adapter_reuse_layer_norm", True)
    args.ln_before_adapter = getattr(args, "ln_before_adapter", True)
    args.languages = getattr(
        args,
        "languages",
        [
            # 1. Language adapters of X-MOD
            "en_XX",
            "id_ID",
            "vi_VN",
            "ru_RU",
            "fa_IR",
            "sv_SE",
            "ja_XX",
            "fr_XX",
            "de_DE",
            "ro_RO",
            "ko_KR",
            "hu_HU",
            "es_XX",
            "fi_FI",
            "uk_UA",
            "da_DK",
            "pt_XX",
            "no_XX",
            "th_TH",
            "pl_PL",
            "bg_BG",
            "nl_XX",
            "zh_CN",
            "he_IL",
            "el_GR",
            "it_IT",
            "sk_SK",
            "hr_HR",
            "tr_TR",
            "ar_AR",
            "cs_CZ",
            "lt_LT",
            "hi_IN",
            "zh_TW",
            "ca_ES",
            "ms_MY",
            "sl_SI",
            "lv_LV",
            "ta_IN",
            "bn_IN",
            "et_EE",
            "az_AZ",
            "sq_AL",
            "sr_RS",
            "kk_KZ",
            "ka_GE",
            "tl_XX",
            "ur_PK",
            "is_IS",
            "hy_AM",
            "ml_IN",
            "mk_MK",
            "be_BY",
            "la_VA",
            "te_IN",
            "eu_ES",
            "gl_ES",
            "mn_MN",
            "kn_IN",
            "ne_NP",
            "sw_KE",
            "si_LK",
            "mr_IN",
            "af_ZA",
            "gu_IN",
            "cy_GB",
            "eo_EO",
            "km_KH",
            "ky_KG",
            "uz_UZ",
            "ps_AF",
            "pa_IN",
            "ga_IE",
            "ha_NG",
            "am_ET",
            "lo_LA",
            "ku_TR",
            "so_SO",
            "my_MM",
            "or_IN",
            "sa_IN",
            # 2. Added language adapters
            "de_CH",
            "fr_CH",
            "it_CH",
            "rm_CH",
        ],
    )
    base_architecture(args)
| 9,556 | 36.332031 | 142 | py |
swissbert | swissbert-master/pretraining/fairseq_additions/tasks/multilingual_masked_lm_xmod.py | # Adapted from https://github.com/facebookresearch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/fairseq/tasks/multilingual_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
ConcatDataset,
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
ListDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
encoders,
)
from fairseq.tasks import LegacyFairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("multilingual_masked_lm_xmod")
class MultiLingualMaskedLMTaskXmod(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner",
)
parser.add_argument(
"--sample-break-mode",
default="complete",
choices=["none", "complete", "complete_doc", "eos"],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
"of sentence, but may include multiple sentences per sample. "
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.',
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments "
"per sample for BERT dataset",
)
parser.add_argument(
"--mask-prob",
default=0.15,
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--leave-unmasked-prob",
default=0.1,
type=float,
help="probability that a masked token is unmasked",
)
parser.add_argument(
"--random-token-prob",
default=0.1,
type=float,
help="probability of replacing a token with a random token",
)
parser.add_argument(
"--freq-weighted-replacement",
action="store_true",
help="sample random replacement words based on word frequencies",
)
parser.add_argument(
"--mask-whole-words",
default=False,
action="store_true",
help="mask whole words; you may also want to set --bpe",
)
parser.add_argument(
"--multilang-sampling-alpha",
type=float,
default=1.0,
help="smoothing alpha for sample rations across multiple datasets",
)
parser.add_argument(
"--init-languages",
type=str,
default=None,
help="Initialize new language adapters from (averages of) existing languages. "
"Format: de_DE->de_CH,fr_XX->fr_CH,de_DE+fr_XX->rm_CH",
)
parser.add_argument(
"--prune-languages",
default=False,
action="store_true",
help="Remove language adapters without training data",
)
parser.add_argument(
"--train-new-embeddings",
default=False,
action="store_true",
help="Initialize embeddings via overlap and do not freeze embeddings",
)
parser.add_argument(
"--old-vocab-path",
type=str,
default=None,
help="Path to vocab file of pre-trained model (needed for embeddings initialization)",
)
parser.add_argument(
"--new-vocab-path",
type=str,
default=None,
help="Path to new vocab file (needed for embeddings initialization)",
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol("<mask>")
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
return cls(args, dictionary)
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
bpe = encoders.build_bpe(self.args)
if bpe is not None:
def is_beginning_of_word(i):
if i < self.source_dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = self.source_dictionary[i]
if tok.startswith("madeupword"):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(self.source_dictionary))))
)
else:
mask_whole_words = None
return mask_whole_words
def _get_sample_prob(self, dataset_lens):
"""
Get smoothed sampling porbability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob**self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
assert self.model_languages
languages = [language for language in languages if language in self.model_languages]
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
# logger.info(
# "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
# )
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
# JV: Use a different masking seed for each epoch
if split == self.args.train_subset:
mask_seed = str(self.args.seed) + str(epoch)
else:
mask_seed = self.args.seed
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=mask_seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
"net_input": {
"src_tokens": PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"src_lengths": NumelDataset(src_dataset, reduce=False),
"lang_id": ListDataset([language] * src_dataset.sizes.shape[0]), # XMOD code expects str, not id
},
"target": PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
"nsentences": NumSamplesDataset(),
"ntokens": NumelDataset(src_dataset, reduce=True),
# "lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
"loaded total {} blocks for all languages".format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logging.info("Sample probability by language: {}".format(
{
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
},
))
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language: {}".format(
{
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
},
))
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + "_" + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ",".join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = PadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
pad_idx=self.source_dictionary.pad(),
left_pad=False,
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": src_dataset,
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
    @property
    def source_dictionary(self):
        """Single shared dictionary used for the source side (LM-style task)."""
        return self.dictionary
    @property
    def target_dictionary(self):
        """Same dictionary as the source side; inputs and targets share a vocab."""
        return self.dictionary
    def build_model(self, *args, **kwargs):
        """Build the model, then record which languages it supports and trains on.

        Side effects: sets ``self.model_languages`` / ``self.train_languages``
        and mirrors both onto the model instance so downstream code can read
        them from either object.
        """
        model = super().build_model(*args, **kwargs)
        # The model must declare its language list; fail fast if it does not.
        self.model_languages = model.args.languages
        assert self.model_languages
        self._set_train_languages()
        model.model_languages = self.model_languages
        model.train_languages = self.train_languages
        return model
def _set_train_languages(self):
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[0]
languages = sorted(
name
for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
self.train_languages = [language for language in languages if language in self.model_languages]
| 14,651 | 35.721805 | 144 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/setup.py | # This Python file uses the following encoding: utf-8
# !/usr/bin/env python
# Welcome to the Intel Extension for PyTorch setup.py.
#
# Environment variables you are probably interested in:
#
# DEBUG
# build with -O0 and -g (debug symbols)
#
# RELEASE
# build with optimization level -O2
#
# REL_WITH_DEB_INFO
# build with optimization level -O2 and -g (debug symbols)
#
# CFLAGS
# flags to apply to both C and C++ files to be compiled (a quirk of setup.py
# which we have faithfully adhered to in our build system is that CFLAGS
# also applies to C++ files (unless CXXFLAGS is set), in contrast to the
# default behavior of autogoo and cmake build systems.)
#
# CC
# the C/C++ compiler to use
#
# MKLROOT
# specify MKL library path.
# ONLY NEEDED if you have a specific MKL version you want to link against.
# Make sure this directory contains include and lib directories.
# By default, the MKL library installed with pip/conda is used.
#
# Environment variables we respect (these environment variables are
# conventional and are often understood/set by other software.)
#
# TORCH_VERSION
# specify the PyTorch version to depend on
#
# IPEX_VERSION
# specify the extension version literal
#
# MAX_JOBS
# process for parallel compile, must be a Integer
#
# VERBOSE
# more output when compile
#
# IPEX_VERSIONED_BUILD
# build wheel files versioned with a git commit number
#
##############################################################
# XPU Build options:
# USE_ONEMKL - to use oneMKL in operators
# USE_CHANNELS_LAST_1D - to use channels last 1d feature
# USE_PERSIST_STREAM - to use persistent oneDNN stream
# USE_PRIMITIVE_CACHE - to Cache oneDNN primitives by framework
# USE_QUEUE_BARRIER - to use queue submit_barrier API
#   USE_SCRATCHPAD_MODE     - to turn on oneDNN scratchpad user mode
#   USE_MULTI_CONTEXT       - to create DPC++ runtime context per device
#   USE_AOT_DEVLIST         - to set device list for AOT build option, for example, bdw,tgl,ats,...
# USE_SYCL_ASSERT - to enable assert in sycl kernel
# USE_ITT_ANNOTATION - to enable ITT annotation in sycl kernel
# BUILD_STATIC_ONEMKL - to link static oneMKL libraries
# BUILD_STATS - to count statistics for each component during build process
# BUILD_BY_PER_KERNEL - to build by DPC++ per_kernel option (exclusive with USE_AOT_DEVLIST)
# BUILD_STRIPPED_BIN - to strip all symbols after build
# BUILD_SEPARATE_OPS - to build each operator in separate library
# BUILD_SIMPLE_TRACE - to build simple trace for each registered operator
# BUILD_OPT_LEVEL - to add build option -Ox, accept values: 0/1
# BUILD_NO_CLANGFORMAT - to build without force clang-format
# BUILD_INTERNAL_DEBUG - to build internal debug code path
#
##############################################################
from __future__ import print_function
from distutils.command.build_py import build_py
from distutils.command.install import install
from distutils.version import LooseVersion
from functools import lru_cache
from subprocess import check_call, check_output
from setuptools.command.build_clib import build_clib
from setuptools.command.egg_info import egg_info
from setuptools import setup, distutils
from pathlib import Path
from typing import Any, Optional
import sysconfig
import distutils.ccompiler
import distutils.command.clean
import os
import glob
import platform
import shutil
import subprocess
import sys
import errno
# FIXME: always set BUILD_WITH_XPU = ON in XPU repo
os.environ["BUILD_WITH_XPU"] = "OFF"
# Define env values
ON_ENV_VAL = ["ON", "YES", "1", "Y"]  # accepted truthy spellings (compared upper-cased)
OFF_ENV_VAL = ["OFF", "NO", "0", "N"]  # accepted falsy spellings
FULL_ENV_VAL = ON_ENV_VAL + OFF_ENV_VAL  # every recognized spelling
# initialize variables for compilation
IS_LINUX = platform.system() == "Linux"
IS_DARWIN = platform.system() == "Darwin"
IS_WINDOWS = platform.system() == "Windows"
@lru_cache(maxsize=128)
def _get_build_target():
build_target = ""
if len(sys.argv) > 1:
if sys.argv[1] in ["build_clib", "bdist_cppsdk"]:
build_target = "cppsdk"
elif sys.argv[1] in ["clean"]:
build_target = "clean"
elif sys.argv[1] in ["develop"]:
build_target = "develop"
else:
build_target = "python"
return build_target
# Resolve where torch/libtorch comes from, depending on the build target.
torch_install_prefix = None
if _get_build_target() == "cppsdk":
    # cppsdk builds link against a user-provided libtorch, not the pip torch.
    torch_install_prefix = os.environ.get("LIBTORCH_PATH", None)
    if torch_install_prefix is None or not os.path.exists(torch_install_prefix):
        raise RuntimeError("Can not find libtorch from env LIBTORCH_PATH!")
    torch_install_prefix = os.path.abspath(torch_install_prefix)
elif _get_build_target() in ["develop", "python"]:
    # Python wheel builds require an importable torch for headers/cmake files.
    try:
        import torch
        from torch.utils.cpp_extension import BuildExtension, CppExtension
    except ImportError as e:
        raise RuntimeError("Fail to import torch!")
def _check_env_flag(name, default=""):
    """True when env var *name* (falling back to *default*) spells an ON value."""
    value = os.getenv(name, default)
    return value.upper() in ON_ENV_VAL
def get_build_type():
    """Map the REL_WITH_DEB_INFO / DEBUG env flags onto a cmake build type.

    REL_WITH_DEB_INFO wins over DEBUG; neither set means "Release".
    """
    if _check_env_flag("REL_WITH_DEB_INFO"):
        return "RelWithDebInfo"
    if _check_env_flag("DEBUG"):
        return "Debug"
    return "Release"
def create_if_not_exist(path_dir):
    """Create *path_dir* (including parents) if it does not already exist.

    Raises RuntimeError, chained to the original OSError, when creation fails
    for a reason other than the directory already existing.
    """
    if os.path.exists(path_dir):
        return
    try:
        Path(path_dir).mkdir(parents=True, exist_ok=True)
    except OSError as exc:  # Guard against race condition
        # exist_ok=True already tolerates a concurrent mkdir; anything else
        # (permissions, a file in the way) is a real failure. Chain the cause
        # so the underlying errno is not lost (fixes bare RuntimeError raise).
        if exc.errno != errno.EEXIST:
            raise RuntimeError("Fail to create path {}".format(path_dir)) from exc
def get_version_num():
    """Read version.txt and return the "MAJOR.MINOR.PATCH" version string.

    version.txt holds "KEY VALUE" pairs, one per line. Exits with status 1
    (after printing an ERROR) when any of the three required keys is missing.
    """
    versions = {}
    version_file = "version.txt"
    # Fix: the original leaked the file handle (open(...).readlines() without
    # close) and crashed with ValueError on blank lines.
    with open(version_file, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            key, value = line.split(" ", 1)
            versions[key] = value
    for v in ("VERSION_MAJOR", "VERSION_MINOR", "VERSION_PATCH"):
        if v not in versions:
            print("ERROR:", v, "is not found in", version_file)
            sys.exit(1)
    version = (
        versions["VERSION_MAJOR"]
        + "."
        + versions["VERSION_MINOR"]
        + "."
        + versions["VERSION_PATCH"]
    )
    return version
# Import name of the python package produced by this build.
PACKAGE_NAME = "intel_extension_for_pytorch"
# Interpreter version tuple (appears unused below in this file).
PYTHON_VERSION = sys.version_info
def get_pytorch_install_dir():
    """Locate the torch installation this build compiles against.

    None for `clean`, the user-supplied libtorch prefix for `cppsdk`,
    otherwise the directory of the imported torch package.
    """
    target = _get_build_target()
    if target == "clean":
        return None
    if target == "cppsdk":
        return torch_install_prefix
    return os.path.dirname(os.path.abspath(torch.__file__))
# Resolved once at import time; None only when running `setup.py clean`.
pytorch_install_dir = get_pytorch_install_dir()
def _build_installation_dependency():
install_requires = []
install_requires.append("psutil")
install_requires.append("numpy")
return install_requires
def which(thefile: str) -> Optional[str]:
    """Locate an executable named *thefile* on PATH.

    Returns the first matching path (honoring PATHEXT on Windows) or None.
    Directories are skipped even when they match the name.
    """
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    for directory in search_dirs:
        candidate = os.path.join(directory, thefile)
        candidates = [candidate]
        if sys.platform == "win32":
            for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
                candidates.append(candidate + ext)
        for name in candidates:
            if os.access(name, os.F_OK | os.X_OK) and not os.path.isdir(name):
                return name
    return None
def get_cmake_command():
    """Return the cmake executable name to use ("cmake" or "cmake3").

    On Windows "cmake" is assumed present. Elsewhere both candidates are
    version-probed and the newer one (>= 3.13.0) is chosen; RuntimeError when
    neither satisfies the minimum.
    """
    if platform.system() == "Windows":
        return "cmake"

    def _version_of(cmd: Optional[str]) -> Any:
        "Returns cmake version."
        # Parse "cmake version X.Y.Z" from `cmd --version`; None if cmd missing.
        if cmd is None:
            return None
        for line in check_output([cmd, "--version"]).decode("utf-8").split("\n"):
            if "version" in line:
                return LooseVersion(line.strip().split(" ")[2])
        raise RuntimeError("no version found")

    cmake3_version = _version_of(which("cmake3"))
    cmake_version = _version_of(which("cmake"))
    minimum = LooseVersion("3.13.0")
    if all(v is None or v < minimum for v in (cmake_version, cmake3_version)):
        raise RuntimeError("Require cmake or cmake3 3.13.0 or higher but not found")
    if cmake3_version is None:
        return "cmake"
    if cmake_version is None:
        return "cmake3"
    # Both exist: prefer cmake3 unless plain cmake is strictly newer.
    return "cmake3" if cmake3_version >= cmake_version else "cmake"
def get_cpack_command():
    """Return "cpack3" or "cpack" (preferring cpack3); RuntimeError if neither is on PATH."""
    if platform.system() == "Windows":
        return "cpack"
    for candidate in ("cpack3", "cpack"):
        if shutil.which(candidate) is not None:
            return candidate
    raise RuntimeError("no cpack or cpack3 found")
def get_ipex_git_head_sha(base_dir):
    """Return the short git hash of HEAD for the checkout at *base_dir*."""
    raw = subprocess.check_output(
        ["git", "rev-parse", "--short", "HEAD"], cwd=base_dir
    )
    return raw.decode("ascii").strip()
def get_torch_git_head_sha():
    """Resolve the git hash of the PyTorch/libtorch we build against.

    - "clean" target: nothing to resolve, returns None.
    - "cppsdk" target: parse the `build-hash` file shipped inside libtorch.
    - otherwise: read it straight off the imported torch package.
    """
    target = _get_build_target()
    if target == "clean":
        return None
    if target != "cppsdk":
        return torch.version.git_version
    libtorch_hash_file = os.path.join(torch_install_prefix, "build-hash")
    if not os.path.exists(libtorch_hash_file):
        raise RuntimeError(
            "can not find build-hash at {}".format(libtorch_hash_file)
        )
    with open(libtorch_hash_file, "r") as f:
        for line in f:
            line = line.strip()
            # The hash is the first purely alphanumeric line.
            if line.isalnum():
                return line
    raise RuntimeError("can not get libtorch hash in {}".format(libtorch_hash_file))
def get_submodule_commit(base_dir, submodule_dir):
    """Return the pinned commit hash of a git submodule, or "" if the dir is absent."""
    if not os.path.isdir(submodule_dir):
        return ""
    raw = subprocess.check_output(
        ["git", "submodule", "status", submodule_dir], cwd=base_dir
    )
    # `git submodule status` output starts with the commit hash.
    return raw.decode("ascii").strip().split()[0]
def get_build_version(ipex_git_sha):
    """Compose the wheel version string.

    Base version comes from IPEX_VERSION (or version.txt). When
    IPEX_VERSIONED_BUILD is on (the default) a "+git<sha7>" suffix is
    appended; otherwise a "+cpu"/"+xpu" package-type suffix is used.
    """
    pkg_type = "xpu" if _check_env_flag("BUILD_WITH_XPU") else "cpu"
    version = os.getenv("IPEX_VERSION", get_version_num())
    if not _check_env_flag("IPEX_VERSIONED_BUILD", default="1"):
        return version + "+" + pkg_type
    try:
        version += "+git" + ipex_git_sha[:7]
    except Exception:
        # Best effort: a missing/odd sha leaves the plain version.
        pass
    return version
def write_buffer_to_file(file_path, buffer):
    """Write *buffer* to *file_path*, creating parent directories as needed."""
    create_if_not_exist(os.path.dirname(file_path))
    # The context manager closes the handle; the original also called
    # f.close() redundantly inside the `with` block.
    with open(file_path, "w") as f:
        f.write(buffer)
def get_code_fingerprint(ipex_build_version, ipex_git_sha, torch_git_sha, build_type):
    """Join the four build identifiers into one underscore-delimited token."""
    return f"{ipex_build_version}_{ipex_git_sha}_{torch_git_sha}_{build_type}"
def check_code_fingerprint_in_file(file_path, fingerprint):
    """True when *file_path* exists and its contents contain *fingerprint*."""
    if not os.path.isfile(file_path):
        return False
    with open(file_path) as handle:
        return fingerprint in handle.read()
def create_version_files(
    base_dir,
    ipex_build_version,
    ipex_git_sha,
    torch_git_sha,
    gpu_onednn_sha,
    cpu_ideep_sha,
):
    """Regenerate _version.py and csrc/utils/version.h when the build changed.

    A "fingerprint" (version + git hashes + build type) embedded in each
    generated file is compared first, so an unchanged tree does not rewrite
    the files (which would otherwise trigger a needless rebuild).
    """
    print(
        "Building Intel Extension for PyTorch. Version: {}".format(ipex_build_version)
    )
    py_version_path = os.path.join(base_dir, PACKAGE_NAME, "_version.py")
    cpp_version_path = os.path.join(
        base_dir, PACKAGE_NAME, "..", "csrc", "utils", "version.h"
    )
    build_type_str = get_build_type()
    # Check code fingerprint to avoid non-modify rebuild.
    current_code_fingerprint = get_code_fingerprint(
        ipex_build_version, ipex_git_sha, torch_git_sha, build_type_str
    )
    b_same_fingerprint = check_code_fingerprint_in_file(
        py_version_path, current_code_fingerprint
    )
    if b_same_fingerprint is False:
        py_buffer = "# Autogenerated file, do not edit!\n"
        py_buffer += "# code fingerprint:\n"
        py_buffer += "# {}\n\n".format(current_code_fingerprint)
        py_buffer += '__version__ = "{}"\n'.format(ipex_build_version)
        py_buffer += '__ipex_gitrev__ = "{}"\n'.format(ipex_git_sha)
        # The torch hash is embedded only for non-Release builds.
        py_buffer += '__torch_gitrev__ = "{}"\n'.format(
            "" if build_type_str == "Release" else torch_git_sha
        )
        py_buffer += '__gpu_onednn_gitrev__ = "{}"\n'.format(gpu_onednn_sha)
        py_buffer += '__cpu_ideep_gitrev__ = "{}"\n'.format(cpu_ideep_sha)
        py_buffer += '__build_type__ = "{}"\n'.format(build_type_str)

        write_buffer_to_file(py_version_path, py_buffer)

    b_same_fingerprint = check_code_fingerprint_in_file(
        cpp_version_path, current_code_fingerprint
    )
    if b_same_fingerprint is False:
        c_buffer = "// Autogenerated file, do not edit!\n"
        c_buffer += "// clang-format off\n"
        c_buffer += "// code fingerprint: {}\n".format(current_code_fingerprint)
        c_buffer += "// clang-format on\n\n"
        c_buffer += "#pragma once\n"
        c_buffer += "#include <string>\n\n"
        c_buffer += "namespace torch_ipex {\n\n"
        c_buffer += "const std::string __version__()\n"
        c_buffer += '{{ return "{}"; }}\n\n'.format(ipex_build_version)
        c_buffer += "const std::string __gitrev__()\n"
        c_buffer += '{{ return "{}"; }}\n\n'.format(ipex_git_sha)
        c_buffer += "const std::string __torch_gitrev__()\n"
        c_buffer += '{{ return "{}"; }}\n\n'.format(torch_git_sha)
        c_buffer += "const std::string __build_type__()\n"
        c_buffer += '{{ return "{}"; }}\n\n'.format(build_type_str)
        c_buffer += "} // namespace torch_ipex\n"

        write_buffer_to_file(cpp_version_path, c_buffer)
def get_project_dir():
    """Absolute path of the repository root (the directory containing setup.py)."""
    return os.path.abspath(os.path.dirname(__file__))
def get_build_dir():
    """Root build output directory: <repo>/build."""
    return os.path.join(get_project_dir(), "build")
def get_build_type_dir():
    """Per-build-type directory (e.g. build/Release), created on demand."""
    build_type_dir = os.path.join(get_build_dir(), get_build_type())
    create_if_not_exist(build_type_dir)
    return build_type_dir
def get_package_base_dir():
    """Staging root where the python package tree is assembled."""
    return os.path.join(get_build_type_dir(), "packages")
def get_package_dir():
    """Staged package directory: .../packages/intel_extension_for_pytorch."""
    return os.path.join(get_package_base_dir(), PACKAGE_NAME)
def get_package_lib_dir():
    """lib/ subdirectory of the staged package, created on demand."""
    package_lib_dir = os.path.join(get_package_dir(), "lib")
    create_if_not_exist(package_lib_dir)
    return package_lib_dir
def get_ipex_cpu_dir():
    """Absolute path of the CPU C++ sources (csrc/cpu)."""
    cpu_root_dir = os.path.join(get_project_dir(), "csrc", "cpu")
    return os.path.abspath(cpu_root_dir)
def get_ipex_cpu_build_dir():
    """Build output directory for the CPU module, created on demand."""
    cpu_build_dir = os.path.join(get_build_type_dir(), "csrc", "cpu")
    create_if_not_exist(cpu_build_dir)
    return cpu_build_dir
def get_xpu_project_dir():
    """Absolute path of the XPU (GPU) project root — same as the repo root."""
    project_root_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(project_root_dir)
def get_xpu_project_build_dir():
    """Build output directory for the XPU (GPU) module, created on demand."""
    xpu_build_dir = os.path.join(get_build_type_dir(), "csrc", "gpu")
    create_if_not_exist(xpu_build_dir)
    return xpu_build_dir
def get_xpu_compliers():
    """Return the DPC++ compiler pair ("icx", "icpx") after checking both are on PATH.

    Raises RuntimeError when either compiler is missing.
    (Name keeps the historical "compliers" typo; callers depend on it.)
    """
    for compiler in ("icx", "icpx"):
        if shutil.which(compiler) is None:
            raise RuntimeError("Failed to find compiler path from OS PATH")
    return "icx", "icpx"
def get_ipex_python_dir():
    """Absolute path of the python-binding C++ sources (<pkg>/csrc)."""
    project_root_dir = os.path.dirname(__file__)
    python_root_dir = os.path.join(project_root_dir, PACKAGE_NAME, "csrc")
    return os.path.abspath(python_root_dir)
def get_ipex_python_build_dir():
    """Build output directory for the python bindings, created on demand."""
    python_build_dir = os.path.join(get_build_type_dir(), PACKAGE_NAME, "csrc")
    create_if_not_exist(python_build_dir)
    return python_build_dir
def get_ipex_cppsdk_build_dir():
    """Build output directory for the cppsdk package, created on demand."""
    cppsdk_build_dir = os.path.join(get_build_type_dir(), "csrc", "cppsdk")
    create_if_not_exist(cppsdk_build_dir)
    return cppsdk_build_dir
base_dir = os.path.dirname(os.path.abspath(__file__))

# Generate version info (ipex.__version__)
# Executed at import time: gathers git hashes and (re)writes _version.py and
# csrc/utils/version.h via create_version_files().
torch_git_sha = get_torch_git_head_sha()
ipex_git_sha = get_ipex_git_head_sha(base_dir)
ipex_build_version = get_build_version(ipex_git_sha)
ipex_gpu_onednn_git_sha = get_submodule_commit(base_dir, "third_party/oneDNN")
ipex_cpu_ideep_git_sha = get_submodule_commit(base_dir, "third_party/ideep")
create_version_files(
    base_dir,
    ipex_build_version,
    ipex_git_sha,
    torch_git_sha,
    ipex_gpu_onednn_git_sha,
    ipex_cpu_ideep_git_sha,
)
# global setup modules
class IPEXClean(distutils.command.clean.clean, object):
    """`setup.py clean`: remove build artifacts listed in .gitignore.

    Every path matched by a .gitignore wildcard that appears before the
    "# BEGIN NOT-CLEAN-FILES" marker is deleted, then the stock distutils
    clean runs.
    """

    def run(self):
        import glob
        import re

        with open(".gitignore", "r") as f:
            ignores = f.read()
            pat = re.compile(r"^#( BEGIN NOT-CLEAN-FILES )?")
            for wildcard in filter(None, ignores.split("\n")):
                match = pat.match(wildcard)
                if match:
                    if match.group(1):
                        # Marker is found and stop reading .gitignore.
                        break
                    # Ignore lines which begin with '#'.
                else:
                    # Files are removed directly; anything that is a directory
                    # falls back to a recursive rmtree.
                    for filename in glob.glob(wildcard):
                        try:
                            os.remove(filename)
                        except OSError:
                            shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)
def get_cpp_test_dir():
    """Absolute path of the CPU C++ unit-test sources (tests/cpu/cpp)."""
    repo_root = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(repo_root, "tests", "cpu", "cpp")
def get_cpp_test_build_dir():
    """Build output directory for the C++ unit tests, created on demand."""
    cpp_test_build_dir = os.path.join(get_build_type_dir(), "tests", "cpu", "cpp")
    create_if_not_exist(cpp_test_build_dir)
    return cpp_test_build_dir
def get_pybind11_abi_compiler_flags():
    """Reproduce PyTorch's pybind11 ABI defines as one compiler-flag string.

    Reads torch._C._PYBIND11_{COMPILER_TYPE,STDLIB,BUILD_ABI} (None values are
    skipped). Each emitted flag is followed by a space, including a trailing
    one, matching the original format.
    """
    flags = []
    for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
        pval = getattr(torch._C, f"_PYBIND11_{pname}")
        if pval is not None:
            flags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
    return "".join(flag + " " for flag in flags)
def _gen_build_cfg_from_cmake(
    cmake_exec, project_root_dir, cmake_args, build_dir, build_env
):
    """Run cmake in *build_dir* to generate the build configuration."""
    command = [cmake_exec, project_root_dir] + cmake_args
    check_call(command, cwd=build_dir, env=build_env)
def _build_project(build_args, build_dir, build_env, use_ninja=False):
    """Invoke the generated build system (ninja or make) in *build_dir*."""
    builder = "ninja" if use_ninja else "make"
    check_call([builder] + build_args, cwd=build_dir, env=build_env)
def define_build_options(args, **kwargs):
    """Append "-DKEY=VALUE" cmake defines to *args*, sorted by key.

    Entries whose value is None are skipped.
    """
    for key, value in sorted(kwargs.items()):
        if value is None:
            continue
        args.append("-D{}={}".format(key, value))
class IPEXCPPLibBuild(build_clib, object):
    """`build_clib` / `bdist_cppsdk`: cmake-configure and build all native code.

    Depending on the build target and BUILD_WITH_* env switches this
    configures and builds the GPU module, the CPU module (plus its C++ unit
    tests), the python binding library, or the cppsdk package, installing
    everything into the staged package directory.

    Bug fixed: the missing-self-extract-script path previously did
    ``raise "<str>"`` which is a TypeError in Python 3; it now raises
    RuntimeError with the same message.
    """

    def run(self):
        self.build_lib = os.path.relpath(get_package_dir())
        self.build_temp = os.path.relpath(get_build_type_dir())

        cmake_exec = get_cmake_command()
        if cmake_exec is None:
            # NOTE(review): build_clib does not define self.extensions; this
            # error path assumes extension metadata — confirm.
            raise RuntimeError(
                "CMake must be installed to build the following extensions: "
                + ", ".join(e.name for e in self.extensions)
            )
        self.cmake = cmake_exec

        if platform.system() == "Windows":
            raise RuntimeError("Intel Extension for PyTorch only supports Linux now.")

        project_root_dir = get_project_dir()
        build_type_dir = get_build_type_dir()

        ipex_python_dir = get_ipex_python_dir()
        ipex_python_build_dir = get_ipex_python_build_dir()

        ipex_cpu_dir = get_ipex_cpu_dir()
        ipex_cpu_build_dir = get_ipex_cpu_build_dir()

        ipex_xpu_dir = get_xpu_project_dir()
        ipex_xpu_build_dir = get_xpu_project_build_dir()

        ipex_cppsdk_build_dir = get_ipex_cppsdk_build_dir()
        cpack_out_file = os.path.abspath(
            os.path.join(build_type_dir, "IPEXCPackConfig.cmake")
        )
        self_extract_script = "gen_self_extract.sh"

        if _get_build_target() == "cppsdk":
            cmake_prefix_path = torch_install_prefix
        else:
            cmake_prefix_path = torch.utils.cmake_prefix_path

        # cmake cache variables shared by every module build below.
        build_option_common = {
            "CMAKE_BUILD_TYPE": get_build_type(),
            "CMAKE_INSTALL_LIBDIR": "lib",
            "CMAKE_PREFIX_PATH": cmake_prefix_path,
            "CMAKE_INSTALL_PREFIX": os.path.abspath(get_package_dir()),
            "CMAKE_PROJECT_VERSION": get_version_num(),
            "PYTHON_PLATFORM_INFO": platform.platform(),
            "PYTHON_INCLUDE_DIR": sysconfig.get_paths()["include"],
            "PYTHON_EXECUTABLE": sys.executable,
            "IPEX_PROJ_NAME": PACKAGE_NAME,
            "LIBIPEX_GITREV": ipex_git_sha,
            "LIBIPEX_VERSION": ipex_build_version,
        }

        build_with_cpu = True  # Default ON
        build_with_xpu = False  # Default OFF
        use_ninja = False
        sequential_build = False

        cmake_common_args = []
        my_env = os.environ.copy()
        # Forward every BUILD_*/USE_*/CMAKE_* env var as a cmake define, with
        # a few vars also toggling local build behavior.
        for var, val in my_env.items():
            if var.startswith(("BUILD_", "USE_", "CMAKE_")):
                if var == "CMAKE_PREFIX_PATH":
                    # XXX: Do NOT overwrite CMAKE_PREFIX_PATH. Append into the list, instead!
                    build_option_common[var] = ";".join(
                        [build_option_common[var], val.replace(":", ";")]
                    )
                    continue
                if var == "USE_NINJA" and val.upper() in ON_ENV_VAL:
                    use_ninja = True
                    cmake_common_args.append("-GNinja")
                    continue
                if var == "BUILD_STATS" and val.upper() in ON_ENV_VAL:
                    sequential_build = True
                    # fall through
                if var == "BUILD_WITH_XPU" and val.upper() in ON_ENV_VAL:
                    build_with_xpu = True
                    # fall through
                if var == "BUILD_WITH_CPU" and val.upper() in OFF_ENV_VAL:
                    build_with_cpu = False
                    # fall through
                build_option_common[var] = val

        define_build_options(cmake_common_args, **build_option_common)

        nproc = min(int(os.environ.get("MAX_JOBS", os.cpu_count())), os.cpu_count())
        if sequential_build:
            nproc = 1
            print("WARNING: Practice as sequential build with single process !")

        build_args = ["-j", str(nproc), "install"]
        if _check_env_flag("VERBOSE") and use_ninja:
            build_args.append("-v")

        if build_with_xpu:
            # Generate cmake for XPU module:
            if os.path.isdir(ipex_xpu_dir) is False:
                raise RuntimeError(
                    "It maybe CPU only branch, and it is not contains XPU code."
                )
            gpu_cc, gpu_cxx = get_xpu_compliers()
            build_option_gpu = {
                **build_option_common,
                "BUILD_MODULE_TYPE": "GPU",
                "CMAKE_C_COMPILER": gpu_cc,
                "CMAKE_CXX_COMPILER": gpu_cxx,
            }
            if get_build_type() == "Debug":
                build_option_gpu = {
                    **build_option_gpu,
                    "BUILD_SEPARATE_OPS": "ON",
                    "USE_SYCL_ASSERT": "ON",
                    "USE_ITT_ANNOTATION": "ON",
                }
            cmake_args_gpu = []
            define_build_options(cmake_args_gpu, **build_option_gpu)
            _gen_build_cfg_from_cmake(
                cmake_exec, project_root_dir, cmake_args_gpu, ipex_xpu_build_dir, my_env
            )

        if build_with_cpu:
            # Generate cmake for CPU module:
            build_option_cpu = {**build_option_common, "BUILD_MODULE_TYPE": "CPU"}
            cmake_args_cpu = []
            define_build_options(cmake_args_cpu, **build_option_cpu)
            _gen_build_cfg_from_cmake(
                cmake_exec, project_root_dir, cmake_args_cpu, ipex_cpu_build_dir, my_env
            )

            # Generate cmake for the CPP UT
            build_option_cpp_test = {
                **build_option_common,
                "PROJECT_DIR": project_root_dir,
                "PYTORCH_INSTALL_DIR": pytorch_install_dir,
                "CPP_TEST_BUILD_DIR": get_cpp_test_build_dir(),
            }
            cmake_args_cpp_test = []
            define_build_options(cmake_args_cpp_test, **build_option_cpp_test)
            _gen_build_cfg_from_cmake(
                cmake_exec,
                get_cpp_test_dir(),
                cmake_args_cpp_test,
                get_cpp_test_build_dir(),
                my_env,
            )

        if _get_build_target() in ["develop", "python"]:
            # Generate cmake for common python module:
            build_option_python = {
                **build_option_common,
                "BUILD_MODULE_TYPE": "PYTHON",
                "PYBIND11_CL_FLAGS": get_pybind11_abi_compiler_flags(),
            }
            cmake_args_python = []
            define_build_options(cmake_args_python, **build_option_python)
            _gen_build_cfg_from_cmake(
                cmake_exec,
                project_root_dir,
                cmake_args_python,
                ipex_python_build_dir,
                my_env,
            )
        elif _get_build_target() == "cppsdk":
            # Generate cmake for CPPSDK package:
            build_option_cppsdk = {
                **build_option_common,
                "BUILD_MODULE_TYPE": "CPPSDK",
                "CPACK_CONFIG_FILE": cpack_out_file,
                "CPACK_OUTPUT_DIR": build_type_dir,
                "LIBIPEX_GEN_SCRIPT": self_extract_script,
            }
            cmake_args_cppsdk = []
            define_build_options(cmake_args_cppsdk, **build_option_cppsdk)
            _gen_build_cfg_from_cmake(
                cmake_exec,
                project_root_dir,
                cmake_args_cppsdk,
                ipex_cppsdk_build_dir,
                my_env,
            )

        if build_with_xpu:
            # Build XPU module:
            _build_project(build_args, ipex_xpu_build_dir, my_env, use_ninja)

        if build_with_cpu:
            # Build CPU module:
            _build_project(build_args, ipex_cpu_build_dir, my_env, use_ninja)

            # Build the CPP UT
            _build_project(build_args, get_cpp_test_build_dir(), my_env, use_ninja)

        if _get_build_target() in ["develop", "python"]:
            # Build common python module:
            _build_project(build_args, ipex_python_build_dir, my_env, use_ninja)
        elif _get_build_target() == "cppsdk":
            # Build CPPSDK package:
            _build_project(build_args, ipex_cppsdk_build_dir, my_env, use_ninja)

            cpack_exec = get_cpack_command()
            check_call([cpack_exec, "--config", cpack_out_file])

            gen_script_path = os.path.abspath(
                os.path.join(build_type_dir, self_extract_script)
            )
            if not os.path.isfile(gen_script_path):
                # Bug fix: raising a str is a TypeError in Python 3 — wrap the
                # message in a real exception type.
                raise RuntimeError(
                    "Cannot find script to generate self-extract package in {}".format(
                        gen_script_path
                    )
                )
            check_call(gen_script_path, shell=True)

        # Copy the export library, header and cmake file to root/intel_extension_for_pytorch dir.
        # It is only copied in "develop" mode, which can save disk space in "install" mode.
        if _get_build_target() == "develop":
            ret = get_src_lib_and_dst()
            for src, dst in ret:
                self.copy_file(src, dst)
def get_src_lib_and_dst():
    """Collect (src, dst) pairs for built C++ artifacts.

    Finds libraries, headers and cmake files under the staged package tree and
    maps each onto the corresponding path under the repository's package
    directory (creating parent dirs for the destinations). Used by the
    "develop" target to mirror build outputs back into the source tree.
    """
    package_root = os.path.join(get_package_base_dir(), PACKAGE_NAME)
    # (subdir, glob) pairs — previously five near-identical glob.glob calls.
    # Order is preserved from the original implementation.
    artifact_globs = [
        ("lib", "**/*.so"),
        ("bin", "**/*.dll"),
        ("lib", "**/*.lib"),
        ("include", "**/*.h"),
        ("share", "**/*.cmake"),
    ]
    generated_cpp_files = []
    for subdir, pattern in artifact_globs:
        generated_cpp_files.extend(
            glob.glob(os.path.join(package_root, subdir, pattern), recursive=True)
        )

    ret = []
    for src in generated_cpp_files:
        dst = os.path.join(
            get_project_dir(),
            PACKAGE_NAME,
            os.path.relpath(src, package_root),
        )
        dst_path = Path(dst)
        if not dst_path.parent.exists():
            dst_path.parent.mkdir(parents=True, exist_ok=True)
        ret.append((src, dst))
    return ret
def get_src_py_and_dst():
    """Collect (src, dst) pairs mapping repo .py sources into the staged package tree.

    Destination parent directories are created as a side effect.
    """
    source_root = os.path.join(get_project_dir(), PACKAGE_NAME)
    pairs = []
    for src in glob.glob(os.path.join(source_root, "**/*.py"), recursive=True):
        dst = os.path.join(
            get_package_base_dir(),
            PACKAGE_NAME,
            os.path.relpath(src, source_root),
        )
        parent = Path(dst).parent
        if not parent.exists():
            parent.mkdir(parents=True, exist_ok=True)
        pairs.append((src, dst))
    return pairs
# python specific setup modules
class IPEXEggInfoBuild(egg_info, object):
    """egg_info command hook; currently only defers to the default behavior."""

    def finalize_options(self):
        super(IPEXEggInfoBuild, self).finalize_options()
class IPEXInstallCmd(install, object):
    """install command: point build_lib at the staged package tree before installing."""

    def finalize_options(self):
        self.build_lib = os.path.relpath(get_package_base_dir())
        return super(IPEXInstallCmd, self).finalize_options()
class IPEXPythonPackageBuild(build_py, object):
    """build_py command: copy repo .py sources into the staged package tree."""

    def run(self) -> None:
        ret = get_src_py_and_dst()
        for src, dst in ret:
            self.copy_file(src, dst)
        # NOTE(review): this invokes finalize_options(), not run(), on the
        # base class — the stock build_py copy step is therefore skipped.
        # Confirm this is intentional.
        super(IPEXPythonPackageBuild, self).finalize_options()
def make_relative_rpath(path):
    """Return the linker flag embedding a loader-relative rpath for *path*.

    Raises RuntimeError on Windows (not supported).
    """
    if IS_DARWIN:
        return "-Wl,-rpath,@loader_path/" + path
    elif IS_WINDOWS:
        # Bug fix: `raise "<str>"` is a TypeError in Python 3 — exceptions
        # must derive from BaseException.
        raise RuntimeError(
            "Windows support is in the plan. Intel Extension for PyTorch supports Linux now."
        )
    else:
        return "-Wl,-rpath,$ORIGIN/" + path
def pyi_module():
    """Define the main `_C` python binding extension (links intel-ext-pt-python)."""
    main_libraries = ["intel-ext-pt-python"]
    main_sources = [os.path.join(PACKAGE_NAME, "csrc", "_C.cpp")]

    include_dirs = [
        os.path.realpath("."),
        os.path.realpath(os.path.join(PACKAGE_NAME, "csrc")),
        os.path.join(pytorch_install_dir, "include"),
        os.path.join(pytorch_install_dir, "include", "torch", "csrc", "api", "include"),
    ]

    library_dirs = ["lib", os.path.join(pytorch_install_dir, "lib")]

    # NOTE: this warning-flag list is duplicated in pyi_isa_help_module();
    # keep the two in sync.
    extra_compile_args = [
        "-Wall",
        "-Wextra",
        "-Wno-strict-overflow",
        "-Wno-unused-parameter",
        "-Wno-missing-field-initializers",
        "-Wno-write-strings",
        "-Wno-unknown-pragmas",
        # This is required for Python 2 declarations that are deprecated in 3.
        "-Wno-deprecated-declarations",
        # Python 2.6 requires -fno-strict-aliasing, see
        # http://legacy.python.org/dev/peps/pep-3123/
        # We also depend on it in our code (even Python 3).
        "-fno-strict-aliasing",
        # Clang has an unfixed bug leading to spurious missing
        # braces warnings, see
        # https://bugs.llvm.org/show_bug.cgi?id=21629
        "-Wno-missing-braces",
    ]

    C_ext = CppExtension(
        "{}._C".format(PACKAGE_NAME),
        libraries=main_libraries,
        sources=main_sources,
        language="c++",
        extra_compile_args=extra_compile_args,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        # rpath points at the package-local lib/ dir next to the module.
        extra_link_args=[make_relative_rpath("lib")],
    )
    return C_ext
def pyi_isa_help_module():
    """Define the `_isa_help` extension exposing CPU ISA feature detection."""
    main_libraries = []
    main_sources = [
        os.path.join(PACKAGE_NAME, "csrc", "_isa_help_main.cpp"),
        os.path.join(PACKAGE_NAME, "csrc", "cpu", "isa_help", "isa_help.cpp"),
        os.path.join("csrc", "cpu", "isa", "cpu_feature.cpp"),
    ]

    include_dirs = [
        os.path.realpath("."),
        os.path.realpath(os.path.join("csrc", "cpu", "isa")),
        os.path.realpath(os.path.join(PACKAGE_NAME, "csrc")),
        os.path.join(pytorch_install_dir, "include"),
        os.path.join(pytorch_install_dir, "include", "torch", "csrc", "api", "include"),
    ]

    library_dirs = ["lib", os.path.join(pytorch_install_dir, "lib")]

    # NOTE: duplicates pyi_module()'s warning-flag list; keep the two in sync.
    extra_compile_args = [
        "-Wall",
        "-Wextra",
        "-Wno-strict-overflow",
        "-Wno-unused-parameter",
        "-Wno-missing-field-initializers",
        "-Wno-write-strings",
        "-Wno-unknown-pragmas",
        # This is required for Python 2 declarations that are deprecated in 3.
        "-Wno-deprecated-declarations",
        # Python 2.6 requires -fno-strict-aliasing, see
        # http://legacy.python.org/dev/peps/pep-3123/
        # We also depend on it in our code (even Python 3).
        "-fno-strict-aliasing",
        # Clang has an unfixed bug leading to spurious missing
        # braces warnings, see
        # https://bugs.llvm.org/show_bug.cgi?id=21629
        "-Wno-missing-braces",
    ]

    C_ext = CppExtension(
        "{}._isa_help".format(PACKAGE_NAME),
        libraries=main_libraries,
        sources=main_sources,
        language="c++",
        extra_compile_args=extra_compile_args,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        extra_link_args=[make_relative_rpath("lib")],
    )
    return C_ext
# Extension modules and command overrides handed to setup(); the python-only
# entries are appended later by fill_python_target_cmd().
ext_modules = []
cmdclass = {
    "build_clib": IPEXCPPLibBuild,
    "bdist_cppsdk": IPEXCPPLibBuild,
    "clean": IPEXClean,
}
def fill_python_target_cmd(cmdclass, ext_modules):
    """Register wheel-build commands and C extensions (python/develop targets only).

    Mutates *cmdclass* and *ext_modules* in place.
    """

    class IPEXExtBuild(BuildExtension):
        def run(self):
            # Native libraries must be built before the bindings can link.
            self.run_command("build_clib")

            self.build_lib = os.path.relpath(get_package_base_dir())
            self.build_temp = os.path.relpath(get_build_type_dir())
            self.library_dirs.append(os.path.relpath(get_package_lib_dir()))
            super(IPEXExtBuild, self).run()

    cmdclass["build_ext"] = IPEXExtBuild
    cmdclass["build_py"] = IPEXPythonPackageBuild
    cmdclass["egg_info"] = IPEXEggInfoBuild
    cmdclass["install"] = IPEXInstallCmd
    ext_modules.append(pyi_module())
    ext_modules.append(pyi_isa_help_module())
ext_modules.append(pyi_isa_help_module())
# Only python wheel builds need the extension/command wiring.
if _get_build_target() in ["develop", "python"]:
    fill_python_target_cmd(cmdclass, ext_modules)

# README.md becomes the PyPI long description.
long_description = ""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

# Console entry point for the launcher script.
entry_points = {
    "console_scripts": [
        "ipexrun = {}.launcher:main".format(PACKAGE_NAME),
    ]
}
# Package metadata plus the build wiring assembled above.
setup(
    name=PACKAGE_NAME,
    version=ipex_build_version,
    description="Intel® Extension for PyTorch*",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/intel/intel-extension-for-pytorch",
    author="Intel Corp.",
    install_requires=_build_installation_dependency(),
    packages=[PACKAGE_NAME],
    package_data={PACKAGE_NAME: ["*.so", "lib/*.so", "bin/*.dll", "lib/*.lib"]},
    zip_safe=False,
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    entry_points=entry_points,
    license="https://www.apache.org/licenses/LICENSE-2.0",
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
    ],
)
| 35,565 | 32.521206 | 97 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tools/linter/clang_format_all.py | #!/usr/bin/env python3
"""
A script that runs clang-format on all C/C++ files in CLANG_FORMAT_ALLOWLIST. There is
also a diff mode which simply checks if clang-format would make any changes, which is useful for
CI purposes.
If clang-format is not available, the script also downloads a platform-appropriate binary from
and S3 bucket and verifies it against a precommited set of blessed binary hashes.
"""
import argparse
import asyncio
import re
import os
import sys
from typing import List, Set
from .clang_format_utils import get_and_check_clang_format, CLANG_FORMAT_PATH
# Allowlist of directories to check. All files that in that directory
# (recursively) will be checked.
# If you edit this, please edit the allowlist in clang_format_ci.sh as well.
CLANG_FORMAT_ALLOWLIST = [
    "torch_ipex/csrc"
]

# Only files with names matching this regex will be formatted.
CPP_FILE_REGEX = re.compile(".*\\.(h|cpp|cc|c|hpp)$")
def get_allowlisted_files() -> Set[str]:
    """
    Parse CLANG_FORMAT_ALLOWLIST and resolve all directories.

    Returns the set of allowlist cpp source files.
    """
    return {
        os.path.join(root, filename)
        for top in CLANG_FORMAT_ALLOWLIST
        for root, _, filenames in os.walk(top)
        for filename in filenames
        if CPP_FILE_REGEX.match(filename)
    }
async def run_clang_format_on_file(
    filename: str,
    semaphore: asyncio.Semaphore,
    verbose: bool = False,
) -> None:
    """
    Format `filename` in place with clang-format.

    The semaphore bounds how many clang-format subprocesses run at once.
    """
    # -style=file picks up the closest .clang-format, -i formats the files inplace.
    cmd = f"{CLANG_FORMAT_PATH} -style=file -i {filename}"
    async with semaphore:
        proc = await asyncio.create_subprocess_shell(cmd)
        await proc.wait()
    if verbose:
        print(f"Formatted {filename}")
async def file_clang_formatted_correctly(
    filename: str,
    semaphore: asyncio.Semaphore,
    verbose: bool = False,
) -> bool:
    """
    Return True if clang-format would leave `filename` unchanged.
    """
    ok = True
    # -style=file picks up the closest .clang-format
    cmd = f"{CLANG_FORMAT_PATH} -style=file {filename}"
    async with semaphore:
        proc = await asyncio.create_subprocess_shell(
            cmd, stdout=asyncio.subprocess.PIPE
        )
        # clang-format writes the formatted file to stdout; read it back.
        stdout, _ = await proc.communicate()
        formatted_contents = stdout.decode()
        # Compare the formatted output against the file on disk.
        with open(filename) as source_file:
            original_contents = source_file.read()
        if formatted_contents != original_contents:
            ok = False
            if verbose:
                print(f"{filename} is not formatted correctly")
    return ok
async def run_clang_format(
    max_processes: int,
    diff: bool = False,
    verbose: bool = False,
) -> bool:
    """
    Run clang-format on all files in CLANG_FORMAT_ALLOWLIST that match CPP_FILE_REGEX.

    In diff mode no file is modified; the return value reports whether every
    file is already formatted correctly. In formatting mode files are
    rewritten in place and True is returned (unless the binary is missing).
    """
    # Check to make sure the clang-format binary exists.
    if not os.path.exists(CLANG_FORMAT_PATH):
        print("clang-format binary not found")
        return False
    # NOTE: the original built an `args` list here that was never used — the
    # actual command lines are assembled inside the per-file helpers. Removed.
    ok = True
    # Semaphore to bound the number of subprocesses that can be created at once to format files.
    semaphore = asyncio.Semaphore(max_processes)
    # Format files in parallel.
    if diff:
        for f in asyncio.as_completed([file_clang_formatted_correctly(f, semaphore, verbose) for f in get_allowlisted_files()]):
            ok &= await f
        if ok:
            print("All files formatted correctly")
        else:
            print("Some files not formatted correctly")
    else:
        await asyncio.gather(*[run_clang_format_on_file(f, semaphore, verbose) for f in get_allowlisted_files()])
    return ok
def parse_args(args: List[str]) -> argparse.Namespace:
    """
    Parse and return command-line arguments.
    """
    p = argparse.ArgumentParser(
        description="Execute clang-format on your working copy changes."
    )
    p.add_argument(
        "-d",
        "--diff",
        action="store_true",
        default=False,
        help="Determine whether running clang-format would produce changes",
    )
    p.add_argument("--verbose", "-v", action="store_true", default=False)
    p.add_argument("--max-processes", type=int, default=50,
                   help="Maximum number of subprocesses to create to format files in parallel")
    return p.parse_args(args)
def main(args: List[str]) -> bool:
    """
    Entry point: parse args, ensure clang-format is available, run it.

    Returns True when something went wrong — inverted so the result can be
    used directly as a process exit code (0 means success).
    """
    # Parse arguments.
    options = parse_args(args)
    # Get clang-format and make sure it is the right binary and it is in the right place.
    ok = get_and_check_clang_format(options.verbose)
    # Invoke clang-format on all files in the directories in the allowlist.
    if ok:
        # asyncio.run replaces the deprecated get_event_loop()/run_until_complete
        # pattern and also closes the loop when finished.
        ok = asyncio.run(run_clang_format(options.max_processes, options.diff, options.verbose))
    # We have to invert because False -> 0, which is the code to be returned if everything is okay.
    return not ok


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| 5,337 | 31.54878 | 128 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tools/linter/translate_annotations.py | #!/usr/bin/env python3
import argparse
import json
import re
import subprocess
from bisect import bisect_right
from collections import defaultdict
from typing import (Callable, DefaultDict, Generic, List, Optional, Pattern,
Sequence, TypeVar, cast)
from typing_extensions import TypedDict
class Hunk(TypedDict):
    # One @@-header's four numbers from a unified diff.
    old_start: int
    old_count: int
    new_start: int
    new_count: int


class Diff(TypedDict):
    # None when the file did not exist in the old revision (/dev/null).
    old_filename: Optional[str]
    hunks: List[Hunk]


# @@ -start,count +start,count @@
hunk_pattern = r'^@@\s+-(\d+)(?:,(\d+))?\s+\+(\d+)(?:,(\d+))?\s+@@'


def parse_diff(diff: str) -> Diff:
    """Parse a single-file unified diff into its old filename and hunk headers."""
    old_name: Optional[str] = None
    seen_name = False
    hunks: List[Hunk] = []
    for line in diff.splitlines():
        header = re.match(hunk_pattern, line)
        if not seen_name:
            # Hunk headers must not appear before the '---' line naming the file.
            assert header is None
            name_match = re.match(r'^--- (?:(?:/dev/null)|(?:a/(.*)))$', line)
            if name_match:
                seen_name = True
                (old_name,) = name_match.groups()
        elif header is not None:
            o_start, o_count, n_start, n_count = header.groups()
            # An omitted count in the header means a count of 1.
            hunks.append({
                'old_start': int(o_start),
                'old_count': int(o_count or '1'),
                'new_start': int(n_start),
                'new_count': int(n_count or '1'),
            })
    return {
        'old_filename': old_name,
        'hunks': hunks,
    }
T = TypeVar('T')
U = TypeVar('U')

# we want to use bisect.bisect_right to find the closest hunk to a given
# line number, but the bisect module won't have a key function until
# Python 3.10 https://github.com/python/cpython/pull/20556 so we make an
# O(1) wrapper around the list of hunks that makes it pretend to just be
# a list of line numbers
# https://gist.github.com/ericremoreynolds/2d80300dabc70eebc790
class KeyifyList(Generic[T, U]):
    """Lazy read-only view of `inner` with `key` applied on item access."""

    def __init__(self, inner: List[T], key: Callable[[T], U]) -> None:
        self.inner = inner
        self.key = key

    def __len__(self) -> int:
        return len(self.inner)

    def __getitem__(self, k: int) -> U:
        # Apply the key function on demand; nothing is precomputed.
        return self.key(self.inner[k])
def translate(diff: Diff, line_number: int) -> Optional[int]:
    """
    Map a line number in the new (post-diff) file back to the old file.

    Returns None when the line has no old counterpart — i.e. it lies inside
    lines introduced by a hunk — or when line_number is not positive.
    """
    if line_number < 1:
        return None
    hunks = diff['hunks']
    if not hunks:
        # No hunks: the file content is unchanged, line numbers map 1:1.
        return line_number
    # Present the hunks to bisect as a sequence of new-file start positions.
    # A pure deletion (new_count == 0) occupies no lines in the new file,
    # so its effective position is shifted by one.
    keyified = KeyifyList(
        hunks,
        lambda hunk: hunk['new_start'] + (0 if hunk['new_count'] > 0 else 1)
    )
    i = bisect_right(cast(Sequence[int], keyified), line_number)
    if i < 1:
        # line_number precedes every hunk; nothing above it changed.
        return line_number
    # Closest hunk starting at or before line_number.
    hunk = hunks[i - 1]
    # Offset of line_number past the end of that hunk's new-file span.
    # (`or 1` treats a zero count like an omitted count of 1.)
    d = line_number - (hunk['new_start'] + (hunk['new_count'] or 1))
    # Negative offset => the line falls inside the hunk's added region and
    # has no old-file equivalent; otherwise shift past the old-file span.
    return None if d < 0 else hunk['old_start'] + (hunk['old_count'] or 1) + d
# we use camelCase here because this will be output as JSON and so the
# field names need to match the group names from here:
# https://github.com/pytorch/add-annotations-github-action/blob/3ab7d7345209f5299d53303f7aaca7d3bc09e250/action.yml#L23
class Annotation(TypedDict):
    filename: str
    lineNumber: int
    columnNumber: int
    errorCode: str
    errorDesc: str


def parse_annotation(regex: Pattern[str], line: str) -> Optional[Annotation]:
    """Parse one line of linter output into an Annotation, or None if it
    does not match `regex` or has non-numeric line/column groups."""
    match = re.match(regex, line)
    if match is None:
        return None
    try:
        line_number = int(match.group('lineNumber'))
        column_number = int(match.group('columnNumber'))
    except ValueError:
        return None
    return {
        'filename': match.group('filename'),
        'lineNumber': line_number,
        'columnNumber': column_number,
        'errorCode': match.group('errorCode'),
        'errorDesc': match.group('errorDesc'),
    }
def translate_all(
    *,
    lines: List[str],
    regex: Pattern[str],
    commit: str
) -> List[Annotation]:
    """
    Parse linter output `lines` with `regex` and translate each resulting
    annotation's line number from the working tree back to `commit`.

    Annotations for files absent in `commit`, or whose lines were added
    after `commit`, are dropped.
    """
    # Group annotations by filename so `git diff-index` runs once per file.
    ann_dict: DefaultDict[str, List[Annotation]] = defaultdict(list)
    for line in lines:
        annotation = parse_annotation(regex, line)
        if annotation is not None:
            ann_dict[annotation['filename']].append(annotation)
    ann_list = []
    for filename, annotations in ann_dict.items():
        # --unified=0 keeps hunk headers exact (no context lines).
        raw_diff = subprocess.check_output(
            ['git', 'diff-index', '--unified=0', commit, filename],
            encoding='utf-8',
        )
        # An empty diff means the file is unchanged relative to `commit`.
        diff = parse_diff(raw_diff) if raw_diff.strip() else None
        # if there is a diff but it doesn't list an old filename, that
        # means the file is absent in the commit we're targeting, so we
        # skip it
        if not (diff and not diff['old_filename']):
            for annotation in annotations:
                line_number: Optional[int] = annotation['lineNumber']
                if diff:
                    # Report against the file's name as of `commit`;
                    # mutates the annotation dict in place.
                    annotation['filename'] = cast(str, diff['old_filename'])
                    line_number = translate(diff, cast(int, line_number))
                if line_number:
                    annotation['lineNumber'] = line_number
                    ann_list.append(annotation)
    return ann_list
def main() -> None:
    """Read linter output from --file, translate it against --commit,
    and print the resulting annotations as JSON."""
    argp = argparse.ArgumentParser()
    argp.add_argument('--file')
    argp.add_argument('--regex')
    argp.add_argument('--commit')
    opts = argp.parse_args()

    with open(opts.file, 'r') as f:
        lines = f.readlines()

    translated = translate_all(
        lines=lines,
        regex=opts.regex,
        commit=opts.commit
    )
    print(json.dumps(translated))


if __name__ == '__main__':
    main()
| 5,447 | 29.099448 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tools/linter/mypy_wrapper.py | #!/usr/bin/env python3
"""
This module is meant to be run as a script (see the docstring of main
below) and passed the filename of any Python file in this repo, to
typecheck that file using only the subset of our mypy configs that apply
to it.
Since editors (e.g. VS Code) can be configured to use this wrapper
script in lieu of mypy itself, the idea is that this can be used to get
inline mypy results while developing, and have at least some degree of
assurance that those inline results match up with what you would get
from running the mypy lint from the .github/workflows/lint.yml file.
See also these wiki pages:
- https://github.com/pytorch/pytorch/wiki/Guide-for-adding-type-annotations-to-PyTorch
- https://github.com/pytorch/pytorch/wiki/Lint-as-you-type
"""
import sys
from collections import defaultdict
from configparser import ConfigParser
from pathlib import Path, PurePath, PurePosixPath
from typing import Any, Dict, List, Optional, Set, Tuple
import mypy.api
# not part of the public API, but this is the easiest way to ensure that
# we agree with what mypy actually does
import mypy.config_parser
def read_config(config_path: Path) -> Set[str]:
    """
    Return the set of `files` in the `mypy` ini file at config_path.
    """
    parser = ConfigParser()
    parser.read(config_path)
    files_value = parser['mypy']['files']
    # Delegate to mypy's own splitter so globs expand exactly as mypy would.
    # hopefully on Windows this gives posix paths
    return set(mypy.config_parser.split_and_match_files(files_value))
# see tools/test/test_mypy_wrapper.py for examples of many of the
# following functions
def config_files() -> Dict[str, Set[str]]:
    """
    Return a dict from all our `mypy` ini filenames to their `files`.
    """
    return {
        str(ini_path): read_config(ini_path)
        for ini_path in Path().glob('mypy*.ini')
    }
def split_path(path: str) -> List[str]:
    """
    Split a relative (not absolute) POSIX path into its segments.
    """
    pure = PurePosixPath(path)
    # parents run from deepest to '.'; reverse and drop the leading '.'.
    ancestors = list(reversed(pure.parents))[1:]
    return [ancestor.name for ancestor in ancestors] + [pure.name]


# mypy doesn't support recursive types yet
# https://github.com/python/mypy/issues/731
# but if it did, the `Any` here would be `Union[Set[str], 'Trie']`,
# although that is not completely accurate: specifically, every `None`
# key must map to a `Set[str]`, and every `str` key must map to a `Trie`
Trie = Dict[Optional[str], Any]


def make_trie(configs: Dict[str, Set[str]]) -> Trie:
    """
    Return a trie from path prefixes to their `mypy` configs.

    Each trie layer is one POSIX path segment relative to the repo root;
    a `None` key maps to the (nonempty) set of config names in `configs`
    which explicitly include the path ending at that node.
    """
    trie: Trie = {}
    for ini, files in configs.items():
        for f in files:
            node = trie
            for segment in split_path(f):
                node = node.setdefault(segment, {})
            node.setdefault(None, set()).add(ini)
    return trie
def lookup(trie: Trie, filename: str) -> Set[str]:
    """
    Return the configs in `trie` that include a prefix of `filename`.

    A path is included by a config if any of its ancestors are included
    by the wildcard-expanded version of that config's `files`, so this
    walks `filename`'s segments down the trie and collects every config
    set found along the way.
    """
    found: Set[str] = set()
    node = trie
    for segment in split_path(filename):
        node = node.get(segment, {})
        found |= node.get(None, set())
    return found
def make_plan(
    *,
    configs: Dict[str, Set[str]],
    files: List[str]
) -> Dict[str, List[str]]:
    """
    Return a dict from config names to the files to run them with.

    The keys of the returned dict are a subset of the keys of `configs`;
    each value is a nonempty subset of `files` in the original order.
    """
    trie = make_trie(configs)
    plan = defaultdict(list)
    for filename in files:
        for config_name in lookup(trie, filename):
            plan[config_name].append(filename)
    return plan
def run(
    *,
    args: List[str],
    files: List[str],
) -> Tuple[int, List[str], List[str]]:
    """
    Return the exit code and list of output lines from running `mypy`.

    The given `args` are passed verbatim to `mypy`. The `files` (each of
    which must be an absolute path) are converted to relative paths
    (that is, relative to the root of this repo) and then classified
    according to which ones need to be run with each `mypy` config.
    Thus, `mypy` may be run zero, one, or multiple times, but it will be
    run at most once for each `mypy` config used by this repo.
    """
    repo_root = Path.cwd()
    plan = make_plan(configs=config_files(), files=[
        PurePath(f).relative_to(repo_root).as_posix() for f in files
    ])
    # Each mypy.api.run result is a (stdout, stderr, exit_status) triple,
    # as unpacked below.
    mypy_results = [
        mypy.api.run(
            # insert custom flags after args to avoid being overridden
            # by existing flags in args
            args + [
                # don't special-case the last line
                '--no-error-summary',
                f'--config-file={config}',
            ] + filtered
        )
        # by construction, filtered must be nonempty
        for config, filtered in plan.items()
    ]
    return (
        # assume all mypy exit codes are nonnegative
        # https://github.com/python/mypy/issues/6003
        max(
            [exit_code for _, _, exit_code in mypy_results],
            default=0,
        ),
        list(dict.fromkeys(  # remove duplicates, retain order
            item
            for stdout, _, _ in mypy_results
            for item in stdout.splitlines()
        )),
        [stderr for _, stderr, _ in mypy_results],
    )
def main(args: List[str]) -> None:
    """
    Run mypy on one Python file using the correct config file(s).

    Preconditions (satisfied, for instance, when an editor such as VS
    Code's Python extension invokes this script):

    - the cwd is set to the root of this cloned repo
    - args is a valid list of CLI arguments that could be passed to mypy
    - some of args are absolute paths to files to typecheck
    - all the other args are config flags for mypy, rather than files

    For VS Code, set "python.linting.mypyPath" to your environment's
    Python and "python.linting.mypyArgs" to
    ["${workspaceFolder}/tools/linter/mypy_wrapper.py"]. More generally,
    any editor works if it sets the cwd to the repo root, runs mypy on
    individual files via their absolute paths, and lets you choose the
    mypy executable.
    """
    repo_root = str(Path.cwd())
    # Arguments under the repo root are files to check; the rest are flags.
    file_args: List[str] = []
    flag_args: List[str] = []
    for arg in args:
        (file_args if arg.startswith(repo_root) else flag_args).append(arg)
    exit_code, mypy_issues, stderrs = run(args=flag_args, files=file_args)
    for issue in mypy_issues:
        print(issue)
    for stderr in stderrs:
        print(stderr, end='', file=sys.stderr)
    sys.exit(exit_code)


if __name__ == '__main__':
    main(sys.argv[1:])
| 7,511 | 32.99095 | 86 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tools/linter/clang_tidy/__main__.py | import argparse
import pathlib
import os
import shutil
import subprocess
import re
import sys
from typing import List
from tools.linter.clang_tidy.run import run
from tools.linter.clang_tidy.generate_build_files import generate_build_files
from tools.linter.install.clang_tidy import INSTALLATION_PATH, PLATFORM_TO_URL, PLATFORM_TO_HASH, OUTPUT_DIR
from tools.linter.install.download_bin import download
# Fail fast with a readable message when PyTorch is missing, since the
# DEFAULTS below and the clang-tidy run both depend on it being importable.
try:
    import torch
except ImportError as e:
    print('Unable to import torch. Error:')
    print('\t', e)
    print('You need to install pytorch first.')
    sys.exit(1)
def project_inc_dirs() -> List[str]:
    """Return the include directory of the active conda environment.

    NOTE: assumes the script runs inside a conda environment; raises
    KeyError when CONDA_PREFIX is not set.
    """
    conda_prefix = os.environ['CONDA_PREFIX']
    return [os.path.join(conda_prefix, "include")]
def clang_search_dirs() -> List[str]:
    """Return the system C++ include search paths of the first available compiler.

    Runs `<compiler> -E -x c++ - -v` on empty input and scrapes the
    "search starts here:" section from its stderr.
    """
    # Compilers are ordered based on fallback preference
    # We pick the first one that is available on the system
    compilers = ["clang", "gcc", "cpp", "cc"]
    compilers = [c for c in compilers if shutil.which(c) is not None]
    if len(compilers) == 0:
        raise RuntimeError(f"None of {compilers} were found")
    compiler = compilers[0]
    result = subprocess.run(
        [compiler, "-E", "-x", "c++", "-", "-v"],
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
    )
    stderr = result.stderr.decode().strip().split("\n")
    # Markers delimiting the include search path listing in compiler output.
    search_start = r"#include.*search starts here:"
    search_end = r"End of search list."
    append_path = False
    search_paths = []
    for line in stderr:
        if re.match(search_start, line):
            # The listing may have two headers ('#include "..."' and
            # '#include <...>'); start collecting at the first.
            if append_path:
                continue
            else:
                append_path = True
        elif re.match(search_end, line):
            break
        elif append_path:
            search_paths.append(line.strip())
    return search_paths
# Directory where the installed torch package lives.
# NOTE(review): PYTORCH_PATH is not referenced anywhere in this module —
# presumably kept for external use; confirm before removing.
PYTORCH_PATH = os.path.dirname(os.path.abspath(torch.__file__))

# Default values for the command-line options defined in parse_args().
DEFAULTS = {
    "glob": [
        # The negative filters below are to exclude files that include onnx_pb.h or
        # caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
        # FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
        # in a follow up PR.
        # /torch/csrc/generic/*.cpp is excluded because those files aren't actually built.
        # deploy/interpreter files are excluded due to using macros and other techniquies
        # that are not easily converted to accepted c++
        "-torch/csrc/jit/passes/onnx/helper.cpp",
        "-torch/csrc/jit/passes/onnx/shape_type_inference.cpp",
        "-torch/csrc/jit/serialization/onnx.cpp",
        "-torch/csrc/jit/serialization/export.cpp",
        "-torch/csrc/jit/serialization/import.cpp",
        "-torch/csrc/jit/serialization/import_legacy.cpp",
        "-torch/csrc/onnx/init.cpp",
        "-torch/csrc/cuda/nccl.*",
        "-torch/csrc/cuda/python_nccl.cpp",
        "-torch/csrc/autograd/FunctionsManual.cpp",
        "-torch/csrc/generic/*.cpp",
        "-torch/csrc/jit/codegen/cuda/runtime/*",
        "-torch/csrc/deploy/interpreter/interpreter.cpp",
        "-torch/csrc/deploy/interpreter/interpreter.h",
        "-torch/csrc/deploy/interpreter/interpreter_impl.h",
        "-torch/csrc/deploy/interpreter/test_main.cpp",
    ],
    "paths": ["torch_ipex/csrc"],
    # Header search paths: OpenMP headers, the conda env, and the host
    # compiler's own system include directories.
    "include-dir": ["/usr/lib/llvm-11/include/openmp"] + project_inc_dirs() + clang_search_dirs(),
    "clang-tidy-exe": INSTALLATION_PATH,
    # May be replaced by update_defaults() with an existing build tree.
    "compile-commands-dir": "build/Release",
    "config-file": ".clang-tidy-oss",
    "disable-progress-bar": False,
}
def parse_args() -> argparse.Namespace:
    """Build and parse the clang-tidy wrapper command line (reads sys.argv)."""
    p = argparse.ArgumentParser(description="clang-tidy wrapper script")
    # Tool location and configuration.
    p.add_argument(
        "-e",
        "--clang-tidy-exe",
        default=DEFAULTS["clang-tidy-exe"],
        help="Path to clang-tidy executable",
    )
    # File selection.
    p.add_argument(
        "-g",
        "--glob",
        action="append",
        default=DEFAULTS["glob"],
        help="Only lint files that match these glob patterns "
        "(see documentation for `fnmatch` for supported syntax)."
        "If a pattern starts with a - the search is negated for that pattern.",
    )
    p.add_argument(
        "-x",
        "--regex",
        action="append",
        default=[],
        help="Only lint files that match these regular expressions (from the start of the filename). "
        "If a pattern starts with a - the search is negated for that pattern.",
    )
    p.add_argument(
        "-c",
        "--compile-commands-dir",
        default=DEFAULTS["compile-commands-dir"],
        help="Path to the folder containing compile_commands.json",
    )
    p.add_argument(
        "--diff-file",
        help="File containing diff to use for determining files to lint and line filters",
    )
    p.add_argument(
        "-p",
        "--paths",
        nargs="+",
        default=DEFAULTS["paths"],
        help="Lint only the given paths (recursively)",
    )
    # Execution behavior.
    p.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        help="Only show the command to be executed, without running it",
    )
    p.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    p.add_argument("-q", "--quiet", action="store_true", help="Don't print output")
    p.add_argument(
        "--config-file",
        default=DEFAULTS["config-file"],
        help="Path to a clang-tidy config file. Defaults to '.clang-tidy'.",
    )
    p.add_argument(
        "--print-include-paths",
        action="store_true",
        help="Print the search paths used for include directives",
    )
    p.add_argument(
        "-I",
        "--include-dir",
        action="append",
        default=DEFAULTS["include-dir"],
        help="Add the specified directory to the search path for include files",
    )
    p.add_argument(
        "-s",
        "--suppress-diagnostics",
        action="store_true",
        help="Add NOLINT to suppress clang-tidy violations",
    )
    p.add_argument(
        "--disable-progress-bar",
        action="store_true",
        default=DEFAULTS["disable-progress-bar"],
        help="Disable the progress bar",
    )
    p.add_argument(
        "extra_args", nargs="*", help="Extra arguments to forward to clang-tidy"
    )
    return p.parse_args()
def update_defaults() -> None:
    """Point DEFAULTS["compile-commands-dir"] at the first existing
    compile_commands.json among the known build-type directories."""
    for build_type in ["Release", "RelWithDebInfo", "Debug"]:
        candidate = os.path.join("build", build_type, "compile_commands.json")
        if pathlib.Path(candidate).exists():
            DEFAULTS["compile-commands-dir"] = candidate
            break
    else:
        # No build tree found: clang-tidy needs compile commands to run.
        print("Error: Clang-tidy cannot get the compile_commands.json file. You can build the code first.")
def main() -> None:
    """Entry point: resolve defaults, ensure clang-tidy exists, then run it."""
    update_defaults()
    options = parse_args()
    if not pathlib.Path("build/Release").exists():
        generate_build_files()
    # Check if clang-tidy executable exists
    exists = os.access(options.clang_tidy_exe, os.X_OK)
    if not exists:
        # Try downloading the custom clang-tidy build before giving up.
        ok = download("clang-tidy", OUTPUT_DIR, PLATFORM_TO_URL, PLATFORM_TO_HASH)
        if not ok:
            msg = (
                f"Could not find '{options.clang_tidy_exe}'\n"
                + "We provide a custom build of clang-tidy that has additional checks.\n"
                + "You can install it by running:\n"
                + "$ python3 tools/linter/install/clang_tidy.py"
            )
            raise RuntimeError(msg)
    result, _ = run(options)
    # Propagate clang-tidy's exit status as this script's exit code.
    sys.exit(result.returncode)


if __name__ == "__main__":
    main()
| 7,789 | 33.622222 | 108 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/graph_capture.py | import torch
import torchvision.models as models
model = models.resnet50(weights='ResNet50_Weights.DEFAULT')
model.eval()
data = torch.rand(1, 3, 224, 224)
#################### code changes ####################
import intel_extension_for_pytorch as ipex
model = ipex.optimize(model, graph_mode=True)
######################################################
with torch.no_grad():
model(data)
| 394 | 25.333333 | 59 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/int8_recipe_tuning/imagenet_autotune.py | import os
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import intel_extension_for_pytorch as ipex
# All lowercase, callable, non-dunder attributes of torchvision.models,
# i.e. the architecture names selectable via --arch.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
class AverageMeter(object):
    """Tracks the latest value and a running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        # Format spec fragment used when rendering val/avg in __str__.
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-prints a batch counter followed by a list of AverageMeters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for the given batch index."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries.extend(str(meter) for meter in self.meters)
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Width of the current-batch field matches the digits of the total,
        # e.g. 100 batches -> '[{:3d}/100]'.
        width = len(str(num_batches // 1))
        field = '{:' + str(width) + 'd}'
        return '[' + field + '/' + field.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Transposed top-k class indices: one column per sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        # For each k, count hits within the first k rows and convert to %.
        return [
            correct[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch_size)
            for k in topk
        ]
def validate(val_loader, model, criterion, args):
    """Calibrate, autotune, convert and evaluate an INT8 version of `model`
    on `val_loader`, printing accuracy via eval_func at each stage."""
    # switch to evaluate mode
    model.eval()

    def eval_func(model):
        # Accuracy-evaluation callback handed to ipex autotune; returns
        # top-1 accuracy (%) over the whole loader.
        batch_time = AverageMeter('Time', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        top1 = AverageMeter('Acc@1', ':6.2f')
        top5 = AverageMeter('Acc@5', ':6.2f')
        number_iter = len(val_loader)
        progress = ProgressMeter(
            number_iter,
            [batch_time, losses, top1, top5],
            prefix='Test: ')
        print('Evaluating RESNET: total Steps: {}'.format(number_iter))
        with torch.no_grad():
            for i, (images, target) in enumerate(val_loader):
                # channels_last layout for better CPU performance.
                images = images.contiguous(memory_format=torch.channels_last)
                output = model(images)
                loss = criterion(output, target)
                # measure accuracy and record loss
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                losses.update(loss.item(), images.size(0))
                top1.update(acc1[0], images.size(0))
                top5.update(acc5[0], images.size(0))
                if i % args.print_freq == 0:
                    progress.display(i)
            # TODO: this should also be done with the ProgressMeter
            print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
                .format(top1=top1, top5=top5))
        return top1.avg.item()

    print(".........runing calibration step.........")
    # Symmetric per-tensor activation / per-channel weight observers.
    from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig
    qconfig = QConfig(
        activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_symmetric, dtype=torch.qint8),
        weight= PerChannelMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric))
    x = torch.randn(1, 3, 224, 224)
    prepared_model = ipex.quantization.prepare(model, qconfig, x, inplace=True)
    # Run 5 batches through the prepared model to collect observer stats.
    with torch.no_grad():
        for i, (images, target) in enumerate(val_loader):
            images = images.contiguous(memory_format=torch.channels_last)
            prepared_model(images)
            if i == 4:
                break
    print(".........calibration step done.........")
    print(".........runing autotuning step.........")
    tuned_model = ipex.quantization.autotune(prepared_model, val_loader, eval_func, sampling_sizes=[300])
    print(".........autotuning step done.........")
    print(".........runing int8 inference.........")
    converted_model = ipex.quantization.convert(tuned_model)
    # Trace/freeze on the first batch, run twice to warm up, then evaluate.
    with torch.no_grad():
        for i, (images, target) in enumerate(val_loader):
            images = images.contiguous(memory_format=torch.channels_last)
            traced_model = torch.jit.trace(converted_model, images)
            traced_model = torch.jit.freeze(traced_model)
            traced_model(images)
            traced_model(images)
            break
    eval_func(traced_model)
    return
def main(args):
    """Build the pretrained model and the ImageNet val loader, then run validate()."""
    print("=> using pre-trained model '{}'".format(args.arch))
    model = models.__dict__[args.arch](pretrained=True)
    assert args.data != None, "please set dataset path if you want to using real data"
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet normalization constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    criterion = torch.nn.CrossEntropyLoss()
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    validate(val_loader, model, criterion, args)
if __name__ == '__main__':
    # CLI mirrors the upstream PyTorch ImageNet example's argument set.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('data', metavar='DIR', nargs='?', default='imagenet',
                        help='path to dataset (default: imagenet)')
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                        choices=model_names,
                        help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet18)')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('-b', '--batch-size', default=256, type=int,
                        metavar='N',
                        help='mini-batch size (default: 256), this is the total '
                        'batch size of all GPUs on the current node when '
                        'using Data Parallel or Distributed Data Parallel')
    parser.add_argument('-p', '--print-freq', default=10, type=int,
                        metavar='N', help='print frequency (default: 10)')
    main(parser.parse_args())
| 6,397 | 34.743017 | 105 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/int8_recipe_tuning/int8_autotune.py | import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
import intel_extension_for_pytorch as ipex
########################################################################
# Reference for training portion:
# https://pytorch.org/tutorials/beginner/basics/quickstart_tutorial.html
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)

# Download test data from open datasets.
test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)

batch_size = 64

# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
# Test loader uses batch_size=1; it is reused later as the calibration and
# evaluation loader for quantization.
test_dataloader = DataLoader(test_data, batch_size=1)

# Print the shape of one batch, then stop.
for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break
class NeuralNetwork(nn.Module):
    """A small MLP classifier for 28x28 single-channel images (10 classes)."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        # Two hidden layers of width 512 with ReLU activations.
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        # Flatten (N, 1, 28, 28) -> (N, 784) and return raw class logits.
        return self.linear_relu_stack(self.flatten(x))
# Instantiate the classifier plus its loss and SGD optimizer.
model = NeuralNetwork()
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    """Run one epoch of training over `dataloader`, logging every 100 batches."""
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        # Forward pass and loss.
        loss = loss_fn(model(X), y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
# Apply IPEX optimizations to both model and optimizer before training.
model, optimizer = ipex.optimize(model, optimizer=optimizer)

epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
print("Done!")
########################################################################

################################ QUANTIZE ##############################
# Switch to inference mode before static quantization/calibration.
model.eval()
def evaluate(dataloader, model):
    """Return top-1 accuracy of `model` over `dataloader` as a fraction in [0, 1]."""
    size = len(dataloader.dataset)
    model.eval()
    accuracy = 0
    with torch.no_grad():
        for X, y in dataloader:
            # Count samples whose argmax prediction matches the label.
            accuracy += (model(X).argmax(1) == y).type(torch.float).sum().item()
    return accuracy / size
# prepare model, do conv+bn folding, and init model quant_state.
qconfig = ipex.quantization.default_static_qconfig
data = torch.randn(1, 1, 28, 28)
prepared_model = ipex.quantization.prepare(model, qconfig, example_inputs=data, inplace=False)

######################## recipe tuning with INC ########################
# NOTE(review): `eval` shadows the builtin; it is only used locally here
# as the accuracy callback handed to autotune.
def eval(prepared_model):
    accu = evaluate(test_dataloader, prepared_model)
    return float(accu)

# print(eval(prepared_model))
tuned_model = ipex.quantization.autotune(prepared_model, test_dataloader, eval, sampling_sizes=[100],
        accuracy_criterion={'relative': .01}, tuning_time=0)
########################################################################

# run tuned model
convert_model = ipex.quantization.convert(tuned_model)
with torch.no_grad():
    traced_model = torch.jit.trace(convert_model, data)
    traced_model = torch.jit.freeze(traced_model)
    traced_model(data)

# save tuned qconfig file
tuned_model.save_qconf_summary(qconf_summary = "tuned_conf.json")
| 3,634 | 26.961538 | 101 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/graph_optimization/folding.py | import torch
import torchvision.models as models
# Trace a pretrained ResNet-50 and show its optimized (folded) graph.
model = models.resnet50(weights='ResNet50_Weights.DEFAULT')
model.eval()
x = torch.randn(4, 3, 224, 224)
with torch.no_grad():
    model = torch.jit.trace(model, x, check_trace=False).eval()
# Fold the BatchNormalization and propagate constant
# FIX: torch.jit.freeze returns a new frozen module rather than mutating
# its argument, so the result must be reassigned — otherwise the graph
# printed below is the unfrozen one.
model = torch.jit.freeze(model)
# Print the graph
print(model.graph_for(x))
| 369 | 25.428571 | 61 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/graph_optimization/int8.py | import torch
import torchvision.models as models
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.quantization import prepare, convert

# construct the model
model = models.resnet50(weights='ResNet50_Weights.DEFAULT')
qconfig = ipex.quantization.default_static_qconfig
model.eval()
example_inputs = torch.rand(1, 3, 224, 224)
prepared_model = prepare(model, qconfig, example_inputs=example_inputs, inplace=False)
##### Example Dataloader #####
import torchvision
DOWNLOAD = True
DATA = 'datasets/cifar10/'
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_dataset = torchvision.datasets.CIFAR10(
    root=DATA,
    train=True,
    transform=transform,
    download=DOWNLOAD,
)
calibration_data_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=128
)
# Run representative data through the prepared model so the observers can
# collect activation statistics for static quantization.
with torch.no_grad():
    for batch_idx, (d, target) in enumerate(calibration_data_loader):
        print(f'calibrated on batch {batch_idx} out of {len(calibration_data_loader)}')
        prepared_model(d)
##############################
convert_model = convert(prepared_model)
with torch.no_grad():
    traced_model = torch.jit.trace(convert_model, example_inputs)
    traced_model = torch.jit.freeze(traced_model)

traced_model.save("quantized_model.pt")

# Deployment
quantized_model = torch.jit.load("quantized_model.pt")
quantized_model = torch.jit.freeze(quantized_model.eval())
# Fix: the model was calibrated and traced with 224x224 inputs; the original
# fed a 1x3x244x244 tensor here, which was a typo.
images = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    output = quantized_model(images)
print('fin')
| 1,636 | 29.314815 | 86 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/graph_optimization/fp32_bf16.py | import torch
import torchvision.models as models
# Import the Intel Extension for PyTorch
import intel_extension_for_pytorch as ipex

model = models.resnet50(weights='ResNet50_Weights.DEFAULT')
model.eval()
# Apply some fusions at the front end
model = ipex.optimize(model, dtype=torch.float32)
x = torch.randn(4, 3, 224, 224)
with torch.no_grad():
    model = torch.jit.trace(model, x, check_trace=False).eval()
    # Fold the BatchNormalization and propagate constant.
    # Fix: torch.jit.freeze is not in-place — its return value must be
    # re-assigned or the folded graph is discarded before printing.
    model = torch.jit.freeze(model)
    # Print the graph
    print(model.graph_for(x))
| 543 | 26.2 | 61 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/features/hypertune/resnet50.py | import torch
import torchvision.models as models
def inference(model, data):
    """Benchmark ``model(data)`` and print the mean forward latency in ms.

    Prints the ``@hypertune`` objective-declaration line followed by the bare
    numeric objective value, which is what the hypertune driver parses.
    """
    with torch.no_grad():
        # warm up
        for _ in range(100):
            model(data)
        # measure
        import time
        measure_iter = 100
        start = time.time()
        for _ in range(measure_iter):
            model(data)
        end = time.time()
        duration = (end-start)*1000  # total wall time, milliseconds
        latency = duration/measure_iter
        # Fix: the original also computed `throughput = measure_iter/duration`,
        # which was never used and had iterations-per-MILLIsecond units; the
        # unused `output =` binding in the loop is dropped too.
        print("@hypertune {'name': 'latency (ms)'}")  # objective declaration parsed by hypertune
        print(latency)  # objective value: bare int/float to be minimized

def main(args):
    """Build ResNet-50, apply the requested IPEX dtype/quantization path, benchmark."""
    model = models.resnet50(pretrained=False)
    model.eval()
    data = torch.rand(1, 3, 224, 224)

    import intel_extension_for_pytorch as ipex
    if args.dtype == 'float32':
        model = ipex.optimize(model, dtype=torch.float32)
    elif args.dtype == 'bfloat16':
        model = ipex.optimize(model, dtype=torch.bfloat16)
    else: # int8
        from intel_extension_for_pytorch.quantization import prepare, convert
        qconfig = ipex.quantization.default_static_qconfig
        model = prepare(model, qconfig, example_inputs=data, inplace=False)
        # calibration
        n_iter = 100
        for i in range(n_iter):
            model(data)
        model = convert(model)

    # autocast is only enabled for the bfloat16 path.
    with torch.cpu.amp.autocast(enabled=args.dtype=='bfloat16'):
        if args.torchscript:
            with torch.no_grad():
                model = torch.jit.trace(model, data)
                model = torch.jit.freeze(model)
        inference(model, data)

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dtype', default='float32', choices=['float32', 'bfloat16', 'int8'])
    parser.add_argument("--torchscript", default=False, action="store_true")
    main(parser.parse_args())
| 1,904 | 28.307692 | 162 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/training/single_instance_training_bf16.py | import torch
import torchvision
import intel_extension_for_pytorch as ipex

# Training hyper-parameters / dataset location.
LR = 0.001
DOWNLOAD = True
DATA = 'datasets/cifar10/'
# Resize CIFAR-10 to ResNet's 224x224 input and normalize to [-1, 1].
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_dataset = torchvision.datasets.CIFAR10(
    root=DATA,
    train=True,
    transform=transform,
    download=DOWNLOAD,
)
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=128
)
model = torchvision.models.resnet50()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr = LR, momentum=0.9)
model.train()
# IPEX optimizes both model and optimizer together for bf16 training;
# this must happen after model.train() and before the loop.
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

for batch_idx, (data, target) in enumerate(train_loader):
    optimizer.zero_grad()
    # Only the forward pass runs under autocast; backward is outside it.
    with torch.cpu.amp.autocast():
        output = model(data)
        loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    print(batch_idx)
# Persist weights and optimizer state for later resumption.
torch.save({
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, 'checkpoint.pth')
| 1,148 | 25.72093 | 82 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/training/single_instance_training_fp32.py | import torch
import torchvision
import intel_extension_for_pytorch as ipex

# Training hyper-parameters / dataset location.
LR = 0.001
DOWNLOAD = True
DATA = 'datasets/cifar10/'
# Resize CIFAR-10 to ResNet's 224x224 input and normalize to [-1, 1].
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_dataset = torchvision.datasets.CIFAR10(
    root=DATA,
    train=True,
    transform=transform,
    download=DOWNLOAD,
)
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=128
)
model = torchvision.models.resnet50()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr = LR, momentum=0.9)
model.train()
# IPEX optimizes both model and optimizer together (fp32 here);
# must happen after model.train() and before the training loop.
model, optimizer = ipex.optimize(model, optimizer=optimizer)

for batch_idx, (data, target) in enumerate(train_loader):
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    print(batch_idx)
# Persist weights and optimizer state for later resumption.
torch.save({
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, 'checkpoint.pth')
| 1,090 | 24.97619 | 70 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/cpp/model_gen.py | #!/usr/bin/env python
# encoding: utf-8
import torch
import torchvision

# Export a TorchScript ResNet-50 for the C++ inference example.
model = torchvision.models.resnet50(pretrained=True)
model.eval()
# Renamed from `input`, which shadowed the builtin.
example_input = torch.rand(1, 3, 224, 224)
model = torch.jit.trace(model, example_input, check_trace=False)
model.save('resnet50.pt')
# Fix: the original message read "save mode to:"; the artifact is the model.
print("save model to: resnet50.pt")
| 296 | 18.8 | 56 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_fast_inference_bf16.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

# Dummy token-id batch drawn from the model's own vocabulary size.
n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])
torch.manual_seed(43)

#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.fast_bert(bert, dtype=torch.bfloat16)
######################################################

with torch.no_grad():
    bert(input_ids)
| 516 | 24.85 | 63 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_imperative_mode_inference_fp32.py | import torch
import torchvision.models as models

# Pretrained ResNet-50 in eval mode.
net = models.resnet50(weights='ResNet50_Weights.DEFAULT')
net.eval()
# Dummy NCHW image batch for the forward pass below.
sample = torch.rand(1, 3, 224, 224)

#################### code changes ####################
import intel_extension_for_pytorch as ipex
net = ipex.optimize(net)
######################################################

with torch.no_grad():
    net(sample)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_torchdynamo_mode_inference_fp32.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])

# Experimental Feature
#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.optimize(bert)
bert = torch.compile(bert, backend="ipex")
######################################################

with torch.no_grad():
    bert(input_ids)
| 539 | 24.714286 | 63 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_torchscript_mode_inference_fp32.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])

#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.optimize(bert)
######################################################

with torch.no_grad():
    # Trace on a second random batch, freeze, then run the first batch.
    trace_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])
    bert = torch.jit.trace(bert, (trace_ids,), check_trace=False, strict=False)
    bert = torch.jit.freeze(bert)
    bert(input_ids)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_torchscript_mode_inference_bf16.py | import torch
import torchvision.models as models

# Pretrained ResNet-50 in eval mode.
net = models.resnet50(weights='ResNet50_Weights.DEFAULT')
net.eval()
sample = torch.rand(1, 3, 224, 224)

#################### code changes ####################
import intel_extension_for_pytorch as ipex
net = ipex.optimize(net, dtype=torch.bfloat16)
######################################################

with torch.no_grad(), torch.cpu.amp.autocast():
    # Trace and freeze inside autocast, then run the dummy batch.
    net = torch.jit.trace(net, torch.rand(1, 3, 224, 224))
    net = torch.jit.freeze(net)
    net(sample)
| 521 | 28 | 60 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_general_inference_script.py | import torch
from transformers import BertModel
def inference(model, data):
    """Warm the model up, then report the mean forward latency in ms."""
    with torch.no_grad():
        # Warm-up passes so one-time costs do not skew the measurement.
        for _ in range(100):
            model(data)
        # Timed passes.
        import time
        n_runs = 100
        t0 = time.time()
        for _ in range(n_runs):
            model(data)
        t1 = time.time()
        print('Inference took {:.2f} ms in average'.format((t1-t0)/n_runs*1000))
def main(args):
    """Load a BERT model, apply the requested IPEX precision path, benchmark it.

    args: argparse namespace with model_name, dtype, torchscript, quantization.
    """
    model = BertModel.from_pretrained(args.model_name)
    model.eval()
    # Dummy token-id batch sized from the model's vocabulary.
    vocab_size = model.config.vocab_size
    batch_size = 1
    seq_length = 512
    data = torch.randint(vocab_size, size=[batch_size, seq_length])
    import intel_extension_for_pytorch as ipex
    if args.dtype == 'float32':
        model = ipex.optimize(model, dtype=torch.float32)
    elif args.dtype == 'bfloat16':
        model = ipex.optimize(model, dtype=torch.bfloat16)
    else: # int8
        from intel_extension_for_pytorch.quantization import prepare, convert
        if args.quantization == 'static':
            qconfig = ipex.quantization.default_static_qconfig
            model = prepare(model, qconfig, example_inputs=data, inplace=False)
            # calibration: run dummy data so observers collect statistics
            n_iter = 100
            for i in range(n_iter):
                model(data)
            model = convert(model)
        else:
            # Dynamic quantization needs no calibration pass.
            qconfig = ipex.quantization.default_dynamic_qconfig
            model = prepare(model, qconfig, example_inputs=data)
            model = convert(model)
    # autocast only takes effect on the bfloat16 path.
    with torch.cpu.amp.autocast(enabled=args.dtype=='bfloat16'):
        if args.torchscript:
            with torch.no_grad():
                model = torch.jit.trace(model, data, check_trace=False, strict=False)
                model = torch.jit.freeze(model)
        inference(model, data)
# CLI entry point: precision, scripting mode, and quantization flavor.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default="bert-base-multilingual-cased")
    parser.add_argument('--dtype', default='float32', choices=['float32', 'bfloat16', 'int8'])
    parser.add_argument("--torchscript", default=False, action="store_true")
    parser.add_argument('--quantization', default='static', choices=['static', 'dynamic'])
    main(parser.parse_args())
| 2,049 | 29.147059 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_general_inference_script.py | import torch
import torchvision.models as models
def inference(model, data):
    """Warm the model up, then report the mean forward latency in ms."""
    with torch.no_grad():
        # Warm-up passes so one-time costs do not skew the measurement.
        for _ in range(100):
            model(data)
        # Timed passes.
        import time
        t0 = time.time()
        for _ in range(100):
            result = model(data)
        t1 = time.time()
        print('Inference took {:.2f} ms in average'.format((t1-t0)/100*1000))
def main(args):
    """Build a randomly-initialized ResNet-50, apply the requested IPEX
    precision path (fp32 / bf16 / static int8), and benchmark it.
    """
    model = models.resnet50(pretrained=False)
    model.eval()
    data = torch.rand(1, 3, 224, 224)
    import intel_extension_for_pytorch as ipex
    # channels_last memory layout for both model and input.
    model = model.to(memory_format=torch.channels_last)
    data = data.to(memory_format=torch.channels_last)
    if args.dtype == 'float32':
        model = ipex.optimize(model, dtype=torch.float32)
    elif args.dtype == 'bfloat16':
        model = ipex.optimize(model, dtype=torch.bfloat16)
    else: # int8
        from intel_extension_for_pytorch.quantization import prepare, convert
        qconfig = ipex.quantization.default_static_qconfig
        model = prepare(model, qconfig, example_inputs=data, inplace=False)
        # calibration: run dummy data so observers collect statistics
        n_iter = 100
        for i in range(n_iter):
            model(data)
        model = convert(model)
    # autocast only takes effect on the bfloat16 path.
    with torch.cpu.amp.autocast(enabled=args.dtype=='bfloat16'):
        if args.torchscript:
            with torch.no_grad():
                model = torch.jit.trace(model, data)
                model = torch.jit.freeze(model)
        inference(model, data)
# CLI entry point: precision and scripting mode.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--dtype', default='float32', choices=['float32', 'bfloat16', 'int8'])
    parser.add_argument("--torchscript", default=False, action="store_true")
    main(parser.parse_args())
| 1,644 | 25.967213 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/int8_deployment.py | import torch
#################### code changes ####################
import intel_extension_for_pytorch as ipex
######################################################

# Reload the previously saved INT8 TorchScript module and run one batch.
qmodel = torch.jit.load('quantized_model.pt')
qmodel.eval()
qmodel = torch.jit.freeze(qmodel)

batch = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    qmodel(batch)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/int8_calibration_dynamic.py | import os
import torch
#################### code changes ####################
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.quantization import prepare, convert
######################################################
##### Example Model #####
from transformers import BertModel
model = BertModel.from_pretrained("bert-base-uncased")
model.eval()
# Dummy token-id batch sized from the model's vocabulary.
vocab_size = model.config.vocab_size
batch_size = 1
seq_length = 512
data = torch.randint(vocab_size, size=[batch_size, seq_length])
#########################
# Dynamic quantization: weights are quantized ahead of time, activations at
# runtime — so no calibration loop is needed before convert().
qconfig_mapping = ipex.quantization.default_dynamic_qconfig_mapping
# Alternatively, define your own qconfig:
# from torch.ao.quantization import PerChannelMinMaxObserver, PlaceholderObserver, QConfig, QConfigMapping
# qconfig = QConfig(
#        activation = PlaceholderObserver.with_args(dtype=torch.float, is_dynamic=True),
#        weight = PerChannelMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric))
# qconfig_mapping = QConfigMapping().set_global(qconfig)
prepared_model = prepare(model, qconfig_mapping, example_inputs=data)
converted_model = convert(prepared_model)
# Trace + freeze, then save the deployable TorchScript artifact.
with torch.no_grad():
    traced_model = torch.jit.trace(converted_model, (data,), check_trace=False, strict=False)
    traced_model = torch.jit.freeze(traced_model)

traced_model.save("quantized_model.pt")
| 1,348 | 37.542857 | 109 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_torchscript_mode_inference_bf16.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])

#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.optimize(bert, dtype=torch.bfloat16)
######################################################

with torch.no_grad(), torch.cpu.amp.autocast():
    # Trace on a second random batch inside autocast, freeze, then run.
    trace_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])
    bert = torch.jit.trace(bert, (trace_ids,), check_trace=False, strict=False)
    bert = torch.jit.freeze(bert)
    bert(input_ids)
| 689 | 29 | 71 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/int8_calibration_static.py | import os
import torch
#################### code changes ####################
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.quantization import prepare, convert
######################################################
##### Example Model #####
import torchvision.models as models
model = models.resnet50(weights='ResNet50_Weights.DEFAULT')
model.eval()
data = torch.rand(1, 3, 224, 224)
#########################
# Static quantization: both weights and activations are quantized ahead of
# time, which requires the calibration loop below.
qconfig_mapping = ipex.quantization.default_static_qconfig_mapping
# Alternatively, define your own qconfig_mapping:
# from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver, QConfig, QConfigMapping
# qconfig = QConfig(
#        activation=MinMaxObserver.with_args(qscheme=torch.per_tensor_affine, dtype=torch.quint8),
#        weight=PerChannelMinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_channel_symmetric))
# qconfig_mapping = QConfigMapping().set_global(qconfig)
prepared_model = prepare(model, qconfig_mapping, example_inputs=data, inplace=False)
##### Example Dataloader #####
import torchvision
DOWNLOAD = True
DATA = 'datasets/cifar10/'
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_dataset = torchvision.datasets.CIFAR10(
    root=DATA,
    train=True,
    transform=transform,
    download=DOWNLOAD,
)
calibration_data_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=128
)
# Run representative data through the prepared model so the observers can
# collect activation statistics.
with torch.no_grad():
    for batch_idx, (d, target) in enumerate(calibration_data_loader):
        print(f'calibrated on batch {batch_idx} out of {len(calibration_data_loader)}')
        prepared_model(d)
##############################
converted_model = convert(prepared_model)
# Trace + freeze, then save the deployable TorchScript artifact.
with torch.no_grad():
    traced_model = torch.jit.trace(converted_model, data)
    traced_model = torch.jit.freeze(traced_model)

traced_model.save("quantized_model.pt")
| 1,982 | 33.789474 | 107 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_torchdynamo_mode_inference_fp32.py | import torch
import torchvision.models as models

# Pretrained ResNet-50 in eval mode.
net = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
net.eval()
sample = torch.rand(1, 3, 224, 224)

# Experimental Feature
#################### code changes ####################
import intel_extension_for_pytorch as ipex
net = ipex.optimize(net)
net = torch.compile(net, backend="ipex")
######################################################

with torch.no_grad():
    net(sample)
| 450 | 25.529412 | 64 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_torchscript_mode_inference_fp32.py | import torch
import torchvision.models as models

# Pretrained ResNet-50 in eval mode.
net = models.resnet50(weights='ResNet50_Weights.DEFAULT')
net.eval()
sample = torch.rand(1, 3, 224, 224)

#################### code changes ####################
import intel_extension_for_pytorch as ipex
net = ipex.optimize(net)
######################################################

with torch.no_grad():
    # Trace on a second random batch, freeze, then run the first batch.
    trace_input = torch.rand(1, 3, 224, 224)
    net = torch.jit.trace(net, trace_input)
    net = torch.jit.freeze(net)
    net(sample)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/resnet50_imperative_mode_inference_bf16.py | import torch
import torchvision.models as models

# Pretrained ResNet-50 in eval mode.
net = models.resnet50(weights='ResNet50_Weights.DEFAULT')
net.eval()
sample = torch.rand(1, 3, 224, 224)

#################### code changes ####################
import intel_extension_for_pytorch as ipex
net = ipex.optimize(net, dtype=torch.bfloat16)
######################################################

with torch.no_grad(), torch.cpu.amp.autocast():
    net(sample)
| 425 | 27.4 | 59 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_imperative_mode_inference_fp32.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])

#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.optimize(bert)
######################################################

with torch.no_grad():
    bert(input_ids)
intel-extension-for-pytorch | intel-extension-for-pytorch-master/examples/cpu/inference/python/bert_imperative_mode_inference_bf16.py | import torch
from transformers import BertModel

# Pretrained BERT encoder in inference mode.
bert = BertModel.from_pretrained("bert-base-uncased")
bert.eval()

n_vocab = bert.config.vocab_size
n_batch = 1
n_tokens = 512
input_ids = torch.randint(n_vocab, size=[n_batch, n_tokens])

#################### code changes ####################
import intel_extension_for_pytorch as ipex
bert = ipex.optimize(bert, dtype=torch.bfloat16)
######################################################

with torch.no_grad(), torch.cpu.amp.autocast():
    bert(input_ids)
| 519 | 26.368421 | 63 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/scripts/collect_env.py | # Referenced from https://github.com/pytorch/pytorch/blob/master/torch/utils/collect_env.py
# Run it with `python collect_env.py`.
import locale
import re
import subprocess
import sys
import os
from collections import namedtuple
# torch and IPEX are optional: when either is missing (or broken), the script
# still runs and reports "N/A" for the related fields.
try:
    import torch

    TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
    TORCH_AVAILABLE = False

try:
    import intel_extension_for_pytorch as ipex

    IPEX_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
    IPEX_AVAILABLE = False

# System Environment Information
# One record with every field rendered by env_info_fmt below.
SystemEnv = namedtuple(
    "SystemEnv",
    [
        "torch_version",
        "torch_cxx11_abi",
        "ipex_version",
        "ipex_gitrev",
        "build_type",
        "gcc_version",
        "clang_version",
        "icx_version",
        "cmake_version",
        "os",
        "libc_version",
        "python_version",
        "python_platform",
        "is_xpu_available",
        "dpcpp_runtime_version",
        "mkl_version",
        "gpu_models",
        "intel_opencl_version",
        "level_zero_version",
        "pip_version",  # 'pip' or 'pip3'
        "pip_packages",
        "conda_packages",
        "cpu_info",
    ],
)
def run(command):
    """Execute `command` in a shell; return (return-code, stdout, stderr)."""
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    out_bytes, err_bytes = proc.communicate()
    rc = proc.returncode
    # Windows consoles use the OEM code page; elsewhere honour the locale.
    if get_platform() == "win32":
        enc = "oem"
    else:
        enc = locale.getpreferredencoding()
    return rc, out_bytes.decode(enc).strip(), err_bytes.decode(enc).strip()
def run_and_read_all(run_lambda, command):
    """Run `command` via `run_lambda`; return its full stdout, or None on failure."""
    rc, out, _ = run_lambda(command)
    return out if rc == 0 else None
def run_and_parse_first_match(run_lambda, command, regex):
    """Run `command`; return group(1) of the first `regex` match in stdout, else None."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    found = re.search(regex, out)
    return found.group(1) if found is not None else None
def run_and_return_first_line(run_lambda, command):
    """Run `command`; return the first line of its stdout, or None on failure."""
    rc, out, _ = run_lambda(command)
    return None if rc != 0 else out.split("\n")[0]
def get_conda_packages(run_lambda):
    """Return `conda list` lines mentioning torch/numpy/mkl, or None if conda fails."""
    conda = os.environ.get("CONDA_EXE", "conda")
    out = run_and_read_all(run_lambda, "{} list".format(conda))
    if out is None:
        return None
    wanted = {"torch", "numpy", "mkl"}
    kept = [
        line
        for line in out.splitlines()
        # Skip conda's '#' header lines; keep only relevant packages.
        if not line.startswith("#") and any(name in line for name in wanted)
    ]
    return "\n".join(kept)
def get_gcc_version(run_lambda):
    """Return the gcc version string, or None when gcc is unavailable."""
    return run_and_parse_first_match(run_lambda, "gcc --version", r"gcc (.*)")

def get_clang_version(run_lambda):
    """Return the clang version string, or None when clang is unavailable."""
    return run_and_parse_first_match(
        run_lambda, "clang --version", r"clang version (.*)"
    )

def get_icx_version(run_lambda):
    """Return the Intel oneAPI DPC++/C++ compiler version, or None."""
    return run_and_parse_first_match(
        run_lambda, "icx --version", r"Intel\(R\) oneAPI DPC\+\+\/C\+\+ Compiler (.*)"
    )

def get_cmake_version(run_lambda):
    """Return the cmake version string, or None when cmake is unavailable."""
    return run_and_parse_first_match(run_lambda, "cmake --version", r"cmake (.*)")
def get_pkg_version(run_lambda, pkg):
    """Look up an installed system package version on Linux.

    pkg is a logical name ("intel_opencl" or "level_zero") mapped below to the
    distro package name. Probes dpkg, then yum, then dnf, and greps their
    package listing; `index` selects which whitespace-separated column of the
    matching line holds the version (1 for yum/dnf, 2 for dpkg).
    Returns the version string, "N/A" when it cannot be determined, or ""
    on non-Linux platforms.
    """
    txt = ""
    index = -1
    if get_platform() == "linux":
        # Detect which package manager this distro uses.
        mgr_name = ""
        if mgr_name == "":
            rc, _, _ = run("which dpkg")
            if rc == 0:
                mgr_name = "dpkg"
        if mgr_name == "":
            rc, _, _ = run("which yum")
            if rc == 0:
                mgr_name = "yum"
        if mgr_name == "":
            rc, _, _ = run("which dnf")
            if rc == 0:
                mgr_name = "dnf"
        if mgr_name != "":
            cmd = ""
            if mgr_name == "yum" or mgr_name == "dnf":
                index = 1
                pkg_name = ""
                if pkg == "intel_opencl":
                    pkg_name = "intel-opencl"
                if pkg == "level_zero":
                    pkg_name = "intel-level-zero-gpu"
                if pkg_name != "":
                    cmd = f"{mgr_name} list | grep {pkg_name}"
            if mgr_name == "dpkg":
                index = 2
                pkg_name = ""
                if pkg == "intel_opencl":
                    pkg_name = "intel-opencl-icd"
                if pkg == "level_zero":
                    pkg_name = "intel-level-zero-gpu"
                if pkg_name != "":
                    cmd = f"{mgr_name} -l | grep {pkg_name}"
            if cmd != "":
                txt = run_and_read_all(run_lambda, cmd)
        # Collapse runs of spaces, then pick the version column if present.
        lst_txt = []
        if txt:
            lst_txt = re.sub(" +", " ", txt).split(" ")
        if len(lst_txt) > index and index != -1:
            txt = lst_txt[index]
        else:
            txt = "N/A"
    return txt
def get_gpu_info(run_lambda):
    """List XPU devices via torch.xpu, or "N/A" when torch/IPEX are absent."""
    if not (TORCH_AVAILABLE and IPEX_AVAILABLE):
        return "N/A"
    lines = []
    for idx in range(torch.xpu.device_count()):
        lines.append(f"[{idx}] {torch.xpu.get_device_properties(idx)}")
    return "\n".join(lines)
def get_running_dpcpp_version(run_lambda):
    """Return the active DPC++ compiler version parsed from $CMPLR_ROOT's last path component."""
    return run_and_read_all(
        run_lambda, 'env | grep CMPLR_ROOT | rev | cut -d "/" -f 1 | rev'
    )

def get_mkl_version(run_lambda):
    """Return the active oneMKL version parsed from $MKLROOT's last path component."""
    return run_and_read_all(
        run_lambda, 'env | grep MKLROOT | rev | cut -d "/" -f 1 | rev'
    )
def get_cpu_info(run_lambda):
    """Return the raw CPU description from the platform's probe tool.

    Uses `lscpu` on Linux, `wmic` on Windows, and `sysctl` on macOS.
    Returns the tool's stdout on success, its stderr on failure, and "N/A"
    on platforms with no known probe command (the original returned an
    empty string in that last case).
    """
    platform = get_platform()
    command = None
    if platform == "linux":
        command = "lscpu"
    elif platform == "win32":
        # Original command string preserved verbatim (including the
        # line-continuation whitespace inside it).
        command = "wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID,\
            CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE"
    elif platform == "darwin":
        command = "sysctl -n machdep.cpu.brand_string"
    if command is None:
        return "N/A"
    rc, out, err = run_lambda(command)
    return out if rc == 0 else err
def get_platform():
    """Map sys.platform onto one of "linux"/"win32"/"cygwin"/"darwin", else itself."""
    for name in ("linux", "win32", "cygwin", "darwin"):
        if sys.platform.startswith(name):
            return name
    return sys.platform
def get_mac_version(run_lambda):
    """Return the macOS product version (e.g. "13.4"), or None."""
    return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")

def get_windows_version(run_lambda):
    """Return the Windows edition caption via wmic, or None."""
    system_root = os.environ.get("SYSTEMROOT", "C:\\Windows")
    wmic_cmd = os.path.join(system_root, "System32", "Wbem", "wmic")
    findstr_cmd = os.path.join(system_root, "System32", "findstr")
    return run_and_read_all(
        run_lambda, "{} os get Caption | {} /v Caption".format(wmic_cmd, findstr_cmd)
    )

def get_lsb_version(run_lambda):
    """Return the distro description from `lsb_release -a`, or None."""
    return run_and_parse_first_match(
        run_lambda, "lsb_release -a", r"Description:\t(.*)"
    )

def check_release_file(run_lambda):
    """Return PRETTY_NAME from /etc/*-release, or None."""
    return run_and_parse_first_match(
        run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"'
    )
def get_os(run_lambda):
    """Return a human-readable OS description with the machine architecture."""
    from platform import machine

    platform = get_platform()
    if platform == "win32" or platform == "cygwin":
        return get_windows_version(run_lambda)
    if platform == "darwin":
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return "macOS {} ({})".format(version, machine())
    if platform == "linux":
        # Ubuntu/Debian based
        desc = get_lsb_version(run_lambda)
        if desc is not None:
            return "{} ({})".format(desc, machine())
        # Try reading /etc/*-release
        desc = check_release_file(run_lambda)
        if desc is not None:
            return "{} ({})".format(desc, machine())
        # Fall back to the bare platform name when no release info is found.
        return "{} ({})".format(platform, machine())
    # Unknown platform
    return platform
def get_python_platform():
    """Return the human-readable platform string for this interpreter."""
    import platform

    return platform.platform()
def get_libc_version():
    """Return "<libc>-<version>" on Linux (e.g. "glibc-2.31"); "N/A" elsewhere."""
    import platform

    if get_platform() != "linux":
        return "N/A"
    return "-".join(platform.libc_ver())
def get_pip_packages(run_lambda):
    """Returns `pip list` output. Note: will also find conda-installed pytorch
    and numpy packages.

    Returns:
        tuple: ("pip" or "pip3", newline-joined filtered freeze lines, or
        None when `python -mpip list` could not be executed).
    """
    # People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
    def run_with_pip(pip):
        out = run_and_read_all(run_lambda, "{} list --format=freeze".format(pip))
        if out is None:
            # Fix: run_and_read_all returns None on a non-zero exit code;
            # the original crashed with AttributeError on None.splitlines().
            return None
        return "\n".join(
            line
            for line in out.splitlines()
            if any(
                name in line
                for name in {
                    "torch",
                    "numpy",
                    "mypy",
                }
            )
        )

    # sys.version_info is the robust major-version check (the original sliced
    # the free-form sys.version string).
    pip_version = "pip3" if sys.version_info[0] == 3 else "pip"
    out = run_with_pip(sys.executable + " -mpip")
    return pip_version, out
def get_env_info():
    """Collect every probe into a populated SystemEnv record."""
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        torch_version_str = torch.__version__
        torch_cxx11_abi_str = torch._C._GLIBCXX_USE_CXX11_ABI
    else:
        torch_version_str = torch_cxx11_abi_str = "N/A"

    if IPEX_AVAILABLE:
        ipex_version_str = ipex.__version__
        # IPEX moved/renamed its version module and attributes across
        # releases; probe the newer names first and fall back.
        try:
            import intel_extension_for_pytorch._version as ver
        except ImportError:
            import intel_extension_for_pytorch.version as ver
        try:
            ipex_gitrev_str = ver.__ipex_gitrev__
        except AttributeError:
            ipex_gitrev_str = ver.__gitrev__
        try:
            build_type_str = str(ver.__build_type__)
        except AttributeError:
            build_type_str = str(ver.__mode__)
        try:
            xpu_available_str = str(torch.xpu.is_available())
        except AttributeError:
            xpu_available_str = False
    else:
        ipex_version_str = ipex_gitrev_str = "N/A"
        build_type_str = xpu_available_str = "N/A"

    sys_version = sys.version.replace("\n", " ")
    return SystemEnv(
        torch_version=torch_version_str,
        torch_cxx11_abi=torch_cxx11_abi_str,
        ipex_version=ipex_version_str,
        ipex_gitrev=ipex_gitrev_str,
        build_type=build_type_str,
        python_version="{} ({}-bit runtime)".format(
            sys_version, sys.maxsize.bit_length() + 1
        ),
        python_platform=get_python_platform(),
        is_xpu_available=xpu_available_str,
        dpcpp_runtime_version=get_running_dpcpp_version(run_lambda),
        mkl_version=get_mkl_version(run_lambda),
        gpu_models=f"\n{get_gpu_info(run_lambda)}",
        intel_opencl_version=get_pkg_version(run_lambda, "intel_opencl"),
        level_zero_version=get_pkg_version(run_lambda, "level_zero"),
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=get_conda_packages(run_lambda),
        os=get_os(run_lambda),
        libc_version=get_libc_version(),
        gcc_version=get_gcc_version(run_lambda),
        clang_version=get_clang_version(run_lambda),
        icx_version=get_icx_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
        cpu_info=get_cpu_info(run_lambda),
    )
env_info_fmt = """
PyTorch version: {torch_version}
PyTorch CXX11 ABI: {torch_cxx11_abi}
IPEX version: {ipex_version}
IPEX commit: {ipex_gitrev}
Build type: {build_type}
OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
IGC version: {icx_version}
CMake version: {cmake_version}
Libc version: {libc_version}
Python version: {python_version}
Python platform: {python_platform}
Is XPU available: {is_xpu_available}
DPCPP runtime version: {dpcpp_runtime_version}
MKL version: {mkl_version}
GPU models and configuration: {gpu_models}
Intel OpenCL ICD version: {intel_opencl_version}
Level Zero version: {level_zero_version}
CPU:
{cpu_info}
Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
    """Render a SystemEnv namedtuple as the final human-readable report.

    Booleans become "Yes"/"No", missing (None) and empty values become
    "N/A", and the pip/conda package listings are tagged line by line
    before being substituted into ``env_info_fmt``.
    """

    def map_bools(dct, true="Yes", false="No"):
        # Identity checks (`is True` / `is False`) so 0/1 ints are untouched.
        return {
            k: true if v is True else false if v is False else v
            for k, v in dct.items()
        }

    def map_nones(dct, replacement="Could not collect"):
        return {k: replacement if v is None else v for k, v in dct.items()}

    def map_empties(dct, replacement="Could not collect"):
        return {
            k: v if v is not None and len(v) > 0 else replacement
            for k, v in dct.items()
        }

    def prepend(text, tag="[prepend]"):
        # Prefix every line of a multiline value with the given tag.
        return "\n".join(tag + line for line in text.split("\n"))

    def replace_if_empty(text, replacement="No relevant packages"):
        return replacement if text is not None and len(text) == 0 else text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split("\n")) > 1:
            return "\n{}\n".format(string)
        return string

    info = envinfo._asdict()
    # Normalize values: True/False -> Yes/No, then None and empty -> 'N/A'.
    info = map_bools(info)
    info = map_nones(info, replacement="N/A")
    info = map_empties(info, replacement="N/A")
    # An empty string means the listing ran but found nothing relevant.
    info["pip_packages"] = replace_if_empty(info["pip_packages"])
    info["conda_packages"] = replace_if_empty(info["conda_packages"])
    # Tag each package line with its source; a value that was previously
    # None now shows up as e.g. '[conda] Could not collect'.
    if info["pip_packages"]:
        info["pip_packages"] = prepend(
            info["pip_packages"], "[{}] ".format(envinfo.pip_version)
        )
    if info["conda_packages"]:
        info["conda_packages"] = prepend(info["conda_packages"], "[conda] ")
    # cpu_info is reported verbatim, bypassing the replacements above.
    info["cpu_info"] = envinfo.cpu_info
    return env_info_fmt.format(**info)
def get_pretty_env_info():
    """Collect the environment info and return it as a formatted report string."""
    return pretty_str(get_env_info())
def main():
    """Entry point: print the collected environment report to stdout."""
    print("Collecting environment information...")
    output = get_pretty_env_info()
    print(output)
if __name__ == "__main__":
    main()
| 14,855 | 27.790698 | 91 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/scripts/tools/setup/flake8.py | import os
import shutil
import subprocess
import sys
def check_flake8_errors(base_dir, filepath):
    """Auto-format Python sources with black, then lint them with flake8.

    Args:
        base_dir: working directory for the tool invocations.
        filepath: a single .py file, or a directory that is walked
            recursively for .py files.

    Returns:
        flake8's exit status (0 means no style errors), or 0 when flake8
        is not installed and the check is skipped.
    """
    has_flake8 = shutil.which("flake8") is not None
    if not has_flake8:
        print(
            "WARNING: Please install flake8 by pip install -r requirements-flake8.txt to check format!"
        )
    has_black = shutil.which("black") is not None
    if not has_black:
        print(
            "WARNING: Please install black by pip install -r requirements-flake8.txt to auto format!"
        )
    # Collect the target .py files once and reuse the list for both tools.
    targets = []
    if os.path.isdir(filepath):
        for root, _dirs, files in os.walk(filepath):
            targets.extend(
                os.path.join(root, f) for f in files if f.endswith(".py")
            )
    elif os.path.isfile(filepath):
        targets.append(filepath)
    # Auto format python code (best effort; black's exit status is ignored).
    if has_black:
        subprocess.call(["black"] + targets, cwd=base_dir)
    # Check code style. Skipping (instead of crashing with FileNotFoundError
    # inside subprocess.call) when flake8 is absent matches the warning above.
    if not has_flake8:
        return 0
    return subprocess.call(["flake8"] + targets, cwd=base_dir)
if __name__ == "__main__":
    # Resolve the repository root relative to this script's location.
    # NOTE(review): joining with "../../../../" and then taking dirname only
    # strips the last ".." component -- confirm this yields the intended root.
    base_dir = os.path.abspath(
        os.path.dirname(os.path.join(os.path.abspath(__file__), "../../../../"))
    )
    setupfile = os.path.join(base_dir, "setup.py")
    base_pydir = os.path.join(base_dir, "intel_extension_for_pytorch")
    base_scripts = os.path.join(base_dir, "scripts")
    base_cpu_uts = os.path.join(base_dir, "tests/cpu")
    # Format and lint every tracked location; any non-zero return means errors.
    Check_dir = [setupfile, base_pydir, base_scripts, base_cpu_uts]
    ret = sum([check_flake8_errors(base_dir, path) for path in Check_dir])
    if ret != 0:
        print("ERROR: flake8 found format errors!")
        sys.exit(1)
    else:
        print("Pass!")
| 1,700 | 30.5 | 103 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_inductor.py | import torch
import intel_extension_for_pytorch as ipex
import unittest
from torch.utils._pytree import tree_flatten, tree_unflatten
from torch.testing._internal.common_utils import TestCase
# TODO(jgong5): import and pass all inductor tests from stock pytorch
def check_model(
    self: TestCase,
    model,
    example_inputs,
    kwargs=None,
    *,
    atol=None,
    rtol=None,
    check_lowp=True,
    exact_dtype=True,
    nopython=True,
    copy_to_cuda=True,
    reference_in_float=True,
    assert_equal=True,
    check_gradient=False,
):
    """Copied and revised from test/inductor/test_torchinductor.py

    Runs ``model`` eagerly (optionally upcast to fp32 as the reference) and
    again through ``torch.compile(backend="ipex")``, then compares outputs
    (and, when ``check_gradient`` is set, gradients) via ``self.assertEqual``.
    """
    def compute_grads(args, kwrags, results, grads):
        # Backprop `grads` through `results` to every leaf tensor found in
        # the (args, kwargs) pytrees.
        def gather_leaf_tensors(args, kwargs):
            args, _ = tree_flatten(args)
            kwargs, _ = tree_flatten(kwargs)
            args = args + kwargs
            leaf_tensors = [
                arg
                for arg in args
                if isinstance(arg, torch.Tensor) and arg.requires_grad
            ]
            return leaf_tensors
        flat_results, _ = tree_flatten(results)
        flat_diff_results = [r for r in flat_results if r.requires_grad]
        assert len(flat_diff_results) > 0
        leaf_tensors = gather_leaf_tensors(args, kwrags)
        assert len(leaf_tensors) > 0
        return torch.autograd.grad(
            flat_diff_results,
            leaf_tensors,
            grads,
            allow_unused=True,
            retain_graph=True,
        )
    def clone_preserve_strides(x, device=None):
        # Deep-copy a tensor while keeping its exact (possibly non-contiguous)
        # layout: clone the whole underlying storage, then re-view it.
        if not isinstance(x, torch.Tensor):
            return x
        buffer = torch.as_strided(
            x, (x.untyped_storage().size() // x.element_size(),), (1,), 0
        )
        if not device:
            buffer = buffer.clone()
        else:
            buffer = buffer.to(device, copy=True)
        out = torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
        return out
    kwargs = kwargs or {}
    torch._dynamo.reset()
    ref_inputs = [clone_preserve_strides(x) for x in example_inputs]
    ref_kwargs = kwargs
    has_lowp_args = False
    original_lowp_dtype = torch.half
    if reference_in_float:
        # check_lowp is ignored here, it's kept just to be able to call `common` with extra arg
        def upcast_fn(x):
            # Upcast half/bfloat16 tensors to fp32 for the reference run.
            nonlocal has_lowp_args
            if isinstance(x, torch.Tensor) and (
                x.dtype == torch.float16 or x.dtype == torch.bfloat16
            ):
                has_lowp_args = True
                return x.float()
            else:
                return x
        def get_original_lowp_dtype(example_inputs):
            # If all tensor inputs share one dtype, use it; otherwise half.
            dtypes = [x.dtype for x in example_inputs if isinstance(x, torch.Tensor)]
            dtype_set = set(dtypes)
            return dtype_set.pop() if len(dtype_set) == 1 else torch.half
        ref_inputs = list(map(upcast_fn, example_inputs))
        ref_kwargs = {k: upcast_fn(v) for k, v in kwargs.items()}
        if has_lowp_args:
            original_lowp_dtype = get_original_lowp_dtype(example_inputs)
            if hasattr(model, "to"):
                model = model.to(torch.float)
    # Eager reference run (seeded so any randomness matches the compiled run).
    torch.manual_seed(0)
    correct = model(*ref_inputs, **ref_kwargs)
    # downcast the model back if needed
    if reference_in_float and has_lowp_args:
        if hasattr(model, "to"):
            model = model.to(original_lowp_dtype)
    torch._inductor.metrics.reset()
    def run(*ex, **kwargs):
        return model(*ex, **kwargs)
    # Compiled run through the ipex dynamo backend, on the original inputs.
    run = torch.compile(run, backend="ipex")
    torch.manual_seed(0)
    actual = run(*example_inputs, **kwargs)
    assert type(actual) == type(correct)
    correct_flat, correct_spec = tree_flatten(correct)
    actual_flat, _ = tree_flatten(actual)
    if reference_in_float:
        # Downcast the fp32 reference outputs to the actual output dtypes
        # before comparing.
        correct_flat = tuple(
            y.to(x.dtype)
            if isinstance(y, torch.Tensor) and y.dtype.is_floating_point
            else y
            for x, y in zip(actual_flat, correct_flat)
        )
        correct = tree_unflatten(correct_flat, correct_spec)
    if assert_equal:
        self.assertEqual(
            actual,
            correct,
            atol=atol,
            rtol=rtol,
            equal_nan=True,
            exact_dtype=exact_dtype,
        )
        # In case of input mutations, check that inputs are the same
        self.assertEqual(
            ref_inputs,
            example_inputs,
            atol=atol,
            rtol=rtol,
            equal_nan=True,
            # our testing sometimes uses higher precision inputs for the reference
            exact_dtype=False,
        )
    else:
        # Only check metadata (device/shape/stride/layout/dtype), not values.
        for correct_val, actual_val in zip(correct_flat, actual_flat):
            if isinstance(correct_val, torch.Tensor):
                assert correct_val.device == actual_val.device
                assert correct_val.size() == actual_val.size()
                assert correct_val.stride() == actual_val.stride()
                assert correct_val.layout == actual_val.layout
                if exact_dtype:
                    assert correct_val.dtype == actual_val.dtype
    if check_gradient:
        # generate random unit norm gradients
        grads = [
            torch.rand(r.shape, device=r.device, dtype=r.dtype)
            for r in correct_flat
            if r.requires_grad
        ]
        for g in grads:
            g /= g.norm()
        correct_grad = compute_grads(ref_inputs, ref_kwargs, correct, grads)
        flat_grads, _ = tree_flatten(correct_grad)
        all_none_grads = all(x is None for x in flat_grads)
        if all_none_grads:
            # See Note [Detaching inputs that never need gradients]
            # There are a handful of ops that can return None gradients, instead of zero gradients.
            # If all inputs to an AOTAutograd graph are supposed to get None gradients,
            # AOTAutograd will end up forcing all of the outputs of the forward to not require grad.
            # There's no easy fix to this (see the note above), although one option is to
            # force any derivative formulas in core to return tensors of zeros instead of None.
            flat_results, _ = tree_flatten(actual)
            results_that_require_grad = [
                x
                for x in flat_results
                if isinstance(x, torch.Tensor) and x.requires_grad
            ]
            self.assertEqual(len(results_that_require_grad), 0)
        else:
            actual_grad = compute_grads(example_inputs, kwargs, actual, grads)
            self.assertEqual(
                actual_grad,
                correct_grad,
                atol=atol,
                rtol=rtol,
                equal_nan=True,
                exact_dtype=exact_dtype,
            )
    torch._dynamo.reset()
class TestIpexInductor(TestCase):
    """Tests for the ipex inductor compiler backend."""
    # Alias so tests can call self.common(...) like upstream inductor tests.
    common = check_model
    def setUp(self):
        # Switch ipex to the inductor backend for the duration of each test.
        self.old_backend = ipex._get_compiler_backend()
        ipex._set_compiler_backend("inductor")
        return super().setUp()
    def tearDown(self):
        # Restore the backend saved in setUp().
        ipex._set_compiler_backend(self.old_backend)
        return super().tearDown()
    def test_custom_lowering(self):
        """mm lowering overrides"""
        def fn(x: torch.Tensor, y: torch.Tensor):
            return torch.matmul(torch.softmax(x / 10 + 10, -1), y)
        from intel_extension_for_pytorch._inductor.lowering import register_lowering
        from torch._inductor.lowering import aten
        from torch._inductor.ir import TensorBox, Reduction
        from torch._inductor.virtualized import ops, V
        # Replace aten.mm's lowering with a hand-written sum-reduction so we
        # can verify that ipex-registered lowerings are picked up.
        @register_lowering(aten.mm.default)
        def _mm(a: TensorBox, b: TensorBox):
            assert isinstance(a, TensorBox)
            assert isinstance(b, TensorBox)
            a.realize_hint()
            b.realize_hint()
            m, k = a.get_size()
            _k, n = b.get_size()
            assert k == _k
            reduced_sizes = [k]
            new_size = [m, n]
            m = V.graph.sizevars.guard_static_shape(m)
            n = V.graph.sizevars.guard_static_shape(n)
            k = V.graph.sizevars.guard_static_shape(k)
            _a_loader = a.make_loader()
            _b_loader = b.make_loader()
            def a_loader(idx, reduction_idx):
                m, _ = idx
                (k,) = reduction_idx
                return _a_loader([m, k])
            def b_loader(idx, reduction_idx):
                _, n = idx
                (k,) = reduction_idx
                return _b_loader([k, n])
            def fn(idx, reduction_idx):
                # mm as an elementwise multiply followed by a sum over k.
                return ops.mul(
                    a_loader(idx, reduction_idx), b_loader(idx, reduction_idx)
                )
            result = Reduction.create(
                device=a.get_device(),
                dst_dtype=a.get_dtype(),
                src_dtype=a.get_dtype(),
                inner_fn=fn,
                ranges=new_size,
                reduction_ranges=reduced_sizes,
                reduction_type="sum",
            )
            if isinstance(
                result.data.data, Reduction
            ):  # Only realize if reduction isn't unrolled
                result.realize()
            return result
        x = torch.randn(64, 128)
        # Column-major second operand to exercise a non-contiguous layout.
        y = torch.randn(128, 256).as_strided([128, 256], [1, 128])
        self.common(fn, (x, y))
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    test = unittest.main()
| 9,345 | 32.259786 | 100 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_emb.py | import torch
import torch.nn as nn
import unittest
import itertools
import copy
from torch.testing._internal.common_utils import TestCase
import intel_extension_for_pytorch as ipex
# Handles to both embedding_bag implementations so the tests below can swap
# torch.embedding_bag between the ipex fast path and the aten fallback.
ipex_emb_fn = ipex.nn.functional._embeddingbag._embeddingbag
aten_emb_fn = ipex.nn.functional._embeddingbag.torch_embedding_bag
class Embeddingbag(torch.nn.Module):
    """Thin wrapper around a sparse, sum-mode ``nn.EmbeddingBag(10, 3)``."""

    def __init__(self):
        super(Embeddingbag, self).__init__()
        bag = nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
        self.embeddingbag = bag

    def forward(self, input, offsets):
        pooled = self.embeddingbag(input, offsets)
        return pooled
class TestEMB(TestCase):
    """EmbeddingBag tests comparing the ipex fast path against the aten path."""
    def _test_emb(
        self,
        mode,
        per_sample_weights=None,
        padding_idx=None,
        include_last_offset=False,
        sparse=True,
        test_int32=False,
    ):
        # Build three copies of the same EmbeddingBag: aten reference, ipex
        # fast path, and a bfloat16 variant. The bfloat16 round-trip on the
        # reference keeps all weights exactly representable in bf16.
        aten_emb = nn.EmbeddingBag(
            10,
            33,
            mode=mode,
            sparse=sparse,
            padding_idx=padding_idx,
            include_last_offset=include_last_offset,
        )
        aten_emb = aten_emb.bfloat16().float()
        ipex_emb = copy.deepcopy(aten_emb)
        bf16_emb = copy.deepcopy(aten_emb).bfloat16()
        # a batch of 2 samples of 4 indices each
        tensor_create_fn = torch.IntTensor if test_int32 else torch.LongTensor
        input = tensor_create_fn([1, 2, 4, 5, 4, 3, 2, 9])
        if per_sample_weights is not None:
            per_sample_weights = torch.rand_like(input.float())
        if include_last_offset:
            offsets = tensor_create_fn([0, 4, 8])
        else:
            offsets = tensor_create_fn([0, 4])
        # aten path: monkeypatch the global torch.embedding_bag hook.
        torch.embedding_bag = aten_emb_fn
        aten_out = aten_emb(input, offsets, per_sample_weights)
        aten_out.sum().backward()
        # ipex fast path (both fp32/bf16)
        torch.embedding_bag = ipex_emb_fn
        ipex_out = ipex_emb(input, offsets, per_sample_weights)
        ipex_out.sum().backward()
        self.assertEqual(aten_out, ipex_out)
        if sparse:
            # Sparse gradients must match in structure and content.
            self.assertEqual(
                aten_emb.weight.grad.data._nnz(), ipex_emb.weight.grad.data._nnz()
            )
            self.assertEqual(
                aten_emb.weight.grad.data.sparse_dim(),
                ipex_emb.weight.grad.data.sparse_dim(),
            )
            self.assertEqual(
                aten_emb.weight.grad.data.dense_dim(),
                ipex_emb.weight.grad.data.dense_dim(),
            )
            self.assertEqual(
                aten_emb.weight.grad.data.is_coalesced(),
                ipex_emb.weight.grad.data.is_coalesced(),
            )
            self.assertEqual(
                aten_emb.weight.grad.data._indices(),
                ipex_emb.weight.grad.data._indices(),
            )
            self.assertEqual(
                aten_emb.weight.grad.data._values(), ipex_emb.weight.grad.data._values()
            )
        if mode == "sum" and padding_idx is None and per_sample_weights is None:
            # The pure-bf16 fast path only covers plain sum-mode bags.
            bf16_out = bf16_emb(input, offsets)
            bf16_out.sum().backward()
            self.assertEqual(aten_out.bfloat16(), bf16_out)
            if sparse:
                self.assertEqual(
                    bf16_emb.weight.grad.data._values().dtype, torch.bfloat16
                )
                self.assertEqual(
                    aten_emb.weight.grad.data._nnz(), ipex_emb.weight.grad.data._nnz()
                )
                self.assertEqual(
                    aten_emb.weight.grad.data.sparse_dim(),
                    ipex_emb.weight.grad.data.sparse_dim(),
                )
                self.assertEqual(
                    aten_emb.weight.grad.data.dense_dim(),
                    ipex_emb.weight.grad.data.dense_dim(),
                )
                self.assertEqual(
                    aten_emb.weight.grad.data.is_coalesced(),
                    ipex_emb.weight.grad.data.is_coalesced(),
                )
                self.assertEqual(
                    aten_emb.weight.grad.data._indices(),
                    ipex_emb.weight.grad.data._indices(),
                )
                self.assertEqual(
                    aten_emb.weight.grad.data._values().bfloat16().float(),
                    ipex_emb.weight.grad.data._values().float(),
                )
    def test_emb_fallback_path(self):
        """Configurations that must fall back to the aten implementation."""
        self._test_emb(mode="mean")
        for options in itertools.product(
            [2, None], [True, None], [True, False], [True, False], [True, False]
        ):
            (
                padding_idx,
                per_sample_weights,
                include_last_offset,
                sparse,
                test_int32,
            ) = options
            self._test_emb(
                mode="sum",
                per_sample_weights=per_sample_weights,
                padding_idx=padding_idx,
                include_last_offset=include_last_offset,
                sparse=sparse,
                test_int32=test_int32,
            )
    def test_emb_fast_path(self):
        """Plain sum-mode bags, which take the ipex fast path."""
        for options in itertools.product([True, False], [True, False]):
            include_last_offset, sparse = options
            self._test_emb(
                mode="sum", sparse=sparse, include_last_offset=include_last_offset
            )
    def test_emb_jit_scriptable(self):
        """The patched embedding_bag must remain TorchScript-compatible."""
        emb = nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
        ref_out = emb(input, offsets)
        script_emb = torch.jit.script(emb)
        out = script_emb(input, offsets)
        self.assertEqual(out, ref_out)
    def test_emb_torch_compile(self):
        """torch.compile with the ipex backend matches eager, fp32 and bf16."""
        emb = Embeddingbag().eval()
        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
        for dtype, compiler_backend, dynamic in itertools.product(
            [torch.float32, torch.bfloat16],
            ["torchscript", "inductor"],
            [True, False],
        ):
            torch._dynamo.reset()
            ipex._set_compiler_backend(compiler_backend)
            emb_torchcompile = torch.compile(emb, dynamic=dynamic, backend="ipex")
            with torch.cpu.amp.autocast(
                enabled=(dtype == torch.bfloat16)
            ), torch.no_grad():
                y0 = emb(input, offsets)
                y1 = emb_torchcompile(input, offsets)
            self.assertEqual(y0, y1)
            self.assertEqual(y1.dtype, dtype)
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    test = unittest.main()
| 6,570 | 35.505556 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_tensor_method.py | import torch
import unittest
from common_utils import TestCase
class TestTesorMethod(TestCase):
    """Checks tensor.numpy() memory-sharing behavior for float and bfloat16."""
    def test_numpy(self):
        # float tensor, numpy array will share memory with torch tensor.
        x = torch.randn(2, 3)
        y = torch.from_numpy(x.numpy())
        self.assertEqual(x, y)
        self.assertEqual(x.data_ptr(), y.data_ptr())
        # bfloat16 tensor, numpy array will not share memory with torch tensor.
        # NOTE(review): relies on bfloat16 .numpy() conversion being available
        # in this environment (stock torch raises for bfloat16) -- presumably
        # provided by ipex via common_utils; confirm.
        x = torch.randn(2, 3).bfloat16()
        y = torch.from_numpy(x.numpy())
        self.assertEqual(x, y.bfloat16())
        self.assertNotEqual(x.data_ptr(), y.data_ptr())
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    test = unittest.main()
| 666 | 29.318182 | 79 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_shared_param.py | import unittest
import copy
import torch
import intel_extension_for_pytorch as ipex
from torch.testing._internal.common_utils import TestCase
from torch.optim import (
Adadelta,
Adagrad,
Adam,
AdamW,
Adamax,
ASGD,
RMSprop,
Rprop,
SGD,
)
from intel_extension_for_pytorch.optim._lamb import Lamb
import itertools
class TestParamSharing(TestCase):
    """ipex.optimize must preserve parameter sharing across modules."""
    def test_param_shared(self):
        class SharedParaModel(torch.nn.Module):
            # from bart
            def __init__(self):
                super(SharedParaModel, self).__init__()
                # One Embedding weight shared by encoder, decoder and linear.
                self.shared = torch.nn.Embedding(3, 3)
                self.encoder = torch.nn.Embedding(3, 3)
                self.decoder = torch.nn.Embedding(3, 3)
                self.encoder.weight = self.shared.weight
                self.decoder.weight = self.shared.weight
                self.linear = torch.nn.Linear(3, 3)
                self.linear.weight = self.shared.weight
            def forward(self, x):
                x = self.encoder(x)
                x = self.decoder(x)
                x = self.linear(x) + self.shared(x)
                return x
        def check_shared_in_model(model, dtype):
            # All four modules must still alias one underlying weight buffer.
            self.assertEqual(
                model.shared.weight.data_ptr(), model.encoder.weight.data_ptr()
            )
            self.assertEqual(
                model.shared.weight.data_ptr(), model.decoder.weight.data_ptr()
            )
            self.assertEqual(
                model.shared.weight.data_ptr(), model.linear.weight.data_ptr()
            )
            self.assertEqual(model.shared.weight.dtype, dtype)
        def check_shared_in_state_dict(state_dict):
            # The exported state_dict keeps the aliasing and stays fp32.
            self.assertEqual(
                state_dict["shared.weight"].data_ptr(),
                state_dict["encoder.weight"].data_ptr(),
            )
            self.assertEqual(
                state_dict["shared.weight"].data_ptr(),
                state_dict["decoder.weight"].data_ptr(),
            )
            self.assertEqual(
                state_dict["shared.weight"].data_ptr(),
                state_dict["linear.weight"].data_ptr(),
            )
            self.assertEqual(state_dict["shared.weight"].dtype, torch.float)
        def test_inference(model):
            # Inference path: ipex.optimize without an optimizer.
            params_dict = {
                "dtype": [torch.float, torch.bfloat16],
                "level": ["O0", "O1"],
                "inplace": [True, False],
            }
            for dtype, level, inplace in list(itertools.product(*params_dict.values())):
                test_model = copy.deepcopy(model).eval()
                opt_M = ipex.optimize(
                    test_model, dtype=dtype, level=level, inplace=inplace
                )
                check_shared_in_model(opt_M, dtype)
                check_shared_in_state_dict(opt_M.state_dict())
        def test_training(model):
            # Training path: every supported optimizer type.
            params_dict = {
                "dtype": [torch.float, torch.bfloat16],
                "level": ["O0", "O1"],
                "inplace": [True, False],
                "optimizer": [
                    Lamb,
                    Adadelta,
                    Adagrad,
                    Adam,
                    AdamW,
                    Adamax,
                    ASGD,
                    RMSprop,
                    Rprop,
                    SGD,
                ],
            }
            for dtype, level, inplace, opt in list(
                itertools.product(*params_dict.values())
            ):
                test_model = copy.deepcopy(model)
                if opt == SGD:
                    optimizer = opt(model.parameters(), lr=10.01, momentum=0.1)
                else:
                    optimizer = opt(model.parameters(), lr=10.01)
                opt_M, _ = ipex.optimize(
                    test_model,
                    optimizer=optimizer,
                    dtype=dtype,
                    level=level,
                    inplace=inplace,
                )
                check_shared_in_model(opt_M, dtype)
                check_shared_in_state_dict(opt_M.state_dict())
        test_inference(SharedParaModel())
        test_training(SharedParaModel())
    def test_nocast_since_shared(self):
        """Weights shared with a plain Parameter must not be cast to bf16."""
        class NoCastforSharingPara(torch.nn.Module):
            def __init__(self):
                super(NoCastforSharingPara, self).__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 3))
                self.no_cast_linear = torch.nn.Linear(3, 3)
                self.no_cast_linear.weight = self.param
            def forward(self, x):
                # NOTE(review): self.other is never defined; forward is never
                # invoked by this test, but it would fail if called.
                x = self.no_cast_linear(x)
                x = x + self.other
                return x
        model = NoCastforSharingPara()
        for level in ["O0", "O1"]:
            for train in [True, False]:
                test_model = copy.deepcopy(model)
                if train:
                    optimizer = SGD(model.parameters(), lr=10.01, momentum=0.1)
                    opt_M, _ = ipex.optimize(
                        test_model,
                        optimizer=optimizer,
                        dtype=torch.bfloat16,
                        level=level,
                    )
                else:
                    opt_M = ipex.optimize(
                        test_model.eval(), dtype=torch.bfloat16, level=level
                    )
                self.assertEqual(
                    opt_M.param.data_ptr(), opt_M.no_cast_linear.weight.data_ptr()
                )
                self.assertEqual(opt_M.no_cast_linear.weight.dtype, torch.float)
                self.assertEqual(opt_M.no_cast_linear.bias.dtype, torch.float)
    def test_noprepack_since_shared(self):
        """Weights shared with an Embedding must not be weight-prepacked."""
        class NoPrepackforSharingPara(torch.nn.Module):
            def __init__(self):
                super(NoPrepackforSharingPara, self).__init__()
                self.shared = torch.nn.Embedding(3, 3)
                self.no_prepack_linear = torch.nn.Linear(3, 3)
                self.no_prepack_linear.weight = self.shared.weight
            def forward(self, x):
                # NOTE(review): references self.no_cast_linear and self.other,
                # neither of which exists here; forward is never invoked by
                # this test, but it would fail if called.
                x = self.no_cast_linear(x)
                x = x + self.other
                return x
        model = NoPrepackforSharingPara()
        for level in ["O0", "O1"]:
            for train in [True, False]:
                test_model = copy.deepcopy(model)
                if train:
                    optimizer = SGD(model.parameters(), lr=10.01, momentum=0.1)
                    opt_M, _ = ipex.optimize(
                        test_model,
                        weights_prepack=True,
                        optimizer=optimizer,
                        dtype=torch.bfloat16,
                        level=level,
                    )
                else:
                    opt_M = ipex.optimize(
                        test_model.eval(),
                        weights_prepack=True,
                        dtype=torch.bfloat16,
                        level=level,
                    )
                self.assertEqual(
                    opt_M.shared.weight.data_ptr(),
                    opt_M.no_prepack_linear.weight.data_ptr(),
                )
                self.assertTrue(isinstance(opt_M.no_prepack_linear, torch.nn.Linear))
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    test = unittest.main()
| 7,295 | 35.48 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_linear_fuse_eltwise.py | import unittest
import torch
import intel_extension_for_pytorch as ipex
from torch.testing._internal.common_utils import TestCase
import copy
class MLP(torch.nn.Module):
    """Two-layer perceptron: Linear -> ReLU -> Linear -> Sigmoid, 10 -> 10."""

    def __init__(self):
        super(MLP, self).__init__()
        stages = [
            torch.nn.Linear(10, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 10),
            torch.nn.Sigmoid(),
        ]
        self.mlp = torch.nn.ModuleList(stages)

    def forward(self, x):
        out = x
        for stage in self.mlp:
            out = stage(out)
        return out
class TestLinearFuseEltwise(TestCase):
    """IPEXLinearEltwise fusion must match the unfused forward and backward."""
    def test_linear_fuse_eltwise(self):
        x1 = torch.rand(5, 10).requires_grad_()
        x2 = copy.deepcopy(x1)
        for dtype in [torch.float, torch.bfloat16]:
            model = MLP()
            opt = torch.optim.SGD(model.parameters(), lr=0.01)
            model, opt = ipex.optimize(
                model, optimizer=opt, dtype=dtype, auto_kernel_selection=True
            )
            # Reference: unfused forward/backward.
            with torch.cpu.amp.autocast(enabled=(dtype == torch.bfloat16)):
                ref_out = model(x1).sum()
                ref_out.backward()
            # Fused variant: fold ReLU/Sigmoid into the preceding Linear and
            # neutralize the stand-alone activation modules.
            fused_model = copy.deepcopy(model)
            fused_model.mlp[0] = ipex.nn.modules.IPEXLinearEltwise(
                fused_model.mlp[0], "relu"
            )
            fused_model.mlp[1] = torch.nn.Identity()
            fused_model.mlp[2] = ipex.nn.modules.IPEXLinearEltwise(
                fused_model.mlp[2], "sigmoid"
            )
            fused_model.mlp[3] = torch.nn.Identity()
            with torch.cpu.amp.autocast(enabled=(dtype == torch.bfloat16)):
                out = fused_model(x2).sum()
                out.backward()
            self.assertEqual(out, ref_out)
            self.assertEqual(x1.grad, x2.grad)
if __name__ == "__main__":
    # Run the test suite when this file is executed as a script.
    test = unittest.main()
| 1,838 | 32.436364 | 77 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ao_jit_ipex_quantization.py | import sys
import os
import itertools
import tempfile
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.testing import FileCheck
import copy
import json
from test_autocast import get_rand_seed
import intel_extension_for_pytorch as ipex
from test_ao_jit_llga_utils import (
JitLlgaTestCase,
LLGA_FUSION_GROUP,
llga_fp32_bf16_test_env,
)
from torch.testing._internal.jit_utils import freeze_rng_state
from torch.ao.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
HistogramObserver,
QConfig,
PlaceholderObserver,
)
from torch.testing._internal.common_utils import run_tests
# Per-channel symmetric int8 weight observer shared by all qconfigs below.
default_weight_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
# Static (calibrated) quantization recipes exercised by the tests: affine
# quint8, symmetric qint8, histogram-based qint8, plus ipex's default.
static_qconfig = [
    QConfig(
        activation=MinMaxObserver.with_args(
            qscheme=torch.per_tensor_affine, dtype=torch.quint8
        ),
        weight=default_weight_observer,
    ),
    QConfig(
        activation=MinMaxObserver.with_args(
            qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
        ),
        weight=default_weight_observer,
    ),
    QConfig(
        activation=HistogramObserver.with_args(
            qscheme=torch.per_tensor_symmetric, dtype=torch.qint8, reduce_range=True
        ),
        weight=default_weight_observer,
    ),
    ipex.quantization.default_static_qconfig,
]
# Dynamic quantization recipes: activations observed at runtime (placeholder
# observer), weights quantized symmetrically to int8.
dynamic_qconfig = [
    QConfig(
        activation=PlaceholderObserver.with_args(
            dtype=torch.float, compute_dtype=torch.quint8
        ),
        weight=MinMaxObserver.with_args(
            dtype=torch.qint8, qscheme=torch.per_tensor_symmetric
        ),
    ),
    ipex.quantization.default_dynamic_qconfig,
]
class TestIpexOps(JitLlgaTestCase):
    def test_adaptive_avg_pool2d(self):
        """conv + adaptive_avg_pool2d quantizes into one LLGA fusion group."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = nn.Conv2d(3, 3, 2, padding=1, bias=True)
                self.adaptive_avg_pool2d = nn.AdaptiveAvgPool2d((5, 7))
            def forward(self, x):
                x = self.conv(x)
                x = self.adaptive_avg_pool2d(x)
                return x
        m = M()
        x = torch.rand(1, 3, 28, 28)
        patterns = [
            ["aten::dequantize", "aten::_convolution", "aten::quantize_per_tensor"],
        ]
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
            self.checkPatterns(graph, patterns)
# single none gemm ops will not be quantized if pre and post don't has
# quantizable op.
    def test_adaptive_avg_pool2d_fp32(self):
        """A lone adaptive_avg_pool2d (no neighboring quantizable op) stays fp32."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.adaptive_avg_pool2d = nn.AdaptiveAvgPool2d((5, 7))
            def forward(self, x):
                x = self.adaptive_avg_pool2d(x)
                return x
        m = M()
        x = torch.rand(1, 3, 28, 28)
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x], qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
            # No quantize/dequantize may be inserted around the pool.
            FileCheck().check_not("aten::quantize_per_tensor").check_not(
                "at::dequantize"
            ).check("aten::adaptive_avg_pool2d").run(graph)
    def test_flatten_int8(self):
        """flatten between quantized conv/pool and linear stays in int8."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(3, 3, 2, padding=1, bias=True)
                self.pool = nn.MaxPool2d(2)
                self.flatten = nn.Flatten(1)
                self.linear = nn.Linear(147, 32)
            def forward(self, x):
                x = self.conv1(x)
                x = self.pool(x)
                x = self.flatten(x)
                x = self.linear(x)
                return x
        # Same model but using the tensor method instead of nn.Flatten.
        class M2(nn.Module):
            def __init__(self):
                super(M2, self).__init__()
                self.conv1 = nn.Conv2d(3, 3, 2, padding=1, bias=True)
                self.pool = nn.MaxPool2d(2)
                self.linear = nn.Linear(147, 32)
            def forward(self, x):
                x = self.conv1(x)
                x = self.pool(x)
                x = x.flatten(1)
                x = self.linear(x)
                return x
        m = M()
        m2 = M2()
        for test_m in [m, m2]:
            x = torch.rand(1, 3, 14, 14)
            patterns = [
                ["aten::dequantize", "aten::_convolution", "aten::quantize_per_tensor"],
                ["aten::dequantize", "aten::max_pool2d", "aten::quantize_per_tensor"],
                ["aten::dequantize", "aten::linear"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(test_m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
                self.checkPatterns(graph, patterns)
# single none gemm ops will not be quantized if pre and post don't has
# quantizable op.
    def test_flatten_fp32(self):
        """A lone flatten (no neighboring quantizable op) stays fp32."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.flatten = nn.Flatten(1)
            def forward(self, x):
                x = self.flatten(x)
                return x
        m = M()
        x = torch.rand(1, 3, 14, 14)
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x], qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
            # No quantize/dequantize may be inserted around the flatten.
            FileCheck().check_not("aten::quantize_per_tensor").check_not(
                "at::dequantize"
            ).check("aten::flatten").run(graph)
    def test_embeddingbag_int8(self):
        """EmbeddingBag lowers to ipex::qembedding_bag and matches fake-quant math."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.m = nn.EmbeddingBag(10, 110, mode="sum", sparse=True)
            def forward(self, input, offset):
                x = self.m(input, offset)
                return x
        def get_input(bag_size_1):
            # bag_size_1=True: ten bags of one index each; else one bag of ten.
            if bag_size_1:
                return torch.LongTensor(
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
                ), torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
            else:
                return torch.LongTensor(
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
                ), torch.LongTensor([0])
        def fake_quant(tensor, scale, zp):
            # Round-trip through qint8 to emulate quantization error.
            qtensor = torch.quantize_per_tensor(tensor, scale, zp, torch.qint8)
            return qtensor.dequantize()
        def get_expect(module, input, offsets):
            # Reference output: fake-quantize the weights, run the bag, then
            # fake-quantize the output with a symmetric scale.
            def _calculate_scale(max_val, min_val):
                min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
                max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
                max_val_pos = torch.max(-min_val_neg, max_val_pos)
                scale = max_val_pos / 127.5
                scale = max(scale.item(), torch.finfo(torch.float32).eps)
                return scale
            _module = copy.deepcopy(module)
            y = _module(input, offsets)
            o_scale = _calculate_scale(y.max(), y.min())
            if isinstance(_module, nn.EmbeddingBag):
                w_scale = _calculate_scale(_module.weight.max(), _module.weight.min())
                _module.weight.data = fake_quant(_module.weight, w_scale, 0)
            else:
                w_scale = _calculate_scale(
                    _module.m.weight.max(), _module.m.weight.min()
                )
                _module.m.weight.data = fake_quant(_module.m.weight, w_scale, 0)
            expect = _module(input, offsets)
            return fake_quant(expect, o_scale, 0)
        # This will call in F.embeddingbag
        with torch.no_grad():
            for bag_size_1 in [True, False]:
                input, offsets = get_input(bag_size_1)
                m = nn.EmbeddingBag(10, 110, mode="sum", sparse=True)
                y = get_expect(m, input, offsets)
                # NOTE(review): `tol` is computed but never passed to
                # checkQuantizeTrace -- confirm whether atol=tol was intended.
                tol = 1e-2 if bag_size_1 else 5e-2
                graph = self.checkQuantizeTrace(
                    m, [input, offsets], qconfig=static_qconfig[1], expect_result=y
                )
                self.assertGraphContainsExactly(graph, "ipex::qembedding_bag", 1)
                # test nn.EmbeddingBag
                m = M().eval()
                y = get_expect(m, input, offsets)
                graph = self.checkQuantizeTrace(
                    m, [input, offsets], qconfig=static_qconfig[1], expect_result=y
                )
                self.assertGraphContainsExactly(graph, "ipex::qembedding_bag", 1)
    def test_interaction_int8(self):
        """ipex interaction op lowers to the quantized ipex::qinteraction kernel."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.f = ipex.nn.functional.interaction
            def forward(self, x1, x2, x3):
                x = self.f(x1.relu(), x2.relu(), x3.relu())
                return x
        m = M()
        inputs = []
        for i in range(0, 3):
            inputs.append(torch.randn([128, 128]) * 0.1)
        graph = self.checkQuantizeTrace(m, inputs, atol=1e-2, qconfig=static_qconfig[1])
        self.assertGraphContainsExactly(graph, "ipex::qinteraction", 1)
# Besides its primary objective, this UT also implicitly tests if mayRevertDtypeAttributeInsertion
# in csrc/jit/codegen/onednn/prepare_binary.cpp works well.
    def test_add_int8(self):
        """dequant -> add -> quant folds into quantized::add after freezing."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, x1, x2):
                out = torch.add(torch.dequantize(x1), torch.dequantize(x2))
                return torch.quantize_per_tensor(out, 0.1, 10, torch.quint8)
        m = M().eval()
        inputs = [
            torch.quantize_per_tensor(torch.randn(12, 12), 0.1, 10, torch.quint8),
            torch.quantize_per_tensor(torch.randn(12, 12), 0.1, 10, torch.quint8),
        ]
        with torch.no_grad():
            traced_model = torch.jit.trace(m, inputs)
            traced_model = torch.jit.freeze(traced_model)
            # Warm-up run so profiling/fusion passes execute before inspection.
            traced_model(*inputs)
            graph = traced_model.graph_for(*inputs)
            ori_out = m(*inputs)
            out = traced_model(*inputs)
            self.assertEqual(ori_out, out)
            self.assertGraphContainsExactly(graph, "quantized::add", 1)
# This test case will be enabled after LSTM int8->fp32 works
    def test_lstm(self):
        """Quantized LSTM lowers to ipex::quantized_lstm across configurations."""
        class M(nn.Module):
            def __init__(
                self,
                input_size,
                hidden_size,
                num_layers,
                bidirectional=False,
                bias=False,
                dropout=0,
                batch_first=False,
            ):
                super(M, self).__init__()
                self.lstm = nn.LSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    bidirectional=bidirectional,
                    bias=bias,
                    dropout=dropout,
                    batch_first=batch_first,
                )
            def forward(self, x, h=None):
                x, h = self.lstm(x, h)
                return x, h
        def _lstm_params_list():
            # Value lists for the cartesian parameter sweep below; the
            # iteration order matches the tuple unpacked in the for-loop.
            params_dict = {
                "input_size": [1, 32],
                "hidden_size": [16],
                "num_layers": [3],
                "bidirectional": [False, True],
                "bias": [False, True],
                "empty_state": [False, True],
                "batch_first": [False, True],
                "dropout": [0, 0.4, 1],
                "batch_size": [1, 2],
                "seq_len": [48],
            }
            params_list = []
            for key, value in params_dict.items():
                params_list.append(value)
            return params_list
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        params_list = _lstm_params_list()
        for (
            input_size,
            hidden_size,
            num_layers,
            bidirectional,
            bias,
            empty_state,
            batch_first,
            dropout,
            batch_size,
            seq_len,
        ) in itertools.product(*params_list):
            # dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1
            if dropout > 0 and num_layers == 1:
                continue
            # NOTE(review): num_directions is computed but unused below.
            num_directions = 2 if bidirectional else 1
            if batch_first:
                x = torch.randn(batch_size, seq_len, input_size)
            else:
                x = torch.randn(seq_len, batch_size, input_size)
            m = M(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                bidirectional=bidirectional,
                bias=bias,
                dropout=dropout,
                batch_first=batch_first,
            )
            graph = self.checkQuantizeTrace(m, [x], atol=3e-2, rtol=1e-1)
            self.assertGraphContainsExactly(graph, "ipex::quantized_lstm", 1)
def test_lstm_PackedSequence(self):
    """Trace an LSTM fed through pack_padded_sequence/pad_packed_sequence;
    the graph is expected to keep exactly one plain ``aten::lstm`` node
    (i.e. the packed-sequence LSTM is not replaced by a quantized op)."""

    class M(nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.lstm = nn.LSTM(
                input_size=288,
                hidden_size=1024,
                num_layers=6,
                batch_first=True,
                bidirectional=True,
                bias=True,
                dropout=0.2,
            )

        def forward(self, input, hid, mask=None):
            if mask is not None:
                # Per-sequence lengths come from the padding mask.
                lengths = mask.sum(-1)
                seq = pack_padded_sequence(input, lengths.cpu(), batch_first=True)
                seq, hid = self.lstm(seq, hid)
                seq = pad_packed_sequence(seq, batch_first=True)[0]
                return seq, hid
            else:
                return self.lstm(input, hid)

    model = M().eval()
    seq = torch.randn(size=(1, 211, 288), dtype=torch.float32)
    # initialize hidden states
    # leading dim 12 = num_layers (6) * num_directions (2, bidirectional)
    h0 = torch.zeros((12, 1, 1024), dtype=seq.dtype)
    hid = (h0, h0)
    mask = torch.ones(size=(1, 211), dtype=torch.uint8)
    graph = self.checkQuantizeTrace(model, [seq, hid, mask])
    self.assertGraphContainsExactly(graph, "aten::lstm", 1)
def test_linear_lstm(self):
    """Linear followed by LSTM: the LSTM must be replaced by the fused
    ``ipex::quantized_lstm`` node, leaving no plain ``aten::lstm``."""

    class M(nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.linear = nn.Linear(512, 64)
            self.lstm = nn.LSTM(input_size=64, hidden_size=256, num_layers=2)

        def forward(self, input, hid=None):
            x = self.linear(input)
            x = self.lstm(x, hid)
            return x

    model = M().eval()
    seq = torch.randn(24, 1, 512)
    h0 = torch.zeros((2, 1, 256), dtype=seq.dtype)
    hid = (h0, h0)
    # Loose tolerances: int8 quantization introduces rounding error.
    graph = self.checkQuantizeTrace(model, [seq, hid], atol=3e-2, rtol=1e-1)
    self.assertGraphContainsExactly(graph, "ipex::quantized_lstm", 1)
    self.assertGraphContainsExactly(graph, "aten::lstm", 0)
def test_conv2d_with_padding(self):
    """Conv2d with each non-default padding_mode must still be quantized:
    one LLGA fusion group containing dequantize + _convolution."""

    class M(nn.Module):
        def __init__(self, padding_mode):
            super(M, self).__init__()
            self.conv = nn.Conv2d(
                3, 3, 2, padding=1, bias=True, padding_mode=padding_mode
            )

        def forward(self, x):
            x = self.conv(x)
            return x

    x = torch.rand(1, 3, 14, 14)
    patterns = [
        ["aten::dequantize", "aten::_convolution"],
    ]
    # "zeros" is the default padding mode; exercise the other three.
    for padding_mode in ["circular", "replicate", "reflect"]:
        m = M(padding_mode=padding_mode).eval()
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.checkPatterns(graph, patterns)
class TestIpexQuantizationConvertAPI(JitLlgaTestCase):
    """Tests for the ipex.quantization Python API: prepare/convert inplace
    semantics, qconf-summary save/load round-trips, observer dtype
    overrides, and tensor-subclass formatting."""

    def test_inplace_preapre(self):
        # NOTE(review): method name misspells "prepare"; kept unchanged so
        # the public test name is stable.
        # prepare(inplace=True) must alias the original parameter storage,
        # inplace=False must deep-copy it.
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(128, 1)

            def forward(self, x):
                x = self.linear(x)
                return x

        x = torch.rand(1, 128)
        for inplace in [False, True]:
            m = M()
            prepared_model = ipex.quantization.prepare(
                m, static_qconfig[0], example_inputs=x, inplace=inplace
            )
            if inplace:
                self.assertEqual(
                    m.linear.weight.data_ptr(), prepared_model.linear.weight.data_ptr()
                )
            else:
                self.assertNotEqual(
                    m.linear.weight.data_ptr(), prepared_model.linear.weight.data_ptr()
                )

    def test_inplace_convert(self):
        # convert with inplace=True and int8_bf16=True should mutate the
        # source module's parameter dtypes; otherwise the source module
        # must be left untouched.
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(128, 1)

            def forward(self, x):
                x = self.linear(x)
                return x

        m = M()
        x = torch.rand(1, 128)
        # int8_bf16=True is currently excluded from the sweep, so only the
        # "must not change" branch below is exercised today.
        for int8_bf16 in [False]:
            m_ = copy.deepcopy(m)
            for inplace in [False, True]:
                orgin_model_weight_dtype = m_.linear.weight.dtype
                orgin_model_bias_dtype = m_.linear.bias.dtype
                _, _, ori_model = self.prepareModel(
                    m_,
                    x,
                    qconfig=static_qconfig[1],
                    int8_bf16=int8_bf16,
                    prepare_inplace=True,
                    convert_inplace=inplace,
                )
                if inplace and int8_bf16:
                    if (
                        m_.linear.weight.dtype == orgin_model_weight_dtype
                        or m_.linear.bias.dtype == orgin_model_bias_dtype
                    ):
                        print("model should have changed")
                        assert 0
                else:
                    if (
                        m_.linear.weight.dtype != orgin_model_weight_dtype
                        or m_.linear.bias.dtype != orgin_model_bias_dtype
                    ):
                        print("model should not change")
                        assert 0

    def test_qconf_summary_save_load(self):
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = nn.Conv2d(3, 64, 1, 1)
                self.linear = nn.Linear(256, 1)

            def forward(self, x):
                x = self.conv(x)
                x = torch.flatten(x, 1)
                x = self.linear(x)
                y = torch.relu(x)
                x = torch.add(x, y)
                return x

        m = M()
        x = torch.rand(1, 3, 2, 2)
        prepared_model = ipex.quantization.prepare(
            m, static_qconfig[0], example_inputs=x, inplace=False
        )
        prepared_model(x)  # calibrate
        with tempfile.TemporaryDirectory() as tmp:
            # case1: save qconf and load qconf.
            path = os.path.join(tmp, "configure.json")
            prepared_model.save_qconf_summary(path)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model_ref = torch.jit.trace(convert_model, x).eval()
            traced_model_ref = torch.jit.freeze(traced_model_ref)
            # load the saved qconf
            prepared_model = ipex.quantization.prepare(
                m, static_qconfig[0], example_inputs=x, inplace=False
            )
            prepared_model.load_qconf_summary(path)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model = torch.jit.trace(convert_model, x).eval()
            traced_model = torch.jit.freeze(traced_model)
            # Run twice so JIT optimization kicks in before comparing.
            for i in range(2):
                y_before = traced_model_ref(x)
                y_after = traced_model(x)
                self.assertEqual(y_before, y_after)
            # save and load qconf again to make sure we didn't lost something
            path2 = os.path.join(tmp, "configure_new.json")
            prepared_model.save_qconf_summary(path2)
            prepared_model = ipex.quantization.prepare(
                m, static_qconfig[0], example_inputs=x, inplace=False
            )
            prepared_model.load_qconf_summary(path2)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model = torch.jit.trace(convert_model, x).eval()
            traced_model = torch.jit.freeze(traced_model)
            for i in range(2):
                y_after = traced_model(x)
                self.assertEqual(y_before, y_after)
            # make sure the new saved json is same as old one.
            with open(path, "r") as f:
                old_json = json.load(f)
            with open(path2, "r") as f:
                new_json = json.load(f)
            self.assertTrue(old_json == new_json)
            # case2: load qconf and re-do calibration, make sure the scales/zps is updated.
            x_new = torch.rand(1, 3, 2, 2) * 10
            # do ref quantization
            prepared_model = ipex.quantization.prepare(
                m, static_qconfig[0], example_inputs=x_new, inplace=False
            )
            prepared_model(x_new)
            ref_path = os.path.join(tmp, "configure_ref.json")
            prepared_model.save_qconf_summary(ref_path)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model_ref = torch.jit.trace(convert_model, x_new).eval()
            traced_model_ref = torch.jit.freeze(traced_model_ref)
            # load qconf, and re-do calibration
            prepared_model = ipex.quantization.prepare(
                m, static_qconfig[0], example_inputs=x_new, inplace=False
            )
            prepared_model.load_qconf_summary(path2)
            prepared_model(x_new)
            new_path = os.path.join(tmp, "configure_new.json")
            prepared_model.save_qconf_summary(new_path)
            # BUGFIX: convert the re-calibrated prepared_model before tracing.
            # Previously the stale `convert_model` from the reference run was
            # traced again, so the comparison below compared the reference
            # model with itself and never exercised the re-calibrated model.
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model_new = torch.jit.trace(convert_model, x_new).eval()
            traced_model_new = torch.jit.freeze(traced_model_new)
            for i in range(2):
                y_ref = traced_model_ref(x_new)
                y_new = traced_model_new(x_new)
                self.assertEqual(y_ref, y_new)
            # make sure the new saved json is same as ref one.
            with open(ref_path, "r") as f:
                old_json = json.load(f)
            with open(new_path, "r") as f:
                new_json = json.load(f)
            self.assertTrue(old_json == new_json)

    def test_observer_dtype_update(self):
        # Editing the activation observer's dtype/quant range in a saved
        # qconf summary and reloading it must be equivalent to preparing
        # directly with that observer configuration.
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(4, 4)

            def forward(self, x):
                return self.linear(x)

        m = M()
        x = torch.rand(4, 4)
        prepared_model = ipex.quantization.prepare(
            m, static_qconfig[0], example_inputs=x, inplace=False
        )
        prepared_model(x)
        with tempfile.TemporaryDirectory() as tmp:
            ref_path = os.path.join(tmp, "configure.json")
            prepared_model.save_qconf_summary(ref_path)
            with open(ref_path, "r") as f:
                old_json = json.load(f)
            # change observe's dtype.
            old_json[" "]["q_op_infos"]["0"]["activation_observer"][
                "dtype"
            ] = "torch.qint8"
            old_json[" "]["q_op_infos"]["0"]["activation_observer"][
                "quant_min"
            ] = -128
            old_json[" "]["q_op_infos"]["0"]["activation_observer"][
                "quant_max"
            ] = 127
            new_path = os.path.join(tmp, "configure_new.json")
            with open(new_path, "w") as fp:
                json.dump(old_json, fp, indent=4)
            prepared_model.load_qconf_summary(new_path)
            prepared_model(x)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model = torch.jit.trace(convert_model, x).eval()
            traced_model = torch.jit.freeze(traced_model)
            for _ in range(2):
                y_new = traced_model(x)
            # Reference: prepare directly with a qint8 activation observer.
            ref_qconfig = QConfig(
                activation=MinMaxObserver.with_args(
                    qscheme=torch.per_tensor_affine, dtype=torch.qint8
                ),
                weight=default_weight_observer,
            )
            prepared_model = ipex.quantization.prepare(
                m, ref_qconfig, example_inputs=x, inplace=False
            )
            prepared_model(x)
            convert_model = ipex.quantization.convert(prepared_model)
            traced_model = torch.jit.trace(convert_model, x).eval()
            traced_model = torch.jit.freeze(traced_model)
            for _ in range(2):
                y_ref = traced_model(x)
            self.assertEqual(y_ref, y_new)

    def test_subclass_format(self):
        # __format__ on the outputs of prepared/converted models must not
        # crash (calibration may return tensor subclasses).
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(128, 1)

            def forward(self, x):
                x = self.linear(x)
                return x.sum()

        x = torch.rand(1, 128)
        prepared_model = ipex.quantization.prepare(
            M().eval(), static_qconfig[0], example_inputs=x
        )
        out = prepared_model(x)
        print(out.__format__(".4f"))
        converted_model = ipex.quantization.convert(prepared_model)
        out = converted_model(x)
        print(out.__format__(".4f"))
class TestRemoveMutate(JitLlgaTestCase):
    """Checks for the remove-mutation JIT pass: inplace ops are rewritten to
    their functional form only when the mutated value (or an alias of it) is
    not used after the mutation."""

    def test_mutated_value_alive_after_inplace_op(self):
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = torch.nn.Conv2d(3, 16, 3, 224)

            def forward(self, x):
                a = self.conv(x)
                b = torch.sigmoid(a)
                c = a[0]  # view of `a`, used after the mutation below
                a.mul_(b)
                c += 2
                return c

        m = M()
        x = torch.randn(1, 3, 224, 224)
        graph, _, _ = self.prepareModel(m, [x])
        # `a` is aliased by `c`, so mul_ must NOT be functionalized.
        FileCheck().check_not("aten::mul").check("aten::mul_").run(graph)

    def test_mutated_value_inalive_after_inplace_op(self):
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = torch.nn.Conv2d(3, 16, 3, 224)

            def forward(self, x):
                a = self.conv(x)
                b = torch.sigmoid(a)
                res = a.mul_(b)  # `a` is not used after this point
                return res

        m = M()
        x = torch.randn(1, 3, 224, 224)
        graph, _, _ = self.prepareModel(m, [x])
        # Safe here to rewrite mul_ into the out-of-place mul.
        FileCheck().check_not("aten::mul_").check("aten::mul").run(graph)

    @llga_fp32_bf16_test_env
    def test_special_mapped_op(self):
        # zero_ / fill_ / normal_ have dedicated functional mappings in the
        # remove-mutation pass; verify both graph shape and numerics.
        class M1(nn.Module):
            def __init__(self):
                super(M1, self).__init__()

            def forward(self, x, y):
                z = x + 1
                z.zero_()
                y.fill_(3)
                return z, y

        m = M1()
        x = torch.tensor([2, 2])
        y = torch.tensor([2, 4])
        graph, traced_model, _ = self.prepareModel(m, [x, y])
        FileCheck().check_not("aten::zero_").check_not("aten::fill_").run(graph)
        self.assertEqual(traced_model(x, y), m(x, y))

        class M2(nn.Module):
            def __init__(self):
                super(M2, self).__init__()

            def forward(self, x):
                return x.normal_()

        m = M2()
        x = torch.rand(2, 1, 3, 4)
        graph, traced_model, _ = self.prepareModel(m, [x])
        FileCheck().check_not("normal_").run(graph)
        # Pin the RNG state so eager and traced runs draw identical samples.
        with freeze_rng_state():
            out1 = m(x)
        with freeze_rng_state():
            out2 = traced_model(x)
        self.assertEqual(out1, out2)

        class M3(nn.Module):
            def __init__(self):
                super(M3, self).__init__()

            def forward(self, x):
                x.fill_(3)
                x.zero_()
                return x

        m = M3()
        x = torch.tensor([2, 2])
        graph, traced_model, _ = self.prepareModel(m, [x])
        FileCheck().check_not("aten::zero_").check_not("aten::fill_").run(graph)
        self.assertEqual(traced_model(x), m(x))
class TestDynamicQuantization(JitLlgaTestCase):
    """Dynamic (weight-only) quantization coverage for Linear and LSTM."""

    def test_linear_dynamic(self):
        class SubModule(nn.Module):
            def __init__(self):
                super(SubModule, self).__init__()
                self.linear = nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return x

        class M(nn.Module):
            # Cover linears reached via Sequential, a submodule, and directly.
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Sequential(nn.Linear(3, 3))
                self.linear2 = SubModule()
                self.linear3 = nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear1(x)
                x = self.linear2(x)
                x = self.linear3(x)
                return x

        m = M().eval()
        x = torch.randn(3, 3)
        for qconfig in dynamic_qconfig:
            graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
            # BUGFIX: the negative pattern was "aten:linear" (single colon),
            # which can never occur in a graph dump, so the check_not was
            # vacuous. The op name is "aten::linear".
            FileCheck().check_not("aten::linear").check(
                "quantized::linear_dynamic"
            ).run(graph)

    def test_linear_dynamic_bf16(self):
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return x

        x = torch.randn(3, 3)
        m = M().eval()
        graph, _, _ = self.prepareModel(
            m, [x], qconfig=dynamic_qconfig[0], int8_bf16=True
        )
        # BUGFIX: see test_linear_dynamic — "aten:linear" -> "aten::linear".
        FileCheck().check_not("aten::linear").check(
            "quantized::linear_dynamic"
        ).run(graph)

    def test_lstm_dynamic(self):
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.lstm = torch.nn.LSTM(10, 20, 2)

            def forward(self, x, hx, cx):
                x, h_xs = self.lstm(x, (hx, cx))
                return x, h_xs

        m = M().eval()
        x = torch.randn(5, 3, 10)
        h = torch.randn(2, 3, 20)
        c = torch.randn(2, 3, 20)
        for qconfig in dynamic_qconfig:
            graph = self.checkQuantizeTrace(m, [x, h, c], atol=2e-1, qconfig=qconfig)
            # BUGFIX: "aten:lstm" (single colon) -> "aten::lstm"; the old
            # pattern never matched, making the negative check vacuous.
            FileCheck().check_not("aten::lstm").check("aten::quantized_lstm").run(
                graph
            )
class TestDictInput(JitLlgaTestCase):
    """Quantization tracing must produce the same fused graph whether the
    example inputs are passed positionally or as a kwargs dict."""

    def test_only_dict_input(self):
        class SubModule(nn.Module):
            def __init__(self):
                super(SubModule, self).__init__()
                self.linear = nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return x

        class M(nn.Module):
            # Three independent linears whose outputs are summed; the two
            # adds should fuse with the adjacent linears.
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Sequential(nn.Linear(3, 3))
                self.linear2 = SubModule()
                self.linear3 = nn.Linear(3, 3)

            def forward(self, x1, x2, x3):
                x1 = self.linear1(x1)
                x2 = self.linear2(x2)
                x3 = self.linear3(x3)
                return x1 + x2 + x3

        int8_bf16_list = [True, False]
        for qconfig, int8_bf16 in itertools.product(static_qconfig, int8_bf16_list):
            # Step1: Test model with tuple(x1, x2, x3) input.
            m = M().eval()
            m2 = copy.deepcopy(m).eval()
            x1 = torch.randn(3, 3)
            x2 = torch.randn(3, 3)
            x3 = torch.randn(3, 3)
            graph = self.checkQuantizeTrace(
                m, [x1, x2, x3], atol=2e-1, qconfig=qconfig, int8_bf16=int8_bf16
            )
            FileCheck().check("aten::linear").run(graph)
            # One plain linear group plus two linear+add fusion groups.
            patterns = [
                [
                    "aten::dequantize",
                    "aten::linear",
                ],
                [
                    "aten::dequantize",
                    "aten::linear",
                    "aten::add",
                ],
                [
                    "aten::dequantize",
                    "aten::linear",
                    "aten::add",
                ],
            ]
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
            self.checkPatterns(graph, patterns)
            # Step2: Test model with Dict{"x1": x1, "x2": x2, "x3": x3} input.
            graph = self.checkQuantizeTrace(
                m2,
                atol=2e-1,
                qconfig=qconfig,
                int8_bf16=int8_bf16,
                x_kwarg={"x1": x1, "x2": x2, "x3": x3},
            )
            FileCheck().check("aten::linear").run(graph)
            patterns = [
                [
                    "aten::dequantize",
                    "aten::linear",
                ],
                [
                    "aten::dequantize",
                    "aten::linear",
                    "aten::add",
                ],
                [
                    "aten::dequantize",
                    "aten::linear",
                    "aten::add",
                ],
            ]
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
            self.checkPatterns(graph, patterns)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_tests()
| 34,156 | 35.609861 | 131 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_interaction.py | import unittest
import torch
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
import itertools
class TestInteractionCases(TestCase):
    def test_interaction(self):
        """Compare the fused ipex.nn.functional.interaction kernel against a
        pure-PyTorch reference (bmm of concatenated feature vectors, keeping
        the strictly-lower-triangular entries), forward and backward, for
        fp32 and bf16 and for odd/even feature widths."""

        def interact_fusion(x, ly):
            # Fused IPEX kernel under test.
            A = [x] + ly
            R = ipex.nn.functional.interaction(*A)
            return R

        def interact_features(x, ly):
            # Eager reference: pairwise dot products of all feature vectors.
            (batch_size, d) = x.shape
            T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
            Z = torch.bmm(T, torch.transpose(T, 1, 2))
            _, ni, nj = Z.shape
            offset = 0
            # Indices of the strictly-lower-triangular part of Z.
            li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
            lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
            Zflat = Z[:, li, lj]
            # concatenate dense features and interactions
            R = torch.cat([x] + [Zflat], dim=1)
            return R

        dtypes = [torch.float32, torch.bfloat16]
        feature_sizes = [127, 128]  # odd and even vector widths
        for dtype, feature_size in itertools.product(dtypes, feature_sizes):
            x1 = (
                torch.randn([2048, feature_size])
                .to(dtype)
                .clone()
                .detach()
                .requires_grad_()
            )
            x2 = x1.clone().detach().requires_grad_()
            ly1 = []
            ly2 = []
            # 26 sparse-feature tensors plus the dense one.
            # NOTE(review): 26 presumably mirrors the DLRM/Criteo feature
            # count — confirm against the model config.
            for i in range(0, 26):
                V = (
                    torch.randn([2048, feature_size])
                    .to(dtype)
                    .clone()
                    .detach()
                    .requires_grad_()
                )
                ly1.append(V)
                ly2.append(V.clone().detach().requires_grad_())
            A = interact_fusion(x1, ly1)
            B = interact_features(x2, ly2)
            # For FP32 data type, fused interaction will use MKLDNN gemm while
            # non-fused interaction will use GEMM. So there might be a small difference here
            torch.testing.assert_allclose(A, B, rtol=1e-4, atol=1e-4)
            A.sum().backward()
            B.sum().backward()
            torch.testing.assert_allclose(x1.grad, x2.grad, rtol=0.005, atol=0.1)
            for i in range(0, 26):
                torch.testing.assert_allclose(
                    ly1[i].grad, ly2[i].grad, rtol=0.005, atol=0.1
                )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test = unittest.main()
| 2,405 | 33.869565 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_check.py | import unittest
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
class Conv(nn.Module):
    """Minimal wrapper around a bias-free ``nn.Conv2d``.

    Extra keyword arguments are forwarded to ``nn.Conv2d`` so tests can
    inject invalid options (negative padding, zero stride, ...).
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            bias=False,
            **kwargs,
        )

    def forward(self, x):
        out = self.conv(x)
        return out
class ConvTranspose(nn.Module):
    """Minimal wrapper around a bias-free ``nn.ConvTranspose2d``.

    Extra keyword arguments are forwarded to ``nn.ConvTranspose2d`` so
    tests can inject invalid options (negative padding, zero stride, ...).
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super(ConvTranspose, self).__init__()
        self.conv_transpose = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            bias=False,
            **kwargs,
        )

    def forward(self, x):
        out = self.conv_transpose(x)
        return out
class Linear(nn.Module):
    """Minimal wrapper around ``nn.Linear``.

    FIX: the constructor accepted ``**kwargs`` but silently dropped them;
    they are now forwarded to ``nn.Linear``, consistent with the Conv and
    ConvTranspose wrappers in this file. Backward compatible: existing
    callers pass no extra kwargs.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(Linear, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)

    def forward(self, x):
        return self.linear(x)
class Tester(TestCase):
    """Verify that invalid conv / conv-transpose / linear configurations
    raise RuntimeErrors with the expected messages after ipex.optimize."""

    def _test_conv_check(self, model, input, e_message):
        """Optimize `model`, run `input` and assert a RuntimeError whose
        message contains `e_message` is raised."""
        try:
            m = model.eval()
            m = ipex.optimize(m)
            with torch.no_grad():
                m(input)
            # No error was raised: this RuntimeError is caught by the except
            # below, and its message deliberately fails the assertTrue.
            raise RuntimeError("the CHECK doesn't trigger error as expected")
        except RuntimeError as e:
            self.assertTrue(e_message in str(e))

    def _test_linear_check(self, model, input, e_message):
        """Same as _test_conv_check, but with auto_kernel_selection=True so
        the linear kernel path under test is exercised."""
        try:
            m = model.eval()
            m = ipex.optimize(m, auto_kernel_selection=True)
            with torch.no_grad():
                m(input)
            raise RuntimeError("the CHECK doesn't trigger error as expected")
        except RuntimeError as e:
            self.assertTrue(e_message in str(e))

    def test_conv_negative_padding(self):
        self._test_conv_check(
            Conv(16, 33, 3, padding=-1),
            torch.randn(20, 16, 50, 100),
            "negative padding is not supported",
        )

    def test_conv_nonpositive_stride(self):
        self._test_conv_check(
            Conv(16, 33, 3, stride=0),
            torch.randn(20, 16, 50, 100),
            "non-positive stride is not supported",
        )

    def test_conv_nonpositive_dilation(self):
        self._test_conv_check(
            Conv(16, 33, 3, dilation=0),
            torch.randn(20, 16, 50, 100),
            "non-positive dilation is not supported",
        )

    def test_conv_input_dims(self):
        # 3-d input fed to a 2-d convolution.
        self._test_conv_check(
            Conv(16, 33, 3),
            torch.randn(20, 16, 50),
            "Expected 4-dimensional input for 4-dimensional weight [33, 16, 3, 3], "
            "but got 3-dimensional input of size [20, 16, 50] instead",
        )

    def test_conv_input_shape(self):
        # Channel count mismatch between input and weight.
        self._test_conv_check(
            Conv(16, 33, 3),
            torch.randn(20, 30, 50, 100),
            "Given groups=1, weight of size [33, 16, 3, 3], expected input[20, 30, 50, 100] to "
            "have 16 channels, but got 30 channels instead",
        )

    def test_conv_kernel_size(self):
        # Kernel larger than the (padded) spatial input.
        self._test_conv_check(
            Conv(16, 33, 60),
            torch.randn(20, 16, 50, 100),
            "Calculated padded input size per channel: (50 x 100). Kernel size: (60 x 60). "
            "Kernel size can't be greater than actual input size",
        )

    def test_conv_transpose_negative_padding(self):
        self._test_conv_check(
            ConvTranspose(16, 33, 3, padding=-1),
            torch.randn(20, 16, 50, 100),
            "negative padding is not supported",
        )

    def test_conv_transpose_nonpositive_stride(self):
        self._test_conv_check(
            ConvTranspose(16, 33, 3, stride=0),
            torch.randn(20, 16, 50, 100),
            "non-positive stride is not supported",
        )

    def test_conv_transpose_nonpositive_dilation(self):
        self._test_conv_check(
            ConvTranspose(16, 33, 3, dilation=0),
            torch.randn(20, 16, 50, 100),
            "non-positive dilation is not supported",
        )

    def test_conv_transpose_input_dims(self):
        self._test_conv_check(
            ConvTranspose(16, 33, 3),
            torch.randn(20, 16, 50),
            "Expected 4-dimensional input for 4-dimensional weight [16, 33, 3, 3], "
            "but got 3-dimensional input of size [20, 16, 50] instead",
        )

    def test_conv_transpose_input_shape(self):
        self._test_conv_check(
            ConvTranspose(16, 33, 3),
            torch.randn(20, 30, 50, 100),
            "Given transposed=True, weight of size [16, 33, 3, 3], expected input[20, 30, 50, 100] to "
            "have 16 channels, but got 30 channels instead",
        )

    def test_linear(self):
        # Incompatible matmul shapes.
        self._test_linear_check(
            Linear(16, 33),
            torch.randn(3),
            "Check the shapes of mat1 and mat2, they cannot be multiplied!",
        )

    def test_linear_bf16(self):
        # Same shape check must also fire on the bf16 path.
        self._test_linear_check(
            Linear(16, 33).to(torch.bfloat16),
            torch.randn(3).bfloat16(),
            "Check the shapes of mat1 and mat2, they cannot be multiplied!",
        )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test = unittest.main()
| 5,248 | 31.80625 | 103 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_cpu_ops.py | import unittest
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import itertools
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
import torch.autograd.functional as autogradF
from copy import deepcopy
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
bn_m = {1: nn.BatchNorm1d, 2: nn.BatchNorm2d, 3: nn.BatchNorm3d}
class CPUOPsTester(TestCase):
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_channelshuffle(self):
    """ChannelShuffle must produce identical values for contiguous and
    channels-last inputs, and keep the channels-last layout on output."""
    shuffle = torch.nn.ChannelShuffle(20)
    base = torch.randn(3, 40, 20, 20)
    out_contig = shuffle(base.clone())
    # Same op on a channels-last copy of the same data.
    nhwc_input = base.clone().to(memory_format=torch.channels_last)
    out_nhwc = shuffle(nhwc_input)
    self.assertTrue(out_nhwc.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(out_contig, out_nhwc)
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_pixel_shuffle_unshuffle(self):
    """Exercise nn.PixelShuffle / nn.PixelUnshuffle for 1D-5D inputs:
    success cases (3D+ inputs with valid factors, including an element-wise
    correctness check and the shuffle/unshuffle round trip) and expected
    error cases (too few dims, indivisible channels/height/width, zero or
    negative factors)."""

    def _test_pixel_shuffle_unshuffle_helper(
        num_input_dims, valid_channels_dim=True, upscale_factor=None
    ):
        # Function to imperatively ensure pixels are shuffled to the correct locations.
        # Used to validate the batch operations in pixel_shuffle.
        def _verify_pixel_shuffle(input, output, upscale_factor):
            for c in range(output.size(-3)):
                for h in range(output.size(-2)):
                    for w in range(output.size(-1)):
                        height_idx = h // upscale_factor
                        weight_idx = w // upscale_factor
                        channel_idx = (
                            (upscale_factor * (h % upscale_factor))
                            + (w % upscale_factor)
                            + (c * upscale_factor**2)
                        )
                        self.assertEqual(
                            output[..., c, h, w],
                            input[..., channel_idx, height_idx, weight_idx],
                        )

        upscale_factor = (
            random.randint(2, 5) if upscale_factor is None else upscale_factor
        )
        # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
        channels = random.randint(1, 4) * upscale_factor**2 + (
            0 if valid_channels_dim else 1
        )
        height = random.randint(5, 10)
        width = random.randint(5, 10)
        if num_input_dims == 1:
            input = torch.rand(channels, requires_grad=True)
        elif num_input_dims == 2:
            input = torch.rand(height, width, requires_grad=True)
        else:
            # Random leading batch dims for the 4D/5D cases.
            batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
            input = torch.rand(
                *batch_sizes, channels, height, width, requires_grad=True
            )
        ps = nn.PixelShuffle(upscale_factor)
        pus = nn.PixelUnshuffle(downscale_factor=upscale_factor)
        if num_input_dims >= 3 and valid_channels_dim and upscale_factor > 0:
            output = ps(input)
            _verify_pixel_shuffle(input, output, upscale_factor)
            # Pixel shuffle is a pure permutation, so backpropagating the
            # output itself must reproduce the input as the gradient.
            output.backward(output.data)
            self.assertEqual(input.data, input.grad.data)
            # Ensure unshuffle properly inverts shuffle.
            unshuffle_output = pus(output)
            self.assertEqual(input, unshuffle_output)
        else:
            self.assertRaises(RuntimeError, lambda: ps(input))

    def _test_pixel_unshuffle_error_case_helper(
        num_input_dims,
        valid_height_dim=True,
        valid_width_dim=True,
        downscale_factor=None,
    ):
        # Build an input that is invalid for PixelUnshuffle in exactly one
        # way, and check the call raises.
        downscale_factor = (
            random.randint(2, 5) if downscale_factor is None else downscale_factor
        )
        channels = random.randint(1, 4)
        # If valid_height_dim=False, add 1 to make height dim indivisible by downscale_factor.
        height = random.randint(3, 5) * abs(downscale_factor) + (
            0 if valid_height_dim else 1
        )
        # If valid_width_dim=False, add 1 to make width dim indivisible by downscale_factor.
        width = random.randint(3, 5) * abs(downscale_factor) + (
            0 if valid_width_dim else 1
        )
        if num_input_dims == 1:
            input = torch.rand(channels, requires_grad=True)
        elif num_input_dims == 2:
            input = torch.rand(height, width, requires_grad=True)
        else:
            batch_sizes = [random.randint(1, 3) for _ in range(num_input_dims - 3)]
            input = torch.rand(
                *batch_sizes, channels, height, width, requires_grad=True
            )
        pus = nn.PixelUnshuffle(downscale_factor)
        self.assertRaises(RuntimeError, lambda: pus(input))

    def _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims):
        # For 1D - 2D, this is an error case.
        # For 3D - 5D, this is a success case for pixel_shuffle + pixel_unshuffle.
        _test_pixel_shuffle_unshuffle_helper(num_input_dims=num_input_dims)
        # Error cases for pixel_shuffle.
        _test_pixel_shuffle_unshuffle_helper(
            num_input_dims=num_input_dims, valid_channels_dim=False
        )
        _test_pixel_shuffle_unshuffle_helper(
            num_input_dims=num_input_dims, upscale_factor=0
        )
        _test_pixel_shuffle_unshuffle_helper(
            num_input_dims=num_input_dims, upscale_factor=-2
        )
        # Error cases for pixel_unshuffle.
        _test_pixel_unshuffle_error_case_helper(
            num_input_dims=num_input_dims, valid_height_dim=False
        )
        _test_pixel_unshuffle_error_case_helper(
            num_input_dims=num_input_dims, valid_width_dim=False
        )
        _test_pixel_unshuffle_error_case_helper(
            num_input_dims=num_input_dims, downscale_factor=0
        )
        _test_pixel_unshuffle_error_case_helper(
            num_input_dims=num_input_dims, downscale_factor=-2
        )

    def test_pixel_shuffle_unshuffle_1D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=1)

    def test_pixel_shuffle_unshuffle_2D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=2)

    def test_pixel_shuffle_unshuffle_3D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=3)

    def test_pixel_shuffle_unshuffle_4D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=4)

    def test_pixel_shuffle_unshuffle_5D():
        _test_pixel_shuffle_unshuffle_for_input_dims(num_input_dims=5)

    test_pixel_shuffle_unshuffle_1D()
    test_pixel_shuffle_unshuffle_2D()
    test_pixel_shuffle_unshuffle_3D()
    test_pixel_shuffle_unshuffle_4D()
    test_pixel_shuffle_unshuffle_5D()
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_pixel_shuffle_nhwc_cpu(self):
    """PixelShuffle/PixelUnshuffle round trip on a channels-last tensor
    must match the contiguous reference in output and input gradient, and
    must preserve the channels-last layout."""
    nhwc_in = (
        torch.randn(3, 18, 4, 4, device="cpu")
        .contiguous(memory_format=torch.channels_last)
        .requires_grad_()
    )
    upstream = torch.randn(3, 18, 4, 4, device="cpu")
    shuffle = torch.nn.PixelShuffle(3)
    unshuffle = torch.nn.PixelUnshuffle(3)
    # Reference path: same data, default contiguous layout.
    contig_in = nhwc_in.detach().clone().contiguous().requires_grad_(True)
    contig_upstream = upstream.detach().clone().contiguous()
    ref_shuffle = torch.nn.PixelShuffle(3)
    ref_unshuffle = torch.nn.PixelUnshuffle(3)
    nhwc_out = unshuffle(shuffle(nhwc_in))
    nhwc_out.backward(upstream)
    contig_out = ref_unshuffle(ref_shuffle(contig_in))
    contig_out.backward(contig_upstream)
    self.assertTrue(nhwc_out.is_contiguous(memory_format=torch.channels_last))
    self.assertTrue(contig_out.is_contiguous())
    self.assertEqual(nhwc_out, contig_out)
    self.assertEqual(nhwc_in.grad, contig_in.grad)
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_batch_norm(self):
    """BatchNorm2d/3d (via the module-level bn_m map) must agree across
    contiguous, channels-last, bfloat16, autocast, and non-contiguous
    inputs, in forward values and input gradients."""
    for dim in [2, 3]:
        m = bn_m[dim](10)
        input_size = [3, 10, 25, 25]
        if dim == 3:
            input_size.append(25)
        x = torch.randn(input_size)
        # Reference run: plain contiguous fp32.
        x1 = x.clone().detach().requires_grad_()
        y1 = m(x1)
        y1.mean().backward()
        # test channels last
        suggest_memory_format = (
            torch.channels_last if dim == 2 else torch.channels_last_3d
        )
        x2 = (
            x.clone()
            .detach()
            .to(memory_format=suggest_memory_format)
            .requires_grad_()
        )
        y2 = m(x2)
        y2.mean().backward()
        self.assertTrue(y2.is_contiguous(memory_format=suggest_memory_format))
        self.assertEqual(y1, y2)
        self.assertTrue(x2.grad.is_contiguous(memory_format=suggest_memory_format))
        self.assertEqual(x1.grad, x2.grad)
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = m(x3)
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        # Loose tolerance: bf16 has ~3 decimal digits of precision.
        self.assertEqual(y1, y3, prec=0.1)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = m(x4)
                y4.mean().backward()
                # Under autocast, output/grad dtypes must follow the input.
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=suggest_memory_format)
                    .requires_grad_()
                )
                y5 = m(x5)
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(
                    y5.is_contiguous(memory_format=suggest_memory_format)
                )
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=suggest_memory_format)
                )
        # test non-contiguous inputs
        x6 = torch.transpose(x.clone().detach(), 2, 3).requires_grad_()
        x_ref = x6.clone().detach().contiguous().requires_grad_()
        y6 = m(x6)
        y6.mean().backward()
        y_ref = m(x_ref)
        y_ref.mean().backward()
        self.assertEqual(y6, y_ref)
        self.assertEqual(x6.grad, x_ref.grad)
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_adaptive_avg_pool2d(self):
    """AdaptiveAvgPool2d must agree across contiguous, channels-last,
    bfloat16, and autocast inputs, in forward values and input gradients."""
    m = nn.AdaptiveAvgPool2d((5, 7))
    x = torch.randn(3, 64, 8, 9)
    # Reference run: plain contiguous fp32.
    x1 = x.clone().detach().requires_grad_()
    y1 = m(x1)
    y1.mean().backward()
    # test channels last
    x2 = x.clone().detach().to(memory_format=torch.channels_last).requires_grad_()
    y2 = m(x2)
    y2.mean().backward()
    self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(y1, y2)
    self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(x1.grad, x2.grad)
    # test bfloat16
    x3 = x.clone().detach().bfloat16().requires_grad_()
    y3 = m(x3)
    y3.mean().backward()
    self.assertTrue(y3.dtype == torch.bfloat16)
    # Loose tolerance for bf16 rounding error.
    self.assertEqual(y1, y3, prec=0.01)
    self.assertTrue(x3.grad.dtype == torch.bfloat16)
    self.assertEqual(x1.grad, x3.grad)
    # test autocast
    with torch.cpu.amp.autocast():
        for datatype in (torch.bfloat16, torch.float32):
            x4 = x.clone().detach().to(datatype).requires_grad_()
            y4 = m(x4)
            y4.mean().backward()
            # Output/grad dtypes must follow the input dtype.
            self.assertTrue(y4.dtype == datatype)
            self.assertTrue(x4.grad.dtype == datatype)
            x5 = (
                x.clone()
                .detach()
                .to(datatype)
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y5 = m(x5)
            y5.mean().backward()
            self.assertTrue(y5.dtype == datatype)
            self.assertTrue(x5.grad.dtype == datatype)
            self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(
                x5.grad.is_contiguous(memory_format=torch.channels_last)
            )
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_copy(self):
    """Tensor.copy_ must preserve values when copying into a plain tensor,
    a channels-last tensor (keeping its layout), and a bfloat16 tensor
    (within bf16 tolerance)."""
    src = torch.randn(3, 64, 8, 9)
    plain_dst = torch.empty(3, 64, 8, 9)
    plain_dst.copy_(src)
    self.assertEqual(src, plain_dst)
    # Channels-last destination keeps its memory format after the copy.
    nhwc_dst = torch.empty(3, 64, 8, 9).to(memory_format=torch.channels_last)
    nhwc_dst.copy_(src)
    self.assertTrue(nhwc_dst.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(src, nhwc_dst)
    # Copy with downcast to bfloat16 is lossy, hence the tolerance.
    bf16_dst = torch.empty(3, 64, 8, 9).bfloat16()
    bf16_dst.copy_(src)
    self.assertTrue(bf16_dst.dtype == torch.bfloat16)
    self.assertEqual(src, bf16_dst, prec=0.01)
# Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
def test_max_pool2d(self):
    """MaxPool2d must agree across contiguous, channels-last, bfloat16,
    and autocast inputs, in forward values and input gradients."""
    m = nn.MaxPool2d((3, 2), stride=(2, 1))
    x = torch.randn(20, 16, 50, 32)
    # Reference run: plain contiguous fp32.
    x1 = x.clone().detach().requires_grad_()
    y1 = m(x1)
    y1.mean().backward()
    # test channels last
    x2 = x.clone().detach().to(memory_format=torch.channels_last).requires_grad_()
    y2 = m(x2)
    y2.mean().backward()
    self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(y1, y2)
    self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
    self.assertEqual(x1.grad, x2.grad)
    # test bfloat16
    x3 = x.clone().detach().bfloat16().requires_grad_()
    y3 = m(x3)
    y3.mean().backward()
    self.assertTrue(y3.dtype == torch.bfloat16)
    # Loose tolerances for bf16 rounding error.
    self.assertEqual(y1, y3, prec=0.02)
    self.assertTrue(x3.grad.dtype == torch.bfloat16)
    self.assertEqual(x1.grad, x3.grad, prec=1e-4)
    # test autocast
    with torch.cpu.amp.autocast():
        for datatype in (torch.bfloat16, torch.float32):
            x4 = x.clone().detach().to(datatype).requires_grad_()
            y4 = m(x4)
            y4.mean().backward()
            # Output/grad dtypes must follow the input dtype.
            self.assertTrue(y4.dtype == datatype)
            self.assertTrue(x4.grad.dtype == datatype)
            x5 = (
                x.clone()
                .detach()
                .to(datatype)
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y5 = m(x5)
            y5.mean().backward()
            self.assertTrue(y5.dtype == datatype)
            self.assertTrue(x5.grad.dtype == datatype)
            self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(
                x5.grad.is_contiguous(memory_format=torch.channels_last)
            )
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_nearest1d(self):
        """Nearest-neighbor 1-d upsampling: bfloat16 must match fp32 within
        tolerance, and under autocast output/grad dtypes follow the input."""
        x = torch.randn(2, 2, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="nearest")
        y1.mean().backward()
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="nearest")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="nearest")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_nearest2d(self):
        """Nearest-neighbor 2-d upsampling: channels-last must match contiguous
        exactly (values, grads, layout); bfloat16 within tolerance; autocast
        propagates dtype and preserves channels-last layout."""
        x = torch.randn(2, 2, 4, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="nearest")
        y1.mean().backward()
        # test channels last
        x2 = x.clone().detach().to(memory_format=torch.channels_last).requires_grad_()
        y2 = F.interpolate(x2, scale_factor=2, mode="nearest")
        y2.mean().backward()
        self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(y1, y2)
        self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(x1.grad, x2.grad)
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="nearest")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="nearest")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=torch.channels_last)
                    .requires_grad_()
                )
                y5 = F.interpolate(x5, scale_factor=2, mode="nearest")
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last))
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=torch.channels_last)
                )
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_nearest3d(self):
        """Nearest-neighbor 3-d upsampling: bfloat16 vs fp32 within tolerance;
        autocast propagates dtype and preserves channels_last_3d layout."""
        x = torch.randn(2, 2, 2, 4, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="nearest")
        y1.mean().backward()
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="nearest")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="nearest")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=torch.channels_last_3d)
                    .requires_grad_()
                )
                y5 = F.interpolate(x5, scale_factor=2, mode="nearest")
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last_3d))
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=torch.channels_last_3d)
                )
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_linear1d(self):
        """Linear 1-d upsampling: bfloat16 must match fp32 within tolerance,
        and under autocast output/grad dtypes follow the input dtype."""
        x = torch.randn(2, 2, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="linear")
        y1.mean().backward()
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="linear")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="linear")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_bilinear2d(self):
        """Bilinear 2-d upsampling: channels-last must match contiguous exactly;
        bfloat16 within tolerance; autocast propagates dtype and keeps the
        channels-last layout on outputs and input grads."""
        x = torch.randn(2, 2, 4, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="bilinear")
        y1.mean().backward()
        # test channels last
        x2 = x.clone().detach().to(memory_format=torch.channels_last).requires_grad_()
        y2 = F.interpolate(x2, scale_factor=2, mode="bilinear")
        y2.mean().backward()
        self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(y1, y2)
        self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(x1.grad, x2.grad)
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="bilinear")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="bilinear")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=torch.channels_last)
                    .requires_grad_()
                )
                y5 = F.interpolate(x5, scale_factor=2, mode="bilinear")
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last))
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=torch.channels_last)
                )
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_upsample_trilinear3d(self):
        """Trilinear 3-d upsampling: bfloat16 vs fp32 within tolerance;
        autocast propagates dtype and preserves channels_last_3d layout."""
        x = torch.randn(2, 2, 2, 4, 4)
        x1 = x.clone().detach().requires_grad_()
        y1 = F.interpolate(x1, scale_factor=2, mode="trilinear")
        y1.mean().backward()
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = F.interpolate(x3, scale_factor=2, mode="trilinear")
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.02)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = F.interpolate(x4, scale_factor=2, mode="trilinear")
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=torch.channels_last_3d)
                    .requires_grad_()
                )
                y5 = F.interpolate(x5, scale_factor=2, mode="trilinear")
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last_3d))
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=torch.channels_last_3d)
                )
    def test_GroupNorm_memory_format(self):
        """GroupNorm must be layout-agnostic: for every combination of
        contiguous/channels-last input and grad layouts, output and input
        gradient must equal the all-contiguous reference."""

        def helper(input_format, grad_format, B=2, C=4, W=4, H=4):
            # NOTE(review): B is used both as the batch size and as the number
            # of groups below — presumably intentional for this small shape.
            net_orig = torch.nn.GroupNorm(B, C)
            net = copy.deepcopy(net_orig)
            x_orig = torch.rand(B, C, W, H, requires_grad=True)
            grad_orig = torch.rand(B, C, W, H)
            x = (
                x_orig.clone()
                .detach()
                .to(memory_format=input_format)
                .requires_grad_(True)
            )
            grad = grad_orig.detach().to(memory_format=grad_format)
            y = net(x)
            y.backward(grad)
            y_orig = net_orig(x_orig)
            y_orig.backward(grad_orig)
            self.assertEqual(y, y_orig)
            self.assertEqual(x.grad, x_orig.grad)

        # exercise all four (input layout, grad layout) combinations
        for input_format in [torch.contiguous_format, torch.channels_last]:
            for grad_format in [torch.contiguous_format, torch.channels_last]:
                helper(input_format, grad_format)
    def test_groupNorm_mixed_dtype(self):
        """Mixed-precision GroupNorm: compare three configurations — bf16
        input with bf16 params, bf16 input with fp32 params, and fp32 input
        with fp32 params — on outputs and on weight/bias/input gradients.
        Full-bf16 results get looser tolerances than the mixed path."""

        def helper(size, groups, memory_format):
            channels = size[1]
            input = torch.randn(size, dtype=torch.bfloat16).cpu()
            input_bf1 = (
                input.contiguous(memory_format=memory_format)
                .detach()
                .requires_grad_(True)
            )
            input_bf2 = input_bf1.clone().detach().requires_grad_(True)
            input_f = input_bf1.float().detach().requires_grad_(True)
            m_bf = nn.GroupNorm(groups, channels).cpu().bfloat16()
            m_f = deepcopy(m_bf).float()
            m_f2 = deepcopy(m_f)
            # bfloat16 input and bfloat16 parameters
            out = m_bf(input_bf1)
            # bfloat16 input and float parameters
            out2 = m_f(input_bf2)
            # float input and float parameters
            out3 = m_f2(input_f)
            torch.testing.assert_close(out, out2, atol=5e-3, rtol=5e-3)
            torch.testing.assert_close(out2.float(), out3, atol=5e-3, rtol=5e-3)
            grad_out = torch.randn(out2.shape, dtype=torch.bfloat16).cpu()
            grad_out_bf1 = (
                grad_out.contiguous(memory_format=memory_format)
                .detach()
                .requires_grad_(True)
            )
            grad_out_bf2 = grad_out_bf1.clone().detach().requires_grad_(True)
            grad_out_f = grad_out_bf2.clone().float().detach().requires_grad_(True)
            # bfloat16 input grad and float parameters
            out2.backward(grad_out_bf2, retain_graph=True)
            # float input grad and float parameters
            out3.backward(grad_out_f, retain_graph=True)
            # bfloat16 input grad and bfloat16 parameters
            out.backward(grad_out_bf1, retain_graph=True)
            torch.testing.assert_close(
                m_f.weight.grad, m_f2.weight.grad, atol=1e-5, rtol=1e-5
            )
            torch.testing.assert_close(
                input_bf2.grad.float(), input_f.grad, atol=5e-5, rtol=5e-3
            )
            torch.testing.assert_close(
                m_f.bias.grad, m_f2.bias.grad, atol=1e-5, rtol=1e-5
            )
            # full bf16 has lower precision compared with mixed bf16 and fp32 .
            torch.testing.assert_close(
                m_bf.weight.grad.float(), m_f.weight.grad, atol=1e-3, rtol=1e-1
            )
            torch.testing.assert_close(
                m_bf.bias.grad.float(), m_f.bias.grad, atol=1e-3, rtol=1e-2
            )
            torch.testing.assert_close(
                input_bf1.grad, input_bf2.grad, atol=1e-2, rtol=1e-2
            )

        helper((1, 8, 4, 3), 2, torch.contiguous_format)
        helper((1, 8, 4, 3), 2, torch.channels_last)
        helper((1, 8, 3, 4), 4, torch.contiguous_format)
        helper((1, 8, 3, 4), 4, torch.channels_last)
        # NOTE(review): the trailing commas on the next two lines turn the
        # statements into expressions building discarded 1-tuples — harmless
        # but almost certainly unintended.
        helper((4, 8, 40, 40), 4, torch.contiguous_format),
        helper((4, 8, 40, 40), 4, torch.channels_last),
        helper((4, 40, 40, 40), 2, torch.contiguous_format)
        helper((4, 40, 40, 40), 2, torch.channels_last)
        helper((1, 8, 40, 40), 4, torch.contiguous_format)
        helper((1, 8, 40, 40), 2, torch.channels_last)
        helper((1, 8, 40, 40), 2, torch.contiguous_format)
        helper((1, 8, 50, 50), 2, torch.channels_last)
        helper((1, 8, 50, 50), 4, torch.contiguous_format)
        helper((1, 8, 50, 50), 4, torch.channels_last)
        helper((1, 40, 50, 50), 2, torch.contiguous_format)
        helper((1, 40, 50, 50), 2, torch.channels_last)
        helper((1, 9, 3, 4, 5), 3, torch.contiguous_format)
        helper((1, 9, 3, 4, 5), 3, torch.channels_last_3d)
        helper((1, 60, 10, 10, 10), 3, torch.contiguous_format)
        helper((1, 60, 10, 10, 10), 3, torch.channels_last_3d)
        helper((1, 9, 10, 50, 50), 3, torch.contiguous_format)
        helper((1, 9, 10, 50, 50), 3, torch.channels_last_3d)
        helper((1, 60, 10, 50, 50), 3, torch.contiguous_format)
        helper((1, 60, 10, 50, 50), 3, torch.channels_last_3d)
    def test_groupnorm_nhwc(self):
        """GroupNorm on channels-last (NHWC / NDHWC) inputs must match a
        contiguous reference module with identical state, across bf16/fp32/
        fp64 and both pure and mixed (bf16 data, fp32 params) precision."""

        # NOTE(review): helper takes `self` explicitly and shadows the
        # method's `self` — it is a plain local function, not a method.
        def helper(self, size, groups, memory_format, dtype, is_mixed):
            channels = size[1]
            input = torch.randn(size, dtype=dtype, requires_grad=True)
            input = input.contiguous(memory_format=memory_format)
            input.retain_grad()
            grad = torch.randn(size, dtype=dtype)
            grad = grad.contiguous(memory_format=memory_format)
            if dtype == torch.bfloat16 and is_mixed:
                gn = nn.GroupNorm(groups, channels).to(torch.float)
            else:
                gn = nn.GroupNorm(groups, channels).to(dtype)
            gn.weight.data.uniform_()
            gn.bias.data.uniform_()
            ref_input = (
                input.detach()
                .clone()
                .contiguous(memory_format=torch.contiguous_format)
                .requires_grad_(True)
            )
            ref_grad = (
                grad.detach().clone().contiguous(memory_format=torch.contiguous_format)
            )
            if dtype == torch.bfloat16 and is_mixed:
                ref_gn = nn.GroupNorm(groups, channels).to(torch.float)
            else:
                ref_gn = nn.GroupNorm(groups, channels).to(dtype)
            ref_gn.load_state_dict(gn.state_dict())
            out = gn(input)
            out.backward(grad)
            ref_out = ref_gn(ref_input)
            ref_out.backward(ref_grad)
            self.assertTrue(out.is_contiguous(memory_format=memory_format))
            self.assertTrue(
                ref_out.is_contiguous(memory_format=torch.contiguous_format)
            )
            torch.testing.assert_close(out, ref_out)
            # training: parameters in bfloat16 is not recommended
            # if (dtype != torch.bfloat16) or is_mixed:
            torch.testing.assert_close(
                gn.weight.grad, ref_gn.weight.grad, atol=5e-4, rtol=5e-4
            )
            torch.testing.assert_close(
                gn.bias.grad, ref_gn.bias.grad, atol=5e-4, rtol=5e-4
            )
            torch.testing.assert_close(input.grad, ref_input.grad, atol=5e-4, rtol=8e-3)

        for dtype in [torch.bfloat16, torch.float, torch.double]:
            for is_mixed in [True, False]:
                helper(self, (4, 8, 10, 10), 4, torch.channels_last, dtype, is_mixed)
                helper(self, (2, 30, 9, 9), 3, torch.channels_last, dtype, is_mixed)
                helper(self, (4, 8, 40, 40), 4, torch.channels_last, dtype, is_mixed)
                helper(self, (4, 40, 40, 40), 2, torch.channels_last, dtype, is_mixed)
                helper(self, (2, 30, 50, 50), 3, torch.channels_last, dtype, is_mixed)
                helper(self, (2, 60, 50, 50), 3, torch.channels_last, dtype, is_mixed)
                helper(
                    self, (2, 9, 7, 11, 15), 3, torch.channels_last_3d, dtype, is_mixed
                )
                helper(
                    self, (2, 9, 7, 200, 15), 3, torch.channels_last_3d, dtype, is_mixed
                )
                helper(
                    self,
                    (2, 60, 7, 200, 15),
                    3,
                    torch.channels_last_3d,
                    dtype,
                    is_mixed,
                )
    def test_groupnorm_nwc(self):
        """GroupNorm on a 3-d (N, C, W) input: a transposed (NWC-strided) view
        must match its contiguous copy, in fp32, bf16, double, and in mixed
        precision (bf16 data through fp32 parameters)."""
        size = (4, 20, 20)
        channels = size[1]
        groups = 4
        x = torch.randn(size, requires_grad=True)
        grad = torch.randn(size)
        m = nn.GroupNorm(groups, channels)
        # test nwc
        x1 = x.clone().detach().transpose(1, 2).requires_grad_()
        grad1 = grad.detach().clone()
        y1 = m(x1)
        y1.backward(grad1)
        x2 = x1.clone().detach().contiguous().requires_grad_()
        grad2 = grad.detach().clone()
        y2 = m(x2)
        y2.backward(grad2)
        self.assertEqual(y1, y2)
        self.assertEqual(x1.grad, x2.grad)
        # test bfloat16/double
        # NOTE(review): Module.to(dtype) converts `m` in place, so after this
        # loop `m` holds the last dtype (double); parameter grads accumulate
        # across iterations, which the grad comparisons below rely on.
        for dtype in [torch.bfloat16, torch.double]:
            prec = None
            if dtype == torch.bfloat16:
                prec = 0.03
            x3 = x.clone().detach().transpose(1, 2).to(dtype).requires_grad_()
            grad3 = grad.detach().clone()
            m_dtype = m.to(dtype)
            y3 = m_dtype(x3)
            y3.backward(grad3)
            self.assertTrue(y3.dtype == dtype)
            self.assertEqual(y3, y2, prec=prec)
            self.assertEqual(x3.grad, x2.grad, prec=prec)
            self.assertEqual(m.weight.grad, m_dtype.weight.grad)
            self.assertEqual(m.bias.grad, m_dtype.bias.grad)
        # test mixed data type
        prec = 0.02
        x_bf16 = x.clone().detach().transpose(1, 2).to(torch.bfloat16).requires_grad_()
        grad_bf16 = grad.clone().detach().to(torch.bfloat16)
        m_fp32 = copy.deepcopy(m).to(torch.float32)
        y_bf16 = m_fp32(x_bf16)
        y_bf16.backward(grad_bf16)
        self.assertTrue(y_bf16.dtype == torch.bfloat16)
        self.assertEqual(y_bf16, y2, prec=prec)
        self.assertTrue(x_bf16.grad.dtype == torch.bfloat16)
        self.assertEqual(x_bf16.grad, x2.grad, prec=prec)
    def test_avg_pool2d(self):
        """AvgPool2d forward/backward across layouts and dtypes: channels-last
        must match contiguous; bf16/fp16/double/int64 variants must agree with
        each other (int64 has no autograd, so its backward is skipped)."""

        def helper(self, m, x):
            x1 = x.clone().detach().requires_grad_()
            y1 = m(x1)
            y1.backward(y1.data)
            # test channels last
            x2 = (
                x.clone()
                .detach()
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y2 = m(x2)
            y2.backward(y2.data)
            self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
            self.assertEqual(y1, y2)
            self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertEqual(x1.grad, x2.grad)
            for dtype in [torch.bfloat16, torch.double, torch.int64, torch.float16]:
                x3 = x.clone().detach().to(dtype)
                x4 = x.clone().detach().to(dtype).to(memory_format=torch.channels_last)
                # integer tensors cannot require grad
                if dtype != torch.int64:
                    x3 = x3.requires_grad_()
                    x4 = x4.requires_grad_()
                y3 = m(x3)
                y4 = m(x4)
                self.assertTrue(y3.dtype == dtype)
                self.assertTrue(y4.dtype == dtype)
                self.assertEqual(y3, y4)
                self.assertTrue(y4.is_contiguous(memory_format=torch.channels_last))
                if dtype != torch.int64:
                    y3.backward(y3.data)
                    self.assertTrue(x3.grad.dtype == dtype)
                    if dtype == torch.bfloat16:
                        self.assertEqual(y1, y3, prec=0.01)
                        self.assertEqual(x1.grad, x3.grad, prec=0.01)
                if dtype != torch.int64:
                    y4.backward(y4.data)
                    self.assertEqual(x3.grad, x4.grad)
                    self.assertTrue(x4.grad.dtype == dtype)
                    self.assertTrue(
                        x4.grad.is_contiguous(memory_format=torch.channels_last)
                    )

        helper(self, nn.AvgPool2d((3, 2), stride=(2, 1)), torch.randn(20, 16, 50, 32))
        helper(self, nn.AvgPool2d((3, 2), stride=(2, 1)), torch.randn(10, 8, 25, 16))
        helper(
            self,
            nn.AvgPool2d((3, 2), stride=(2, 1), count_include_pad=False),
            torch.randn(20, 16, 50, 32),
        )
        helper(
            self,
            nn.AvgPool2d(
                (3, 2), stride=(2, 1), count_include_pad=True, divisor_override=100
            ),
            torch.randn(20, 16, 50, 32),
        )
        helper(
            self,
            nn.AvgPool2d(
                (3, 2), stride=(2, 1), count_include_pad=True, divisor_override=100
            ),
            torch.randn(10, 8, 25, 16),
        )
    # Keep this UT temporarily to make sure the OP behavior in PyTorch is as expected.
    def test_adaptive_max_pool2d(self):
        """AdaptiveMaxPool2d parity: channels-last must match contiguous
        exactly; bfloat16 within tolerance; autocast propagates dtype and
        preserves the channels-last layout on outputs and input grads."""
        m = nn.AdaptiveMaxPool2d((5, 7))
        x = torch.randn(3, 64, 8, 9)
        x1 = x.clone().detach().requires_grad_()
        y1 = m(x1)
        y1.mean().backward()
        # test channels last
        x2 = x.clone().detach().to(memory_format=torch.channels_last).requires_grad_()
        y2 = m(x2)
        y2.mean().backward()
        self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(y1, y2)
        self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
        self.assertEqual(x1.grad, x2.grad)
        # test bfloat16
        x3 = x.clone().detach().bfloat16().requires_grad_()
        y3 = m(x3)
        y3.mean().backward()
        self.assertTrue(y3.dtype == torch.bfloat16)
        self.assertEqual(y1, y3, prec=0.01)
        self.assertTrue(x3.grad.dtype == torch.bfloat16)
        self.assertEqual(x1.grad, x3.grad, prec=0.001)
        # test autocast
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                x4 = x.clone().detach().to(datatype).requires_grad_()
                y4 = m(x4)
                y4.mean().backward()
                self.assertTrue(y4.dtype == datatype)
                self.assertTrue(x4.grad.dtype == datatype)
                x5 = (
                    x.clone()
                    .detach()
                    .to(datatype)
                    .to(memory_format=torch.channels_last)
                    .requires_grad_()
                )
                y5 = m(x5)
                y5.mean().backward()
                self.assertTrue(y5.dtype == datatype)
                self.assertTrue(x5.grad.dtype == datatype)
                self.assertTrue(y5.is_contiguous(memory_format=torch.channels_last))
                self.assertTrue(
                    x5.grad.is_contiguous(memory_format=torch.channels_last)
                )
    def test_avg_pool3d_ndhwc(self):
        """AvgPool3d on channels_last_3d (NDHWC) input must match a contiguous
        reference — output values, layout, and (for float dtypes) gradients —
        including the non-contiguous channel-sliced case."""

        def helper(
            n,
            c,
            d,
            h,
            w,
            kernel_size,
            dtype,
            contig,
            count_include_pad=True,
            divisor_override=None,
        ):
            input = torch.randint(1, 10, (n, c, d, h, w), device="cpu", dtype=dtype)
            input = input.contiguous(memory_format=torch.channels_last_3d)
            if not contig:
                # strided channel slice: exercises the non-contiguous path
                input = input[:, ::2, :, :, :]
            pool = torch.nn.AvgPool3d(
                kernel_size=kernel_size,
                count_include_pad=count_include_pad,
                divisor_override=divisor_override,
            )
            ref_input = input.detach().clone().contiguous()
            # int64 tensors cannot require grad, so backward is float-only
            if dtype != torch.int64:
                input = input.requires_grad_()
                ref_input = ref_input.requires_grad_()
            out = pool(input)
            ref_out = pool(ref_input)
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
            self.assertTrue(ref_out.is_contiguous())
            if dtype != torch.int64:
                out.backward(out.data)
                ref_out.backward(ref_out.data)
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)

        for dtype in [torch.int64, torch.float32, torch.double]:
            for contig in [True, False]:
                for count_include_pad in [True, False]:
                    helper(
                        4,
                        8,
                        10,
                        10,
                        10,
                        (3, 2, 3),
                        dtype,
                        contig,
                        count_include_pad=count_include_pad,
                    )
                    helper(
                        4,
                        8,
                        18,
                        9,
                        14,
                        (2, 3, 2),
                        dtype,
                        contig,
                        count_include_pad=count_include_pad,
                    )
                    helper(
                        4,
                        8,
                        7,
                        8,
                        9,
                        (2, 2, 2),
                        dtype,
                        contig,
                        count_include_pad=count_include_pad,
                        divisor_override=100,
                    )
    def test_avg_pool(self):
        """Two edge cases: a channels-last 4-d input fed to AvgPool3d must
        raise RuntimeError (its contiguous copy is accepted as an unbatched
        3-d volume); a 3-d input to AvgPool2d must match its bf16 twin."""

        def helper(input, kernel_size):
            if input.ndim == 4:
                pool = torch.nn.AvgPool3d(kernel_size=kernel_size)
                input = input.contiguous(
                    memory_format=torch.channels_last
                ).requires_grad_()
                # channels-last 4-d input is rejected by the 3-d pool
                self.assertRaises(RuntimeError, lambda: pool(input))
                ref_input = input.detach().clone().contiguous().requires_grad_(True)
                ref_out = pool(ref_input)
                ref_out.backward(ref_out.data)
            elif input.ndim == 3:
                pool = torch.nn.AvgPool2d(kernel_size=kernel_size)
                input = input.requires_grad_()
                out = pool(input)
                input2 = input.detach().clone().to(torch.bfloat16).requires_grad_()
                out2 = pool(input2)
                out.backward(out.data)
                out2.backward(out2.data)
                self.assertEqual(out, out2, 0.01)
                self.assertEqual(input.grad, input2.grad, 0.01)

        helper(torch.rand(4, 8, 10, 10), (3, 2, 3))
        helper(torch.rand(4, 8, 10), (3, 2))
@skipIfNoTorchVision
def test_torchvision_nms(self):
num_boxes = 50
boxes = torch.randn(num_boxes, 4)
boxes[:, 2:] += boxes[:, :2]
scores = torch.randn(num_boxes)
y1 = torchvision.ops.nms(boxes, scores, 0.5)
# test autocast
with torch.cpu.amp.autocast():
y2 = torchvision.ops.nms(boxes.bfloat16(), scores.bfloat16(), 0.5)
self.assertEqual(y1, y2)
# test double
y3 = torchvision.ops.nms(boxes.double(), scores.double(), 0.5)
self.assertEqual(y1, y3)
def test_mean(self):
x = torch.randn(1, 64, 100, 13, 24, requires_grad=True)
for dtype in [torch.float32, torch.double, torch.bfloat16]:
y1 = torch.mean(x, dim=(3, 4), keepdim=False, dtype=dtype)
x2 = (
x.clone()
.detach()
.to(memory_format=torch.channels_last_3d)
.requires_grad_()
)
y2 = torch.mean(x2, dim=(3, 4), keepdim=False, dtype=dtype)
self.assertEqual(y1, y2)
    def test_sum(self):
        """torch.sum over strided/channels-last tensors must match contiguous
        copies for many dtypes, shapes and dim/keepdim combinations; also
        covers bool-sum dtype promotion and transposed non-reduction cases."""

        def helper(self, x1, x2, dim, keepdim, dtype):
            y1 = torch.sum(x1, dim=dim, keepdim=keepdim, dtype=dtype)
            y2 = torch.sum(x2, dim=dim, keepdim=keepdim, dtype=dtype)
            self.assertEqual(y1, y2, prec=2e-4)

        dtypes = [
            torch.float32,
            torch.double,
            torch.bfloat16,
            torch.float16,
            torch.complex64,
            torch.complex128,
        ]
        # pairs (strided-or-channels-last, contiguous copy) of varied shapes
        x1 = torch.randn((1, 128, 56, 56)).to(memory_format=torch.channels_last)
        x1 = x1.reshape([1, 2, 64, 56, 56])
        x2 = x1.detach().clone().contiguous()
        x3 = torch.randn((1, 64, 100, 13, 24)).to(memory_format=torch.channels_last_3d)
        x4 = x3.detach().clone().contiguous()
        x5 = torch.randn((1, 10, 16, 16)).to(memory_format=torch.channels_last)
        x6 = x5.detach().clone().contiguous()
        x7 = torch.randn((1, 1, 1, 1)).to(memory_format=torch.channels_last)
        x8 = x7.detach().clone().contiguous()
        x9 = torch.randn((1, 10, 256, 256)).to(memory_format=torch.channels_last)
        x10 = x9.detach().clone().contiguous()
        x11 = (
            torch.randn((224, 1, 224))
            .unsqueeze(0)
            .to(memory_format=torch.channels_last)
            .squeeze(0)
        )
        x12 = x11.detach().clone().contiguous()
        x13 = (
            torch.randn((3, 1, 224))
            .unsqueeze(0)
            .to(memory_format=torch.channels_last)
            .squeeze(0)
        )
        x14 = x13.detach().clone().contiguous()
        # NOTE(review): `(1)` below is just the int 1, not a tuple — the loop
        # covers a single-dim and a two-dim reduction.
        for dtype in dtypes:
            for dim in [(1), (-1, -2)]:
                for keepdim in [True, False]:
                    helper(self, x1, x2, dim, keepdim, dtype)
                    helper(self, x3, x4, dim, keepdim, dtype)
                    helper(self, x5, x6, dim, keepdim, dtype)
                    helper(self, x7, x8, dim, keepdim, dtype)
                    helper(self, x9, x10, dim, keepdim, dtype)
                    helper(self, x11, x12, dim, keepdim, dtype)
                    helper(self, x13, x14, dim, keepdim, dtype)
        # summing a bool mask must promote the dtype (not stay bool)
        a = torch.randn([3, 2, 3])
        mask = a.ge(0.5)
        s = mask.sum()
        self.assertTrue(s.dtype != torch.bool)
        # add ut for special case - not a true reduction in sumkernel
        for dtype in [torch.float32, torch.bfloat16, torch.double]:
            x5 = torch.rand(789, 357).to(dtype)
            x6 = x5.detach().clone().transpose(0, 1)
            y5 = torch.mvlgamma(x5, p=1)
            y6 = torch.mvlgamma(x6, p=1).transpose(0, 1)
            self.assertEqual(y5, y6)
        x5 = torch.rand(789, 357).to(torch.float16)
        x6 = x5.detach().clone().transpose(0, 1)
        y5 = torch.arange(0, 0.5, 0.5).to(torch.float16).add(x5.unsqueeze(-1)).sum(-1)
        y6 = (
            torch.arange(0, 0.5, 0.5)
            .to(torch.float16)
            .add(x6.unsqueeze(-1))
            .sum(-1)
            .transpose(0, 1)
        )
        self.assertEqual(y5, y6)
    def test_matmul(self):
        """matmul/mm/bmm must give identical results through the `out=` and
        return-value paths, in fp32/bf16 and with IPEX's BF32 fp32 math mode
        enabled; also smoke-tests backward and forward-mode hessian under
        BF32. The math mode is global, so it is restored to FP32 after use."""

        def helper(a, b, c, op):
            dtypes = [torch.float32, torch.bfloat16]
            for dtype in dtypes:
                a = a.to(dtype)
                b = b.to(dtype)
                c = c.to(dtype)
                op(a, b, out=c)
                d = op(a, b)
                self.assertTrue(torch.equal(c, d))
            # repeat the out= vs return comparison with BF32 math mode on
            ipex.set_fp32_math_mode(mode=ipex.FP32MathMode.BF32, device="cpu")
            op(a, b, out=c)
            d = op(a, b)
            self.assertTrue(torch.equal(c, d))
            e = a.clone().requires_grad_()
            f = b.clone().requires_grad_()
            g = op(e, f)
            g.backward(g.data)
            h = op(a, f)
            h.backward(h.data)
            ipex.set_fp32_math_mode(mode=ipex.FP32MathMode.FP32, device="cpu")

        helper(torch.randn(2, 3), torch.randn(3, 4), torch.zeros(2, 4), torch.mm)
        helper(torch.randn(2, 3), torch.randn(3, 4), torch.zeros(2, 4), torch.matmul)
        helper(
            torch.randn(10, 3, 4),
            torch.randn(10, 4, 5),
            torch.zeros(10, 3, 5),
            torch.bmm,
        )
        helper(
            torch.randn(10, 3, 4, 5),
            torch.randn(10, 3, 5, 5),
            torch.zeros(10, 3, 4, 5),
            torch.matmul,
        )
        helper(torch.randn(1), torch.randn(1), torch.zeros(1), torch.matmul)
        helper(torch.randn(2, 3), torch.randn(3), torch.zeros(2, 3), torch.matmul)
        helper(torch.randn(2, 3, 4), torch.randn(4), torch.zeros(2, 3, 4), torch.matmul)
        helper(torch.randn(3), torch.randn(3, 1), torch.zeros(3), torch.matmul)
        helper(
            torch.randn(2, 3), torch.randn(1, 3, 3), torch.zeros(1, 2, 3), torch.matmul
        )
        helper(torch.randn(3), torch.randn(1, 3, 3), torch.zeros(1, 3), torch.matmul)

        def f(x, y, z):
            return ((x.relu() * x) @ y.sin() @ z).sum()

        x = torch.randn(2, 3)
        y = torch.randn(3, 5)
        z = torch.randn(5, 5)
        # forward-mode hessian under BF32 math mode must not crash
        ipex.set_fp32_math_mode(mode=ipex.FP32MathMode.BF32, device="cpu")
        result_forward_mode = autogradF.hessian(
            f, (x, y, z), outer_jacobian_strategy="forward-mode", vectorize=True
        )
        ipex.set_fp32_math_mode(mode=ipex.FP32MathMode.FP32, device="cpu")
    def test_index_select(self):
        """index_select must preserve the input dtype for every supported
        dtype family (float/complex, integer, bool), for int32 and int64
        index tensors, across 1-d/2-d/3-d inputs and an explicit `out=`."""
        for index_datatype in [torch.int32, torch.int64]:
            indices = torch.tensor([1], dtype=index_datatype)
            # test floating types
            for datatype in [
                torch.float32,
                torch.bfloat16,
                torch.double,
                torch.float16,
                torch.complex64,
                torch.complex128,
            ]:
                for dim in [0, 1]:
                    x1_1 = torch.randn((10, 2), dtype=datatype)
                    y1_1 = x1_1.index_select(dim, indices)
                    self.assertTrue(y1_1.dtype == datatype)
                    x1_2 = torch.randn((10, 10), dtype=datatype)
                    y1_2 = x1_2.index_select(dim, indices)
                    self.assertTrue(y1_2.dtype == datatype)
                    x1_3 = torch.randn((10, 40000), dtype=datatype)
                    y1_3 = x1_3.index_select(dim, indices)
                    self.assertTrue(y1_3.dtype == datatype)
                    x1_4 = torch.randn((40000, 5), dtype=datatype)
                    y1_4 = x1_4.index_select(dim, indices)
                    self.assertTrue(y1_4.dtype == datatype)
                for dim in [0, 1, 2]:
                    x1_5 = torch.randn((10, 2, 3), dtype=datatype)
                    y1_5 = x1_5.index_select(dim, indices)
                    self.assertTrue(y1_5.dtype == datatype)
                x1_6 = torch.randn((10), dtype=datatype)
                y1_6 = x1_6.index_select(0, indices)
                self.assertTrue(y1_6.dtype == datatype)
            # test integer types
            for datatype in [
                torch.int32,
                torch.int64,
                torch.int16,
                torch.int8,
                torch.uint8,
            ]:
                for dim in [0, 1]:
                    x2_1 = torch.randint(10, (10, 10), dtype=datatype)
                    y2_1 = x2_1.index_select(dim, indices)
                    self.assertTrue(y2_1.dtype == datatype)
                    x2_2 = torch.randint(10, (40000, 5), dtype=datatype)
                    y2_2 = x2_2.index_select(dim, indices)
                    self.assertTrue(y2_2.dtype == datatype)
                x2_3 = torch.randint(10, (10,), dtype=datatype)
                y2_3 = x2_3.index_select(0, indices)
                self.assertTrue(y2_3.dtype == datatype)
            # test bool
            for dim in [0, 1]:
                x3_1 = torch.randint(1, (10, 10), dtype=torch.bool)
                y3_1 = x3_1.index_select(dim, indices)
                self.assertTrue(y3_1.dtype == torch.bool)
                x3_2 = torch.randint(1, (40000, 5), dtype=torch.bool)
                y3_2 = x3_2.index_select(dim, indices)
                self.assertTrue(y3_2.dtype == torch.bool)
                x3_3 = torch.randint(1, (10,), dtype=torch.bool)
                y3_3 = x3_3.index_select(0, indices)
                self.assertTrue(y3_3.dtype == torch.bool)
            # out is defined
            for dim in [0, 1]:
                x1_5 = torch.randn(10, 2)
                y1_5 = torch.index_select(x1_5, dim, indices, out=torch.empty(0))
                self.assertTrue(y1_5.dtype == torch.float32)
    def test_cat(self):
        """torch.cat across dtypes: dtype preservation, long input lists,
        mixed shapes/strides, `out=` semantics (reallocation when the shape is
        wrong, in-place reuse when it matches), and empty-tensor inputs."""
        for datatype in [torch.float32, torch.double, torch.bfloat16, torch.float16]:
            for dim, size in itertools.product([0, 1], [[2, 1], [2, 2], [5, 10]]):
                x = torch.randn(size, dtype=datatype)
                y = torch.cat([x, x], dim)
                self.assertTrue(y.dtype == datatype)
            # long input tensor list
            x1 = torch.randn((2, 2), dtype=datatype)
            input1 = []
            for i in range(100):
                input1.append(x1)
            y1 = torch.cat(input1, 0)
            self.assertTrue(y1.size() == torch.Size([200, 2]))
            self.assertTrue(y1.dtype == datatype)
            # input tensors have different shapes and strides
            x2 = torch.randn((400, 2), dtype=datatype)
            input2 = []
            for i in range(10):
                input2.append(x1)
            for i in range(100):
                input2.append(x2)
            y2 = torch.cat(input2, 0)
            self.assertTrue(y2.size() == torch.Size([40020, 2]))
            self.assertTrue(y2.dtype == datatype)
            x3 = torch.randn((4000, 2), dtype=datatype)
            input3 = []
            for i in range(10):
                input3.append(x1)
            for i in range(10):
                input3.append(x3)
            y3 = torch.cat(input3, 0)
            self.assertTrue(y3.size() == torch.Size([40020, 2]))
            self.assertTrue(y3.dtype == datatype)
            x4 = torch.randn((4, 2), dtype=datatype)
            input4 = []
            for i in range(10):
                input4.append(x1)
            for i in range(10):
                input4.append(x4)
            y4 = torch.cat(input4, 0)
            self.assertTrue(y4.size() == torch.Size([60, 2]))
            self.assertTrue(y4.dtype == datatype)
            # "out" arg is used but un-defined
            y5 = torch.cat([x4, x4], 0, out=torch.empty(0, dtype=datatype))
            self.assertEqual(y5, torch.cat([x4, x4], 0))
            self.assertTrue(y5.dtype == datatype)
            # out is defined with wrong shape
            # (cat must reallocate the storage, so the data pointer changes)
            ref = torch.cat([x4, x4], 0)
            out = torch.zeros(1)
            out_ptr = out.data_ptr()
            torch.cat([x4, x4], 0, out=out)
            self.assertEqual(ref, out)
            self.assertTrue(ref.dtype == datatype)
            self.assertTrue(out_ptr != out.data_ptr())
            # out is defined with correct shape
            # (cat must write in place, so the data pointer is preserved)
            ref = torch.cat([x4, x4], 0)
            out = torch.zeros_like(ref)
            out_ptr = out.data_ptr()
            torch.cat([x4, x4], 0, out=out)
            self.assertEqual(ref, out)
            self.assertTrue(ref.dtype == datatype)
            self.assertTrue(out_ptr == out.data_ptr())
            y6 = torch.cat([x4, x4], 0, out=torch.empty(0, dtype=torch.float32))
            self.assertEqual(y6, torch.cat([x4, x4], 0))
            self.assertTrue(y6.dtype == torch.float32)
            # one of input tensors is empty
            x7 = torch.empty(0, dtype=datatype)
            y7 = torch.cat([x4, x4, x7], 0)
            self.assertTrue(y7.size() == torch.Size([8, 2]))
            self.assertTrue(y7.dtype == datatype)
if __name__ == "__main__":
    # unittest.main() raises SystemExit when the run finishes, so the old
    # `test = unittest.main()` binding could never take effect; call it
    # directly.
    unittest.main()
| 57,816 | 40.386543 | 105 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_graph_capture.py | import unittest
import copy
import os
import tempfile
import torch
import torch.nn as nn
import torch.nn.functional as F
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
from common_ipex_conf import runtime_thread_affinity_test_env
from torch.utils import ThroughputBenchmark
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
except RuntimeError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class Conv_Bn_Relu(nn.Module):
    """Conv2d(6 -> 3, kernel 3) -> BatchNorm2d(eps=1e-3) -> in-place ReLU."""

    def __init__(self):
        super(Conv_Bn_Relu, self).__init__()
        self.conv = torch.nn.Conv2d(6, 3, 3)
        self.bn = torch.nn.BatchNorm2d(3, eps=0.001)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return F.relu(out, inplace=True)
class Conv_IF_Relu(nn.Module):
    """Conv2d(6 -> 3, kernel 3) followed by ReLU.

    The data-dependent branch on ``x.sum().item()`` forces a graph break
    under tracing front ends; both branches compute the same values (they
    differ only in whether the ReLU is applied in place).
    """

    def __init__(self):
        super(Conv_IF_Relu, self).__init__()
        self.conv = torch.nn.Conv2d(6, 3, 3)

    def forward(self, x):
        conv_out = self.conv(x)
        if x.sum().item() > 0:
            return F.relu(conv_out, inplace=True)
        return F.relu(conv_out)
class LinearBatchNormNd(torch.nn.Module):
    """Linear(32, 32) followed by the BatchNorm matching `dim` (1d/2d/3d).

    `input1` holds a sample input of the matching rank. For any other
    `dim` value neither `input1` nor `bn` is created, matching the
    original fall-through behavior.
    """

    def __init__(self, dim):
        super(LinearBatchNormNd, self).__init__()
        self.linear = torch.nn.Linear(32, 32)
        sample_shapes = {
            1: (1, 32),
            2: (1, 32, 32, 32),
            3: (1, 32, 32, 32, 32),
        }
        norm_types = {
            1: torch.nn.BatchNorm1d,
            2: torch.nn.BatchNorm2d,
            3: torch.nn.BatchNorm3d,
        }
        if dim in sample_shapes:
            self.input1 = torch.randn(*sample_shapes[dim])
            self.bn = norm_types[dim](32)

    def forward(self, x):
        hidden = self.linear(x)
        return self.bn(hidden)
class ConvBatchNormLinearBatchNorm(torch.nn.Module):
    """Conv2d(32 -> 32, 1x1) -> BatchNorm2d -> Linear(32, 32) applied over
    the last dimension -> BatchNorm2d; `input1` holds a matching sample."""

    def __init__(self):
        super(ConvBatchNormLinearBatchNorm, self).__init__()
        self.input1 = torch.randn(1, 32, 32, 32)
        self.conv = torch.nn.Conv2d(32, 32, 1)
        self.bn1 = torch.nn.BatchNorm2d(32)
        self.linear = torch.nn.Linear(32, 32)
        self.bn2 = torch.nn.BatchNorm2d(32)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn1(out)
        out = self.linear(out)
        return self.bn2(out)
class TestGraphCapture(TestCase):
    """Tests for ``ipex.optimize(..., graph_mode=True)`` graph capture.

    The Conv_Bn_Relu fixture exercises the JIT-traceable path; Conv_IF_Relu
    (data-dependent branch) exercises the fallback path.  Captured outputs
    are always compared against the eager reference computed before
    ``ipex.optimize`` is applied.
    """

    def test_inference_graph_mode_jit(self):
        """FP32 inference: captured model matches eager output."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        model = ipex.optimize(model, graph_mode=True)
        with torch.no_grad():
            for _ in range(10):
                y2 = model(x)
        self.assertEqual(y1, y2)

    def test_inference_graph_mode_torchdynamo(self):
        """FP32 inference on a non-traceable model (dynamo fallback path)."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        model = ipex.optimize(model, graph_mode=True)
        with torch.no_grad():
            for _ in range(10):
                y2 = model(x)
        self.assertEqual(y1, y2)

    def test_inference_graph_mode_jit_autocast(self):
        """BF16 autocast inference: output is bfloat16 and close to FP32 eager."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        with torch.cpu.amp.autocast(), torch.no_grad():
            for _ in range(10):
                y2_bf16 = model(x)
        self.assertEqual(y1, y2_bf16, prec=0.01)
        self.assertTrue(y2_bf16.dtype == torch.bfloat16)

    def test_inference_graph_mode_torchdynamo_autocast(self):
        """BF16 autocast inference on the dynamo fallback path."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        with torch.cpu.amp.autocast(), torch.no_grad():
            for _ in range(10):
                y2_bf16 = model(x)
        self.assertEqual(y1, y2_bf16, prec=0.01)
        self.assertTrue(y2_bf16.dtype == torch.bfloat16)

    def test_inference_trace_graph_mode(self):
        """trace/freeze first, then graph capture; also checks save/load round-trip."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        # JIT trace and freeze
        traced_model = torch.jit.trace(model, x)
        traced_model = torch.jit.freeze(traced_model)
        # graph capture
        traced_model = ipex.optimize(traced_model, graph_mode=True)
        with torch.no_grad():
            for _ in range(10):
                y2 = traced_model(x)
        self.assertEqual(y1, y2)
        freeze_graph = traced_model.graph_for(x)
        # conv+relu should have been fused into the ipex prepacked op
        self.assertTrue(
            any(
                n.kind() == "ipex_prepack::convolution_relu_run"
                for n in freeze_graph.nodes()
            )
        )
        self.assertTrue(isinstance(traced_model, torch.jit.RecursiveScriptModule))
        # JIT save, load
        with tempfile.TemporaryDirectory() as tmp:
            path = os.path.join(tmp, "scriptmodule.pt")
            torch.jit.save(traced_model, path)
            load_model = torch.jit.load(path)
            with torch.no_grad():
                for _ in range(10):
                    y3 = load_model(x)
            self.assertEqual(y1, y3)
            freeze_graph = load_model.graph_for(x)
            self.assertTrue(
                any(
                    n.kind() == "ipex_prepack::convolution_relu_run"
                    for n in freeze_graph.nodes()
                )
            )
            self.assertTrue(isinstance(load_model, torch.jit.RecursiveScriptModule))

    def test_inference_trace_graph_mode_linear_bn(self):
        """Linear+BatchNorm models: ipex::batch_norm must be folded away."""
        for model in [
            LinearBatchNormNd(dim=1).eval(),
            LinearBatchNormNd(dim=2).eval(),
            LinearBatchNormNd(dim=3).eval(),
            ConvBatchNormLinearBatchNorm().eval(),
        ]:
            x = model.input1
            y1 = model(x)
            # JIT trace and freeze
            traced_model = torch.jit.trace(model, x)
            traced_model = torch.jit.freeze(traced_model)
            # graph capture
            traced_model = ipex.optimize(traced_model, graph_mode=True)
            with torch.no_grad():
                for _ in range(10):
                    y2 = traced_model(x)
            self.assertEqual(y1, y2)
            freeze_graph = traced_model.graph_for(x)
            self.assertFalse(
                any(n.kind() == "ipex::batch_norm" for n in freeze_graph.nodes())
            )
            self.assertTrue(isinstance(traced_model, torch.jit.RecursiveScriptModule))
            # JIT save, load
            with tempfile.TemporaryDirectory() as tmp:
                path = os.path.join(tmp, "scriptmodule.pt")
                torch.jit.save(traced_model, path)
                load_model = torch.jit.load(path)
                with torch.no_grad():
                    for _ in range(10):
                        y3 = load_model(x)
                self.assertEqual(y1, y3)
                freeze_graph = load_model.graph_for(x)
                self.assertFalse(
                    any(n.kind() == "ipex::batch_norm" for n in freeze_graph.nodes())
                )
                self.assertTrue(isinstance(load_model, torch.jit.RecursiveScriptModule))

    def test_inference_graph_mode_trace(self):
        """graph capture first, then trace/freeze on top; save/load round-trip."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        # graph capture
        model = ipex.optimize(model, graph_mode=True)
        # JIT trace and freeze
        traced_model = torch.jit.trace(model, x)
        traced_model = torch.jit.freeze(traced_model)
        with torch.no_grad():
            for _ in range(10):
                y2 = traced_model(x)
        self.assertEqual(y1, y2)
        freeze_graph = traced_model.graph_for(x)
        self.assertTrue(
            any(
                n.kind() == "ipex_prepack::convolution_relu_run"
                for n in freeze_graph.nodes()
            )
        )
        self.assertTrue(isinstance(traced_model, torch.jit.RecursiveScriptModule))
        # JIT save, load
        with tempfile.TemporaryDirectory() as tmp:
            path = os.path.join(tmp, "scriptmodule.pt")
            torch.jit.save(traced_model, path)
            load_model = torch.jit.load(path)
            with torch.no_grad():
                for _ in range(10):
                    y3 = load_model(x)
            self.assertEqual(y1, y3)
            freeze_graph = load_model.graph_for(x)
            self.assertTrue(
                any(
                    n.kind() == "ipex_prepack::convolution_relu_run"
                    for n in freeze_graph.nodes()
                )
            )
            self.assertTrue(isinstance(load_model, torch.jit.RecursiveScriptModule))

    def test_inference_graph_mode_trace2(self):
        """graph capture, run captured model, then trace/freeze the wrapper."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        y1 = model(x)
        # graph capture
        model = ipex.optimize(model, graph_mode=True)
        with torch.no_grad():
            for _ in range(10):
                y2 = model(x)
        self.assertEqual(y1, y2)
        # JIT trace and freeze
        traced_model = torch.jit.trace(model, x)
        traced_model = torch.jit.freeze(traced_model)
        with torch.no_grad():
            for _ in range(10):
                y3 = traced_model(x)
        self.assertEqual(y1, y3)
        freeze_graph = traced_model.graph_for(x)
        self.assertTrue(
            any(
                n.kind() == "ipex_prepack::convolution_relu_run"
                for n in freeze_graph.nodes()
            )
        )
        self.assertTrue(isinstance(traced_model, torch.jit.RecursiveScriptModule))
        # JIT save, load
        with tempfile.TemporaryDirectory() as tmp:
            path = os.path.join(tmp, "scriptmodule.pt")
            torch.jit.save(traced_model, path)
            load_model = torch.jit.load(path)
            with torch.no_grad():
                for _ in range(10):
                    y4 = load_model(x)
            self.assertEqual(y1, y4)
            freeze_graph = load_model.graph_for(x)
            self.assertTrue(
                any(
                    n.kind() == "ipex_prepack::convolution_relu_run"
                    for n in freeze_graph.nodes()
                )
            )
            self.assertTrue(isinstance(load_model, torch.jit.RecursiveScriptModule))

    def test_throughput_benchmark_graph_mode_jit(self):
        """Captured model works under torch.utils.ThroughputBenchmark (FP32)."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(3, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, graph_mode=True)
        bench = ThroughputBenchmark(model)
        bench.add_input(x)
        bench.benchmark(num_calling_threads=14, num_warmup_iters=10, num_iters=100)
        y_bench = bench.run_once(x)
        # Calculate the reference result
        y = model(x)
        self.assertEqual(y, y_bench)

    def test_throughput_benchmark_graph_mode_torchdynamo(self):
        """ThroughputBenchmark over the dynamo fallback path (FP32)."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(3, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, graph_mode=True)
        bench = ThroughputBenchmark(model)
        bench.add_input(x)
        bench.benchmark(num_calling_threads=14, num_warmup_iters=10, num_iters=100)
        y_bench = bench.run_once(x)
        # Calculate the reference result
        y = model(x)
        self.assertEqual(y, y_bench)

    def test_throughput_benchmark_graph_mode_jit_autocast(self):
        """ThroughputBenchmark under BF16 autocast (JIT path)."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(3, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        bench = ThroughputBenchmark(model)
        bench.add_input(x)
        with torch.cpu.amp.autocast():
            bench.benchmark(num_calling_threads=14, num_warmup_iters=10, num_iters=100)
            y_bench = bench.run_once(x)
            # Calculate the reference result
            y = model(x)
            self.assertEqual(y, y_bench)
            self.assertTrue(y_bench.dtype == torch.bfloat16)

    def test_throughput_benchmark_graph_mode_torchdynamo_autocast(self):
        """ThroughputBenchmark under BF16 autocast (dynamo fallback path)."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(3, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        bench = ThroughputBenchmark(model)
        bench.add_input(x)
        with torch.cpu.amp.autocast():
            bench.benchmark(num_calling_threads=14, num_warmup_iters=10, num_iters=100)
            y_bench = bench.run_once(x)
            # Calculate the reference result
            y = model(x)
            self.assertEqual(y, y_bench)
            self.assertTrue(y_bench.dtype == torch.bfloat16)

    @skipIfNoTorchVision
    def test_resnet50(self):
        """Smoke test: FP32 graph capture of torchvision resnet50 runs and keeps dtype."""
        model = torchvision.models.resnet50(pretrained=False)
        model.eval()
        data = torch.rand(2, 3, 224, 224)
        model = model.to(memory_format=torch.channels_last)
        data = data.to(memory_format=torch.channels_last)
        model = ipex.optimize(model, graph_mode=True)
        with torch.no_grad():
            for _ in range(10):
                y = model(data)
        self.assertTrue(y.dtype == torch.float32)

    @skipIfNoTorchVision
    def test_resnet50_autocast(self):
        """Smoke test: BF16 autocast graph capture of resnet50 yields bfloat16."""
        model = torchvision.models.resnet50(pretrained=False)
        model.eval()
        data = torch.rand(2, 3, 224, 224)
        model = model.to(memory_format=torch.channels_last)
        data = data.to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        with torch.cpu.amp.autocast(), torch.no_grad():
            for _ in range(10):
                y = model(data)
        self.assertTrue(y.dtype == torch.bfloat16)

    def test_training_graph_mode_jit(self):
        """Training: forward output and input gradients match eager (FP32)."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).train()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        x1 = copy.deepcopy(x).requires_grad_()
        x2 = copy.deepcopy(x).requires_grad_()
        y1 = model(x1)
        y1.sum().backward()
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        model, opt = ipex.optimize(model, optimizer=sgd, graph_mode=True)
        y2 = model(x2)
        y2.sum().backward()
        self.assertEqual(y1, y2)
        self.assertEqual(x1.grad, x2.grad)

    def test_training_graph_mode_fallback(self):
        """Training on the non-traceable model (fallback path), FP32."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last).train()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        x1 = copy.deepcopy(x).requires_grad_()
        x2 = copy.deepcopy(x).requires_grad_()
        y1 = model(x1)
        y1.sum().backward()
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        model, opt = ipex.optimize(model, optimizer=sgd, graph_mode=True)
        y2 = model(x2)
        y2.sum().backward()
        self.assertEqual(y1, y2)
        self.assertEqual(x1.grad, x2.grad)

    def test_training_graph_mode_jit_autocast(self):
        """Training under BF16 autocast: loose-tolerance match against FP32 eager."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).train()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        x1 = copy.deepcopy(x).requires_grad_()
        x2 = copy.deepcopy(x).requires_grad_()
        y1 = model(x1)
        y1.sum().backward()
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        model, opt = ipex.optimize(
            model, optimizer=sgd, dtype=torch.bfloat16, graph_mode=True
        )
        with torch.cpu.amp.autocast():
            y2 = model(x2)
            y2.sum().backward()
        self.assertEqual(y1, y2, prec=0.1)
        self.assertEqual(x1.grad, x2.grad, prec=0.01)
        self.assertTrue(y2.dtype == torch.bfloat16)

    def test_training_graph_mode_fallback_autocast(self):
        """Training under BF16 autocast on the fallback path."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last).train()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        x1 = copy.deepcopy(x).requires_grad_()
        x2 = copy.deepcopy(x).requires_grad_()
        y1 = model(x1)
        y1.sum().backward()
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        model, opt = ipex.optimize(
            model, optimizer=sgd, dtype=torch.bfloat16, graph_mode=True
        )
        with torch.cpu.amp.autocast():
            y2 = model(x2)
            y2.sum().backward()
        self.assertEqual(y1, y2, prec=0.1)
        self.assertEqual(x1.grad, x2.grad, prec=0.01)
        self.assertTrue(y2.dtype == torch.bfloat16)

    def test_training_save_load(self):
        """Checkpoint round-trip: model/optimizer state saved after one step
        reloads correctly and a second step matches the eager reference."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last).train()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        origin_x = x.clone()
        ipex_x = x.clone()
        origin_model = copy.deepcopy(model).train()
        lr = 1e-2
        origin_optimizer = torch.optim.SGD(origin_model.parameters(), lr=lr)
        ipex_model, ipex_optimizer = ipex.optimize(
            origin_model, optimizer=origin_optimizer, graph_mode=True
        )
        # train one step for origin.
        y1 = origin_model(origin_x)
        loss1 = y1.sum()
        origin_optimizer.zero_grad()
        loss1.backward()
        torch.nn.utils.clip_grad_value_(origin_model.parameters(), 10)
        origin_optimizer.step()
        # train one step for ipex.
        y2 = ipex_model(ipex_x)
        loss2 = y2.sum()
        ipex_optimizer.zero_grad()
        loss2.backward()
        torch.nn.utils.clip_grad_value_(ipex_model.parameters(), 10)
        ipex_optimizer.step()
        with tempfile.TemporaryDirectory() as tmp:
            origin_checkpoint_path = os.path.join(tmp, "origin_checkpoint.pth")
            ipex_checkpoint_path = os.path.join(tmp, "ipex_checkpoint.pth")
            torch.save(
                {
                    "model_state_dict": origin_model.state_dict(),
                    "optimizer_state_dict": origin_optimizer.state_dict(),
                },
                origin_checkpoint_path,
            )
            torch.save(
                {
                    "model_state_dict": ipex_model.state_dict(),
                    "optimizer_state_dict": ipex_optimizer.state_dict(),
                },
                ipex_checkpoint_path,
            )
            self.assertEqual(y1, y2)
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name], ipex_model_state[var_name]
                )
            # check state_buffer works.
            origin_optimizer_state = origin_optimizer.state_dict()
            ipex_optimizer_state = ipex_optimizer.state_dict()
            for var_name in origin_optimizer_state:
                if var_name == "state":
                    self.assertEqual(
                        origin_optimizer_state[var_name], ipex_optimizer_state[var_name]
                    )
            # Rebuild origin model/optimizer from the checkpoint.
            origin_model = copy.deepcopy(model).train()
            origin_optimizer = torch.optim.SGD(origin_model.parameters(), lr=lr)
            origin_checkpoint = torch.load(origin_checkpoint_path)
            origin_model.load_state_dict(origin_checkpoint["model_state_dict"])
            origin_optimizer.load_state_dict(origin_checkpoint["optimizer_state_dict"])
            # load ipex model state
            origin_ipex_model = copy.deepcopy(model)
            origin_ipex_optimizer = torch.optim.SGD(
                origin_ipex_model.parameters(), lr=lr
            )
            ipex_checkpoint = torch.load(ipex_checkpoint_path)
            origin_ipex_model.load_state_dict(ipex_checkpoint["model_state_dict"])
            origin_ipex_optimizer.load_state_dict(
                ipex_checkpoint["optimizer_state_dict"]
            )
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, optimizer=origin_optimizer, graph_mode=True
            )
            # train second step for origin.
            y1 = origin_model(origin_x)
            loss = y1.sum()
            origin_optimizer.zero_grad()
            loss.backward()
            origin_optimizer.step()
            # train second step for ipex model.
            y3 = ipex_model(ipex_x)
            loss3 = y3.sum()
            ipex_optimizer.zero_grad()
            loss3.backward()
            ipex_optimizer.step()
            self.assertEqual(y1, y3)
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name], ipex_model_state[var_name]
                )
            # check state_buffer works.
            origin_optimizer_state = origin_optimizer.state_dict()
            ipex_optimizer_state = ipex_optimizer.state_dict()
            for var_name in origin_optimizer_state:
                if var_name == "state":
                    self.assertEqual(
                        origin_optimizer_state[var_name], ipex_optimizer_state[var_name]
                    )
class TestGraphCaptureMultiStream(TestCase):
    """Graph-captured models wrapped in the IPEX runtime MultiStreamModule.

    Each test is skipped when the IPEX runtime extension is not built, and
    runs under ``runtime_thread_affinity_test_env`` so thread-affinity state
    is restored afterwards.  28 streams over the node-0 CPU pool are used;
    results are compared against a single direct call to the captured model.
    """

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_graph_mode_jit(self):
        """Multi-stream FP32 inference on the JIT-traceable fixture."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(56, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, graph_mode=True)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=28, cpu_pool=cpu_pool
        )
        for _ in range(10):
            y_runtime = multi_stream_model(x)
        # Calculate the reference result
        y = model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_graph_mode_torchdynamo(self):
        """Multi-stream FP32 inference on the dynamo fallback fixture."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(56, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, graph_mode=True)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=28, cpu_pool=cpu_pool
        )
        for _ in range(10):
            y_runtime = multi_stream_model(x)
        # Calculate the reference result
        y = model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_graph_mode_jit_autocast(self):
        """Multi-stream BF16 autocast inference (JIT path)."""
        model = Conv_Bn_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(56, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=28, cpu_pool=cpu_pool
        )
        with torch.cpu.amp.autocast():
            for _ in range(10):
                y_runtime = multi_stream_model(x)
            # Calculate the reference result
            y = model(x)
            self.assertEqual(y, y_runtime)
            self.assertTrue(y_runtime.dtype == torch.bfloat16)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_graph_mode_torchdynamo_autocast(self):
        """Multi-stream BF16 autocast inference (dynamo fallback path)."""
        model = Conv_IF_Relu().to(memory_format=torch.channels_last)
        model.eval()
        x = torch.rand(56, 6, 10, 10).to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.bfloat16, graph_mode=True)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            model, num_streams=28, cpu_pool=cpu_pool
        )
        with torch.cpu.amp.autocast():
            for _ in range(10):
                y_runtime = multi_stream_model(x)
            # Calculate the reference result
            y = model(x)
            self.assertEqual(y, y_runtime)
            self.assertTrue(y_runtime.dtype == torch.bfloat16)
# Run the unittest suite when executed as a script.
if __name__ == "__main__":
    test = unittest.main()
| 25,582 | 35.599428 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_dropout.py | import unittest
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
from torch.testing import FileCheck
class Net(nn.Module):
    """Module whose forward pass is a single Dropout(p=0.5) layer."""

    def __init__(self):
        super(Net, self).__init__()
        # Attribute name "dropout" is looked up via named_children() in the
        # tests in this file -- keep it stable.
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        out = self.dropout(x)
        return out
class DropoutTester(TestCase):
    """Checks that eval-mode dropout is eliminated by JIT and by ipex.optimize."""

    def test_remove_dropout_jit(self):
        """Traced eval-mode graph must contain no aten::dropout node and act
        as identity."""
        model = Net().eval()
        x = torch.randn(2, 3)
        with torch.no_grad():
            trace_model = torch.jit.trace(model, x).eval()
            frozen_mod = torch.jit.freeze(trace_model)
            y = trace_model(x)
            self.assertEqual(x, y)
            FileCheck().check_not("aten::dropout").run(trace_model.graph)

    def test_replace_dropout_with_identity(self):
        """ipex.optimize swaps Dropout for Identity unless
        replace_dropout_with_identity=False."""
        model = Net().eval()
        optimized_model = ipex.optimize(model)
        x = torch.randn(2, 3)
        named_children = dict(optimized_model.named_children())
        self.assertTrue(isinstance(named_children["dropout"], torch.nn.Identity))
        optimized_model = ipex.optimize(model, replace_dropout_with_identity=False)
        named_children = dict(optimized_model.named_children())
        self.assertTrue(isinstance(named_children["dropout"], torch.nn.Dropout))
# Run the unittest suite when executed as a script.
if __name__ == "__main__":
    test = unittest.main()
| 1,352 | 28.413043 | 83 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/linear_prepack.py | import torch
import intel_extension_for_pytorch as ipex
from common_utils import int8_calibration
# Module-level setup: enable the oneDNN auto path, then build one shared
# Linear(ic, oc) layer on the IPEX device used by every run below.
ipex.core.enable_auto_dnnl()
ic = 1024  # input feature count
oc = 1024  # output feature count
bs = 16  # batch size
LL = torch.nn.Linear(ic, oc).to(ipex.DEVICE)
def get_input():
    """Return a fresh random (bs, ic) batch on the IPEX device."""
    return torch.rand(bs, ic).to(ipex.DEVICE)
def run_linear(auto_mix_conf=None):
    """Run the shared Linear layer three times on fresh random inputs.

    When ``auto_mix_conf`` is given, each call executes under the
    corresponding ipex.AutoMixPrecision context (bf16 or int8); otherwise
    it runs plain fp32.
    """
    for i in range(3):
        if auto_mix_conf is not None:
            with ipex.AutoMixPrecision(auto_mix_conf):
                LL(get_input())
        else:
            LL(get_input())
if __name__ == "__main__":
    # Exercise the same Linear layer across precision modes, reordering its
    # packed weight/bias back to fp32 between each auto-mix section.
    print(f"fp32, {'*' * 50}")
    run_linear()
    print(f"auto-mix for bf16, {'*' * 50}")
    bf16_conf = ipex.AmpConf(torch.bfloat16)
    run_linear(bf16_conf)
    print(f"back to fp32, {'*' * 50}")
    ipex.core.reorder_to_float32(LL.weight)
    ipex.core.reorder_to_float32(LL.bias)
    run_linear()
    print(f"auto-mix for int8, {'*' * 50}")
    # Calibrate on three random batches and write the int8 config to disk.
    int8_calibration(LL, [get_input() for i in range(3)], "./int8.config")
    int8_conf = ipex.AmpConf(torch.int8, "./int8.config")
    run_linear(int8_conf)
    print(f"back to fp32, {'*' * 50}")
    ipex.core.reorder_to_float32(LL.weight)
    ipex.core.reorder_to_float32(LL.bias)
    run_linear()
| 1,179 | 23.081633 | 74 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_runtime_api_jit.py | import unittest
import torch
import intel_extension_for_pytorch as ipex
from torch.testing._internal.jit_utils import JitTestCase
from test_ao_jit_llga_utils import JitLlgaTestCase
from test_runtime_api import TestInputOutputModule
from common_ipex_conf import runtime_thread_affinity_test_env
class SimpleNet(torch.nn.Module):
    """Single strided conv (64->128, k3, s2, p1, no bias) followed by flatten."""

    def __init__(self):
        super(SimpleNet, self).__init__()
        self.conv = torch.nn.Conv2d(
            64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, x):
        features = self.conv(x)
        return torch.flatten(features, start_dim=1)
class SimpleNet_v2(torch.nn.Module):
    """Two strided convs (3->64 then 64->64, each k3/s2/p1, no bias) followed
    by flatten."""

    def __init__(self):
        super(SimpleNet_v2, self).__init__()
        self.conv = torch.nn.Conv2d(
            3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )
        self.conv2 = torch.nn.Conv2d(
            64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False
        )

    def forward(self, x):
        out = self.conv2(self.conv(x))
        return torch.flatten(out, start_dim=1)
class TestJitRuntimeAPI(JitTestCase):
    """Tests for ipex.cpu.runtime.Task with TorchScript models.

    All tests require the IPEX runtime extension and run under
    ``runtime_thread_affinity_test_env``; async submissions return a future
    whose ``.get()`` result is compared against the direct call.
    """

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_fp32_jit_model(self):
        """Async Task submission of a traced FP32 model."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        trace_model = torch.jit.trace(model, x)
        y = trace_model(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(trace_model, cpu_pool)
        # Task submit and get
        y_runtime_future = task(x)
        y_runtime = y_runtime_future.get()
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_sync_api_fp32_jit_model(self):
        """Synchronous run_sync() on a traced FP32 model."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        trace_mode = torch.jit.trace(model, x)
        y = trace_mode(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
        # Task sync run
        y_runtime = task.run_sync(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_bf16_jit_model(self):
        """Async Task submission of a model traced under BF16 autocast."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_mode = torch.jit.trace(model, x)
        y = trace_mode(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
        # Task submit and wait
        y_runtime_future = task(x)
        y_runtime = y_runtime_future.get()
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_bf16_jit_model_multi_submission(self):
        """Three queued submissions to one Task all yield the reference result."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_mode = torch.jit.trace(model, x)
        y = trace_mode(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
        # Submit task 3 times, then wait for result
        y_runtime = []
        y_runtime_future = []
        for i in range(3):
            y_runtime_future.append(task(x))
        for item in y_runtime_future:
            y_runtime.append(item.get())
        self.assertEqual(y, y_runtime[0])
        self.assertEqual(y, y_runtime[1])
        self.assertEqual(y, y_runtime[2])

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_copy_bf16_jit_mode(self):
        """A copied Task handle submits to the same underlying worker."""
        model = SimpleNet()
        model.eval()
        x = torch.rand(64, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_mode = torch.jit.trace(model, x)
        y = trace_mode(x)
        # Create task
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        task = ipex.cpu.runtime.Task(trace_mode, cpu_pool)
        # Copy Task
        task2 = task
        # Task submit and wait
        y_runtime_future = task(x)
        y_runtime = y_runtime_future.get()
        y_runtime_future2 = task2(x)
        y_runtime2 = y_runtime_future2.get()
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, y_runtime2)
class TestJITMultiStreamModule(JitTestCase):
    """Tests for ipex.cpu.runtime.MultiStreamModule over TorchScript models.

    Covers batch splitting across streams (including batch sizes that do not
    divide evenly), ``concat_output=False`` (per-stream output lists),
    automatic stream-count selection, and clamping when more streams than
    cores are requested.
    """

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module_bf16_jit_model(self):
        """One stream per core, one sample per stream: output matches direct call."""
        model = SimpleNet()
        model.eval()
        cpu_pool = ipex.cpu.runtime.CPUPool()
        batch_size = cpu_pool.core_ids.__len__()
        x = torch.rand(batch_size, 64, 3, 3)
        num_streams = batch_size
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_model = torch.jit.trace(model, x)
        y = trace_model(x)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool()
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            trace_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        y_runtime = multi_stream_model(x)
        self.assertEqual(y, y_runtime)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module_bf16_jit_model_concat_output(self):
        """concat_output=False returns one tensor per stream; cat equals the
        concatenated result."""
        model = SimpleNet()
        model.eval()
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        batch_size = cpu_pool.core_ids.__len__()
        x = torch.rand(batch_size, 64, 3, 3)
        num_streams = batch_size
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_model = torch.jit.trace(model, x)
        # Create MultiStreamModule
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            trace_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        y_runtime = multi_stream_model(x)
        # Create MultiStreamModule with concat_output=False
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            trace_model, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y_runtime2.__len__(), num_streams)
        self.assertEqual(y_runtime, torch.cat(y_runtime2))

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_single_stream_module_bf16_jit_model(self):
        """num_streams=1 degenerates to a single direct execution."""
        model = SimpleNet()
        model.eval()
        batch_size = ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            trace_model = torch.jit.trace(model, x)
        y = trace_model(x)
        # Create MultiStreamModule
        cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            trace_model, num_streams=1, cpu_pool=cpu_pool
        )
        y_runtime = multi_stream_model(x)
        # Create MultiStreamModule with concat_output=False
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            trace_model, num_streams=1, cpu_pool=cpu_pool, concat_output=False
        )
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, y_runtime2[0])

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_core_number_not_divisible_stream_number_bf16_jit_model(self):
        """3 cores split across 2 streams still reproduces the reference."""
        model = SimpleNet()
        model.eval()
        num_streams = 2
        batch_size = num_streams
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            traced_model = torch.jit.trace(model, x)
            traced_model = torch.jit.freeze(traced_model)
            # Calculate the reference result
            y = traced_model(x)
        # Create MultiStreamModule
        # Core Number is 3, stream Number is 2
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            traced_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            traced_model,
            num_streams=num_streams,
            cpu_pool=cpu_pool,
            concat_output=False,
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_batchsize_less_than_stream_number_bf16_jit_model(self):
        """Batch of 2 over 3 streams: some streams receive no input."""
        model = SimpleNet()
        model.eval()
        num_streams = 3
        batch_size = 2
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            traced_model = torch.jit.trace(model, x)
            traced_model = torch.jit.freeze(traced_model)
            # Calculate the reference result
            y = traced_model(x)
        # Create MultiStreamModule
        # Batchsize 2, Core Number is 3, stream Number is 3
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            traced_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            traced_model,
            num_streams=num_streams,
            cpu_pool=cpu_pool,
            concat_output=False,
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_batchsize_not_divisible_stream_number_bf16_jit_model(self):
        """Batch of 4 over 3 streams splits 2/1/1 across the streams."""
        model = SimpleNet()
        model.eval()
        num_streams = 3
        batch_size = 4
        x = torch.rand(batch_size, 64, 3, 3)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            traced_model = torch.jit.trace(model, x)
            traced_model = torch.jit.freeze(traced_model)
            # Calculate the reference result
            y = traced_model(x)
        # Create MultiStreamModule
        # Batchsize 4, Core Number is 3, stream Number is 3
        cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            traced_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
            traced_model,
            num_streams=num_streams,
            cpu_pool=cpu_pool,
            concat_output=False,
        )
        y_runtime = multi_stream_model(x)
        y_runtime2 = multi_stream_model2(x)
        self.assertEqual(y, y_runtime)
        self.assertEqual(y, torch.cat(y_runtime2))
        self.assertEqual(y_runtime2[0].size(0), 2)
        self.assertEqual(y_runtime2[1].size(0), 1)
        self.assertEqual(y_runtime2[2].size(0), 1)

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_stream_number_auto_bf16_jit_model(self):
        """Omitting num_streams picks the pool's default stream count."""
        model = torch.nn.Softmax(dim=-1)
        model.eval()
        for i in range(ipex.cpu.runtime.get_core_list_of_node_id(0).__len__()):
            batch_size = list(range(i + 1)).__len__()
            x = torch.rand(batch_size, 64)
            # Calculate the reference result
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                traced_model = torch.jit.trace(model, x)
                traced_model = torch.jit.freeze(traced_model)
                # Warm Up
                for _ in range(3):
                    traced_model(x)
                # Calculate the reference result
                y = traced_model(x)
            cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=list(range(i + 1)))
            # The stream number will be determined automatically.
            multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
                traced_model, cpu_pool=cpu_pool
            )
            y_runtime = multi_stream_model(x)
            stream_num_ground_truth = ipex.cpu.runtime.get_default_num_streams(cpu_pool)
            self.assertEqual(y, y_runtime)
            self.assertEqual(
                multi_stream_model.get_stream_number(), stream_num_ground_truth
            )

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_stream_number_larger_than_core_number(self):
        """Requesting more streams than cores clamps to the core count."""
        model = torch.nn.Softmax(dim=-1)
        model.eval()
        cpu_pool = ipex.cpu.runtime.CPUPool()
        batch_size = cpu_pool.core_ids.__len__()
        num_streams = batch_size + 1
        x = torch.rand(batch_size, 64)
        # Calculate the reference result
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            traced_model = torch.jit.trace(model, x)
            traced_model = torch.jit.freeze(traced_model)
            # Warm Up
            for _ in range(3):
                traced_model(x)
            # Calculate the reference result
            y = traced_model(x)
        # The stream number will be determined automatically.
        multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
            traced_model, num_streams=num_streams, cpu_pool=cpu_pool
        )
        y_runtime = multi_stream_model(x)
        stream_num_ground_truth = ipex.cpu.runtime.get_default_num_streams(cpu_pool)
        self.assertEqual(y, y_runtime)
        self.assertEqual(
            multi_stream_model.get_stream_number(), cpu_pool.core_ids.__len__()
        )
class TestLLGARuntimeAPI(JitLlgaTestCase):
    """Runtime-extension tests (Task and MultiStreamModule) for int8 models
    prepared through the LLGA (oneDNN Graph) JIT quantization path.

    ``self.prepareModel`` comes from ``JitLlgaTestCase`` and returns the fused
    graph, the LLGA-quantized module and the reference CPU module.
    """

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_task_async_api_int8_jit_model(self):
        """Submit an int8 LLGA model to an async Task on node 0 and compare
        the future's result against a direct call."""
        with torch.no_grad():
            model = SimpleNet_v2()
            model.eval()
            x = torch.rand(2, 3, 224, 224).contiguous(memory_format=torch.channels_last)
            # Calculate the reference result
            graph, m_llga, m_cpu = self.prepareModel(model, [x])
            y = m_llga(x)
            # Create task
            cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
            task = ipex.cpu.runtime.Task(m_llga, cpu_pool)
            # Task submit and wait
            y_runtime_future = task(x)
            y_runtime = y_runtime_future.get()
            self.assertEqual(y, y_runtime)
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_module_int8_jit_model(self):
        """Run an int8 LLGA model through a single-stream MultiStreamModule,
        both with and without output concatenation."""
        with torch.no_grad():
            model = SimpleNet_v2()
            model.eval()
            x = torch.rand(2, 3, 224, 224).contiguous(memory_format=torch.channels_last)
            # Calculate the reference result
            graph, m_llga, m_cpu = self.prepareModel(model, [x])
            y = m_llga(x)
            # Create task
            cpu_pool = ipex.cpu.runtime.CPUPool(node_id=0)
            multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=1, cpu_pool=cpu_pool
            )
            # concat_output=False returns one output per stream.
            multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=1, cpu_pool=cpu_pool, concat_output=False
            )
            # Task submit and wait
            y_runtime = multi_stream_model(x)
            y_runtime2 = multi_stream_model2(x)
            self.assertEqual(y, y_runtime)
            self.assertEqual(y, torch.cat(y_runtime2))
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_core_number_not_divisible_stream_number_int8_jit_model(self):
        """Int8 model on a 3-core pool split across 2 streams: the uneven
        core-per-stream partition must still produce correct results."""
        with torch.no_grad():
            model = SimpleNet_v2()
            model.eval()
            num_streams = 2
            batch_size = num_streams
            x = torch.rand(batch_size, 3, 16, 16).contiguous(
                memory_format=torch.channels_last
            )
            # Calculate the reference result
            graph, m_llga, m_cpu = self.prepareModel(model, [x])
            y = m_llga(x)
            # Create MultiStreamModule
            # Core Number is 3, stream Number is 2
            cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
            multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=num_streams, cpu_pool=cpu_pool
            )
            multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
            )
            # Task submit and wait
            y_runtime = multi_stream_model(x)
            y_runtime2 = multi_stream_model2(x)
            self.assertEqual(y, y_runtime)
            self.assertEqual(y, torch.cat(y_runtime2))
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_batchsize_less_than_stream_number_int8_jit_model(self):
        """Int8 model where batch size (2) is smaller than the stream count
        (3): only ``batch_size`` streams should actually produce output."""
        with torch.no_grad():
            model = SimpleNet_v2()
            model.eval()
            num_streams = 3
            batch_size = 2
            x = torch.rand(batch_size, 3, 16, 16).contiguous(
                memory_format=torch.channels_last
            )
            # Calculate the reference result
            graph, m_llga, m_cpu = self.prepareModel(model, [x])
            y = m_llga(x)
            # Create MultiStreamModule
            # Batchsize is 2, Core Number is 3, stream Number is 3
            cpu_pool = ipex.cpu.runtime.CPUPool(core_ids=[0, 1, 2])
            multi_stream_model = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=num_streams, cpu_pool=cpu_pool
            )
            multi_stream_model2 = ipex.cpu.runtime.MultiStreamModule(
                m_llga, num_streams=num_streams, cpu_pool=cpu_pool, concat_output=False
            )
            # Task submit and wait
            y_runtime = multi_stream_model(x)
            y_runtime2 = multi_stream_model2(x)
            self.assertEqual(y, y_runtime)
            self.assertEqual(y, torch.cat(y_runtime2))
            # With batch < streams, one sample per used stream is expected.
            self.assertEqual(y_runtime2.__len__(), batch_size)
class TestMultiStreamModuleHint(JitTestCase):
    """Tests for MultiStreamModule input-split / output-concat hints.

    A hint value of ``0`` (or ``1``) names the tensor dimension along which
    inputs are scattered to streams and outputs gathered back; hints mirror
    the positional/keyword structure of the wrapped module's signature.
    """

    def init_set_up(self):
        """Return (batch_size, num_streams, cpu_pool) with one stream per
        core of the default CPU pool."""
        # Create Multi Stream Module without concat output
        cpu_pool = ipex.cpu.runtime.CPUPool()
        batch_size = cpu_pool.core_ids.__len__()
        num_streams = cpu_pool.core_ids.__len__()
        return batch_size, num_streams, cpu_pool
    def create_jit_traced_model(self, model, input):
        """Trace and freeze ``model`` with the given example ``input``."""
        traced_model = torch.jit.trace(model, input).eval()
        traced_model = torch.jit.freeze(traced_model)
        return traced_model
    def create_multi_stream_module(
        self,
        traced_model,
        num_streams,
        cpu_pool,
        multi_stream_input_hint,
        multi_stream_output_hint=None,
        concat_output=True,
    ):
        """Build a MultiStreamModule; the output hint is only passed when
        concatenated output is requested."""
        if not concat_output:
            return ipex.cpu.runtime.MultiStreamModule(
                traced_model,
                num_streams=num_streams,
                cpu_pool=cpu_pool,
                concat_output=False,
                input_split_hint=multi_stream_input_hint,
            )
        else:
            return ipex.cpu.runtime.MultiStreamModule(
                traced_model,
                num_streams=num_streams,
                cpu_pool=cpu_pool,
                input_split_hint=multi_stream_input_hint,
                output_concat_hint=multi_stream_output_hint,
            )
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_input_output_hint(self):
        """Split three positional tensor inputs along dim 0 and check both
        manual and hint-driven concatenation of the three outputs."""
        batch_size, num_streams, cpu_pool = self.init_set_up()
        # This module:
        # * Accept 3 tensors as input
        # * Return a tuple of 3 tensors as output
        model = TestInputOutputModule().eval()
        for batch_size in (num_streams - 1, num_streams):
            # There is test for when batch_size is less than num_streams
            input_tensor1 = torch.rand(batch_size, 1)
            input_tensor2 = torch.rand(batch_size, 1)
            input_tensor3 = torch.rand(batch_size, 1)
            # Since jit trace only accept single tensor or a tuple of tensors as input
            # https://pytorch.org/docs/stable/generated/torch.jit.trace.html#torch-jit-trace
            jit_input = (input_tensor1, input_tensor2, input_tensor3)
            traced_model = self.create_jit_traced_model(model, jit_input)
            # Warm Up in the main thread to finish the jit pass optimizations
            for _ in range(3):
                traced_model(input_tensor1, input_tensor2, input_tensor3)
            # Calculate the reference result
            y_ref = traced_model(input_tensor1, input_tensor2, input_tensor3)
            multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(0, 0, 0)
            multi_stream_model = self.create_multi_stream_module(
                traced_model,
                num_streams,
                cpu_pool,
                multi_stream_input_hint,
                concat_output=False,
            )
            y_runtime = multi_stream_model(input_tensor1, input_tensor2, input_tensor3)
            # Manually concat the output
            y_runtime_res1 = []
            y_runtime_res2 = []
            y_runtime_res3 = []
            # Only batch_size streams carry data when batch < num_streams.
            for stream_id in range(
                num_streams if ((batch_size // num_streams) >= 1) else batch_size
            ):
                y_runtime_res1.append(y_runtime[stream_id][0])
                y_runtime_res2.append(y_runtime[stream_id][1])
                y_runtime_res3.append(y_runtime[stream_id][2])
            y_runtime_res = (
                torch.cat(y_runtime_res1),
                torch.cat(y_runtime_res2),
                torch.cat(y_runtime_res3),
            )
            self.assertEqual(y_ref, y_runtime_res)
            # Create Multi Stream Module with concat output
            multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
            multi_stream_model2 = self.create_multi_stream_module(
                traced_model,
                num_streams,
                cpu_pool,
                multi_stream_input_hint,
                multi_stream_output_hint,
                concat_output=True,
            )
            y_runtime_res2 = multi_stream_model2(
                input_tensor1, input_tensor2, input_tensor3
            )
            self.assertEqual(y_ref, y_runtime_res2)
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_simulate_bert_large_input_output(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()
            def forward(self, key1, key2, key3):
                # key3 is intentionally consumed but not returned.
                return key1 * 2, key2 * 2
        # This module simulates the behaviour of Bert Large LZ models:
        # * Accept 3 tensors (with key word) as input
        # * Return a tuple of 2 tensors as output
        model = TestModule().eval()
        batch_size, num_streams, cpu_pool = self.init_set_up()
        jit_input = (
            torch.rand(batch_size, 1),
            torch.rand(batch_size, 2),
            torch.rand(batch_size, 3),
        )
        traced_model = self.create_jit_traced_model(model, jit_input)
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 1)
        input_tensor3 = torch.rand(batch_size, 1)
        # Warm Up
        for _ in range(3):
            traced_model(key1=input_tensor1, key2=input_tensor2, key3=input_tensor3)
        # Calculate the reference result
        y_ref = traced_model(key1=input_tensor1, key2=input_tensor2, key3=input_tensor3)
        # Keyword-only input hint: split every named input along dim 0.
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(
            key1=0, key2=0, key3=0
        )
        multi_stream_model = self.create_multi_stream_module(
            traced_model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            concat_output=False,
        )
        y_runtime = multi_stream_model(
            key1=input_tensor1, key2=input_tensor2, key3=input_tensor3
        )
        # Manually Concat the output
        y_runtime_res1 = []
        y_runtime_res2 = []
        for i in range(num_streams):
            y_runtime_res1.append(y_runtime[i][0])
            y_runtime_res2.append(y_runtime[i][1])
        y_runtime_res = (torch.cat(y_runtime_res1), torch.cat(y_runtime_res2))
        self.assertEqual(y_ref, y_runtime_res)
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0))
        multi_stream_model2 = self.create_multi_stream_module(
            traced_model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res2 = multi_stream_model2(
            key1=input_tensor1, key2=input_tensor2, key3=input_tensor3
        )
        self.assertEqual(y_ref, y_runtime_res2)
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_mix_position_keyword_input_output_hint(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super(TestModule, self).__init__()
            def forward(self, param1, param2, key1=None):
                return param1, param2, key1
        batch_size, num_streams, cpu_pool = self.init_set_up()
        # This module simulates the behaviour of Bert Large LZ models:
        # * Accept 3 tensors (2 position parameter and 1 key word parameter) as input
        # * Return a tuple of 3 tensors as output
        model = TestModule().eval()
        jit_input = (
            torch.rand(batch_size, 1),
            torch.rand(batch_size, 2),
            torch.rand(batch_size, 3),
        )
        traced_model = self.create_jit_traced_model(model, jit_input)
        input_tensor1 = torch.rand(batch_size, 1)
        input_tensor2 = torch.rand(batch_size, 2)
        input_tensor3 = torch.rand(batch_size, 3)
        input = (input_tensor1, input_tensor2)
        k_input = {"key1": input_tensor3}
        # Warm Up
        for _ in range(3):
            traced_model(input_tensor1, input_tensor2, key1=input_tensor3)
        # Calculate the reference result
        y_ref = traced_model(*input, **k_input)
        # Sanity-check that all three call conventions agree.
        y_ref2 = traced_model(input_tensor1, input_tensor2, input_tensor3)
        y_ref3 = traced_model(input_tensor1, input_tensor2, key1=input_tensor3)
        self.assertEqual(y_ref, y_ref2)
        self.assertEqual(y_ref, y_ref3)
        # Be careful, jit traced model will change the input type
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(0, 0, key1=0)
        # Create Multi Stream Module with concat output
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((0, 0, 0))
        multi_stream_model = self.create_multi_stream_module(
            traced_model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        # There are 2 ways to write now
        y_runtime_res = multi_stream_model(
            input_tensor1, input_tensor2, key1=input_tensor3
        )
        y_runtime_res2 = multi_stream_model(*input, **k_input)
        self.assertEqual(y_ref, y_runtime_res)
        self.assertEqual(y_ref, y_runtime_res2)
    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_input_output_hint_not_along_dim_zero(self):
        """Split/concat along mixed dimensions (1, 0, 1) rather than only
        the batch dimension."""
        batch_size, num_streams, cpu_pool = self.init_set_up()
        # This module:
        # * Accept 3 tensors as input
        # * Return a tuple of 3 tensors as output
        model = TestInputOutputModule().eval()
        input_tensor1 = torch.rand(1, batch_size)
        input_tensor2 = torch.rand(batch_size, 2)
        input_tensor3 = torch.rand(3, batch_size)
        # Since jit trace only accept single tensor or a tuple of tensors as input
        # https://pytorch.org/docs/stable/generated/torch.jit.trace.html#torch-jit-trace
        jit_input = (input_tensor1, input_tensor2, input_tensor3)
        traced_model = self.create_jit_traced_model(model, jit_input)
        # Warm Up in the main thread to finish the jit pass optimizations
        for _ in range(3):
            traced_model(input_tensor1, input_tensor2, input_tensor3)
        # Calculate the reference result
        y_ref = traced_model(input_tensor1, input_tensor2, input_tensor3)
        multi_stream_input_hint = ipex.cpu.runtime.MultiStreamModuleHint(1, 0, 1)
        multi_stream_model = self.create_multi_stream_module(
            traced_model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            concat_output=False,
        )
        y_runtime = multi_stream_model(input_tensor1, input_tensor2, input_tensor3)
        # Manually concat the output
        y_runtime_res1 = []
        y_runtime_res2 = []
        y_runtime_res3 = []
        for stream_id in range(
            num_streams if ((batch_size // num_streams) >= 1) else batch_size
        ):
            y_runtime_res1.append(y_runtime[stream_id][0])
            y_runtime_res2.append(y_runtime[stream_id][1])
            y_runtime_res3.append(y_runtime[stream_id][2])
        # Concat dims must mirror the input-split dims (1, 0, 1).
        y_runtime_res = (
            torch.cat(y_runtime_res1, 1),
            torch.cat(y_runtime_res2, 0),
            torch.cat(y_runtime_res3, 1),
        )
        self.assertEqual(y_ref, y_runtime_res)
        # Create Multi Stream Module with concat output
        multi_stream_output_hint = ipex.cpu.runtime.MultiStreamModuleHint((1, 0, 1))
        multi_stream_model2 = self.create_multi_stream_module(
            traced_model,
            num_streams,
            cpu_pool,
            multi_stream_input_hint,
            multi_stream_output_hint,
            concat_output=True,
        )
        y_runtime_res2 = multi_stream_model2(
            input_tensor1, input_tensor2, input_tensor3
        )
        self.assertEqual(y_ref, y_runtime_res2)
class TestMultiStreamBenchmarkModule(JitTestCase):
    """Smoke test for the internal ``_MultiStreamBenchmarkModule`` wrapper."""

    @unittest.skipIf(
        not ipex.cpu.runtime.is_runtime_ext_enabled(),
        "Skip when IPEX Runtime extension is not enabled",
    )
    @runtime_thread_affinity_test_env
    def test_multi_stream_benchmark_module_bf16_jit_model(self):
        """Trace a small net under bf16 autocast and run it once through
        _MultiStreamBenchmarkModule; only checks that execution succeeds."""
        net = SimpleNet().eval()
        sample = torch.rand(1, 64, 3, 3)
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            traced = torch.jit.trace(net, sample)
            # Warm up the traced graph before benchmarking.
            for _ in range(3):
                traced(sample)
            # No numerical comparison here — the wrapper just has to run.
            bench_module = ipex.cpu.runtime._MultiStreamBenchmarkModule(traced)
            bench_module(sample)
# Allow running this test file directly (outside a test harness).
if __name__ == "__main__":
    test = unittest.main()
# This Python file uses the following encoding: utf-8
# !/usr/bin/env python
import torch
import intel_extension_for_pytorch as ipex # flake8: noqa
import itertools
import unittest
from torch.testing._internal.common_utils import TestCase
from common_utils import TestModule, _empty_weight_bias_parameter_names
import bench.custom_op_bench.optimizer
from torch.optim import Adadelta, AdamW, Adamax, ASGD, RMSprop, Rprop
import copy
class TestOptimizers(TestCase):
    """End-to-end checks that optimizers returned by ``ipex.optimize`` update
    parameters the same way as the stock torch optimizers they replace."""

    def _test_update(
        self, module, optimizer, dtype, split_master_weight_for_bf16, set_to_none, fused
    ):
        """Run two forward/backward/step iterations on ``module`` with the
        stock ``optimizer`` and on an ipex-optimized copy, then compare model
        and optimizer state (looser tolerance for bf16)."""
        atol, rtol = None, None
        if dtype == torch.bfloat16:
            atol, rtol = 1e-2, 1e-2
        ipex_module, ipex_optimizer = ipex.optimize(
            module,
            dtype=dtype,
            optimizer=optimizer,
            split_master_weight_for_bf16=split_master_weight_for_bf16,
            fuse_update_step=fused,
        )
        for i in range(2):
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                # torch optmizer
                y = module(*module.input).sum()
                optimizer.zero_grad(set_to_none=set_to_none)
                y.backward()
                optimizer.step()
                # ipex optimizer
                y1 = ipex_module(*ipex_module.input).sum()
                ipex_optimizer.zero_grad(set_to_none=set_to_none)
                y1.backward()
                ipex_optimizer.step()
        origin_model_state = module.state_dict()
        ipex_model_state = ipex_module.state_dict()
        for var_name in origin_model_state:
            self.assertEqual(
                origin_model_state[var_name],
                ipex_model_state[var_name],
                atol=atol,
                rtol=rtol,
            )
        origin_optimizer_state = optimizer.state_dict()
        ipex_optimizer_state = ipex_optimizer.state_dict()
        # Only compare the "state" entry; param_groups may legitimately
        # differ after ipex rewrites the optimizer.
        for var_name in origin_optimizer_state:
            if var_name == "state":
                self.assertEqual(
                    origin_optimizer_state[var_name],
                    ipex_optimizer_state[var_name],
                    atol=atol,
                    rtol=rtol,
                )
    def test_sgd(self):
        """Sweep SGD hyper-parameter combinations against ipex.optimize."""
        M = TestModule()
        options = itertools.product(
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [0.1, 0],
            [0.1, 0],
            [0.1, 0],
            [True, False],
            [True, False],
            [True, False],
            [True, False],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            dtype,
            momentum,
            weight_decay,
            dampening,
            nesterov,
            foreach,
            maximize,
            fused,
        ) in options:
            if nesterov and (momentum <= 0 or dampening != 0):
                # dose not support such configs
                continue
            sgd = torch.optim.SGD(
                M.parameters(),
                lr=0.001,
                momentum=momentum,
                weight_decay=weight_decay,
                dampening=dampening,
                nesterov=nesterov,
                foreach=foreach,
                maximize=maximize,
            )
            self._test_update(
                M, sgd, dtype, split_master_weight_for_bf16, set_to_none, fused=fused
            )
    def test_sgd_fallback(self):
        """SGD with sparse gradients: the fused path must fall back and still
        match stock SGD."""
        # for sparse grad with weight_decay/momentum !=0, stock pytorch will also failed
        M = TestModule(has_sparse_grad=True)
        options = itertools.product(
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [0.1, 0],
            [True, False],
            [True, False],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            dtype,
            dampening,
            foreach,
            maximize,
        ) in options:
            if foreach:
                # stock pytorch will fail while foreach and has_sparse_grad
                continue
            sgd = torch.optim.SGD(
                M.parameters(),
                lr=0.001,
                dampening=dampening,
                foreach=foreach,
                maximize=maximize,
            )
            self._test_update(
                M, sgd, dtype, split_master_weight_for_bf16, set_to_none, fused=True
            )
    def test_adagrad(self):
        """Sweep Adagrad hyper-parameter combinations against ipex.optimize."""
        M = TestModule()
        options = itertools.product(
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [0.1, 0],
            [0.1, 0],
            [0.1, 0],
            [1e-5, 0],
            [True, False],
            [True, False],
            [True],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            dtype,
            lr_decay,
            weight_decay,
            initial_accumulator_value,
            eps,
            foreach,
            maximize,
            fused,
        ) in options:
            adagrad = torch.optim.Adagrad(
                M.parameters(),
                lr=0.001,
                lr_decay=lr_decay,
                weight_decay=weight_decay,
                initial_accumulator_value=initial_accumulator_value,
                eps=eps,
                foreach=foreach,
                maximize=maximize,
            )
            self._test_update(
                M, adagrad, dtype, split_master_weight_for_bf16, set_to_none, fused
            )
    def test_adagrad_fallback(self):
        """Adagrad with sparse gradients: fused path must fall back and still
        match stock Adagrad."""
        M = TestModule(has_sparse_grad=True)
        options = itertools.product(
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [0.1, 0],
            [0.1, 0],
            [1e-5, 0],
            [True, False],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            dtype,
            lr_decay,
            initial_accumulator_value,
            eps,
            maximize,
        ) in options:
            adagrad = torch.optim.Adagrad(
                M.parameters(),
                lr=0.001,
                lr_decay=lr_decay,
                initial_accumulator_value=initial_accumulator_value,
                eps=eps,
                maximize=maximize,
            )
            self._test_update(
                M, adagrad, dtype, split_master_weight_for_bf16, set_to_none, fused=True
            )
    def test_lamb(self):
        """Sweep ipex's own Lamb optimizer (fused vs non-fused)."""
        M = TestModule()
        options = itertools.product(
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [(0.1, 0.111), (0.9, 0.999)],
            [1e-8],
            [0, 0.1],
            [True, False],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            dtype,
            betas,
            eps,
            weight_decay,
            fused,
        ) in options:
            lamb = ipex.optim._lamb.Lamb(
                M.parameters(),
                lr=0.001,
                betas=betas,
                eps=eps,
                weight_decay=weight_decay,
                fused=fused,
            )
            self._test_update(
                M, lamb, dtype, split_master_weight_for_bf16, set_to_none, fused
            )
    def test_adam(self):
        """Sweep Adam hyper-parameter combinations against ipex.optimize."""
        M = TestModule()
        options = itertools.product(
            [True, False],
            [True, False],
            [True, False],
            [torch.float, torch.bfloat16],
            [(0.1, 0.111), (0.9, 0.999)],
            [1e-8],
            [0, 0.1],
            [True, False],
            [True, False],
            [True, False],
        )
        for (
            set_to_none,
            split_master_weight_for_bf16,
            amsgrad,
            dtype,
            betas,
            eps,
            weight_decay,
            foreach,
            maximize,
            fused,
        ) in options:
            if foreach:
                # there is a bug for foreach option in stock PT: https://github.com/pytorch/pytorch/issues/78807
                continue
            adam = torch.optim.Adam(
                M.parameters(),
                lr=0.001,
                betas=betas,
                eps=eps,
                weight_decay=weight_decay,
                amsgrad=amsgrad,
                foreach=foreach,
                maximize=maximize,
            )
            self._test_update(
                M, adam, dtype, split_master_weight_for_bf16, set_to_none, fused
            )
class TestFusedSteps(TestCase):
    """Direct tests of the fused optimizer-step custom ops
    (``torch.ops.torch_ipex.*_fused_step``) against the non-fused reference
    implementations from ``bench.custom_op_bench.optimizer``.

    Each test runs the same step on several parameter sets:
      * fp32 (fused vs non-fused),
      * bf16 with split master weight (param2/trail2 from
        ``split_float_bfloat16``),
      * bf16 with a full fp32 master weight plus a separate bf16 copy,
      * non-contiguous fp32 tensors,
      * double precision.
    An empty ``torch.Tensor()`` stands in for the unused trail argument on
    the non-split paths.
    """

    def test_lamb_step(self):
        """Fused LAMB step vs non-fused reference across all dtype layouts."""
        fused = torch.ops.torch_ipex.lamb_fused_step
        non_fused = bench.custom_op_bench.optimizer.non_fused_lamb
        # fused fp32 args
        param = torch.randn(31, 33)
        grad = torch.randn(31, 33)
        exp_avg = torch.randn(31, 33).abs()
        exp_avg_sq = torch.randn(31, 33).abs()
        trail = torch.Tensor()
        # fused bf16 params (master weight split)
        param2, trail2 = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16()
        exp_avg2 = exp_avg.clone()
        exp_avg_sq2 = exp_avg_sq.clone()
        # fused bf16 params (master weight)
        param3 = param.clone()
        grad3 = grad.bfloat16()
        exp_avg3 = exp_avg.clone()
        exp_avg_sq3 = exp_avg_sq.clone()
        bf16_param = param3.bfloat16()
        # non-fused fp32 params
        param4 = param.clone()
        grad4 = grad.clone()
        exp_avg4 = exp_avg.clone()
        exp_avg_sq4 = exp_avg_sq.clone()
        # fused and non-contiguous fp32 args
        param5 = param.clone().t().contiguous().t()
        grad5 = grad.clone().t().contiguous().t()
        exp_avg5 = exp_avg.clone().t().contiguous().t()
        exp_avg_sq5 = exp_avg_sq.clone().t().contiguous().t()
        step = 10
        beta1 = 0.8
        beta2 = 0.9
        learning_rate = 0.1
        weight_decay = 0.3
        eps = 0.001
        fused(
            param,
            exp_avg,
            exp_avg_sq,
            grad,
            trail,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param2,
            exp_avg2,
            exp_avg_sq2,
            grad2,
            trail2,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param3,
            exp_avg3,
            exp_avg_sq3,
            grad3,
            bf16_param,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        non_fused(
            param4,
            exp_avg4,
            exp_avg_sq4,
            grad4,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param5,
            exp_avg5,
            exp_avg_sq5,
            grad5,
            trail,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        # compare fused and non-fused
        self.assertEqual(param, param4)
        self.assertEqual(exp_avg, exp_avg4)
        self.assertEqual(exp_avg_sq, exp_avg_sq4)
        # compare fused fp32 and fused bf16
        self.assertEqual(param, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg, exp_avg2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg_sq, exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        # compare split vs non-split
        self.assertEqual(param3, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg3, exp_avg2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg_sq3, exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        # make sure bf16_param are updated
        self.assertEqual(bf16_param, param3.bfloat16())
        # compare fused contiguous and fused non-contiguous()
        self.assertEqual(param, param5)
        self.assertEqual(exp_avg, exp_avg5)
        self.assertEqual(exp_avg_sq, exp_avg_sq5)
        # fused double args
        param = torch.randn(31, 33).double()
        grad = torch.randn(31, 33).double()
        exp_avg = torch.randn(31, 33).double().abs()
        exp_avg_sq = torch.randn(31, 33).double().abs()
        trail = torch.Tensor()
        # non-fused double params
        param2 = param.clone()
        grad2 = grad.clone()
        exp_avg2 = exp_avg.clone()
        exp_avg_sq2 = exp_avg_sq.clone()
        fused(
            param,
            exp_avg,
            exp_avg_sq,
            grad,
            trail,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        non_fused(
            param2,
            exp_avg2,
            exp_avg_sq2,
            grad2,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        # compare fused and non-fused for double
        self.assertEqual(param, param2)
        self.assertEqual(exp_avg, exp_avg2)
        self.assertEqual(exp_avg_sq, exp_avg_sq2)
    def test_adam_step(self):
        """Fused Adam step (with amsgrad) vs non-fused reference."""
        fused = torch.ops.torch_ipex.adam_fused_step
        non_fused = bench.custom_op_bench.optimizer.non_fused_adam
        # fused fp32 args
        param = torch.randn(31, 33)
        grad = torch.randn(31, 33)
        exp_avg = torch.randn(31, 33).abs()
        exp_avg_sq = torch.randn(31, 33).abs()
        max_exp_avg_sq = torch.randn(31, 33).abs()
        trail = torch.Tensor()
        # fused bf16 params (master weight split)
        param2, trail2 = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16()
        exp_avg2 = exp_avg.clone()
        exp_avg_sq2 = exp_avg_sq.clone()
        max_exp_avg_sq2 = max_exp_avg_sq.clone()
        # fused bf16 params (master weight)
        param3 = param.clone()
        grad3 = grad.bfloat16()
        exp_avg3 = exp_avg.clone()
        exp_avg_sq3 = exp_avg_sq.clone()
        max_exp_avg_sq3 = max_exp_avg_sq.clone()
        bf16_param = param3.bfloat16()
        # non-fused fp32 params
        param4 = param.clone()
        grad4 = grad.clone()
        exp_avg4 = exp_avg.clone()
        exp_avg_sq4 = exp_avg_sq.clone()
        max_exp_avg_sq4 = max_exp_avg_sq.clone()
        # fused and non-contiguous fp32 args
        param5 = param.clone().t().contiguous().t()
        grad5 = grad.clone().t().contiguous().t()
        exp_avg5 = exp_avg.clone().t().contiguous().t()
        exp_avg_sq5 = exp_avg_sq.clone().t().contiguous().t()
        max_exp_avg_sq5 = max_exp_avg_sq.clone().t().contiguous().t()
        step = 10
        beta1 = 0.8
        beta2 = 0.9
        learning_rate = 0.1
        weight_decay = 0.3
        eps = 0.001
        amsgrad = True
        fused(
            param,
            exp_avg,
            exp_avg_sq,
            max_exp_avg_sq,
            grad,
            trail,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param2,
            exp_avg2,
            exp_avg_sq2,
            max_exp_avg_sq2,
            grad2,
            trail2,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param3,
            exp_avg3,
            exp_avg_sq3,
            max_exp_avg_sq3,
            grad3,
            bf16_param,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        non_fused(
            param4,
            exp_avg4,
            exp_avg_sq4,
            max_exp_avg_sq4,
            grad4,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        fused(
            param5,
            exp_avg5,
            exp_avg_sq5,
            max_exp_avg_sq5,
            grad5,
            trail,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        # compare fused and non-fused
        self.assertEqual(param, param4)
        self.assertEqual(exp_avg, exp_avg4)
        self.assertEqual(exp_avg_sq, exp_avg_sq4)
        self.assertEqual(max_exp_avg_sq, max_exp_avg_sq4)
        # compare fused fp32 and fused bf16
        self.assertEqual(param, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg, exp_avg2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg_sq, exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(max_exp_avg_sq, max_exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        # compare split vs non-split
        self.assertEqual(param3, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg3, exp_avg2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(exp_avg_sq3, exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(max_exp_avg_sq3, max_exp_avg_sq2.float(), rtol=1e-4, atol=1e-1)
        # make sure bf16_param are updated
        self.assertEqual(bf16_param, param3.bfloat16())
        # compare fused contiguous and fused non-contiguous()
        self.assertEqual(param, param5)
        self.assertEqual(exp_avg, exp_avg5)
        self.assertEqual(exp_avg_sq, exp_avg_sq5)
        self.assertEqual(max_exp_avg_sq, max_exp_avg_sq5)
        # fused double args
        param = torch.randn(31, 33).double()
        grad = torch.randn(31, 33).double()
        exp_avg = torch.randn(31, 33).double().abs()
        exp_avg_sq = torch.randn(31, 33).double().abs()
        max_exp_avg_sq = torch.randn(31, 33).double().abs()
        trail = torch.Tensor()
        # non-fused double params
        param2 = param.clone()
        grad2 = grad.clone()
        exp_avg2 = exp_avg.clone()
        exp_avg_sq2 = exp_avg_sq.clone()
        max_exp_avg_sq2 = max_exp_avg_sq.clone()
        fused(
            param,
            exp_avg,
            exp_avg_sq,
            max_exp_avg_sq,
            grad,
            trail,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        non_fused(
            param2,
            exp_avg2,
            exp_avg_sq2,
            max_exp_avg_sq2,
            grad2,
            amsgrad,
            step,
            beta1,
            beta2,
            learning_rate,
            weight_decay,
            eps,
        )
        # compare fused and non-fused for double
        self.assertEqual(param, param2)
        self.assertEqual(exp_avg, exp_avg2)
        self.assertEqual(exp_avg_sq, exp_avg_sq2)
        self.assertEqual(max_exp_avg_sq, max_exp_avg_sq2)
    def test_adagrad_step(self):
        """Fused Adagrad step vs non-fused reference."""
        fused = torch.ops.torch_ipex.adagrad_fused_step
        non_fused = bench.custom_op_bench.optimizer.non_fused_adagrad
        # fused fp32 args
        param = torch.randn(31, 33)
        grad = torch.randn(31, 33)
        state_sum = torch.randn(31, 33).abs()
        trail = torch.Tensor()
        # fused bf16 args( master weight split )
        param2, trail2 = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16()
        state_sum2 = state_sum.clone()
        # fused bf16 args( master weight )
        param3 = param.clone()
        grad3 = grad.bfloat16()
        state_sum3 = state_sum.clone()
        bf16_param = param3.bfloat16()
        # non-fused fp32 args
        param4 = param.clone()
        grad4 = grad.clone()
        state_sum4 = state_sum.clone()
        # compare fused contiguous and fused non-contiguous()
        param5 = param.clone().t().contiguous().t()
        grad5 = grad.clone().t().contiguous().t()
        state_sum5 = state_sum.clone().t().contiguous().t()
        step = 10
        learning_rate = 0.1
        weight_decay = 0.3
        lr_decay = 0.01
        eps = 0.001
        fused(
            param,
            grad,
            state_sum,
            trail,
            step,
            learning_rate,
            weight_decay,
            lr_decay,
            eps,
        )
        fused(
            param2,
            grad2,
            state_sum2,
            trail2,
            step,
            learning_rate,
            weight_decay,
            lr_decay,
            eps,
        )
        fused(
            param3,
            grad3,
            state_sum3,
            bf16_param,
            step,
            learning_rate,
            weight_decay,
            lr_decay,
            eps,
        )
        non_fused(
            param4, grad4, state_sum4, step, learning_rate, weight_decay, lr_decay, eps
        )
        fused(
            param5,
            grad5,
            state_sum5,
            trail,
            step,
            learning_rate,
            weight_decay,
            lr_decay,
            eps,
        )
        # compare fused fp32 vs non-fused fp32
        self.assertEqual(param, param4)
        self.assertEqual(state_sum, state_sum4)
        # compare fused fp32 vs fused bf16 fused
        self.assertEqual(param, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(state_sum, state_sum2.float(), rtol=1e-4, atol=1e-1)
        # compare split vs non-split
        self.assertEqual(param3, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(state_sum3, state_sum2, rtol=1e-4, atol=1e-1)
        # make sure bf16_param are updated
        self.assertEqual(bf16_param, param3.bfloat16())
        # compare fused contiguous and fused non-contiguous()
        self.assertEqual(param, param5)
        self.assertEqual(state_sum, state_sum5)
        # fused double args
        param = torch.randn(31, 33).double()
        grad = torch.randn(31, 33).double()
        state_sum = torch.randn(31, 33).double().abs()
        # non-fused double params
        param2 = param.clone()
        grad2 = grad.clone()
        state_sum2 = state_sum.clone()
        fused(
            param,
            grad,
            state_sum,
            trail,
            step,
            learning_rate,
            weight_decay,
            lr_decay,
            eps,
        )
        non_fused(
            param2, grad2, state_sum2, step, learning_rate, weight_decay, lr_decay, eps
        )
        # compare fused and non-fused for double
        self.assertEqual(param, param2)
        self.assertEqual(state_sum, state_sum2)
    def test_sgd_step(self):
        """Fused SGD step (momentum + nesterov) vs non-fused reference."""
        fused = torch.ops.torch_ipex.sgd_fused_step
        non_fused = bench.custom_op_bench.optimizer.non_fused_sgd
        # fused fp32 args
        param = torch.randn(31, 33)
        grad = torch.randn(31, 33)
        momentum_buf = torch.randn(31, 33)
        trail = torch.Tensor()
        # fused bf16 args ( master weight split )
        param2, trail2 = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16()
        momentum_buf2 = momentum_buf.clone()
        # fused bf16 args( master weight )
        param3 = param.clone()
        grad3 = grad.bfloat16()
        momentum_buf3 = momentum_buf.clone()
        bf16_param = param3.bfloat16()
        # non-fused fp32 args
        param4 = param.clone()
        grad4 = grad.clone()
        momentum_buf4 = momentum_buf.clone()
        # compare fused contiguous and fused non-contiguous()
        param5 = param.clone().t().contiguous().t()
        grad5 = grad.clone().t().contiguous().t()
        momentum_buf5 = momentum_buf.clone().t().contiguous().t()
        trail5 = torch.Tensor()
        learning_rate = 0.1
        weight_decay = 0.3
        momentum = 0.5
        dampening = 0.5
        nesterov = True
        fused(
            param,
            grad,
            momentum_buf,
            trail,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        fused(
            param2,
            grad2,
            momentum_buf2,
            trail2,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        fused(
            param3,
            grad3,
            momentum_buf3,
            bf16_param,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        non_fused(
            param4,
            grad4,
            momentum_buf4,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        fused(
            param5,
            grad5,
            momentum_buf5,
            trail,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        # compare fused fp32 vs non-fused fp32
        self.assertEqual(param, param4)
        self.assertEqual(momentum_buf, momentum_buf4)
        # compare fused fp32 vs fused bf16
        self.assertEqual(param, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(momentum_buf, momentum_buf2, rtol=1e-4, atol=1e-1)
        # compare split vs non-split
        self.assertEqual(param3, param2.float(), rtol=1e-4, atol=1e-1)
        self.assertEqual(momentum_buf3, momentum_buf2, rtol=1e-4, atol=1e-1)
        # make sure bf16_param are updated
        self.assertEqual(bf16_param, param3.bfloat16())
        # compare fused contiguous and fused non-contiguous()
        self.assertEqual(param, param5)
        self.assertEqual(momentum_buf, momentum_buf5)
        # fused double args
        param = torch.randn(31, 33).double()
        grad = torch.randn(31, 33).double()
        momentum_buf = torch.randn(31, 33).double().abs()
        # non-fused double params
        param2 = param.clone()
        grad2 = grad.clone()
        momentum_buf2 = momentum_buf.clone()
        fused(
            param,
            grad,
            momentum_buf,
            trail,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        non_fused(
            param2,
            grad2,
            momentum_buf2,
            momentum,
            learning_rate,
            weight_decay,
            dampening,
            nesterov,
        )
        # compare fused and non-fused for double
        self.assertEqual(param, param2)
        self.assertEqual(momentum_buf, momentum_buf2)
    def _test_packed_add(self, param, grad, param2, trail, grad2):
        """Apply a plain fp32 add_ and the packed_add op (bf16 top half +
        trail) and check the results agree within bf16 tolerance."""
        packed_add = torch.ops.torch_ipex.packed_add
        learning_rate = 0.1
        param.add_(grad, alpha=-learning_rate)
        packed_add(param2, trail, grad2, -learning_rate)
        # compare fp32 vs bf16 fused
        self.assertEqual(param, param2.float(), rtol=1e-4, atol=1e-1)
    def test_packed_add(self):
        """packed_add on contiguous, transposed and sliced-out tensors."""
        # contiguous case
        # fp32 args
        param = torch.randn(31, 33)
        grad = torch.randn(31, 33)
        # bf16 args
        param2, trail = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16()
        self._test_packed_add(param, grad, param2, trail, grad2)
        # transposed case
        # fp32 args
        param = torch.randn(31, 33).t().contiguous().t()
        grad = torch.randn(31, 33).t().contiguous().t()
        # bf16 args
        param2, trail = torch.ops.torch_ipex.split_float_bfloat16(param)
        grad2 = grad.bfloat16().t().contiguous().t()
        self._test_packed_add(param, grad, param2, trail, grad2)
        # sliced-out case
        # fp32 args
        base_param = torch.randn(31, 33)
        base_grad = torch.randn(31, 33)
        param = base_param[10:20, 10:20]
        grad = base_grad[10:20, 10:20]
        # bf16 args
        param2, trail = torch.ops.torch_ipex.split_float_bfloat16(base_param)
        param2 = param2[10:20, 10:20]
        trail = trail[10:20, 10:20]
        grad2 = base_grad.bfloat16()[10:20, 10:20]
        self._test_packed_add(param, grad, param2, trail, grad2)
class TestPatchedMethod(TestCase):
    """Checks that ``ipex.optimize`` patches ``Optimizer.zero_grad`` correctly:
    grads end up cleared (or set to None) on the right tensors, and the number
    of underlying zero_grad calls matches stock PyTorch."""

    def test_zero_grad(self):
        def count_zero_grad(evt_list):
            # Number of profiler events whose name mentions "zero_grad".
            count = 0
            for evt in evt_list:
                if "zero_grad" in evt.name:
                    count += 1
            return count

        M = TestModule().train()
        optimizers_list = [Adadelta, AdamW, Adamax, ASGD, RMSprop, Rprop]
        # Cross every optimizer with both zero_grad(set_to_none=...) modes.
        for optimizer, set_to_none in itertools.product(optimizers_list, [True, False]):
            ori_model = copy.deepcopy(M)
            ori_optimizer = optimizer(ori_model.parameters(), lr=0.1)
            ipex_model, ipex_optimizer = ipex.optimize(
                ori_model, torch.bfloat16, ori_optimizer
            )
            # original
            with torch.cpu.amp.autocast():
                y = ori_model(*ori_model.input).sum()
            y.backward()
            with torch.autograd.profiler.profile() as ori_prof:
                ori_optimizer.zero_grad(set_to_none=set_to_none)
            # ipex
            with torch.cpu.amp.autocast():
                y1 = ipex_model(*ipex_model.input).sum()
            y1.backward()
            # check grad are correctly attached
            for name, param in ipex_model.named_parameters():
                # We won't use the grad of the empty weight and bias tensor.
                # These tensors are only used during inference.
                if name in _empty_weight_bias_parameter_names(
                    prefixes=["conv", "linear"]
                ):
                    continue
                self.assertTrue(param.grad is not None)
            # BatchNorm params are kept in fp32 ("uncast") by ipex.optimize.
            uncast_weight = [
                ipex_model.bn.weight.data_ptr(),
                ipex_model.bn.bias.data_ptr(),
            ]
            for param in ipex_optimizer.param_groups[0]["params"]:
                if param.data_ptr() not in uncast_weight:
                    # NOTE(review): for cast params the grad appears to live on
                    # params_attr[param].parameter (presumably the fp32 master
                    # copy) rather than on the optimizer param — confirm in
                    # ipex.optimize internals.
                    self.assertTrue(param.grad is None)
                    self.assertTrue(
                        ipex_optimizer.params_attr[param].parameter.grad is not None
                    )
                else:
                    self.assertTrue(param.grad is not None)
            with torch.autograd.profiler.profile() as ipex_prof:
                ipex_optimizer.zero_grad(set_to_none=set_to_none)
            # check grad are zeroed or are set to none
            for name, param in ipex_model.named_parameters():
                # We won't use the grad of the empty weight and bias tensor.
                # These tensors are only used during inference.
                if name in _empty_weight_bias_parameter_names(
                    prefixes=["conv", "linear"]
                ):
                    continue
                expected_grad = None if set_to_none else torch.zeros_like(param)
                self.assertEqual(expected_grad, param.grad)
            for param in ipex_optimizer.param_groups[0]["params"]:
                if param.data_ptr() not in uncast_weight:
                    expected_grad = (
                        None if set_to_none else torch.zeros_like(param).bfloat16()
                    )
                    self.assertEqual(
                        expected_grad,
                        ipex_optimizer.params_attr[param].parameter.grad,
                    )
                else:
                    expected_grad = None if set_to_none else torch.zeros_like(param)
                    self.assertEqual(expected_grad, param.grad)
            # check the num of calls for 'zero_grad' are same
            self.assertEqual(
                count_zero_grad(ori_prof.function_events),
                count_zero_grad(ipex_prof.function_events),
            )
if __name__ == "__main__":
    # Run the TestCase classes defined in this module.  unittest.main() raises
    # SystemExit on completion, so the previous "test =" binding of its return
    # value was never read; drop the unused variable.
    unittest.main()
| 32,432 | 29.74218 | 112 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_rnnt_custom_kernel.py | import unittest
import copy
from itertools import product
import torch
from common_utils import TestCase
class TestRNNTUpdateBatch(TestCase):
    """Compares the fused ``rnnt_update_batch`` custom kernel against a pure
    PyTorch reference implementation of the RNN-T greedy-decode batch-update
    loop (tracking time indices, emitted labels, and LSTM hidden state)."""

    def _test_org(
        self, hidden, hidden_prime, x, batch_size, max_symbol, blank_id, loop_cnt
    ):
        """Reference (pure PyTorch) decode-update loop.

        NOTE: updates ``hidden`` in place and returns the same list objects.
        Relies on ``self._SOS`` / ``self.max_len`` being set by the caller.
        """
        f = x[:, 0, :]
        max_lens = torch.tensor(
            [self.max_len - 1 for i in range(batch_size)], dtype=torch.int64
        )
        time_idxs = torch.zeros((batch_size), dtype=torch.long)
        out_lens = torch.tensor(list(i for i in range(batch_size)), dtype=torch.long)
        blank_vec = torch.zeros((batch_size), dtype=torch.long)
        not_blank = torch.zeros((batch_size), dtype=torch.int)
        symbols_added = torch.zeros((batch_size), dtype=torch.long)
        label_row = torch.tensor(list(i for i in range(batch_size)))
        label_col = torch.zeros((batch_size), dtype=torch.long)
        label_tensor = torch.tensor([self._SOS]).repeat(
            batch_size, self.max_len * max_symbol
        )
        # k plays the role of the per-batch predicted symbol for this round.
        k = torch.tensor(list(i for i in range(batch_size)), dtype=torch.long)
        # TODO: randomly set k to blank_id (28)
        for i in range(loop_cnt):
            blankness = k.eq(blank_id)
            # a blank prediction advances that sequence's time index
            time_idxs = time_idxs + blankness
            # it doesn't matter if blank_vec is update now or later,
            # tmp_blank_vec always get correct value for this round
            blank_vec = time_idxs.ge(out_lens)
            if blank_vec.nonzero().size(0) == batch_size:
                # every sequence has exhausted its output length
                break
            symbols_added *= blankness.logical_not()
            tmp_blank_vec = blank_vec.logical_or(blankness)
            not_blank = tmp_blank_vec.eq(0)
            idx = (not_blank).nonzero(as_tuple=True)[0]
            # sequences that emitted a symbol adopt the new hidden state
            hidden[0][:, idx, :] = hidden_prime[0][:, idx, :]
            hidden[1][:, idx, :] = hidden_prime[1][:, idx, :]
            label_col += not_blank
            label_tensor.index_put_(
                [label_row, label_col], (k - self._SOS) * not_blank, accumulate=True
            )
            symbols_added += not_blank
            # force a time advance once max_symbol labels were emitted at one step
            need_add = symbols_added.ge(max_symbol)
            time_idxs += need_add
            blankness.logical_or_(need_add)
            temp = symbols_added.lt(max_symbol)
            symbols_added *= temp
            if blankness.nonzero().size(0) > 0:
                # re-fetch encoder frames for sequences whose time index moved
                fetch_time_idxs = time_idxs.min(max_lens)
                f = x[list(range(x.size(0))), fetch_time_idxs, :]
        return (
            blank_vec,
            blankness,
            label_col,
            time_idxs,
            symbols_added,
            not_blank,
            label_tensor,
            hidden,
            f,
        )

    def _test_rnnt_update_batch_kernel(
        self, hidden, hidden_prime, x, batch_size, max_symbol, blank_id, loop_cnt
    ):
        """Same update loop driven by the fused torch_ipex kernel, which
        mutates all of the bookkeeping tensors in place and returns a
        finished flag."""
        f = x[:, 0, :]
        time_idxs = torch.zeros((batch_size), dtype=torch.int)
        out_lens = torch.tensor(list(i for i in range(batch_size)), dtype=torch.int)
        blank_vec_out = torch.zeros((batch_size), dtype=torch.int)
        not_blank = torch.zeros((batch_size), dtype=torch.int)
        blankness = torch.zeros((batch_size), dtype=torch.int)
        symbols_added = torch.zeros((batch_size), dtype=torch.int)
        label_col = torch.zeros((batch_size), dtype=torch.int)
        label_tensor = torch.empty(
            (batch_size, self.max_len * max_symbol), dtype=torch.long
        ).fill_(self._SOS)
        k = torch.tensor(list(i for i in range(batch_size)), dtype=torch.long)
        label_to_put = torch.zeros((batch_size), dtype=torch.long)
        label_for_next_loop = torch.tensor(
            [self._SOS for i in range(batch_size)], dtype=torch.long
        )
        for i in range(loop_cnt):
            finished = torch.ops.torch_ipex.rnnt_update_batch(
                k,
                out_lens,
                label_col,
                symbols_added,
                time_idxs,
                blankness,
                blank_vec_out,
                not_blank,
                label_to_put,
                label_tensor,
                label_for_next_loop,
                hidden[0],
                hidden[1],
                hidden_prime[0],
                hidden_prime[1],
                x,
                f,
                max_symbol,
                blank_id,
                batch_size,
                self._SOS,
                self.max_len,
            )
            if finished:
                break
        return (
            blank_vec_out,
            blankness,
            label_col,
            time_idxs,
            symbols_added,
            not_blank,
            label_tensor,
            hidden,
            f,
        )

    def test_rnnt_update_batch(self):
        self._SOS = -1
        self.max_len = 192
        dtypes = [torch.float, torch.bfloat16]
        loop_cnts = [1, 10, 30]
        batch_sizes = [1, 15, 64, 448]
        max_symbols = [30]
        blank_ids = [1, 21]
        for batch_size, max_symbol, blank_id, loop_cnt, dtype in list(
            product(batch_sizes, max_symbols, blank_ids, loop_cnts, dtypes)
        ):
            x_org = torch.randn([self.max_len, batch_size, 2], dtype=dtype)
            x = copy.deepcopy(x_org)
            hidden = [
                torch.zeros([2, batch_size, 320], dtype=dtype),
                torch.zeros([2, batch_size, 320], dtype=dtype),
            ]
            hidden_prime = [
                torch.randn([2, batch_size, 320], dtype=dtype),
                torch.randn([2, batch_size, 320], dtype=dtype),
            ]
            # NOTE(review): both paths receive the SAME `hidden` list objects;
            # _test_org mutates them in place and returns them as hidden_org,
            # so the assertEqual(hidden_org, hidden) below compares a tensor
            # pair against itself — confirm whether a deepcopy was intended.
            (
                blank_vec_org,
                blankness_org,
                label_col_org,
                time_idxs_org,
                symbols_added_org,
                not_blank_org,
                label_tensor_org,
                hidden_org,
                f_org,
            ) = self._test_org(
                hidden,
                hidden_prime,
                x_org.transpose(0, 1),
                batch_size,
                max_symbol,
                blank_id,
                loop_cnt,
            )
            (
                blank_vec_out,
                blankness_out,
                label_col,
                time_idxs,
                symbols_added,
                not_blank,
                label_tensor,
                hidden,
                f,
            ) = self._test_rnnt_update_batch_kernel(
                hidden,
                hidden_prime,
                x.transpose(0, 1),
                batch_size,
                max_symbol,
                blank_id,
                loop_cnt,
            )
            self.assertEqual(blank_vec_org, blank_vec_out)
            self.assertEqual(blankness_org, blankness_out)
            self.assertEqual(label_col_org, label_col)
            self.assertEqual(time_idxs_org, time_idxs)
            self.assertEqual(symbols_added_org, symbols_added)
            self.assertEqual(not_blank_org, not_blank)
            self.assertEqual(label_tensor_org, label_tensor)
            self.assertEqual(hidden_org, hidden)
            self.assertEqual(f_org, f)
class TestRNNTEmbedding(TestCase):
    """Compares the fused ``rnnt_embedding`` kernel against the reference
    mask-fill + ``nn.Embedding`` sequence for SOS-aware label embedding."""

    def _test_org(self, y, embedding):
        """Reference path: map SOS entries to index 0, embed, then zero the
        embedding vectors at the SOS positions.  NOTE: mutates *y* in place."""
        y_mask = y.eq(self._SOS)
        y.masked_fill_(y_mask, 0)
        y = embedding(y)
        y.masked_fill_(y_mask.unsqueeze(2), 0.0)
        return y

    def _test_rnnt_embedding_kernel(self, y_in, embedding):
        """Fused path: the kernel writes embeddings for *y_in* into a
        preallocated output, handling SOS entries itself."""
        batch_size = y_in.shape[0]
        embedding_dim = embedding.weight.shape[1]
        # output buffer filled by the kernel
        y = torch.empty(
            [batch_size, y_in.shape[1], embedding_dim], dtype=embedding.weight.dtype
        )
        torch.ops.torch_ipex.rnnt_embedding(
            embedding.weight, y_in, y, self._SOS, batch_size, embedding_dim
        )
        return y

    def test_rnnt_embedding(self):
        self._SOS = -1
        for dtype in [torch.float, torch.bfloat16]:
            vocab_size = 29
            pred_n_hidden = 320
            embedding = torch.nn.Embedding(vocab_size - 1, pred_n_hidden).to(dtype)
            # labels include SOS (-1) entries that must embed to all-zeros
            y_org = torch.Tensor([-1, 2, 15, -1, 5]).unsqueeze(1).to(torch.long)
            y = copy.deepcopy(y_org)
            y_embed_org = self._test_org(y_org, embedding)
            y_embed = self._test_rnnt_embedding_kernel(y, embedding)
            self.assertEqual(y_embed_org, y_embed)
if __name__ == "__main__":
    # Run the TestCase classes defined in this module.  unittest.main() raises
    # SystemExit on completion, so the previous "test =" binding of its return
    # value was never read; drop the unused variable.
    unittest.main()
| 8,395 | 31.92549 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/common_nn.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
import math
import sys
import tempfile
import unittest
from copy import deepcopy
from functools import reduce
from itertools import product
from operator import mul
from math import pi
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import _Reduction
from common_utils import (
TestCase,
to_gpu,
freeze_rng_state,
is_iterable,
TEST_WITH_ROCM,
_assertGradAndGradgradChecks,
)
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors
from torch.autograd import Variable
import torch.backends.cudnn
TEST_CUDA = torch.cuda.is_available()
# tarfile module tries to obtain a file object name in python 3.3
# NOTE(review): Python 3.3 is long past end-of-life, so on any supported
# interpreter this branch is dead and TemporaryFile == tempfile.TemporaryFile.
if sys.version_info[:2] == (3, 3):
    TemporaryFile = tempfile.NamedTemporaryFile
else:
    TemporaryFile = tempfile.TemporaryFile

# Default numeric tolerance used by the module/criterion tests in this file.
PRECISION = 1e-5
def get_reduction(m):
    """Return the reduction mode string of criterion *m*.

    Prefers the modern ``reduction`` attribute; otherwise translates the
    legacy ``sizeAverage`` flag through ``_Reduction.legacy_get_string``
    (with ``reduce=True`` and warnings suppressed).
    """
    reduction = getattr(m, "reduction", None)
    if reduction is not None:
        return reduction
    legacy = _Reduction.legacy_get_string(
        getattr(m, "sizeAverage", None), True, emit_warning=False
    )
    assert legacy is not None
    return legacy
def get_weight(m):
    """Return the weight of criterion *m* from its ``weight`` attribute, or
    the legacy ``weights`` attribute, or None if neither is set."""
    for attr_name in ("weight", "weights"):
        value = getattr(m, attr_name, None)
        if value is not None:
            return value
    return None
# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
#
# The way to check API parity is to add parity tests for the NN module / functional of interest.
# Here are the detailed steps:
#
# For NN module:
# 1. Make sure you already have a test dict with the module configuration you want to test.
# 2. Add `cpp_constructor_args` entry to the test dict, with its value exactly matching
# the Python module constructor arguments. For example, if in the test dict we pass
# `(10, 8)` to `torch.nn.Linear` constructor, then we should pass `torch::nn::LinearOptions(10, 8)`
# as the corresponding C++ constructor argument to `torch::nn::Linear`.
# 3. If in the process of performing the above step you referenced any variables
# in the `cpp_constructor_args` entry, you must add `cpp_var_map` entry
# to the test dict to make sure that those variables are populated with the right Python values.
# For example, if the Python constructor call is
# `torch.nn.FractionalMaxPool2d(2, output_ratio=0.5, _random_samples=random_samples)`,
# the corresponding C++ constructor argument is
# `torch::nn::FractionalMaxPool2dOptions(2).output_ratio(0.5)._random_samples(random_samples)`,
# and the `cpp_var_map` entry must be
# `{'random_samples': random_samples}` in order to populate the C++ variable `random_samples`
# used in the C++ constructor argument with the Python tensor value `random_samples`.
#
# For NN functional:
# 1. Make sure you already have a test dict with the functional configuration you want to test.
# 2. If the test dict's `constructor` entry looks like `wrap_functional(F.some_functional_name, ...)`,
# then you must add `cpp_options_args` entry to the test dict, with its value exactly matching the Python
# functional optional arguments. For example, if the test dict's `constructor` entry is
# `wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest')`,
# then the `cpp_options_args` entry should be
# "F::InterpolateFuncOptions().size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)".
# 3. Otherwise, if the test dict's `constructor` entry looks like
# `wrap_functional(lambda i: F.some_functional_name(...))`,
# then you must add `cpp_function_call` entry to the test dict, with its value exactly matching the Python
# functional function call. For example, if the test dict's `constructor` entry is
# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`,
# then the `cpp_function_call` entry should be
# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))".
# 4. If in the process of performing the above two steps you referenced any variables
# in the `cpp_options_args` or `cpp_function_call` entry, you must
# add `cpp_var_map` entry to the test dict to make sure that those variables
# are populated with the right Python values. For example, if the test dict's `constructor` entry is
# `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`,
# then the `cpp_function_call` entry should be
# "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))".
# Notice that there are two variables `i` and `t` that need to have their values provided,
# and the way to do so is to add a `cpp_var_map` entry: `cpp_var_map={'i': '_get_input()', 't': t}`.
# (Note that for `i`, since we want it to take the Python input value, we pass '_get_input()' string as value
# and the C++ parity test mechanism will populate `i` with the Python input value correctly.)
#
# There are also a few optional flags in the test dict to control the C++ parity test behavior:
#
# - `test_cpp_api_parity`: if `False`, skips the C++ parity test for this test dict. Default: True.
# - `has_parity`: if `False`, expects this test dict to fail the C++ parity test. Default: True.
module_tests = [
dict(
module_name="Linear",
constructor_args=(10, 8),
cpp_constructor_args="torch::nn::LinearOptions(10, 8)",
input_size=(4, 10),
reference_fn=lambda i, p, _: torch.mm(i, p[0].t())
+ p[1].view(1, -1).expand(4, 8),
check_gradgrad=False,
),
dict(
module_name="Linear",
constructor_args=(10, 8, False),
cpp_constructor_args="torch::nn::LinearOptions(10, 8).bias(false)",
input_size=(4, 10),
desc="no_bias",
reference_fn=lambda i, p, _: torch.mm(i, p[0].t()),
check_gradgrad=False,
),
dict(
module_name="Threshold",
constructor_args=(2.0, 1.0),
cpp_constructor_args="torch::nn::ThresholdOptions(2., 1.)",
input_size=(2, 3, 4, 5),
check_inplace=True,
desc="threshold_value",
),
dict(
module_name="Threshold",
constructor_args=(2.0, 10.0),
cpp_constructor_args="torch::nn::ThresholdOptions(2., 10.)",
input_size=(2, 3, 4, 5),
desc="large_value",
),
dict(
module_name="ReLU",
input_size=(2, 3, 4, 5),
check_inplace=True,
),
dict(
module_name="ReLU6",
input_size=(2, 3, 4, 5),
check_inplace=True,
),
dict(
module_name="RReLU",
input_size=(1, 2, 2),
test_cuda=False,
),
dict(
module_name="RReLU",
constructor_args=(0.1, 0.9),
cpp_constructor_args="torch::nn::RReLUOptions().lower(0.1).upper(0.9)",
input_size=(4, 4, 5),
desc="with_up_down",
test_cuda=False,
),
dict(
module_name="Hardtanh",
input_size=(3, 2, 5),
reference_fn=lambda i, *_: i.clamp(-1, 1),
),
dict(
module_name="Sigmoid",
input_size=(2, 3, 4, 5),
),
dict(
module_name="Tanh",
input_size=(2, 3, 4, 5),
),
dict(
module_name="Flatten",
input_size=(2, 3, 4, 5),
reference_fn=lambda i, *_: torch.flatten(i, 1),
),
dict(
module_name="Softmax",
constructor_args=(1,),
cpp_constructor_args="torch::nn::SoftmaxOptions(1)",
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(
torch.exp(i).sum(1, True).expand(10, 20)
),
),
dict(
module_name="Softmax2d",
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(1, False)),
),
dict(
module_name="LogSoftmax",
constructor_args=(1,),
cpp_constructor_args="torch::nn::LogSoftmaxOptions(1)",
input_size=(10, 20),
reference_fn=lambda i, *_: torch.exp(i)
.div_(torch.exp(i).sum(1, True).expand(10, 20))
.log_(),
),
dict(
module_name="LogSoftmax",
constructor_args=(1,),
cpp_constructor_args="torch::nn::LogSoftmaxOptions(1)",
input_size=(1, 3, 10, 20),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(),
desc="multiparam",
),
dict(
module_name="ELU",
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::ELUOptions().alpha(2.)",
input_size=(3, 2, 5),
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2 * (x.exp() - 1)),
),
# TODO: reference function
dict(
module_name="Hardshrink",
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::HardshrinkOptions(2.)",
input_size=(4, 3, 2, 4),
),
dict(module_name="LeakyReLU", input_size=(3, 2, 5), check_inplace=True),
dict(
module_name="LeakyReLU",
constructor_args=(0.5,),
cpp_constructor_args="torch::nn::LeakyReLUOptions().negative_slope(0.5)",
input_size=(3, 2, 5),
check_inplace=True,
desc="with_negval",
),
dict(
module_name="LogSigmoid",
input_size=(2, 3, 4),
reference_fn=lambda i, *_: i.sigmoid().log(),
),
dict(
module_name="Softplus",
input_size=(10, 20),
reference_fn=lambda i, *_: torch.log(1 + torch.exp(i)),
),
dict(
module_name="Softplus",
constructor_args=(2,),
cpp_constructor_args="torch::nn::SoftplusOptions().beta(2)",
input_size=(10, 20),
reference_fn=lambda i, *_: 1.0 / 2.0 * torch.log(1 + torch.exp(2 * i)),
desc="beta",
),
dict(
module_name="Softplus",
constructor_args=(2, -100),
cpp_constructor_args="torch::nn::SoftplusOptions().beta(2).threshold(-100)",
input_size=(10, 20),
reference_fn=(
lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ ((i * 2) <= -100).type_as(i) * 1.0 / 2.0 * torch.log(1 + torch.exp(2 * i))
),
desc="beta_threshold",
),
dict(
module_name="Softshrink",
input_size=(3, 2, 5),
),
dict(
module_name="Softshrink",
constructor_args=(1,),
cpp_constructor_args="torch::nn::SoftshrinkOptions(1)",
input_size=(3, 2, 5),
desc="lambda",
),
dict(
module_name="CrossMapLRN2d",
constructor_args=(5, 5e-3, 1e-3, 2),
cpp_constructor_args="torch::nn::CrossMapLRN2dOptions(5).alpha(5e-3).beta(1e-3).k(2)",
input_size=(2, 3, 6, 6),
check_gradgrad=False,
),
dict(
module_name="PReLU",
input_size=(2, 3, 4),
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
desc="1d",
),
dict(
module_name="PReLU",
constructor_args=(3,),
cpp_constructor_args="torch::nn::PReLUOptions().num_parameters(3)",
input_size=(2, 3, 4),
desc="1d_multiparam",
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name="PReLU",
input_size=(2, 3, 4, 5),
desc="2d",
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name="PReLU",
constructor_args=(3,),
cpp_constructor_args="torch::nn::PReLUOptions().num_parameters(3)",
input_size=(2, 3, 4, 5),
desc="2d_multiparam",
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name="PReLU",
input_size=(2, 3, 4, 5, 6),
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
desc="3d",
),
dict(
module_name="PReLU",
constructor_args=(3,),
cpp_constructor_args="torch::nn::PReLUOptions().num_parameters(3)",
input_size=(2, 3, 4, 5, 6),
desc="3d_multiparam",
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
),
dict(
module_name="Softsign",
input_size=(3, 2, 5),
reference_fn=lambda i, *_: i.div(1 + torch.abs(i)),
),
dict(
module_name="Softmin",
constructor_args=(1,),
cpp_constructor_args="torch::nn::SoftminOptions(1)",
input_size=(10, 20),
),
dict(
module_name="Softmin",
constructor_args=(1,),
cpp_constructor_args="torch::nn::SoftminOptions(1)",
input_size=(2, 3, 5, 10),
desc="multidim",
),
dict(
module_name="Tanhshrink",
input_size=(2, 3, 4, 5),
),
]
# Generates rand tensor with non-equal values. This ensures that duplicate
# values won't be causing test failure for modules like MaxPooling.
# size should be small, otherwise randperm fails / long overflows.
def _rand_tensor_non_equal(*size):
total = reduce(mul, size, 1)
return torch.randperm(total).view(*size).double()
def wrap_functional(fn, **kwargs):
    """Build an ``nn.Module`` subclass whose ``forward`` applies *fn* with
    *kwargs* pre-bound, so functional APIs can run through the module test
    harness.  Returns the class itself, not an instance."""

    class FunctionalModule(nn.Module):
        def forward(self, *inputs):
            return fn(*inputs, **kwargs)

    return FunctionalModule
def poissonnllloss_no_reduce_test():
    """Test dict for unreduced F.poisson_nll_loss, checked against the
    analytic per-element reference exp(i) - t*i."""
    target = torch.randn(10, 10)
    return {
        "fullname": "PoissonNLLLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.poisson_nll_loss(i, target.type_as(i), reduction="none")
        ),
        "cpp_function_call": "F::poisson_nll_loss("
        "i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))",
        "input_fn": lambda: torch.rand(10, 10),
        "cpp_var_map": {"i": "_get_input()", "t": target},
        "reference_fn": lambda i, *_: i.exp() - target.mul(i),
        "pickle": False,
    }
def bceloss_no_reduce_test():
    """Test dict for unreduced F.binary_cross_entropy, checked against the
    analytic per-element reference -(t*log(i) + (1-t)*log(1-i))."""
    target = Variable(torch.randn(15, 10).gt(0).double())
    return {
        "fullname": "BCELoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.binary_cross_entropy(i, target.type_as(i), reduction="none")
        ),
        "cpp_function_call": "F::binary_cross_entropy("
        "i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))",
        # inputs clamped away from {0, 1} to keep log() finite
        "input_fn": lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
        "cpp_var_map": {"i": "_get_input()", "t": target},
        "reference_fn": lambda i, *_: -(
            target * i.log() + (1 - target) * (1 - i).log()
        ),
        "pickle": False,
        "precision": 7e-4,
    }
def bceloss_no_reduce_scalar_test():
    """Test dict for unreduced F.binary_cross_entropy on 0-dim (scalar)
    input and target."""
    target = torch.randn(()).gt(0).double()
    return {
        "fullname": "BCELoss_no_reduce_scalar",
        "constructor": wrap_functional(
            lambda i: F.binary_cross_entropy(i, target.type_as(i), reduction="none")
        ),
        "cpp_function_call": "F::binary_cross_entropy("
        "i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))",
        # input clamped away from {0, 1} to keep log() finite
        "input_fn": lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
        "cpp_var_map": {"i": "_get_input()", "t": target},
        "reference_fn": lambda i, *_: -(
            target * i.log() + (1 - target) * (1 - i).log()
        ),
        "pickle": False,
    }
def bceloss_weights_no_reduce_test():
t = Variable(torch.randn(15, 10).gt(0).double())
weights = torch.rand(10)
return dict(
fullname="BCELoss_weights_no_reduce",
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(
i, t.type_as(i), weight=weights.type_as(i), reduction="none"
)
),
cpp_function_call="F::binary_cross_entropy("
"i, t.to(i.options()), "
"F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))",
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={"i": "_get_input()", "t": t, "weights": weights},
reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
pickle=False,
precision=3e-4,
)
def bceloss_weights_no_reduce_scalar_test():
t = torch.randn(()).double()
weights = torch.rand(())
return dict(
fullname="BCELoss_weights_no_reduce_scalar",
constructor=wrap_functional(
lambda i: F.binary_cross_entropy(
i, t.type_as(i), weight=weights.type_as(i), reduction="none"
)
),
cpp_function_call="""F::binary_cross_entropy(
i, t.to(i.options()),
F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))""",
cpp_var_map={"i": "_get_input()", "t": t, "weights": weights},
input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()) * weights,
pickle=False,
)
def bce_with_logistic_legacy_enum_test():
t = Variable(torch.randn(15, 10).gt(0).double())
sigmoid = nn.Sigmoid()
return dict(
fullname="BCEWithLogitsLoss_legacy_enum",
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)
),
cpp_function_call="""F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))""",
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: -(
t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()
),
check_gradgrad=False,
pickle=False,
)
def bce_with_logistic_no_reduce_test():
t = Variable(torch.randn(15, 10).gt(0).double())
sigmoid = nn.Sigmoid()
return dict(
fullname="BCEWithLogitsLoss_no_reduce",
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(
i, t.type_as(i), reduction="none"
)
),
cpp_function_call="""F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))""",
input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: -(
t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()
),
check_gradgrad=False,
pickle=False,
)
def bce_with_logistic_no_reduce_scalar_test():
t = torch.randn(()).gt(0).double()
sigmoid = nn.Sigmoid()
return dict(
fullname="BCEWithLogitsLoss_no_reduce_scalar",
constructor=wrap_functional(
lambda i: F.binary_cross_entropy_with_logits(
i, t.type_as(i), reduction="none"
)
),
cpp_function_call="""F::binary_cross_entropy_with_logits(
i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))""",
input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: -(
t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()
),
check_gradgrad=False,
pickle=False,
)
def kldivloss_with_target_no_reduce_test():
i = torch.rand(10, 10).log()
return dict(
fullname="KLDivLoss_with_target_no_reduce",
constructor=wrap_functional(
lambda t: F.kl_div(i.type_as(t), t, reduction="none")
),
cpp_function_call="F::kl_div(i.to(t.options()), t, F::KLDivFuncOptions().reduction(torch::kNone))",
input_fn=lambda: torch.rand(10, 10),
cpp_var_map={"i": i, "t": "_get_input()"},
reference_fn=lambda t, *_: loss_reference_fns["KLDivLoss"](
i.type_as(t), t, reduction="none"
),
pickle=False,
)
def kldivloss_no_reduce_test():
t = torch.randn(10, 10)
return dict(
fullname="KLDivLoss_no_reduce",
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction="none")
),
cpp_function_call="F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))",
input_fn=lambda: torch.rand(10, 10).log(),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: loss_reference_fns["KLDivLoss"](
i, t.type_as(i), reduction="none"
),
pickle=False,
)
def kldivloss_no_reduce_scalar_test():
t = torch.randn(())
return dict(
fullname="KLDivLoss_no_reduce_scalar",
constructor=wrap_functional(
lambda i: F.kl_div(i, t.type_as(i), reduction="none")
),
cpp_function_call="F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))",
input_fn=lambda: torch.rand(()).log(),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: loss_reference_fns["KLDivLoss"](
i, t.type_as(i), reduction="none"
),
pickle=False,
)
def l1loss_no_reduce_test():
t = torch.randn(2, 3, 4)
return dict(
fullname="L1Loss_no_reduce",
constructor=wrap_functional(
lambda i: F.l1_loss(i, t.type_as(i), reduction="none")
),
cpp_function_call="F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))",
input_fn=lambda: torch.randn(2, 3, 4),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: (i - t.type_as(i)).abs(),
pickle=False,
)
def l1loss_no_reduce_scalar_test():
t = torch.randn(())
return dict(
fullname="L1Loss_no_reduce_scalar",
constructor=wrap_functional(
lambda i: F.l1_loss(i, t.type_as(i), reduction="none")
),
cpp_function_call="F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))",
input_fn=lambda: torch.randn(()),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: (i - t.type_as(i)).abs(),
pickle=False,
)
def mseloss_no_reduce_test():
input_size = (2, 3, 4, 5)
target = torch.randn(*input_size)
return dict(
fullname="MSELoss_no_reduce",
constructor=wrap_functional(
lambda i: F.mse_loss(i, target.type_as(i), reduction="none")
),
cpp_function_call="F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))",
input_size=input_size,
cpp_var_map={"i": "_get_input()", "target": target},
reference_fn=lambda i, *_: (i - target).pow(2),
pickle=False,
)
def mseloss_no_reduce_scalar_test():
input_size = ()
target = torch.randn(input_size)
return dict(
fullname="MSELoss_no_reduce_scalar",
constructor=wrap_functional(
lambda i: F.mse_loss(i, target.type_as(i), reduction="none")
),
cpp_function_call="F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))",
input_size=input_size,
cpp_var_map={"i": "_get_input()", "target": target},
reference_fn=lambda i, *_: (i - target).pow(2),
pickle=False,
)
def nllloss_no_reduce_test():
t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
kwargs = {"reduction": "none"}
return dict(
fullname="NLLLoss_no_reduce",
constructor=wrap_functional(
lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs)
),
cpp_function_call="""F::nll_loss(
i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))""",
input_fn=lambda: torch.rand(15, 10).log(),
cpp_var_map={"i": "_get_input()", "t": t},
reference_fn=lambda i, *_: loss_reference_fns["NLLLoss"](
i, t.type_as(i).long(), **kwargs
),
pickle=False,
)
def nllloss_no_reduce_ignore_index_test():
    """Spec for testing F.nll_loss with ignore_index=2 and reduction='none'."""
    tgt = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    loss_kwargs = {"ignore_index": 2, "reduction": "none"}
    return {
        "fullname": "NLLLoss_no_reduce_ignore_index",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **loss_kwargs)
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(2).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(15, 10).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLoss"](
            i, tgt.type_as(i).long(), **loss_kwargs
        ),
        "pickle": False,
    }
def nllloss_no_reduce_weights_test():
    """Spec for testing F.nll_loss with per-class weights and reduction='none'."""
    tgt = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    w = torch.rand(10)

    def make_kwargs(i):
        # weight must match the input's dtype, so it is rebuilt per call
        return {"weight": w.type_as(i), "reduction": "none"}

    return {
        "fullname": "NLLLoss_no_reduce_weights",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **make_kwargs(i))
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(15, 10).add(1e-2).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weight": w},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLoss"](
            i, tgt.type_as(i).long(), **make_kwargs(i)
        ),
        "pickle": False,
    }
def nllloss_no_reduce_weights_ignore_index_test():
    """Spec for testing F.nll_loss with per-class weights, ignore_index=2 and
    reduction='none'.
    """
    t = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    weight = torch.rand(10)

    def kwargs(i):
        # weight must match the input's dtype, so it is rebuilt per call
        return {"weight": weight.type_as(i), "reduction": "none", "ignore_index": 2}

    return dict(
        fullname="NLLLoss_no_reduce_weights_ignore_index",
        constructor=wrap_functional(
            # Pass `i` directly (not `i.data`) for consistency with the sibling
            # nll_loss specs and with reference_fn below; `type_as` only reads
            # the dtype, so the resulting kwargs are identical either way.
            lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))
        ),
        cpp_function_call="""F::nll_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(2))""",
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        cpp_var_map={"i": "_get_input()", "t": t, "weight": weight},
        reference_fn=lambda i, *_: loss_reference_fns["NLLLoss"](
            i, t.type_as(i).long(), **kwargs(i)
        ),
        pickle=False,
    )
def nllloss_no_reduce_weights_ignore_index_neg_test():
    """Spec for testing F.nll_loss with weights, ignore_index=-1, reduction='none'."""
    tgt = Variable(torch.Tensor(15).uniform_().mul(10).floor().long())
    w = torch.rand(10)

    def make_kwargs(i):
        # weight must match the input's dtype, so it is rebuilt per call
        return {"weight": w.type_as(i), "reduction": "none", "ignore_index": -1}

    return {
        "fullname": "NLLLoss_no_reduce_weights_ignore_index_neg",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **make_kwargs(i))
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(-1))""",
        # NOTE: a fixed input tensor (``input``, not ``input_fn``) as in the
        # original spec.
        "input": torch.rand(15, 10).add(1e-2).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weight": w},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLoss"](
            i, tgt.type_as(i).long(), **make_kwargs(i)
        ),
        "pickle": False,
    }
def nllloss2d_no_reduce_test():
    """Spec for testing F.nll_loss on 2d spatial input with reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    loss_kwargs = {"reduction": "none"}
    return {
        "fullname": "NLLLoss2d_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **loss_kwargs)
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **loss_kwargs
        ),
        "pickle": False,
    }
def nllloss2d_no_reduce_ignore_index_test():
    """Spec for testing 2d F.nll_loss with ignore_index=1 and reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    loss_kwargs = {"ignore_index": 1, "reduction": "none"}
    return {
        "fullname": "NLLLoss2d_no_reduce_ignore_index",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **loss_kwargs)
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **loss_kwargs
        ),
        "pickle": False,
    }
def nllloss2d_no_reduce_weights_test():
    """Spec for testing 2d F.nll_loss with class weights and reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5).mul(3).floor().long())
    w = torch.rand(3)

    def make_kwargs(i):
        # weight must match the input's dtype, so it is rebuilt per call
        return {"weight": w.type_as(i), "reduction": "none"}

    return {
        "fullname": "NLLLoss2d_no_reduce_weights",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **make_kwargs(i))
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weight": w},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **make_kwargs(i)
        ),
        "pickle": False,
    }
def nlllossNd_no_reduce_test():
    """Spec for testing F.nll_loss on higher-dim input with reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    loss_kwargs = {"reduction": "none"}
    return {
        "fullname": "NLLLossNd_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **loss_kwargs)
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **loss_kwargs
        ),
        "pickle": False,
    }
def nlllossNd_no_reduce_ignore_index_test():
    """Spec for testing Nd F.nll_loss with ignore_index=1 and reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    loss_kwargs = {"ignore_index": 1, "reduction": "none"}
    return {
        "fullname": "NLLLossNd_no_reduce_ignore_index",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **loss_kwargs)
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **loss_kwargs
        ),
        "pickle": False,
    }
def nlllossNd_no_reduce_weights_test():
    """Spec for testing Nd F.nll_loss with class weights and reduction='none'."""
    tgt = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long())
    w = torch.rand(3)

    def make_kwargs(i):
        # weight must match the input's dtype, so it is rebuilt per call
        return {"weight": w.type_as(i), "reduction": "none"}

    return {
        "fullname": "NLLLossNd_no_reduce_weights",
        "constructor": wrap_functional(
            lambda i: F.nll_loss(i, tgt.type_as(i).long(), **make_kwargs(i))
        ),
        "cpp_function_call": """F::nll_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))""",
        "input_fn": lambda: torch.rand(2, 3, 5, 5, 2, 2).log(),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weight": w},
        "reference_fn": lambda i, *_: loss_reference_fns["NLLLossNd"](
            i, tgt.type_as(i).long(), **make_kwargs(i)
        ),
        "pickle": False,
    }
def smoothl1loss_no_reduce_test():
    """Spec for testing F.smooth_l1_loss with reduction='none'."""
    tgt = torch.randn(2, 3, 4)
    return {
        "fullname": "SmoothL1Loss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.smooth_l1_loss(i, tgt.type_as(i), reduction="none")
        ),
        "cpp_function_call": """F::smooth_l1_loss(
            i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(2, 3, 4),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["SmoothL1Loss"](
            i, tgt.type_as(i), reduction="none"
        ),
        "pickle": False,
    }
def smoothl1loss_no_reduce_scalar_test():
    """Spec for testing F.smooth_l1_loss on a 0-dim input with reduction='none'."""
    tgt = torch.randn(())
    return {
        "fullname": "SmoothL1Loss_no_reduce_scalar",
        "constructor": wrap_functional(
            lambda i: F.smooth_l1_loss(i, tgt.type_as(i), reduction="none")
        ),
        "cpp_function_call": """F::smooth_l1_loss(
            i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(()),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["SmoothL1Loss"](
            i, tgt.type_as(i), reduction="none"
        ),
        "pickle": False,
    }
def multilabelmarginloss_0d_no_reduce_test():
    """Spec for testing F.multilabel_margin_loss on 0-dim input, reduction='none'."""
    tgt = torch.zeros(()).long()
    return {
        "fullname": "MultiLabelMarginLoss_0d_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multilabel_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multilabel_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(()),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiLabelMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multilabelmarginloss_1d_no_reduce_test():
    """Spec for testing F.multilabel_margin_loss on 1d input with reduction='none'."""
    tgt = Variable(torch.rand(10).mul(10).floor().long())
    return {
        "fullname": "MultiLabelMarginLoss_1d_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multilabel_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multilabel_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiLabelMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multilabelmarginloss_index_neg_test():
    """Spec for testing F.multilabel_margin_loss with -1 (stop) indices present."""
    # Roughly half the entries are clamped to -1, which terminates each row's
    # label list for this loss.
    tgt = Variable(
        torch.clamp(torch.rand(5, 10).add(-0.5).mul(20).floor().long(), min=-1)
    )
    return {
        "fullname": "MultiLabelMarginLoss_index_neg",
        "constructor": wrap_functional(
            lambda i: F.multilabel_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multilabel_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiLabelMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multilabelmarginloss_no_reduce_test():
    """Spec for testing F.multilabel_margin_loss with reduction='none'."""
    tgt = Variable(torch.rand(5, 10).mul(10).floor().long())
    return {
        "fullname": "MultiLabelMarginLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multilabel_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multilabel_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiLabelMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def hingeembeddingloss_no_reduce_test():
    """Spec for testing F.hinge_embedding_loss with reduction='none'."""
    # Random +/-1 targets.
    tgt = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
    return {
        "fullname": "HingeEmbeddingLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.hinge_embedding_loss(i, tgt.type_as(i), reduction="none")
        ),
        "cpp_function_call": """F::hinge_embedding_loss(
            i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["HingeEmbeddingLoss"](
            i, tgt.type_as(i), reduction="none"
        ),
        "check_sum_reduction": True,
        "pickle": False,
    }
def hingeembeddingloss_margin_no_reduce_test():
    """Spec for testing F.hinge_embedding_loss with margin=0.5, reduction='none'."""
    # Random +/-1 targets.
    tgt = Variable(torch.randn(10).gt(0).double().mul_(2).sub(1))
    return {
        "fullname": "HingeEmbeddingLoss_margin_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.hinge_embedding_loss(
                i, tgt.type_as(i), margin=0.5, reduction="none"
            )
        ),
        "cpp_function_call": """F::hinge_embedding_loss(
            i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().margin(0.5).reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["HingeEmbeddingLoss"](
            i, tgt.type_as(i), margin=0.5, reduction="none"
        ),
        "check_sum_reduction": True,
        "pickle": False,
    }
def softmarginloss_no_reduce_test():
    """Spec for testing F.soft_margin_loss with reduction='none'."""
    tgt = torch.randn(5, 5)
    return {
        "fullname": "SoftMarginLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.soft_margin_loss(i, tgt.type_as(i), reduction="none")
        ),
        "cpp_function_call": """F::soft_margin_loss(
            i, t.to(i.options()), F::SoftMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 5),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["SoftMarginLoss"](
            i, tgt.type_as(i), reduction="none"
        ),
        "pickle": False,
    }
def multilabelsoftmarginloss_no_reduce_test():
    """Spec for testing F.multilabel_soft_margin_loss with reduction='none'."""
    # Random 0/1 multi-label targets.
    tgt = torch.rand(5, 10).mul(2).floor()
    return {
        "fullname": "MultiLabelSoftMarginLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multilabel_soft_margin_loss(i, tgt.type_as(i), reduction="none")
        ),
        "cpp_function_call": """F::multilabel_soft_margin_loss(
            i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: (
            -(tgt * i.sigmoid().log() + (1 - tgt) * (-i).sigmoid().log())
        ).sum(dim=1)
        / i.size(1),
        "check_gradgrad": False,
        "pickle": False,
    }
def multilabelsoftmarginloss_weights_no_reduce_test():
    """Spec for testing weighted F.multilabel_soft_margin_loss, reduction='none'."""
    # Random 0/1 multi-label targets and per-class weights.
    tgt = torch.rand(5, 10).mul(2).floor()
    w = torch.rand(10)
    return {
        "fullname": "MultiLabelSoftMarginLoss_weights_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multilabel_soft_margin_loss(
                i, tgt.type_as(i), weight=w.type_as(i), reduction="none"
            )
        ),
        "cpp_function_call": """F::multilabel_soft_margin_loss(
            i, t.to(i.options()),
            F::MultilabelSoftMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weights": w},
        "reference_fn": lambda i, *_: (
            -(tgt * i.sigmoid().log() + (1 - tgt) * (-i).sigmoid().log()) * w
        ).sum(dim=1)
        / i.size(1),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_no_reduce_test():
    """Spec for testing F.multi_margin_loss with reduction='none'."""
    tgt = torch.rand(5).mul(8).floor().long()
    return {
        "fullname": "MultiMarginLoss_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_1d_no_reduce_test():
    """Spec for testing F.multi_margin_loss on 1d input with reduction='none'."""
    tgt = torch.rand(1).mul(8).floor().long()
    return {
        "fullname": "MultiMarginLoss_1d_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_1d_input_0d_target_no_reduce_test():
    """Spec for testing F.multi_margin_loss with a 0-dim target, reduction='none'."""
    tgt = torch.rand(()).mul(8).floor().long()
    return {
        # NOTE: lowercase fullname kept verbatim — it is part of the generated
        # test names.
        "fullname": "multimarginloss_1d_input_0d_target_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(i, tgt.type_as(i).long(), reduction="none")
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_p_no_reduce_test():
    """Spec for testing F.multi_margin_loss with p=2 and reduction='none'."""
    tgt = torch.rand(5).mul(8).floor().long()
    return {
        "fullname": "MultiMarginLoss_p_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(i, tgt.type_as(i).long(), p=2, reduction="none")
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().p(2).reduction(torch::kNone))""",
        # Inputs are clamped away from 0 and 1 for numerical stability with p=2.
        "input_fn": lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), p=2, reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_margin_no_reduce_test():
    """Spec for testing F.multi_margin_loss with margin=0.5, reduction='none'."""
    tgt = torch.rand(5).mul(8).floor().long()
    return {
        "fullname": "MultiMarginLoss_margin_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(
                i, tgt.type_as(i).long(), margin=0.5, reduction="none"
            )
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::MultiMarginLossFuncOptions().margin(0.5).reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), margin=0.5, reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def multimarginloss_weights_no_reduce_test():
    """Spec for testing weighted F.multi_margin_loss with reduction='none'."""
    tgt = torch.rand(5).mul(8).floor().long()
    w = torch.rand(10)
    return {
        "fullname": "MultiMarginLoss_weights_no_reduce",
        "constructor": wrap_functional(
            lambda i: F.multi_margin_loss(
                i, tgt.type_as(i).long(), weight=w.type_as(i), reduction="none"
            )
        ),
        "cpp_function_call": """F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::MultiMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))""",
        "input_fn": lambda: torch.randn(5, 10),
        "cpp_var_map": {"i": "_get_input()", "t": tgt, "weights": w},
        "reference_fn": lambda i, *_: loss_reference_fns["MultiMarginLoss"](
            i, tgt.data.type_as(i).long(), weight=w, reduction="none"
        ),
        "check_sum_reduction": True,
        "check_gradgrad": False,
        "pickle": False,
    }
def fractional_max_pool2d_test(test_case):
    """Spec for testing nn.FractionalMaxPool2d.

    Args:
        test_case: ``"ratio"`` for output_ratio-driven pooling or ``"size"``
            for an explicit output_size.

    Returns:
        A module-test spec dict for the requested variant.

    Raises:
        ValueError: if ``test_case`` is not a recognized variant name (the
            original silently returned ``None`` here, which produced confusing
            downstream failures).
    """
    # Fixed random samples so the Python and C++ modules pool identically.
    random_samples = torch.DoubleTensor(1, 3, 2).uniform_()
    if test_case == "ratio":
        return dict(
            constructor=lambda: nn.FractionalMaxPool2d(
                2, output_ratio=0.5, _random_samples=random_samples
            ),
            cpp_constructor_args="""torch::nn::FractionalMaxPool2dOptions(2)
                                    .output_ratio(0.5)
                                    ._random_samples(random_samples)""",
            input_size=(1, 3, 5, 7),
            cpp_var_map={"random_samples": random_samples},
            fullname="FractionalMaxPool2d_ratio",
        )
    elif test_case == "size":
        return dict(
            constructor=lambda: nn.FractionalMaxPool2d(
                (2, 3), output_size=(4, 3), _random_samples=random_samples
            ),
            cpp_constructor_args="""torch::nn::FractionalMaxPool2dOptions({2, 3})
                                    .output_size(std::vector<int64_t>({4, 3}))
                                    ._random_samples(random_samples)""",
            input_size=(1, 3, 7, 6),
            cpp_var_map={"random_samples": random_samples},
            fullname="FractionalMaxPool2d_size",
        )
    raise ValueError(f"unknown FractionalMaxPool2d test case: {test_case!r}")
def fractional_max_pool3d_test(test_case):
    """Spec for testing nn.FractionalMaxPool3d.

    Args:
        test_case: ``"ratio"`` (output_ratio-driven), ``"size"`` (cubic
            output_size) or ``"asymsize"`` (asymmetric output_size).

    Returns:
        A module-test spec dict for the requested variant.

    Raises:
        ValueError: if ``test_case`` is not a recognized variant name (the
            original silently returned ``None`` here, which produced confusing
            downstream failures).
    """
    # Fixed random samples so the Python and C++ modules pool identically.
    random_samples = torch.DoubleTensor(2, 4, 3).uniform_()
    if test_case == "ratio":
        return dict(
            constructor=lambda: nn.FractionalMaxPool3d(
                2, output_ratio=0.5, _random_samples=random_samples
            ),
            cpp_constructor_args="""torch::nn::FractionalMaxPool3dOptions(2)
                                    .output_ratio(0.5)
                                    ._random_samples(random_samples)""",
            input_size=(2, 4, 5, 5, 5),
            cpp_var_map={"random_samples": random_samples},
            fullname="FractionalMaxPool3d_ratio",
        )
    elif test_case == "size":
        return dict(
            constructor=lambda: nn.FractionalMaxPool3d(
                (2, 2, 2), output_size=(4, 4, 4), _random_samples=random_samples
            ),
            cpp_constructor_args="""torch::nn::FractionalMaxPool3dOptions({2, 2, 2})
                                    .output_size(std::vector<int64_t>({4, 4, 4}))
                                    ._random_samples(random_samples)""",
            input_size=(2, 4, 7, 7, 7),
            cpp_var_map={"random_samples": random_samples},
            fullname="FractionalMaxPool3d_size",
        )
    elif test_case == "asymsize":
        return dict(
            constructor=lambda: nn.FractionalMaxPool3d(
                (4, 2, 3), output_size=(10, 3, 2), _random_samples=random_samples
            ),
            cpp_constructor_args="""torch::nn::FractionalMaxPool3dOptions({4, 2, 3})
                                    .output_size(std::vector<int64_t>({10, 3, 2}))
                                    ._random_samples(random_samples)""",
            input_size=(2, 4, 16, 7, 5),
            cpp_var_map={"random_samples": random_samples},
            fullname="FractionalMaxPool3d_asymsize",
        )
    raise ValueError(f"unknown FractionalMaxPool3d test case: {test_case!r}")
new_module_tests = [
poissonnllloss_no_reduce_test(),
bceloss_no_reduce_test(),
bceloss_weights_no_reduce_test(),
bce_with_logistic_legacy_enum_test(),
bce_with_logistic_no_reduce_test(),
bceloss_no_reduce_scalar_test(),
bceloss_weights_no_reduce_scalar_test(),
bce_with_logistic_no_reduce_scalar_test(),
kldivloss_with_target_no_reduce_test(),
kldivloss_no_reduce_test(),
kldivloss_no_reduce_scalar_test(),
l1loss_no_reduce_test(),
l1loss_no_reduce_scalar_test(),
mseloss_no_reduce_test(),
mseloss_no_reduce_scalar_test(),
nllloss_no_reduce_test(),
nllloss_no_reduce_ignore_index_test(),
nllloss_no_reduce_weights_test(),
nllloss_no_reduce_weights_ignore_index_test(),
nllloss_no_reduce_weights_ignore_index_neg_test(),
nllloss2d_no_reduce_test(),
nllloss2d_no_reduce_weights_test(),
nllloss2d_no_reduce_ignore_index_test(),
nlllossNd_no_reduce_test(),
nlllossNd_no_reduce_weights_test(),
nlllossNd_no_reduce_ignore_index_test(),
smoothl1loss_no_reduce_test(),
smoothl1loss_no_reduce_scalar_test(),
multilabelmarginloss_0d_no_reduce_test(),
multilabelmarginloss_1d_no_reduce_test(),
multilabelmarginloss_index_neg_test(),
multilabelmarginloss_no_reduce_test(),
hingeembeddingloss_no_reduce_test(),
hingeembeddingloss_margin_no_reduce_test(),
softmarginloss_no_reduce_test(),
multilabelsoftmarginloss_no_reduce_test(),
multilabelsoftmarginloss_weights_no_reduce_test(),
multimarginloss_no_reduce_test(),
multimarginloss_1d_no_reduce_test(),
multimarginloss_1d_input_0d_target_no_reduce_test(),
multimarginloss_p_no_reduce_test(),
multimarginloss_margin_no_reduce_test(),
multimarginloss_weights_no_reduce_test(),
fractional_max_pool2d_test("ratio"),
fractional_max_pool2d_test("size"),
fractional_max_pool3d_test("ratio"),
fractional_max_pool3d_test("size"),
fractional_max_pool3d_test("asymsize"),
dict(
module_name="BatchNorm1d",
constructor_args=(10,),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(10)",
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc="affine",
test_cuda=(not TEST_WITH_ROCM),
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(5,),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(5)",
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc="3d_input",
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(10, 1e-3, None),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(10).eps(1e-3).momentum(c10::nullopt)",
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc="affine_simple_average",
test_cuda=(not TEST_WITH_ROCM),
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(10, 1e-3, 0.3, False),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(10).eps(1e-3).momentum(0.3).affine(false)",
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc="not_affine",
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(10, 1e-3, 0.3, True, False),
cpp_constructor_args="""torch::nn::BatchNorm1dOptions(10)
.eps(1e-3).momentum(0.3).affine(true).track_running_stats(false)""",
input_size=(4, 10),
cudnn=True,
check_eval=True,
desc="not_tracking_stats",
test_cuda=(not TEST_WITH_ROCM),
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(5).eps(1e-3).momentum(0.3).affine(false)",
input_size=(4, 5, 3),
cudnn=True,
check_eval=True,
desc="3d_input_not_affine",
pickle=False,
),
dict(
module_name="BatchNorm1d",
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args="torch::nn::BatchNorm1dOptions(5).eps(1e-3).momentum(0.3).affine(false)",
input_size=(0, 5, 9),
cudnn=True,
check_eval=True,
desc="zero_batch",
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::BatchNorm2dOptions(3)",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(3, 1e-3, None),
cpp_constructor_args="torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(c10::nullopt)",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc="2d_simple_average",
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(3, 1e-3, 0.8),
cpp_constructor_args="torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(0.8)",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc="momentum",
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(3, 1e-3, 0.8, False),
cpp_constructor_args="torch::nn::BatchNorm2dOptions(3).eps(1e-3).momentum(0.8).affine(false)",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc="not_affine",
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(3, 1e-3, 0.8, True, False),
cpp_constructor_args="""torch::nn::BatchNorm2dOptions(3)
.eps(1e-3).momentum(0.8).affine(true).track_running_stats(false)""",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc="not_tracking_stats",
pickle=False,
),
dict(
module_name="BatchNorm2d",
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args="torch::nn::BatchNorm2dOptions(5).eps(1e-3).momentum(0.3).affine(false)",
input_size=(0, 5, 2, 2),
cudnn=True,
check_eval=True,
desc="zero_batch",
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::BatchNorm3dOptions(3)",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(3, 1e-3, None),
cpp_constructor_args="torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(c10::nullopt)",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc="3d_simple_average",
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(3, 1e-3, 0.7),
cpp_constructor_args="torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(0.7)",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc="momentum",
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(3, 1e-3, 0.7, False),
cpp_constructor_args="torch::nn::BatchNorm3dOptions(3).eps(1e-3).momentum(0.7).affine(false)",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc="not_affine",
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(3, 1e-3, 0.7, True, False),
cpp_constructor_args="""torch::nn::BatchNorm3dOptions(3)
.eps(1e-3).momentum(0.7).affine(true).track_running_stats(false)""",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc="not_tracking_stats",
pickle=False,
),
dict(
module_name="BatchNorm3d",
constructor_args=(5, 1e-3, 0.3, False),
cpp_constructor_args="torch::nn::BatchNorm3dOptions(5).eps(1e-3).momentum(0.3).affine(false)",
input_size=(0, 5, 2, 2, 2),
cudnn=True,
check_eval=True,
desc="zero_batch",
pickle=False,
),
dict(
module_name="InstanceNorm1d",
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args="torch::nn::InstanceNorm1dOptions(3).eps(1e-3).momentum(0.3)",
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
pickle=False,
),
dict(
module_name="InstanceNorm1d",
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args="""torch::nn::InstanceNorm1dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)""",
input_size=(4, 3, 15),
cudnn=True,
check_eval=True,
desc="tracking_stats",
pickle=False,
),
dict(
module_name="InstanceNorm2d",
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args="torch::nn::InstanceNorm2dOptions(3).eps(1e-3).momentum(0.3)",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
pickle=False,
),
dict(
module_name="InstanceNorm2d",
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args="""torch::nn::InstanceNorm2dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)""",
input_size=(2, 3, 6, 6),
cudnn=True,
check_eval=True,
desc="tracking_stats",
pickle=False,
),
dict(
module_name="InstanceNorm3d",
constructor_args=(3, 1e-3, 0.3),
cpp_constructor_args="torch::nn::InstanceNorm3dOptions(3).eps(1e-3).momentum(0.3)",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
pickle=False,
),
dict(
module_name="InstanceNorm3d",
constructor_args=(3, 1e-3, 0.3, False, True),
cpp_constructor_args="""torch::nn::InstanceNorm3dOptions(3)
.eps(1e-3).momentum(0.3).affine(false).track_running_stats(true)""",
input_size=(2, 3, 4, 4, 4),
cudnn=True,
check_eval=True,
desc="tracking_stats",
pickle=False,
),
dict(
module_name="LayerNorm",
constructor_args=([5], 1e-3),
cpp_constructor_args="torch::nn::LayerNormOptions({5}).eps(1e-3)",
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc="1d_elementwise_affine",
),
dict(
module_name="LayerNorm",
constructor_args=([5], 1e-3, False),
cpp_constructor_args="torch::nn::LayerNormOptions({5}).eps(1e-3).elementwise_affine(false)",
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc="1d_no_elementwise_affine",
),
dict(
module_name="LayerNorm",
constructor_args=([2, 2, 5], 1e-3),
cpp_constructor_args="torch::nn::LayerNormOptions({2, 2, 5}).eps(1e-3)",
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
desc="3d_elementwise_affine",
),
dict(
module_name="LayerNorm",
constructor_args=([2, 2, 5], 1e-3, False),
cpp_constructor_args="torch::nn::LayerNormOptions({2, 2, 5}).eps(1e-3).elementwise_affine(false)",
input_size=(4, 2, 2, 5),
cudnn=True,
check_eval=True,
desc="3d_no_elementwise_affine",
),
dict(
module_name="LayerNorm",
constructor_args=([5], 1e-3),
cpp_constructor_args="torch::nn::LayerNormOptions({5}).eps(1e-3)",
input_size=(0, 5),
cudnn=True,
check_eval=True,
desc="1d_empty_elementwise_affine",
),
dict(
module_name="GroupNorm",
constructor_args=(3, 6, 1e-3),
cpp_constructor_args="torch::nn::GroupNormOptions(3, 6).eps(1e-3)",
input_size=(4, 6, 5),
cudnn=True,
check_eval=True,
desc="1d_affine",
),
dict(
module_name="GroupNorm",
constructor_args=(5, 5, 1e-3, False),
cpp_constructor_args="torch::nn::GroupNormOptions(5, 5).eps(1e-3).affine(false)",
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc="1d_no_affine_IN", # this setting is equivalent with InstanceNormi
),
dict(
module_name="GroupNorm",
constructor_args=(1, 5, 1e-3, False),
cpp_constructor_args="torch::nn::GroupNormOptions(1, 5).eps(1e-3).affine(false)",
input_size=(4, 5, 5),
cudnn=True,
check_eval=True,
desc="1d_no_affine_LN", # this setting is equivalent with LayerNorm
),
dict(
module_name="GroupNorm",
constructor_args=(3, 6, 1e-3),
cpp_constructor_args="torch::nn::GroupNormOptions(3, 6).eps(1e-3)",
input_size=(4, 6, 2, 3),
cudnn=True,
check_eval=True,
desc="2d_affine",
),
dict(
module_name="GroupNorm",
constructor_args=(3, 3, 1e-3, False),
cpp_constructor_args="torch::nn::GroupNormOptions(3, 3).eps(1e-3).affine(false)",
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
desc="2d_no_affine_IN", # this setting is equivalent with InstanceNorm
),
dict(
module_name="GroupNorm",
constructor_args=(1, 3, 1e-3, False),
cpp_constructor_args="torch::nn::GroupNormOptions(1, 3).eps(1e-3).affine(false)",
input_size=(4, 3, 2, 3),
cudnn=True,
check_eval=True,
desc="2d_no_affine_LN", # this setting is equivalent with LayerNorm
),
dict(
module_name="Conv1d",
constructor_args=(4, 5, 3),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 3)",
input_size=(2, 4, 10),
cudnn=True,
),
dict(
module_name="Conv1d",
constructor_args=(4, 5, 3, 2),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 3).stride(2)",
input_size=(2, 4, 10),
cudnn=True,
desc="stride",
),
dict(
module_name="Conv1d",
constructor_args=(4, 5, 3, 1, 1),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 3).stride(1).padding(1)",
input_size=(2, 4, 10),
cudnn=True,
desc="pad1",
),
dict(
module_name="Conv1d",
constructor_args=(4, 5, 5, 1, 2),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 5).stride(1).padding(2)",
input_size=(2, 4, 10),
cudnn=True,
desc="pad2",
),
dict(
module_name="Conv1d",
constructor_args=(4, 4, 3, 1, 1),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 4, 3).stride(1).padding(1)",
input_size=(1, 4, 1),
cudnn=True,
desc="pad1size1",
),
dict(
module_name="Conv1d",
constructor_args=(4, 4, 5, 1, 2),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 4, 5).stride(1).padding(2)",
input_size=(1, 4, 1),
cudnn=True,
desc="pad2size1",
),
dict(
module_name="Conv1d",
constructor_args=(4, 5, 3),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 3)",
input_size=(0, 4, 10),
cudnn=True,
desc="zero_batch",
test_cuda=(not TEST_WITH_ROCM),
),
dict(
fullname="Conv1d_dilated",
constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 5, 3).dilation(2)",
input_size=(2, 4, 10),
),
dict(
fullname="Conv1d_groups",
constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2),
cpp_constructor_args="torch::nn::Conv1dOptions(4, 6, 3).groups(2)",
input_size=(2, 4, 6),
cudnn=True,
),
dict(
fullname="ConvTranspose1d",
constructor=lambda: nn.ConvTranspose1d(
3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)
),
cpp_constructor_args="torch::nn::ConvTranspose1dOptions(3, 4, 3).stride(3).padding(1).output_padding(1)",
cudnn=True,
input_size=(1, 3, 7),
),
dict(
module_name="ConvTranspose1d",
constructor_args=(3, 4, 3, 2, 1, 1, 1, False),
cpp_constructor_args="""torch::nn::ConvTranspose1dOptions(3, 4, 3)
.stride(2).padding(1).output_padding(1).groups(1).bias(false)""",
input_size=(1, 3, 6),
cudnn=True,
desc="no_bias",
),
dict(
module_name="ConvTranspose1d",
constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2),
cpp_constructor_args="""torch::nn::ConvTranspose1dOptions(3, 4, 3)
.stride(2).padding(1).output_padding(1).groups(1).bias(true).dilation(2)""",
input_size=(1, 3, 6),
cudnn=True,
desc="dilated",
),
dict(
fullname="ConvTranspose1d_groups",
constructor=lambda: nn.ConvTranspose1d(
4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2
),
cpp_constructor_args="""torch::nn::ConvTranspose1dOptions(4, 6, 3)
.stride(3).padding(1).output_padding(1).groups(2)""",
cudnn=True,
input_size=(2, 4, 7),
),
dict(
module_name="MaxPool1d",
constructor_args=(4,),
cpp_constructor_args="torch::nn::MaxPool1dOptions(4)",
input_size=(2, 10, 4),
),
dict(
module_name="MaxPool1d",
constructor_args=(4, 4),
cpp_constructor_args="torch::nn::MaxPool1dOptions(4).stride(4)",
input_size=(2, 10, 4),
desc="stride",
),
dict(
module_name="Conv2d",
constructor_args=(3, 4, (3, 2)),
cpp_constructor_args="torch::nn::Conv2dOptions(3, 4, {3, 2})",
input_size=(2, 3, 7, 5),
cudnn=True,
check_with_long_tensor=True,
),
dict(
module_name="Conv2d",
constructor_args=(3, 4, (3, 3), (2, 2)),
cpp_constructor_args="torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2})",
input_size=(2, 3, 6, 6),
cudnn=True,
desc="strided",
check_with_long_tensor=True,
),
dict(
module_name="Conv2d",
constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)),
cpp_constructor_args="torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2}).padding({1, 1})",
input_size=(2, 3, 6, 6),
cudnn=True,
desc="padding",
check_with_long_tensor=True,
),
dict(
module_name="Conv2d",
constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)),
cpp_constructor_args="torch::nn::Conv2dOptions(3, 2, {3, 3}).stride({2, 2}).padding({1, 1}).dilation({2, 2})",
input_size=(2, 3, 8, 8),
cudnn=True,
desc="dilated",
check_with_long_tensor=True,
),
dict(
module_name="Conv2d",
constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False),
cpp_constructor_args="""torch::nn::Conv2dOptions(3, 4, {3, 2})
.stride(1).padding(0).dilation(1).groups(1).bias(false)""",
input_size=(2, 3, 6, 5),
cudnn=True,
desc="no_bias",
check_with_long_tensor=True,
),
dict(
module_name="Conv2d",
constructor_args=(3, 4, (3, 2)),
cpp_constructor_args="torch::nn::Conv2dOptions(3, 4, {3, 2})",
input_size=(0, 3, 7, 5),
cudnn=True,
desc="zero_batch",
check_with_long_tensor=True,
test_cuda=(not TEST_WITH_ROCM),
),
dict(
fullname="Conv2d_groups",
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)",
input_size=(2, 4, 6, 5),
cudnn=True,
check_with_long_tensor=True,
),
dict(
fullname="Conv2d_groups_thnn",
constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)",
input_size=(2, 4, 6, 5),
check_with_long_tensor=True,
),
dict(
module_name="ConvTranspose2d",
constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)),
cpp_constructor_args="""torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({3, 2}).padding(1).output_padding({1, 1})""",
cudnn=True,
input_size=(1, 3, 7, 6),
check_with_long_tensor=True,
),
dict(
module_name="ConvTranspose2d",
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)),
cpp_constructor_args="""torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({2, 3})
.padding(1)
.output_padding({1, 1})
.groups(1)
.bias(false)
.dilation({2, 2})""",
input_size=(1, 3, 6, 7),
cudnn=True,
desc="dilated",
check_with_long_tensor=True,
),
dict(
module_name="ConvTranspose2d",
constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False),
cpp_constructor_args="""torch::nn::ConvTranspose2dOptions(3, 4, 3)
.stride({2, 3}).padding(1).output_padding({1, 1}).groups(1).bias(false)""",
input_size=(1, 3, 6, 7),
cudnn=True,
desc="no_bias",
check_with_long_tensor=True,
),
dict(
fullname="ConvTranspose2d_groups",
constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2),
cpp_constructor_args="torch::nn::ConvTranspose2dOptions(2, 4, {2, 3}).groups(2)",
input_size=(1, 2, 4, 5),
cudnn=True,
check_with_long_tensor=True,
),
dict(
fullname="Conv2d_depthwise",
constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 4, {3, 3}).groups(4)",
input_size=(2, 4, 6, 6),
),
dict(
fullname="Conv2d_depthwise_with_multiplier",
constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 8, {3, 3}).groups(4)",
input_size=(2, 4, 6, 6),
),
dict(
fullname="Conv2d_depthwise_strided",
constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 4, {3, 3}).stride({2, 2}).groups(4)",
input_size=(2, 4, 6, 6),
),
dict(
fullname="Conv2d_depthwise_padded",
constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 4, {3, 3}).padding({1, 1}).groups(4)",
input_size=(2, 4, 6, 6),
),
dict(
fullname="Conv2d_depthwise_dilated",
constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4),
cpp_constructor_args="torch::nn::Conv2dOptions(4, 4, {2, 2}).dilation({2, 2}).groups(4)",
input_size=(2, 4, 5, 5),
),
dict(
module_name="MaxPool2d",
constructor_args=((3, 3), (2, 2), (1, 1)),
cpp_constructor_args="torch::nn::MaxPool2dOptions({3, 3}).stride({2, 2}).padding({1, 1})",
input_size=(3, 7, 7),
desc="3d_input",
check_gradgrad=False,
),
dict(
module_name="MaxPool2d",
constructor_args=((3, 3), (2, 2), (1, 1)),
cpp_constructor_args="torch::nn::MaxPool2dOptions({3, 3}).stride({2, 2}).padding({1, 1})",
input_size=(1, 3, 7, 7),
check_with_channels_last=True,
desc="4d_input",
check_gradgrad=False,
),
dict(
module_name="AvgPool1d",
constructor_args=(2,),
cpp_constructor_args="torch::nn::AvgPool1dOptions(2)",
input_size=(2, 3, 6),
),
dict(
module_name="AvgPool1d",
constructor_args=((2,), (2,)),
cpp_constructor_args="torch::nn::AvgPool1dOptions(2).stride(2)",
input_size=(2, 3, 6),
desc="stride",
),
dict(
module_name="AvgPool1d",
constructor_args=(2, 2, 1),
cpp_constructor_args="torch::nn::AvgPool1dOptions(2).stride(2).padding(1)",
input_size=(2, 3, 6),
desc="stride_pad",
),
dict(
module_name="AvgPool2d",
constructor_args=((2, 2),),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2})",
input_size=(2, 3, 6, 6),
),
dict(
module_name="AvgPool2d",
constructor_args=((2, 2), (2, 2)),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2})",
input_size=(2, 3, 6, 6),
desc="stride",
),
dict(
module_name="AvgPool2d",
constructor_args=((2, 2), (2, 2), (1, 1)),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).padding({1, 1})",
input_size=(2, 3, 6, 6),
desc="stride_pad",
),
dict(
fullname="AvgPool2d_divisor",
constructor=lambda: nn.AvgPool2d((2, 2), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2}).divisor_override(1)",
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool2d_divisor_stride",
constructor=lambda: nn.AvgPool2d((2, 2), (2, 2), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).divisor_override(1)",
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool2d_divisor_stride_pad",
constructor=lambda: nn.AvgPool2d((2, 2), (2, 2), (1, 1), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool2dOptions({2, 2}).stride({2, 2}).padding({1, 1}).divisor_override(1)",
input_size=(2, 3, 6, 6),
check_with_long_tensor=True,
),
dict(
module_name="LPPool2d",
constructor_args=(2, 2, 2),
cpp_constructor_args="torch::nn::LPPool2dOptions(2, 2).stride(2)",
input_size=(1, 3, 7, 7),
),
dict(
module_name="LPPool2d",
constructor_args=(1.5, 2),
cpp_constructor_args="torch::nn::LPPool2dOptions(1.5, 2)",
input_fn=lambda: torch.rand(1, 3, 7, 7),
desc="norm",
),
dict(
module_name="LPPool1d",
constructor_args=(1.5, 2),
cpp_constructor_args="torch::nn::LPPool1dOptions(1.5, 2)",
input_fn=lambda: torch.rand(1, 3, 7),
desc="norm",
),
dict(
module_name="LPPool1d",
constructor_args=(2, 2, 3),
cpp_constructor_args="torch::nn::LPPool1dOptions(2, 2).stride(3)",
input_size=(1, 3, 7),
),
dict(
module_name="LocalResponseNorm",
constructor_args=(3,),
cpp_constructor_args="torch::nn::LocalResponseNormOptions(3)",
input_size=(1, 5, 7),
desc="1d",
),
dict(
module_name="LocalResponseNorm",
constructor_args=(2,),
cpp_constructor_args="torch::nn::LocalResponseNormOptions(2)",
input_size=(1, 5, 7, 7),
desc="2d_uneven_pad",
),
dict(
module_name="LocalResponseNorm",
constructor_args=(1, 1.0, 0.5, 2.0),
cpp_constructor_args="torch::nn::LocalResponseNormOptions(1).alpha(1.).beta(0.5).k(2.)",
input_size=(1, 5, 7, 7, 7),
desc="3d_custom_params",
),
dict(
module_name="ReflectionPad1d",
constructor_args=((1, 2),),
cpp_constructor_args="torch::nn::ReflectionPad1dOptions({1, 2})",
input_size=(2, 3, 8),
),
dict(
module_name="ReflectionPad2d",
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args="torch::nn::ReflectionPad2dOptions({1, 2, 3, 4})",
input_size=(2, 3, 8, 8),
),
dict(
module_name="ReplicationPad1d",
constructor_args=((1, 2),),
cpp_constructor_args="torch::nn::ReplicationPad1dOptions({1, 2})",
input_size=(2, 3, 4),
),
dict(
module_name="ReplicationPad2d",
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args="torch::nn::ReplicationPad2dOptions({1, 2, 3, 4})",
input_size=(2, 3, 4, 4),
),
dict(
module_name="ZeroPad2d",
constructor_args=((1, 2, 3, 4),),
cpp_constructor_args="torch::nn::ZeroPad2dOptions({1, 2, 3, 4})",
input_size=(2, 3, 4, 4),
),
dict(
module_name="ZeroPad2d",
constructor_args=((-1, -1, -1, -2),),
cpp_constructor_args="torch::nn::ZeroPad2dOptions({-1, -1, -1, -2})",
input_size=(2, 3, 4, 4),
desc="negative_dims",
),
dict(
module_name="ConstantPad1d",
constructor_args=((1, 2), 2.0),
cpp_constructor_args="torch::nn::ConstantPad1dOptions({1, 2}, 2.)",
input_size=(2, 3, 4),
),
dict(
module_name="ConstantPad2d",
constructor_args=((1, 2, 3, 4), 2.0),
cpp_constructor_args="torch::nn::ConstantPad2dOptions({1, 2, 3, 4}, 2.)",
input_size=(2, 3, 4, 4),
),
dict(
module_name="ConstantPad3d",
constructor_args=((1, 2, 3, 4, 1, 0), 2.0),
cpp_constructor_args="torch::nn::ConstantPad3dOptions({1, 2, 3, 4, 1, 0}, 2.)",
input_size=(2, 3, 4, 4, 5),
),
dict(
module_name="Conv3d",
constructor_args=(3, 4, (2, 3, 4)),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, {2, 3, 4})",
input_size=(2, 3, 3, 4, 5),
cudnn=True,
check_with_long_tensor=True,
),
dict(
module_name="Conv3d",
constructor_args=(3, 4, (2, 3, 4), 1, 0, 1, 1, False),
cpp_constructor_args="""torch::nn::Conv3dOptions(3, 4, {2, 3, 4})
.stride(1).padding(0).dilation(1).groups(1).bias(false)""",
input_size=(2, 3, 3, 4, 5),
cudnn=True,
desc="no_bias",
check_with_long_tensor=True,
),
dict(
module_name="Conv3d",
constructor_args=(3, 4, 2, 2),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, 2).stride(2)",
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc="stride",
check_with_long_tensor=True,
),
dict(
module_name="Conv3d",
constructor_args=(3, 4, 2, 2, 1),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, 2).stride(2).padding(1)",
input_size=(2, 3, 5, 5, 5),
cudnn=True,
desc="stride_padding",
check_with_long_tensor=True,
),
dict(
module_name="Conv3d",
constructor_args=(3, 4, (2, 3, 4)),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, {2, 3, 4})",
input_size=(0, 3, 3, 4, 5),
cudnn=True,
check_with_long_tensor=True,
desc="zero_batch",
test_cuda=(not TEST_WITH_ROCM),
),
dict(
fullname="Conv3d_groups",
constructor=lambda: nn.Conv3d(4, 6, kernel_size=3, groups=2),
cpp_constructor_args="torch::nn::Conv3dOptions(4, 6, 3).groups(2)",
input_size=(2, 4, 4, 5, 4),
cudnn=True,
check_with_long_tensor=True,
),
dict(
fullname="Conv3d_dilated",
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, 2).dilation(2)",
input_size=(2, 3, 5, 5, 5),
),
dict(
fullname="Conv3d_dilated_strided",
constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2),
cpp_constructor_args="torch::nn::Conv3dOptions(3, 4, 2).dilation(2).stride(2)",
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name="ConvTranspose3d",
constructor_args=(2, 3, (2, 3, 2)),
cpp_constructor_args="torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})",
cudnn=True,
input_size=(1, 2, 4, 5, 4),
),
dict(
module_name="ConvTranspose3d",
constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)),
cpp_constructor_args="""torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})
.stride(1).padding(0).output_padding(0).groups(1).bias(true).dilation({2, 2, 2})""",
cudnn=True,
input_size=(1, 2, 4, 5, 4),
desc="dilated",
),
dict(
module_name="MaxPool3d",
constructor_args=((2, 2, 2),),
cpp_constructor_args="torch::nn::MaxPool3dOptions({2, 2, 2})",
input_size=(2, 3, 5, 5, 5),
check_gradgrad=False,
),
dict(
module_name="MaxPool3d",
constructor_args=(2, (2, 2, 2)),
cpp_constructor_args="torch::nn::MaxPool3dOptions(2).stride({2, 2, 2})",
input_size=(2, 3, 5, 5, 5),
desc="stride",
check_gradgrad=False,
),
dict(
module_name="MaxPool3d",
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args="torch::nn::MaxPool3dOptions(2).stride(2).padding({1, 1, 1})",
input_size=(2, 3, 5, 5, 5),
desc="stride_padding",
check_gradgrad=False,
),
dict(
module_name="AvgPool3d",
constructor_args=((2, 2, 2),),
cpp_constructor_args="torch::nn::AvgPool3dOptions({2, 2, 2})",
input_size=(2, 3, 4, 4, 4),
),
dict(
module_name="AvgPool3d",
constructor_args=(2, (2, 2, 2)),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride({2, 2, 2})",
input_size=(2, 3, 5, 5, 5),
desc="stride",
),
dict(
module_name="AvgPool3d",
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1})",
input_size=(2, 3, 5, 5, 5),
desc="stride_pad",
),
dict(
module_name="AvgPool3d",
constructor_args=(4, 2, (1, 2, 1)),
cpp_constructor_args="torch::nn::AvgPool3dOptions(4).stride(2).padding({1, 2, 1})",
input_size=(2, 3, 5, 5, 5),
desc="stride_pad_gpu_fixedkw_output",
),
dict(
module_name="AvgPool3d",
constructor_args=((2, 4, 8), 1, (1, 1, 2)),
cpp_constructor_args="torch::nn::AvgPool3dOptions({2, 4, 8}).stride(1).padding({1, 1, 2})",
input_size=(2, 3, 2, 4, 8),
desc="stride_pad_gpu_general_output",
),
dict(
module_name="AvgPool3d",
constructor_args=(3, 1, 0),
cpp_constructor_args="torch::nn::AvgPool3dOptions(3).stride(1).padding(0)",
input_size=(2, 3, 4, 4, 4),
desc="stride1_pad0_gpu_input",
),
dict(
module_name="AvgPool3d",
constructor_args=(2, 2, (1, 1, 1)),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1})",
input_size=(2, 3, 4, 4, 4),
desc="stride_pad_gpu_input_nooverlap",
),
dict(
fullname="AvgPool3d_divisor",
constructor=lambda: nn.AvgPool3d((2, 2, 2), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions({2, 2, 2}).divisor_override(1)",
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride",
constructor=lambda: nn.AvgPool3d(2, (2, 2, 2), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride({2, 2, 2}).divisor_override(1)",
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride_pad",
constructor=lambda: nn.AvgPool3d(2, 2, (1, 1, 1), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1}).divisor_override(1)",
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride_pad_gpu_fixedkw_output",
constructor=lambda: nn.AvgPool3d(4, 2, (1, 2, 1), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions(4).stride(2).padding({1, 2, 1}).divisor_override(1)",
input_size=(2, 3, 5, 5, 5),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride_pad_gpu_general_output",
constructor=lambda: nn.AvgPool3d((2, 4, 8), 1, (1, 1, 2), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions({2, 4, 8}).stride(1).padding({1, 1, 2}).divisor_override(1)",
input_size=(2, 3, 2, 4, 8),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride1_pad0_gpu_input",
constructor=lambda: nn.AvgPool3d(3, 1, 0, divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions(3).stride(1).padding(0).divisor_override(1)",
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
fullname="AvgPool3d_divisor_stride_pad_gpu_input_nooverlap",
constructor=lambda: nn.AvgPool3d(2, 2, (1, 1, 1), divisor_override=1),
cpp_constructor_args="torch::nn::AvgPool3dOptions(2).stride(2).padding({1, 1, 1}).divisor_override(1)",
input_size=(2, 3, 4, 4, 4),
check_with_long_tensor=True,
),
dict(
module_name="ReplicationPad3d",
constructor_args=((1, 2, 3, 4, 5, 6),),
cpp_constructor_args="torch::nn::ReplicationPad3dOptions({1, 2, 3, 4, 5, 6})",
input_size=(2, 3, 5, 5, 5),
),
dict(
module_name="Embedding",
constructor_args=(4, 3),
cpp_constructor_args="torch::nn::EmbeddingOptions(4, 3)",
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
),
dict(
module_name="EmbeddingBag",
constructor_args=(4, 3),
cpp_constructor_args="torch::nn::EmbeddingBagOptions(4, 3)",
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
check_forward_only=True,
desc="mean",
),
dict(
module_name="EmbeddingBag",
constructor_args=(4, 3, None, 2.0, False, "sum"),
cpp_constructor_args="""torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum)""",
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
check_forward_only=True,
desc="sum",
),
dict(
module_name="EmbeddingBag",
constructor_args=(4, 3, None, 2.0, False, "max"),
cpp_constructor_args="""torch::nn::EmbeddingBagOptions(4, 3)
.max_norm(c10::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax)""",
input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4),
jacobian_input=False,
check_gradgrad=False,
check_forward_only=True,
desc="max",
),
dict(
fullname="EmbeddingBag_sparse",
constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True),
cpp_constructor_args="torch::nn::EmbeddingBagOptions(4, 3).sparse(true)",
input_fn=lambda: torch.randperm(2).repeat(1, 2),
jacobian_input=False,
check_gradgrad=False,
),
dict(
constructor=lambda: nn.Embedding(4, 3, sparse=True),
cpp_constructor_args="torch::nn::EmbeddingOptions(4, 3).sparse(true)",
input_fn=lambda: torch.randperm(2).repeat(1, 2),
jacobian_input=False,
fullname="Embedding_sparse",
check_gradgrad=False,
),
dict(
module_name="PixelShuffle",
constructor_args=(3,),
cpp_constructor_args="torch::nn::PixelShuffleOptions(3)",
input_size=(1, 9, 4, 4),
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)""",
input_size=(1, 2, 4),
fullname="interpolate_nearest_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)""",
input_size=(0, 2, 4),
fullname="interpolate_nearest_1d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=(12,), scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12})).scale_factor(c10::nullopt).mode(torch::kNearest)""",
input_size=(1, 2, 3),
fullname="interpolate_nearest_tuple_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=None, scale_factor=4.0, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt).scale_factor(std::vector<double>({4.})).mode(torch::kNearest)""",
input_size=(1, 2, 4),
fullname="interpolate_nearest_scale_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="linear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)""",
input_size=(1, 2, 4),
fullname="interpolate_linear_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4,),
scale_factor=None,
mode="linear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)""",
input_size=(1, 2, 3),
fullname="interpolate_linear_tuple_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=4.0,
mode="linear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4.}))
.mode(torch::kLinear)
.align_corners(false)""",
input_size=(1, 2, 4),
fullname="interpolate_linear_scale_1d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="linear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(false)""",
input_size=(0, 2, 4),
fullname="interpolate_linear_1d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="linear", align_corners=True
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12}))
.scale_factor(c10::nullopt)
.mode(torch::kLinear)
.align_corners(true)""",
input_size=(1, 2, 4),
fullname="interpolate_linear_1d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=4.0,
mode="linear",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4.}))
.mode(torch::kLinear)
.align_corners(true)""",
input_size=(1, 2, 4),
fullname="interpolate_linear_scale_1d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=2, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({2, 2}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(1, 128, 1, 1),
fullname="interpolate_nearest_2d_launch_configs",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_nearest_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=(12, 16), scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 16}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(1, 2, 3, 4),
fullname="interpolate_nearest_tuple_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=None, scale_factor=4.0, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kNearest)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_nearest_scale_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(0, 2, 4, 4),
fullname="interpolate_nearest_2d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(0, 2, 4, 4),
fullname="interpolate_bilinear_2d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6),
scale_factor=None,
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(1, 2, 2, 3),
fullname="interpolate_bilinear_tuple_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=4.0,
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_scale_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 2.0),
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 2.}))
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_scale_tuple_shared_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 1.0),
mode="bilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBilinear)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_scale_tuple_skewed_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6),
scale_factor=None,
mode="bilinear",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBilinear)
.align_corners(true)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_tuple_2d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 1.0),
mode="bilinear",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBilinear)
.align_corners(true)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bilinear_scale_tuple_skewed_2d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(0, 2, 4, 4),
fullname="interpolate_bicubic_2d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6),
scale_factor=None,
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(1, 2, 2, 3),
fullname="interpolate_bicubic_tuple_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=4.0,
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4.}))
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_scale_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 2.0),
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 2.}))
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_scale_tuple_shared_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 1.0),
mode="bicubic",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBicubic)
.align_corners(false)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_scale_tuple_skewed_2d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6),
scale_factor=None,
mode="bicubic",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kBicubic)
.align_corners(true)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_tuple_2d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=(2.0, 1.0),
mode="bicubic",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({2., 1.}))
.mode(torch::kBicubic)
.align_corners(true)""",
input_size=(1, 2, 4, 4),
fullname="interpolate_bicubic_scale_tuple_skewed_2d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(1, 2, 4, 4, 4),
fullname="interpolate_nearest_3d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=12, scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(0, 2, 4, 4, 4),
fullname="interpolate_nearest_3d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=(12, 16, 16), scale_factor=None, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 16, 16}))
.scale_factor(c10::nullopt)
.mode(torch::kNearest)""",
input_size=(1, 2, 3, 4, 4),
fullname="interpolate_nearest_tuple_3d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate, size=None, scale_factor=4.0, mode="nearest"
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({4., 4., 4.}))
.mode(torch::kNearest)""",
input_size=(1, 2, 4, 4, 4),
fullname="interpolate_nearest_scale_3d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="trilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)""",
input_size=(1, 2, 4, 4, 4),
fullname="interpolate_trilinear_3d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=12,
scale_factor=None,
mode="trilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({12, 12, 12}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)""",
input_size=(0, 2, 4, 4, 4),
fullname="interpolate_trilinear_3d_zero_dim",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6, 6),
scale_factor=None,
mode="trilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(false)""",
input_size=(1, 2, 2, 3, 3),
fullname="interpolate_trilinear_tuple_3d",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=3.0,
mode="trilinear",
align_corners=False,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({3., 3., 3.}))
.mode(torch::kTrilinear)
.align_corners(false)""",
input_size=(1, 2, 3, 4, 4),
fullname="interpolate_trilinear_scale_3d",
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=(4, 6, 6),
scale_factor=None,
mode="trilinear",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(std::vector<int64_t>({4, 6, 6}))
.scale_factor(c10::nullopt)
.mode(torch::kTrilinear)
.align_corners(true)""",
input_size=(1, 2, 2, 3, 3),
fullname="interpolate_trilinear_tuple_3d_align_corners",
pickle=False,
),
dict(
constructor=wrap_functional(
F.interpolate,
size=None,
scale_factor=3.0,
mode="trilinear",
align_corners=True,
),
cpp_options_args="""F::InterpolateFuncOptions()
.size(c10::nullopt)
.scale_factor(std::vector<double>({3., 3., 3.}))
.mode(torch::kTrilinear)
.align_corners(true)""",
input_size=(1, 2, 3, 4, 4),
fullname="interpolate_trilinear_scale_3d_align_corners",
# See https://github.com/pytorch/pytorch/issues/5006
precision=3e-4,
pickle=False,
),
dict(
module_name="AdaptiveMaxPool1d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveMaxPool1dOptions(3)",
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5),
),
dict(
module_name="AdaptiveMaxPool2d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveMaxPool2dOptions(3)",
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc="single",
),
dict(
module_name="AdaptiveMaxPool2d",
constructor_args=((3, 4),),
cpp_constructor_args="torch::nn::AdaptiveMaxPool2dOptions({3, 4})",
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc="tuple",
),
dict(
module_name="AdaptiveMaxPool2d",
constructor_args=((3, None),),
cpp_constructor_args="torch::nn::AdaptiveMaxPool2dOptions({3, c10::nullopt})",
input_fn=lambda: _rand_tensor_non_equal(1, 3, 5, 6),
desc="tuple_none",
),
dict(
module_name="AdaptiveMaxPool3d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveMaxPool3dOptions(3)",
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc="single",
),
dict(
module_name="AdaptiveMaxPool3d",
constructor_args=((3, 4, 5),),
cpp_constructor_args="torch::nn::AdaptiveMaxPool3dOptions({3, 4, 5})",
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc="tuple",
),
dict(
module_name="AdaptiveMaxPool3d",
constructor_args=((3, None, 5),),
cpp_constructor_args="torch::nn::AdaptiveMaxPool3dOptions({3, c10::nullopt, 5})",
input_fn=lambda: _rand_tensor_non_equal(2, 3, 5, 6, 7),
desc="tuple_none",
),
dict(
module_name="AdaptiveMaxPool3d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveMaxPool3dOptions(3)",
input_fn=lambda: _rand_tensor_non_equal(2, 3, 12, 9, 3),
desc="single_nonatomic",
),
dict(
module_name="AdaptiveMaxPool3d",
constructor_args=((3, 4, 5),),
cpp_constructor_args="torch::nn::AdaptiveMaxPool3dOptions({3, 4, 5})",
input_fn=lambda: _rand_tensor_non_equal(2, 3, 6, 4, 10),
desc="tuple_nonatomic",
),
dict(
module_name="AdaptiveAvgPool1d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveAvgPool1dOptions(3)",
input_fn=lambda: torch.rand(1, 3, 5),
),
dict(
module_name="AdaptiveAvgPool1d",
constructor_args=(1,),
cpp_constructor_args="torch::nn::AdaptiveAvgPool1dOptions(1)",
input_fn=lambda: torch.rand(1, 3, 5),
desc="one_output",
),
dict(
module_name="AdaptiveAvgPool2d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveAvgPool2dOptions(3)",
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc="single",
check_gradgrad=False,
),
dict(
module_name="AdaptiveAvgPool2d",
constructor_args=(1,),
cpp_constructor_args="torch::nn::AdaptiveAvgPool2dOptions(1)",
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc="single_1x1output",
check_gradgrad=False,
),
dict(
module_name="AdaptiveAvgPool2d",
constructor_args=((3, 4),),
cpp_constructor_args="torch::nn::AdaptiveAvgPool2dOptions({3, 4})",
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc="tuple",
check_gradgrad=False,
),
dict(
module_name="AdaptiveAvgPool2d",
constructor_args=((3, None),),
cpp_constructor_args="torch::nn::AdaptiveAvgPool2dOptions({3, c10::nullopt})",
input_fn=lambda: torch.rand(1, 3, 5, 6),
desc="tuple_none",
check_gradgrad=False,
),
dict(
module_name="AdaptiveAvgPool3d",
constructor_args=(3,),
cpp_constructor_args="torch::nn::AdaptiveAvgPool3dOptions(3)",
input_fn=lambda: torch.rand(2, 3, 5, 2, 7),
desc="single",
),
dict(
module_name="AdaptiveAvgPool3d",
constructor_args=((3, 4, 5),),
cpp_constructor_args="torch::nn::AdaptiveAvgPool3dOptions({3, 4, 5})",
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc="tuple",
),
dict(
module_name="AdaptiveAvgPool3d",
constructor_args=((None, 4, 5),),
cpp_constructor_args="torch::nn::AdaptiveAvgPool3dOptions({c10::nullopt, 4, 5})",
input_fn=lambda: torch.rand(2, 3, 5, 3, 7),
desc="tuple_none",
),
dict(module_name="SELU", input_size=(3, 2, 5), check_inplace=True),
dict(module_name="SELU", input_size=(), check_inplace=True, desc="scalar"),
dict(
module_name="CELU",
input_size=(3, 2, 5),
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::CELUOptions().alpha(2.)",
check_inplace=True,
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2.0 * ((0.5 * x).exp() - 1)),
),
dict(
module_name="CELU",
input_size=(),
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::CELUOptions().alpha(2.)",
check_inplace=True,
reference_fn=lambda x, *_: torch.where(x >= 0, x, 2.0 * ((0.5 * x).exp() - 1)),
desc="scalar",
),
dict(
module_name="GLU",
input_size=(5, 6),
),
dict(
module_name="GLU",
constructor_args=(1,),
cpp_constructor_args="torch::nn::GLUOptions(1)",
input_size=(5, 6, 7),
desc="dim",
),
dict(
module_name="GELU",
input_size=(),
desc="scalar",
reference_fn=lambda x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))),
),
dict(
module_name="GELU",
input_size=(3, 2, 5),
reference_fn=lambda x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))),
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
cpp_options_args="F::SoftmaxFuncOptions(-1)",
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname="softmax_lastdim",
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),
cpp_options_args="F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)",
input_size=(2, 128),
fullname="softmax_lastdim_dtype",
pickle=False,
test_cuda=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
cpp_options_args="F::SoftmaxFuncOptions(1)",
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname="softmax_spatial_special",
pickle=False,
test_cuda=(not TEST_WITH_ROCM),
),
dict(
constructor=wrap_functional(F.softmax, dim=1),
cpp_options_args="F::SoftmaxFuncOptions(1)",
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname="softmax_spatial",
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64),
cpp_options_args="F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)",
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname="softmax_spatial_dtype",
pickle=False,
test_cuda=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=0),
cpp_options_args="F::SoftmaxFuncOptions(0)",
input_size=(2, 3, 4, 5),
fullname="softmax_functional_dim0",
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=3),
cpp_options_args="F::SoftmaxFuncOptions(3)",
input_size=(2, 3, 4, 5),
fullname="softmax_functional_dim3",
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.softmax, dim=-1),
cpp_options_args="F::SoftmaxFuncOptions(-1)",
input_size=(),
fullname="softmax_functional_scalar",
test_cuda=False,
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=-1),
cpp_options_args="F::LogSoftmaxFuncOptions(-1)",
input_size=(2, 128), # trigger the last-dim algo in CUDA
fullname="log_softmax_lastdim",
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
cpp_options_args="F::LogSoftmaxFuncOptions(1)",
input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo
fullname="log_softmax_spatial_special",
pickle=False,
test_cuda=(not TEST_WITH_ROCM),
),
dict(
constructor=wrap_functional(F.log_softmax, dim=1),
cpp_options_args="F::LogSoftmaxFuncOptions(1)",
input_size=(2, 2, 4, 4), # regular spatial algorithm
fullname="log_softmax_spatial",
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
cpp_options_args="F::LogSoftmaxFuncOptions(0)",
input_size=(2, 3, 4, 5),
fullname="log_softmax_dim0",
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=3),
cpp_options_args="F::LogSoftmaxFuncOptions(3)",
input_size=(2, 3, 4, 5),
fullname="log_softmax_dim3",
pickle=False,
),
dict(
constructor=wrap_functional(F.log_softmax, dim=0),
cpp_options_args="F::LogSoftmaxFuncOptions(0)",
input_size=(),
fullname="log_softmax_scalar",
pickle=False,
),
dict(
fullname="Unfold",
constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)),
cpp_constructor_args="torch::nn::UnfoldOptions({2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})",
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname="Fold",
constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)),
cpp_constructor_args="torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})",
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname="Unfold_int_input",
constructor=lambda: nn.Unfold(2, 1, 0, 1),
cpp_constructor_args="torch::nn::UnfoldOptions(2).dilation(1).padding(0).stride(1)",
input_size=(2, 4, 3, 3),
check_gradgrad=False,
test_cuda=True,
),
dict(
fullname="Fold_int_input",
constructor=lambda: nn.Fold(3, 2, 1, 0, 1),
cpp_constructor_args="torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)",
input_size=(2, 16, 4),
check_gradgrad=False,
test_cuda=True,
),
dict(
module_name="Threshold",
constructor_args=(2.0, 1.0),
cpp_constructor_args="torch::nn::ThresholdOptions(2., 1.)",
input_size=(),
check_inplace=True,
desc="threshold_value_scalar",
),
dict(module_name="ReLU", input_size=(), check_inplace=True, desc="scalar"),
dict(module_name="ReLU6", input_size=(), check_inplace=True, desc="scalar"),
dict(
module_name="RReLU",
constructor_args=(0.1, 0.9),
cpp_constructor_args="torch::nn::RReLUOptions().lower(0.1).upper(0.9)",
input_size=(),
desc="with_up_down_scalar",
test_cuda=False,
),
dict(
module_name="Hardtanh",
input_size=(),
reference_fn=lambda i, *_: i.clamp(-1, 1),
desc="scalar",
),
dict(
module_name="Sigmoid",
input_size=(),
desc="scalar",
),
dict(
module_name="Tanh",
input_size=(),
desc="scalar",
),
dict(
module_name="Softmax",
constructor_args=(0,),
cpp_constructor_args="torch::nn::SoftmaxOptions(0)",
input_size=(),
reference_fn=lambda i, *_: torch.exp(i).div(torch.exp(i).sum(0, True)),
desc="scalar",
),
dict(
module_name="LogSoftmax",
constructor_args=(0,),
cpp_constructor_args="torch::nn::LogSoftmaxOptions(0)",
input_size=(),
reference_fn=lambda i, *_: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(),
desc="multiparam_scalar",
),
dict(
module_name="ELU",
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::ELUOptions().alpha(2.)",
input_size=(),
desc="scalar",
),
dict(
module_name="Hardshrink",
constructor_args=(2.0,),
cpp_constructor_args="torch::nn::HardshrinkOptions(2.)",
input_size=(),
desc="scalar",
),
dict(
module_name="LeakyReLU",
constructor_args=(0.5,),
cpp_constructor_args="torch::nn::LeakyReLUOptions().negative_slope(0.5)",
input_size=(),
check_inplace=True,
desc="with_negval_scalar",
),
dict(
module_name="LogSigmoid",
input_size=(),
reference_fn=lambda i, *_: i.sigmoid().log(),
desc="scalar",
),
dict(
module_name="Softplus",
constructor_args=(2, -100),
cpp_constructor_args="torch::nn::SoftplusOptions().beta(2).threshold(-100)",
input_size=(),
reference_fn=(
lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ ((i * 2) <= -100).type_as(i) * 1.0 / 2.0 * torch.log(1 + torch.exp(2 * i))
),
desc="beta_threshold_scalar",
),
dict(
module_name="Softshrink",
constructor_args=(1,),
cpp_constructor_args="torch::nn::SoftshrinkOptions(1)",
input_size=(),
desc="lambda_scalar",
),
dict(
module_name="PReLU",
input_size=(),
reference_fn=lambda i, p, _: torch.clamp(i, min=0)
+ torch.clamp(i, max=0) * p[0][0],
desc="scalar",
),
dict(
module_name="Softsign",
input_size=(),
reference_fn=lambda i, *_: i.div(1 + torch.abs(i)),
desc="scalar",
),
dict(
module_name="Softmin",
constructor_args=(0,),
cpp_constructor_args="torch::nn::SoftminOptions(0)",
input_size=(),
desc="scalar",
),
dict(
module_name="Tanhshrink",
input_size=(),
desc="scalar",
),
dict(
fullname="Padding12_1dcircular",
constructor=wrap_functional(F.pad, pad=(1, 2), mode="circular"),
cpp_options_args="F::PadFuncOptions({1, 2}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding31_1dcircular",
constructor=wrap_functional(F.pad, pad=(3, 1), mode="circular"),
cpp_options_args="F::PadFuncOptions({3, 1}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (3, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding33_1dcircular",
constructor=wrap_functional(F.pad, pad=(3, 3), mode="circular"),
cpp_options_args="F::PadFuncOptions({3, 3}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape([1, 2, 3]),
reference_fn=lambda i, *_: padding1d_circular(i, (3, 3)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding1221_2dcircular",
constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1), mode="circular"),
cpp_options_args="F::PadFuncOptions({1, 2, 2, 1}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape(
[1, 1, 2, 3]
),
reference_fn=lambda i, *_: padding2d_circular(i, (1, 2, 2, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding2322_2dcircular",
constructor=wrap_functional(F.pad, pad=(2, 3, 2, 2), mode="circular"),
cpp_options_args="F::PadFuncOptions({2, 3, 2, 2}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(6, out=torch.DoubleTensor()).reshape(
[1, 1, 2, 3]
),
reference_fn=lambda i, *_: padding2d_circular(i, (2, 3, 2, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding3331_2dcircular",
constructor=wrap_functional(F.pad, pad=(3, 3, 3, 1), mode="circular"),
cpp_options_args="F::PadFuncOptions({3, 3, 3, 1}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(9, out=torch.DoubleTensor()).reshape(
[1, 1, 3, 3]
),
reference_fn=lambda i, *_: padding2d_circular(i, (3, 3, 3, 1)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding122112_3dcircular",
constructor=wrap_functional(F.pad, pad=(1, 2, 2, 1, 1, 2), mode="circular"),
cpp_options_args="F::PadFuncOptions({1, 2, 2, 1, 1, 2}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape(
[1, 1, 2, 2, 3]
),
reference_fn=lambda i, *_: padding3d_circular(i, (1, 2, 2, 1, 1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding322112_3dcircular",
constructor=wrap_functional(F.pad, pad=(3, 2, 2, 1, 1, 2), mode="circular"),
cpp_options_args="F::PadFuncOptions({3, 2, 2, 1, 1, 2}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape(
[1, 1, 2, 2, 3]
),
reference_fn=lambda i, *_: padding3d_circular(i, (3, 2, 2, 1, 1, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
dict(
fullname="Padding332122_3dcircular",
constructor=wrap_functional(F.pad, pad=(3, 3, 2, 1, 2, 2), mode="circular"),
cpp_options_args="F::PadFuncOptions({3, 3, 2, 1, 2, 2}).mode(torch::kCircular)",
input_fn=lambda: torch.arange(12, out=torch.DoubleTensor()).reshape(
[1, 1, 2, 2, 3]
),
reference_fn=lambda i, *_: padding3d_circular(i, (3, 3, 2, 1, 2, 2)),
skip_double=TEST_WITH_ROCM,
pickle=False,
),
]
# add conv padding mode tests:
# Generates one Conv1d/2d/3d module test per padding mode, pairing each
# Python padding_mode string with its C++ enum counterpart for the C++ API
# parity checks.
for padding_mode, cpp_padding_mode in zip(
    ["reflect", "circular", "replicate", "zeros"],
    ["torch::kReflect", "torch::kCircular", "torch::kReplicate", "torch::kZeros"],
):
    # conv signature:
    # in_channels, out_channels, kernel_size, stride=1,
    # padding=0, dilation=1, groups=1,
    # bias=True, padding_mode='zeros'
    for d in (1, 2, 3):
        if d == 3 and padding_mode == "reflect":
            # FIXME: remove after implementing reflection pad 3d
            # https://github.com/pytorch/pytorch/issues/27655
            continue
        new_module_tests.append(
            dict(
                module_name="Conv{}d".format(d),
                constructor_args=(3, 4, 3, 2, 2, 1, 1, True, padding_mode),
                cpp_constructor_args="""torch::nn::Conv{}dOptions(3, 4, 3)
                                        .stride(2)
                                        .padding(2)
                                        .dilation(1)
                                        .groups(1)
                                        .bias(true)
                                        .padding_mode({})""".format(
                    d, cpp_padding_mode
                ),
                input_size=(2, 3) + (3,) * d,
                output_size=(2, 4) + (3,) * d,
                cudnn=True,
                desc="{}_stride2_pad2".format(padding_mode),
            ),
        )
def kldivloss_reference(input, target, reduction="mean"):
    """Reference KL-divergence loss.

    ``input`` holds log-probabilities; ``target`` holds probabilities.
    Entries with non-positive targets contribute zero loss.

    Fix: the ``batchmean`` branch referenced an undefined name ``results``
    (NameError); it now uses ``result``.
    """
    # Zero out non-positive targets so 0 * log(0) cannot produce NaN.
    safe_target = target * (target > 0).type_as(target)
    # Add 1 where target <= 0 so the log is finite; those entries are
    # multiplied by a zero safe_target anyway.
    safe_target_log = (safe_target + (target <= 0).type_as(target)).log()
    result = safe_target * (safe_target_log - input)
    if reduction == "mean":
        return result.mean()
    elif reduction == "sum":
        return result.sum()
    elif reduction == "batchmean" and result.dim() != 0:
        # Sum over everything, normalized by batch size only.
        return result.sum() / result.size(0)
    return result
def nlllossNd_reference(
    input, target, weight=None, ignore_index=-100, reduction="mean"
):
    """Reference NLL loss for inputs with spatial dims: (N, C, d1, d2, ...)."""
    assert input.dim() >= 3
    batch = input.size(0)
    num_classes = input.size(1)
    out_size = (batch,) + input.size()[2:]
    output = torch.zeros(out_size).type_as(input)
    if weight is None:
        weight = torch.ones(num_classes).type_as(input)
    total_weight = 0
    # Walk every (batch, spatial...) coordinate and pick out the class
    # channel indicated by the target at that coordinate.
    for idx in product(*(range(s) for s in out_size)):
        cls = target[idx]
        # Coordinates matching ignore_index contribute nothing.
        w = 0.0 if ignore_index == cls else weight[cls].item()
        full_idx = list(idx)
        full_idx.insert(1, cls)
        output[idx] = -input[tuple(full_idx)] * w
        total_weight += w
    if reduction == "mean":
        return output.sum() / total_weight
    if reduction == "sum":
        return output.sum()
    return output
def nllloss_reference(input, target, weight=None, ignore_index=-100, reduction="mean"):
    """Reference NLL loss for (N, C) inputs and 1-d class-index targets."""

    def per_sample(row, cls):
        # Samples whose class equals ignore_index contribute neither loss
        # nor weight to the reduction.
        if cls == ignore_index:
            return (0, 0)
        w = 1 if weight is None else weight[cls]
        return (-row[cls] * w, w)

    pairs = [per_sample(row, cls) for row, cls in zip(input, target)]
    losses, norms = zip(*pairs)
    losses_tensor = input.new_tensor(losses)
    if reduction == "mean":
        # Weighted mean: divide by the sum of per-sample weights.
        return sum(losses_tensor) / sum(norms)
    if reduction == "sum":
        return sum(losses_tensor)
    return losses_tensor
def smoothl1loss_reference(input, target, reduction="mean"):
    """Reference smooth-L1 loss: quadratic below a unit gap, linear above."""
    diff = (input - target).abs()
    large = (diff >= 1).type_as(diff)
    small = (diff < 1).type_as(diff)
    # |d| - 0.5 for |d| >= 1, else 0.5 * d^2.
    output = large * (diff - 0.5) + small * 0.5 * diff.pow(2)
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
def _multilabelmarginloss_reference(input, target):
targets = []
for target_index in target:
if target_index < 0:
break
targets.append(target_index)
sum = 0
for target_index in targets:
for i in range(0, len(input)):
if i not in targets:
sum += max(0, 1 - input[target_index] + input[i])
return sum
def multilabelmarginloss_reference(input, target, reduction="mean"):
    """Reference multilabel margin loss; accepts 0-d, 1-d, or 2-d input."""
    # Remember the original rank so scalar/vector inputs can be squeezed
    # back after the uniform (N, C) computation.
    original_dim = input.dim()
    if original_dim < 2:
        assert target.dim() < 2
        if input.dim() == 1:
            input = input.unsqueeze(0)
        else:
            input = input.unsqueeze(0).unsqueeze(0)
        if target.dim() == 1:
            target = target.unsqueeze(0)
        else:
            target = target.unsqueeze(0).unsqueeze(0)
    n = input.size(0)
    dim = input.size(1)
    output = input.new(n).zero_()
    for row in range(n):
        output[row] = _multilabelmarginloss_reference(input[row], target[row])
    if reduction == "mean":
        return output.mean() / dim
    if reduction == "sum":
        return output.sum() / dim
    if original_dim < 2:
        # (1, C) x (1, C) -> (1,), so squeeze restores the original
        # dimensionality.
        return output.squeeze() / dim
    return output / dim
def hingeembeddingloss_reference(input, target, margin=1.0, reduction="mean"):
    """Reference hinge embedding loss.

    Positive targets (== 1) incur the raw input value; all other targets
    incur the hinge term max(0, margin - input).
    """
    hinge = (margin - input).clamp(min=0).type_as(input)
    output = torch.where(target == 1, input, hinge)
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
def softmarginloss_reference(input, target, reduction="mean"):
    """Reference soft margin loss: log(1 + exp(-input * target))."""
    exp_term = (input.neg() * target).exp()
    output = (1 + exp_term).log()
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
def _multimarginloss_reference(input, target_idx, p, margin, weight):
if weight is None:
weight = input.new(len(input)).fill_(1)
output = 0
for i in range(0, len(input)):
if i != target_idx:
output += max(
0, weight[target_idx] * (margin - input[target_idx] + input[i]) ** p
)
return output
def multimarginloss_reference(
    input, target, p=1, margin=1, weight=None, reduction="mean"
):
    """Reference multi-class margin loss; accepts 0-d, 1-d, or 2-d input."""
    # Promote everything to (N, C) so a single loop handles all cases.
    if input.dim() < 2:
        if input.dim() == 1:
            input = input.unsqueeze(0)
        else:
            input = input.unsqueeze(0).unsqueeze(0)
    target_dim = target.dim()
    if target_dim == 0:
        target = target.unsqueeze(0)
    n = input.size(0)
    dim = input.size(1)
    output = input.new(n)
    for row in range(n):
        output[row] = _multimarginloss_reference(
            input[row], target[row], p, margin, weight
        )
    if reduction == "mean":
        return output.mean() / dim
    if reduction == "sum":
        return output.sum() / dim
    if target_dim == 0:
        # A scalar target produced a (1,) output; squeeze it back.
        return output.squeeze(0) / dim
    return output / dim
def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction="mean"):
    """Reference cosine embedding loss over paired rows."""

    def row_cosine(a, b):
        # Per-row cosine similarity, with a small epsilon for stability.
        cos = a.new(a.size(0))
        for row in range(a.size(0)):
            num = (a[row] * b[row]).sum()
            denom = (
                ((a[row] * a[row]).sum() + 1e-12)
                * ((b[row] * b[row]).sum() + 1e-12)
            ) ** 0.5
            cos[row] = num / denom
        return cos

    cos = row_cosine(input1, input2)
    # target == 1: penalize dissimilarity; otherwise hinge on similarity.
    output = torch.where(target == 1, 1 - cos, (cos - margin).clamp(min=0))
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
def tripletmarginloss_reference(
    anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, reduction="mean"
):
    """Reference triplet margin loss with optional distance swap."""
    dist_pos = torch.pairwise_distance(anchor, positive, p, eps)
    dist_neg = torch.pairwise_distance(anchor, negative, p, eps)
    if swap:
        # Use the smaller of d(anchor, negative) and d(positive, negative).
        dist_swap = torch.pairwise_distance(positive, negative, p, eps)
        dist_neg = torch.min(dist_neg, dist_swap)
    output = (margin + dist_pos - dist_neg).clamp(min=0.0)
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
def marginrankingloss_reference(input1, input2, target, margin=0, reduction="mean"):
    """Reference margin ranking loss: max(0, -target * (x1 - x2) + margin)."""
    diff = input1 - input2
    output = (margin - target * diff).clamp(min=0)
    if reduction == "sum":
        return output.sum()
    if reduction == "mean":
        return output.mean()
    return output
# this directly follows Graves et al's paper, in contrast to the production implementation, it does not use log-space
def ctcloss_reference(
    log_probs, targets, input_lengths, target_lengths, blank=0, reduction="mean"
):
    """Reference CTC loss via the forward (alpha) recursion in probability
    space (not log-space), following Graves et al. (2006).

    Args:
        log_probs: (T, N, C) per-time-step class log-probabilities.
        targets: label indices, either (N, S) padded rows or a 1-d
            concatenation of all samples' labels.
        input_lengths: per-sample number of valid time steps.
        target_lengths: per-sample number of valid labels.
        blank: index of the CTC blank symbol.
        reduction: 'mean' (each loss normalized by its target length, then
            averaged), 'sum', or anything else for per-sample losses.
    """
    input_lengths = torch.as_tensor(input_lengths, dtype=torch.long)
    target_lengths = torch.as_tensor(target_lengths, dtype=torch.long)
    dt = log_probs.dtype
    log_probs = log_probs.double()  # we need the accuracy as we are not in logspace
    targets = targets.long()
    cum_target_lengths = target_lengths.cumsum(0)
    losses = []
    for i in range(log_probs.size(1)):
        input_length = input_lengths[i].item()
        target_length = target_lengths[i].item()
        cum_target_length = cum_target_lengths[i].item()
        # Extended label sequence: blanks interleaved between (and around)
        # the true labels -> length 2 * target_length + 1.
        targets_prime = targets.new_full((2 * target_length + 1,), blank)
        if targets.dim() == 2:
            targets_prime[1::2] = targets[i, :target_length]
        else:
            targets_prime[1::2] = targets[
                cum_target_length - target_length : cum_target_length
            ]
        probs = log_probs[:input_length, i].exp()
        # alpha[s]: total probability of all alignments ending at extended
        # label position s after the current time step.
        alpha = log_probs.new_zeros((target_length * 2 + 1,))
        alpha[0] = probs[0, blank]
        alpha[1] = probs[0, targets_prime[1]]
        # The s-2 -> s skip transition is only allowed when the two labels
        # differ (i.e. the blank between two distinct labels may be skipped).
        mask_third = targets_prime[:-2] != targets_prime[2:]
        for t in range(1, input_length):
            alpha_next = alpha.clone()
            alpha_next[1:] += alpha[:-1]
            alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1))
            alpha = probs[t, targets_prime] * alpha_next
        # Valid alignments end at the last label or the trailing blank.
        losses.append(-alpha[-2:].sum().log()[None])
    output = torch.cat(losses, 0)
    if reduction == "mean":
        # Normalize each sample's loss by its own target length.
        return (
            output / target_lengths.to(dtype=output.dtype, device=output.device)
        ).mean()
    elif reduction == "sum":
        return output.sum()
    # Restore the caller's original dtype for the unreduced output.
    output = output.to(dt)
    return output
def padding1d_circular(input, pad):
    r"""Circularly pad the last dim of a (N, C, W) tensor.

    ``pad`` is (left, right); padded values wrap around from the opposite
    edge. Example with pad (1, 2):
        [[[0., 1., 2.],        [[[2., 0., 1., 2., 0., 1.],
          [3., 4., 5.]]]   ->    [5., 3., 4., 5., 3., 4.]]]
    """
    left_wrap = input[:, :, -pad[0] :]
    right_wrap = input[:, :, 0 : pad[1]]
    return torch.cat([left_wrap, input, right_wrap], dim=2)
def padding2d_circular(input, pad):
    r"""Circularly pad the last two dims of a (N, C, H, W) tensor.

    ``pad`` is (left, right, top, bottom). Example with pad (1, 2, 2, 1):
        [[[[0., 1., 2],          [[[[2., 0., 1., 2., 0., 1.],
           [3., 4., 5.]]]]  ->       [5., 3., 4., 5., 3., 4.],
                                     [2., 0., 1., 2., 0., 1.],
                                     [5., 3., 4., 5., 3., 4.],
                                     [2., 0., 1., 2., 0., 1.]]]]
    """
    # Pad rows (dim 2) first, then columns (dim 3) of the row-padded result.
    rows = torch.cat([input[:, :, -pad[2] :], input, input[:, :, 0 : pad[3]]], dim=2)
    return torch.cat(
        [rows[:, :, :, -pad[0] :], rows, rows[:, :, :, 0 : pad[1]]], dim=3
    )
def padding3d_circular(input, pad):
    r"""Circularly pad the last three dimensions of a 5-D tensor.

    ``pad`` is ``(left, right, top, bottom, front, back)`` (innermost
    dimension first, matching ``torch.nn.functional.pad`` ordering).

    Example -- input:
        [[[[[ 0., 1., 2.],
            [ 3., 4., 5.]],
           [[ 6., 7., 8.],
            [ 9., 10., 11.]]]]]
    pad: (1, 2, 2, 1, 1, 2)
    output: depth slices of shape (5, 6) cycling through the wrapped rows,
    e.g. the first slice is
        [[ 8., 6., 7., 8., 6., 7.],
         [11., 9., 10., 11., 9., 10.],
         [ 8., 6., 7., 8., 6., 7.],
         [11., 9., 10., 11., 9., 10.],
         [ 8., 6., 7., 8., 6., 7.]]
    """
    # Explicit positive start indices instead of `-pad[k]:` so a zero pad
    # amount yields an empty slice (`-0:` would wrongly select everything).
    d = input.size(2)
    input = torch.cat(
        [input[:, :, d - pad[4] :], input, input[:, :, 0 : pad[5]]], dim=2
    )
    h = input.size(3)
    input = torch.cat(
        [input[:, :, :, h - pad[2] :], input, input[:, :, :, 0 : pad[3]]], dim=3
    )
    w = input.size(4)
    return torch.cat(
        [input[:, :, :, :, w - pad[0] :], input, input[:, :, :, :, 0 : pad[1]]], dim=4
    )
# Registry mapping a loss-module name to its pure-Python reference
# implementation (the *_reference functions defined earlier in this file).
# The criterion test dicts below look up entries here to compute expected
# values to compare against the torch.nn implementations.
loss_reference_fns = {
    "KLDivLoss": kldivloss_reference,
    "NLLLoss": nllloss_reference,
    "NLLLossNd": nlllossNd_reference,
    "SmoothL1Loss": smoothl1loss_reference,
    "MultiLabelMarginLoss": multilabelmarginloss_reference,
    "HingeEmbeddingLoss": hingeembeddingloss_reference,
    "SoftMarginLoss": softmarginloss_reference,
    "MultiMarginLoss": multimarginloss_reference,
    "CosineEmbeddingLoss": cosineembeddingloss_reference,
    "TripletMarginLoss": tripletmarginloss_reference,
    "MarginRankingLoss": marginrankingloss_reference,
    "CTCLoss": ctcloss_reference,
}
# Declarative descriptions of loss-module tests. Each dict is consumed by
# CriterionTest (below); the common keys are:
#   module_name            -- torch.nn class under test
#   constructor_args[_fn]  -- positional args for the module constructor
#   cpp_constructor_args   -- C++ frontend equivalent (API-parity checks)
#   input_size / input_fn  -- how to build the input tensor(s)
#   target_size / target_fn / target -- how to build the target
#   reference_fn           -- pure-Python expected-value computation
#   desc                   -- suffix distinguishing variants of one module
#   check_* flags          -- toggle extra checks (sum reduction, gradgrad, ...)
criterion_tests = [
    dict(
        module_name="L1Loss",
        input_size=(2, 3, 4),
        target_size=(2, 3, 4),
        reference_fn=lambda i, t, _: 1.0
        / i.numel()
        * sum((a - b).abs().sum() for a, b in zip(i, t)),
    ),
    dict(
        module_name="NLLLoss",
        input_fn=lambda: torch.rand(15, 10).log(),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
        reference_fn=lambda i, t, m: nllloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args=(None, None, 2),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight({}).ignore_index(2)",
        input_fn=lambda: torch.rand(15, 10).log(),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
        reference_fn=lambda i, t, _: nllloss_reference(i, t, ignore_index=2),
        desc="ignore_index",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args_fn=lambda: (torch.rand(10),),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight(torch::rand(10))",
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
        reference_fn=lambda i, t, m: nllloss_reference(i, t, weight=get_weight(m)),
        desc="weights",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args_fn=lambda: (torch.rand(10), None, 2),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(2)",
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
        reference_fn=lambda i, t, m: nllloss_reference(
            i, t, weight=get_weight(m), ignore_index=2
        ),
        desc="weights_ignore_index",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args_fn=lambda: (torch.rand(10), None, -1),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight(torch::rand(10)).ignore_index(-1)",
        input_fn=lambda: torch.rand(15, 10).add(1e-2).log(),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10 + 1).floor().long() - 1,
        reference_fn=lambda i, t, m: nllloss_reference(
            i, t, weight=get_weight(m), ignore_index=-1
        ),
        desc="weights_ignore_index_neg",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="KLDivLoss",
        input_fn=lambda: torch.rand(10, 10).log(),
        target_fn=lambda: torch.rand(10, 10),
        reference_fn=lambda i, t, m: kldivloss_reference(i, t, get_reduction(m)),
        check_sum_reduction=True,
    ),
    dict(
        module_name="MSELoss",
        input_size=(2, 3, 4, 5),
        target_size=(2, 3, 4, 5),
        reference_fn=lambda i, t, m: (
            (i - t).abs().pow(2).sum()
            / (i.numel() if get_reduction(m) == "mean" else 1)
        ),
        check_sum_reduction=True,
    ),
    dict(
        module_name="BCELoss",
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double(),
        reference_fn=lambda i, t, m: -(t * i.log() + (1 - t) * (1 - i).log()).sum()
        / (i.numel() if get_reduction(m) else 1),
        check_gradgrad=False,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="BCELoss",
        constructor_args_fn=lambda: (torch.rand(10),),
        cpp_constructor_args="torch::nn::BCELossOptions().weight(torch::rand(10))",
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double(),
        reference_fn=lambda i, t, m: -(
            (t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)
        ).sum()
        / (i.numel() if get_reduction(m) else 1),
        desc="weights",
        check_gradgrad=False,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="CrossEntropyLoss",
        input_size=(15, 10),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
    ),
    dict(
        module_name="CrossEntropyLoss",
        constructor_args_fn=lambda: (torch.rand(10),),
        cpp_constructor_args="torch::nn::CrossEntropyLossOptions().weight(torch::rand(10))",
        input_size=(15, 10),
        target_fn=lambda: torch.Tensor(15).uniform_().mul(10).floor().long(),
        desc="weights",
    ),
    dict(
        module_name="HingeEmbeddingLoss",
        input_size=(10,),
        target_fn=lambda: torch.randn(10).gt(0).double().mul_(2).sub(1),
        reference_fn=lambda i, t, m: hingeembeddingloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
    ),
    dict(
        module_name="HingeEmbeddingLoss",
        constructor_args=(0.5,),
        cpp_constructor_args="torch::nn::HingeEmbeddingLossOptions().margin(0.5)",
        input_size=(10,),
        target_fn=lambda: torch.randn(10).gt(0).double().mul_(2).sub(1),
        reference_fn=lambda i, t, m: hingeembeddingloss_reference(
            i, t, margin=0.5, reduction=get_reduction(m)
        ),
        desc="margin",
        check_sum_reduction=True,
    ),
    dict(
        module_name="MultiLabelMarginLoss",
        input_size=(10,),
        target_fn=lambda: torch.rand(10).mul(10).floor().long(),
        reference_fn=lambda i, t, m: multilabelmarginloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        desc="1d",
        check_sum_reduction=True,
        check_gradgrad=False,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="MultiLabelMarginLoss",
        input_size=(5, 10),
        target_fn=lambda: torch.rand(5, 10).mul(10).floor().long(),
        reference_fn=lambda i, t, m: multilabelmarginloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="MultiLabelSoftMarginLoss",
        input_size=(5, 10),
        target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
        reference_fn=lambda i, t, m: -(
            t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()
        ).sum()
        / i.numel(),
        check_gradgrad=False,
    ),
    dict(
        module_name="MultiMarginLoss",
        input_size=(5, 10),
        target_fn=lambda: torch.rand(5).mul(8).floor().long(),
        reference_fn=lambda i, t, m: multimarginloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="MultiMarginLoss",
        input_size=(10,),
        target_fn=lambda: torch.rand(1).mul(8).floor().long(),
        reference_fn=lambda i, t, m: multimarginloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        desc="1d",
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="MultiMarginLoss",
        constructor_args=(2,),
        cpp_constructor_args="torch::nn::MultiMarginLossOptions().p(2)",
        input_fn=lambda: torch.rand(5, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.rand(5).mul(8).floor().long(),
        reference_fn=lambda i, t, m: multimarginloss_reference(
            i, t, p=2, reduction=get_reduction(m)
        ),
        desc="p",
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="MultiMarginLoss",
        constructor_args=(1, 0.5),
        cpp_constructor_args="torch::nn::MultiMarginLossOptions().p(1).margin(0.5)",
        legacy_constructor_args=(1, None, 0.5),
        input_size=(5, 10),
        target_fn=lambda: torch.rand(5).mul(8).floor().long(),
        reference_fn=lambda i, t, m: multimarginloss_reference(
            i, t, margin=0.5, reduction=get_reduction(m)
        ),
        desc="margin",
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="MultiMarginLoss",
        constructor_args=(1, 1.0, torch.rand(10)),
        cpp_constructor_args="torch::nn::MultiMarginLossOptions().p(1).margin(1.).weight(torch::rand(10))",
        legacy_constructor_args=(1, torch.rand(10)),
        input_size=(5, 10),
        target_fn=lambda: torch.rand(5).mul(8).floor().long(),
        reference_fn=lambda i, t, m: multimarginloss_reference(
            i, t, weight=get_weight(m), reduction=get_reduction(m)
        ),
        desc="weights",
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="SmoothL1Loss",
        input_size=(5, 10),
        target_size=(5, 10),
        check_sum_reduction=True,
        reference_fn=lambda i, t, m: smoothl1loss_reference(
            i, t, reduction=get_reduction(m)
        ),
    ),
    dict(
        module_name="SoftMarginLoss",
        input_size=(5, 5),
        target_fn=lambda: torch.randn(5, 5).sign(),
        reference_fn=lambda i, t, m: softmarginloss_reference(
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
    ),
    dict(
        module_name="CosineEmbeddingLoss",
        input_fn=lambda: (torch.rand(15, 10), torch.rand(15, 10)),
        target_fn=lambda: torch.randn(15).sign(),
        reference_fn=lambda i, t, m: cosineembeddingloss_reference(
            i[0], i[1], t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
    ),
    dict(
        module_name="CosineEmbeddingLoss",
        constructor_args=(0.7,),
        cpp_constructor_args="torch::nn::CosineEmbeddingLossOptions().margin(0.7)",
        input_fn=lambda: (torch.rand(15, 10), torch.rand(15, 10)),
        target_fn=lambda: torch.randn(15).sign(),
        reference_fn=lambda i, t, m: cosineembeddingloss_reference(
            i[0], i[1], t, margin=0.7, reduction=get_reduction(m)
        ),
        desc="margin",
        check_sum_reduction=True,
    ),
    dict(
        module_name="MarginRankingLoss",
        input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),
        target_fn=lambda: torch.randn(50).sign(),
        reference_fn=lambda i, t, m: marginrankingloss_reference(
            i[0], i[1], t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
    ),
    dict(
        module_name="MarginRankingLoss",
        constructor_args=(0.5,),
        cpp_constructor_args="torch::nn::MarginRankingLossOptions().margin(0.5)",
        input_fn=lambda: (torch.randn(50).mul(10), torch.randn(50).mul(10)),
        target_fn=lambda: torch.randn(50).sign(),
        reference_fn=lambda i, t, m: marginrankingloss_reference(
            i[0], i[1], t, margin=0.5, reduction=get_reduction(m)
        ),
        desc="margin",
        check_sum_reduction=True,
    ),
]
# Additional criterion test descriptions (same dict schema as
# criterion_tests above): BCEWithLogits, N-d / scalar variants of existing
# losses, PoissonNLLLoss, and CTCLoss with int-list vs tensor lengths.
new_criterion_tests = [
    dict(
        module_name="BCEWithLogitsLoss",
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double(),
    ),
    dict(
        module_name="BCEWithLogitsLoss",
        constructor_args=(torch.rand(10),),
        cpp_constructor_args="torch::nn::BCEWithLogitsLossOptions().weight(torch::rand(10))",
        input_fn=lambda: torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(15, 10).gt(0).double(),
        desc="weights",
    ),
    dict(
        module_name="BCEWithLogitsLoss",
        constructor_args=(torch.rand(()),),
        cpp_constructor_args="torch::nn::BCEWithLogitsLossOptions().weight(torch::rand({}))",
        input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.randn(()).gt(0).double(),
        desc="scalar_weights",
    ),
    dict(
        module_name="NLLLoss",
        input_size=(2, 3, 5, 5),
        target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m: loss_reference_fns["NLLLossNd"](
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        desc="2d",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args_fn=lambda: (torch.rand(3),),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight(torch::rand(3))",
        input_size=(2, 3, 5, 5),
        target=torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m: loss_reference_fns["NLLLossNd"](
            i, t, weight=get_weight(m)
        ),
        desc="2d_weights",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        constructor_args=(None, None, 1),
        cpp_constructor_args="torch::nn::NLLLossOptions().weight({}).ignore_index(1)",
        input_size=(2, 3, 5, 5),
        target_fn=lambda: torch.rand(2, 5, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m: loss_reference_fns["NLLLossNd"](
            i, t, ignore_index=1
        ),
        desc="2d_ignore_index",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        input_size=(2, 3, 5, 5, 2, 2),
        target_fn=lambda: torch.rand(2, 5, 5, 2, 2).mul(3).floor().long(),
        reference_fn=lambda i, t, m: loss_reference_fns["NLLLossNd"](
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        desc="higher_dim",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="NLLLoss",
        input_size=(2, 3, 5),
        target_fn=lambda: torch.rand(2, 5).mul(3).floor().long(),
        reference_fn=lambda i, t, m: loss_reference_fns["NLLLossNd"](
            i, t, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        desc="dim_is_3",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="PoissonNLLLoss",  # Default is log_input=True, full=False
        input_size=(2, 3, 4, 5),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        reference_fn=lambda i, t, _: (i.exp() - t.mul(i)).mean(),
        desc="no_full_loss",
    ),
    dict(
        module_name="PoissonNLLLoss",
        constructor_args=(False, False),  # log_input=False, full=False
        cpp_constructor_args="torch::nn::PoissonNLLLossOptions().log_input(false).full(false)",
        input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        reference_fn=lambda i, t, _: (i - t.mul((i + 1e-8).log())).mean(),
        desc="no_full_loss_no_log_input",
    ),
    dict(
        module_name="PoissonNLLLoss",
        constructor_args=(True, True),  # log_input=True, full=True
        cpp_constructor_args="torch::nn::PoissonNLLLossOptions().log_input(true).full(true)",
        input_size=(2, 3, 4, 5),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        reference_fn=lambda i, t, _: (
            i.exp()
            - t.mul(i)
            + (t.mul(t.log()) - t + 0.5 * (2.0 * pi * t).log()).masked_fill(t <= 1, 0)
        ).mean(),
        desc="full_loss",
    ),
    dict(
        module_name="PoissonNLLLoss",
        constructor_args=(False, True),  # log_input=False, full=True
        cpp_constructor_args="torch::nn::PoissonNLLLossOptions().log_input(false).full(true)",
        input_fn=lambda: torch.randn(2, 3, 4, 5).abs_().add_(0.001),
        target_fn=lambda: torch.randn(2, 3, 4, 5).floor_().abs_(),
        reference_fn=lambda i, t, _: (
            i
            - t.mul((i + 1e-8).log())
            + (t.mul(t.log()) - t + 0.5 * (2.0 * pi * t).log()).masked_fill(t <= 1, 0)
        ).mean(),
        desc="full_loss_no_log_input",
    ),
    dict(
        module_name="L1Loss",
        input_size=(),
        target_size=(),
        reference_fn=lambda i, t, _: 1.0 / i.numel() * (i - t).abs().sum(),
        desc="scalar",
    ),
    dict(
        module_name="KLDivLoss",
        input_fn=lambda: torch.rand(()).log(),
        target_fn=lambda: torch.rand(()),
        reference_fn=lambda i, t, m: kldivloss_reference(i, t, get_reduction(m)),
        check_sum_reduction=True,
        desc="scalar",
    ),
    dict(
        module_name="MSELoss",
        input_size=(),
        target_size=(),
        reference_fn=lambda i, t, m: (
            (i - t).abs().pow(2).sum()
            / (i.numel() if get_reduction(m) == "mean" else 1)
        ),
        check_sum_reduction=True,
        desc="scalar",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="MSELoss",
        input_fn=lambda: torch.ones(5, 68, 64, 64, dtype=torch.float) / 10,
        target_fn=lambda: torch.zeros(5, 68, 64, 64, dtype=torch.float),
        reference_fn=lambda i, t, m: (
            (i - t).abs().pow(2).sum()
            / (i.numel() if get_reduction(m) == "mean" else 1)
        ),
        check_forward_only=True,
        desc="prec",
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="BCELoss",
        constructor_args_fn=lambda: (torch.rand(()),),
        cpp_constructor_args="torch::nn::BCELossOptions().weight(torch::rand({}))",
        input_fn=lambda: torch.rand(()).clamp_(1e-2, 1 - 1e-2),
        target_fn=lambda: torch.rand(()).gt(0).double(),
        reference_fn=lambda i, t, m: -(
            (t * i.log() + (1 - t) * (1 - i).log()) * get_weight(m)
        ).sum()
        / (i.numel() if get_reduction(m) == "mean" else 1),
        desc="scalar_weights",
        check_gradgrad=False,
        check_bfloat16=TEST_WITH_ROCM,
    ),
    dict(
        module_name="HingeEmbeddingLoss",
        constructor_args=(0.5,),
        cpp_constructor_args="torch::nn::HingeEmbeddingLossOptions().margin(0.5)",
        input_size=(),
        target_fn=lambda: torch.randn(()).gt(0).double().mul_(2).sub(1),
        desc="scalar_margin",
        check_sum_reduction=True,
    ),
    dict(
        module_name="SmoothL1Loss",
        input_size=(),
        target_size=(),
        check_sum_reduction=True,
        reference_fn=lambda i, t, m: smoothl1loss_reference(
            i, t, reduction=get_reduction(m)
        ),
        desc="scalar",
    ),
    dict(
        module_name="MultiLabelSoftMarginLoss",
        constructor_args=(torch.rand(10),),
        cpp_constructor_args="torch::nn::MultiLabelSoftMarginLossOptions().weight(torch::rand(10))",
        input_fn=lambda: torch.randn(5, 10),
        target_fn=lambda: torch.rand(5, 10).mul(2).floor(),
        reference_fn=lambda i, t, m: -(
            (t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * get_weight(m)
        ).sum()
        / (
            i.numel()
            if get_reduction(m) == "mean"
            else i.size(1)
            if get_reduction(m) == "sum"
            else 1
        ),
        desc="weights",
        check_sum_reduction=True,
        check_gradgrad=False,
    ),
    dict(
        module_name="CTCLoss",
        constructor_args=(14,),  # blank=14
        extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
        reference_fn=lambda i, t, il, tl, m: ctcloss_reference(
            i, t, il, tl, blank=14, reduction=get_reduction(m)
        ),
        desc="lengths_intlists",
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
        # `CTCLoss` in C++ frontend doesn't accept integer list for `input_lengths` or `target_lengths`
        test_cpp_api_parity=False,
    ),
    dict(
        module_name="CTCLoss",
        constructor_args=(14,),  # blank=14
        cpp_constructor_args="torch::nn::CTCLossOptions().blank(14)",
        extra_args=(
            torch.tensor([50, 50, 50]),
            torch.tensor([30, 25, 20]),
        ),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
        reference_fn=lambda i, t, il, tl, m: ctcloss_reference(
            i, t, il, tl, blank=14, reduction=get_reduction(m)
        ),
        desc="lengths_tensors",
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
    ),
    # Test is flaky
    # See https://github.com/pytorch/pytorch/issues/29380.
    # dict(
    #     module_name='CTCLoss',
    #     desc='1d_target',
    #     constructor_args=(14,),  # blank=14
    #     extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
    #     input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
    #     target_fn=lambda: torch.randint(0, 14, (3, 30), dtype=torch.long),
    #     reference_fn=lambda i, t, il, tl, m:
    #         ctcloss_reference(i, t, il, tl, blank=14, reduction=get_reduction(m)),
    #     check_sum_reduction=True,
    #     check_gradgrad=False,
    #     check_half=False,
    # ),
    dict(
        module_name="CTCLoss",
        desc="2d_int_target_lengths_intlists",
        constructor_args=(0,),  # blank=0
        extra_args=([50, 50, 50], [30, 25, 20]),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
        reference_fn=lambda i, t, il, tl, m: ctcloss_reference(
            i, t, il, tl, blank=0, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
        convert_target=False,
        # `CTCLoss` in C++ frontend doesn't accept integer list for `input_lengths` or `target_lengths`
        test_cpp_api_parity=False,
    ),
    dict(
        module_name="CTCLoss",
        desc="2d_int_target_lengths_tensors",
        constructor_args=(0,),  # blank=0
        cpp_constructor_args="torch::nn::CTCLossOptions().blank(0)",
        extra_args=(
            torch.tensor([50, 50, 50]),
            torch.tensor([30, 25, 20]),
        ),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
        reference_fn=lambda i, t, il, tl, m: ctcloss_reference(
            i, t, il, tl, blank=0, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
        convert_target=False,
    ),
    dict(
        module_name="CTCLoss",
        desc="2d_lengths_tensors",
        constructor_args=(0,),  # blank=0
        cpp_constructor_args="torch::nn::CTCLossOptions().blank(0)",
        extra_args=(
            torch.tensor([50, 50, 50]),
            torch.tensor([30, 25, 20]),
        ),  # input_lengths, target_lengths
        input_fn=lambda: torch.randn(50, 3, 15).log_softmax(2),
        target_fn=lambda: torch.randint(1, 15, (3, 30), dtype=torch.int),
        reference_fn=lambda i, t, il, tl, m: ctcloss_reference(
            i, t, il, tl, blank=0, reduction=get_reduction(m)
        ),
        check_sum_reduction=True,
        check_gradgrad=False,
        check_half=False,
        convert_target=False,
    ),
]
class NNTestCase(TestCase):
    """TestCase base with jacobian-based gradient checking for nn modules.

    NOTE(review): relies on hooks not defined here (_forward, _backward,
    _forward_criterion, _backward_criterion, _get_parameters,
    _zero_grad_parameters) -- presumably provided by a concrete subclass
    elsewhere in this file; confirm against that subclass.
    """

    def _jacobian(self, input, num_out):
        """Allocate zeroed jacobian buffers matching the (possibly nested)
        structure of `input`, each of shape (input.nelement(), num_out)."""
        if isinstance(input, tuple):
            return tuple(self._jacobian(elem, num_out) for elem in input)
        elif isinstance(input, list):
            return [self._jacobian(elem, num_out) for elem in input]
        else:
            return torch.zeros(input.nelement(), num_out)

    def _flatten_tensors(self, x):
        """Flatten a tensor (densifying sparse ones) or a nested structure
        of tensors into 1-D views, preserving the nesting as tuples."""
        if isinstance(x, torch.Tensor):
            if x.is_sparse:
                return x.to_dense().view(-1)
            else:
                return x.view(-1)
        else:
            return tuple(self._flatten_tensors(a) for a in x)

    def _zero_grad_input(self, input):
        """Zero and detach the .grad of every tensor in `input` (recursively
        for nested structures); tensors without grads are left untouched."""
        if isinstance(input, torch.Tensor):
            if input.requires_grad and input.grad is not None:
                input.grad.zero_()
                input.grad.detach_()
        else:
            for i in input:
                self._zero_grad_input(i)

    def _analytical_jacobian(
        self, module, input, jacobian_input=True, jacobian_parameters=True
    ):
        """Build the jacobian column-by-column by backpropagating one-hot
        grad-outputs through the module.

        Returns a tuple of (input jacobian, parameter jacobian), including
        only the parts requested by the two boolean flags.
        """
        output = self._forward(module, input)
        output_size = output.nelement()
        if jacobian_input:
            jacobian_inp = self._jacobian(input, output_size)
            flat_jacobian_input = list(iter_tensors(jacobian_inp))
        if jacobian_parameters:
            num_param = sum(p.numel() for p in self._get_parameters(module)[0])
            jacobian_param = torch.zeros(num_param, output_size)
        # One backward pass per output element, with a one-hot grad-output.
        for i in range(output_size):
            param, d_param = self._get_parameters(module)
            # make non grad zeros
            d_param = [
                torch.zeros_like(p) if d is None else d
                for (p, d) in zip(param, d_param)
            ]
            d_out = torch.zeros_like(output)
            flat_d_out = d_out.view(-1)
            flat_d_out[i] = 1
            if jacobian_parameters:
                self._zero_grad_parameters(module)
            # Tensors will accumulate gradient from multiple steps
            if jacobian_input:
                self._zero_grad_input(input)
            d_input = self._backward(module, input, output, d_out)
            if jacobian_input:
                for jacobian_x, d_x in zip(flat_jacobian_input, iter_tensors(d_input)):
                    jacobian_x[:, i] = d_x.contiguous().view(-1)
            if jacobian_parameters:
                jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)
        res = tuple()
        if jacobian_input:
            res += (jacobian_inp,)
        if jacobian_parameters:
            res += (jacobian_param,)
        return res

    def _numerical_jacobian(
        self, module, input, jacobian_input=True, jacobian_parameters=True
    ):
        """Compute the same jacobians by finite differences (eps=1e-6)
        via torch.autograd.gradcheck's get_numerical_jacobian helper."""
        def fw(input):
            return self._forward(module, input).detach()
        res = tuple()
        if jacobian_input:
            res += (get_numerical_jacobian(fw, input, eps=1e-6),)
        if jacobian_parameters:
            param, _ = self._get_parameters(module)
            res += (
                torch.cat(
                    [get_numerical_jacobian(fw, input, p, eps=1e-6) for p in param], 0
                ),
            )
        return res

    def check_jacobian(self, module, input, jacobian_input=True):
        """Assert analytical and numerical jacobians agree within PRECISION.
        Parameter jacobians are checked only if the module has parameters."""
        jacobian_parameters = bool(self._get_parameters(module)[0])
        analytical = self._analytical_jacobian(
            module, input, jacobian_input, jacobian_parameters
        )
        numerical = self._numerical_jacobian(
            module, input, jacobian_input, jacobian_parameters
        )
        analytical_t = list(iter_tensors(analytical))
        numerical_t = list(iter_tensors(numerical))
        # TODO: compare structure
        if input.numel() != 0:
            self.assertLessEqual(
                max(
                    a.add(n, alpha=-1).abs().max()
                    for a, n in zip(analytical_t, numerical_t)
                ),
                PRECISION,
            )

    def check_criterion_jacobian(self, criterion, input, target):
        """Assert a criterion's analytical input-gradient matches a central
        finite-difference estimate (eps=1e-6) within PRECISION."""
        eps = 1e-6
        self._forward_criterion(criterion, input, target)
        analytical_d_x = self._backward_criterion(criterion, input, target)
        numerical_d_x = deepcopy(analytical_d_x)
        input_t = iter_tensors(input)
        numerical_t = iter_tensors(numerical_d_x)
        for x, d_x in zip(input_t, numerical_t):
            x = x.view(-1).data
            d_x = d_x.view(-1).data
            for i in range(x.nelement()):
                # Central difference: perturb one element at a time.
                original = x[i].item()
                x[i] = original + eps
                fx1 = self._forward_criterion(criterion, input, target)
                x[i] = original - eps
                fx2 = self._forward_criterion(criterion, input, target)
                deriv = (fx1 - fx2) / (2.0 * eps)
                d_x[i] = float(deriv)
                x[i] = original
        # TODO: check structure
        analytical_t = list(iter_tensors(analytical_d_x))
        numerical_t = list(iter_tensors(numerical_d_x))
        self.assertLessEqual(
            max(
                a.add(n, alpha=-1).abs().max()
                for a, n in zip(analytical_t, numerical_t)
            ),
            PRECISION,
        )
class TestBase(object):
    """Base class for declaratively described nn tests.

    Each test is configured through keyword arguments: for every required
    argument ``name`` the caller may pass ``name`` (a concrete value),
    ``name_fn`` (a zero-arg factory), or ``name_size`` (tensor size(s) to
    fill with ``torch.randn``). Values are generated lazily on first access
    and cached for the lifetime of the test instance.
    """

    _required_arg_names = {"constructor_args", "input", "extra_args"}

    def __init__(
        self, constructor, desc="", reference_fn=None, fullname=None, **kwargs
    ):
        self.desc = desc
        self.fullname = fullname
        self.constructor = constructor
        self.reference_fn = reference_fn
        # Validate that every required argument was supplied in one of its
        # three accepted forms; args-style ones silently default to ().
        for name in self._required_arg_names:
            supplied = (
                name in kwargs
                or name + "_fn" in kwargs
                or name + "_size" in kwargs
            )
            if supplied:
                continue
            if name in {"constructor_args", "extra_args"}:
                kwargs[name] = tuple()
            else:
                raise ValueError(
                    "{}: Specify {} by a value, a function to generate it, or it's size!".format(
                        self.get_name(), name
                    )
                )
        self._extra_kwargs = kwargs
        self._arg_cache = {}

    def get_name(self):
        """Return the unittest method name for this test description."""
        if self.fullname is not None:
            return "test_" + self.fullname
        base = "test_" + self.constructor.__name__
        return base + "_" + self.desc if self.desc else base

    def _unpack(self, value):
        """Recursively copy nested iterables, passing tensors through as-is."""
        if isinstance(value, torch.Tensor):
            return value
        if is_iterable(value):
            return type(value)(self._unpack(v) for v in value)
        return value

    @property
    def constructor_args(self):
        return self._get_arg("constructor_args", True)

    @property
    def extra_args(self):
        return self._get_arg("extra_args", True)

    def _get_arg(self, name, unpack):
        """Materialize (and cache) the argument `name` from whichever of
        value / factory / size forms the caller provided."""
        assert name in self._required_arg_names
        if name not in self._arg_cache:
            fn_name = name + "_fn"
            size_name = name + "_size"
            if name in self._extra_kwargs:
                value = self._extra_kwargs[name]
            elif fn_name in self._extra_kwargs:
                value = self._extra_kwargs[fn_name]()
            else:
                assert (
                    size_name in self._extra_kwargs
                ), "Missing `{}`, `{}` or `{}` for {}".format(
                    name, size_name, fn_name, self.get_name()
                )

                def map_tensor_sizes(sizes):
                    # Lists describe nested structures; tensors are taken as
                    # literal (double) values; anything else is a randn size.
                    if isinstance(sizes, list):
                        return [map_tensor_sizes(s) for s in sizes]
                    if isinstance(sizes, torch.Tensor):
                        return sizes.double()
                    return torch.randn(sizes)

                value = map_tensor_sizes(self._extra_kwargs[size_name])
            self._arg_cache[name] = value
        cached = self._arg_cache[name]
        return self._unpack(cached) if unpack else cached

    def _get_input(self, unpack=True):
        return self._get_arg("input", unpack)

    def __call__(self, test_case):
        raise NotImplementedError
class ModuleTest(TestBase):
    """Runs a declaratively described nn module test: reference-value
    comparison, noncontiguous-input consistency, pickling round-trip, and
    (via test_cuda) CPU-vs-GPU forward/backward/double-backward parity."""

    def __init__(self, *args, **kwargs):
        super(ModuleTest, self).__init__(*args, **kwargs)
        # Optional knobs from the test dict; defaults enable all checks.
        self.jacobian_input = kwargs.get("jacobian_input", True)
        self.should_test_cuda = kwargs.get("test_cuda", True)
        self.should_test_pickle = kwargs.get("pickle", True)
        self.check_gradgrad = kwargs.get("check_gradgrad", True)
        self.FIXME_no_cuda_gradgrad_comparison = kwargs.get(
            "FIXME_no_cuda_gradgrad_comparison", False
        )
        self.precision = kwargs.get("precision", 2e-4)
        self.check_forward_only = kwargs.get("check_forward_only", False)

    def __call__(self, test_case):
        # The module under test runs on the XPU device.
        module = self.constructor(*self.constructor_args).to("xpu")
        input = self._get_input()
        if self.reference_fn is not None:
            out = test_case._forward(module, input)
            ref_input = deepcopy(input)
            ref_module = deepcopy(module)
            expected_out = self.reference_fn(
                ref_input, test_case._get_parameters(module)[0], ref_module
            )
            test_case.assertEqual(out, expected_out)
        # NOTE(review): float64 backward for these conv modules is skipped
        # entirely -- presumably unsupported on this backend; confirm.
        unsupported_backward_modules = [
            "Conv1d",
            "Conv2d",
            "Conv3d",
            "ConvTranspose1d",
            "ConvTranspose2d",
            "ConvTranspose3d",
        ]
        if (
            module._get_name() in unsupported_backward_modules
            and input.dtype == torch.float64
        ):
            return
        if self.check_forward_only:
            return
        self.test_noncontig(test_case, module, input)
        if self.should_test_pickle:
            # TODO: do this with in-memory files as soon as torch.save will support it
            with TemporaryFile() as f:
                test_case._forward(module, input)
                torch.save(module, f)
                f.seek(0)
                module_copy = torch.load(f)
                test_case.assertEqual(
                    test_case._forward(module, input),
                    test_case._forward(module_copy, input),
                )
        self._do_test(test_case, module, input)

    def noncontiguize(self, obj):
        """Return a noncontiguous tensor (or nested list of them) with the
        same values as `obj`, for exercising noncontiguous code paths."""
        if isinstance(obj, list):
            return [self.noncontiguize(o) for o in obj]
        tensor = obj
        ndim = tensor.dim()
        # Always making only the last dimension noncontiguous is easy to hide
        # bugs because .view(-1) will still work. So try to find a dim with size
        # > 1 and make that non-contiguous, i.e., stack + select on the
        # dimension directly after that.
        dim = ndim
        for d in range(ndim):
            if tensor.size(d) > 1:
                dim = d + 1
                break
        noncontig = (
            torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach()
        )
        assert (
            noncontig.numel() == 1
            or noncontig.numel() == 0
            or not noncontig.is_contiguous()
        )
        noncontig.requires_grad = tensor.requires_grad
        return noncontig

    def test_noncontig(self, test_case, module, input):
        """Check forward/backward results are identical for all four
        contiguous/noncontiguous combinations of input and grad_output."""
        # check no scalars, can't make non-contig
        if isinstance(input, torch.Tensor) and input.dim() == 0:
            return
        if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)):
            return
        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output.new(output.shape).normal_()
            output = output.clone()
        d_input = deepcopy(test_case._backward(module, input, output, grad_output))
        d_param = deepcopy(test_case._get_parameters(module)[1])
        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            # Some ops, e.g., nn.Flatten, return gradient that shares
            # storage with the grad_output. Hence we copy here.
            go = deepcopy(grad_output if contig_g else nc_grad_output)
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                out = test_case._forward(module, i)
                grad = test_case._backward(module, i, out, go)
                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(test_case._get_parameters(module)[1], d_param)

    def test_cuda(self, test_case):
        """Run the module on CPU and CUDA with identical parameters and
        compare forward, backward, and (optionally) double-backward results."""
        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest("Excluded from CUDA tests")
        try:
            cpu_input = self._get_input()
            type_map = {"torch.DoubleTensor": torch.cuda.FloatTensor}
            gpu_input = to_gpu(cpu_input, type_map=type_map)
            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args).float().cuda()
            cpu_param = test_case._get_parameters(cpu_module)
            gpu_param = test_case._get_parameters(gpu_module)
            # Synchronize parameters so both modules compute the same function.
            for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]):
                gpu_p.data.copy_(cpu_p)
            test_case._zero_grad_input(cpu_input)
            test_case._zero_grad_input(gpu_input)
            test_case._zero_grad_parameters(cpu_module)
            test_case._zero_grad_parameters(gpu_module)
            cpu_output = test_case._forward(cpu_module, cpu_input)
            gpu_output = test_case._forward(gpu_module, gpu_input)
            test_case.assertEqual(cpu_output, gpu_output, self.precision)
            # Run backwards on CPU and GPU and compare results
            for _ in range(5):
                cpu_gradOutput = cpu_output.clone().normal_()
                gpu_gradOutput = cpu_gradOutput.type("torch.cuda.FloatTensor")
                cpu_gradInput = test_case._backward(
                    cpu_module, cpu_input, cpu_output, cpu_gradOutput
                )
                gpu_gradInput = test_case._backward(
                    gpu_module, gpu_input, gpu_output, gpu_gradOutput
                )
                test_case.assertEqual(cpu_gradInput, gpu_gradInput, self.precision)
                for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]):
                    test_case.assertEqual(cpu_d_p, gpu_d_p, self.precision)
            # Run double-backwards on CPU and GPU and compare results
            if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison:
                cpu_output = cpu_module(cpu_input)
                gpu_output = gpu_module(gpu_input)
                cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True)
                gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach()
                gpu_gradOutput.requires_grad = True
                cpu_gradInputs = torch.autograd.grad(
                    cpu_output,
                    (cpu_input,) + tuple(cpu_module.parameters()),
                    cpu_gradOutput,
                    create_graph=True,
                )
                gpu_gradInputs = torch.autograd.grad(
                    gpu_output,
                    (gpu_input,) + tuple(gpu_module.parameters()),
                    gpu_gradOutput,
                    create_graph=True,
                )
                for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs):
                    test_case.assertEqual(cpu_d_i, gpu_d_i, self.precision)
                # We mix output into the second backwards computation so that
                # torch.autograd.grad doesn't complain that some inputs
                # are unreachable (which can happen if you differentiate
                # only on the gradient.
                cpu_gg = torch.autograd.grad(
                    cpu_output.sum() + sum(map(lambda x: x.sum(), cpu_gradInputs)),
                    (cpu_input, cpu_gradOutput) + tuple(cpu_module.parameters()),
                    retain_graph=True,
                )
                gpu_gg = torch.autograd.grad(
                    gpu_output.sum() + sum(map(lambda x: x.sum(), gpu_gradInputs)),
                    (gpu_input, gpu_gradOutput) + tuple(gpu_module.parameters()),
                    retain_graph=True,
                )
                test_case.assertEqual(cpu_gradInput, gpu_gradInput, self.precision)
                for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg):
                    test_case.assertEqual(cpu_d_p, gpu_d_p, self.precision)
            self.test_noncontig(test_case, gpu_module, gpu_input)
        except NotImplementedError:
            pass
        # TODO: remove this after CUDA scatter_ is implemented
        except AttributeError as e:
            if (
                len(e.args) == 1
                and "'FloatTensor' object has no attribute 'scatter_'" in e.args[0]
            ):
                pass
            else:
                raise
class CriterionTest(TestBase):
    """TestBase variant for loss (criterion) modules, which take a target.

    Runs the module against an optional reference function and, in
    ``test_cuda``, compares CPU vs. GPU forward/backward results.
    """

    # A criterion needs a ``target`` in addition to TestBase's required args.
    _required_arg_names = TestBase._required_arg_names.union({"target"})

    def __init__(self, *args, **kwargs):
        super(CriterionTest, self).__init__(*args, **kwargs)
        self.should_test_cuda = kwargs.get("test_cuda", True)
        self.check_forward_only = kwargs.get("check_forward_only", True)

    def _get_target(self):
        return self._get_arg("target", True)

    def __call__(self, test_case):
        """Run the forward (and optionally Jacobian/extra) checks on *test_case*."""
        module = self.constructor(*self.constructor_args)
        input = self._get_input()
        # Check that these methods don't raise errors
        module.__repr__()
        str(module)
        target = self._get_target()
        if self.reference_fn is not None:
            out = test_case._forward_criterion(
                module, input, target, extra_args=self.extra_args
            )
            # Deep-copy so the reference function cannot mutate the tensors
            # that were just fed to the module under test.
            ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,)
            expected_out = self.reference_fn(*ref_args)
            test_case.assertEqual(out, expected_out)
        if self.check_forward_only:
            return
        test_case.check_criterion_jacobian(module, input, target)
        self._do_extra_tests(test_case, module, input, target)

    def test_cuda(self, test_case):
        """Compare CPU and CUDA forward/backward of the criterion."""
        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest("Excluded from CUDA tests")
        try:
            cpu_input = self._get_input()
            # Double tensors are run in float32 on the GPU side.
            type_map = {
                "torch.DoubleTensor": torch.cuda.FloatTensor,
            }
            gpu_input = to_gpu(cpu_input, type_map=type_map)
            cpu_target = self._get_target()
            gpu_target = to_gpu(cpu_target, type_map=type_map)
            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args).float().cuda()
            cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target)
            gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target)
            # Looser tolerance (4e-4) accounts for the float32 GPU path.
            test_case.assertEqual(cpu_output, gpu_output, 4e-4)
            gradOutput = torch.randn(())
            cpu_gradInput = test_case._backward_criterion(
                cpu_module, cpu_input, cpu_target, gradOutput
            )
            gpu_gradInput = test_case._backward_criterion(
                gpu_module, gpu_input, gpu_target, gradOutput
            )
            test_case.assertEqual(cpu_gradInput, gpu_gradInput, 4e-4)
        except NotImplementedError:
            # Criteria with no CUDA implementation are silently skipped.
            pass

    def _do_extra_tests(self, test_case, module, input, target):
        # Hook for subclasses (see NewCriterionTest); base does nothing.
        pass
class InputVariableMixin(object):
    """Mixin that marks every floating-point input tensor as requiring grad.

    Non-tensor containers are rebuilt recursively with their original type.
    """

    def _get_input(self):
        raw = TestBase._get_input(self, False)

        def _enable_grad(item):
            if not isinstance(item, torch.Tensor):
                # Rebuild the container with the same type, recursing into it.
                return type(item)(_enable_grad(elem) for elem in item)
            if item.is_floating_point():
                item.requires_grad = True
            return item

        return _enable_grad(raw)
class NewModuleTest(InputVariableMixin, ModuleTest):
    """ModuleTest variant operating on autograd-enabled inputs.

    Adds grad/gradgrad checks, an optional in-place-variant comparison, and
    device/dtype migration checks (CPU<->CUDA, float/double/half casts).
    """

    def __init__(self, *args, **kwargs):
        super(NewModuleTest, self).__init__(*args, **kwargs)
        self.cudnn = kwargs.get("cudnn", False)
        self.check_inplace = kwargs.get("check_inplace", False)
        self.check_gradgrad = kwargs.get("check_gradgrad", True)
        self.skip_double = kwargs.get("skip_double", False)

    def _do_test(self, test_case, module, input):
        """Run Jacobian, gradgrad, in-place, and device/dtype checks."""
        test_case.check_jacobian(module, input, self.jacobian_input)
        if self.check_gradgrad:
            # could probably unify check_jacobian above with this.
            params = tuple(x for x in module.parameters())
            _assertGradAndGradgradChecks(
                test_case,
                lambda x, *args, **kw: test_case._forward(module, x),
                (input,) + params,
            )
        # check if module can be printed
        module.__repr__()
        if self.check_inplace:
            # check if the inplace variant of the module gives the same result
            # as the out-of-place
            module_ip = self.constructor(*self.constructor_args, inplace=True)
            input_version = input._version
            with freeze_rng_state():
                output = module(input)
            # The out-of-place call must not bump the input's version counter.
            test_case.assertEqual(input._version, input_version)
            input_ip = deepcopy(input)
            if input.device.type == "xpu":
                input_ip.requires_grad = True
            input_ip_clone = input_ip.clone()
            with freeze_rng_state():
                output_ip = module_ip(input_ip_clone)
            if input.device == torch.device("cpu"):
                # The in-place call, by contrast, must have mutated its input.
                test_case.assertNotEqual(input_ip_clone._version, input_version)
            test_case.assertEqual(output, output_ip)
            grad = output.data.clone().normal_()
            input.grad.data.zero_()
            output.backward(grad)
            output_ip.backward(grad)
            test_case.assertEqual(input.grad, input_ip.grad)
        if isinstance(input, torch.LongTensor) and TEST_CUDA:
            # check that cuda() moves module parameters to correct GPU device,
            # and that float() casts parameters correctly
            input = input.cuda()
            module.float().cuda()
            module(input)
            for p in module.parameters():
                test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                test_case.assertEqual(p.get_device(), 0)
            if torch.cuda.device_count() > 1:
                input = input.cuda(1)
                module.cuda(1)
                with torch.cuda.device(1):
                    module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 1)
        else:
            # check that float()/double() casters work correctly
            # to float
            if input.device == torch.device("cpu"):
                if not isinstance(input, torch.LongTensor):
                    input = input.float()
                module.float()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.FloatTensor)
                # and back to double
                if not isinstance(input, torch.LongTensor):
                    input = input.double()
                module.double()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.DoubleTensor)
            # else:  # for xpu
            #     print()
            #     if not isinstance(input, torch.xpu.LongTensor):
            #         input = input.float()
            #     module.float()
            #     module(input)
            #     for p in module.parameters():
            #         test_case.assertIsInstance(p, torch.xpu.FloatTensor)
            #     # and back to double
            #     if not isinstance(input, torch.xpu.LongTensor):
            #         input = input.double()
            #     module.double()
            #     module(input)
            #     for p in module.parameters():
            #         test_case.assertIsInstance(p, torch.xpu.DoubleTensor)
            if TEST_CUDA and self.should_test_cuda:
                # check that cuda() moves module parameters to correct GPU device,
                # and that float() casts parameters correctly
                # to GPU0
                input = input.float().cuda()
                module.float().cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 0)
                # to CPU
                input = input.cpu()
                module.cpu()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.FloatTensor)
                # back to GPU0
                input = input.cuda()
                module.cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                    test_case.assertEqual(p.get_device(), 0)
                # test that forwards of module runs correctly without cuDNN
                if self.cudnn:
                    with torch.backends.cudnn.flags(enabled=False):
                        module(input)
                    for p in module.parameters():
                        test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                        test_case.assertEqual(p.get_device(), 0)
                if torch.cuda.device_count() >= 2:
                    # test cross-GPU transfer works
                    # to GPU1
                    input = input.cuda(1)
                    module.cuda(1)
                    with torch.cuda.device(1):
                        module(input)
                    for p in module.parameters():
                        test_case.assertIsInstance(p, torch.cuda.FloatTensor)
                        test_case.assertEqual(p.get_device(), 1)
                if not self.skip_double:
                    # test double()
                    input = input.double().cuda()
                    module.double().cuda()
                    module(input)
                    for p in module.parameters():
                        test_case.assertIsInstance(p, torch.cuda.DoubleTensor)
                        test_case.assertEqual(p.get_device(), 0)
                # test half()
                input = input.half().cuda()
                module.half().cuda()
                module(input)
                for p in module.parameters():
                    test_case.assertIsInstance(p, torch.cuda.HalfTensor)
                    test_case.assertEqual(p.get_device(), 0)

    def _get_target(self):
        return self._get_arg("target", False)

    @property
    def constructor_args(self):
        return self._get_arg("constructor_args", False)
class NewCriterionTest(InputVariableMixin, CriterionTest):
    """CriterionTest variant for autograd-enabled inputs.

    Adds gradcheck/gradgradcheck of the criterion and dtype-parameterized
    CPU vs. CUDA comparisons (including half/bfloat16 paths).
    """

    # TODO: check that criterions don't ignore grad_output
    def __init__(self, *args, **kwargs):
        super(NewCriterionTest, self).__init__(*args, **kwargs)
        self.check_gradgrad = kwargs.get("check_gradgrad", True)
        self.check_half = kwargs.get("check_half", True)
        self.check_bfloat16 = kwargs.get("check_bfloat16", False)
        self.convert_target = kwargs.get("convert_target", True)

    def _do_extra_tests(self, test_case, module, input, target):
        """Run first/second-order gradient checks w.r.t. input and parameters."""
        if not self.check_gradgrad:
            return
        test_case.assertFalse(target.requires_grad)
        params = tuple(x for x in module.parameters())
        if not isinstance(input, tuple):
            inputs = (input,) + params

            def apply_fn(input, *params):
                return module(input, target)

        else:
            inputs = input + params

            def apply_fn(input1, input2, *params):
                return module(input1, input2, target)

        # TODO: we don't pass `target` as part of inputs because we don't
        # currently compute the gradient w.r.t. target for loss functions.
        gradcheck(apply_fn, inputs)
        gradgradcheck(apply_fn, inputs)

    def test_cuda(self, test_case, dtype=None, extra_args=None):
        """Compare CPU and CUDA forward/backward, optionally under *dtype*."""

        def convert_dtype(obj, dtype, requires_grad=False):
            # Tensors are detached and cast; tuples are converted element-wise;
            # anything else passes through unchanged.
            # Fix: a duplicate, unreachable `elif isinstance(obj, torch.Tensor)`
            # branch was removed here (behavior is unchanged).
            if isinstance(obj, torch.Tensor):
                return obj.detach().to(dtype=dtype).requires_grad_(requires_grad)
            elif isinstance(obj, tuple):
                return tuple(convert_dtype(o, dtype, requires_grad) for o in obj)
            else:
                return obj

        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest("Excluded from CUDA tests")
        try:
            cpu_input = self._get_input()
            cpu_target = self._get_target()
            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args)
            # Convert input, target and module parameters to dtype
            if dtype is not None:
                cpu_input = convert_dtype(cpu_input, dtype, True)
                # NLLLoss requires target to be LongTensor
                if not isinstance(cpu_target, torch.LongTensor) and self.convert_target:
                    cpu_target = convert_dtype(cpu_target, dtype)
                cpu_module.type(dtype)
                gpu_module.type(dtype)
            # GPU setup
            gpu_input = to_gpu(cpu_input)
            gpu_target = to_gpu(cpu_target)
            gpu_module.cuda()
            # torch.HalfTensor doesn't support most operations, converting back to default
            if dtype in {torch.half, torch.bfloat16}:
                cpu_input = self._get_input()
                cpu_target = self._get_target()
                # Loss modules with weights require consistent input/module weight types
                cpu_module = self.constructor(*self.constructor_args)
            cpu_output = test_case._forward_criterion(
                cpu_module, cpu_input, cpu_target, extra_args=extra_args
            )
            gpu_output = test_case._forward_criterion(
                gpu_module, gpu_input, gpu_target, extra_args=extra_args
            )
            # dtype can be None, so set precision in this way instead of a precision map
            test_case.assertEqual(
                cpu_output,
                gpu_output,
                1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4,
            )
            cpu_gradInput = test_case._backward_criterion(
                cpu_module, cpu_input, cpu_target, extra_args=extra_args
            )
            gpu_gradInput = test_case._backward_criterion(
                gpu_module, gpu_input, gpu_target, extra_args=extra_args
            )
            test_case.assertEqual(
                cpu_gradInput,
                gpu_gradInput,
                1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4,
            )
        except NotImplementedError:
            # Criteria with no CUDA implementation are silently skipped.
            pass

    def _get_target(self):
        return self._get_arg("target", False)

    @property
    def constructor_args(self):
        return self._get_arg("constructor_args", False)

    @property
    def extra_args(self):
        return self._get_arg("extra_args", False)
| 200,725 | 35.986549 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ipex_custom_op.py | import torch
import intel_extension_for_pytorch as ipex
import unittest
from common_utils import TestCase
class TestCustomOp(TestCase):
    """Tests for IPEX custom operators and FrozenBatchNorm under inference mode."""

    # Port from test_torch
    def test_add_softmax(self):
        """torch.ops.torch_ipex.add_softmax_ must match eager add().softmax(-1)."""
        # small input that cannot take the AVX512 vectorized path
        a = torch.randn(2, 3)
        b = torch.randn(2, 3)
        orig_result = a.add(b).softmax(-1)
        ipex_result = torch.ops.torch_ipex.add_softmax_(a, b)
        self.assertEqual(orig_result, ipex_result)
        # larger input that can take the AVX512 vectorized path
        a = torch.randn(30, 30)
        b = torch.randn(30, 30)
        orig_result = a.add(b).softmax(-1)
        ipex_result = torch.ops.torch_ipex.add_softmax_(a, b)
        self.assertEqual(orig_result, ipex_result)
        # broadcast
        a = torch.randn(30, 30)
        b = torch.randn(30)
        orig_result = a.add(b).softmax(-1)
        ipex_result = torch.ops.torch_ipex.add_softmax_(a, b)
        self.assertEqual(orig_result, ipex_result)

    def test_inference_mode(self):
        """An ipex.optimize()'d model must give identical results under
        torch.no_grad() and torch.inference_mode()."""

        class DemoModel(torch.nn.Module):
            def __init__(self):
                super(DemoModel, self).__init__()
                self.conv = torch.nn.Conv2d(3, 64, (3, 3))
                self.bn = ipex.nn.FrozenBatchNorm2d(64)

            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                return x

        x = torch.rand((1, 3, 640, 640))
        model = DemoModel().eval()
        # enable weight prepack op.
        model = ipex.optimize(model)
        with torch.no_grad():
            y_ref = model(x)
        with torch.inference_mode():
            y_inf = model(x)
        self.assertEqual(y_ref, y_inf)
if __name__ == "__main__":
    # Run the tests when this file is executed directly.
    test = unittest.main()
| 1,732 | 30.509091 | 61 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_launcher.py | import unittest
from common_utils import TestCase
from utils.cpuinfo import construct_numa_config
from intel_extension_for_pytorch.cpu.launch import (
CPUPoolList,
Launcher,
DistributedTrainingLauncher,
)
import os
from os.path import expanduser
import glob
import subprocess
class TestLauncher(TestCase):
    """Unit tests for the IPEX CPU launcher: memory-allocator setup,
    MPI pin-domain / CCL worker affinity, and core-pool partitioning."""

    # Both ways of invoking the launcher: as a module and via the
    # ``ipexrun`` console script.
    launch_scripts = [
        ["python", "-m", "intel_extension_for_pytorch.cpu.launch"],
        ["ipexrun"],
    ]
def find_lib(self, lib_type):
library_paths = []
if "CONDA_PREFIX" in os.environ:
library_paths.append(f'{os.environ["CONDA_PREFIX"]}/lib/')
elif "VIRTUAL_ENV" in os.environ:
library_paths.append(f'{os.environ["VIRTUAL_ENV"]}/lib/')
library_paths += [
f'{expanduser("~")}/.local/lib/',
"/usr/local/lib/",
"/usr/local/lib64/",
"/usr/lib/",
"/usr/lib64/",
]
lib_find = False
for lib_path in library_paths:
library_file = f"{lib_path}/lib{lib_type}.so"
matches = glob.glob(library_file)
if len(matches) > 0:
lib_find = True
break
return lib_find
def del_env(self, env_name):
if env_name in os.environ:
del os.environ[env_name]
    def test_memory_allocator_setup(self):
        """set_memory_allocator must LD_PRELOAD tcmalloc/jemalloc exactly when
        the library is found on this machine, and set MALLOC_CONF for jemalloc."""
        launcher = Launcher()
        # tcmalloc
        find_tcmalloc = self.find_lib("tcmalloc")
        launcher.set_memory_allocator(memory_allocator="tcmalloc")
        ld_preload = (
            ":".join(launcher.ld_preload) if len(launcher.ld_preload) > 0 else ""
        )
        tcmalloc_enabled = "libtcmalloc.so" in ld_preload
        # Preloaded if and only if the library exists on this system.
        self.assertEqual(find_tcmalloc, tcmalloc_enabled)
        # jemalloc
        find_jemalloc = self.find_lib("jemalloc")
        launcher.set_memory_allocator(memory_allocator="jemalloc")
        ld_preload = (
            ":".join(launcher.ld_preload) if len(launcher.ld_preload) > 0 else ""
        )
        jemalloc_enabled = "libjemalloc.so" in ld_preload
        self.assertEqual(find_jemalloc, jemalloc_enabled)
        if jemalloc_enabled:
            self.assertTrue("MALLOC_CONF" in launcher.environ_set)
            self.assertTrue(
                launcher.environ_set["MALLOC_CONF"]
                == "oversize_threshold:1,background_thread:true,metadata_thp:auto"
            )
        # Clear any leftover MALLOC_CONF so the benchmark variant starts clean.
        self.del_env("MALLOC_CONF")
        launcher.set_memory_allocator(memory_allocator="jemalloc", benchmark=True)
        if jemalloc_enabled:
            # benchmark=True switches jemalloc to a latency-oriented config.
            self.assertTrue("MALLOC_CONF" in launcher.environ_set)
            self.assertTrue(
                launcher.environ_set["MALLOC_CONF"]
                == "oversize_threshold:1,background_thread:false,metadata_thp:always,dirty_decay_ms:-1,muzzy_decay_ms:-1"
            )
    def test_mpi_pin_domain_and_ccl_worker_affinity(self):
        """Check I_MPI-style pin-domain masks and CCL worker core lists for a
        range of synthetic topologies built via construct_numa_config."""
        # HT ON, use_logical_cores ON
        nprocs_per_node = 2
        ccl_worker_count = 4
        lscpu_txt = construct_numa_config(
            nprocs_per_node, 28, enable_ht=True, numa_mode=1
        )
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node, use_logical_cores=True
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand, ccl_worker_count
        )
        expect_pin_domain = "[0xffffff0,0xffffff00000000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "0,1,2,3,28,29,30,31"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
        # HT ON, use_logical_cores OFF
        # NOTE(review): the comment above says use_logical_cores OFF but the
        # call below passes use_logical_cores=True (the difference from the
        # previous case is logical_cores_for_ccl=True) -- verify intent.
        nprocs_per_node = 2
        ccl_worker_count = 4
        lscpu_txt = construct_numa_config(
            nprocs_per_node, 28, enable_ht=True, numa_mode=1
        )
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node, use_logical_cores=True
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand,
            ccl_worker_count,
            logical_cores_for_ccl=True,
        )
        expect_pin_domain = "[0xfffffff,0xfffffff0000000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "56,57,58,59,84,85,86,87"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
        # HT OFF, use_logical_cores ON
        nprocs_per_node = 2
        ccl_worker_count = 4
        lscpu_txt = construct_numa_config(
            nprocs_per_node, 28, enable_ht=False, numa_mode=1
        )
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node, use_logical_cores=True
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand,
            ccl_worker_count,
            logical_cores_for_ccl=True,
        )
        expect_pin_domain = "[0xffffff0,0xffffff00000000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "0,1,2,3,28,29,30,31"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
        # nodes_list
        nprocs_per_node = 2
        ccl_worker_count = 2
        lscpu_txt = construct_numa_config(4, 14, enable_ht=True, numa_mode=1)
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node, nodes_list=[1, 2], use_logical_cores=True
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand, ccl_worker_count
        )
        expect_pin_domain = "[0xfff0000,0x3ffc0000000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "14,15,28,29"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
        # ncores_per_instance
        nprocs_per_node = 2
        ccl_worker_count = 4
        lscpu_txt = construct_numa_config(
            nprocs_per_node, 28, enable_ht=True, numa_mode=1
        )
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node,
            ncores_per_instance=(8 + ccl_worker_count) * nprocs_per_node,
            use_logical_cores=True,
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand, ccl_worker_count
        )
        expect_pin_domain = "[0xff0,0xff0000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "0,1,2,3,12,13,14,15"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
        # e-cores
        nprocs_per_node = 2
        ccl_worker_count = 4
        lscpu_txt = construct_numa_config(
            nprocs_per_node, 28, enable_ht=True, n_e_cores=4, numa_mode=0
        )
        launcher = DistributedTrainingLauncher(lscpu_txt=lscpu_txt)
        launcher.cpuinfo.gen_pools_ondemand(
            ninstances=nprocs_per_node, use_logical_cores=True
        )
        pin_domain_affinity = launcher.get_pin_domain_affinity(
            launcher.cpuinfo.pools_ondemand,
            ccl_worker_count,
            logical_cores_for_ccl=True,
        )
        expect_pin_domain = "[0xfffffff,0xfffffff000000000000000]"
        self.assertEqual(pin_domain_affinity["pin_domain"], expect_pin_domain)
        expected_ccl_worker_affinity = "28,29,30,31,88,89,90,91"
        self.assertEqual(pin_domain_affinity["affinity"], expected_ccl_worker_affinity)
def test_launcher_scripts(self):
for launch_script in self.launch_scripts:
cmd = launch_script + ["--help"]
r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self.assertEqual(r.returncode, 0)
def verify_affinity(self, pools, ground_truth):
self.assertEqual(len(pools), ground_truth["ninstances"])
self.assertEqual(len(pools[0]), ground_truth["ncores_per_instance"])
self.assertEqual(
len(set([c.cpu for p in pools for c in p])), ground_truth["num_cores_sum"]
)
self.assertEqual(
len(set([c.node for p in pools for c in p])), ground_truth["num_nodes_sum"]
)
for i in range(ground_truth["ninstances"]):
self.assertEqual(
len(set([c.cpu for c in pools[i]])), ground_truth["num_cores"][i]
)
self.assertEqual(
len(set([c.node for c in pools[i]])), ground_truth["num_nodes"][i]
)
pool_txt = pools[i].get_pool_txt()
self.assertEqual(pool_txt["cores"], ground_truth["pools_cores"][i])
self.assertEqual(pool_txt["nodes"], ground_truth["pools_nodes"][i])
    def test_core_affinity(self):
        """Exercise CPUPoolList partitioning (pool_all and gen_pools_ondemand)
        on synthetic topologies built with numa_mode 0, 1 and 2.

        The numa_mode values control how construct_numa_config enumerates CPU
        ids (physical vs. logical ordering) -- see utils.cpuinfo for details.
        """
        # mode 0
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=0
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        # pool_all covers every CPU (physical + HT siblings) on all nodes.
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 112,
            "num_cores_sum": 112,
            "num_nodes_sum": 2,
            "num_cores": [112],
            "num_nodes": [2],
            "pools_cores": ["0-111"],
            "pools_nodes": ["0,1"],
        }
        self.verify_affinity([cpuinfo.pool_all], ground_truth)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "56-83"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ninstances=4)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": ["0-13", "14-27", "56-69", "70-83"],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # ncores_per_instance=28 must give the same split as ninstances=2.
        cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "56-83"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": ["0-13", "14-27", "56-69", "70-83"],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # Explicit cores_list: 14-27 (physical) plus 42-55 (their HT siblings).
        cores_list_local = []
        cores_list_local.extend(list(i for i in range(14, 28)))
        cores_list_local.extend(list(i for i in range(42, 56)))
        cpuinfo.gen_pools_ondemand(cores_list=cores_list_local)
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 28,
            "num_cores_sum": 28,
            "num_nodes_sum": 1,
            "num_cores": [28],
            "num_nodes": [1],
            "pools_cores": ["14-27,42-55"],
            "pools_nodes": ["0"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # 4-node topology, node selection via nodes_list.
        num_nodes = 4
        n_phycores_per_node = 14
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=0
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 112,
            "num_cores_sum": 112,
            "num_nodes_sum": 4,
            "num_cores": [112],
            "num_nodes": [4],
            "pools_cores": ["0-111"],
            "pools_nodes": ["0,1,2,3"],
        }
        self.verify_affinity([cpuinfo.pool_all], ground_truth)
        cpuinfo.gen_pools_ondemand(nodes_list=[1, 2])
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 28,
            "num_cores_sum": 28,
            "num_nodes_sum": 2,
            "num_cores": [28],
            "num_nodes": [2],
            "pools_cores": ["28-41,56-69"],
            "pools_nodes": ["1,2"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # Topology with 4 E-cores per node; E-cores shift the id ranges.
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=0
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "60-87"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # mode 1
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=1)
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 56,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [56],
            "num_nodes": [2],
            "pools_cores": ["0-55"],
            "pools_nodes": ["0,1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "28-55"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ninstances=4)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": ["0-13", "14-27", "28-41", "42-55"],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "28-55"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": ["0-13", "14-27", "28-41", "42-55"],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cores_list_local = []
        cores_list_local.extend(list(i for i in range(14, 28)))
        cores_list_local.extend(list(i for i in range(42, 56)))
        cpuinfo.gen_pools_ondemand(ninstances=2, cores_list=cores_list_local)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 14,
            "num_cores_sum": 28,
            "num_nodes_sum": 2,
            "num_cores": [14, 14],
            "num_nodes": [1, 1],
            "pools_cores": ["14-27", "42-55"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        num_nodes = 4
        n_phycores_per_node = 14
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2, nodes_list=[1, 2])
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 14,
            "num_cores_sum": 28,
            "num_nodes_sum": 2,
            "num_cores": [14, 14],
            "num_nodes": [1, 1],
            "pools_cores": ["14-27", "28-41"],
            "pools_nodes": ["1", "2"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=1
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27", "28-55"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # mode 2
        # In this enumeration the selected (physical) CPUs get even ids,
        # so pools_cores lists explicit comma-separated ids, not ranges.
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=2
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
                "56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110",
            ],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ninstances=4)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26",
                "28,30,32,34,36,38,40,42,44,46,48,50,52,54",
                "56,58,60,62,64,66,68,70,72,74,76,78,80,82",
                "84,86,88,90,92,94,96,98,100,102,104,106,108,110",
            ],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ncores_per_instance=28)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
                "56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110",
            ],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cpuinfo.gen_pools_ondemand(ncores_per_instance=14)
        ground_truth = {
            "ninstances": 4,
            "ncores_per_instance": 14,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14, 14],
            "num_nodes": [1, 1, 1, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26",
                "28,30,32,34,36,38,40,42,44,46,48,50,52,54",
                "56,58,60,62,64,66,68,70,72,74,76,78,80,82",
                "84,86,88,90,92,94,96,98,100,102,104,106,108,110",
            ],
            "pools_nodes": ["0", "0", "1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        # Uneven split: 56 cores / 3 instances -> 18 each, 2 cores unused,
        # and the middle instance straddles both nodes.
        cpuinfo.gen_pools_ondemand(ninstances=3)
        ground_truth = {
            "ninstances": 3,
            "ncores_per_instance": 18,
            "num_cores_sum": 54,
            "num_nodes_sum": 2,
            "num_cores": [18, 18, 18],
            "num_nodes": [1, 2, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34",
                "36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70",
                "72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106",
            ],
            "pools_nodes": ["0", "0,1", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        cores_list_local = []
        cores_list_local.extend(list(i for i in range(14, 28)))
        cores_list_local.extend(list(i for i in range(98, 112)))
        cpuinfo.gen_pools_ondemand(ninstances=2, cores_list=cores_list_local)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 14,
            "num_cores_sum": 28,
            "num_nodes_sum": 2,
            "num_cores": [14, 14],
            "num_nodes": [1, 1],
            "pools_cores": ["14-27", "98-111"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        num_nodes = 4
        n_phycores_per_node = 14
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=2
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(nodes_list=[1, 2])
        ground_truth = {
            "ninstances": 1,
            "ncores_per_instance": 28,
            "num_cores_sum": 28,
            "num_nodes_sum": 2,
            "num_cores": [28],
            "num_nodes": [2],
            "pools_cores": [
                "28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82"
            ],
            "pools_nodes": ["1,2"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, n_e_cores=4, numa_mode=2
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 28,
            "num_cores_sum": 56,
            "num_nodes_sum": 2,
            "num_cores": [28, 28],
            "num_nodes": [1, 1],
            "pools_cores": [
                "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54",
                "60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114",
            ],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
    def test_core_affinity_with_logical_cores(self):
        """use_logical_cores=True must include each physical core's HT sibling,
        doubling the per-instance core count."""
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=2, use_logical_cores=True)
        ground_truth = {
            "ninstances": 2,
            "ncores_per_instance": 56,
            "num_cores_sum": 112,
            "num_nodes_sum": 2,
            "num_cores": [56, 56],
            "num_nodes": [1, 1],
            "pools_cores": ["0-27,56-83", "28-55,84-111"],
            "pools_nodes": ["0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
    def test_core_affinity_with_skip_cross_node_cores(self):
        """skip_cross_node_cores=True must shrink instances so none spans two
        NUMA nodes (3 instances on 2x28 cores -> 14 cores each, 14 unused)."""
        num_nodes = 2
        n_phycores_per_node = 28
        lscpu_txt = construct_numa_config(
            num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
        )
        cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
        cpuinfo.gen_pools_ondemand(ninstances=3, skip_cross_node_cores=True)
        ground_truth = {
            "ninstances": 3,
            "ncores_per_instance": 14,
            "num_cores_sum": 42,
            "num_nodes_sum": 2,
            "num_cores": [14, 14, 14],
            "num_nodes": [1, 1, 1],
            "pools_cores": ["0-13", "14-27", "28-41"],
            "pools_nodes": ["0", "0", "1"],
        }
        self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_skip_cross_node_cores_and_use_logical_core(self):
num_nodes = 2
n_phycores_per_node = 28
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(
ninstances=7, use_logical_cores=True, skip_cross_node_cores=True
)
ground_truth = {
"ninstances": 7,
"ncores_per_instance": 14,
"num_cores_sum": 98,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14, 14, 14, 14, 14],
"num_nodes": [1, 1, 1, 1, 1, 1, 1],
"pools_cores": [
"0-6,56-62",
"7-13,63-69",
"14-20,70-76",
"21-27,77-83",
"28-34,84-90",
"35-41,91-97",
"42-48,98-104",
],
"pools_nodes": ["0", "0", "0", "0", "1", "1", "1"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
def test_core_affinity_with_skip_cross_node_cores_and_node_id_use_logical_core(
self,
):
num_nodes = 4
n_phycores_per_node = 14
lscpu_txt = construct_numa_config(
num_nodes, n_phycores_per_node, enable_ht=True, numa_mode=1
)
cpuinfo = CPUPoolList(lscpu_txt=lscpu_txt)
cpuinfo.gen_pools_ondemand(
ninstances=3,
nodes_list=[1, 2],
use_logical_cores=True,
skip_cross_node_cores=True,
)
ground_truth = {
"ninstances": 3,
"ncores_per_instance": 14,
"num_cores_sum": 42,
"num_nodes_sum": 2,
"num_cores": [14, 14, 14],
"num_nodes": [1, 1, 1],
"pools_cores": ["14-20,70-76", "21-27,77-83", "28-34,84-90"],
"pools_nodes": ["1", "1", "2"],
}
self.verify_affinity(cpuinfo.pools_ondemand, ground_truth)
# Run the affinity tests when this file is executed directly.
if __name__ == "__main__":
    test = unittest.main()
| 28,110 | 36.682306 | 121 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_fake_tensor.py | import unittest
import itertools
import copy
import torch
from torch._subclasses.fake_tensor import (
FakeTensor,
FakeTensorMode,
)
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
# Lookup tables mapping spatial dimensionality to the matching torch conv /
# transposed-conv module class (ConvTranspose1d is intentionally not tested).
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
convtranspose_module = {2: torch.nn.ConvTranspose2d, 3: torch.nn.ConvTranspose3d}
class ConvNd(torch.nn.Module):
    """Dimension-generic convolution wrapper: picks Conv1d/2d/3d via ``dim``."""

    def __init__(
        self,
        dim,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        bias,
        groups,
    ):
        super(ConvNd, self).__init__()
        # conv_module maps the spatial dimensionality to the conv class.
        conv_cls = conv_module[dim]
        self.conv = conv_cls(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            groups=groups,
        )

    def forward(self, x):
        return self.conv(x)
class Linear(torch.nn.Module):
    """Single fully-connected layer wrapper used by the fake-tensor tests."""

    def __init__(self, in_f, out_f, bias):
        super(Linear, self).__init__()
        self.linear = torch.nn.Linear(in_f, out_f, bias=bias)

    def forward(self, x):
        out = self.linear(x)
        return out
class DeconvNd(torch.nn.Module):
    """Dimension-generic transposed-convolution wrapper (2d/3d only)."""

    def __init__(
        self, dim, ic, oc, kernel_size, stride, padding, groups, bias, dilation
    ):
        super(DeconvNd, self).__init__()
        # convtranspose_module maps dim -> ConvTranspose2d/3d.
        deconv_cls = convtranspose_module[dim]
        self.deconv = deconv_cls(
            ic,
            oc,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            dilation=dilation,
        )

    def forward(self, x):
        return self.deconv(x)
class Lstm(torch.nn.Module):
    """LSTM wrapper that forwards an optional initial (h_0, c_0) state."""

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        bidirectional,
        bias,
        dropout,
        batch_first,
    ):
        super(Lstm, self).__init__()
        self.lstm = torch.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            bias=bias,
            dropout=dropout,
            batch_first=batch_first,
        )

    def forward(self, x, h=None):
        # h is an optional (h_0, c_0) tuple; None lets LSTM zero-initialize.
        out, state = self.lstm(x, h)
        return out, state
class TestFakeCases(TestCase):
    """Checks that ipex-optimized modules run under FakeTensorMode.

    Each test first runs the real (optimized) model, then deep-copies it
    under FakeCopyMode and replays the forward with fake inputs, asserting
    that the fake outputs report the same sizes and the expected dtype
    without touching real data.
    """

    def test_conv_inference(self):
        """Conv1d/2d/3d inference through ipex.optimize under fake tensors."""
        for dim in [1, 2, 3]:
            input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
            if dim == 2:
                channels_last = torch.channels_last
            elif dim == 3:
                channels_last = torch.channels_last_3d
            # 1d convs have no channels-last memory format to sweep.
            if dim == 1:
                options = itertools.product(
                    [True, False],
                    [1, 2],
                    [1, 4],
                    [True, False],
                    [torch.contiguous_format],
                    [torch.float32, torch.bfloat16],
                )
            else:
                options = itertools.product(
                    [True, False],
                    [1, 2],
                    [1, 4],
                    [True, False],
                    [torch.contiguous_format, channels_last],
                    [torch.float32, torch.bfloat16],
                )
            for (
                bias,
                dilation,
                groups,
                feed_sample_input,
                memory_format,
                dtype,
            ) in options:
                # Randomized batch/channel sizes; channels are multiples of groups.
                N = torch.randint(1, 10, (1,)).item()
                M = torch.randint(1, 3, (1,)).item() * groups
                C = torch.randint(1, 3, (1,)).item() * groups
                x_shape = (N, C) + input_shapes[dim]
                x = torch.randn(x_shape, dtype=torch.float32)
                model = ConvNd(
                    dim=dim,
                    in_channels=C,
                    out_channels=M,
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    dilation=dilation,
                    bias=bias,
                    groups=groups,
                ).eval()
                model = model.to(memory_format=memory_format)
                x = x.to(memory_format=memory_format)
                if feed_sample_input:
                    ipex_model = ipex.optimize(
                        model, dtype=dtype, level="O1", sample_input=x
                    )
                else:
                    ipex_model = ipex.optimize(model, dtype=dtype, level="O1")
                # Reference run with real tensors.
                with torch.cpu.amp.autocast(
                    enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                ), torch.no_grad():
                    y = ipex_model(x)
                mode = FakeTensorMode(allow_fallback_kernels=False)
                # FakeCopyMode converts the copied model's parameters to fakes.
                with torch._subclasses.fake_tensor.FakeCopyMode(mode):
                    ipex_model_fake = copy.deepcopy(ipex_model)
                with mode:
                    x_fake = mode.from_tensor(x)
                    with torch.cpu.amp.autocast(
                        enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                    ), torch.no_grad():
                        y_fake = ipex_model_fake(x_fake)
                    self.assertTrue(isinstance(x_fake, FakeTensor))
                    self.assertTrue(isinstance(y_fake, FakeTensor))
                    self.assertTrue(y_fake.size() == y.size())
                    self.assertTrue(y_fake.dtype == dtype)

    def test_linear_inference(self):
        """Linear inference through ipex.optimize under fake tensors."""
        out_features = torch.randint(3, 10, (1,)).item()
        in_features = torch.randint(3, 10, (1,)).item()
        input_shapes = [(8, in_features), (2, 4, in_features), (2, 2, 2, in_features)]
        options = itertools.product(
            [True, False],
            input_shapes,
            [True, False],
            [True, False],
            [torch.float32, torch.bfloat16],
        )
        for bias, x_shape, feed_sample_input, auto_kernel_selection, dtype in options:
            x = torch.randn(x_shape, dtype=torch.float32)
            model = Linear(in_features, out_features, bias).eval()
            if feed_sample_input:
                ipex_model = ipex.optimize(
                    model,
                    dtype=dtype,
                    level="O1",
                    auto_kernel_selection=auto_kernel_selection,
                    sample_input=x,
                )
            else:
                ipex_model = ipex.optimize(
                    model,
                    dtype=dtype,
                    auto_kernel_selection=auto_kernel_selection,
                    level="O1",
                )
            with torch.cpu.amp.autocast(
                enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
            ), torch.no_grad():
                y = ipex_model(x)
            mode = FakeTensorMode(allow_fallback_kernels=False)
            with torch._subclasses.fake_tensor.FakeCopyMode(mode):
                ipex_model_fake = copy.deepcopy(ipex_model)
            with mode:
                x_fake = mode.from_tensor(x)
                with torch.cpu.amp.autocast(
                    enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                ), torch.no_grad():
                    y_fake = ipex_model_fake(x_fake)
                self.assertTrue(isinstance(x_fake, FakeTensor))
                self.assertTrue(isinstance(y_fake, FakeTensor))
                self.assertTrue(y_fake.size() == y.size())
                self.assertTrue(y_fake.dtype == dtype)

    def test_deconv_inference(self):
        """ConvTranspose2d/3d inference through ipex.optimize under fake tensors."""
        for dim in [2, 3]:
            input_shapes = {2: (12, 12), 3: (12, 12, 12)}
            if dim == 2:
                channels_last = torch.channels_last
            else:
                channels_last = torch.channels_last_3d
            input_channel_per_group = 15
            output_channel_per_group = 3
            kernel_size = 3
            options = itertools.product(
                [True, False],
                [1, 2],
                [1, 2],
                [1, 2],
                [1, 2],
                [True, False],
                [torch.contiguous_format, channels_last],
                [torch.float32, torch.bfloat16],
            )
            for (
                bias,
                stride,
                padding,
                groups,
                dilation,
                feed_sample_input,
                memory_format,
                dtype,
            ) in options:
                ic = input_channel_per_group * groups
                oc = output_channel_per_group * groups
                x_shape = (2, ic) + input_shapes[dim]
                x = torch.randn(x_shape, dtype=torch.float32)
                model = DeconvNd(
                    dim, ic, oc, kernel_size, stride, padding, groups, bias, dilation
                ).eval()
                model = model.to(memory_format=memory_format)
                x = x.to(memory_format=memory_format)
                if feed_sample_input:
                    ipex_model = ipex.optimize(
                        model, dtype=dtype, level="O1", sample_input=x
                    )
                else:
                    ipex_model = ipex.optimize(model, dtype=dtype, level="O1")
                with torch.cpu.amp.autocast(
                    enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                ), torch.no_grad():
                    y = ipex_model(x)
                mode = FakeTensorMode(allow_fallback_kernels=False)
                with torch._subclasses.fake_tensor.FakeCopyMode(mode):
                    ipex_model_fake = copy.deepcopy(ipex_model)
                with mode:
                    x_fake = mode.from_tensor(x)
                    with torch.cpu.amp.autocast(
                        enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                    ), torch.no_grad():
                        y_fake = ipex_model_fake(x_fake)
                    self.assertTrue(isinstance(x_fake, FakeTensor))
                    self.assertTrue(isinstance(y_fake, FakeTensor))
                    self.assertTrue(y_fake.size() == y.size())
                    self.assertTrue(y_fake.dtype == dtype)

    def _lstm_params_list(self):
        """Return the LSTM sweep values in a fixed order for itertools.product.

        The order of the value lists must stay in sync with the unpacking in
        test_lstm_inference (dicts preserve insertion order).
        """
        params_dict = {
            "input_size": [1, 2],
            "hidden_size": [5, 16],
            "num_layers": [1, 3],
            "bidirectional": [False, True],
            "bias": [False, True],
            "empty_state": [False, True],
            "batch_first": [False, True],
            "dropout": [0, 0.4, 0.7, 1],
            "batch_size": [1, 2],
            "seq_len": [1, 3],
        }
        params_list = []
        for key, value in params_dict.items():
            params_list.append(value)
        return params_list

    def test_lstm_inference(self):
        """LSTM inference through ipex.optimize under fake tensors."""
        params_list = self._lstm_params_list()
        for (
            input_size,
            hidden_size,
            num_layers,
            bidirectional,
            bias,
            empty_state,
            batch_first,
            dropout,
            batch_size,
            seq_len,
        ) in itertools.product(*params_list):
            # dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1
            if dropout > 0 and num_layers == 1:
                continue
            num_directions = 2 if bidirectional else 1
            if batch_first:
                x = torch.randn(batch_size, seq_len, input_size)
            else:
                x = torch.randn(seq_len, batch_size, input_size)
            h = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            c = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            model = Lstm(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                bidirectional=bidirectional,
                bias=bias,
                dropout=dropout,
                batch_first=batch_first,
            ).eval()
            for dtype in [torch.float32, torch.bfloat16]:
                ipex_model = ipex.optimize(model, dtype=dtype, level="O1")
                with torch.cpu.amp.autocast(
                    enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                ), torch.no_grad():
                    if empty_state:
                        y, hy = ipex_model(x)
                    else:
                        y, hy = ipex_model(x, (h, c))
                mode = FakeTensorMode(allow_fallback_kernels=False)
                with torch._subclasses.fake_tensor.FakeCopyMode(mode):
                    ipex_model_fake = copy.deepcopy(ipex_model)
                with mode:
                    x_fake = mode.from_tensor(x)
                    h_fake = mode.from_tensor(h)
                    c_fake = mode.from_tensor(c)
                    with torch.cpu.amp.autocast(
                        enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
                    ), torch.no_grad():
                        if empty_state:
                            y_fake, hy_fake = ipex_model_fake(x_fake)
                        else:
                            y_fake, hy_fake = ipex_model_fake(x_fake, (h_fake, c_fake))
                    self.assertTrue(isinstance(x_fake, FakeTensor))
                    self.assertTrue(isinstance(y_fake, FakeTensor))
                    self.assertTrue(isinstance(hy_fake[0], FakeTensor))
                    self.assertTrue(isinstance(hy_fake[1], FakeTensor))
                    self.assertTrue(y_fake.size() == y.size())
                    self.assertTrue(hy_fake[0].size() == hy[0].size())
                    self.assertTrue(hy_fake[1].size() == hy[1].size())
                    self.assertTrue(y_fake.dtype == dtype)
                    self.assertTrue(hy_fake[0].dtype == dtype)
                    self.assertTrue(hy_fake[1].dtype == dtype)
if __name__ == "__main__":
    # Fixed seed keeps the randomized shapes/configs reproducible across runs.
    torch.manual_seed(2020)
    test = unittest.main()
| 14,096 | 35.615584 | 131 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/common_device_type.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
import inspect
import threading
from functools import wraps
import unittest
import torch
import intel_extension_for_pytorch as ipex
import copy
from common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, skipCUDANonDefaultStreamIf
# Note: Generic Device-Type Testing
#
# [WRITING TESTS]
#
# Write your test class as usual except:
# (1) Each test method should have one of four signatures:
#
# (1a) testX(self, device)
#
# (1b) @deviceCountAtLeast(<minimum number of devices to run test with>)
# testX(self, devices)
#
# (1c) @dtypes(<list of dtypes>)
# testX(self, device, dtype)
#
# (1d) @deviceCountAtLeast(<minimum number of devices to run test with>)
# @dtypes(<list of dtypes>)
# testX(self, devices, dtype)
#
#
# Note that the decorators are required for signatures (1b), (1c) and
# (1d).
#
# When a test like (1a) is called it will be given a device string,
# like 'cpu' or 'cuda:0.'
#
# Tests like (1b) are called with a list of device strings, like
# ['cuda:0', 'cuda:1']. The first device string will be the
# primary device. These tests will be skipped if the device type
# has fewer available devices than the argument to @deviceCountAtLeast.
#
# Tests like (1c) are called with a device string and a torch.dtype from
# the list of dtypes specified in the @dtypes decorator. Device-specific
# dtype overrides can be specified using @dtypesIfCPU and @dtypesIfCUDA.
#
# Tests like (1d) take a devices argument like (1b) and a dtype
# argument from (1c).
#
# (2) Prefer using test decorators defined in this file to others.
# For example, using the @skipIfNoLapack decorator instead of the
# @skipCPUIfNoLapack will cause the test to not run on CUDA if
# LAPACK is not available, which is wrong. If you need to use a decorator
# you may want to ask about porting it to this framework.
#
# See the TestTorchDeviceType class in test_torch.py for an example.
#
# [RUNNING TESTS]
#
# After defining your test class call instantiate_device_type_tests on it
# and pass in globals() for the second argument. This will instantiate
# discoverable device-specific test classes from your generic class. It will
# also hide the tests in your generic class so they're not run.
#
# If you device-generic test class is TestClass then new classes with names
# TestClass<DEVICE_TYPE> will be created for each available device type.
# TestClassCPU and TestClassCUDA, for example. Tests in these classes also
# have the device type and dtype, if provided, appended to their original
# name. testX, for instance, becomes testX_<device_type> or
# testX_<device_type>_<dtype>.
#
# More concretely, TestTorchDeviceType becomes TestTorchDeviceTypeCPU,
# TestTorchDeviceTypeCUDA, ... test_diagonal in TestTorchDeviceType becomes
# test_diagonal_cpu, test_diagonal_cuda, ... test_erfinv, which accepts a dtype,
# becomes test_erfinv_cpu_float, test_erfinv_cpu_double, test_erfinv_cuda_half,
# ...
#
# In short, if you write a test signature like
# def testX(self, device)
# You are effectively writing
# def testX_cpu(self, device='cpu')
# def testX_cuda(self, device='cuda')
# def testX_xla(self, device='xla')
# ...
#
# These tests can be run directly like normal tests:
# "python test_torch.py TestTorchDeviceTypeCPU.test_diagonal_cpu"
#
# All the tests for a particular device type can be run using the class, and
# other collections of tests can be run using pytest filtering, like
#
# "pytest test_torch.py -k 'test_diag'"
#
# which will run test_diag on every available device.
#
# To specify particular device types the 'and' keyword can be used:
#
# "pytest test_torch.py -k 'test_erfinv and cpu'"
#
# will run test_erfinv on all cpu dtypes.
#
# [ADDING A DEVICE TYPE]
#
# To add a device type:
#
# (1) Create a new "TestBase" extending DeviceTypeTestBase.
# See CPUTestBase and CUDATestBase below.
# (2) Define the "device_type" attribute of the base to be the
# appropriate string.
# (3) Add logic to this file that appends your base class to
# device_type_test_bases when your device type is available.
# (4) (Optional) Write setUpClass/tearDownClass class methods that
# instantiate dependencies (see MAGMA in CUDATestBase).
# (5) (Optional) Override the "instantiate_test" method for total
# control over how your class creates tests.
#
# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF
# they are run. This makes it useful for initializing devices and dependencies.
#
# List of device type test bases that can be used to instantiate tests.
# See below for how this list is populated. If you're adding a device type
# you should check if it's available and (if it is) add it to this list.
# Populated below with every device-type test base available in this environment.
device_type_test_bases = []
class DeviceTypeTestBase(TestCase):
    """Base class for device-specific test bases.

    Subclasses set ``device_type`` and may override the device-discovery
    classmethods. ``instantiate_test`` stamps out one concrete test method
    per device (and per dtype, when the generic test declares dtypes).
    """

    device_type = "generic_device_type"

    # Precision is a thread-local setting since it may be overridden per test
    _tls = threading.local()
    _tls.precision = TestCase.precision

    @property
    def precision(self):
        return self._tls.precision

    @precision.setter
    def precision(self, prec):
        self._tls.precision = prec

    # Returns a string representing the device that single device tests should use.
    # Note: single device tests use this device exclusively.
    @classmethod
    def get_primary_device(cls):
        return cls.device_type

    # Returns a list of strings representing all available devices of this
    # device type. The primary device must be the first string in the list
    # and the list must contain no duplicates.
    # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic
    # mechanism of acquiring all available devices.
    @classmethod
    def get_all_devices(cls):
        return [cls.get_primary_device()]

    # Returns the dtypes the test has requested.
    # Prefers device-specific dtype specifications over generic ones.
    @classmethod
    def _get_dtypes(cls, test):
        if not hasattr(test, "dtypes"):
            return None
        return test.dtypes.get(cls.device_type, test.dtypes.get("all", None))

    def _get_precision_override(self, test, dtype):
        # precision_overrides is set by the precisionOverride decorator.
        if not hasattr(test, "precision_overrides"):
            return self.precision
        return test.precision_overrides.get(dtype, self.precision)

    # Creates device-specific tests.
    @classmethod
    def instantiate_test(cls, name, test):
        test_name = name + "_" + cls.device_type
        dtypes = cls._get_dtypes(test)
        if dtypes is None:  # Test has no dtype variants
            assert not hasattr(cls, test_name), "Redefinition of test {0}".format(
                test_name
            )

            @wraps(test)
            def instantiated_test(self, test=test):
                # Multi-device tests (tagged by deviceCountAtLeast) receive a
                # device *list*; single-device tests receive one device string.
                device_arg = (
                    cls.get_primary_device()
                    if not hasattr(test, "num_required_devices")
                    else cls.get_all_devices()
                )
                return test(self, device_arg)

            setattr(cls, test_name, instantiated_test)
        else:  # Test has dtype variants
            for dtype in dtypes:
                # e.g. "torch.float32" -> "float32" suffix for the test name.
                dtype_str = str(dtype).split(".")[1]
                dtype_test_name = test_name + "_" + dtype_str
                assert not hasattr(
                    cls, dtype_test_name
                ), "Redefinition of test {0}".format(dtype_test_name)

                # test=test, dtype=dtype defaults bind the loop variables now,
                # avoiding the late-binding closure pitfall.
                @wraps(test)
                def instantiated_test(self, test=test, dtype=dtype):
                    device_arg = (
                        cls.get_primary_device()
                        if not hasattr(test, "num_required_devices")
                        else cls.get_all_devices()
                    )
                    # Sets precision and runs test
                    # Note: precision is reset after the test is run
                    guard_precision = self.precision
                    try:
                        self.precision = self._get_precision_override(test, dtype)
                        result = test(self, device_arg, dtype)
                    finally:
                        self.precision = guard_precision
                    return result

                setattr(cls, dtype_test_name, instantiated_test)
class CPUTestBase(DeviceTypeTestBase):
    # Plain-CPU test base; the primary device string is simply "cpu".
    device_type = "cpu"
class DPCPPTestBase(DeviceTypeTestBase):
    """Test base for the ipex device; the device string comes from ipex.DEVICE."""

    device_type = ipex.DEVICE

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Only a single ipex device is exposed.
        return [ipex.DEVICE]

    # Returns the dtypes the test has requested.
    # Prefers device-specific dtype specifications over generic ones.
    @classmethod
    def _get_dtypes(cls, test):
        if not hasattr(test, "dtypes"):
            return None
        dtypes_vec = test.dtypes.get(cls.device_type, test.dtypes.get("all", None))
        try:
            # Half precision is filtered out for this device.
            dtypes_res = [
                item
                for item in dtypes_vec
                if (item != torch.float16 and item != torch.half)
            ]
        except TypeError:  # dtypes_vec == None (not iterable)
            # Was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit; iterating None can only raise
            # TypeError, so catch exactly that.
            dtypes_res = []
        return dtypes_res

    @classmethod
    def setUpClass(cls):
        cls.primary_device = ipex.DEVICE
class CUDATestBase(DeviceTypeTestBase):
    """Test base for CUDA devices; discovers MAGMA/cuDNN availability lazily."""

    device_type = "cuda"
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True

    def has_cudnn(self):
        # no_cudnn is populated in setUpClass.
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # Primary device first, then every other visible CUDA device.
        primary_device_idx = int(cls.get_primary_device().split(":")[1])
        num_devices = torch.cuda.device_count()
        prim_device = cls.get_primary_device()
        cuda_str = "cuda:{0}"
        non_primary_devices = [
            cuda_str.format(idx)
            for idx in range(num_devices)
            if idx != primary_device_idx
        ]
        return [prim_device] + non_primary_devices

    @classmethod
    def setUpClass(cls):
        # has_magma shows up after cuda is initialized
        t = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma
        # Determines if cuDNN is available and its version
        cls.no_cudnn = not (TEST_WITH_ROCM or torch.backends.cudnn.is_acceptable(t))
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()
        # Acquires the current device as the primary (test) device
        cls.primary_device = "cuda:{0}".format(torch.cuda.current_device())
# Adds available device-type-specific test base classes
# (only the ipex base is registered here; CPU/CUDA bases exist but are unused).
device_type_test_bases.append(DPCPPTestBase)
# Adds 'instantiated' device-specific test cases to the given scope.
# The tests in these test cases are derived from the generic tests in
# generic_test_class.
# See note "Generic Device Type Testing."
def instantiate_device_type_tests(generic_test_class, scope, except_for=None):
    """Create one concrete test class per registered device type.

    ``generic_test_class`` is removed from ``scope`` (usually the caller's
    globals()) and replaced by classes named ``<Name><DEVICETYPE>`` whose
    test methods are device/dtype-specialized copies of the generic ones.
    ``except_for`` optionally lists device types to skip.
    """
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]
    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    # that would add its tests to our test classes and they would be
    # discovered (despite not being runnable). Inherited methods also
    # can't be removed later, and we can't rely on load_tests because
    # pytest doesn't support it (as of this writing).
    empty_name = generic_test_class.__name__ + "_base"
    empty_class = type(empty_name, generic_test_class.__bases__, {})
    # Acquires members names
    generic_members = set(dir(generic_test_class)) - set(dir(empty_class))
    generic_tests = [x for x in generic_members if x.startswith("test")]
    # Creates device-specific test cases
    for base in device_type_test_bases:
        # Skips bases listed in except_for
        if except_for is not None and base.device_type in except_for:
            continue
        class_name = generic_test_class.__name__ + base.device_type.upper()
        device_type_test_class = type(class_name, (base, empty_class), {})
        for name in generic_members:
            if name in generic_tests:  # Instantiates test member
                # Requires tests be a function for Python2 compat
                # (In Python2 tests are type checked methods wrapping functions)
                test = getattr(generic_test_class, name)
                if hasattr(test, "__func__"):
                    test = test.__func__
                assert inspect.isfunction(
                    test
                ), "Couldn't extract function from '{0}'".format(name)
                # Instantiates the device-specific tests
                # (deepcopy so per-class mutation of test attributes is isolated)
                device_type_test_class.instantiate_test(name, copy.deepcopy(test))
            else:  # Ports non-test member
                assert not hasattr(
                    device_type_test_class, name
                ), "Redefinition of non-test member {0}".format(name)
                # Unwraps to functions (when available) for Python2 compat
                nontest = getattr(generic_test_class, name)
                if hasattr(nontest, "__func__"):
                    nontest = nontest.__func__
                setattr(device_type_test_class, name, nontest)
        # Mimics defining the instantiated class in the caller's file
        # by setting its module to the given class's and adding
        # the module to the given scope.
        # This lets the instantiated class be discovered by unittest.
        device_type_test_class.__module__ = generic_test_class.__module__
        scope[class_name] = device_type_test_class
# Decorator that skips a test if the given condition is true.
# Notes:
# (1) Skip conditions stack.
# (2) Skip conditions can be bools or strings. If a string the
# test base must have defined the corresponding attribute to be False
# for the test to run. If you want to use a string argument you should
# probably define a new decorator instead (see below).
# (3) Prefer the existing decorators to defining the 'device_type' kwarg.
class skipIf(object):
    """Decorator that skips the wrapped test when ``dep`` evaluates true.

    ``dep`` may be a bool or the name of an attribute on the test instance
    (a missing attribute counts as True). With ``device_type`` set, the skip
    applies only when the running test's device type matches.
    """

    def __init__(self, dep, reason, device_type=None):
        self.dep = dep
        self.reason = reason
        self.device_type = device_type

    def __call__(self, fn):
        @wraps(fn)
        def dep_fn(slf, device, *args, **kwargs):
            applies = self.device_type is None or self.device_type == slf.device_type
            if applies:
                dep = self.dep
                if isinstance(dep, str):
                    should_skip = getattr(slf, dep, True)
                else:
                    should_skip = isinstance(dep, bool) and dep
                if should_skip:
                    raise unittest.SkipTest(self.reason)
            return fn(slf, device, *args, **kwargs)

        return dep_fn
# Skips a test on CPU if the condition is true.
class skipCPUIf(skipIf):
    """skipIf specialized to the "cpu" device type."""

    def __init__(self, dep, reason):
        # Zero-argument super(): the Python-2-style explicit form was redundant.
        super().__init__(dep, reason, device_type="cpu")
# Skips a test on CUDA if the condition is true.
class skipCUDAIf(skipIf):
    """skipIf specialized to the "cuda" device type."""

    def __init__(self, dep, reason):
        # Zero-argument super(): the Python-2-style explicit form was redundant.
        super().__init__(dep, reason, device_type="cuda")
# Only runs on cuda, and only run when there is enough GPU RAM
def largeCUDATensorTest(size):
    """Skip unless a CUDA device with at least ``size`` bytes of RAM exists.

    ``size`` is either an int (bytes) or a string like ``"4GB"``.
    """
    if isinstance(size, str):
        assert size.endswith("GB") or size.endswith("gb"), "only bytes or GB supported"
        size = int(size[:-2]) * 1024**3
    cuda_ok = torch.cuda.is_available()
    valid = cuda_ok and torch.cuda.get_device_properties(0).total_memory >= size
    return unittest.skipIf(
        not valid, "No CUDA or Has CUDA but GPU RAM is not large enough"
    )
class onlyOn(object):
    """Restrict a test to one device type; other device types raise SkipTest."""

    def __init__(self, device_type):
        self.device_type = device_type

    def __call__(self, fn):
        @wraps(fn)
        def only_fn(slf, device, *args, **kwargs):
            if slf.device_type == self.device_type:
                return fn(slf, device, *args, **kwargs)
            raise unittest.SkipTest("Only runs on {0}".format(self.device_type))

        return only_fn
# Decorator that provides all available devices of the device type to the test
# as a list of strings instead of providing a single device string.
# Skips the test if the number of available devices of the variant's device
# type is less than the 'num_required_devices' arg.
class deviceCountAtLeast(object):
    """Skip the test unless at least ``num_required_devices`` devices exist.

    Also tags the wrapped function with ``num_required_devices`` so that the
    instantiation machinery passes a device *list* instead of one device.
    """

    def __init__(self, num_required_devices):
        self.num_required_devices = num_required_devices

    def __call__(self, fn):
        assert not hasattr(
            fn, "num_required_devices"
        ), "deviceCountAtLeast redefinition for {0}".format(fn.__name__)
        # Must be set before @wraps copies fn.__dict__ onto the wrapper.
        fn.num_required_devices = self.num_required_devices

        @wraps(fn)
        def multi_fn(slf, devices, *args, **kwargs):
            if len(devices) >= self.num_required_devices:
                return fn(slf, devices, *args, **kwargs)
            raise unittest.SkipTest(
                "fewer than {0} devices detected".format(self.num_required_devices)
            )

        return multi_fn
# Specifies per-dtype precision overrides.
# Ex.
#
# @precisionOverride(torch.half : 1e-2, torch.float : 1e-4)
# @dtypes(torch.half, torch.float, torch.double)
# def test_X(self, device, dtype):
# ...
#
# When the test is instantiated its class's precision will be set to the
# corresponding override, if it exists.
# self.precision can be accessed directly, and it also controls the behavior of
# functions like self.assertEqual().
#
# Note that self.precision is a scalar value, so if you require multiple
# precisions (or are working with multiple dtypes) they should be specified
# explicitly and computed using self.precision (e.g.
# self.precision *2, max(1, self.precision)).
class precisionOverride(object):
    """Attach per-dtype precision overrides to a test function.

    The instantiation machinery reads ``fn.precision_overrides`` and sets
    ``self.precision`` accordingly while the dtype variant runs.
    """

    def __init__(self, d):
        assert isinstance(
            d, dict
        ), "precisionOverride not given a dtype : precision dict!"
        for dtype in d:
            assert isinstance(
                dtype, torch.dtype
            ), "precisionOverride given unknown dtype {0}".format(dtype)
        self.d = d

    def __call__(self, fn):
        fn.precision_overrides = self.d
        return fn
# Decorator that instantiates a variant of the test for each given dtype.
# Notes:
# (1) Tests that accept the dtype argument MUST use this decorator.
# (2) Can be overridden for the CPU or CUDA, respectively, using dtypesIfCPU
# or dtypesIfCUDA.
# (3) Prefer the existing decorators to defining the 'device_type' kwarg.
class dtypes(object):
    """Declare the dtype variants a test should be instantiated with.

    Applications stack: each records its dtype tuple on ``fn.dtypes`` under
    its ``device_type`` key ("all" by default); redefining a key asserts.
    """

    # Note: *args, **kwargs for Python2 compat.
    # Python 3 allows (self, *args, device_type='all').
    def __init__(self, *args, **kwargs):
        assert args is not None and len(args) != 0, "No dtypes given"
        assert all(
            isinstance(arg, torch.dtype) for arg in args
        ), "Unknown dtype in {0}".format(str(args))
        self.args = args
        self.device_type = kwargs.get("device_type", "all")

    def __call__(self, fn):
        registry = getattr(fn, "dtypes", {})
        assert self.device_type not in registry, "dtypes redefinition for {0}".format(
            self.device_type
        )
        registry[self.device_type] = self.args
        fn.dtypes = registry
        return fn
# Overrides specified dtypes on the CPU.
class dtypesIfCPU(dtypes):
    """dtypes variant that applies only on the "cpu" device type."""

    def __init__(self, *args):
        # Zero-argument super(): the Python-2-style explicit form was redundant.
        super().__init__(*args, device_type="cpu")
# Overrides specified dtypes on CUDA.
class dtypesIfCUDA(dtypes):
    """dtypes variant that applies only on the "cuda" device type."""

    def __init__(self, *args):
        # Zero-argument super(): the Python-2-style explicit form was redundant.
        super().__init__(*args, device_type="cuda")
def onlyCPU(fn):
    # Convenience wrapper: restrict *fn* to the "cpu" device type.
    return onlyOn("cpu")(fn)
def onlyCUDA(fn):
    # Convenience wrapper: restrict *fn* to the "cuda" device type.
    return onlyOn("cuda")(fn)
# Skips a test on CPU if LAPACK is not available.
def skipCPUIfNoLapack(fn):
    # torch._C.has_lapack reflects the build-time LAPACK configuration.
    return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn)
# Skips a test on CPU if MKL is not available.
def skipCPUIfNoMkl(fn):
    # TEST_MKL comes from common_utils and reflects the build's MKL support.
    return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn)
# Skips a test on CUDA if MAGMA is not available.
def skipCUDAIfNoMagma(fn):
    # Also wraps the test with skipCUDANonDefaultStreamIf(True) so MAGMA
    # tests run on the default CUDA stream.
    return skipCUDAIf("no_magma", "no MAGMA library detected")(
        skipCUDANonDefaultStreamIf(True)(fn)
    )
# Skips a test on CUDA when using ROCm.
def skipCUDAIfRocm(fn):
    return skipCUDAIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")(
        fn
    )
# Skips a test on CUDA when not using ROCm.
def skipCUDAIfNotRocm(fn):
    return skipCUDAIf(
        not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack"
    )(fn)
# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested.
def skipCUDAIfCudnnVersionLessThan(version=0):
    """Skip on CUDA when cuDNN is missing or its version is below ``version``.

    Relies on ``no_cudnn``/``cudnn_version`` attributes populated by the
    CUDA test base; non-CUDA device types pass through untouched.
    """

    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, device, *args, **kwargs):
            if self.device_type == "cuda":
                if self.no_cudnn:
                    raise unittest.SkipTest("cuDNN not available")
                too_old = self.cudnn_version is None or self.cudnn_version < version
                if too_old:
                    raise unittest.SkipTest(
                        "cuDNN version {0} is available but {1} required".format(
                            self.cudnn_version, version
                        )
                    )
            return fn(self, device, *args, **kwargs)

        return wrap_fn

    return dec_fn
def skipCUDAIfNoCudnn(fn):
    # Equivalent to requiring any cuDNN at all (minimum version 0).
    return skipCUDAIfCudnnVersionLessThan(0)(fn)
| 23,264 | 35.238318 | 106 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_rmsnorm.py | import unittest
import torch
from torch import nn
from common_utils import TestCase
class RMSNorm(nn.Module):
    """Root-mean-square normalization: scale by 1/RMS, no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-feature scale, initialized to ones.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Mean of squares over the last dimension, kept for broadcasting.
        mean_sq = hidden_states.pow(2).mean(-1, keepdim=True)
        normed = hidden_states * torch.rsqrt(mean_sq + self.variance_epsilon)
        return self.weight * normed
class RMSNormTester(TestCase):
    """Checks that jit-traced RMSNorm is fused into the ipex::RMSNorm op.

    NOTE(review): this file does not import intel_extension_for_pytorch
    directly — presumably common_utils (or the test harness) loads it so the
    fusion pass is registered; verify before running standalone.
    """

    def test_RMSNorm(self):
        # Sweep input ranks 2..5; first dim is 3, remaining dims are 10.
        for dim in [2, 3, 4, 5]:
            with torch.cpu.amp.autocast(), torch.no_grad():
                input_size = [
                    3,
                ]
                for _ in range(dim - 1):
                    input_size.append(10)
                x = torch.randn(input_size)
                # RMSNorm input is fp32
                model = RMSNorm(input_size).eval()
                trace_model = torch.jit.trace(model, x)
                y1_fp32 = model(x)
                y2_fp32 = trace_model(x)
                rmsnorm_graph = trace_model.graph_for(x)
                # Traced and eager results must agree and stay fp32 under autocast.
                self.assertEqual(y1_fp32.dtype, torch.float32)
                self.assertEqual(y2_fp32.dtype, torch.float32)
                self.assertEqual(y1_fp32, y2_fp32)
                # The optimized graph must contain the fused ipex::RMSNorm node.
                self.assertTrue(
                    any(n.kind() == "ipex::RMSNorm" for n in rmsnorm_graph.nodes())
                )
# Run the RMSNorm fusion tests when executed directly.
if __name__ == "__main__":
    test = unittest.main()
| 1,514 | 32.666667 | 85 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_cumsum.py | import torch
import unittest
from common_utils import TestCase
class TestCumSum(TestCase):
    """Exercises the ipex cumsum kernels: out-of-place, `out=`, in-place,
    bool/byte inputs, zero-length dims, and the scalar edge case."""

    # Port from test_torch
    def test_cumsum(self):
        for dtype in [torch.float, torch.double, torch.long]:
            src = torch.randn(17, 4097).to(dtype)
            expected = torch.ops.torch_ipex.cumsum(src, 1)
            out_buf = torch.tensor([]).to(dtype)
            torch.ops.torch_ipex.cumsum(src, 1, out=out_buf)
            self.assertEqual(expected, out_buf)
            torch.ops.torch_ipex.cumsum_(src, 1)
            self.assertEqual(expected, src)

        bool_mat = torch.tensor(
            [[True, False, True], [False, False, False], [True, True, True]]
        )
        byte_mat = bool_mat.byte()
        references = (
            (0, [[1, 0, 1], [1, 0, 1], [2, 1, 2]]),
            (1, [[1, 1, 2], [0, 0, 0], [1, 2, 3]]),
        )
        for dim, ref in references:
            from_bool = torch.ops.torch_ipex.cumsum(bool_mat, dim)
            from_byte = torch.ops.torch_ipex.cumsum(byte_mat, dim)
            self.assertEqual(from_bool, from_byte)
            self.assertEqual(from_bool, torch.tensor(ref))

        # Cumulative sum over a zero-length dimension must not crash on
        # backprop; also cover other small base-case shapes.
        shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]]
        for shape in shapes:
            for dim in range(len(shape)):
                raw_tensor = torch.zeros(*shape, requires_grad=True)
                integrated = torch.ops.torch_ipex.cumsum(raw_tensor, dim=dim)
                # Backward must not crash and grad must keep the input shape.
                integrated.sum().backward()
                self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)

        # Scalar example: cumsum of a 0-dim tensor is the tensor itself.
        raw_tensor = torch.tensor(3.0, requires_grad=True)
        integrated = raw_tensor.cumsum(dim=-1)
        self.assertEqual(raw_tensor, integrated)
        integrated.sum().backward()
        self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    test = unittest.main()
| 2,346 | 39.465517 | 92 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_autocast.py | import unittest
import copy
import torch
import torch.nn as nn
import intel_extension_for_pytorch as ipex
import intel_extension_for_pytorch._C as core
from common_utils import TestCase
from torch.testing._internal.common_utils import TestCase as TorchTestCase
import time
import sys
import itertools
import collections
from autocast_test_lists import AutocastCPUTestLists
from typing import Tuple
from test_jit import (
Conv_Bn_Relu,
BatchNorm_Conv_BatchNorm,
ConvBatchNorm_Fixed,
ConvReshapeBatchNorm,
CascadedConvBnSumRelu,
LinearBn,
Linear_Reshape_Bn,
)
_default_tolerances = {
"float64": (1e-5, 1e-8), # NumPy default
"float32": (1e-4, 1e-5), # This may need to be changed
"float16": (1e-3, 1e-3), # This may need to be changed
}
bn_m = {1: nn.BatchNorm1d, 2: nn.BatchNorm2d, 3: nn.BatchNorm3d}
def _get_default_tolerance(a, b=None) -> Tuple[float, float]:
if b is None:
dtype = str(a.dtype).split(".")[-1] # e.g. "float32"
return _default_tolerances.get(dtype, (0, 0))
a_tol = _get_default_tolerance(a)
b_tol = _get_default_tolerance(b)
return (max(a_tol[0], b_tol[0]), max(a_tol[1], b_tol[1]))
def get_rand_seed():
    """Current wall-clock time in nanoseconds, used as a printable RNG seed."""
    seconds = time.time()
    return int(seconds * 1e9)
class TestFunction(TestCase):
    """Basic CPU autocast behavior: dtype selection, forward output dtype,
    fp16 GradScaler training, and nested autocast contexts."""

    def setUp(self):
        super(TestFunction, self).setUp()
        # Small conv/linear models from test_jit with matching inputs.
        self.models = [
            Conv_Bn_Relu(2, 3, 32, kernel_size=3, stride=1),
            LinearBn(2, 32, 32, bias=True),
            Linear_Reshape_Bn(2, 32, 32, (1, 1, 64, 16), bias=True),
        ]
        self.inputs = [
            torch.randn(32, 3, 64, 64),
            torch.rand(1, 1, 32, 32),
            torch.rand(1, 1, 32, 32),
        ]

    def test_set_autocast_dtype(self):
        """The autocast context must publish its dtype via core.get_autocast_dtype()."""
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            self.assertEqual(core.get_autocast_dtype(), torch.bfloat16)
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.float16):
            self.assertEqual(core.get_autocast_dtype(), torch.float16)

    def test_forward_dtype(self):
        """Conv2d under bf16 autocast must produce a bf16 output."""
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        _in_cpu = torch.rand((1, 1, 7, 7))
        _conv = torch.nn.Conv2d(1, 1, (3, 3))
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            out_autocast = _conv(_in_cpu)
        self.assertEqual(out_autocast.dtype, torch.bfloat16)

    @unittest.skipIf(
        not core.onednn_has_fp16_support(),
        "ipex fp16 is not supported on this CPU device",
    )
    def test_gradscaler(self):
        """fp16 training loop with GradScaler over ipex.optimize()d models;
        passes if scale/step/update run without error."""
        scaler = torch.cpu.amp.GradScaler()
        niters = 100
        criterion = torch.nn.L1Loss()
        for i in range(self.models.__len__()):
            model = self.models[i]
            # Forward once in fp32 only to shape the random target.
            out = model(self.inputs[i])
            target = torch.rand_like(out)
            optimizer = torch.optim.SGD(model.parameters(), lr=0.05, momentum=0.95)
            model, optimizer = ipex.optimize(
                model,
                optimizer=optimizer,
                dtype=torch.half,
                auto_kernel_selection=True,
                weights_prepack=True,
            )
            optimizer.zero_grad()
            for _ in range(niters):
                optimizer.zero_grad()
                with torch.cpu.amp.autocast(enabled=True, dtype=torch.half):
                    output = model(self.inputs[i])
                    loss = criterion(output, target)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()

    def test_nested_useage(self):
        """Inner autocast contexts (disabled, or fp32 dtype) override the outer
        bf16 context, so the conv output stays fp32."""
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        _in_cpu = torch.rand((1, 1, 7, 7))
        _conv = torch.nn.Conv2d(1, 1, (3, 3))
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            with torch.cpu.amp.autocast(enabled=False):
                out_autocast = _conv(_in_cpu)
            self.assertEqual(out_autocast.dtype, torch.float)

            with torch.cpu.amp.autocast(enabled=True, dtype=torch.float):
                out_autocast = _conv(_in_cpu)
            self.assertEqual(out_autocast.dtype, torch.float)
class TestAutocastWithJit(TestCase):
    """Autocast interaction with jit tracing: cache_enabled, NCHW/NHWC numeric
    parity with eager mode, and dtype promotion through aten::cat.

    Fix: ``test_cat_promote`` previously used ``assertTrue(c.dtype,
    torch.float32)`` — ``assertTrue`` takes (expr, msg), so the expected dtype
    was silently treated as the failure message and the check was vacuous.
    Replaced with ``assertEqual``.
    """

    def setUp(self):
        super(TestAutocastWithJit, self).setUp()
        # Conv/BN/linear models from test_jit, paired index-wise with inputs
        # (index 3 is the only 3D/5-dim input).
        self.models = [
            Conv_Bn_Relu(2, 3, 32, kernel_size=3, stride=1),
            BatchNorm_Conv_BatchNorm(2, 3, 32, kernel_size=3, stride=1),
            ConvBatchNorm_Fixed(2, 3, 32, kernel_size=3, stride=1),
            ConvBatchNorm_Fixed(3, 3, 32, kernel_size=3, stride=1),
            ConvReshapeBatchNorm(2, 3, 32, (64, 16, 62, 62), kernel_size=3, stride=1),
            CascadedConvBnSumRelu(2, 3, 64, 32, kernel_size=3, stride=1),
            LinearBn(2, 32, 32, bias=True),
            Linear_Reshape_Bn(2, 32, 32, (1, 1, 64, 16), bias=True),
        ]
        self.inputs = [
            torch.randn(32, 3, 64, 64),
            torch.randn(32, 3, 64, 64),
            torch.randn(32, 3, 64, 64),
            torch.randn(32, 3, 32, 32, 32),
            torch.randn(32, 3, 64, 64),
            torch.rand(32, 3, 64, 64),
            torch.rand(1, 1, 32, 32),
            torch.rand(1, 1, 32, 32),
        ]

    def test_autocast_jit_cache_enable(self):
        """Eager, traced (cache_enabled default), and traced (cache_enabled=False)
        outputs must all agree under bf16 autocast."""

        def test_generate_autocast_jit_cache_enable(model, x):
            model.eval()
            # Disable oneDNN fusion and the TE fuser so we compare plain traces.
            ipex.enable_onednn_fusion(False)
            pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
            torch._C._jit_set_texpr_fuser_enabled(False)
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                y0 = model(x)
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                traced_model = torch.jit.trace(model, x)
                y1 = traced_model(x)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16, cache_enabled=False
            ):
                traced_model = torch.jit.trace(model, x)
                y2 = traced_model(x)
            self.assertEqual(y0, y1)
            self.assertEqual(y1, y2)
            ipex.enable_onednn_fusion(True)
            torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)

        for i in range(self.models.__len__()):
            test_generate_autocast_jit_cache_enable(self.models[i], self.inputs[i])

    def test_generate_autocast_jit_trace_model(self):
        """Tracing under bf16 autocast (with and without onednn fusion) must
        succeed; this is a smoke test — the traces are not compared."""

        def test_generate_autocast_jit_trace_model(model, x):
            model.eval()
            ipex.enable_onednn_fusion(False)
            pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
            torch._C._jit_set_texpr_fuser_enabled(False)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                traced_model = torch.jit.trace(model, x)
            ipex.enable_onednn_fusion(True)
            torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                traced_model2 = torch.jit.trace(model, x.clone())

        for i in range(self.models.__len__()):
            test_generate_autocast_jit_trace_model(self.models[i], self.inputs[i])

    def test_nchw_autocast_jit_trace_model(self):
        """Traced vs eager numeric parity under bf16 autocast, NCHW layout."""

        def test_nchw_autocast_jit_trace_model(model, x):
            model.eval()
            ipex.enable_onednn_fusion(False)
            pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
            torch._C._jit_set_texpr_fuser_enabled(False)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                traced_model = torch.jit.trace(model, x)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                y = traced_model(x.clone())
                y2 = model(x.clone())
            ipex.enable_onednn_fusion(True)
            torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)
            # Compare in double with the dtype-derived atol.
            torch.testing.assert_allclose(
                y.double(),
                y2.double(),
                rtol=1e-05,
                atol=_get_default_tolerance(y, y2)[1],
            )

        for i in range(self.models.__len__()):
            test_nchw_autocast_jit_trace_model(self.models[i], self.inputs[i])

    def test_nhwc_autocast_jit_trace_model(self):
        """Traced vs eager numeric parity under bf16 autocast, channels-last."""

        def test_nhwc_autocast_jit_trace_model(model, x):
            model.eval()
            ipex.enable_onednn_fusion(False)
            pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
            torch._C._jit_set_texpr_fuser_enabled(False)
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                traced_model = torch.jit.trace(
                    model, x.to(memory_format=torch.channels_last)
                )
            with torch.cpu.amp.autocast(
                enabled=True, dtype=torch.bfloat16
            ), torch.no_grad():
                y = traced_model(x.clone().to(memory_format=torch.channels_last))
                y2 = model(x.clone().to(memory_format=torch.channels_last))
            ipex.enable_onednn_fusion(True)
            torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)
            torch.testing.assert_allclose(
                y.double(),
                y2.double(),
                rtol=1e-05,
                atol=_get_default_tolerance(y, y2)[1],
            )

        for i in range(self.models.__len__()):
            if self.inputs[i].size().__len__() == 5:
                # NHWC 3D case not support yet
                continue
            test_nhwc_autocast_jit_trace_model(self.models[i], self.inputs[i])

    # Check whether cat has done the promotion in AMP with mixed dtype inputs
    # since input type of cat is changed to ITensorListRef
    def test_cat_promote(self):
        class TestModel(torch.nn.Module):
            def __init__(self):
                super(TestModel, self).__init__()

            def forward(self, a, b):
                return torch.cat([a, b], 0)

        with torch.jit.fuser("none"):
            # In this testcase, we will check whether cat has done the promotion in AMP with mixed dtype inputs.
            # To avoid the fusion group from TE, we will disable the fuser here.
            for jit_freeze_or_not in [False, True]:
                test_model = TestModel().eval()
                with torch.cpu.amp.autocast(
                    cache_enabled=False, dtype=torch.bfloat16
                ), torch.no_grad():
                    a = torch.rand(24, 128, 128)
                    b = torch.rand(24, 128, 128, dtype=torch.bfloat16)
                    c = test_model(a, b)
                    traced = torch.jit.trace(test_model, (a, b))
                if jit_freeze_or_not:
                    traced = torch.jit.freeze(traced)
                for _ in range(3):
                    c2 = traced(a, b)
                # Mixed fp32/bf16 inputs must promote cat's output to fp32.
                # (Was assertTrue(c.dtype, torch.float32), which never fails.)
                self.assertEqual(c.dtype, torch.float32)
                self.assertEqual(c2.dtype, torch.float32)
                traced_graph = traced.graph_for(a, b)
                self.assertTrue(
                    any(n.kind() == "aten::to" for n in traced_graph.nodes())
                )
class TestPyTorchOps(TestCase):
    """Autocast dtype propagation for stock torch ops (torch.bernoulli)."""

    def test_bernoulli(self):
        probs = torch.rand(8, 8)
        probs_bf16 = probs.to(dtype=torch.bfloat16)
        probs_fp16 = probs.to(dtype=torch.float16)

        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            drawn = torch.bernoulli(probs_bf16)
            drawn_p = torch.bernoulli(probs_bf16, 0.5)
            self.assertEqual(drawn.dtype, torch.bfloat16)
            self.assertEqual(drawn_p.dtype, torch.bfloat16)

        # Does not support fp16 yet
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.float16):
            drawn = torch.bernoulli(probs_fp16)
            drawn_p = torch.bernoulli(probs_fp16, 0.5)
            self.assertEqual(drawn.dtype, torch.float32)
            self.assertEqual(drawn_p.dtype, torch.float32)
class TestCustomerOps(TestCase):
    """ipex custom ops under autocast: the interaction op (dtype promotion on
    forward/backward) and EmbeddingBag (bf16 inference vs fp32 training)."""

    def test_interaction_op(self):
        def interact_fusion(x, ly):
            A = [x] + ly
            R = ipex.nn.functional.interaction(*A)
            return R

        def interact_fusion_autocast(x, ly):
            A = [x] + ly
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                R = ipex.nn.functional.interaction(*A)
            return R

        # test unexpected data types with autocast
        try:
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                ipex.nn.functional.interaction(
                    *[
                        torch.randn([128, 128], dtype=torch.float),
                        torch.randn([128, 128], dtype=torch.double),
                    ]
                )
        except BaseException:
            # expected type error
            pass
        try:
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                ipex.nn.functional.interaction(
                    *[
                        torch.randn([128, 128]).to(torch.half),
                        torch.randn([128, 128]).to(torch.half),
                    ]
                )
        except BaseException:
            # expected type error
            pass

        dtypes = [torch.float32]
        for dtype in dtypes:
            # x* are the "dense" features; ly* are 26 embedding outputs.
            x1 = torch.randn([2048, 128]).to(dtype).clone().detach().requires_grad_()
            x1_bf16 = x1.clone().bfloat16().detach().requires_grad_()
            x2 = x1.clone().detach().requires_grad_()
            x2_bf16 = x1.clone().bfloat16().detach().requires_grad_()
            ly1 = []
            ly1_bf16 = []
            ly2 = []
            ly2_bf16 = []
            for i in range(0, 26):
                V = torch.randn([2048, 128]).to(dtype).clone().detach().requires_grad_()
                ly1.append(V)
                ly1_bf16.append(V.clone().bfloat16().detach().requires_grad_())
                ly2.append(V.clone().detach().requires_grad_())
                ly2_bf16.append(V.clone().bfloat16().detach().requires_grad_())
            A = interact_fusion(x1, ly1)  # all fp32
            B = interact_fusion_autocast(x1_bf16, ly1_bf16)  # all bf16
            C = interact_fusion_autocast(x2, ly2_bf16)  # fp32 dense bf16 emb
            D = interact_fusion_autocast(x2_bf16, ly2)  # bf16 dense fp32 emb
            self.assertEqual(A.dtype, torch.float)
            self.assertEqual(B.dtype, torch.bfloat16)
            # promote to fp32
            self.assertEqual(C.dtype, torch.float)
            self.assertEqual(D.dtype, torch.float)
            self.assertTrue(torch.allclose(A, B.float(), rtol=0.05, atol=0.1))
            self.assertTrue(torch.allclose(A, C.float(), rtol=0.05, atol=0.1))
            self.assertTrue(torch.allclose(A, D.float(), rtol=0.05, atol=0.1))

            # Grads must come back in each input's own dtype.
            A.mean().backward()
            B.mean().backward()
            C.mean().backward()
            D.mean().backward()
            self.assertEqual(x1.grad.dtype, torch.float)
            self.assertEqual(x1_bf16.grad.dtype, torch.bfloat16)
            self.assertEqual(x2.grad.dtype, torch.float)
            self.assertEqual(x2_bf16.grad.dtype, torch.bfloat16)
            self.assertEqual(x1.grad, x1_bf16.grad.float(), 1e-03)
            self.assertEqual(x1.grad, x2.grad)
            self.assertEqual(x1.grad, x2_bf16.grad.float(), 1e-03)
            for i in range(0, 26):
                self.assertEqual(ly1[i].grad.dtype, torch.float)
                self.assertEqual(ly1_bf16[i].grad.dtype, torch.bfloat16)
                self.assertEqual(ly2[i].grad.dtype, torch.float)
                self.assertEqual(ly2_bf16[i].grad.dtype, torch.bfloat16)
                torch.testing.assert_allclose(
                    ly1[i].grad, ly1_bf16[i].grad.float(), rtol=1e-02, atol=1e-04
                )
                torch.testing.assert_allclose(ly1[i].grad, ly2[i].grad)
                torch.testing.assert_allclose(
                    ly1[i].grad, ly2_bf16[i].grad.float(), rtol=1e-02, atol=1e-04
                )

    def test_embeddingbag_op(self):
        """EmbeddingBag: bf16 output under autocast for inference (no_grad),
        but fp32 when grad is enabled (weights stay fp32 for training)."""
        cpu_emb = nn.EmbeddingBag(10, 3, mode="sum", sparse=True)
        autocast_emb = copy.deepcopy(cpu_emb)
        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        # bf16_input = input.clone().detach()
        offsets = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
        # bf16_offsets = offsets.clone().detach()
        cpu_out = cpu_emb(input, offsets)
        with torch.cpu.amp.autocast(
            enabled=True, dtype=torch.bfloat16
        ), torch.no_grad():
            inference_out = autocast_emb(input, offsets)
        self.assertEqual(cpu_out.dtype, torch.float)
        self.assertEqual(inference_out.dtype, torch.bfloat16)
        torch.testing.assert_allclose(
            cpu_out, inference_out.float(), rtol=1e-02, atol=1e-4
        )
        # re-init autocast_emb
        autocast_emb = copy.deepcopy(cpu_emb)
        with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
            traininig_out = autocast_emb(input, offsets)
        # do not cast weight to bf16 while not inference only
        self.assertEqual(traininig_out.dtype, torch.float)
        self.assertEqual(cpu_out, traininig_out)
# it will be removed after pytorch batch_norm is optimized well.
class TestBatchNorm(TestCase):
    """BatchNorm 1d/2d/3d under CPU autocast: output dtype, numeric parity
    with fp32 eager mode, grads, and channels-last propagation."""

    def test_batch_norm(self):
        class M(nn.Module):
            def __init__(self, conv, bn):
                super(M, self).__init__()
                self.conv = conv(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
                self.bn = bn(
                    64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
                )

            def forward(self, x):
                x = self.conv(x)
                x = self.bn(x)
                x.relu_()
                return x

        conv_m = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
        input_size = {1: [50], 2: [50, 50], 3: [50, 50, 50]}
        for dim in [1, 2, 3]:
            # make fall through for batch_norm for autocast case.
            x = torch.randn([1, 3] + input_size[dim])
            model = M(conv_m[dim], bn_m[dim])
            # test training case.
            model.train()
            with torch.cpu.amp.autocast():
                y = model(x)
            self.assertEqual(y.dtype, torch.bfloat16)
            # test inference case.
            model.eval()
            with torch.cpu.amp.autocast():
                y = model(x)
            self.assertEqual(y.dtype, torch.bfloat16)

    def _test_batch_norm(self, bn, dim=2):
        """Shared driver: compares eager fp32, autocast fp32, bf16-input, and
        channels-last (2D only) variants of the given BatchNorm module."""
        m = copy.deepcopy(bn)
        m_autocast = copy.deepcopy(bn)
        m_bf16 = copy.deepcopy(bn)
        input_size = [20, 100, 35]
        if dim == 2:
            input_size += [45]
        if dim == 3:
            input_size += [45, 10]
        input = torch.randn(input_size)
        x = input.clone().detach().requires_grad_()
        x_autocast = input.clone().detach().requires_grad_()
        x_bf16 = input.clone().detach().bfloat16().requires_grad_()
        # fp32 eager reference.
        y = m(x)
        y.mean().backward()
        # fp32 input under autocast: batch_norm falls through, so outputs
        # and grads must match the eager reference exactly.
        with torch.cpu.amp.autocast():
            y_autocast = m_autocast(x_autocast)
            y_autocast.mean().backward()
        self.assertEqual(y, y_autocast)
        self.assertEqual(x.grad, x_autocast.grad)
        self.assertEqual(m.weight.grad, m_autocast.weight.grad)
        self.assertEqual(m.bias.grad, m_autocast.bias.grad)
        # bfloat16
        with torch.cpu.amp.autocast():
            y_bf16 = m_bf16(x_bf16)
            y_bf16.mean().backward()
        self.assertEqual(y_bf16.dtype, torch.bfloat16)
        self.assertEqual(x_bf16.grad.dtype, torch.bfloat16)
        # channels last
        m1_autocast = copy.deepcopy(bn)
        if dim == 2:
            x1_autocast = (
                input.clone()
                .detach()
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
        else:
            x1_autocast = input.clone().detach().requires_grad_()
        with torch.cpu.amp.autocast():
            y1_autocast = m1_autocast(x1_autocast)
            y1_autocast.mean().backward()
        if dim == 2:
            # Memory format must be preserved through forward and backward.
            self.assertTrue(
                y1_autocast.is_contiguous(memory_format=torch.channels_last)
            )
            self.assertTrue(
                x1_autocast.grad.is_contiguous(memory_format=torch.channels_last)
            )
        self.assertEqual(y, y1_autocast)
        self.assertEqual(x.grad, x1_autocast.grad)
        self.assertEqual(m.weight.grad, m1_autocast.weight.grad)
        self.assertEqual(m.bias.grad, m1_autocast.bias.grad)

    def test_batch_norm_train(self):
        for dim in [1, 2, 3]:
            bn = bn_m[dim](100).train()
            bn.weight.data = torch.randn(100)
            bn.bias.data = torch.randn(100)
            self._test_batch_norm(bn, dim=dim)

    def test_batch_norm_eval(self):
        for dim in [1, 2, 3]:
            bn = bn_m[dim](100).eval()
            bn.weight.data = torch.randn(100)
            bn.bias.data = torch.randn(100)
            self._test_batch_norm(bn, dim=dim)

    def test_batch_norm_untrack_running_stats(self):
        for dim in [1, 2, 3]:
            bn = bn_m[dim](100, track_running_stats=False)
            bn.weight.data = torch.randn(100)
            bn.bias.data = torch.randn(100)
            self._test_batch_norm(bn, dim=dim)
class M(nn.Module):
    """Minimal LSTM wrapper used by TestLSTM; forwards an optional initial state."""

    def __init__(
        self,
        input_size,
        hidden_size,
        num_layers,
        bidirectional,
        bias,
        dropout,
        batch_first,
    ):
        super(M, self).__init__()
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            bias=bias,
            dropout=dropout,
            batch_first=batch_first,
        )

    def forward(self, x, h=None):
        # h is either None (zero state) or an (h_0, c_0) tuple.
        output, final_state = self.lstm(x, h)
        return output, final_state
class TestLSTM(TorchTestCase):
    """ipex LSTM vs stock nn.LSTM over a grid of configurations, in fp32 and
    bf16, inference and training, plus packed-sequence input."""

    def _lstm_params_list(self):
        # Value lists, in the order consumed by itertools.product below.
        params_dict = {
            "input_size": [1, 2],
            "hidden_size": [5, 32],
            "num_layers": [1, 3],
            "bidirectional": [False, True],
            "bias": [False, True],
            "empty_state": [False, True],
            "batch_first": [False, True],
            "dropout": [0, 0.4, 0.7, 1],
            "batch_size": [1, 2],
            "seq_len": [1, 3],
        }
        params_list = []
        for key, value in params_dict.items():
            params_list.append(value)
        return params_list

    def _cast_dtype(self, input, bf16):
        # Cast module-or-tensor to bf16 only when the bf16 path is active.
        if bf16:
            input = input.to(torch.bfloat16)
        return input

    def _test_lstm(self, training, bf16, rtol=1.3e-6, atol=1e-5):
        """Compares outputs, final states, and (when training) every grad of
        the ipex-converted LSTM against the reference nn.LSTM."""
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        params_list = self._lstm_params_list()
        for (
            input_size,
            hidden_size,
            num_layers,
            bidirectional,
            bias,
            empty_state,
            batch_first,
            dropout,
            batch_size,
            seq_len,
        ) in itertools.product(*params_list):
            # dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1
            if dropout > 0 and num_layers == 1:
                continue
            num_directions = 2 if bidirectional else 1
            if batch_first:
                input = torch.randn(batch_size, seq_len, input_size)
            else:
                input = torch.randn(seq_len, batch_size, input_size)
            h = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            c = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            input_cpu = input.clone().requires_grad_(training)
            h_cpu = h.clone().requires_grad_(training)
            c_cpu = c.clone().requires_grad_(training)
            model_cpu = M(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                bidirectional=bidirectional,
                bias=bias,
                dropout=dropout,
                batch_first=batch_first,
            )
            model_cpu.train() if training else model_cpu.eval()
            input_ipex = input.clone().requires_grad_(training)
            h_ipex = h.clone().requires_grad_(training)
            c_ipex = c.clone().requires_grad_(training)
            model_ipex = copy.deepcopy(model_cpu)
            model_ipex.train() if training else model_ipex.eval()
            # Swap the stock nn.LSTM for the ipex implementation in-place.
            ipex.nn.utils._lstm_convert.replace_lstm_with_ipex_lstm(model_ipex, None)
            with torch.cpu.amp.autocast(enabled=bf16, dtype=torch.bfloat16):
                # Re-seed before each forward so dropout masks match.
                if empty_state:
                    torch.manual_seed(rand_seed)
                    y_cpu, hy_cpu = self._cast_dtype(model_cpu, bf16)(
                        self._cast_dtype(input_cpu, bf16)
                    )
                    torch.manual_seed(rand_seed)
                    y_ipex, hy_ipex = model_ipex(input_ipex)
                else:
                    torch.manual_seed(rand_seed)
                    y_cpu, hy_cpu = self._cast_dtype(model_cpu, bf16)(
                        self._cast_dtype(input_cpu, bf16),
                        (self._cast_dtype(h_cpu, bf16), self._cast_dtype(c_cpu, bf16)),
                    )
                    torch.manual_seed(rand_seed)
                    y_ipex, hy_ipex = model_ipex(input_ipex, (h_ipex, c_ipex))
                self.assertEqual(y_cpu, y_ipex, rtol=rtol, atol=atol)
                self.assertEqual(hy_cpu[0], hy_ipex[0], rtol=rtol, atol=atol)
                self.assertEqual(hy_cpu[1], hy_ipex[1], rtol=rtol, atol=atol)
                if training:
                    y_cpu.sum().backward(retain_graph=True)
                    y_ipex.sum().backward(retain_graph=True)
                    self.assertEqual(
                        input_ipex.grad, input_cpu.grad, rtol=rtol, atol=atol
                    )
                    self.assertEqual(
                        self._cast_dtype(model_ipex.lstm.weight_ih_l0.grad, bf16),
                        model_cpu.lstm.weight_ih_l0.grad,
                        rtol=rtol,
                        atol=atol,
                    )
                    self.assertEqual(
                        self._cast_dtype(model_ipex.lstm.weight_hh_l0.grad, bf16),
                        model_cpu.lstm.weight_hh_l0.grad,
                        rtol=rtol,
                        atol=atol,
                    )
                    if bias:
                        self.assertEqual(
                            self._cast_dtype(model_ipex.lstm.bias_ih_l0.grad, bf16),
                            model_cpu.lstm.bias_ih_l0.grad,
                            rtol=rtol,
                            atol=atol,
                        )
                        self.assertEqual(
                            self._cast_dtype(model_ipex.lstm.bias_hh_l0.grad, bf16),
                            model_cpu.lstm.bias_hh_l0.grad,
                            rtol=rtol,
                            atol=atol,
                        )
                    if not empty_state:
                        hy_cpu[0].sum().backward(retain_graph=True)
                        hy_ipex[0].sum().backward(retain_graph=True)
                        self.assertEqual(h_ipex.grad, h_cpu.grad, rtol=rtol, atol=atol)
                        hy_cpu[1].sum().backward(retain_graph=True)
                        hy_ipex[1].sum().backward(retain_graph=True)
                        self.assertEqual(c_ipex.grad, c_cpu.grad, rtol=rtol, atol=atol)

    def _test_lstm_pack_padded_sequence(self):
        """ipex LSTM must match stock LSTM on a pack_padded_sequence input
        with highly non-uniform lengths."""
        embedding_dim = 1024
        hidden_dim = 10
        batch_size = 24
        num_layers = 1
        bidirectional = True
        num_direc = 2
        max_lens = 96
        sent = torch.randn(batch_size, max_lens, embedding_dim)
        hid_0 = torch.rand(num_layers * num_direc, batch_size, hidden_dim)
        hid_1 = torch.randn(num_layers * num_direc, batch_size, hidden_dim)
        sentences = sent.clone().requires_grad_(False)
        sent_lens = torch.Tensor(
            [1, 2, 3, 4, 5, 1, 3, 2, 96, 5, 3, 1, 1, 2, 1, 2, 3, 6, 1, 2, 4, 6, 2, 1]
        )
        assert sent_lens.shape[0] == batch_size
        assert sent_lens.max().item() == max_lens
        hidden_0 = hid_0.clone().requires_grad_(False)
        hidden_1 = hid_1.clone().requires_grad_(False)
        embeds = torch.nn.utils.rnn.pack_padded_sequence(
            sentences, sent_lens, batch_first=True, enforce_sorted=False
        )
        model = M(
            embedding_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,
            bias=True,
            dropout=0.2,
        )
        model_ipex = copy.deepcopy(model)
        ipex.nn.utils._lstm_convert.replace_lstm_with_ipex_lstm(model_ipex, None)
        lstm_out, hidden_out = model(embeds, (hidden_0, hidden_1))
        lstm_out, _ = torch.nn.utils.rnn.pad_packed_sequence(lstm_out, batch_first=True)
        lstm_out_ipex, hidden_out_ipex = model_ipex(embeds, (hidden_0, hidden_1))
        lstm_out_ipex, _ = torch.nn.utils.rnn.pad_packed_sequence(
            lstm_out_ipex, batch_first=True
        )
        self.assertEqual(lstm_out, lstm_out_ipex)
        self.assertEqual(hidden_out[0], hidden_out_ipex[0])
        self.assertEqual(hidden_out[1], hidden_out_ipex[1])

    def test_lstm_op(self):
        self._test_lstm(training=False, bf16=False)
        self._test_lstm(training=False, bf16=True, rtol=0.02, atol=0.02)
        self._test_lstm(training=True, bf16=False)
        self._test_lstm(training=True, bf16=True, rtol=0.02, atol=0.03)

    def test_lstm_pack_padded_sequence(self):
        self._test_lstm_pack_padded_sequence()
class TestAutocastOperations(TestCase):
    """Per-op autocast coverage: for each op list in AutocastCPUTestLists,
    verify output dtypes and numeric parity against manual Python-side casting."""

    def setUp(self):
        super(TestAutocastOperations, self).setUp()
        # Op lists (op name, args[, kwargs]) grouped by expected autocast policy.
        self.autocast_lists = AutocastCPUTestLists(torch.device("cpu"))

    def tearDown(self):
        del self.autocast_lists
        super(TestAutocastOperations, self).tearDown()

    def _run_autocast_outofplace(
        self,
        op,
        args,
        run_as_type,
        out_type=None,
        autocast_type=torch.bfloat16,
        module=torch,
        add_kwargs=None,
    ):
        """Run *op* under autocast and check:
        1. output dtype is out_type (defaults to run_as_type),
        2. module.op and Tensor.op variants agree (when both exist),
        3. the result is bitwise equal to manually casting args to
           run_as_type with autocast disabled.
        """
        # helper to cast args
        def cast(val, to_type):
            if isinstance(val, torch.Tensor):
                return val.to(to_type) if val.is_floating_point() else val
            elif isinstance(val, collections.abc.Iterable):
                return type(val)(cast(v, to_type) for v in val)
            else:
                return val

        if add_kwargs is None:
            add_kwargs = {}
        self.assertFalse(torch.is_autocast_cpu_enabled())
        with torch.cpu.amp.autocast(enabled=True, dtype=autocast_type):
            self.assertTrue(torch.is_autocast_cpu_enabled())
            out_type = out_type if out_type is not None else run_as_type
            output = output_method = None
            # Try module.* variant, if requested:
            if module is not None and hasattr(module, op):
                output = getattr(module, op)(*args, **add_kwargs)
                if isinstance(output, torch.Tensor):
                    self.assertTrue(
                        out_type == output.dtype,
                        "autocast for torch.{} produced {}, should produce {}".format(
                            op, output.dtype, out_type
                        ),
                    )
            # Try Tensor.* variant:
            if hasattr(torch.Tensor, op):
                output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
                if isinstance(output_method, torch.Tensor):
                    self.assertTrue(
                        out_type == output_method.dtype,
                        "autocast for torch.{} produced {}, should produce torch.{}".format(
                            op, output_method.dtype, out_type
                        ),
                    )
            self.assertTrue(
                (output is not None) or (output_method is not None),
                "{} not found as an attribute on either Tensor or the requested module {}".format(
                    op, module
                ),
            )

            # Accounts for ops that return Tensors, iterables, and other non-Tensors.
            # For example, lstm_cell returns a tuple and equal returns bool.
            def compare(first, second):
                if isinstance(first, torch.Tensor):
                    return torch.equal(first, second)
                elif isinstance(first, collections.abc.Iterable):
                    return all(compare(f, s) for f, s in zip(first, second))
                else:
                    return first == second

            # If both torch.* and Tensor.* variants were found, check outputs are identical
            if (output is not None) and (output_method is not None):
                self.assertTrue(type(output) == type(output_method))
                comparison = compare(output, output_method)
                self.assertTrue(
                    comparison,
                    "torch.{0} result did not match Tensor.{0} result".format(op),
                )
            # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
            # as the C++-side autocasting, and should be bitwise accurate.
            output_to_compare = output if output is not None else output_method
            with torch.cpu.amp.autocast(enabled=False, dtype=autocast_type):
                self.assertFalse(torch.is_autocast_cpu_enabled())
                if module is not None and hasattr(module, op):
                    control = getattr(module, op)(
                        *cast(args, run_as_type), **add_kwargs
                    )
                else:
                    control = getattr(args[0].to(run_as_type), op)(
                        *cast(args[1:], run_as_type), **add_kwargs
                    )
                self.assertTrue(type(output_to_compare) == type(control))
                comparison = compare(output_to_compare, control)
                self.assertTrue(
                    comparison, "torch.{} result did not match control".format(op)
                )
            self.assertTrue(torch.is_autocast_cpu_enabled())
        self.assertFalse(torch.is_autocast_cpu_enabled())

    def _run_autocast_pass_test(
        self,
        op,
        args,
        run_as_type,
        out_type=None,
        autocast_type=torch.bfloat16,
        module=torch,
        add_kwargs=None,
    ):
        """Smoke-test variant of _run_autocast_outofplace: just run the op
        under autocast and require it not to raise; no dtype/value checks."""
        # helper to cast args
        def cast(val, to_type):
            if isinstance(val, torch.Tensor):
                return val.to(to_type) if val.is_floating_point() else val
            elif isinstance(val, collections.abc.Iterable):
                return type(val)(cast(v, to_type) for v in val)
            else:
                return val

        if add_kwargs is None:
            add_kwargs = {}
        self.assertFalse(torch.is_autocast_cpu_enabled())
        with torch.cpu.amp.autocast(enabled=True, dtype=autocast_type):
            self.assertTrue(torch.is_autocast_cpu_enabled())
            out_type = out_type if out_type is not None else run_as_type
            # Try module.* variant, if requested:
            if module is not None and hasattr(module, op):
                getattr(module, op)(*args, **add_kwargs)
            # Try Tensor.* variant:
            if hasattr(torch.Tensor, op):
                getattr(args[0], op)(*args[1:], **add_kwargs)
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
    def test_autocast_torch_expect_builtin_promote(self):
        # torch.* ops relying on builtin type promotion must produce the
        # promoted out_type under both bf16 and fp16 autocast.
        for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote_bf16:
            self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
        for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote_fp16:
            self._run_autocast_outofplace(
                op, args, torch.float32, out_type=out_type, autocast_type=torch.float16
            )
def test_autocast_methods_expect_builtin_promote(self):
for (
op,
args,
out_type,
) in self.autocast_lists.methods_expect_builtin_promote_bf16:
self._run_autocast_outofplace(
op, args, torch.float32, module=None, out_type=out_type
)
for (
op,
args,
out_type,
) in self.autocast_lists.methods_expect_builtin_promote_bf16:
self._run_autocast_outofplace(
op,
args,
torch.float32,
module=None,
out_type=out_type,
autocast_type=torch.float16,
)
    # One test method per op list in AutocastCPUTestLists. Naming scheme:
    # torch_/nn_ = namespace tried; bf16/fp16 = autocast dtype; trailing
    # _fp32 = op expected to fall back to fp32; _multi_output = pass-test only.
    def test_autocast_torch_bf16(self):
        for op_with_args in self.autocast_lists.torch_bf16:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op, args, torch.bfloat16, add_kwargs=maybe_kwargs
            )

    def test_autocast_nn_bf16(self):
        for op, args in self.autocast_lists.nn_bf16:
            self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)

    def test_autocast_torch_bf16_fp32(self):
        for op_with_args in self.autocast_lists.torch_bf16_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op, args, torch.float32, add_kwargs=maybe_kwargs
            )

    def test_autocast_nn_bf16_fp32(self):
        for op_with_args in self.autocast_lists.nn_bf16_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs
            )

    def test_autocast_fft_fp32(self):
        for op_with_args in self.autocast_lists.fft_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op, args, torch.float32, module=torch._C._fft, add_kwargs=maybe_kwargs
            )

    def test_autocast_special_fp32(self):
        for op_with_args in self.autocast_lists.special_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op,
                args,
                torch.float32,
                module=torch._C._special,
                add_kwargs=maybe_kwargs,
            )

    def test_autocast_linalg_fp32(self):
        for op_with_args in self.autocast_lists.linalg_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op,
                args,
                torch.float32,
                module=torch._C._linalg,
                add_kwargs=maybe_kwargs,
            )

    def test_autocast_torch_need_autocast_promote(self):
        for op, args in self.autocast_lists.torch_need_autocast_promote_bf16:
            self._run_autocast_outofplace(op, args, torch.float32)
        for op, args in self.autocast_lists.torch_need_autocast_promote_fp16:
            self._run_autocast_outofplace(
                op, args, torch.float32, autocast_type=torch.float16
            )

    def test_autocast_blacklist_non_float_output(self):
        for op, args in self.autocast_lists.blacklist_non_float_output_pass_test:
            self._run_autocast_pass_test(op, args, torch.float32)

    def test_autocast_torch_bf16_multi_output(self):
        for op_with_args in self.autocast_lists.torch_bf16_multi_output:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op, args, torch.bfloat16, add_kwargs=maybe_kwargs
            )

    def test_autocast_torch_bf16_fp32_multi_output(self):
        for op_with_args in self.autocast_lists.torch_bf16_fp32_multi_output:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op, args, torch.float32, add_kwargs=maybe_kwargs
            )

    def test_autocast_nn_bf16_fp32_multi_output(self):
        for op_with_args in self.autocast_lists.nn_bf16_fp32_multi_output:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_pass_test(
                op, args, torch.float32, module=torch._C._nn, add_kwargs=maybe_kwargs
            )

    def test_autocast_torch_fp16(self):
        for op_with_args in self.autocast_lists.torch_fp16:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op,
                args,
                torch.float16,
                autocast_type=torch.float16,
                add_kwargs=maybe_kwargs,
            )

    def test_autocast_nn_fp16(self):
        for op, args in self.autocast_lists.nn_fp16:
            self._run_autocast_outofplace(
                op,
                args,
                torch.float16,
                autocast_type=torch.float16,
                module=torch._C._nn,
            )

    def test_autocast_torch_fp16_fp32(self):
        for op_with_args in self.autocast_lists.torch_fp16_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op,
                args,
                torch.float32,
                autocast_type=torch.float16,
                add_kwargs=maybe_kwargs,
            )

    def test_autocast_nn_fp16_fp32(self):
        for op_with_args in self.autocast_lists.nn_fp16_fp32:
            op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
            self._run_autocast_outofplace(
                op,
                args,
                torch.float32,
                autocast_type=torch.float16,
                module=torch._C._nn,
                add_kwargs=maybe_kwargs,
            )
def test_autocast_torch_fp16_fp32_multi_output(self):
for op_with_args in self.autocast_lists.torch_fp16_fp32_multi_output:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_pass_test(
op,
args,
torch.float32,
autocast_type=torch.float16,
add_kwargs=maybe_kwargs,
)
def test_autocast_torch_fallthrough_bf16(self):
for op_with_args in self.autocast_lists.torch_fallthrough_bf16:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op,
args,
torch.bfloat16,
add_kwargs=maybe_kwargs,
)
def test_autocast_nn_fallthrough_bf16(self):
for op_with_args in self.autocast_lists.nn_fallthrough_bf16:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(
op,
args,
torch.bfloat16,
module=torch._C._nn,
add_kwargs=maybe_kwargs,
)
# Allow running this test file directly (unittest discovers the TestCase above).
if __name__ == "__main__":
    test = unittest.main()
| 44,348 | 38.597321 | 131 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_import.py | import unittest
import subprocess
class TestImport(unittest.TestCase):
    """Sanity checks around importing the extension."""

    def test_import_ipex_without_warning(self):
        """Importing intel_extension_for_pytorch must not print any warning.

        The import runs in a subprocess so it happens in a clean interpreter
        with stderr redirected into stdout.
        """
        command = 'python -c "import intel_extension_for_pytorch" '
        with subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ) as p:
            # Read the whole output and decode it. The previous check compared
            # the str "warn" against a *list of bytes lines* returned by
            # readlines(), which could never match, so the assertion was
            # vacuous and always passed.
            out = p.stdout.read().decode(errors="replace")
        print(out)
        assert "warn" not in out
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| 466 | 24.944444 | 81 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ipex_optimize.py | import torch
import torch.fx.experimental.optimization as optimization
import intel_extension_for_pytorch as ipex
import intel_extension_for_pytorch._C as core
from intel_extension_for_pytorch.nn.utils._weight_prepack import (
_IPEXLinear as _IPEXLinear,
_IPEXConv2d as _IPEXConv2d,
)
from torch.testing._internal.common_utils import TestCase
from torch.optim import (
Adadelta,
Adagrad,
Adam,
AdamW,
Adamax,
ASGD,
RMSprop,
Rprop,
SGD,
)
import unittest
import itertools
import copy
from common_utils import TestModule, _empty_weight_bias_parameter_names
from intel_extension_for_pytorch.optim._lamb import Lamb
import os
class ConvBatchNorm(torch.nn.Module):
    """Conv2d followed by BatchNorm2d, carrying its own (N, C, H, W) sample input."""

    def __init__(self):
        super().__init__()
        # Bundled sample input so tests do not have to build one themselves.
        self.input1 = torch.randn(1, 3, 224, 224)
        self.conv = torch.nn.Conv2d(
            3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
        )
        self.bn = torch.nn.BatchNorm2d(
            64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
        )

    def forward(self, x):
        out = self.conv(x)
        return self.bn(out)
class TwoLayerMLP(torch.nn.Module):
    """Two independent Linear layers whose outputs are summed into one scalar."""

    def __init__(self):
        super().__init__()
        self.input1 = torch.randn(2, 2)
        self.input2 = torch.randn(3, 3)
        self.l1 = torch.nn.Linear(2, 2)
        self.l2 = torch.nn.Linear(3, 3)

    def forward(self, x1, x2):
        first = self.l1(x1).sum()
        second = self.l2(x2).sum()
        return first + second
class OneLayerMLP(torch.nn.Module):
    """A single Linear layer with a bundled 2x2 sample input."""

    def __init__(self):
        super().__init__()
        self.input1 = torch.randn(2, 2)
        self.l1 = torch.nn.Linear(2, 2)

    def forward(self, x1):
        out = self.l1(x1)
        return out
class ConvTranspose2d(torch.nn.Module):
    """A single ConvTranspose2d layer with a bundled sample input."""

    def __init__(self):
        super().__init__()
        self.conv_transpose2d = torch.nn.ConvTranspose2d(5, 5, (3, 3))
        self.input1 = torch.randn(5, 5, 3, 3)

    def forward(self, x):
        return self.conv_transpose2d(x)
class LinearBatchNormNd(torch.nn.Module):
    """Linear followed by BatchNorm{1,2,3}d, selected by ``dim`` (1, 2, or 3)."""

    def __init__(self, dim):
        super().__init__()
        self.linear = torch.nn.Linear(32, 32)
        # Map each supported dimensionality to its norm layer and the shape
        # of the bundled sample input.
        variants = {
            1: (torch.nn.BatchNorm1d, (1, 32)),
            2: (torch.nn.BatchNorm2d, (1, 32, 32, 32)),
            3: (torch.nn.BatchNorm3d, (1, 32, 32, 32, 32)),
        }
        bn_cls, input_shape = variants[dim]
        self.input1 = torch.randn(input_shape)
        self.bn = bn_cls(32)

    def forward(self, x):
        out = self.linear(x)
        return self.bn(out)
class ConvBatchNormLinearBatchNorm(torch.nn.Module):
    """conv -> bn -> linear -> bn chain, used to count how many BNs get folded."""

    def __init__(self):
        super().__init__()
        self.input1 = torch.randn(1, 32, 32, 32)
        self.conv = torch.nn.Conv2d(32, 32, 1)
        self.bn1 = torch.nn.BatchNorm2d(32)
        self.linear = torch.nn.Linear(32, 32)
        self.bn2 = torch.nn.BatchNorm2d(32)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn1(out)
        out = self.linear(out)
        return self.bn2(out)
class TestOptimizeCases(TestCase):
    """Behavioral tests for ``ipex.optimize``: conv/linear+BN folding, dtype
    conversion, weight prepacking, inplace semantics, state_dict round-trips
    after optimization, and re-entrancy of repeated optimize calls."""
    def test_optimize_conv_bn_parameters_behavior(self):
        """Conv+BN folding must remove ipex::batch_norm from the traced graph."""
        model = ConvBatchNorm().eval()
        pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
        torch._C._jit_set_texpr_fuser_enabled(False)
        for level in ["O0", "O1"]:
            for conv_bn_folding in [True, False]:
                opt_M = ipex.optimize(
                    model,
                    level=level,
                    dtype=torch.float,
                    conv_bn_folding=conv_bn_folding,
                )
                with torch.no_grad():
                    x = model.input1
                    traced_model = torch.jit.trace(opt_M, x)
                    trace_graph = traced_model.graph_for(x)
                    # batch_norm node survives exactly when folding is disabled
                    self.assertEqual(
                        any(n.kind() == "ipex::batch_norm" for n in trace_graph.nodes()),
                        not (conv_bn_folding),
                    )
                # TODO check weight_prepack.
        torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)
    def test_optimize_linear_bn_parameters_behavior(self):
        """Linear+BN folding must remove ipex::batch_norm for 1d/2d/3d BN."""
        for dim in [1, 2, 3]:
            model = LinearBatchNormNd(dim=dim).eval()
            for level in ["O0", "O1"]:
                for linear_bn_folding in [True, False]:
                    opt_M = ipex.optimize(
                        model,
                        level=level,
                        dtype=torch.float,
                        linear_bn_folding=linear_bn_folding,
                    )
                    with torch.no_grad():
                        x = model.input1
                        traced_model = torch.jit.trace(opt_M, x)
                        trace_graph = traced_model.graph_for(x)
                    self.assertEqual(
                        any(
                            n.kind() == "ipex::batch_norm" for n in trace_graph.nodes()
                        ),
                        not (linear_bn_folding),
                    )
    def test_optimize_conv_bn_linear_bn_parameters_behavior(self):
        """Remaining batch_norm node count equals 2 minus the enabled foldings."""
        model = ConvBatchNormLinearBatchNorm().eval()
        max_num_folding = 2
        for level in ["O0", "O1"]:
            for conv_bn_folding in [True, False]:
                for linear_bn_folding in [True, False]:
                    opt_M = ipex.optimize(
                        model,
                        level=level,
                        dtype=torch.float,
                        conv_bn_folding=conv_bn_folding,
                        linear_bn_folding=linear_bn_folding,
                    )
                    with torch.no_grad():
                        x = model.input1
                        traced_model = torch.jit.trace(opt_M, x)
                        trace_graph = traced_model.graph_for(x)
                    self.assertEqual(
                        len(
                            [
                                n
                                for n in trace_graph.nodes()
                                if n.kind() == "ipex::batch_norm"
                            ]
                        ),
                        max_num_folding - (conv_bn_folding + linear_bn_folding),
                    )
    def test_optimize_bf16_model(self):
        """bf16 optimize: inference has no master weight; training keeps fp32 master."""
        model = ConvBatchNorm()
        optimized_model = ipex.optimize(model.eval(), dtype=torch.bfloat16)
        # An inference-only optimized model should not carry a master-weight attribute.
        self.assertTrue(not hasattr(optimized_model.conv, "master_weight"))
        # A training-mode optimized model keeps an FP32 master copy of each weight.
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        optimized_model, optimized_sgd = ipex.optimize(
            model.train(),
            optimizer=sgd,
            dtype=torch.bfloat16,
            split_master_weight_for_bf16=False,
        )
        self.assertEqual(optimized_model.conv.weight.dtype, torch.bfloat16)
        def found_wrapper(parameter, params_attr):
            # Locate the params_attr wrapper holding this exact parameter object.
            for _, v in params_attr.items():
                if parameter is v.parameter:
                    return v
            return None
        wrapper = found_wrapper(optimized_model.conv.weight, optimized_sgd.params_attr)
        self.assertTrue(wrapper is not None)
        self.assertEqual(wrapper.master_parameter.dtype, torch.float)
    def test_optimize_pretrain_model(self):
        """Frozen (requires_grad=False) params must stay frozen and stay in sync
        between the original and the optimized model after one training step."""
        optimizer_options = [
            Lamb,
            Adadelta,
            Adagrad,
            Adam,
            AdamW,
            Adamax,
            ASGD,
            RMSprop,
            Rprop,
            SGD,
        ]
        options = itertools.product([torch.float, torch.bfloat16], optimizer_options)
        for dtype, optimizer in options:
            model = ConvBatchNorm().to(memory_format=torch.channels_last).train()
            model.conv.weight.requires_grad_(False)
            model.conv.bias.requires_grad_(False)
            origin_model = copy.deepcopy(model)
            lr = 1e-4 if optimizer is SGD else 1e-2
            origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, optimizer=origin_optimizer, dtype=dtype
            )
            for origi_p, opti_p in zip(
                origin_model.parameters(), ipex_model.parameters()
            ):
                self.assertEqual(origi_p.requires_grad, opti_p.requires_grad)
            x = model.input1.to(memory_format=torch.channels_last)
            origin_x = x.clone()
            ipex_x = x.clone()
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                y1 = origin_model(origin_x)
                grad_y = torch.ones_like(y1)
                origin_optimizer.zero_grad()
                y1.backward(grad_y)
                origin_optimizer.step()
                # train one step for ipex.
                y2 = ipex_model(ipex_x)
                ipex_optimizer.zero_grad()
                y2.backward(grad_y)
                ipex_optimizer.step()
            self.assertEqual(y1, y2, rtol=1e-4, atol=5e-02)
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state[var_name],
                    rtol=1e-4,
                    atol=5e-02,
                )
            self.assertTrue(origin_model.conv.weight.grad is None)
            self.assertTrue(ipex_model.conv.weight.grad is None)
    def test_optimize_unsupport_dtype_conversion(self):
        """Optimizing a float64 model to bf16 must warn instead of converting."""
        class Conv(torch.nn.Module):
            def __init__(
                self,
            ):
                super(Conv, self).__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
                )
            def forward(self, x):
                return self.conv(x)
        model = Conv().double()
        with self.assertWarnsRegex(
            UserWarning, "WARNING: Can't convert model's parameters dtype"
        ):
            optimized_model = ipex.optimize(model.eval(), dtype=torch.bfloat16)
    def test_optimize_bf16_upsupported(self):
        """Without onednn bf16 support, bf16 prepack must raise AssertionError."""
        class Conv(torch.nn.Module):
            def __init__(
                self,
            ):
                super(Conv, self).__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False
                )
            def forward(self, x):
                return self.conv(x)
        model = Conv()
        if not core.onednn_has_bf16_support():
            msg = r"BF16 weight prepack needs the cpu support avx512bw, avx512vl and avx512dq, \
please set dtype to torch.float or set weights_prepack to False."
            with self.assertRaisesRegex(AssertionError, msg):
                optimized_model = ipex.optimize(model.eval(), dtype=torch.bfloat16)
    def test_optimize_unsupport_freeze_optimization(self):
        """A frozen ScriptModule cannot be optimized; it must be returned as-is."""
        model = ConvBatchNorm().eval()
        x = model.input1
        with torch.no_grad():
            traced_model = torch.jit.trace(model, x)
            frozen_model = torch.jit.freeze(traced_model)
        optimized_model = ipex.optimize(frozen_model)
        self.assertTrue(frozen_model == optimized_model)
    def test_optimize_inplace_behavior_eval_mode(self):
        """inplace=False copies all weights; inplace=True shares the un-optimized ones."""
        M_ori = TestModule()
        options = itertools.product([torch.float32, torch.bfloat16], ["O0", "O1"])
        for dtype, level in options:
            # non-inplace
            M = copy.deepcopy(M_ori).eval()
            opt_M = ipex.optimize(M, dtype=dtype, level=level, inplace=False)
            self.assertTrue(
                M.linear.weight.data_ptr() != opt_M.linear.weight.data_ptr()
            )
            self.assertTrue(M.conv.weight.data_ptr() != opt_M.conv.weight.data_ptr())
            self.assertTrue(
                M.embeddingbag.weight.data_ptr() != opt_M.embeddingbag.weight.data_ptr()
            )
            # inplace
            M = copy.deepcopy(M_ori).eval()
            opt_M = ipex.optimize(M, dtype=dtype, level=level, inplace=True)
            # After ConvBN folding, opt_M will be Graph Module while the M is original nn.Module which they
            # share parameters. But the changes on Graph Module cannot be reflected on original module. So
            # only the un-optimized weight will use same mem buffer with original module.
            if level == "O1":
                self.assertTrue(
                    M.conv.weight.data_ptr() != opt_M.conv.weight.data_ptr()
                )  # linear is optimized and used same parameter with original model
                self.assertTrue(M.linear.weight is opt_M.linear.weight)
                self.assertTrue(isinstance(opt_M.linear, _IPEXLinear))
            # un-optimized part should be inplaced
            self.assertTrue(
                M.embeddingbag.weight.data_ptr() == opt_M.embeddingbag.weight.data_ptr()
            )
    def test_optimize_inplace_behavior_training_mode_with_optimizer(self):
        """Training-mode inplace semantics: data_ptr sharing and dtype of params."""
        M_ori = TestModule()
        options = itertools.product([torch.float32, torch.bfloat16], ["O0", "O1"])
        for dtype, level in options:
            # non-inplace
            M = copy.deepcopy(M_ori).train()
            sgd = torch.optim.SGD(M.parameters(), lr=0.1)
            opt_M, _ = ipex.optimize(
                M, dtype=dtype, optimizer=sgd, level=level, inplace=False
            )
            self.assertTrue(
                M.linear.weight.data_ptr() != opt_M.linear.weight.data_ptr()
            )
            self.assertTrue(M.conv.weight.data_ptr() != opt_M.conv.weight.data_ptr())
            self.assertTrue(
                M.embeddingbag.weight.data_ptr() != opt_M.embeddingbag.weight.data_ptr()
            )
            if level == "O1":
                self.assertEqual(M.linear.weight.dtype, torch.float)
                self.assertEqual(M.conv.weight.dtype, torch.float)
                self.assertEqual(M.embeddingbag.weight.dtype, torch.float)
                self.assertEqual(M.bn.weight.dtype, torch.float)
                self.assertEqual(opt_M.linear.weight.dtype, dtype)
                self.assertEqual(opt_M.conv.weight.dtype, dtype)
                self.assertEqual(opt_M.embeddingbag.weight.dtype, dtype)
                self.assertEqual(opt_M.bn.weight.dtype, torch.float)
            # inplace
            M = copy.deepcopy(M_ori).train()
            sgd = torch.optim.SGD(M.parameters(), lr=0.1)
            opt_M, _ = ipex.optimize(
                M, dtype=dtype, optimizer=sgd, level=level, inplace=True
            )
            self.assertTrue(
                M.linear.weight.data_ptr() == opt_M.linear.weight.data_ptr()
            )
            self.assertTrue(M.conv.weight.data_ptr() == opt_M.conv.weight.data_ptr())
            self.assertTrue(
                M.embeddingbag.weight.data_ptr() == opt_M.embeddingbag.weight.data_ptr()
            )
            if level == "O1":
                self.assertEqual(M.linear.weight.dtype, dtype)
                self.assertEqual(M.conv.weight.dtype, dtype)
                self.assertEqual(M.embeddingbag.weight.dtype, dtype)
                self.assertEqual(M.bn.weight.dtype, torch.float)
    def _test_tensor_convert(self, tensor, bf16_tensor):
        """Helper: split fp32 -> (bf16 top, bottom) halves and recombine losslessly."""
        top_half, bot_half = torch.ops.torch_ipex.split_float_bfloat16(tensor)
        # truncated top half should equal with convert fp32 to bf16 by ".bfloat()"
        self.assertEqual(bf16_tensor, top_half)
        # recovery float tensor with top half and bottom half
        float_tensor = torch.ops.torch_ipex.cat_bfloat16_float(top_half, bot_half)
        self.assertEqual(tensor, float_tensor)
        self.assertEqual(tensor.stride(), top_half.stride())
        self.assertEqual(tensor.stride(), float_tensor.stride())
    def test_tensor_convert(self):
        """Split/concat round-trip across contiguous, transposed, sliced and
        channels-last layouts."""
        # contiguous case
        tensor = torch.rand(100, 100)
        self._test_tensor_convert(tensor, tensor.bfloat16())
        # transposed case
        self._test_tensor_convert(tensor.t(), tensor.bfloat16().t())
        # sliced-out case
        self._test_tensor_convert(tensor[2:5, 2:5], tensor.bfloat16()[2:5, 2:5])
        # nc11 channel-last case
        tensor = torch.rand(128, 256, 1, 1).to(memory_format=torch.channels_last)
        self._test_tensor_convert(tensor, tensor.bfloat16())
    def test_module_conversion(self):
        """Module types after optimize: O0 keeps stock modules; O1 swaps in the
        IPEX prepacked modules (except fp32 linear without auto kernel selection)."""
        M_ori = TestModule()
        options = itertools.product(
            [torch.bfloat16, torch.float32], ["O0", "O1"], [True, False]
        )
        for dtype, level, auto_kernel_selection in options:
            sgd = torch.optim.SGD(M_ori.parameters(), lr=0.1)
            opt_M, _ = ipex.optimize(
                M_ori,
                dtype=dtype,
                optimizer=sgd,
                level=level,
                auto_kernel_selection=auto_kernel_selection,
            )
            if level == "O0":
                self.assertTrue(isinstance(opt_M.linear, torch.nn.Linear))
                self.assertTrue(isinstance(opt_M.conv, torch.nn.Conv2d))
            else:
                if not auto_kernel_selection and dtype == torch.float32:
                    self.assertTrue(isinstance(opt_M.linear, torch.nn.Linear))
                else:
                    self.assertTrue(isinstance(opt_M.linear, _IPEXLinear))
                self.assertTrue(isinstance(opt_M.conv, _IPEXConv2d))
    def test_record_shape(self):
        """sample_input must record the collapsed batch size on each linear."""
        options = itertools.product([OneLayerMLP, TwoLayerMLP], [True, False])
        for module, inference_only in options:
            M = module()
            input = M.input1
            if isinstance(M, TwoLayerMLP):
                input = (M.input1, M.input2)
            if inference_only:
                M.eval()
                opt_M = ipex.optimize(M, sample_input=input, auto_kernel_selection=True)
            else:
                optimizer = torch.optim.SGD(M.parameters(), lr=0.01)
                opt_M, _ = ipex.optimize(
                    M,
                    optimizer=optimizer,
                    sample_input=input,
                    auto_kernel_selection=True,
                )
            self.assertEqual(opt_M.l1.batch_size_collapsed, 2)
            if isinstance(M, TwoLayerMLP):
                self.assertEqual(opt_M.l2.batch_size_collapsed, 3)
    def test_traced_model_serialization(self):
        """A traced optimized model must produce identical outputs after
        save/load round-trip through TorchScript."""
        for module in [ConvBatchNorm, OneLayerMLP, ConvTranspose2d]:
            for dtype in [torch.float, torch.bfloat16]:
                M = module().eval()
                input = M.input1.to(dtype)
                opt_M = ipex.optimize(M, dtype=dtype, auto_kernel_selection=True)
                with torch.no_grad():
                    traced_M = torch.jit.trace(opt_M, input).eval()
                    traced_M.save("traced_m.pt")
                    loaded_M = torch.jit.load("traced_m.pt")
                    self.assertEqual(traced_M(input), loaded_M(input))
                    os.remove("traced_m.pt")
    def test_optimized_model_with_fx(self):
        """FX fusion and subsequent JIT freeze must not change the outputs of
        an ipex-optimized model."""
        for module in [ConvBatchNorm, OneLayerMLP, ConvTranspose2d]:
            for dtype in [torch.float, torch.bfloat16]:
                M = module().eval()
                input = M.input1.to(dtype)
                opt_M = ipex.optimize(M, dtype=dtype, auto_kernel_selection=True)
                ref_out = opt_M(input)
                fx_M = optimization.fuse(opt_M)
                fx_out = fx_M(input)
                self.assertEqual(ref_out, fx_out)
                with torch.no_grad():
                    traced_M = torch.jit.trace(fx_M, input).eval()
                    traced_M = torch.jit.freeze(traced_M)
                    # do graph opt
                    traced_M(input)
                    # get optimized results
                    out = traced_M(input)
                    self.assertEqual(ref_out, out)
    def test_optimized_model_with_sample_input(self):
        """Optimizing with a sample_input must not alter the model's state_dict."""
        for module in [ConvBatchNorm, OneLayerMLP, ConvTranspose2d]:
            model = module().train()
            input = model.input1
            optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
            origin_model_state = copy.deepcopy(model.state_dict())
            ipex_model, _ = ipex.optimize(
                model,
                dtype=torch.float32,
                inplace=False,
                optimizer=optimizer,
                sample_input=input,
            )
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name], ipex_model_state[var_name]
                )
    def test_partial_model_update(self):
        """Backward through only part of the model's outputs must still allow
        an optimizer step (regression test for partial gradient updates)."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.L1 = torch.nn.Linear(10, 10)
                self.L2 = torch.nn.Linear(10, 10)
            def forward(self, x):
                return (self.L1(x), self.L2(x))
        model = M()
        optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, eps=1e-8)
        model.train()
        model, optimizer = ipex.optimize(
            model, optimizer=optimizer, dtype=torch.bfloat16
        )
        with torch.cpu.amp.autocast():
            loss = model(torch.rand(10, 10))[0].sum()
        loss.backward()
        optimizer.step()
    def _test_load_after_ipex_optimize_inference(
        self, model_class, dtype, optimizer_class, level, inplace
    ):
        """Helper: train an optimized model, then load its state_dict into a
        freshly optimized inference model and verify the parameters match."""
        model = model_class().train()
        input = model.input
        if optimizer_class == SGD:
            optimizer = optimizer_class(model.parameters(), lr=10.01, momentum=0.1)
        else:
            optimizer = optimizer_class(model.parameters(), lr=10.01)
        ipex_model, ipex_optimizer = ipex.optimize(
            model,
            dtype=dtype,
            optimizer=optimizer,
            sample_input=input,
            level=level,
            inplace=inplace,
        )
        # train 2 iters to save something in optimizer's state
        for _ in range(2):
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                y = ipex_model(*input).sum()
            ipex_optimizer.zero_grad()
            y.backward()
            ipex_optimizer.step()
        inf_model = model_class().eval()
        inf_model_state = inf_model.state_dict()
        ipex_inf_model = ipex.optimize(
            inf_model, dtype=dtype, sample_input=input, level=level, inplace=inplace
        )
        # check parameters are not same before load
        ipex_model_state = ipex_model.state_dict()
        for var_name in ipex_model_state:
            self.assertNotEqual(ipex_model_state[var_name], inf_model_state[var_name])
        for p1 in ipex_model.named_parameters():
            prefix, attr = p1[0].split(".")
            sub_m = getattr(ipex_inf_model, prefix)
            param = getattr(sub_m, attr)
            # the empty weight and bias tensor will always be Tensor()
            assert_fn = (
                self.assertEqual
                if p1[0]
                in _empty_weight_bias_parameter_names(
                    prefixes=["conv", "linear", "conv_transpose2d"]
                )
                else self.assertNotEqual
            )
            assert_fn(p1[1], param)
        # check parameters are same after load
        ipex_inf_model.load_state_dict(ipex_model_state)
        inf_model_state = ipex_inf_model.state_dict()
        for var_name in ipex_model_state:
            self.assertEqual(
                ipex_model_state[var_name].to(dtype).float(), inf_model_state[var_name]
            )
        for p1 in ipex_model.named_parameters():
            if p1[0] == "linear.weight":
                # Do not compare linear.weight with block format since
                # linear.weight in ipex_model(training model) is plain
                continue
            prefix, attr = p1[0].split(".")
            sub_m = getattr(ipex_inf_model, prefix)
            param = getattr(sub_m, attr)
            self.assertEqual(p1[1], param)
    def _test_load_after_ipex_optimize_training(
        self, model_class, dtype, optimizer_class, level, inplace
    ):
        """Helper: verify model/optimizer state_dict round-trip restores the
        prepacked (block-format) parameters and the optimizer state exactly."""
        model = model_class().train()
        input = model.input
        if optimizer_class == SGD:
            optimizer = optimizer_class(model.parameters(), lr=10.01, momentum=0.1)
        else:
            optimizer = optimizer_class(model.parameters(), lr=10.01)
        ipex_model, ipex_optimizer = ipex.optimize(
            model,
            dtype=dtype,
            optimizer=optimizer,
            sample_input=input,
            level=level,
            inplace=inplace,
        )
        # train 2 iters to save something in optimizer's state
        for _ in range(2):
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                y = ipex_model(*input).sum()
            ipex_optimizer.zero_grad()
            y.backward()
            ipex_optimizer.step()
        ref_ipex_model = copy.deepcopy(ipex_model)
        ref_ipex_optimizer = copy.deepcopy(ipex_optimizer)
        ref_ipex_model_state = copy.deepcopy(ipex_model.state_dict())
        ref_ipex_optimizer_state = copy.deepcopy(ipex_optimizer.state_dict())
        # train 2 iters to change model/optimizer state
        for _ in range(2):
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                y = ipex_model(*input).sum()
            ipex_optimizer.zero_grad()
            y.backward()
            ipex_optimizer.step()
        # check state changed (with public format)
        ipex_model_state = ipex_model.state_dict()
        ipex_optimizer_state = ipex_optimizer.state_dict()
        for var_name in ipex_model_state:
            self.assertNotEqual(
                ipex_model_state[var_name], ref_ipex_model_state[var_name]
            )
        for var_name in ipex_optimizer_state:
            if var_name == "state":
                self.assertNotEqual(
                    ipex_optimizer_state[var_name], ref_ipex_optimizer_state[var_name]
                )
        # check values before load (with block format)
        for p1, p2 in zip(
            ipex_model.named_parameters(), ref_ipex_model.named_parameters()
        ):
            # the empty weight and bias tensor will always be Tensor()
            assert_fn = (
                self.assertEqual
                if p1[0]
                in _empty_weight_bias_parameter_names(
                    prefixes=["conv", "linear", "conv_transpose2d"]
                )
                else self.assertNotEqual
            )
            assert_fn(p1[1], p2[1])
        for (_, v1), (_, v2) in zip(
            ipex_optimizer.state.items(), ref_ipex_optimizer.state.items()
        ):
            self.assertNotEqual(v1, v2)
        ipex_model.load_state_dict(ref_ipex_model_state)
        ipex_optimizer.load_state_dict(ref_ipex_optimizer_state)
        # check values same after load (with block format)
        for p1, p2 in zip(
            ipex_model.named_parameters(), ref_ipex_model.named_parameters()
        ):
            self.assertEqual(p1[1], p2[1])
        for (_, v1), (_, v2) in zip(
            ipex_optimizer.state.items(), ref_ipex_optimizer.state.items()
        ):
            if "step_size" in v1:
                # For Rprop, there is a "clamp" operation on step_size which will change the "zero"
                # attribute for packed position.
                # The zero pos will be changed after "clamp", and will be zero again after pack and
                # repack it. So in ipex_optimizer, the packed pos of "step_size" will be zero but in
                # ref_ipex_optimizer, the packed pos of "step_size" will not be zero. Thus the
                # assertEqual will be failed.
                # step_sizes=(1e-6, 50)
                # step_size_min, step_size_max = group['step_sizes']
                # step_size.mul_(sign).clamp_(step_size_min, step_size_max)
                # param.addcmul_(grad.sign(), step_size, value=-1)
                # (param = param - grad.sign() * step_size)
                # but this step_size will not have impact since grad are zero
                v1 = copy.deepcopy(v1)
                v1.pop("step_size")
                v2 = copy.deepcopy(v2)
                v2.pop("step_size")
            self.assertEqual(v1, v2)
        # check state same after load (with plain format)
        ipex_model_state = ipex_model.state_dict()
        ipex_optimizer_state = ipex_optimizer.state_dict()
        for var_name in ipex_model_state:
            self.assertEqual(ipex_model_state[var_name], ref_ipex_model_state[var_name])
        for var_name in ipex_optimizer_state:
            self.assertEqual(
                ipex_optimizer_state[var_name], ref_ipex_optimizer_state[var_name]
            )
    def test_load_after_optimize(self):
        """Run the two load-after-optimize helpers across the full matrix of
        dtypes, optimizers, levels, and inplace settings."""
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.input = (
                    torch.randn(1, 3, 224, 224),
                    torch.randn(100, 100),
                    torch.randn(5, 5, 3, 3),
                )
                self.conv = torch.nn.Conv2d(
                    3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
                )
                self.linear = torch.nn.Linear(100, 100)
                self.conv_transpose2d = torch.nn.ConvTranspose2d(5, 5, (3, 3))
            def forward(self, x1, x2, x3):
                return (
                    self.conv(x1).sum()
                    + self.linear(x2).sum()
                    + self.conv_transpose2d(x3)
                )
        params_dict = {
            "dtype": [torch.float, torch.bfloat16],
            "optimizer": [
                Lamb,
                Adadelta,
                Adagrad,
                Adam,
                AdamW,
                Adamax,
                ASGD,
                RMSprop,
                Rprop,
                SGD,
            ],
            "level": ["O0", "O1"],
            "inplace": [True, False],
        }
        for dtype, optimizer, level, inplace in list(
            itertools.product(*params_dict.values())
        ):
            self._test_load_after_ipex_optimize_training(
                Model, dtype, optimizer, level, inplace
            )
            self._test_load_after_ipex_optimize_inference(
                Model, dtype, optimizer, level, inplace
            )
    def test_reentrancy_of_ipex_optimize(self):
        """ipex.optimize applied repeatedly to an already-optimized model must
        still produce a trainable model (no double-prepacking failures)."""
        CALL_NUM = 3
        class Model(torch.nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.input = (
                    torch.randn(1, 3, 224, 224),
                    torch.randn(100, 100),
                    torch.randn(5, 5, 3, 3),
                )
                self.conv = torch.nn.Conv2d(
                    3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
                )
                self.linear = torch.nn.Linear(100, 100)
                self.conv_transpose2d = torch.nn.ConvTranspose2d(5, 5, (3, 3))
            def forward(self, x1, x2, x3):
                return (
                    self.conv(x1).sum()
                    + self.linear(x2).sum()
                    + self.conv_transpose2d(x3)
                )
        def run_and_recursively_call_ipex_optimize(
            model_class,
            dtype,
            level,
            inplace,
            weights_prepack,
            split_master_weight_for_bf16,
            fuse_update_step,
            graph_mode,
        ):
            model = model_class().train()
            input = model.input
            optimizer = torch.optim.SGD(model.parameters(), lr=10.01)
            for _ in range(CALL_NUM):
                # recursively calling ipex.optimize CALL_NUM times
                model, optimizer = ipex.optimize(
                    model,
                    dtype=dtype,
                    optimizer=optimizer,
                    level=level,
                    inplace=inplace,
                    weights_prepack=weights_prepack,
                    split_master_weight_for_bf16=split_master_weight_for_bf16,
                    fuse_update_step=fuse_update_step,
                    graph_mode=graph_mode,
                )
                with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                    y = model(*input).sum()
                optimizer.zero_grad()
                y.backward()
                optimizer.step()
        params_dict = {
            "dtype": [torch.float32, torch.bfloat16],
            "level": ["O1"],
            "inplace": [True, False],
            "weights_prepack": [True, False],
            "split_master_weight_for_bf16": [True, False],
            "fuse_update_step": [True, False],
            "graph_mode": [True, False],
        }
        for (
            dtype,
            level,
            inplace,
            weights_prepack,
            split_master_weight_for_bf16,
            fuse_update_step,
            graph_mode,
        ) in list(itertools.product(*params_dict.values())):
            run_and_recursively_call_ipex_optimize(
                Model,
                dtype,
                level,
                inplace,
                weights_prepack,
                split_master_weight_for_bf16,
                fuse_update_step,
                graph_mode,
            )
# Allow running this test file directly.
if __name__ == "__main__":
    test = unittest.main()
| 33,897 | 39.021251 | 108 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_nms.py | import unittest
import torch
import torch.nn as nn
from common_utils import TestCase
import time
import torch.nn.functional as F
import os
def nms(dets, scores, threshold, sorted=False):
    """Thin wrapper around the IPEX non-maximum-suppression kernel.

    ``sorted`` keeps its original (builtin-shadowing) name so existing
    keyword callers continue to work.
    """
    result = torch.ops.torch_ipex.nms(dets, scores, threshold, sorted)
    return result
batch_score_nms = torch.ops.torch_ipex.batch_score_nms
parallel_scale_back_batch = torch.ops.torch_ipex.parallel_scale_back_batch
rpn_nms = torch.ops.torch_ipex.rpn_nms
box_head_nms = torch.ops.torch_ipex.box_head_nms
def get_rand_seed():
    """Return a time-derived integer seed with nanosecond resolution.

    Uses ``time.time_ns()`` directly, which avoids the float rounding of
    ``int(time.time() * 1000000000)`` while returning the same kind of value.
    """
    return time.time_ns()
# This function is from https://github.com/kuangliu/pytorch-ssd.
def calc_iou_tensor(box1, box2):
    """Pairwise intersection-over-union of two sets of ltrb boxes.

    Reference to https://github.com/kuangliu/pytorch-ssd

    input:
        box1 (N, 4)
        box2 (M, 4)
    output:
        IoU (N, M)
    """
    n = box1.size(0)
    m = box2.size(0)
    # Broadcast both box sets to (N, M, 4) for the pairwise computation.
    a = box1.unsqueeze(1).expand(-1, m, -1)
    b = box2.unsqueeze(0).expand(n, -1, -1)
    # Intersection rectangle: max of left-tops, min of right-bottoms.
    inter_lt = torch.max(a[..., :2], b[..., :2])
    inter_rb = torch.min(a[..., 2:], b[..., 2:])
    inter_wh = (inter_rb - inter_lt).clamp(min=0)
    intersect = inter_wh[..., 0] * inter_wh[..., 1]
    size_a = a[..., 2:] - a[..., :2]
    size_b = b[..., 2:] - b[..., :2]
    area_a = size_a[..., 0] * size_a[..., 1]
    area_b = size_b[..., 0] * size_b[..., 1]
    return intersect / (area_a + area_b - intersect)
class TestScaleBackBatch(TestCase):
    """Compare the IPEX parallel_scale_back_batch kernel against a reference
    Python implementation of SSD box decoding."""
    def scale_back_batch(self, bboxes_in, scores_in, dboxes_xywh, scale_xy, scale_wh):
        """
        Python implementation of Encoder::scale_back_batch, refer to
        https://github.com/mlcommons/inference/blob/v0.7/others/cloud/single_stage_detector/pytorch/utils.py

        NOTE: mutates ``bboxes_in`` in place and returns it converted from
        (cx, cy, w, h) deltas to absolute ltrb boxes, plus softmaxed scores.
        """
        bboxes_in[:, :, :2] = scale_xy * bboxes_in[:, :, :2]
        bboxes_in[:, :, 2:] = scale_wh * bboxes_in[:, :, 2:]
        bboxes_in[:, :, :2] = (
            bboxes_in[:, :, :2] * dboxes_xywh[:, :, 2:] + dboxes_xywh[:, :, :2]
        )
        bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp() * dboxes_xywh[:, :, 2:]
        # Transform format to ltrb
        l, t, r, b = (
            bboxes_in[:, :, 0] - 0.5 * bboxes_in[:, :, 2],
            bboxes_in[:, :, 1] - 0.5 * bboxes_in[:, :, 3],
            bboxes_in[:, :, 0] + 0.5 * bboxes_in[:, :, 2],
            bboxes_in[:, :, 1] + 0.5 * bboxes_in[:, :, 3],
        )
        bboxes_in[:, :, 0] = l
        bboxes_in[:, :, 1] = t
        bboxes_in[:, :, 2] = r
        bboxes_in[:, :, 3] = b
        return bboxes_in, F.softmax(scores_in, dim=-1)
    def test_scale_back_batch_result(self):
        """Kernel output must match the Python reference in fp32, under
        autocast, and for float64 box inputs."""
        batch_size = 16
        number_boxes = 1024
        scale_xy = 0.1
        scale_wh = 0.2
        predicted_loc = (
            torch.randn((batch_size, number_boxes, 4)).contiguous().to(torch.float32)
        )
        predicted_score = (
            torch.randn((batch_size, number_boxes, 81)).contiguous().to(torch.float32)
        )
        dboxes_xywh = torch.randn((1, number_boxes, 4)).contiguous().to(torch.float64)
        bbox_res1, score_res1 = self.scale_back_batch(
            predicted_loc.clone(),
            predicted_score.clone(),
            dboxes_xywh.clone(),
            scale_xy,
            scale_wh,
        )
        bbox_res2, score_res2 = parallel_scale_back_batch(
            predicted_loc, predicted_score, dboxes_xywh, scale_xy, scale_wh
        )
        # test autocast
        with torch.cpu.amp.autocast():
            bbox_res3, score_res3 = parallel_scale_back_batch(
                predicted_loc, predicted_score, dboxes_xywh, scale_xy, scale_wh
            )
        self.assertTrue(torch.allclose(bbox_res1, bbox_res2, rtol=1e-4, atol=1e-4))
        self.assertTrue(torch.allclose(bbox_res1, bbox_res3, rtol=1e-4, atol=1e-4))
        self.assertTrue(torch.allclose(score_res1, score_res2, rtol=1e-4, atol=1e-4))
        self.assertTrue(torch.allclose(score_res1, score_res3, rtol=1e-4, atol=1e-4))
        # test double
        bbox_res4, score_res4 = parallel_scale_back_batch(
            predicted_loc.clone().double(),
            predicted_score,
            dboxes_xywh,
            scale_xy,
            scale_wh,
        )
        self.assertEqual(bbox_res4, bbox_res2)
        self.assertEqual(score_res4, score_res2)
        self.assertTrue(bbox_res4.dtype == torch.float64)
class TestNMS(TestCase):
    def decode_single(self, bboxes_in, scores_in, criteria, max_output, max_num=200):
        """
        Python implementation of Encoder::decode_single, refer to \
        https://github.com/mlcommons/inference/blob/v0.7/others/cloud/single_stage_detector/pytorch/utils.py

        Performs per-class greedy NMS on one image and returns up to
        ``max_output`` (boxes, labels, scores), highest scores last-selected.
        """
        # perform non-maximum suppression
        # Reference to https://github.com/amdegroot/ssd.pytorch
        bboxes_out = []
        scores_out = []
        labels_out = []
        for i, score in enumerate(scores_in.split(1, 1)):
            # skip background
            # print(score[score>0.90])
            if i == 0:
                continue
            score = score.squeeze(1)
            # Hard confidence floor before running NMS for this class.
            mask = score > 0.05
            bboxes, score = bboxes_in[mask, :], score[mask]
            if score.size(0) == 0:
                continue
            score_sorted, score_idx_sorted = score.sort(dim=0)
            # select max_output indices
            score_idx_sorted = score_idx_sorted[-max_num:]
            candidates = []
            # Greedy NMS: repeatedly take the current best box and drop all
            # remaining boxes that overlap it by >= criteria IoU.
            while score_idx_sorted.numel() > 0:
                idx = score_idx_sorted[-1].item()
                bboxes_sorted = bboxes[score_idx_sorted, :]
                bboxes_idx = bboxes[idx, :].unsqueeze(dim=0)
                iou_sorted = calc_iou_tensor(bboxes_sorted, bboxes_idx).squeeze()
                # we only need iou < criteria
                score_idx_sorted = score_idx_sorted[iou_sorted < criteria]
                candidates.append(idx)
            bboxes_out.append(bboxes[candidates, :])
            scores_out.append(score[candidates])
            labels_out.extend([i] * len(candidates))
        bboxes_out, labels_out, scores_out = (
            torch.cat(bboxes_out, dim=0),
            torch.tensor(labels_out, dtype=torch.long),
            torch.cat(scores_out, dim=0),
        )
        # Keep only the global top max_output detections across all classes.
        _, max_ids = scores_out.sort(dim=0)
        max_ids = max_ids[-max_output:]
        return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]
def test_batch_nms_result(self):
batch_size = 1
number_boxes = 15130
scale_xy = 0.1
scale_wh = 0.2
criteria = 0.50
max_output = 200
predicted_loc = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_ploc.pt")
) # sizes: [1, 15130, 4]
predicted_score = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_plabel.pt")
) # sizes: [1, 15130, 81]
dboxes_xywh = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_dboxes_xywh.pt")
)
bboxes, probs = parallel_scale_back_batch(
predicted_loc, predicted_score, dboxes_xywh, scale_xy, scale_wh
)
bboxes_clone = bboxes.clone()
probs_clone = probs.clone()
output = []
for bbox, prob in zip(bboxes.split(1, 0), probs.split(1, 0)):
bbox = bbox.squeeze(0)
prob = prob.squeeze(0)
output.append(self.decode_single(bbox, prob, criteria, max_output))
output2_raw = batch_score_nms(bboxes_clone, probs_clone, criteria, max_output)
# test autocast
with torch.cpu.amp.autocast():
for datatype in (torch.bfloat16, torch.float32):
bboxes_autocast = bboxes.clone().to(datatype)
probs_autocast = probs.clone().to(datatype)
output2_raw_autocast = batch_score_nms(
bboxes_autocast, probs_autocast, criteria, max_output
)
for i in range(3):
self.assertTrue(output2_raw_autocast[i].dtype == torch.float32)
# Re-assembly the result
output2 = []
idx = 0
for i in range(output2_raw[3].size(0)):
output2.append(
(
output2_raw[0][idx : idx + output2_raw[3][i]],
output2_raw[1][idx : idx + output2_raw[3][i]],
output2_raw[2][idx : idx + output2_raw[3][i]],
)
)
idx += output2_raw[3][i]
for i in range(batch_size):
loc, label, prob = list(r for r in output[i])
loc2, label2, prob2 = list(r for r in output2[i])
self.assertTrue(torch.allclose(loc, loc2, rtol=1e-4, atol=1e-4))
self.assertEqual(label, label2)
self.assertTrue(torch.allclose(prob, prob2, rtol=1e-4, atol=1e-4))
# test double
output2_raw_double = batch_score_nms(
bboxes.clone().double(), probs.clone().double(), criteria, max_output
)
self.assertEqual(output2_raw_double, output2_raw)
self.assertTrue(output2_raw_double[0].dtype == torch.float64)
def test_jit_trace_batch_nms(self):
class Batch_NMS(nn.Module):
def __init__(self, criteria, max_output):
super(Batch_NMS, self).__init__()
self.criteria = criteria
self.max_output = max_output
def forward(self, bboxes_clone, probs_clone):
return batch_score_nms(
bboxes_clone, probs_clone, self.criteria, self.max_output
)
batch_size = 1
number_boxes = 15130
scale_xy = 0.1
scale_wh = 0.2
criteria = 0.50
max_output = 200
predicted_loc = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_ploc.pt")
) # sizes: [1, 15130, 4]
predicted_score = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_plabel.pt")
) # sizes: [1, 15130, 81]
dboxes_xywh = torch.load(
os.path.join(os.path.dirname(__file__), "data/nms_dboxes_xywh.pt")
)
bboxes, probs = parallel_scale_back_batch(
predicted_loc, predicted_score, dboxes_xywh, scale_xy, scale_wh
)
bboxes_clone = bboxes.clone()
probs_clone = probs.clone()
output = []
for bbox, prob in zip(bboxes.split(1, 0), probs.split(1, 0)):
bbox = bbox.squeeze(0)
prob = prob.squeeze(0)
output.append(self.decode_single(bbox, prob, criteria, max_output))
batch_score_nms_module = Batch_NMS(criteria, max_output)
model_decode = torch.jit.trace(
batch_score_nms_module, (bboxes_clone, probs_clone)
)
output2_raw = model_decode(bboxes_clone, probs_clone)
# Re-assembly the result
output2 = []
idx = 0
for i in range(output2_raw[3].size(0)):
output2.append(
(
output2_raw[0][idx : idx + output2_raw[3][i]],
output2_raw[1][idx : idx + output2_raw[3][i]],
output2_raw[2][idx : idx + output2_raw[3][i]],
)
)
idx += output2_raw[3][i]
for i in range(batch_size):
loc, label, prob = list(r for r in output[i])
loc2, label2, prob2 = list(r for r in output2[i])
self.assertTrue(torch.allclose(loc, loc2, rtol=1e-4, atol=1e-4))
self.assertEqual(label, label2)
self.assertTrue(torch.allclose(prob, prob2, rtol=1e-4, atol=1e-4))
    def test_nms_kernel_result(self):
        """Per-class single-image NMS kernel test: the sorted-input fast path
        (``sorted=True``) must select the same set of boxes/scores as the
        unsorted path, and as a float64 run of the sorted path."""
        batch_size = 1
        class_number = 81
        scale_xy = 0.1
        scale_wh = 0.2
        criteria = 0.50
        max_output = 200
        predicted_loc = torch.load(
            os.path.join(os.path.dirname(__file__), "data/nms_ploc.pt")
        )  # sizes: [1, 15130, 4]
        predicted_score = torch.load(
            os.path.join(os.path.dirname(__file__), "data/nms_plabel.pt")
        )  # sizes: [1, 15130, 81]
        dboxes_xywh = torch.load(
            os.path.join(os.path.dirname(__file__), "data/nms_dboxes_xywh.pt")
        )
        bboxes, probs = parallel_scale_back_batch(
            predicted_loc, predicted_score, dboxes_xywh, scale_xy, scale_wh
        )
        for bs in range(batch_size):
            loc = bboxes[bs].squeeze(0)
            for class_id in range(class_number):
                if class_id == 0:
                    # Skip the background
                    continue
                score = probs[bs, :, class_id]
                score_sorted, indices = torch.sort(score, descending=True)
                loc_sorted = torch.index_select(loc, 0, indices)
                # sorted=True path vs. sorted=False reference vs. fp64 sorted.
                result = nms(loc_sorted.clone(), score_sorted.clone(), criteria, True)
                result_ref = nms(loc.clone(), score.clone(), criteria, False)
                result_ref2 = nms(
                    loc_sorted.clone().to(dtype=torch.float64),
                    score_sorted.clone().to(dtype=torch.float64),
                    criteria,
                    True,
                )
                # Indices differ between the sorted/unsorted runs, so compare
                # the *selected* boxes and scores after sorting them.
                bbox_keep, _ = torch.sort(
                    torch.index_select(loc_sorted, 0, result).squeeze(0), 0
                )
                bbox_keep_ref, _ = torch.sort(
                    torch.index_select(loc, 0, result_ref).squeeze(0), 0
                )
                bbox_keep_ref2, _ = torch.sort(
                    torch.index_select(loc_sorted, 0, result_ref2).squeeze(0), 0
                )
                score_keep, _ = torch.sort(
                    torch.index_select(score_sorted, 0, result).squeeze(0), 0
                )
                score_keep_ref, _ = torch.sort(
                    torch.index_select(score, 0, result_ref).squeeze(0), 0
                )
                score_keep_ref2, _ = torch.sort(
                    torch.index_select(score_sorted, 0, result_ref2).squeeze(0), 0
                )
                self.assertEqual(result.size(0), result_ref.size(0))
                self.assertTrue(
                    torch.allclose(bbox_keep, bbox_keep_ref, rtol=1e-4, atol=1e-4)
                )
                self.assertTrue(
                    torch.allclose(score_keep, score_keep_ref, rtol=1e-4, atol=1e-4)
                )
                self.assertTrue(
                    torch.allclose(bbox_keep, bbox_keep_ref2, rtol=1e-4, atol=1e-4)
                )
                self.assertTrue(
                    torch.allclose(score_keep, score_keep_ref2, rtol=1e-4, atol=1e-4)
                )
                # test autocast
                with torch.cpu.amp.autocast():
                    result_autocast = nms(loc.clone(), score.clone(), criteria, False)
                    self.assertEqual(result_autocast, result_ref)
                # test double
                result_double = nms(
                    loc.clone().double(), score.clone().double(), criteria, False
                )
                self.assertEqual(result_double, result_ref)
    def test_rpn_nms_result(self):
        """Compare the fused ``rpn_nms`` op against an inline Python
        reference (clamp -> size filter -> NMS -> top-N) on recorded RPN
        tensors; also checks autocast output dtype and a float64 run."""
        image_shapes = [(800, 824), (800, 1199)]
        min_size = 0
        nms_thresh = 0.7
        post_nms_top_n = 1000
        proposals = torch.load(
            os.path.join(os.path.dirname(__file__), "data/rpn_nms_proposals.pt")
        )
        objectness = torch.load(
            os.path.join(os.path.dirname(__file__), "data/rpn_nms_objectness.pt")
        )
        new_proposal = []
        new_score = []
        for proposal, score, im_shape in zip(
            proposals.clone(), objectness.clone(), image_shapes
        ):
            # NOTE(review): columns 0/2 (x1/x2) are clamped with im_shape[0]
            # and 1/3 (y1/y2) with im_shape[1]; whether im_shape is (h, w) or
            # (w, h) here is not visible from this file -- it only needs to
            # match what rpn_nms does internally. Confirm against the op.
            proposal[:, 0].clamp_(min=0, max=im_shape[0] - 1)
            proposal[:, 1].clamp_(min=0, max=im_shape[1] - 1)
            proposal[:, 2].clamp_(min=0, max=im_shape[0] - 1)
            proposal[:, 3].clamp_(min=0, max=im_shape[1] - 1)
            # Drop degenerate boxes smaller than min_size in either dimension.
            keep = (
                (
                    (proposal[:, 2] - proposal[:, 0] >= min_size)
                    & (proposal[:, 3] - proposal[:, 1] >= min_size)
                )
                .nonzero()
                .squeeze(1)
            )
            proposal = proposal[keep]
            score = score[keep]
            if nms_thresh > 0:
                keep = nms(proposal, score, nms_thresh)
                if post_nms_top_n > 0:
                    keep = keep[:post_nms_top_n]
            new_proposal.append(proposal[keep])
            new_score.append(score[keep])
        new_proposal_, new_score_ = rpn_nms(
            proposals, objectness, image_shapes, min_size, nms_thresh, post_nms_top_n
        )
        self.assertEqual(new_proposal, new_proposal_)
        self.assertEqual(new_score, new_score_)
        # test autocast: outputs must always come back as fp32
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                proposals_autocast = proposals.clone().to(datatype)
                objectness_autocast = objectness.clone().to(datatype)
                new_proposal_autocast, new_score_autocast = rpn_nms(
                    proposals_autocast,
                    objectness_autocast,
                    image_shapes,
                    min_size,
                    nms_thresh,
                    post_nms_top_n,
                )
                self.assertTrue(new_proposal_autocast[0].dtype == torch.float32)
                self.assertTrue(new_score_autocast[0].dtype == torch.float32)
        # test double
        new_proposal_double, new_score_double = rpn_nms(
            proposals.clone().double(),
            objectness.clone().double(),
            image_shapes,
            min_size,
            nms_thresh,
            post_nms_top_n,
        )
        self.assertEqual(new_proposal_double, new_proposal)
        self.assertEqual(new_score_double, new_score)
        self.assertTrue(new_proposal_double[0].dtype == torch.float64)
        self.assertTrue(new_score_double[0].dtype == torch.float64)
    def test_box_head_nms_result(self):
        """Compare the fused ``box_head_nms`` op against an inline Python
        reference of Mask R-CNN box-head post-processing (clamp, score
        threshold, per-class NMS, top detections_per_img); also checks
        autocast output dtype and a float64 run."""
        image_shapes = [(800, 824), (800, 1199)]
        score_thresh = 0.05
        nms_ = 0.5
        detections_per_img = 100
        num_classes = 81
        proposals = torch.load(
            os.path.join(os.path.dirname(__file__), "data/box_head_nms_proposals.pt")
        )
        class_prob = torch.load(
            os.path.join(os.path.dirname(__file__), "data/box_head_nms_class_prob.pt")
        )
        boxes_out = []
        scores_out = []
        labels_out = []
        for scores, boxes, image_shape in zip(class_prob, proposals, image_shapes):
            # Clamp all per-class boxes into the image.
            boxes = boxes.reshape(-1, 4)
            boxes[:, 0].clamp_(min=0, max=image_shape[0] - 1)
            boxes[:, 1].clamp_(min=0, max=image_shape[1] - 1)
            boxes[:, 2].clamp_(min=0, max=image_shape[0] - 1)
            boxes[:, 3].clamp_(min=0, max=image_shape[1] - 1)
            boxes = boxes.reshape(-1, num_classes * 4)
            scores = scores.reshape(-1, num_classes)
            inds_all = scores > score_thresh
            new_boxes = []
            new_scores = []
            new_labels = []
            # Per-class (skipping background class 0): threshold then NMS.
            for j in range(1, num_classes):
                inds = inds_all[:, j].nonzero().squeeze(1)
                scores_j = scores[inds, j]
                boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
                if nms_ > 0:
                    keep = nms(boxes_j, scores_j, nms_)
                    new_boxes.append(boxes_j[keep])
                    new_scores.append(scores_j[keep])
                    new_labels.append(torch.full((len(keep),), j, dtype=torch.int64))
            new_boxes, new_scores, new_labels = (
                torch.cat(new_boxes, dim=0),
                torch.cat(new_scores, dim=0),
                torch.cat(new_labels, dim=0),
            )
            number_of_detections = new_boxes.size(0)
            # Keep only the detections_per_img highest-scoring detections.
            if number_of_detections > detections_per_img > 0:
                image_thresh, _ = torch.kthvalue(
                    new_scores, number_of_detections - detections_per_img + 1
                )
                keep = new_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                boxes_out.append(new_boxes[keep])
                scores_out.append(new_scores[keep])
                labels_out.append(new_labels[keep])
            else:
                boxes_out.append(new_boxes)
                scores_out.append(new_scores)
                labels_out.append(new_labels)
        boxes_out_, scores_out_, labels_out_ = box_head_nms(
            proposals,
            class_prob,
            image_shapes,
            score_thresh,
            nms_,
            detections_per_img,
            num_classes,
        )
        self.assertEqual(boxes_out, boxes_out_)
        self.assertEqual(scores_out, scores_out_)
        self.assertEqual(labels_out, labels_out_)
        # test autocast: outputs must always come back as fp32
        with torch.cpu.amp.autocast():
            for datatype in (torch.bfloat16, torch.float32):
                proposals_autocast = (
                    proposals[0].to(datatype),
                    proposals[1].to(datatype),
                )
                class_prob_autocast = (
                    class_prob[0].to(datatype),
                    class_prob[1].to(datatype),
                )
                (
                    boxes_out_autocast,
                    scores_out_autocast,
                    labels_out_autocast,
                ) = box_head_nms(
                    proposals_autocast,
                    class_prob_autocast,
                    image_shapes,
                    score_thresh,
                    nms_,
                    detections_per_img,
                    num_classes,
                )
                self.assertTrue(boxes_out_autocast[0].dtype == torch.float32)
                self.assertTrue(scores_out_autocast[0].dtype == torch.float32)
        # test double
        proposals_double = (proposals[0].double(), proposals[1].double())
        class_prob_double = (class_prob[0].double(), class_prob[1].double())
        boxes_out_double, scores_out_double, labels_out_double = box_head_nms(
            proposals_double,
            class_prob_double,
            image_shapes,
            score_thresh,
            nms_,
            detections_per_img,
            num_classes,
        )
        self.assertEqual(boxes_out_double, boxes_out)
        self.assertEqual(scores_out_double, scores_out)
        self.assertEqual(labels_out_double, labels_out)
        self.assertTrue(boxes_out_double[0].dtype == torch.float64)
        self.assertTrue(scores_out_double[0].dtype == torch.float64)
# Allow running this test module directly (outside a pytest collection).
if __name__ == "__main__":
    test = unittest.main()
| 22,971 | 38.134583 | 112 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_add_layernorm.py | import unittest
import torch
from common_utils import TestCase
class add_layernorm(torch.nn.Module):
    """Elementwise add followed by LayerNorm — the add+layernorm fusion
    pattern this test module exercises."""

    def __init__(self, size):
        super(add_layernorm, self).__init__()
        # Normalizes over the trailing `size` elements; affine parameters
        # start at the defaults (weight=1, bias=0).
        self.layer_norm = torch.nn.LayerNorm(size)

    def forward(self, a, b):
        summed = torch.add(a, b)
        return self.layer_norm(summed)
class AddLayerNormTester(TestCase):
    def test_add_layernorm(self):
        """Trace ``add_layernorm`` with ``torch.jit.trace`` and check the
        traced (fused) module matches eager mode for fp32 and bf16 inputs,
        across several normalized sizes and input ranks (2D-5D)."""
        for size in [10, 16, 35]:
            for dim in [2, 3, 4, 5]:
                with torch.cpu.amp.autocast(), torch.no_grad():
                    # Input shape is (3, size, ..., size) with dim axes.
                    input_size = [
                        3,
                    ]
                    for _ in range(dim - 1):
                        input_size.append(size)
                    # add_layernorm input is fp32
                    a = torch.randn(input_size)
                    b = torch.randn(input_size)
                    model = add_layernorm(size).eval()
                    trace_model = torch.jit.trace(model, (a, b))
                    y1_fp32 = model(a, b)
                    y2_fp32 = trace_model(a, b)
                    self.assertEqual(y1_fp32.dtype, torch.float32)
                    self.assertEqual(y2_fp32.dtype, torch.float32)
                    self.assertEqual(y1_fp32, y2_fp32)
                    # add_layernorm input is bfloat16
                    a_bf16 = a.bfloat16()
                    b_bf16 = b.bfloat16()
                    model = model.bfloat16()
                    trace_model = torch.jit.trace(model, (a_bf16, b_bf16))
                    y1_bf16 = model(a_bf16, b_bf16)
                    y2_bf16 = trace_model(a_bf16, b_bf16)
                    self.assertEqual(y1_bf16.dtype, torch.bfloat16)
                    self.assertEqual(y2_bf16.dtype, torch.bfloat16)
                    # Add a custom threshold for bf16 test because of fused add_layernorm in jit has higher precision
                    # and causes mismatch with eager mode.
                    self.assertEqual(y1_bf16, y2_bf16, prec=5e-2)
# Allow running this test module directly (outside a pytest collection).
if __name__ == "__main__":
    test = unittest.main()
| 2,070 | 36.654545 | 117 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_roialign.py | import unittest
import itertools
import torch
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
import numpy as np
import math
import copy
# torchvision is optional; tests that need it are skipped when it is absent.
try:
    import torchvision

    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
def bilinear_interpolate(data, y, x, snap_border=False):
    """Sample ``data`` at fractional coordinates ``(y, x)`` bilinearly.

    Args:
        data: 2-D array-like supporting ``data[row, col]`` and ``.shape``.
        y, x: sample coordinates; may be fractional and out of range.
        snap_border: when True, coordinates lying within one pixel outside
            the image are clamped onto the nearest border first.

    Returns:
        The interpolated scalar; neighbor pixels outside the image
        contribute zero weight.
    """
    height, width = data.shape

    if snap_border:
        # Clamp coordinates that fall less than one pixel outside the image.
        if -1 < y <= 0:
            y = 0
        elif height - 1 <= y < height:
            y = height - 1
        if -1 < x <= 0:
            x = 0
        elif width - 1 <= x < width:
            x = width - 1

    y0 = int(math.floor(y))
    x0 = int(math.floor(x))
    y1 = y0 + 1
    x1 = x0 + 1

    # Fractional offsets inside the cell give the interpolation weights.
    dy = y - y0
    dx = x - x0

    total = 0
    for xp, wx in ((x0, 1 - dx), (x1, dx)):
        for yp, wy in ((y0, 1 - dy), (y1, dy)):
            if 0 <= yp < height and 0 <= xp < width:
                total += wx * wy * data[yp, xp]
    return total
def fn(x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False):
    """Apply IPEX RoIAlign to (x, rois) via a freshly constructed module."""
    return ipex.nn.modules._roi_align.RoIAlign(
        (pool_h, pool_w),
        spatial_scale=spatial_scale,
        sampling_ratio=sampling_ratio,
        aligned=aligned,
    )(x, rois)
def torchvision_fn(
    x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False
):
    """Apply torchvision RoIAlign to (x, rois); mirrors ``fn`` for the
    IPEX-vs-torchvision comparisons below."""
    return torchvision.ops.RoIAlign(
        (pool_h, pool_w),
        spatial_scale=spatial_scale,
        sampling_ratio=sampling_ratio,
        aligned=aligned,
    )(x, rois)
def expected_fn(
    in_data,
    rois,
    pool_h,
    pool_w,
    spatial_scale=1,
    sampling_ratio=-1,
    aligned=False,
    dtype=torch.float64,
):
    """Pure-Python RoIAlign reference.

    Args:
        in_data: (N, C, H, W) input tensor.
        rois: (R, 5) rows of (batch_idx, x1, y1, x2, y2).
        pool_h, pool_w: output bin grid size.
        spatial_scale: multiplier applied to roi coordinates.
        sampling_ratio: samples per bin per axis; <= 0 means ceil(bin size).
        aligned: when True, shifts coordinates by -0.5 (pixel-center align).
        dtype: dtype of the returned (R, C, pool_h, pool_w) tensor.
    """
    n_channels = in_data.size(1)
    out_data = torch.zeros(rois.size(0), n_channels, pool_h, pool_w, dtype=dtype)
    offset = 0.5 if aligned else 0.0
    for r, roi in enumerate(rois):
        batch_idx = int(roi[0])
        j_begin, i_begin, j_end, i_end = (
            x.item() * spatial_scale - offset for x in roi[1:]
        )
        roi_h = i_end - i_begin
        roi_w = j_end - j_begin
        bin_h = roi_h / pool_h
        bin_w = roi_w / pool_w
        for i in range(0, pool_h):
            start_h = i_begin + i * bin_h
            grid_h = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_h))
            for j in range(0, pool_w):
                start_w = j_begin + j * bin_w
                grid_w = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_w))
                for channel in range(0, n_channels):
                    # Average grid_h * grid_w bilinear samples inside the bin.
                    val = 0
                    for iy in range(0, grid_h):
                        y = start_h + (iy + 0.5) * bin_h / grid_h
                        for ix in range(0, grid_w):
                            x = start_w + (ix + 0.5) * bin_w / grid_w
                            val += bilinear_interpolate(
                                in_data[batch_idx, channel, :, :],
                                y,
                                x,
                                snap_border=True,
                            )
                    val /= grid_h * grid_w
                    out_data[r, channel, i, j] = val
    return out_data
class RoIAlignTester(TestCase):
    def test_roialign(self):
        """IPEX RoIAlign vs. the pure-Python reference (``expected_fn``):
        forward and backward in double/fp32/fp16, channels-last layout, and
        bf16 under autocast. The reference always runs in fp32."""
        pool_size = 5
        # n_channels % (pool_size ** 2) == 0 required for PS opeartions.
        n_channels = 2 * (pool_size**2)
        for datatype in [torch.double, torch.float32, torch.float16]:
            x = torch.rand(2, n_channels, 10, 10, dtype=datatype)
            gt_x = x.float().clone().detach().requires_grad_()
            rois = torch.tensor(
                [
                    [0, 0, 0, 9, 9],  # format is (xyxy)
                    [0, 0, 5, 4, 9],
                    [0, 5, 5, 9, 9],
                    [1, 0, 0, 9, 9],
                ],
                dtype=datatype,
            )
            pool_h, pool_w = pool_size, pool_size
            gt_y = expected_fn(
                gt_x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
            )
            gt_y.mean().backward()
            # forward
            with torch.no_grad():
                x0 = x.clone().detach()
                y0 = fn(x0, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1)
            self.assertTrue(y0.dtype == datatype)
            self.assertTrue(torch.allclose(gt_y.to(y0.dtype), y0, rtol=1e-2, atol=1e-2))
            x1 = x.clone().detach().requires_grad_()
            y1 = fn(x1, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1)
            self.assertTrue(y1.dtype == datatype)
            self.assertTrue(torch.allclose(gt_y.to(y1.dtype), y1, rtol=1e-2, atol=1e-2))
            # backward
            y1.mean().backward()
            self.assertTrue(x1.grad.dtype == datatype)
            self.assertTrue(
                torch.allclose(gt_x.grad.to(x1.dtype), x1.grad, rtol=1e-5, atol=1e-5)
            )
            # test channels last
            x2 = (
                x.clone()
                .detach()
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y2 = fn(x2, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1)
            self.assertTrue(y2.dtype == datatype)
            self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(torch.allclose(gt_y.to(y2.dtype), y2, rtol=1e-2, atol=1e-2))
            y2.mean().backward()
            self.assertTrue(x2.grad.dtype == datatype)
            self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(
                torch.allclose(gt_x.grad.to(x2.dtype), x2.grad, rtol=1e-5, atol=1e-5)
            )
            # test autocast
            with torch.cpu.amp.autocast():
                x3 = x.clone().bfloat16().requires_grad_()
                y3 = fn(
                    x3, rois.bfloat16(), pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y3.mean().backward()
                self.assertTrue(y3.dtype == torch.bfloat16)
                self.assertTrue(torch.allclose(gt_y.to(y3.dtype), y3, rtol=1e-2, atol=1e-2))
                self.assertTrue(x3.grad.dtype == torch.bfloat16)
                self.assertTrue(
                    torch.allclose(gt_x.grad.to(x3.dtype), x3.grad, rtol=1e-5, atol=1e-5)
                )
                x4 = (
                    x.clone()
                    .bfloat16()
                    .to(memory_format=torch.channels_last)
                    .requires_grad_()
                )
                y4 = fn(
                    x4, rois.bfloat16(), pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y4.mean().backward()
                self.assertTrue(y4.dtype == torch.bfloat16)
                self.assertTrue(torch.allclose(gt_y.to(y4.dtype), y4, rtol=1e-2, atol=1e-2))
                self.assertTrue(x4.grad.dtype == torch.bfloat16)
                self.assertTrue(
                    torch.allclose(gt_x.grad.to(x4.dtype), x4.grad, rtol=1e-5, atol=1e-5)
                )
    @skipIfNoTorchVision
    def test_torchvision_roialign(self):
        """Same structure as ``test_roialign`` but through torchvision's
        RoIAlign. NOTE(review): unlike ``test_roialign``, the reference input
        here stays in the dtype under test (no ``.float()``) -- confirm that
        asymmetry is intentional."""
        pool_size = 5
        # n_channels % (pool_size ** 2) == 0 required for PS opeartions.
        n_channels = 2 * (pool_size**2)
        for datatype in [torch.double, torch.float32, torch.float16]:
            x = torch.rand(2, n_channels, 10, 10, dtype=datatype)
            gt_x = x.clone().detach().requires_grad_()
            rois = torch.tensor(
                [
                    [0, 0, 0, 9, 9],  # format is (xyxy)
                    [0, 0, 5, 4, 9],
                    [0, 5, 5, 9, 9],
                    [1, 0, 0, 9, 9],
                ],
                dtype=datatype,
            )
            pool_h, pool_w = pool_size, pool_size
            gt_y = expected_fn(
                gt_x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
            )
            gt_y.mean().backward()
            # forward
            with torch.no_grad():
                x0 = x.clone().detach()
                y0 = torchvision_fn(
                    x0, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
            self.assertTrue(y0.dtype == datatype)
            self.assertTrue(torch.allclose(gt_y.to(y0.dtype), y0, rtol=1e-2, atol=1e-2))
            x1 = x.clone().detach().requires_grad_()
            y1 = torchvision_fn(
                x1, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
            )
            self.assertTrue(y1.dtype == datatype)
            self.assertTrue(torch.allclose(gt_y.to(y1.dtype), y1, rtol=1e-2, atol=1e-2))
            y1.mean().backward()
            self.assertTrue(x1.grad.dtype == datatype)
            self.assertTrue(
                torch.allclose(gt_x.grad.to(x1.dtype), x1.grad, rtol=1e-5, atol=1e-5)
            )
            # test channels last
            x2 = (
                x.clone()
                .detach()
                .to(memory_format=torch.channels_last)
                .requires_grad_()
            )
            y2 = torchvision_fn(
                x2, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
            )
            self.assertTrue(y2.dtype == datatype)
            self.assertTrue(y2.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(torch.allclose(gt_y.to(y2.dtype), y2, rtol=1e-2, atol=1e-2))
            y2.mean().backward()
            self.assertTrue(x2.grad.dtype == datatype)
            self.assertTrue(x2.grad.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(
                torch.allclose(gt_x.grad.to(x2.dtype), x2.grad, rtol=1e-5, atol=1e-5)
            )
            # test autocast
            with torch.cpu.amp.autocast():
                x3 = x.clone().bfloat16().requires_grad_()
                y3 = torchvision_fn(
                    x3, rois.bfloat16(), pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y3.mean().backward()
                self.assertTrue(y3.dtype == torch.bfloat16)
                self.assertTrue(torch.allclose(gt_y.to(y3.dtype), y3, rtol=1e-2, atol=1e-2))
                self.assertTrue(x3.grad.dtype == torch.bfloat16)
                self.assertTrue(
                    torch.allclose(gt_x.grad.to(x3.dtype), x3.grad, rtol=1e-5, atol=1e-5)
                )
                x4 = (
                    x.clone()
                    .bfloat16()
                    .to(memory_format=torch.channels_last)
                    .requires_grad_()
                )
                y4 = torchvision_fn(
                    x4, rois.bfloat16(), pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y4.mean().backward()
                self.assertTrue(y4.dtype == torch.bfloat16)
                self.assertTrue(torch.allclose(gt_y.to(y4.dtype), y4, rtol=1e-2, atol=1e-2))
                self.assertTrue(x4.grad.dtype == torch.bfloat16)
                self.assertTrue(
                    torch.allclose(gt_x.grad.to(x4.dtype), x4.grad, rtol=1e-5, atol=1e-5)
                )
    @skipIfNoTorchVision
    def test_torchvision_roialign_inference_torchcompile(self):
        """Inference path: torch.compile(backend="ipex") over torchvision
        RoIAlign must match eager, across dtype x compiler backend x dynamic
        shape combinations."""
        pool_size = 5
        n_channels = 2 * (pool_size**2)
        x = torch.rand(2, n_channels, 10, 10).to(memory_format=torch.channels_last)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ]
        )
        pool_h, pool_w = pool_size, pool_size
        for dtype, compiler_backend, dynamic in itertools.product(
            [torch.float32, torch.bfloat16], ["torchscript", "inductor"], [True, False]
        ):
            torch._dynamo.reset()
            ipex._set_compiler_backend(compiler_backend)
            torchcompile_torchvision_fn = torch.compile(
                torchvision_fn, dynamic=dynamic, backend="ipex"
            )
            # NOTE(review): x/rois are rebound in place, so a bf16 iteration
            # leaves rounded values for later fp32 iterations; harmless for
            # the eager-vs-compiled equality below, but worth confirming.
            x = x.to(dtype=dtype)
            rois = rois.to(dtype=dtype)
            # forward
            with torch.cpu.amp.autocast(
                enabled=(dtype == torch.bfloat16)
            ), torch.no_grad():
                y0 = torchvision_fn(
                    x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y1 = torchcompile_torchvision_fn(
                    x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
            self.assertEqual(y0, y1)
            self.assertTrue(y1.dtype == dtype)
    @skipIfNoTorchVision
    def test_torchvision_roialign_train_torchcompile(self):
        """Training path: forward output and input gradient of the compiled
        torchvision RoIAlign must match eager mode."""
        pool_size = 5
        n_channels = 2 * (pool_size**2)
        input = torch.rand(2, n_channels, 10, 10).to(memory_format=torch.channels_last)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ]
        )
        pool_h, pool_w = pool_size, pool_size
        for dtype, compiler_backend, dynamic in itertools.product(
            [torch.float32, torch.bfloat16], ["inductor"], [True, False]
        ):
            torch._dynamo.reset()
            ipex._set_compiler_backend(compiler_backend)
            torchcompile_torchvision_fn = torch.compile(
                copy.deepcopy(torchvision_fn), dynamic=dynamic, backend="ipex"
            )
            input = input.to(dtype=dtype)
            rois = rois.to(dtype=dtype)
            # Separate leaves so eager and compiled gradients don't mix.
            ori_x = input.clone().requires_grad_()
            x = input.clone().requires_grad_()
            # forward
            with torch.cpu.amp.autocast(enabled=(dtype == torch.bfloat16)):
                ori_y = torchvision_fn(
                    ori_x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
                y = torchcompile_torchvision_fn(
                    x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1
                )
            # Backprop the same cotangent through both graphs.
            grad_y = torch.randn(ori_y.shape, dtype=torch.float32)
            ori_y.backward(grad_y)
            y.backward(grad_y)
            self.assertEqual(y, ori_y)
            self.assertTrue(y.dtype == dtype)
            self.assertEqual(x.grad, ori_x.grad)
# Allow running this test module directly (outside a pytest collection).
if __name__ == "__main__":
    test = unittest.main()
| 14,399 | 35.180905 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_jit.py | from __future__ import division
from __future__ import print_function
import logging
"""
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
"""Tests for rn50."""
import math
import unittest
import time
import sys
import warnings
import itertools
import contextlib
import torch
import torch.nn as nn
import torch.fx.experimental.optimization as optimization
from torch.optim import SGD
from torch.testing import FileCheck
import copy
import intel_extension_for_pytorch as ipex
import torch.nn.functional as F
from common_utils import TestCase
def get_rand_seed():
    """Derive an integer RNG seed from the wall clock (nanosecond scale)."""
    nanos_per_second = 1000000000
    return int(time.time() * nanos_per_second)
# torchvision is optional; tests that need it are skipped when it is absent.
try:
    import torchvision  # noqa: F401

    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
device = "cpu:0"
SIZE = 100
# Map spatial dimensionality -> module class, so tests can be parametrized
# over 1D/2D/3D variants with the same code path.
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
convtranspose_module = {2: torch.nn.ConvTranspose2d, 3: torch.nn.ConvTranspose3d}
bn_module = {2: torch.nn.BatchNorm2d, 3: torch.nn.BatchNorm3d}
from typing import Dict, NamedTuple
class EltwiseFusionOp(NamedTuple):
    """Descriptor for one eltwise fusion case: the IPEX-side op name, whether
    bf16 is supported, the comparison tolerance, and extra kwargs to pass to
    the PyTorch op. NOTE(review): the ``{}`` default is a single shared dict
    across all instances — safe only while nothing mutates it."""

    ipex_eltwise_op: str
    bf16_supported: bool = True
    prec: float = 0.02
    op_input_list: Dict = {}
# Unary ops (no extra arguments) and their IPEX fusion descriptors.
unary_PyTorch_op_to_IPEX_op_map = {
    # PyTorch_op_name: [ipex_op_name, BF16_supported, prec]
    torch.relu: EltwiseFusionOp("relu"),
    torch.relu_: EltwiseFusionOp("relu"),
    torch.sigmoid: EltwiseFusionOp("sigmoid"),
    torch.sigmoid_: EltwiseFusionOp("sigmoid"),
    nn.SiLU(inplace=True): EltwiseFusionOp("swish"),
    nn.SiLU(inplace=False): EltwiseFusionOp("swish"),
    torch.tanh: EltwiseFusionOp("tanh"),
    torch.tanh_: EltwiseFusionOp("tanh"),
    nn.Mish(inplace=True): EltwiseFusionOp(
        "mish", bf16_supported=False
    ),  # TODO: support bf16 mish_ in stock PyTorch
    nn.Mish(inplace=False): EltwiseFusionOp(
        "mish", bf16_supported=False
    ),  # TODO: support bf16 mish in stock PyTorch
    torch.abs: EltwiseFusionOp("abs"),
    torch.abs_: EltwiseFusionOp("abs"),
    torch.exp: EltwiseFusionOp("exp", prec=0.035),
    torch.exp_: EltwiseFusionOp("exp", prec=0.035),
    torch.nn.Hardswish(inplace=True): EltwiseFusionOp("hardswish"),
    torch.nn.Hardswish(inplace=False): EltwiseFusionOp("hardswish"),
    torch.square: EltwiseFusionOp("square", prec=0.035),
    torch.square_: EltwiseFusionOp("square", prec=0.035),
    torch.nn.Hardsigmoid(inplace=True): EltwiseFusionOp("hardsigmoid"),
    torch.nn.Hardsigmoid(inplace=False): EltwiseFusionOp("hardsigmoid"),
}
# Ops that take extra arguments (module ctor args or op_input_list kwargs).
non_unary_PyTorch_op_to_IPEX_op_map = {
    nn.GELU(approximate="none"): EltwiseFusionOp("gelu"),
    nn.GELU(approximate="tanh"): EltwiseFusionOp("gelu"),
    nn.LeakyReLU(0.1, inplace=True): EltwiseFusionOp("leaky_relu"),
    nn.LeakyReLU(0.1, inplace=False): EltwiseFusionOp("leaky_relu"),
    nn.Hardtanh(inplace=True): EltwiseFusionOp("hardtanh"),
    nn.Hardtanh(inplace=False): EltwiseFusionOp("hardtanh"),
    nn.ELU(inplace=True): EltwiseFusionOp("elu"),
    nn.ELU(inplace=False): EltwiseFusionOp("elu"),
    torch.clamp: EltwiseFusionOp("hardtanh", op_input_list={"min": -2, "max": 3}),
    torch.clamp_: EltwiseFusionOp("hardtanh", op_input_list={"min": -2, "max": 3}),
    torch.pow: EltwiseFusionOp("pow", op_input_list={"exponent": 2}),
    lambda t: t.pow_(2): EltwiseFusionOp("pow"),
}
# Argument combinations the fusion pass must *reject* (fall back to eager).
unsupported_PyTorch_op_to_IPEX_op_map = {
    torch.clamp: EltwiseFusionOp(
        "hardtanh", op_input_list={"min": -2}
    ),  # clamp fusion requires that neither of min and max is None
    torch.clamp_: EltwiseFusionOp(
        "hardtanh", op_input_list={"max": 3}
    ),  # clamp_ fusion requires that neither of min and max is None
    torch.pow: EltwiseFusionOp(
        "pow", op_input_list={"exponent": torch.randn(1)}
    ),  # pow fusion requires exponent to be a Scalar but not a Tensor
    lambda t: t.pow_(torch.randn(1)): EltwiseFusionOp(
        "pow"
    ),  # pow_ fusion requires exponent to be a Scalar but not a Tensor
}
# The below eltwise OP have unstable numeric issue.
# We will run the tests with fixed seed to avoid false positive.
# For example, for log, when running bf16 linear-log test
#   y = linear(x)
#   z = log(y)
# Supposing we meet a case where
#   y_fp32 = 0 and y_bf16 = 0.0008
# Then z_fp32 = log(0) = nan
#   z_bf16 = log(0.0008) = -7.1309
# We're not able to directly compare z_fp32 with z_bf16.
PyTorch_op_to_IPEX_op_fixed_seed_map = {
    torch.log: EltwiseFusionOp("log", prec=0.065),
    torch.log_: EltwiseFusionOp("log", prec=0.065),
    torch.round: EltwiseFusionOp("round"),
    torch.round_: EltwiseFusionOp("round"),
    torch.sqrt: EltwiseFusionOp("sqrt"),
    torch.sqrt_: EltwiseFusionOp("sqrt"),
}
class ConvEltwise(nn.Module):
    """Conv followed by a divide and an elementwise op — the conv+eltwise
    fusion pattern exercised by the JIT tests."""

    def __init__(
        self,
        eltwise_fn,
        dim,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs,
    ):
        super(ConvEltwise, self).__init__()
        # NOTE(review): image_size is passed as ConvNd's 4th positional
        # argument (the stride slot) — confirm that is intended.
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size, image_size)
        self.eltwise = eltwise_fn
        self.kwargs = kwargs

    def forward(self, x):
        scaled = self.conv(x) / 2
        return self.eltwise(scaled, **self.kwargs)
class LinearEltwise(nn.Module):
    """Linear followed by an elementwise op (linear+eltwise fusion pattern).

    The input is pre-divided by 2 so results are numerically comparable with
    LinearDivEltwise, which divides after the linear layer instead.
    """

    def __init__(self, eltwise_fn, in_channels, out_channels, bias, **kwargs):
        super(LinearEltwise, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=bias)
        self.eltwise = eltwise_fn
        self.kwargs = kwargs

    def forward(self, x):
        halved = x / 2  # keep same accuracy with LinearDivEltwise
        return self.eltwise(self.linear(halved), **self.kwargs)
class LinearDivEltwise(nn.Module):
    """Linear, then divide by 2, then an elementwise op — the post-linear
    divide variant of the linear+eltwise fusion pattern."""

    def __init__(self, eltwise_fn, in_channels, out_channels, bias, **kwargs):
        super(LinearDivEltwise, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=bias)
        self.eltwise = eltwise_fn
        self.kwargs = kwargs

    def forward(self, x):
        halved = self.linear(x) / 2
        return self.eltwise(halved, **self.kwargs)
class ConvTransposeEltwise(nn.Module):
    """Fixture: conv-transpose -> eltwise op (kwargs forwarded).

    NOTE(review): ``image_size`` lands in the 4th positional ctor slot of the
    conv-transpose module — confirm intended.
    """
    def __init__(
        self,
        eltwise_fn,
        dim,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs,
    ):
        super(ConvTransposeEltwise, self).__init__()
        self.conv_transpose = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.eltwise = eltwise_fn
        self.kwargs = kwargs
    def forward(self, x):
        a = self.conv_transpose(x)
        b = self.eltwise(a, **self.kwargs)
        return b
class ConvTransposeSumAccumuOnRight(nn.Module):
    """Fixture: two conv-transpose branches; the ReLU'd branch is the RIGHT
    operand of ``add_func`` (which accumulation side can be fused is the
    behavior under test).

    NOTE(review): ``self.kwargs`` is passed as a positional third argument to
    ``add_func`` (not splatted) — add_func is presumably a lambda taking the
    dict; confirm against callers.
    """
    def __init__(
        self,
        dim,
        add_func,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs,
    ):
        super(ConvTransposeSumAccumuOnRight, self).__init__()
        self.convtranspose = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.convtranspose1 = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.add_func = add_func
        self.kwargs = kwargs
    def forward(self, x):
        a = self.convtranspose(x)
        b = F.relu(self.convtranspose1(x))
        return self.add_func(a, b, self.kwargs)
class ConvTransposeSumAccumuOnLeft(nn.Module):
    """Fixture: mirror of ConvTransposeSumAccumuOnRight — the ReLU'd branch is
    the LEFT operand of ``add_func``."""
    def __init__(
        self,
        dim,
        add_func,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs,
    ):
        super(ConvTransposeSumAccumuOnLeft, self).__init__()
        self.convtranspose = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.convtranspose1 = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.add_func = add_func
        self.kwargs = kwargs
    def forward(self, x):
        a = F.relu(self.convtranspose(x))
        b = self.convtranspose1(x)
        return self.add_func(a, b, self.kwargs)
class ConvTransposeSumBroadcast(nn.Module):
    """Fixture: first branch has a single output channel so the add
    broadcasts across channels (broadcasting sums should not be fused)."""
    def __init__(
        self,
        dim,
        add_func,
        in_channels,
        out_channels,
        kernel_size,
        image_size,
        **kwargs,
    ):
        super(ConvTransposeSumBroadcast, self).__init__()
        self.convtranspose = convtranspose_module[dim](
            in_channels, 1, kernel_size, image_size
        )
        self.convtranspose1 = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.add_func = add_func
        self.kwargs = kwargs
    def forward(self, x):
        a = F.relu(self.convtranspose(x))
        b = self.convtranspose1(x)
        return self.add_func(a, b, self.kwargs)
class ConvTransposeAddRelu(nn.Module):
    """Fixture: residual-style conv-transpose block ending in an in-place
    ``add_`` followed by ReLU (``inplace`` selectable)."""
    def __init__(
        self,
        dim,
        in_channels,
        mid_channels,
        out_channels,
        kernel_size,
        inplace,
        **kwargs,
    ):
        super(ConvTransposeAddRelu, self).__init__()
        self.convtranspose = convtranspose_module[dim](
            in_channels, mid_channels, kernel_size, padding=1, bias=False, **kwargs
        )
        self.convtranspose1 = convtranspose_module[dim](
            mid_channels, out_channels, kernel_size, padding=1, bias=False, **kwargs
        )
        self.convtranspose2 = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, padding=1, bias=False, **kwargs
        )
        self.inplace = inplace
    def forward(self, x):
        a = self.convtranspose(x)
        a = F.relu(a, inplace=self.inplace)
        a = self.convtranspose1(a)
        b = self.convtranspose2(x)  # shortcut branch
        return F.relu(a.add_(b), inplace=self.inplace)
class ConvBatchNorm_Fixed(nn.Module):
    """Fixture: conv (no bias) -> batch norm; fixed seed for reproducible weights."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvBatchNorm_Fixed, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](out_channels, eps=0.001)
    def forward(self, x):
        return self.bn(self.conv(x))
class ConvBatchNorm_Fixed2(nn.Module):
    """Fixture: conv -> batch norm with ``track_running_stats=False``
    (BN always uses batch statistics)."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvBatchNorm_Fixed2, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](out_channels, eps=0.001, track_running_stats=False)
    def forward(self, x):
        return self.bn(self.conv(x))
class ConvBatchNorm_Fixed3(nn.Module):
    """Fixture: conv WITH bias -> non-affine batch norm (no BN weight/bias)."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvBatchNorm_Fixed3, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=True, **kwargs)
        self.bn = bn_module[dim](out_channels, eps=0.001, affine=False)
    def forward(self, x):
        return self.bn(self.conv(x))
class BatchNormConv_Fixed(nn.Module):
    """Fixture: batch norm BEFORE the conv (reverse order, not foldable)."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(BatchNormConv_Fixed, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](in_channels, eps=0.001)
    def forward(self, x):
        return self.conv(self.bn(x))
class BatchNorm_Conv_BatchNorm(nn.Module):
    """Fixture: BN -> conv -> BN sandwich."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(BatchNorm_Conv_BatchNorm, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn1 = bn_module[dim](in_channels, eps=0.001)
        self.bn2 = bn_module[dim](out_channels, eps=0.001)
    def forward(self, x):
        return self.bn2(self.conv(self.bn1(x)))
class ConvReshapeBatchNorm(nn.Module):
    """Fixture: conv -> reshape to ``dest_shape`` -> BN (reshape between
    conv and BN should block folding)."""
    def __init__(self, dim, in_channels, out_channels, dest_shape, **kwargs):
        super(ConvReshapeBatchNorm, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.dest_shape = dest_shape
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](dest_shape[1], eps=0.001)
    def forward(self, x):
        conv_output = self.conv(x)
        return self.bn(torch.reshape(conv_output, self.dest_shape))
class Conv_Conv_Concat(nn.Module):
    """Fixture: two parallel convs concatenated along dim 0 (batch)."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(Conv_Conv_Concat, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv2 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        return torch.cat((self.conv1(x), self.conv2(x)))
class ConvRelu_Fixed(nn.Module):
    """Fixture: conv -> in-place ReLU."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvRelu_Fixed, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        return F.relu(self.conv(x), inplace=True)
class Conv_Relu_Add(nn.Module):
    """Fixture: add(relu(conv1(x)), conv2(x)) — only one branch has ReLU."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(Conv_Relu_Add, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv2 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        return torch.add(F.relu(self.conv1(x), inplace=True), self.conv2(x))
class Conv_Scalar_Binary(nn.Module):
    """Fixture: conv followed by a binary ``op`` with scalar 2.0."""
    def __init__(self, op, dim, in_channels, out_channels, **kwargs):
        super(Conv_Scalar_Binary, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, **kwargs)
        self.op = op
    def forward(self, x):
        return self.op(self.conv(x), 2.0)
class Conv_Scalar_Binary_Add(nn.Module):
    """Fixture: add of two conv+scalar-binary branches."""
    def __init__(self, op, dim, in_channels, out_channels, **kwargs):
        super(Conv_Scalar_Binary_Add, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv1 = conv_module[dim](in_channels, out_channels, **kwargs)
        self.conv2 = conv_module[dim](in_channels, out_channels, **kwargs)
        self.op = op
    def forward(self, x):
        return torch.add(self.op(self.conv1(x), 2.0), self.op(self.conv2(x), 2.0))
class Conv_Tensor_Binary(nn.Module):
    """Fixture: conv followed by binary ``op`` with a per-channel random
    tensor (shape ``[1, C, 1, 1]``, plus a trailing 1 for 3-D convs)."""
    def __init__(self, op, dim, in_channels, out_channels, **kwargs):
        super(Conv_Tensor_Binary, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, **kwargs)
        self.op = op
        input_size = [1, out_channels, 1, 1]
        if dim == 3:
            input_size.append(1)
        self.tensor = torch.randn(input_size)
    def forward(self, x):
        return self.op(self.conv(x), self.tensor)
class Conv_Tensor_Binary2(nn.Module):
    """Fixture: like Conv_Tensor_Binary but the operand is COMPLEX
    (``torch.cfloat``), which should block fusion."""
    def __init__(self, op, dim, in_channels, out_channels, **kwargs):
        super(Conv_Tensor_Binary2, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, **kwargs)
        self.op = op
        input_size = [1, out_channels, 1, 1]
        if dim == 3:
            input_size.append(1)
        self.tensor = torch.randn(input_size, dtype=torch.cfloat)
    def forward(self, x):
        return self.op(self.conv(x), self.tensor)
class Conv_Tensor_Binary_Add(nn.Module):
    """Fixture: add of two conv+tensor-binary branches sharing one operand."""
    def __init__(self, op, dim, in_channels, out_channels, **kwargs):
        super(Conv_Tensor_Binary_Add, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv1 = conv_module[dim](in_channels, out_channels, **kwargs)
        self.conv2 = conv_module[dim](in_channels, out_channels, **kwargs)
        self.op = op
        input_size = [1, out_channels, 1, 1]
        if dim == 3:
            input_size.append(1)
        self.tensor = torch.randn(input_size)
    def forward(self, x):
        return torch.add(
            self.op(self.conv1(x), self.tensor), self.op(self.conv2(x), self.tensor)
        )
class Conv_Bn_Relu(nn.Module):
    """Fixture: conv -> BN -> in-place ReLU."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(Conv_Bn_Relu, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](out_channels, eps=0.001)
    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
class ConvReshapeRelu(nn.Module):
    """Fixture: conv -> reshape -> in-place ReLU (reshape sits between the
    fusable pair)."""
    def __init__(self, dim, in_channels, out_channels, dest_shape, **kwargs):
        super(ConvReshapeRelu, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.dest_shape = dest_shape
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        return F.relu(torch.reshape(self.conv(x), self.dest_shape), inplace=True)
class ConvSum(nn.Module):
    """Fixture: out-of-place sum of two conv branches."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvSum, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        a = self.conv(x)
        b = self.conv1(x)
        return a + b
class ConvSum_v2(nn.Module):
    """Fixture: the SAME conv applied twice with an in-place ``add_`` chain;
    the exact aliasing/in-place pattern is the behavior under test."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvSum_v2, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        a = self.conv(x)
        b = self.conv(x)  # deliberately reuses self.conv, not self.conv1
        a.add_(b)
        c = self.conv1(x)
        a.add_(c)
        return a
class ConvScalarSum(nn.Module):
    """Fixture: conv plus a scalar constant."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvScalarSum, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        b = self.conv(x)
        return b + 2
class ConvBroadcastSum(nn.Module):
    """Fixture: the left operand is a 1-batch slice, so the add broadcasts
    over the batch dimension."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvBroadcastSum, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        a = self.conv(x)
        b = self.conv1(x)
        return a[1:2].clone() + b
class ConvReshapeSum(nn.Module):
    """Fixture: both conv outputs are reshaped before the sum."""
    def __init__(self, dim, in_channels, out_channels, dest_shape, **kwargs):
        super(ConvReshapeSum, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.dest_shape = dest_shape
        self.conv1 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.conv2 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        a = torch.reshape(self.conv1(x), self.dest_shape)
        b = torch.reshape(self.conv2(x), self.dest_shape)
        return a + b
class CascadedConvBnSumRelu(nn.Module):
    """Fixture: ResNet-style cascade — (conv-BN-relu-conv-BN) + (conv-BN
    shortcut), joined by in-place ``add_`` and ReLU."""
    def __init__(self, dim, in_channels, mid_channels, out_channels, **kwargs):
        super(CascadedConvBnSumRelu, self).__init__()
        torch.manual_seed(2018)
        self.conv = conv_module[dim](in_channels, mid_channels, bias=False, **kwargs)
        self.conv1 = conv_module[dim](
            mid_channels, out_channels, bias=False, padding=1, **kwargs
        )
        self.conv2 = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
        self.bn = bn_module[dim](mid_channels, eps=0.001)
        self.bn1 = bn_module[dim](out_channels, eps=0.001)
        self.bn2 = bn_module[dim](out_channels, eps=0.001)
    def forward(self, x):
        a = self.conv(x)
        a = self.bn(a)
        a = F.relu(a, inplace=True)
        a = self.conv1(a)
        a = self.bn1(a)
        b = self.conv2(x)  # shortcut branch
        b = self.bn2(b)
        return F.relu(a.add_(b), inplace=True)
class Linear_Scalar_Binary(nn.Module):
    """Fixture: linear followed by a binary ``op`` with scalar 2.0."""
    def __init__(self, op, in_channels, out_channels, **kwargs):
        super(Linear_Scalar_Binary, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.op = op
    def forward(self, x):
        return self.op(self.linear(x), 2.0)
class Linear_Tensor_Binary(nn.Module):
    """Fixture: linear followed by binary ``op`` with a random float tensor
    of shape ``(out_channels,)``."""
    def __init__(self, op, in_channels, out_channels, **kwargs):
        super(Linear_Tensor_Binary, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.op = op
        self.tensor = torch.randn(out_channels)
    def forward(self, x):
        return self.op(self.linear(x), self.tensor)
class Linear_Tensor_Binary2(nn.Module):
    """Fixture: like Linear_Tensor_Binary but the operand is an INTEGER
    tensor (dtype mismatch case)."""
    def __init__(self, op, in_channels, out_channels, **kwargs):
        super(Linear_Tensor_Binary2, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.op = op
        self.tensor = torch.tensor([2])
    def forward(self, x):
        return self.op(self.linear(x), self.tensor)
class Linear_Tensor_Binary3(nn.Module):
    """Fixture: like Linear_Tensor_Binary but with a COMPLEX operand
    (``torch.cfloat``)."""
    def __init__(self, op, in_channels, out_channels, **kwargs):
        super(Linear_Tensor_Binary3, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.op = op
        self.tensor = torch.randn(out_channels, dtype=torch.cfloat)
    def forward(self, x):
        return self.op(self.linear(x), self.tensor)
class LinearRelu(nn.Module):
    """Fixture: linear layer followed by an in-place ReLU.

    Weights are seeded (2018) so instances are reproducible.
    """
    def __init__(self, in_channels, out_channels, **kwargs):
        super(LinearRelu, self).__init__()
        torch.manual_seed(2018)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
    def forward(self, x):
        projected = self.linear(x)
        return F.relu(projected, inplace=True)
class LinearSigmoidMul(nn.Module):
    """Fixture: linear -> sigmoid -> mul with the linear output (swish/SiLU
    written out manually)."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super(LinearSigmoidMul, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
    def forward(self, x):
        linear_res = self.linear(x)
        return torch.mul(linear_res, F.sigmoid(linear_res))
class LinearAdd(nn.Module):
    """Fixture: sum of two linear branches; the input is cloned so each
    branch sees a distinct tensor."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super(LinearAdd, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.linear1 = nn.Linear(in_channels, out_channels, **kwargs)
    def forward(self, x):
        x1 = x.clone()
        return torch.add(self.linear(x), self.linear1(x1))
class LinearAddRelu(nn.Module):
    """Fixture: residual-style linear block ending in in-place ``add_`` +
    ReLU (``inplace`` selectable)."""
    def __init__(self, in_channels, mid_channels, out_channels, inplace, **kwargs):
        super(LinearAddRelu, self).__init__()
        self.linear = nn.Linear(in_channels, mid_channels, bias=False, **kwargs)
        self.linear1 = nn.Linear(mid_channels, out_channels, bias=False, **kwargs)
        self.linear2 = nn.Linear(in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        a = self.linear(x)
        a = F.relu(a, inplace=self.inplace)
        a = self.linear1(a)
        b = self.linear2(x)  # shortcut branch
        return F.relu(a.add_(b), inplace=self.inplace)
class Linear_Reshape_Relu(nn.Module):
    """Fixture: linear -> reshape -> in-place ReLU."""
    def __init__(self, in_channels, out_channels, dest_shape, **kwargs):
        super(Linear_Reshape_Relu, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.dest_shape = dest_shape
    def forward(self, x):
        return F.relu(torch.reshape(self.linear(x), self.dest_shape), inplace=True)
class LinearBn(nn.Module):
    """Fixture: linear -> single-feature batch norm."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(LinearBn, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.bn = bn_module[dim](1, eps=0.001)
    def forward(self, x):
        return self.bn(self.linear(x))
class Linear_Reshape_Bn(nn.Module):
    """Fixture: linear -> reshape to ``dest_shape`` -> batch norm."""
    def __init__(self, dim, in_channels, out_channels, dest_shape, **kwargs):
        super(Linear_Reshape_Bn, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.linear = nn.Linear(in_channels, out_channels, **kwargs)
        self.bn = bn_module[dim](1, eps=0.001)
        self.dest_shape = dest_shape
    def forward(self, x):
        return self.bn(torch.reshape(self.linear(x), self.dest_shape))
class Linear_With_Transposed_Weight(nn.Module):
    """Fixture: the weight is replaced by its transpose reshaped back to the
    original shape, scrambling element order.

    NOTE(review): presumably exercises weight-layout handling (non-standard
    weight memory layout) — confirm against the tests that use it.
    """
    def __init__(self, in_channels, out_channels):
        super(Linear_With_Transposed_Weight, self).__init__()
        self.linear = nn.Linear(in_channels, out_channels)
        self.linear.weight = nn.Parameter(
            self.linear.weight.transpose(0, 1).reshape(out_channels, in_channels)
        )
    def forward(self, x):
        return self.linear(x)
class ConvSumInDiffBlock(nn.Module):
    """Fixture: conv whose output is summed with the (channel-padded) input;
    the add lives inside an ``if`` so it lands in a different JIT block."""
    def __init__(self, dim, in_channels, out_channels, **kwargs):
        super(ConvSumInDiffBlock, self).__init__()
        seed = 2018
        torch.manual_seed(seed)
        self.pad = (0, 0) * dim  # zero spatial padding, one (lo, hi) pair per dim
        self.conv = conv_module[dim](in_channels, out_channels, bias=False, **kwargs)
    def forward(self, x):
        y = self.conv(x)
        if y.size(1) != x.size(1):
            # pad x's channel dim up to y's channel count before the sum
            z = F.pad(x, self.pad + (0, y.size(1) - x.size(1)), "constant", 0.0)
            y += z
        else:
            y += x
        return y
class ConvSwishOutplace(nn.Module):
    """Fixture: conv -> sigmoid -> out-of-place mul (swish written out).

    NOTE(review): ``image_size`` is the conv ctor's 4th positional arg
    (stride slot) — confirm intended.
    """
    def __init__(self, dim, in_channels, out_channels, kernel_size, image_size):
        super(ConvSwishOutplace, self).__init__()
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size, image_size)
    def forward(self, x):
        a1 = self.conv(x)
        b1 = torch.sigmoid(a1)
        c1 = torch.mul(a1, b1)
        return c1
class ConvSwishInplace(nn.Module):
    """Fixture: conv -> sigmoid -> IN-PLACE mul (swish, in-place variant)."""
    def __init__(self, dim, in_channels, out_channels, kernel_size, image_size):
        super(ConvSwishInplace, self).__init__()
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size, image_size)
    def forward(self, x):
        a = self.conv(x)
        b = torch.sigmoid(a)
        res = a.mul_(b)
        return res
class ConvSwishOutplaceSumOutplace(nn.Module):
    """Fixture: two out-of-place conv-swish branches joined by an
    out-of-place add."""
    def __init__(self, dim, in_channels, out_channels, kernel_size, image_size):
        super(ConvSwishOutplaceSumOutplace, self).__init__()
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size, image_size)
        self.conv1 = conv_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
    def forward(self, x):
        a1 = self.conv(x)
        b1 = torch.sigmoid(a1)
        c1 = torch.mul(a1, b1)
        a2 = self.conv1(x)
        b2 = torch.sigmoid(a2)
        c2 = torch.mul(a2, b2)
        return c1 + c2
class ConvSwishInplaceSumInplace(nn.Module):
    """Fixture: two IN-PLACE conv-swish branches joined by an in-place add."""
    def __init__(self, dim, in_channels, out_channels, kernel_size, image_size):
        super(ConvSwishInplaceSumInplace, self).__init__()
        self.conv = conv_module[dim](in_channels, out_channels, kernel_size, image_size)
        self.conv1 = conv_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
    def forward(self, x):
        a1 = self.conv(x)
        b1 = torch.sigmoid(a1)
        c1 = a1.mul_(b1)
        a2 = self.conv1(x)
        b2 = torch.sigmoid(a2)
        c2 = a2.mul_(b2)
        return c1.add_(c2)
class ConvTranspose(nn.Module):
    """Fixture: thin wrapper around a single conv-transpose module, exposing
    all of its constructor knobs positionally."""
    def __init__(
        self,
        dim,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        output_padding=0,
        groups=1,
        bias=True,
        dilation=1,
    ):
        super(ConvTranspose, self).__init__()
        self.conv_transpose = convtranspose_module[dim](
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            output_padding,
            groups,
            bias,
            dilation,
        )
    def forward(self, x):
        x = self.conv_transpose(x)
        return x
class ConvTransposeSigmoidMul(nn.Module):
    """Fixture: conv-transpose -> sigmoid -> ``mul`` (the mul callable is
    injected so in-place and out-of-place variants share one class)."""
    def __init__(self, mul, dim, in_channels, out_channels, kernel_size, image_size):
        super(ConvTransposeSigmoidMul, self).__init__()
        self.conv_transpose = convtranspose_module[dim](
            in_channels, out_channels, kernel_size, image_size
        )
        self.mul_op = mul
    def forward(self, x):
        a1 = self.conv_transpose(x)
        b1 = torch.sigmoid(a1)
        c1 = self.mul_op(a1, b1)
        return c1
class ChannelShuffle_with_Static_Shape(nn.Module):
    """Fixture: channel shuffle with all sizes baked in at construction time
    (static shapes in the traced graph)."""
    def __init__(self, batchsize, num_channels, height, width, groups):
        super(ChannelShuffle_with_Static_Shape, self).__init__()
        self.batchsize = batchsize
        self.num_channels = num_channels
        self.height = height
        self.width = width
        self.groups = groups
    def forward(self, x):
        channels_per_group = self.num_channels // self.groups
        # reshape to (N, G, C/G, H, W), swap G and C/G, flatten back
        x = x.view(
            self.batchsize, self.groups, channels_per_group, self.height, self.width
        )
        x = torch.transpose(x, 1, 2).contiguous()
        x = x.view(self.batchsize, -1, self.height, self.width)
        return x
class ChannelShuffle_with_Dynamic_Shape(nn.Module):
    """Fixture: channel shuffle that derives every size from the input at
    runtime (dynamic shapes in the traced graph)."""
    def __init__(self, groups):
        super(ChannelShuffle_with_Dynamic_Shape, self).__init__()
        self.groups = groups
    def forward(self, x):
        batch, channels, rows, cols = x.size()
        per_group = channels // self.groups
        # reshape to (N, G, C/G, H, W), swap G and C/G, flatten back
        shuffled = x.view(batch, self.groups, per_group, rows, cols)
        shuffled = torch.transpose(shuffled, 1, 2).contiguous()
        return shuffled.view(batch, -1, rows, cols)
class NotChannelShuffle(nn.Module):
    """Fixture: looks like a channel shuffle but views with width/height
    SWAPPED — deliberately not a valid shuffle (negative pattern-match case)."""
    def __init__(self, groups):
        super(NotChannelShuffle, self).__init__()
        self.groups = groups
    def forward(self, x):
        batchsize, num_channels, height, width = x.size()
        channels_per_group = num_channels // self.groups
        # width/height intentionally transposed relative to a real shuffle
        x = x.view(batchsize, self.groups, channels_per_group, width, height)
        x = torch.transpose(x, 1, 2).contiguous()
        x = x.view(batchsize, -1, width, height)
        return x
class MatmulDivOutplaceOutModifiedByOtherOP_v1(nn.Module):
    """Fixture: matmul into an ``out=`` buffer, divide, then mutate the out
    buffer afterwards — tests that fusion respects later mutation of ``out``.

    NOTE(review): the first ``mm_res = torch.randn(...)`` is immediately
    overwritten but still advances the global RNG stream, so it must not be
    removed without re-checking seeded expectations elsewhere.
    """
    def __init__(self, div_scalar=False, with_out=True):
        super(MatmulDivOutplaceOutModifiedByOtherOP_v1, self).__init__()
        self.div_scalar = div_scalar
        self.with_out = with_out
    def forward(self, x):
        y = torch.transpose(x, -1, -2).contiguous()
        mm_res_shape = x.size()[:-1] + (y.size()[-1:])
        mm_res = torch.randn(mm_res_shape, dtype=x.dtype)
        mm_out = torch.empty(mm_res_shape, dtype=x.dtype)
        mm_res = torch.matmul(x, y, out=mm_out)
        if self.div_scalar:
            div_res = mm_res.div(2.0)
        else:
            div_res = mm_res.div(torch.ones(mm_res_shape, dtype=x.dtype) + 1)
        mm_out.add_(5)  # mutation of the out buffer after the div
        return div_res
class MatmulDivOutplaceOutModifiedByOtherOP_v2(nn.Module):
    """Fixture: like v1, but the mutated out buffer is also READ again
    (compared with the div result), tightening the aliasing constraint."""
    def __init__(self, div_scalar=False, with_out=True):
        super(MatmulDivOutplaceOutModifiedByOtherOP_v2, self).__init__()
        self.div_scalar = div_scalar
        self.with_out = with_out
    def forward(self, x):
        y = torch.transpose(x, -1, -2).contiguous()
        mm_res_shape = x.size()[:-1] + (y.size()[-1:])
        mm_res = torch.randn(mm_res_shape, dtype=x.dtype)
        mm_out = torch.empty(mm_res_shape, dtype=x.dtype)
        mm_res = torch.matmul(x, y, out=mm_out)
        if self.div_scalar:
            div_res = mm_res.div(2.0)
        else:
            div_res = mm_res.div(torch.ones(mm_res_shape, dtype=x.dtype) + 1)
        mm_out.add_(5)
        div_out_equal = mm_out == div_res  # reads the mutated buffer
        return div_res + div_out_equal
class MatmulDivOutplace(nn.Module):
    """Fixture: matmul (optionally via ``out=``) followed by an OUT-OF-PLACE
    div by a scalar or an all-twos tensor."""
    def __init__(self, div_scalar=False, with_out=False):
        super(MatmulDivOutplace, self).__init__()
        self.div_scalar = div_scalar
        self.with_out = with_out
    def forward(self, x):
        mm_res = None
        y = torch.transpose(x, -1, -2).contiguous()
        mm_res_shape = x.size()[:-1] + (y.size()[-1:])
        if self.with_out:
            mm_res = torch.randn(mm_res_shape, dtype=x.dtype)
            torch.matmul(x, y, out=mm_res)
        else:
            mm_res = torch.matmul(x, y)
        if self.div_scalar:
            return mm_res.div(2.0)
        else:
            return mm_res.div(torch.ones(mm_res_shape, dtype=x.dtype) + 1)
class MatmulDivInplace(nn.Module):
    """Fixture: matmul (optionally via ``out=``) followed by an IN-PLACE
    ``div_`` by a scalar or an all-twos tensor."""
    def __init__(self, div_scalar=False, with_out=False):
        super(MatmulDivInplace, self).__init__()
        self.div_scalar = div_scalar
        self.with_out = with_out
    def forward(self, x):
        mm_res = None
        y = torch.transpose(x, -1, -2).contiguous()
        mm_res_shape = x.size()[:-1] + (y.size()[-1:])
        if self.with_out:
            mm_res = torch.randn(mm_res_shape, dtype=x.dtype)
            torch.matmul(x, y, out=mm_res)
        else:
            mm_res = torch.matmul(x, y)
        if self.div_scalar:
            return mm_res.div_(2.0)
        else:
            return mm_res.div_(torch.ones(mm_res_shape, dtype=x.dtype) + 1)
class MatmulMul(nn.Module):
    """Fixture: (optional pre-scale) -> matmul (optionally via ``out=``) ->
    post-multiply by a scalar or a ones+1 tensor."""
    def __init__(self, mul_scalar=False, with_out=False):
        super(MatmulMul, self).__init__()
        self.with_out = with_out
        self.mul_scalar = mul_scalar
    def forward(self, x):
        mm_res = None
        y = torch.transpose(x, -1, -2).contiguous()
        mm_res_shape = x.size()[:-1] + (y.size()[-1:])
        if not self.mul_scalar:
            # tensor-mul variant also scales the input before the matmul
            x = x * (torch.ones([1], dtype=x.dtype) + 1)
        if self.with_out:
            mm_res = torch.randn(mm_res_shape, dtype=x.dtype)
            mm_res = torch.matmul(x, y, out=mm_res)
        else:
            mm_res = torch.matmul(x, y)
        if self.mul_scalar:
            mm_res = mm_res * 0.125
        else:
            mm_res = mm_res * (torch.ones([1], dtype=x.dtype) + 1)
        return mm_res
class TransposedMatmulDiv(nn.Module):
    """Fixture: matmul of two inputs followed by a scalar multiply (0.3)."""
    def __init__(self):
        super(TransposedMatmulDiv, self).__init__()
    def forward(self, batch1, batch2):
        bmm_res = torch.matmul(batch1, batch2)
        res = bmm_res * 0.3
        return res
class BmmAdd(nn.Module):
    """Fixture: batched matrix multiply followed by an out-of-place add of
    ``input``."""
    def __init__(self):
        super(BmmAdd, self).__init__()
    def forward(self, input, batch1, batch2):
        product = torch.bmm(batch1, batch2)
        return torch.add(product, input)
class MHAScoresCalculation(nn.Module):
    """Fixture: attention scores — softmax((q / sqrt(d)) @ k^T + bias), with
    the scaling applied to the query BEFORE the matmul."""
    def __init__(self, dim_per_head, softmax_dim=-1):
        super(MHAScoresCalculation, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.dim_per_head = dim_per_head
    def forward(self, mat1, mat2, bias):
        scaled_q = mat1 / math.sqrt(self.dim_per_head)
        raw_scores = torch.matmul(scaled_q, mat2.transpose(2, 3))
        biased = raw_scores + bias
        return self.softmax(biased)
class MHAScoresCalculation_v2(nn.Module):
    """Fixture: attention scores with the scale applied AFTER the matmul as
    a precomputed multiplier."""
    def __init__(self, dim_per_head, softmax_dim=-1):
        super(MHAScoresCalculation_v2, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.scale = 1 / math.sqrt(dim_per_head)
    def forward(self, mat1, mat2, bias):
        qk = torch.matmul(mat1, mat2.transpose(2, 3))
        qk = qk * self.scale
        scores = qk + bias
        return self.softmax(scores)
class MHAScoresCalculation_v3(nn.Module):
    """Fixture: attention scores with the precomputed scale MULTIPLIED into
    the query before the matmul."""
    def __init__(self, dim_per_head, softmax_dim=-1):
        super(MHAScoresCalculation_v3, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.scale = 1 / math.sqrt(dim_per_head)
    def forward(self, mat1, mat2, bias):
        mat1 = mat1 * self.scale
        qk = torch.matmul(mat1, mat2.transpose(2, 3))
        scores = qk + bias
        return self.softmax(scores)
class MHAScoresCalculation_v1(nn.Module):
    """Fixture: attention scores with a DIVISION by sqrt(d) after the matmul."""
    def __init__(self, dim_per_head, softmax_dim=-1):
        super(MHAScoresCalculation_v1, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.dim_per_head = dim_per_head
    def forward(self, mat1, mat2, bias):
        qk = torch.matmul(mat1, mat2.transpose(2, 3))
        qk = qk / math.sqrt(self.dim_per_head)
        scores = qk + bias
        return self.softmax(scores)
class DistilMHAScoresCalculation_v1(nn.Module):
    """Fixture: DistilBERT-style scores — scale, matmul, then in-place
    ``masked_fill_`` where ``mask == 0``, then softmax."""
    def __init__(self, dim_per_head, fill_value, softmax_dim=-1):
        super(DistilMHAScoresCalculation_v1, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.dim_per_head = dim_per_head
        self.fill = fill_value
    def forward(self, mat1, mat2, mask):
        mask_shape = [mat1.shape[0], 1, 1, mat1.shape[3]]
        mat1 = mat1 / math.sqrt(self.dim_per_head)
        qk = torch.matmul(mat1, mat2.transpose(2, 3))
        mask = (mask == 0).view(mask_shape).expand_as(qk)
        qk.masked_fill_(mask, self.fill)
        return self.softmax(qk)
class DistilMHAScoresCalculation_v2(nn.Module):
    """Fixture: second DistilBERT-style scores variant.

    NOTE(review): the body is currently identical to v1 — presumably kept as
    a separate class so each test traces its own module; confirm.
    """
    def __init__(self, dim_per_head, fill_value, softmax_dim=-1):
        super(DistilMHAScoresCalculation_v2, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.dim_per_head = dim_per_head
        self.fill = fill_value
    def forward(self, mat1, mat2, mask):
        mask_shape = [mat1.shape[0], 1, 1, mat1.shape[3]]
        mat1 = mat1 / math.sqrt(self.dim_per_head)
        qk = torch.matmul(mat1, mat2.transpose(2, 3))
        mask = (mask == 0).view(mask_shape).expand_as(qk)
        qk.masked_fill_(mask, self.fill)
        return self.softmax(qk)
class VitMHAScoresCalculation_v1(nn.Module):
    """Fixture: ViT-style scores — matmul then scale, mask filled with the
    dtype's most-negative finite value, out-of-place."""
    def __init__(self, dim_per_head):
        super(VitMHAScoresCalculation_v1, self).__init__()
        self.scale = dim_per_head**-0.5
    def forward(self, mat1, mat2, mask):
        qk = torch.matmul(mat1, mat2.transpose(-1, 2)) * self.scale
        mask_value = -torch.finfo(qk.dtype).max
        qk = qk.masked_fill(mask, mask_value)
        return nn.functional.softmax(qk, dim=-1)
class VitMHAScoresCalculation_v2(nn.Module):
    """Fixture: ViT-style scores with the scale applied to the query BEFORE
    the matmul."""
    def __init__(self, dim_per_head):
        super(VitMHAScoresCalculation_v2, self).__init__()
        self.scale = dim_per_head**-0.5
    def forward(self, mat1, mat2, mask):
        q = mat1 * self.scale
        qk = torch.matmul(q, mat2.transpose(-1, 2))
        mask_value = -torch.finfo(qk.dtype).max
        qk = qk.masked_fill(mask, mask_value)
        return nn.functional.softmax(qk, dim=-1)
class Maskedfill__softmax(nn.Module):
    """Fixture: IN-PLACE ``masked_fill_`` (where ``mask == 0``) followed by
    softmax; trailing underscore in the name marks the in-place variant."""
    def __init__(self, fill_value, softmax_dim=-1):
        super(Maskedfill__softmax, self).__init__()
        self.softmax = nn.Softmax(dim=softmax_dim)
        self.fill = fill_value
    def forward(self, qk, mask):
        mask_shape = [qk.shape[0], 1, 1, qk.shape[3]]
        mask = (mask == 0).view(mask_shape).expand_as(qk)
        qk.masked_fill_(mask, self.fill)
        return self.softmax(qk)
class Maskedfill_softmax(nn.Module):
    """Fixture: OUT-OF-PLACE ``masked_fill`` on positions where ``mask == 0``,
    followed by softmax over the last dim."""
    def __init__(self, fill_value):
        super(Maskedfill_softmax, self).__init__()
        self.fill = fill_value
    def forward(self, qk, mask):
        broadcast_shape = [qk.shape[0], 1, 1, qk.shape[3]]
        expanded_mask = (mask == 0).view(broadcast_shape).expand_as(qk)
        filled = qk.masked_fill(expanded_mask, self.fill)
        return nn.functional.softmax(filled, dim=-1)
class AtenSoftmaxRepalce(nn.Module):
    """Fixture: bare ``nn.Softmax`` module (op-replacement test).

    NOTE(review): "Repalce" typo is in the public class name; kept for
    compatibility with existing tests.
    """
    def __init__(self, dim=-1):
        super(AtenSoftmaxRepalce, self).__init__()
        self.softmax = torch.nn.Softmax(dim)
    def forward(self, x):
        return self.softmax(x)
class AtenBatchNormRepalce(nn.Module):
    """Fixture: bare ``nn.BatchNorm2d(10)`` (op-replacement test; name typo
    kept for compatibility)."""
    def __init__(self):
        super(AtenBatchNormRepalce, self).__init__()
        self.bn = torch.nn.BatchNorm2d(10)
    def forward(self, x):
        return self.bn(x)
class AddLayerNorm(torch.nn.Module):
    """Fixture: add of two inputs followed by ``nn.LayerNorm(dim)``."""
    def __init__(self, dim=32):
        super(AddLayerNorm, self).__init__()
        self.layernorm = torch.nn.LayerNorm(dim)
    def forward(self, x, y):
        z = torch.add(x, y)
        return self.layernorm(z)
class AddLayerNorm_v1(torch.nn.Module):
    """Fixture: THREE-way add followed by ``nn.LayerNorm(dim)``."""
    def __init__(self, dim=32):
        super(AddLayerNorm_v1, self).__init__()
        self.layernorm = torch.nn.LayerNorm(dim)
    def forward(self, x, y, z):
        x = x + y + z
        return self.layernorm(x)
class AddLayerNorm_v2(torch.nn.Module):
    """Fixture: add of two inputs followed by FUNCTIONAL layer_norm over the
    last ``dim`` elements, with an externally supplied weight."""
    def __init__(self, dim=32):
        super(AddLayerNorm_v2, self).__init__()
        self.dim = dim
    def forward(self, x, y, w):
        summed = torch.add(x, y)
        normalized_shape = [self.dim]
        return torch.nn.functional.layer_norm(summed, normalized_shape, weight=w)
class ConcatBnRelu(torch.nn.Module):
    """Fixture: concat of three inputs -> BN -> out-of-place ReLU."""
    def __init__(self, dim, cat_dim, in_channels, **kwargs):
        super(ConcatBnRelu, self).__init__()
        self.bn = bn_module[dim](in_channels)
        self.relu = torch.nn.ReLU()
        self.cat_dim = cat_dim
    def forward(self, x1, x2, x3):
        x = torch.cat((x1, x2, x3), dim=self.cat_dim)
        x = self.bn(x)
        return self.relu(x)
class ConcatBnReluV2(torch.nn.Module):
    """Fixture: concat -> BN -> IN-PLACE ReLU."""
    def __init__(self, dim, cat_dim, in_channels, **kwargs):
        super(ConcatBnReluV2, self).__init__()
        self.bn = bn_module[dim](in_channels)
        self.relu = torch.nn.ReLU(inplace=True)
        self.cat_dim = cat_dim
    def forward(self, x1, x2, x3):
        x = torch.cat((x1, x2, x3), dim=self.cat_dim)
        x = self.bn(x)
        return self.relu(x)
class ConcatBnReluV3(torch.nn.Module):
    """Fixture: concat -> BN -> in-place ReLU, then mutate ``x`` afterwards.

    Because the ReLU is in-place, ``y`` aliases ``x``, so ``x += 2`` also
    changes ``y`` — this aliasing pattern is the behavior under test.
    """
    def __init__(self, dim, cat_dim, in_channels, **kwargs):
        super(ConcatBnReluV3, self).__init__()
        self.bn = bn_module[dim](in_channels)
        self.relu = torch.nn.ReLU(inplace=True)
        self.cat_dim = cat_dim
    def forward(self, x1, x2, x3):
        x = torch.cat((x1, x2, x3), dim=self.cat_dim)
        x = self.bn(x)
        y = self.relu(x)
        x += 2
        return y + x
class ModMultLinear(nn.Module):
    """Fixture: four linears — two fed by the input, two fed by the first
    branch's result; all four outputs are returned."""
    def __init__(self, w1_dim, w2_dim):
        super(ModMultLinear, self).__init__()
        self.linear1 = nn.Linear(5, w1_dim)
        self.linear2 = nn.Linear(5, w2_dim)
        self.linear3 = nn.Linear(w1_dim, 5)
        self.linear4 = nn.Linear(w1_dim, 5)
    def forward(self, x):
        res1 = self.linear1(x)
        res2 = self.linear2(x)
        res3 = self.linear3(res1)
        res4 = self.linear4(res1)
        return res1, res2, res3, res4
class ModMultLinearWithOrWithoutBias(nn.Module):
    """Fixture: four parallel linears over the same input, with bias
    alternating False/True/True/False."""
    def __init__(self):
        super(ModMultLinearWithOrWithoutBias, self).__init__()
        self.linear1 = nn.Linear(10, 32, bias=False)
        self.linear2 = nn.Linear(10, 32, bias=True)
        self.linear3 = nn.Linear(10, 32, bias=True)
        self.linear4 = nn.Linear(10, 32, bias=False)
    def forward(self, x):
        res1 = self.linear1(x)
        res2 = self.linear2(x)
        res3 = self.linear3(x)
        res4 = self.linear4(x)
        return res1, res2, res3, res4
class LinearSwishNaive(nn.Module):
    """Fixture: naive swish — linear, then ``nn.Sigmoid`` module, then mul
    with the linear output."""
    def __init__(self, in_feature, out_feature):
        super(LinearSwishNaive, self).__init__()
        self.linear = nn.Linear(in_feature, out_feature)
        self.sigmoid = nn.Sigmoid()
    def forward(self, input):
        linear_out = self.linear(input)
        sigmoid_out = self.sigmoid(linear_out)
        return torch.mul(linear_out, sigmoid_out)
class Bottleneck_v1(nn.Module):
    """ResNet-style bottleneck whose shortcut goes through a 1x1 projection
    conv (conv1 -> conv2 -> conv3, plus projected residual)."""

    def __init__(self):
        super(Bottleneck_v1, self).__init__()
        self.conv1 = nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=True)
        self.conv2 = nn.Conv2d(
            64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=True
        )
        self.conv3 = nn.Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=True)
        self.downsample = nn.Conv2d(
            64, 256, kernel_size=(1, 1), stride=(1, 1), bias=True
        )

    def forward(self, x):
        out = self.conv2(self.conv1(x).relu_()).relu_()
        out = self.conv3(out)
        # Accumulate the projected shortcut in place, then in-place ReLU —
        # the shape the conv+sum(+relu) fusion passes look for.
        out += self.downsample(x)
        return out.relu_()
class Bottleneck_v2(nn.Module):
    """Bottleneck with an identity shortcut taken after an initial 1x1
    expansion conv (conv -> [conv1 -> conv2 -> conv3] + shortcut)."""

    def __init__(self):
        super(Bottleneck_v2, self).__init__()
        self.conv = nn.Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=True)
        self.conv1 = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=True)
        self.conv2 = nn.Conv2d(
            64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=True
        )
        self.conv3 = nn.Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=True)

    def forward(self, x):
        shortcut = self.conv(x)
        out = self.conv2(self.conv1(shortcut).relu_()).relu_()
        out = self.conv3(out)
        out += shortcut
        return out.relu_()
class EinsumAdd(nn.Module):
    """torch.einsum(equation, a, b) followed by an out-of-place bias add."""

    def __init__(self, equation):
        super(EinsumAdd, self).__init__()
        self.equation = equation

    def forward(self, input1, input2, bias):
        product = torch.einsum(self.equation, input1, input2)
        return product + bias
class EinsumAddScalar(nn.Module):
    """torch.einsum(equation, a, b) plus a constant scalar offset of 12.0."""

    def __init__(self, equation):
        super(EinsumAddScalar, self).__init__()
        self.equation = equation

    def forward(self, input1, input2):
        product = torch.einsum(self.equation, input1, input2)
        return product + 12.0
class EinsumAddInplace(nn.Module):
    """einsum followed by an in-place add of ``bias`` into the einsum result
    (``bias`` itself is left untouched)."""

    def __init__(self, equation):
        super(EinsumAddInplace, self).__init__()
        self.equation = equation

    def forward(self, input1, input2, bias):
        out = torch.einsum(self.equation, input1, input2)
        out.add_(bias)
        return out
class EinsumAddInplaceV1(nn.Module):
    """einsum accumulated in place into ``bias``; the (mutated) bias tensor
    itself is returned."""

    def __init__(self, equation):
        super(EinsumAddInplaceV1, self).__init__()
        self.equation = equation

    def forward(self, input1, input2, bias):
        product = torch.einsum(self.equation, input1, input2)
        bias.add_(product)
        return bias
class AddMulDiv(nn.Module):
    """Computes input * (input + 3) / 6 with explicit torch ops."""

    def __init__(self):
        super(AddMulDiv, self).__init__()

    def forward(self, input):
        shifted = torch.add(input, 3)
        return torch.div(torch.mul(input, shifted), 6)
class Python_GELU_Tanh_v1(nn.Module):
    """Tanh-approximation GELU written out in python:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))."""

    def __init__(self):
        super(Python_GELU_Tanh_v1, self).__init__()

    def forward(self, input):
        inner = math.sqrt(2.0 / math.pi) * (
            input + 0.044715 * torch.pow(input, 3.0)
        )
        return 0.5 * input * (1.0 + torch.tanh(inner))
class Python_GELU_Tanh_v2(nn.Module):
    """Tanh-approximation GELU with sqrt(2/pi) pre-folded into the literal
    0.79788456: x * 0.5 * (1 + tanh(0.79788456 * x * (1 + 0.044715 * x*x)))."""

    def __init__(self):
        super(Python_GELU_Tanh_v2, self).__init__()

    def forward(self, input):
        gate = torch.tanh(0.79788456 * input * (1 + 0.044715 * input * input))
        return input * 0.5 * (1.0 + gate)
class Tester(TestCase):
    @contextlib.contextmanager
    def _texpr_enable(self, strategy):
        """Temporarily switch the TorchScript TensorExpr fuser on/off and
        restore the previous setting on exit (even on exception)."""
        old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
        torch._C._jit_set_texpr_fuser_enabled(strategy)
        try:
            yield
        finally:
            torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
    def _test_output(
        self,
        base_model,
        x,
        kind_in_graph=None,
        kind_not_in_graph=None,
        prec=None,
        levels=None,
        use_channels_last=None,
        use_te=None,
    ):
        """fp32 fusion harness: for every (level, memory-format, TE-fuser)
        combination, compare eager vs ipex-optimized vs traced+frozen
        outputs, then re-trace with oneDNN fusion enabled and optionally
        assert ``kind_in_graph`` appears in / ``kind_not_in_graph`` is
        absent from the fused graph.
        """
        if levels is None:
            levels = ["O0", "O1"]
        if use_channels_last is None:
            use_channels_last = [True, False]
        if use_te is None:
            use_te = [False, True]
        modelName = base_model.__class__.__name__
        options = itertools.product(levels, use_channels_last, use_te)
        for level, use_channels_last, use_te in options:
            with self._texpr_enable(use_te):
                # First pass: oneDNN fusion disabled, to get a fusion-free baseline.
                ipex.enable_onednn_fusion(False)
                model = copy.deepcopy(base_model).eval()
                # It will be removed after jit support conv_bn folding
                if level == "O0":
                    try:
                        model = optimization.fuse(model)
                    except BaseException:
                        warnings.warn("Conv BatchNorm folding failed.")
                if x.dim() == 4 and use_channels_last:
                    x = x.to(memory_format=torch.channels_last)
                    model = model.to(memory_format=torch.channels_last)
                if x.dim() == 5 and use_channels_last:
                    x = x.to(memory_format=torch.channels_last_3d)
                    model = model.to(memory_format=torch.channels_last_3d)
                oresult = model(x)
                model = ipex.optimize(
                    model, dtype=torch.float32, level=level, weights_prepack=False
                )
                with torch.no_grad():
                    result = model(x)
                    traced_model = torch.jit.trace(model, x).eval()
                    traced_model = torch.jit.freeze(traced_model)
                    tresult = traced_model(x)
                self.assertEqual(oresult, result, prec=prec)
                self.assertEqual(result, tresult, prec=prec)
                # Second pass: re-trace with oneDNN fusion enabled in ipex.
                ipex.enable_onednn_fusion(True)
                with torch.no_grad():
                    trace_fused_model = torch.jit.trace(model, x)
                    trace_fused_model = torch.jit.freeze(trace_fused_model)
                    # warm-up runs before inspecting the optimized graph
                    y = trace_fused_model(x)
                    # enable fusion in ipex.
                    fused_tresult = trace_fused_model(x)
                    # conv relu fusion, conv sum fusion or conv sum relu fusion
                    trace_graph = trace_fused_model.graph_for(x)
                    fused_tresult = trace_fused_model(x)
                self.assertEqual(result, fused_tresult, prec=prec)
                # check if the fused node exists in the graph
                if kind_in_graph is not None:
                    self.assertTrue(
                        any(
                            "prim::If" in n.kind() or n.kind() == kind_in_graph
                            for n in trace_graph.nodes()
                        )
                    )
                # check if certain node does not exist in the graph
                if kind_not_in_graph is not None:
                    self.assertTrue(
                        all(n.kind() != kind_not_in_graph for n in trace_graph.nodes())
                    )
    def _test_mkl_fp32(self, model, input, kind_in_graph=None, prec=5e-3):
        """Trace+freeze ``model`` on the default fp32 (packed MKL) path and
        check the jit result matches eager; optionally require
        ``kind_in_graph`` to appear in the optimized graph."""
        model = model.eval()
        model = ipex.optimize(model, dtype=torch.float32)
        with torch.no_grad():
            res_ref = model(input)
            tr_model = torch.jit.trace(model, (input))
            tr_model = torch.jit.freeze(tr_model)
            # warm-up run before inspecting the optimized graph
            tr_model(input)
            trace_graph = tr_model.graph_for(input)
            res_jit = tr_model(input)
            self.assertEqual(res_ref, res_jit)
            if kind_in_graph is not None:
                self.assertTrue(
                    any(n.kind() == kind_in_graph for n in trace_graph.nodes())
                )
    def _test_dnnl_fp32(self, model, input, kind_in_graph=None, prec=5e-3):
        """Same as ``_test_mkl_fp32`` but forcing the oneDNN kernel path
        via ``auto_kernel_selection=True``."""
        model = model.eval()
        model = ipex.optimize(model, dtype=torch.float32, auto_kernel_selection=True)
        with torch.no_grad():
            res_ref = model(input)
            tr_model = torch.jit.trace(model, (input))
            tr_model = torch.jit.freeze(tr_model)
            # warm-up run before inspecting the optimized graph
            tr_model(input)
            trace_graph = tr_model.graph_for(input)
            res_jit = tr_model(input)
            self.assertEqual(res_ref, res_jit)
            if kind_in_graph is not None:
                self.assertTrue(
                    any(
                        "prim::If" in n.kind() or n.kind() == kind_in_graph
                        for n in trace_graph.nodes()
                    )
                )
    def _test_output_bf16(
        self,
        base_model,
        x,
        kind_in_graph=None,
        kind_not_in_graph=None,
        prec=None,
        levels=None,
        use_channels_last=None,
        use_te=None,
    ):
        """bf16/autocast counterpart of ``_test_output``: runs the native
        bf16 model and a traced+frozen copy under cpu autocast for every
        (level, memory-format, TE-fuser) combination, compares outputs, and
        optionally checks for ``kind_in_graph`` / ``kind_not_in_graph`` in
        the fused graph.
        """
        if levels is None:
            levels = ["O0", "O1"]
        if use_channels_last is None:
            use_channels_last = [True, False]
        if use_te is None:
            use_te = [True, False]
        modelName = base_model.__class__.__name__
        options = itertools.product(levels, use_channels_last, use_te)
        for level, use_channels_last, use_te in options:
            with self._texpr_enable(use_te):
                ipex.enable_onednn_fusion(True)
                model = copy.deepcopy(base_model).eval()
                # It will be removed after jit support conv_bn folding
                if level == "O0":
                    try:
                        model = optimization.fuse(model)
                    except BaseException:
                        warnings.warn("Conv BatchNorm folding failed.")
                if x.dim() == 4 and use_channels_last:
                    x = x.to(memory_format=torch.channels_last)
                    model = model.to(memory_format=torch.channels_last)
                if x.dim() == 5 and use_channels_last:
                    x = x.to(memory_format=torch.channels_last_3d)
                    model = model.to(memory_format=torch.channels_last_3d)
                model = ipex.optimize(model, dtype=torch.bfloat16, level=level)
                x2 = x.clone()
                x3 = x.clone()
                with torch.cpu.amp.autocast(
                    enabled=True, dtype=torch.bfloat16
                ), torch.no_grad():
                    # bf16, native path
                    result = model(x)
                    trace_fused_model = torch.jit.trace(copy.deepcopy(model), x3)
                    trace_fused_model = torch.jit.freeze(trace_fused_model)
                    # enable fusion path.
                    fused_tresult = trace_fused_model(x3)
                    # bf16, jit trace path
                    trace_graph = trace_fused_model.graph_for(x3)
                    fused_tresult = trace_fused_model(x3)
                self.assertEqual(fused_tresult, result, prec=prec)
                # dtype check is skipped when the TE fuser is active
                if not torch._C._jit_texpr_fuser_enabled():
                    self.assertEqual(fused_tresult.dtype, torch.bfloat16)
                # check if the fused node exists in the graph
                if kind_in_graph is not None:
                    self.assertTrue(
                        any(
                            "prim::If" in n.kind() or n.kind() == kind_in_graph
                            for n in trace_graph.nodes()
                        )
                    )
                # check if certain node does not exist in the graph
                if kind_not_in_graph is not None:
                    self.assertTrue(
                        all(n.kind() != kind_not_in_graph for n in trace_graph.nodes())
                    )
    def _test_fusion_unsupported_case(
        self,
        m,
        x,
        auto_kernel_selection=False,
        kind_in_graph=None,
        kind_not_in_graph=None,
    ):
        """Negative-path harness: trace+freeze the optimized model and
        assert which node kinds are present/absent in the resulting graph
        (used for patterns the fuser must NOT match)."""
        m.eval()
        model = ipex.optimize(
            m, dtype=torch.float32, auto_kernel_selection=auto_kernel_selection
        )
        with torch.no_grad():
            result = model(x)
            traced_model = torch.jit.trace(model, x).eval()
            traced_model = torch.jit.freeze(traced_model)
            tresult = traced_model(x)
            trace_graph = traced_model.graph_for(x)
            if kind_in_graph is not None:
                self.assertTrue(
                    any(n.kind() == kind_in_graph for n in trace_graph.nodes())
                )
            if kind_not_in_graph is not None:
                self.assertTrue(
                    all(n.kind() != kind_not_in_graph for n in trace_graph.nodes())
                )
    def test_jit_freeze(self):
        """Frozen models must use the prepacked convolution_run op (with no
        prepack node left), while the unfrozen trace falls back to the
        imperative torch_ipex::convolution_forward op."""
        model = ConvBatchNorm_Fixed(2, 3, 32, kernel_size=3, stride=1).eval()
        x = torch.randn(32, 3, 64, 64).to(memory_format=torch.channels_last)
        model = model.to(memory_format=torch.channels_last)
        model = ipex.optimize(model, dtype=torch.float32)
        with torch.no_grad():
            trace_model = torch.jit.trace(model, x).eval()
            freeze_model = torch.jit.freeze(trace_model)
        with torch.no_grad():
            # enable fusion in ipex.
            result1 = trace_model(x)
            result2 = freeze_model(x)
            # conv relu fusion, conv sum fusion or conv sum relu fusion
            trace_graph = trace_model.graph_for(x)
            freeze_graph = freeze_model.graph_for(x)
        jit_node = "ipex_prepack::convolution_run"
        pack_node = "ipex_prepack::convolution_prepack"
        imperative_node = "torch_ipex::convolution_forward"
        # for freeze model, there will be only convolution_run in the graph
        self.assertTrue(any(n.kind() == jit_node for n in freeze_graph.nodes()))
        self.assertTrue(all(n.kind() != pack_node for n in freeze_graph.nodes()))
        # for non-freeze model, since op-ctx does not have value, cannot re-pack for this path
        self.assertTrue(any(n.kind() == imperative_node for n in trace_graph.nodes()))
    def test_concat_linear(self):
        """Freezing must run the concat-linear pass: ModMultLinear's four
        linear ops (two pairs sharing an input) collapse to two, on the
        plain-MKL, prepacked-MKL, oneDNN, and bf16 paths; the v1 model
        additionally mixes bias/no-bias layers."""

        def check_op_count(graph_str, op_names=None):
            # Count graph-dump lines mentioning any of the given op names.
            if op_names is None:
                op_names = []
            count = 0
            node_list = graph_str.strip().split("\n")
            for node in node_list:
                for op_name in op_names:
                    if op_name in node:
                        count += 1
            return count

        origin_model = ModMultLinear(50, 60).eval()
        test_val1 = torch.rand([50, 5])
        # call mkl path(fp32)
        model = ipex.optimize(
            origin_model,
            concat_linear=False,
            dtype=torch.float32,
            weights_prepack=False,
        )
        ori_res = model(test_val1)
        with torch.no_grad():
            model_jit = torch.jit.trace(model, (test_val1))
            graph_ori = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(graph_ori, ["aten::linear"])
            self.assertEqual(linear_count_ori, 4)
            model_jit = torch.jit.freeze(model_jit)
            jit_res = model_jit(test_val1)
            self.assertEqual(ori_res, jit_res)
            graph_opt = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(graph_opt, ["aten::linear"])
            self.assertEqual(linear_count_ori, 2)
        # call prepack mkl path(fp32)
        model = ipex.optimize(origin_model, concat_linear=False, dtype=torch.float32)
        ori_res = model(test_val1)
        with torch.no_grad():
            model_jit = torch.jit.trace(model, (test_val1))
            graph_ori = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(
                graph_ori, ["ipex_prepack::mkl_sgemm_run"]
            )
            self.assertEqual(linear_count_ori, 4)
            model_jit = torch.jit.freeze(model_jit)
            jit_res = model_jit(test_val1)
            self.assertEqual(ori_res, jit_res)
            graph_opt = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(
                graph_opt, ["ipex_prepack::mkl_sgemm_run"]
            )
            self.assertEqual(linear_count_ori, 2)
        # call onednn path(fp32)
        model = ipex.optimize(
            origin_model,
            concat_linear=False,
            dtype=torch.float32,
            auto_kernel_selection=True,
        )
        ori_res = model(test_val1)
        with torch.no_grad():
            model_jit = torch.jit.trace(model, (test_val1))
            graph_ori = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(graph_ori, ["ipex_prepack::linear_run"])
            self.assertEqual(linear_count_ori, 4)
            model_jit = torch.jit.freeze(model_jit)
            jit_res = model_jit(test_val1)
            self.assertEqual(ori_res, jit_res)
            graph_opt = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(graph_opt, ["ipex_prepack::linear_run"])
            self.assertEqual(linear_count_ori, 2)
        # bf16 path
        model = ipex.optimize(origin_model, concat_linear=False, dtype=torch.bfloat16)
        test_val1 = test_val1.bfloat16()
        with torch.cpu.amp.autocast(), torch.no_grad():
            ori_res = model(test_val1)
            model_jit = torch.jit.trace(model, (test_val1))
            graph_ori = str(model_jit.graph_for(test_val1))
            linear_count_ori = check_op_count(graph_ori, ["ipex_prepack::linear_run"])
            self.assertEqual(linear_count_ori, 4)
            model_jit = torch.jit.freeze(model_jit)
            model_jit(test_val1)
            graph_opt = str(model_jit.graph_for(test_val1))
            jit_res = model_jit(test_val1)
            self.assertEqual(ori_res[1], jit_res[1])
            linear_count_ori = check_op_count(graph_opt, ["ipex_prepack::linear_run"])
            self.assertEqual(linear_count_ori, 2)
        origin_model_v1 = ModMultLinearWithOrWithoutBias().eval()
        test_val1 = torch.rand([40, 10])
        # Only verify Concat Linear OPs w/ or w/o bias, so use the default packed MKL path
        model_v1 = ipex.optimize(
            origin_model_v1, concat_linear=False, dtype=torch.float32
        )
        with torch.no_grad():
            ori_res_v1 = model_v1(test_val1)
            model_jit_v1 = torch.jit.trace(model_v1, (test_val1))
            graph_ori_v1 = str(model_jit_v1.graph_for(test_val1))
            linear_count_ori_v1 = check_op_count(
                graph_ori_v1, ["ipex_prepack::mkl_sgemm_run"]
            )
            self.assertEqual(linear_count_ori_v1, 4)
            model_jit_v1 = torch.jit.freeze(model_jit_v1)
            jit_res_v1 = model_jit_v1(test_val1)
            self.assertEqual(ori_res_v1, jit_res_v1)
            graph_opt_v1 = str(model_jit_v1.graph_for(test_val1))
            linear_count_ori_v1 = check_op_count(
                graph_opt_v1, ["ipex_prepack::mkl_sgemm_run"]
            )
            self.assertEqual(linear_count_ori_v1, 2)
        model_v1 = ipex.optimize(
            origin_model_v1, concat_linear=False, dtype=torch.bfloat16
        )
        test_val1 = test_val1.bfloat16()
        with torch.cpu.amp.autocast(), torch.no_grad():
            ori_res_v1 = model_v1(test_val1)
            model_jit_v1 = torch.jit.trace(model_v1, (test_val1))
            graph_ori_v1 = str(model_jit_v1.graph_for(test_val1))
            linear_count_ori_v1 = check_op_count(
                graph_ori_v1, ["ipex_prepack::linear_run"]
            )
            self.assertEqual(linear_count_ori_v1, 4)
            model_jit_v1 = torch.jit.freeze(model_jit_v1)
            jit_res_v1 = model_jit_v1(test_val1)
            self.assertEqual(ori_res_v1, jit_res_v1)
            graph_opt_v1 = str(model_jit_v1.graph_for(test_val1))
            linear_count_ori_v1 = check_op_count(
                graph_opt_v1, ["ipex_prepack::linear_run"]
            )
            self.assertEqual(linear_count_ori_v1, 2)
    def test_add_layernorm(self):
        """Check the ipex::add_layernorm fusion (TE fuser disabled for the
        duration): fp32, non-contiguous inputs, bf16 inputs with fp32 and
        bf16 weights, and the 3-input variant; also verify that norm over a
        non-last dim raises the expected RuntimeError."""
        for dim in [768, 100]:
            with torch.no_grad():
                bs = 56
                seq_len = 384
                a = torch.randn(bs, seq_len, dim)
                b = torch.randn(bs, seq_len, dim)
                w = torch.ones(dim)
                model = AddLayerNorm(dim)
                # disable the TE fuser so the ipex fusion pass is exercised;
                # restored near the end of the loop body
                pre_te_enable_status = torch._C._jit_texpr_fuser_enabled()
                torch._C._jit_set_texpr_fuser_enabled(False)
                jit_model = torch.jit.trace(model, (a, b))
                trace_graph = jit_model.graph_for(a, b)
                jit_res = jit_model(a, b)
                ori_res = model(a, b)
                self.assertEqual(jit_res, ori_res)
                node = "ipex::add_layernorm"
                self.assertTrue(any(n.kind() == node for n in trace_graph.nodes()))
                # test norm dim is not last dim, expect RuntimeError
                # here in the a/b error input case, norm dim is mid dim but last dim is seq_len
                # which is expected as unsupported RuntimeError
                try:
                    model_except_error = AddLayerNorm(dim)
                    model_except_error = torch.jit.trace(model_except_error, (a, b))
                    a_error = torch.randn(bs, dim, seq_len)
                    b_error = torch.randn(bs, dim, seq_len)
                    model_except_error(a_error, b_error)
                    # it is not excepted if no RuntimeError exception is found
                    # so end with assert
                    self.assertTrue(False)
                except RuntimeError as e:
                    expected_error = f"Given normalized_shape=[{dim}], expected input with shape [*, {dim}]"
                    self.assertTrue(expected_error in str(e))
                    logging.info("expected RuntimeError is found")
                finally:
                    pass
                # not contiguous
                a_not_cont = (
                    a.clone()
                    .detach()
                    .unsqueeze(0)
                    .to(memory_format=torch.channels_last)
                    .squeeze(0)
                )
                b_not_cont = (
                    b.clone()
                    .detach()
                    .unsqueeze(0)
                    .to(memory_format=torch.channels_last)
                    .squeeze(0)
                )
                ori_res = model(a_not_cont, b_not_cont)
                jit_model = torch.jit.trace(model, (a, b))
                trace_graph = jit_model.graph_for(a, b)
                jit_res = jit_model(a_not_cont, b_not_cont)
                node = "ipex::add_layernorm"
                self.assertTrue(any(n.kind() == node for n in trace_graph.nodes()))
                self.assertEqual(jit_res, ori_res)
                # input bf16, weight fp32
                a_bf16 = a.to(torch.bfloat16)
                b_bf16 = b.to(torch.bfloat16)
                with torch.cpu.amp.autocast():
                    ori_res = model(a_bf16, b_bf16)
                    model_jit = jit_model = torch.jit.trace(model, (a, b))
                    trace_graph = jit_model.graph_for(a, b)
                    jit_res = jit_model(a_bf16, b_bf16)
                    node = "ipex::add_layernorm"
                    self.assertTrue(any(n.kind() == node for n in trace_graph.nodes()))
                    self.assertEqual(jit_res, ori_res, prec=5e-2)
                # input weight both bf16
                a_bf16 = a.to(torch.bfloat16)
                b_bf16 = b.to(torch.bfloat16)
                w_bf16 = w.to(torch.bfloat16)
                model = AddLayerNorm_v2(dim)
                jit_model = torch.jit.trace(model, (a, b, w))
                ori_res = model(a_bf16, b_bf16, w)
                trace_graph = jit_model.graph_for(a_bf16, b_bf16, w_bf16)
                jit_res = jit_model(a_bf16, b_bf16, w_bf16)
                node = "ipex::add_layernorm"
                self.assertTrue(any(n.kind() == node for n in trace_graph.nodes()))
                self.assertEqual(jit_res, ori_res, prec=5e-2)
                model = AddLayerNorm_v1(dim)
                c = torch.randn(bs, seq_len, dim)
                jit_model = torch.jit.trace(model, (a, b, c))
                trace_graph = jit_model.graph_for(a, b, c)
                jit_res = jit_model(a, b, c)
                ori_res = model(a, b, c)
                self.assertEqual(jit_res, ori_res)
                node = "ipex::add_layernorm"
                torch._C._jit_set_texpr_fuser_enabled(pre_te_enable_status)
                self.assertTrue(any(n.kind() == node for n in trace_graph.nodes()))
def test_concat_bn_relu(self):
batch_size = 3
image_size = 16
options = itertools.product(
[2, 3],
[[32, 32, 32], [60, 60, 60], [17, 27, 32], [16, 32, 48]],
[torch.float32, torch.bfloat16],
["O0", "O1"],
[True, False],
)
for dim, channels, dtype, level, use_channels_last in options:
input_size = [
[batch_size, channels[0], image_size, image_size],
[batch_size, channels[1], image_size, image_size],
[batch_size, channels[2], image_size, image_size],
]
if dim == 3:
for i in range(3):
input_size[i].append(image_size)
a1 = torch.randn(input_size[0], dtype=dtype)
a2 = torch.randn(input_size[1], dtype=dtype)
a3 = torch.randn(input_size[2], dtype=dtype)
a = [a1, a2, a3]
in_channels = sum(channels)
model1 = ConcatBnRelu(dim, 1, in_channels).eval()
model2 = ConcatBnReluV2(dim, 1, in_channels).eval()
model3 = ConcatBnReluV3(dim, 1, in_channels).eval()
for model in [model1, model2]:
if use_channels_last:
suggest_memory_format = (
torch.channels_last if dim == 2 else torch.channels_last_3d
)
for i in range(3):
a[i] = a[i].to(memory_format=suggest_memory_format)
model = model.to(memory_format=suggest_memory_format)
model = ipex.optimize(model, dtype=dtype, level=level)
with torch.cpu.amp.autocast(
enabled=True if dtype == torch.bfloat16 else False
), torch.no_grad():
result = model(a[0], a[1], a[2])
trace_model = torch.jit.trace(model, (a[0], a[1], a[2])).eval()
trace_model = torch.jit.freeze(trace_model)
tresult = trace_model(a[0], a[1], a[2])
trace_graph = trace_model.graph_for(a[0], a[1], a[2])
self.assertEqual(result, tresult)
self.assertEqual(tresult.dtype, dtype)
if use_channels_last:
self.assertTrue(
tresult.is_contiguous(memory_format=suggest_memory_format)
)
if (
use_channels_last
and a1.size(1) % 16 == 0
and a2.size(1) % 16 == 0
and a3.size(1) % 16 == 0
):
self.assertTrue(
any(
n.kind() == "ipex::concat_bn_relu"
for n in trace_graph.nodes()
)
)
else:
self.assertTrue(
all(
n.kind() != "ipex::concat_bn_relu"
for n in trace_graph.nodes()
)
)
model = ipex.optimize(model3, dtype=dtype, level=level)
trace_model = torch.jit.trace(model, (a[0], a[1], a[2])).eval()
trace_model = torch.jit.freeze(trace_model)
trace_graph = trace_model.graph_for(a[0], a[1], a[2])
self.assertTrue(
any(n.kind() != "ipex::concat_bn_relu" for n in trace_graph.nodes())
)
def test_mha_scores_calculation(self):
def _check_match_mha(
trace_model, mat1, mat2, bias, node="ipex::mha_scores_calc"
):
graph = trace_model.graph_for((mat1, mat2, bias))
self.assertTrue(any(n.kind() == node for n in graph.nodes()))
def _test_pure_bf16(
model,
trace_model,
mat1,
mat2,
bias,
prec=3e-2,
node="ipex::mha_scores_calc",
):
mat1_bf16 = mat1.to(torch.bfloat16)
mat2_bf16 = mat2.to(torch.bfloat16)
bias_bf16 = bias.to(torch.bfloat16)
res_ref = model(mat1_bf16, mat2_bf16, bias_bf16)
res_jit = trace_model(mat1_bf16, mat2_bf16, bias_bf16)
self.assertEqual(res_ref, res_jit, prec=prec)
_check_match_mha(trace_model, mat1, mat2, bias, node)
# shape case from bert-large
mat1 = torch.randn(56, 16, 384, 64)
mat2 = torch.randn(56, 16, 384, 64)
bias = torch.randn(56, 16, 384, 384)
mha = MHAScoresCalculation(64, -1)
with torch.no_grad():
mha_jit = torch.jit.trace(mha, (mat1, mat2, bias))
mha_jit.eval()
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias)
# other shape cases for mha
for softmax_dim in [0, 1, 2, -1]:
for v in [0, 1, 2, 3]:
if v == 0:
mha = MHAScoresCalculation(4, softmax_dim)
node = "ipex::mha_scores_calc"
if v == 1:
mha = MHAScoresCalculation_v1(4, softmax_dim)
node = "ipex::mha_scores_calc"
elif v == 2:
mha = MHAScoresCalculation_v2(4, softmax_dim)
node = "ipex::mha_scores_calc_v2"
else:
mha = MHAScoresCalculation_v3(4, softmax_dim)
node = "ipex::mha_scores_calc_v2"
with torch.no_grad():
mha_jit = torch.jit.trace(mha, (mat1, mat2, bias))
mha_jit.eval()
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
mat1 = torch.randn(1, 1, 2, 3)
mat2 = torch.randn(1, 1, 16, 3)
bias = torch.randn(1, 1, 2, 16)
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
mat1 = torch.randn(1, 1, 2, 3)
mat2 = torch.randn(1, 1, 32, 3)
bias = torch.randn(1, 1, 2, 32)
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
mat1 = torch.randn(1, 1, 2, 3)
mat2 = torch.randn(1, 1, 33, 3)
bias = torch.randn(1, 1, 2, 33)
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
mat1 = torch.randn(2, 3, 4, 6)
mat2 = torch.randn(2, 3, 6, 6)
bias = torch.randn(2, 3, 4, 6)
res_ref = mha(mat1, mat2, bias)
res_jit = mha_jit(mat1, mat2, bias)
self.assertEqual(res_ref, res_jit)
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
# Test broadcast
mat1 = torch.randn(2, 3, 4, 10)
mat2 = torch.randn(2, 3, 16, 10)
bias = torch.randn(1, 1, 1, 16)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(4, 16)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(3, 1, 1)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(2, 1, 1, 1)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(3, 4, 16)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(2, 1, 1, 16)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
bias = torch.randn(2, 1, 4, 16)
self.assertEqual(mha(mat1, mat2, bias), mha_jit(mat1, mat2, bias))
_check_match_mha(mha_jit, mat1, mat2, bias, node=node)
_test_pure_bf16(mha, mha_jit, mat1, mat2, bias, node=node)
    def test_linear_swish(self):
        """Trace LinearSwishNaive at three feature sizes (including a
        non-512-bit-aligned one) and check jit output matches eager, in
        fp32 and then with the models/inputs cast to bf16."""
        mat1 = torch.randn(10000, 5)
        pattern_1 = LinearSwishNaive(5, 1024)
        with torch.no_grad():
            pattern1_jit = torch.jit.trace(pattern_1, (mat1))
            pattern1_jit.eval()
            res_ref = pattern_1(mat1)
            res_jit = pattern1_jit(mat1)
            self.assertEqual(res_ref, res_jit)
            mat2 = torch.randn(10000, 1024)
            pattern_2 = LinearSwishNaive(1024, 1024)
            pattern2_jit = torch.jit.trace(pattern_2, (mat2))
            pattern2_jit.eval()
            res_ref = pattern_2(mat2)
            res_jit = pattern2_jit(mat2)
            self.assertEqual(res_ref, res_jit)
            # non 512 bit align shape
            mat3 = torch.randn(10000, 1019)
            pattern_3 = LinearSwishNaive(1019, 1019)
            pattern3_jit = torch.jit.trace(pattern_3, (mat3))
            pattern3_jit.eval()
            res_ref = pattern_3(mat3)
            res_jit = pattern3_jit(mat3)
            self.assertEqual(res_ref, res_jit)

            def _test_pure_bf16(model, trace_model, mat1, prec=5e-2):
                # cast both models and the input to bf16 and re-compare
                model = model.to(torch.bfloat16)
                trace_model = trace_model.to(torch.bfloat16)
                mat1_bf16 = mat1.to(torch.bfloat16)
                res_ref = model(mat1_bf16)
                res_jit = trace_model(mat1_bf16)
                self.assertEqual(res_ref, res_jit, prec=prec)

            _test_pure_bf16(pattern_1, pattern1_jit, mat1)
            _test_pure_bf16(pattern_2, pattern2_jit, mat2)
            _test_pure_bf16(pattern_3, pattern3_jit, mat3)
    def test_distil_mha_scores_calculation(self):
        """Check the DistilBERT-style fusions — ipex::distil_mha_scores_calc
        for the full pattern and ipex::maskedfill_softmax for the
        masked-fill+softmax tail — in fp32 and bf16, for two sequence
        lengths and two mask fill values (-inf and finfo.min)."""

        def _check_match_mha(
            trace_model, mat1, mat2, mask, node="ipex::distil_mha_scores_calc"
        ):
            graph = trace_model.graph_for((mat1, mat2, mask))
            self.assertTrue(any(n.kind() == node for n in graph.nodes()))

        def _check_match_mha_parts(
            trace_model, qk, mask, node="ipex::maskedfill_softmax"
        ):
            graph = trace_model.graph_for((qk, mask))
            self.assertTrue(any(n.kind() == node for n in graph.nodes()))

        def _test_pure_bf16(model, trace_model, mat1, mat2, mask, prec=3e-2):
            # bf16 variant of the full-pattern comparison
            mat1_bf16 = mat1.to(torch.bfloat16)
            mat2_bf16 = mat2.to(torch.bfloat16)
            mask_bf16 = mask.to(torch.bfloat16)
            res_ref = model(mat1_bf16, mat2_bf16, mask_bf16)
            res_jit = trace_model(mat1_bf16, mat2_bf16, mask_bf16)
            self.assertEqual(res_ref, res_jit, prec=prec)
            _check_match_mha(trace_model, mat1, mat2, mask)

        def _test_pure_bf16_parts(model, trace_model, qk, mask, prec=3e-2):
            # bf16 variant of the maskedfill+softmax comparison
            qk_bf16 = qk.to(torch.bfloat16)
            mask_bf16 = mask.to(torch.bfloat16)
            res_ref = model(qk_bf16, mask_bf16)
            res_jit = trace_model(qk_bf16, mask_bf16)
            self.assertEqual(res_ref, res_jit, prec=prec)
            _check_match_mha_parts(trace_model, qk_bf16, mask)

        for sequence_length in [128, 100]:
            mat1 = torch.randn(56, 12, sequence_length, sequence_length)
            mat2 = torch.randn(56, 12, sequence_length, sequence_length)
            mask = torch.randn(56, sequence_length)
            qk = torch.matmul(mat1, mat2)
            mask = mask > 0.5
            for fill_value in [-float("inf"), torch.tensor(torch.finfo(float).min)]:
                model_v1 = DistilMHAScoresCalculation_v1(64, fill_value)
                with torch.no_grad():
                    mha_jit = torch.jit.trace(model_v1, (mat1, mat2, mask))
                    mha_jit.eval()
                    res_ref = model_v1(mat1, mat2, mask)
                    res_jit = mha_jit(mat1, mat2, mask)
                    self.assertEqual(res_ref, res_jit)
                    _check_match_mha(mha_jit, mat1, mat2, mask)
                    _test_pure_bf16(model_v1, mha_jit, mat1, mat2, mask)
                model_v2 = DistilMHAScoresCalculation_v2(64, fill_value)
                with torch.no_grad():
                    mha_jit = torch.jit.trace(model_v2, (mat1, mat2, mask))
                    mha_jit.eval()
                    res_ref = model_v2(mat1, mat2, mask)
                    res_jit = mha_jit(mat1, mat2, mask)
                    self.assertEqual(res_ref, res_jit)
                    _check_match_mha(mha_jit, mat1, mat2, mask)
                    _test_pure_bf16(model_v2, mha_jit, mat1, mat2, mask)
                model_v3 = Maskedfill__softmax(fill_value)
                with torch.no_grad():
                    mha_jit = torch.jit.trace(model_v3, (qk, mask))
                    mha_jit.eval()
                    res_ref = model_v3(qk, mask)
                    res_jit = mha_jit(qk, mask)
                    self.assertEqual(res_ref, res_jit)
                    _check_match_mha_parts(mha_jit, qk, mask)
                    _test_pure_bf16_parts(model_v3, mha_jit, qk, mask)
                model_v4 = Maskedfill_softmax(fill_value)
                with torch.no_grad():
                    mha_jit = torch.jit.trace(model_v4, (qk, mask))
                    mha_jit.eval()
                    res_ref = model_v4(qk, mask)
                    res_jit = mha_jit(qk, mask)
                    self.assertEqual(res_ref, res_jit)
                    _check_match_mha_parts(mha_jit, qk, mask)
                    _test_pure_bf16_parts(model_v4, mha_jit, qk, mask)
    def test_vit_mha_scores_calculation(self):
        """Check the ipex::vit_mha_scores_calc fusion for both ViT model
        variants, in fp32 and (for v1) under cpu autocast bf16, at two
        patch counts."""

        def _check_match_mha(
            trace_model, mat1, mat2, mask, node="ipex::vit_mha_scores_calc"
        ):
            graph = trace_model.graph_for(mat1, mat2, mask)
            self.assertTrue(any(n.kind() == node for n in graph.nodes()))

        def _test_amp_bf16(
            model, mat1, mat2, mask, prec=3e-2, node="ipex::vit_mha_scores_calc"
        ):
            # trace and compare under autocast bf16
            with torch.cpu.amp.autocast():
                trace_model = torch.jit.trace(model, (mat1, mat2, mask))
                trace_model = torch.jit.freeze(trace_model)
                res_ref = model(mat1, mat2, mask)
                res_jit = trace_model(mat1, mat2, mask)
                self.assertEqual(res_ref, res_jit, prec=prec)
                _check_match_mha(trace_model, mat1, mat2, mask, node)

        for patch in [128, 257]:
            mat1 = torch.randn(56, 12, patch, patch)
            mat2 = torch.randn(56, 12, patch, patch)
            mask_1 = torch.randn(56, 1, patch, patch)
            mask = ~(mask_1 > 0.5)
            mha_v1 = VitMHAScoresCalculation_v1(64).eval()
            with torch.no_grad():
                mha_jit = torch.jit.trace(mha_v1, (mat1, mat2, mask))
                res_ref = mha_v1(mat1, mat2, mask)
                res_jit = mha_jit(mat1, mat2, mask)
                self.assertEqual(res_ref, res_jit)
                _check_match_mha(mha_jit, mat1, mat2, mask)
                _test_amp_bf16(mha_v1, mat1, mat2, mask)
            mha_v2 = VitMHAScoresCalculation_v2(64).eval()
            with torch.no_grad():
                mha_jit = torch.jit.trace(mha_v2, (mat1, mat2, mask))
                res_ref = mha_v2(mat1, mat2, mask)
                res_jit = mha_jit(mat1, mat2, mask)
                self.assertEqual(res_ref, res_jit)
                _check_match_mha(mha_jit, mat1, mat2, mask)
    def _test_conv_unary_fusion(self, op_list, seed=None):
        """For each eltwise op in ``op_list`` and each conv dim (2d/3d),
        build a ConvEltwise model and run the fp32 harness (and, where the
        op supports it, the bf16 harness), requiring the fused
        convolution_<op>_run node and forbidding the prepack node.

        ``seed`` pins the RNG for ops whose precision needs a reproducible
        seed; otherwise a fresh random seed is used (and printed).
        """
        batch_size = 8
        out_channels = 16
        in_channels = 3
        kernel_size = 3
        image_size = 16
        if seed is None:
            rand_seed = int(get_rand_seed())
            print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
            torch.manual_seed(rand_seed)
        else:
            print("{} rand sed: {}".format(sys._getframe().f_code.co_name, seed))
            torch.manual_seed(seed)
        for dim in [2, 3]:
            for eltwise in op_list:
                input_size = [batch_size, in_channels, image_size, image_size]
                if dim == 3:
                    input_size.append(image_size)
                unary_fusion_op = op_list[eltwise]
                ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
                bf16_supported = unary_fusion_op.bf16_supported
                prec = unary_fusion_op.prec
                op_input_list = unary_fusion_op.op_input_list
                x = torch.randn(input_size)
                m = ConvEltwise(
                    eltwise,
                    dim,
                    in_channels,
                    out_channels,
                    kernel_size,
                    image_size,
                    **op_input_list,
                )
                self._test_output(
                    m,
                    x,
                    kind_in_graph="ipex_prepack::convolution_%s_run" % ipex_eltwise_op,
                    kind_not_in_graph="ipex_prepack::convolution_%s_prepack"
                    % ipex_eltwise_op,
                )
                if bf16_supported:
                    self._test_output_bf16(
                        m,
                        x,
                        kind_in_graph="ipex_prepack::convolution_%s_run"
                        % ipex_eltwise_op,
                        kind_not_in_graph="ipex_prepack::convolution_%s_prepack"
                        % ipex_eltwise_op,
                        prec=prec,
                    )
    def _test_conv_transpose_unary_fusion(self, op_list, seed=None):
        """ConvTranspose counterpart of ``_test_conv_unary_fusion``: for
        each eltwise op and conv dim, run the fp32 harness requiring the
        fused conv_transpose_<op>_run node (bf16 checks are temporarily
        disabled pending an upstream PyTorch fix)."""
        batch_size = 1
        out_channels = 5
        in_channels = 3
        kernel_size = 3
        image_size = 8
        if seed is None:
            rand_seed = int(get_rand_seed())
            print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
            torch.manual_seed(rand_seed)
        else:
            print("{} rand sed: {}".format(sys._getframe().f_code.co_name, seed))
            torch.manual_seed(seed)
        for dim in [2, 3]:
            for eltwise in op_list:
                input_size = [batch_size, in_channels, image_size, image_size]
                if dim == 3:
                    input_size.append(image_size)
                unary_fusion_op = op_list[eltwise]
                ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
                bf16_supported = unary_fusion_op.bf16_supported
                prec = unary_fusion_op.prec
                op_input_list = unary_fusion_op.op_input_list
                x = torch.randn(input_size)
                m = ConvTransposeEltwise(
                    eltwise,
                    dim,
                    in_channels,
                    out_channels,
                    kernel_size,
                    image_size,
                    **op_input_list,
                )
                self._test_output(
                    m,
                    x,
                    kind_in_graph="ipex_prepack::conv_transpose_%s_run"
                    % ipex_eltwise_op,
                    kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                )
                # temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
                # if bf16_supported:
                #     self._test_output_bf16(
                #         m,
                #         x,
                #         kind_in_graph="ipex_prepack::conv_transpose_%s_run" % ipex_eltwise_op,
                #         kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                #         prec=prec)
def test_conv_unary_fusion(self):
self._test_conv_unary_fusion(unary_PyTorch_op_to_IPEX_op_map)
self._test_conv_unary_fusion(
PyTorch_op_to_IPEX_op_fixed_seed_map, 1654064339261196288
)
    def test_conv_non_unary_fusion(self):
        """Conv fusion with eltwise ops whose graph form is not a single unary node."""
        self._test_conv_unary_fusion(non_unary_PyTorch_op_to_IPEX_op_map)
def test_conv_fusion_unsupported_case(self):
dim = 2
batch_size = 1
in_channels = 3
out_channels = 16
in_channels = 3
kernel_size = 3
image_size = 16
for eltwise in unsupported_PyTorch_op_to_IPEX_op_map:
input_size = [batch_size, in_channels, image_size, image_size]
unary_fusion_op = unsupported_PyTorch_op_to_IPEX_op_map[eltwise]
ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
bf16_supported = unary_fusion_op.bf16_supported
prec = unary_fusion_op.prec
op_input_list = unary_fusion_op.op_input_list
x = torch.randn(input_size)
m = ConvEltwise(
eltwise,
dim,
in_channels,
out_channels,
kernel_size,
image_size,
**op_input_list,
)
self._test_fusion_unsupported_case(
m,
x,
kind_not_in_graph="ipex_prepack::convolution_%s_run" % ipex_eltwise_op,
)
    def _test_conv_transpose_sum(self, module, alpha, supported, test_inplace=True):
        """Check ConvTranspose + (scaled) add fusion.

        Args:
            module: test module class taking
                ``(dim, add_func, in_c, out_c, kernel_size, image_size, alpha=...)``.
            alpha: scale factor forwarded to ``torch.add`` / ``Tensor.add_``.
            supported: whether the fused ``conv_transpose_add`` kernel is expected.
            test_inplace: also exercise the in-place ``a.add_(b)`` variant.
        """
        batch_size = 1
        out_channels = 3
        in_channels = 3
        kernel_size = 3
        image_size = 8
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        # prec feeds only the (temporarily disabled) bf16 checks below.
        prec = 0.02
        kwargs = {"alpha": alpha}
        add_funcs = [lambda a, b, kwargs: torch.add(a, b, **kwargs)]
        if test_inplace:
            add_funcs.append(lambda a, b, kwargs: a.add_(b, **kwargs))
        for dim in [2, 3]:
            for add_func in add_funcs:
                input_size = [batch_size, in_channels, image_size, image_size]
                if dim == 3:
                    input_size.append(image_size)
                ipex_eltwise_op = "add"
                x = torch.randn(input_size)
                m = module(
                    dim,
                    add_func,
                    in_channels,
                    out_channels,
                    kernel_size,
                    image_size,
                    **kwargs,
                )
                if supported:
                    # Fused add kernel expected; prepack node must be folded away.
                    self._test_output(
                        m,
                        x,
                        kind_in_graph="ipex_prepack::conv_transpose_%s_run"
                        % ipex_eltwise_op,
                        kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                    )
                    # temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
                    # self._test_output_bf16(
                    #     m,
                    #     x,
                    #     kind_in_graph="ipex_prepack::conv_transpose_%s_run" % ipex_eltwise_op,
                    #     kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                    #     prec=prec)
                else:
                    # Fusion not expected; the fused add kernel must be absent.
                    self._test_output(
                        m,
                        x,
                        kind_not_in_graph="ipex_prepack::conv_transpose_%s_run"
                        % ipex_eltwise_op,
                    )
                    # temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
                    # self._test_output_bf16(
                    #     m,
                    #     x,
                    #     kind_not_in_graph="ipex_prepack::conv_transpose_%s_run" % ipex_eltwise_op,
                    #     prec=prec)
def test_conv_transpose_sum_accumu_on_right(self):
self._test_conv_transpose_sum(
ConvTransposeSumAccumuOnRight, alpha=1, supported=True
)
self._test_conv_transpose_sum(
ConvTransposeSumAccumuOnRight, alpha=2, supported=True
)
def test_conv_transpose_sum_accumu_on_left(self):
self._test_conv_transpose_sum(
ConvTransposeSumAccumuOnLeft, alpha=1, supported=True
)
self._test_conv_transpose_sum(
ConvTransposeSumAccumuOnLeft, alpha=2, supported=False
)
self._test_conv_transpose_sum(
ConvTransposeSumAccumuOnLeft, alpha=2.0, supported=False
)
    def test_conv_transpose_sum_broadcast_unsupported(self):
        """A broadcasting add after ConvTranspose must not fuse."""
        self._test_conv_transpose_sum(
            ConvTransposeSumBroadcast, alpha=1, supported=False, test_inplace=False
        )  # in-place add does not support shape broadcast
    def test_conv_transpose_sum_relu(self):
        """ConvTranspose + add + relu must fuse into conv_transpose_add_relu
        (not stop at the plain add fusion), for in- and out-of-place relu."""
        batch_size = 1
        out_channels = 3
        mid_channels = 2
        in_channels = 3
        kernel_size = 3
        image_size = 8
        for inplace in [True, False]:
            for dim in [2, 3]:
                m = ConvTransposeAddRelu(
                    dim, in_channels, mid_channels, out_channels, kernel_size, inplace
                )
                input_size = [batch_size, in_channels, image_size, image_size]
                if dim == 3:
                    input_size.append(image_size)
                x = torch.randn(input_size)
                self._test_output(
                    m,
                    x,
                    kind_in_graph="ipex_prepack::conv_transpose_add_relu_run",
                    kind_not_in_graph="ipex_prepack::conv_transpose_add_run",
                )
                # BF16 check temporarily disabled (kept for reference):
                # self._test_output_bf16(
                #     m,
                #     x,
                #     kind_in_graph="ipex_prepack::conv_transpose_add_relu_run",
                #     kind_not_in_graph="ipex_prepack::conv_transpose_add_run",
                #     prec=5e-2)
def test_conv_fusion(self):
batch_size = 8
out_channels = 16
in_channels = 3
kernel_size = 3
image_size = 16
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
ConvSwishOutplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_run",
kind_not_in_graph="ipex_prepack::convolution_swish_prepack",
)
self._test_output_bf16(
ConvSwishOutplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_run",
kind_not_in_graph="ipex_prepack::convolution_swish_prepack",
prec=0.02,
)
self._test_output(
ConvSwishInplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_run",
kind_not_in_graph="ipex_prepack::convolution_swish_prepack",
)
self._test_output_bf16(
ConvSwishInplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_run",
kind_not_in_graph="ipex_prepack::convolution_swish_prepack",
prec=0.02,
)
self._test_output(
ConvSwishOutplaceSumOutplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_add_run",
kind_not_in_graph="ipex_prepack::convolution_swish_add_prepack",
)
self._test_output_bf16(
ConvSwishOutplaceSumOutplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_add_run",
kind_not_in_graph="ipex_prepack::convolution_swish_add_prepack",
prec=0.02,
)
self._test_output(
ConvSwishInplaceSumInplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_add_run",
kind_not_in_graph="ipex_prepack::convolution_swish_add_prepack",
)
self._test_output_bf16(
ConvSwishInplaceSumInplace(
dim, in_channels, out_channels, kernel_size, image_size
),
x,
kind_in_graph="ipex_prepack::convolution_swish_add_run",
kind_not_in_graph="ipex_prepack::convolution_swish_add_prepack",
prec=0.02,
)
    def test_output_conv_bn(self):
        """Conv followed by BN: at O1 the BN folds away (no ipex::batch_norm
        left); the ConvBatchNorm_Fixed2 variant at O0 must keep its BN node."""
        batch_size = 8
        out_channels = 16
        in_channels = 3
        kernel_size = 3
        image_size = 16
        for dim in [2, 3]:
            input_size = [batch_size, in_channels, image_size, image_size]
            if dim == 3:
                input_size.append(image_size)
            x = torch.randn(input_size)
            self._test_output(
                ConvBatchNorm_Fixed(
                    dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
                ),
                x,
                kind_in_graph="ipex_prepack::convolution_run",
                kind_not_in_graph="ipex::batch_norm",
                levels=["O1"],
            )
            self._test_output_bf16(
                ConvBatchNorm_Fixed(
                    dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
                ),
                x,
                kind_in_graph="ipex_prepack::convolution_run",
                kind_not_in_graph="ipex::batch_norm",
                prec=0.02,
                levels=["O1"],
            )
            # At O0 the graph must still contain the batch_norm op.
            self._test_output(
                ConvBatchNorm_Fixed2(
                    dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
                ),
                x,
                kind_in_graph="ipex::batch_norm",
                prec=0.02,
                levels=["O0"],
            )
    def test_output_frozen_conv_bn(self):
        """With conv_bn_folding disabled in ipex.optimize, the traced graph keeps
        ipex::batch_norm, but torch.jit.freeze must still fold it away while
        preserving numerics and dtype."""
        batch_size = 8
        out_channels = 16
        in_channels = 3
        kernel_size = 3
        image_size = 16
        options = itertools.product(
            [torch.float32, torch.bfloat16],
            [True, False],
            [ConvBatchNorm_Fixed, ConvBatchNorm_Fixed3],
        )
        for dtype, use_channels_last, model in options:
            input_size = [batch_size, in_channels, image_size, image_size]
            model = model(
                2, in_channels, out_channels, kernel_size=kernel_size, stride=1
            ).eval()
            x = torch.randn(input_size, dtype=dtype)
            if use_channels_last:
                x = x.to(memory_format=torch.channels_last)
                model = model.to(memory_format=torch.channels_last)
            # Explicitly keep conv+bn unfused so that freezing does the folding.
            model = ipex.optimize(model, dtype=dtype, conv_bn_folding=False)
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype), torch.no_grad():
                result = model(x)
                trace_model = torch.jit.trace(model, x).eval()
                freeze_model = torch.jit.freeze(trace_model)
                tresult = trace_model(x)
                fused_tresult = freeze_model(x)
                trace_graph = trace_model.graph_for(x)
                freeze_graph = freeze_model.graph_for(x)
                self.assertEqual(result, tresult)
                self.assertEqual(result, fused_tresult)
                self.assertEqual(fused_tresult.dtype, dtype)
                # The traced (unfrozen) graph still carries the BN node ...
                self.assertTrue(
                    any(n.kind() == "ipex::batch_norm" for n in trace_graph.nodes())
                )
                # ... and freezing must have folded it away.
                self.assertTrue(
                    all(n.kind() != "ipex::batch_norm" for n in freeze_graph.nodes())
                )
def test_output_bn_conv(self):
batch_size = 8
out_channels = 16
in_channels = 3
kernel_size = 3
image_size = 16
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
BatchNormConv_Fixed(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex::batch_norm",
kind_not_in_graph=None,
)
def test_output_bn_conv_bn(self):
batch_size = 8
out_channels = 16
in_channels = 3
kernel_size = 3
image_size = 16
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
BatchNorm_Conv_BatchNorm(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex::batch_norm",
kind_not_in_graph=None,
)
    def test_output_conv_reshape_bn(self):
        """A reshape between conv and BN prevents folding; ipex::batch_norm remains."""
        batch_size = 8
        out_channels = 32
        in_channels = 3
        kernel_size = 3
        image_size = 64
        for dim in [2, 3]:
            input_size = [batch_size, in_channels, image_size, image_size]
            if dim == 3:
                input_size.append(image_size)
            x = torch.randn(input_size)
            # 62 == image_size - kernel_size + 1 (valid-conv output size).
            dst_shape = [16, 16, 62, 62]
            if dim == 3:
                dst_shape.append(62)
            self._test_output(
                ConvReshapeBatchNorm(
                    dim,
                    in_channels,
                    out_channels,
                    dst_shape,
                    kernel_size=kernel_size,
                    stride=1,
                ),
                x,
                kind_in_graph="ipex::batch_norm",
                kind_not_in_graph=None,
            )
def test_output_conv_conv_concate(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Conv_Concat(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_prepack",
)
def test_output_conv_relu_add(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Relu_Add(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="ipex_prepack::convolution_relu_prepack",
)
def test_output_conv_scalar_binary(self):
batch_size = 2
out_channels = 12
in_channels = 3
kernel_size = 3
image_size = 24
for dim in [2, 3]:
for bias in [True, False]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Scalar_Binary(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::add",
)
self._test_output(
Conv_Scalar_Binary(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::sub",
)
self._test_output(
Conv_Scalar_Binary(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::mul",
)
self._test_output(
Conv_Scalar_Binary(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::div",
)
self._test_output_bf16(
Conv_Scalar_Binary(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::div",
prec=0.1,
)
def test_output_conv_scalar_binary_add(self):
batch_size = 2
out_channels = 12
in_channels = 3
kernel_size = 3
image_size = 24
for dim in [2, 3]:
for bias in [True, False]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Scalar_Binary_Add(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::add",
)
self._test_output(
Conv_Scalar_Binary_Add(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::sub",
)
self._test_output(
Conv_Scalar_Binary_Add(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::mul",
)
self._test_output(
Conv_Scalar_Binary_Add(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::div",
)
self._test_output_bf16(
Conv_Scalar_Binary_Add(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary_Add(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary_Add(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Conv_Scalar_Binary_Add(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::div",
prec=0.1,
)
def test_output_conv_tensor_binary(self):
batch_size = 2
out_channels = 12
in_channels = 3
kernel_size = 3
image_size = 24
for dim in [2, 3]:
for bias in [True, False]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Tensor_Binary2(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="aten::add",
)
self._test_output(
Conv_Tensor_Binary(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::add",
)
self._test_output(
Conv_Tensor_Binary(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::sub",
)
self._test_output(
Conv_Tensor_Binary(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::mul",
)
self._test_output(
Conv_Tensor_Binary(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::div",
prec=2e-5,
)
self._test_output_bf16(
Conv_Tensor_Binary(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="aten::div",
prec=0.5,
)
def test_output_conv_tensor_binary_add(self):
batch_size = 2
out_channels = 12
in_channels = 3
kernel_size = 3
image_size = 24
for dim in [2, 3]:
for bias in [True, False]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Tensor_Binary_Add(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::add",
)
self._test_output(
Conv_Tensor_Binary_Add(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::sub",
)
self._test_output(
Conv_Tensor_Binary_Add(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::mul",
)
self._test_output(
Conv_Tensor_Binary_Add(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::div",
prec=2e-5,
)
self._test_output_bf16(
Conv_Tensor_Binary_Add(
torch.add,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary_Add(
torch.sub,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary_Add(
torch.mul,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Conv_Tensor_Binary_Add(
torch.div,
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
bias=bias,
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="aten::div",
prec=0.5,
)
def test_output_conv_bn_relu(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
Conv_Bn_Relu(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="ipex_prepack::convolution_relu_prepack",
)
def test_output_conv_reshape_relu(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
dst_shape = [16, 16, 62, 62]
if dim == 3:
dst_shape.append(62)
self._test_output(
ConvReshapeRelu(
dim,
in_channels,
out_channels,
dst_shape,
kernel_size=kernel_size,
stride=1,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_relu_run",
)
def test_output_conv_reshape_sum(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
dst_shape = [16, 16, 62, 62]
if dim == 3:
dst_shape.append(62)
self._test_output(
ConvReshapeSum(
dim,
in_channels,
out_channels,
dst_shape,
kernel_size=kernel_size,
stride=1,
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_add_run",
)
def test_output_conv_sum(self):
batch_size = 2
out_channels = 16
in_channels = 3
kernel_size = 3
image_size = 28
for dim in [1, 2, 3]:
if dim == 1:
input_size = [batch_size, in_channels, image_size]
else:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
ConvSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
)
self._test_output_bf16(
ConvSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
prec=0.1,
)
self._test_output(
ConvSum_v2(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
)
self._test_output_bf16(
ConvSum_v2(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
prec=0.1,
)
# add outputs' have different data format
m = ConvSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
).eval()
if dim == 2:
m.conv = m.conv.to(memory_format=torch.torch.channels_last)
else:
m.conv = m.conv.to(memory_format=torch.torch.channels_last_3d)
self._test_output(
m,
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
use_channels_last=[False],
)
m = ConvSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
).eval()
if dim == 2:
m.conv = m.conv.to(memory_format=torch.channels_last)
else:
m.conv = m.conv.to(memory_format=torch.channels_last_3d)
self._test_output_bf16(
m,
x,
kind_in_graph="ipex_prepack::convolution_add_run",
kind_not_in_graph="ipex_prepack::convolution_add_prepack",
prec=0.1,
use_channels_last=[False],
)
def test_conv_sum_dynamic_shape(self):
m = ConvSum(2, 3, 16, kernel_size=3, stride=1).eval()
x1 = torch.randn(1, 3, 56, 56)
x2 = torch.randn(2, 3, 56, 56)
with torch.no_grad():
traced = torch.jit.trace(m, x1)
traced = torch.jit.freeze(traced)
# apply fusion
y = m(x1)
y = m(x1)
traced_y = traced(x2)
eager_y = m(x2)
self.assertEqual(eager_y, traced_y)
def test_output_conv_scalar_sum(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
ConvScalarSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_add_run",
)
self._test_output_bf16(
ConvScalarSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_add_run",
prec=0.1,
)
def test_output_conv_broadcast_sum(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 3
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
ConvBroadcastSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_add_run",
)
self._test_output_bf16(
ConvBroadcastSum(
dim, in_channels, out_channels, kernel_size=kernel_size, stride=1
),
x,
kind_in_graph="ipex_prepack::convolution_run",
kind_not_in_graph="ipex_prepack::convolution_add_run",
prec=0.1,
)
    def test_output_cascaded_conv_bn_sum_relu(self):
        """Cascaded conv/BN blocks ending in add + relu: the BNs fold into the
        convs and the tail fuses into convolution_add_relu_run (FP32 and BF16)."""
        batch_size = 8
        mid_channels = 64
        out_channels = 32
        in_channels = 3
        kernel_size = 3
        image_size = 64
        for dim in [2, 3]:
            input_size = [batch_size, in_channels, image_size, image_size]
            if dim == 3:
                input_size.append(image_size)
            x = torch.randn(input_size)
            self._test_output(
                CascadedConvBnSumRelu(
                    dim,
                    in_channels,
                    mid_channels,
                    out_channels,
                    kernel_size=kernel_size,
                    stride=1,
                ),
                x,
                kind_in_graph="ipex_prepack::convolution_add_relu_run",
                kind_not_in_graph="ipex::batch_norm",
            )
            self._test_output_bf16(
                CascadedConvBnSumRelu(
                    dim,
                    in_channels,
                    mid_channels,
                    out_channels,
                    kernel_size=kernel_size,
                    stride=1,
                ),
                x,
                kind_in_graph="ipex_prepack::convolution_add_relu_run",
                kind_not_in_graph="ipex::batch_norm",
                prec=0.02,
            )
def test_bottleneck_fusion(self):
x1 = torch.randn(1, 64, 56, 56)
self._test_output(
Bottleneck_v1(),
x1,
kind_in_graph="ipex_prepack::convolution_bottleneck_run",
use_channels_last=[True],
levels=["O1"],
)
self._test_output_bf16(
Bottleneck_v1(),
x1,
kind_in_graph="ipex_prepack::convolution_bottleneck_run",
prec=0.03,
use_channels_last=[True],
levels=["O1"],
)
self._test_output(
Bottleneck_v2(),
x1,
kind_in_graph="ipex_prepack::convolution_bottleneck_run",
use_channels_last=[True],
levels=["O1"],
)
self._test_output_bf16(
Bottleneck_v2(),
x1,
kind_in_graph="ipex_prepack::convolution_bottleneck_run",
prec=0.03,
use_channels_last=[True],
levels=["O1"],
)
# dynamic shape
models = [Bottleneck_v1().eval(), Bottleneck_v2().eval()]
x2 = torch.randn(2, 64, 56, 56)
with torch.no_grad():
for m in models:
traced = torch.jit.trace(m, x1)
traced = torch.jit.freeze(traced)
# apply fusion
y = m(x1)
y = m(x1)
traced_y = traced(x2)
eager_y = m(x2)
self.assertEqual(eager_y, traced_y)
def test_jit_conv_sum_in_diff_block(self):
batch_size = 8
out_channels = 32
in_channels = 3
kernel_size = 1
image_size = 64
for dim in [2, 3]:
input_size = [batch_size, in_channels, image_size, image_size]
if dim == 3:
input_size.append(image_size)
x = torch.randn(input_size)
self._test_output(
ConvSumInDiffBlock(
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=0,
),
x,
kind_not_in_graph="ipex_prepack::convolution_add_run",
)
self._test_output_bf16(
ConvSumInDiffBlock(
dim,
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=0,
),
x,
kind_not_in_graph="ipex_prepack::convolution_add_run",
)
def test_output_conv_transpose(self):
def _deconv_params_list():
params_dict = {
"input_height": [12],
"input_width": [12],
"input_depth": [12],
"input_channel_per_group": [15],
"output_channel_per_group": [3],
"kernel_size": [3],
"bias": [True, False],
"stride": [1, 2],
"padding": [1, 2],
"output_padding": [0], # TODO: fix output_padding >1.
"groups": [1, 2],
"dilation": [1, 2],
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
def _deconv_with_output_padding():
params_dict = {
"input_height": 8,
"input_width": 8,
"input_depth": 8,
"input_channel_per_group": 10,
"output_channel_per_group": 10,
"kernel_size": 3,
"bias": False,
"stride": 2,
"padding": 1,
"output_padding": 2,
"groups": 1,
"dilation": 3,
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
params_list = _deconv_params_list()
for (
input_width,
input_height,
input_depth,
input_channel_per_group,
output_channel_per_group,
kernel_size,
bias,
stride,
padding,
output_padding,
groups,
dilation,
) in list(itertools.product(*params_list)) + [_deconv_with_output_padding()]:
if (
(output_padding < stride or output_padding < dilation)
and (
(input_height - 1) * stride
- 2 * padding
+ dilation * (kernel_size - 1)
+ output_padding
+ 1
> 0
)
and (
(input_width - 1) * stride
- 2 * padding
+ dilation * (kernel_size - 1)
+ output_padding
+ 1
> 0
)
and (
(input_depth - 1) * stride
- 2 * padding
+ dilation * (kernel_size - 1)
+ output_padding
+ 1
> 0
)
):
ic = input_channel_per_group * groups
oc = output_channel_per_group * groups
for dim in [2, 3]:
if dim == 2:
x = torch.randn(2, ic, input_height, input_width)
else:
x = torch.randn(2, ic, input_depth, input_height, input_width)
model = ConvTranspose(
dim,
ic,
oc,
kernel_size,
stride,
padding,
output_padding,
groups,
bias,
dilation,
)
self._test_output(
model,
x,
kind_in_graph="ipex_prepack::conv_transpose_run",
kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
levels=["O0"],
)
# temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
# self._test_output_bf16(
# model,
# x,
# kind_in_graph="ipex_prepack::conv_transpose_run",
# kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
# levels=["O0"],
# prec=0.02)
self._test_output(
model,
x,
kind_in_graph="ipex_prepack::conv_transpose_run",
kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
levels=["O1"],
)
# temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
# self._test_output_bf16(
# model,
# x,
# kind_in_graph="ipex_prepack::conv_transpose_run",
# kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
# levels=["O1"],
# prec=0.02)
def test_conv_transpose_unary_fusion(self):
self._test_conv_transpose_unary_fusion(unary_PyTorch_op_to_IPEX_op_map)
self._test_conv_transpose_unary_fusion(
PyTorch_op_to_IPEX_op_fixed_seed_map, 1654583254233936896
)
    def test_conv_transpose_non_unary_fusion(self):
        """ConvTranspose fusion with eltwise ops whose graph form is not a single unary node."""
        self._test_conv_transpose_unary_fusion(non_unary_PyTorch_op_to_IPEX_op_map)
def test_conv_transpose_fusion_unsupported_case(self):
dim = 2
batch_size = 1
in_channels = 3
out_channels = 5
in_channels = 3
kernel_size = 3
image_size = 8
for eltwise in unsupported_PyTorch_op_to_IPEX_op_map:
input_size = [batch_size, in_channels, image_size, image_size]
unary_fusion_op = unsupported_PyTorch_op_to_IPEX_op_map[eltwise]
ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
bf16_supported = unary_fusion_op.bf16_supported
prec = unary_fusion_op.prec
op_input_list = unary_fusion_op.op_input_list
x = torch.randn(input_size)
m = ConvTransposeEltwise(
eltwise,
dim,
in_channels,
out_channels,
kernel_size,
image_size,
**op_input_list,
)
self._test_fusion_unsupported_case(
m,
x,
kind_not_in_graph="ipex_prepack::conv_transpose_%s_run"
% ipex_eltwise_op,
)
    def test_conv_transpose_sigmoid_mul(self):
        """ConvTranspose + sigmoid + mul (swish) fuses into conv_transpose_swish_run,
        for both out-of-place and in-place mul."""
        batch_size = 1
        out_channels = 5
        in_channels = 3
        kernel_size = 3
        image_size = 8
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        # prec feeds only the (temporarily disabled) bf16 check below.
        prec = 0.02
        for dim in [2, 3]:
            for eltwise in [torch.mul, lambda a, b: a.mul_(b)]:
                input_size = [batch_size, in_channels, image_size, image_size]
                if dim == 3:
                    input_size.append(image_size)
                ipex_eltwise_op = "swish"
                x = torch.randn(input_size)
                m = ConvTransposeSigmoidMul(
                    eltwise, dim, in_channels, out_channels, kernel_size, image_size
                )
                self._test_output(
                    m,
                    x,
                    kind_in_graph="ipex_prepack::conv_transpose_%s_run"
                    % ipex_eltwise_op,
                    kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                )
                # temporary disable before https://github.com/pytorch/pytorch/pull/92530 merged
                # self._test_output_bf16(
                #     m,
                #     x,
                #     kind_in_graph="ipex_prepack::conv_transpose_%s_run" % ipex_eltwise_op,
                #     kind_not_in_graph="ipex_prepack::conv_transpose_prepack",
                #     prec=prec)
def test_linear_fp32_with_dynamic_input(self):
x1 = torch.rand(512, 64)
x2 = torch.rand(15, 64)
model = LinearRelu(64, 241, bias=True).eval()
model1 = ipex.optimize(model, dtype=torch.float32, level="O1")
model2 = ipex.optimize(model, dtype=torch.float32, level="O1", sample_input=x2)
y1_ref = model(x1)
y2_ref = model(x2)
y11 = model1(x1)
y12 = model2(x1)
y21 = model1(x2)
y22 = model2(x2)
self.assertEqual(y1_ref, y11, prec=1e-5)
self.assertEqual(y1_ref, y12, prec=1e-5)
self.assertEqual(y2_ref, y21, prec=1e-5)
self.assertEqual(y2_ref, y22, prec=1e-5)
with torch.no_grad():
traced_model11 = torch.jit.trace(model1, x1).eval()
traced_model11 = torch.jit.freeze(traced_model11)
traced_model12 = torch.jit.trace(model2, x1).eval()
traced_model12 = torch.jit.freeze(traced_model12)
for i in range(4):
if i % 2 == 0:
z11 = traced_model11(x1)
z12 = traced_model12(x1)
else:
z21 = traced_model11(x2)
z22 = traced_model12(x2)
self.assertEqual(y1_ref, z11, prec=1e-5)
self.assertEqual(y1_ref, z12, prec=1e-5)
self.assertEqual(y2_ref, z21, prec=1e-5)
self.assertEqual(y2_ref, z22, prec=1e-5)
def test_linear_auto_kernel_selection_fp32(self):
x = torch.rand(32, 3)
options = itertools.product(["O0", "O1"], [True, False])
for level, auto_select_kernel in options:
model = LinearRelu(3, 32, bias=True).eval()
model = ipex.optimize(
model,
dtype=torch.float32,
level=level,
auto_kernel_selection=auto_select_kernel,
)
with torch.no_grad():
traced_model = torch.jit.trace(model, x).eval()
traced_model = torch.jit.freeze(traced_model)
y = traced_model(x)
trace_graph = traced_model.graph_for(x)
if not auto_select_kernel and level == "O1":
# for auto_select_kernel is False and level is O1 (weights_prepack is True),
# we will use ipex prepacked MKL linear
self.assertTrue(
any(
n.kind() == "ipex_prepack::mkl_sgemm_run"
for n in trace_graph.nodes()
)
)
elif auto_select_kernel and level == "O1":
# for auto_select_kernel is True and level is O1 (weights_prepack is True),
# we will use onednn prepacked linear
self.assertTrue(
any(
n.kind() == "ipex_prepack::linear_relu_run"
for n in trace_graph.nodes()
)
)
else:
# level is O0 (weights_prepack is False), we will use mkl linear
self.assertTrue(
any(n.kind() == "aten::linear" for n in trace_graph.nodes())
)
def test_linear_auto_kernel_selection_bf16(self):
x = torch.rand(32, 3)
options = itertools.product(["O0", "O1"], [True, False])
for level, auto_select_kernel in options:
model = LinearRelu(3, 32, bias=True).eval()
model = ipex.optimize(
model,
dtype=torch.bfloat16,
level=level,
auto_kernel_selection=auto_select_kernel,
)
with torch.cpu.amp.autocast(), torch.no_grad():
traced_model = torch.jit.trace(model, x).eval()
traced_model = torch.jit.freeze(traced_model)
y = traced_model(x)
trace_graph = traced_model.graph_for(x)
# for bfloat16 path, we will use ipex linear for 'O0' and 'O1'
self.assertTrue(
any(
"prim::If" in n.kind()
or n.kind() == "ipex_prepack::linear_relu_run"
for n in trace_graph.nodes()
)
)
def test_output_linear_scalar_binary(self):
for bias in [True, False]:
self._test_output(
Linear_Scalar_Binary(torch.add, 3, 1, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::add",
)
self._test_output(
Linear_Scalar_Binary(torch.add, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::add",
)
self._test_output(
Linear_Scalar_Binary(torch.sub, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::sub",
)
self._test_output(
Linear_Scalar_Binary(torch.mul, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::mul",
)
self._test_output(
Linear_Scalar_Binary(torch.div, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::div",
)
self._test_output_bf16(
Linear_Scalar_Binary(torch.add, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Linear_Scalar_Binary(torch.sub, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Linear_Scalar_Binary(torch.mul, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Linear_Scalar_Binary(torch.div, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::div",
prec=0.1,
)
def test_output_linear_tensor_binary(self):
for bias in [True, False]:
self._test_output(
Linear_Tensor_Binary2(torch.add, 3, 2, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::add",
)
self._test_output(
Linear_Tensor_Binary3(torch.add, 3, 2, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::add",
)
self._test_output(
Linear_Tensor_Binary(torch.add, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::add",
)
self._test_output(
Linear_Tensor_Binary(torch.sub, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::sub",
)
self._test_output(
Linear_Tensor_Binary(torch.mul, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::mul",
)
self._test_output(
Linear_Tensor_Binary(torch.div, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="aten::linear",
kind_not_in_graph="aten::div",
)
self._test_output_bf16(
Linear_Tensor_Binary(torch.add, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
Linear_Tensor_Binary(torch.sub, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::sub",
prec=0.1,
)
self._test_output_bf16(
Linear_Tensor_Binary(torch.mul, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
self._test_output_bf16(
Linear_Tensor_Binary(torch.div, 3, 32, bias=bias),
torch.randn(52, 3),
kind_in_graph="ipex_prepack::linear_run",
kind_not_in_graph="aten::div",
prec=0.2,
)
def _test_linear_unary_fusion(self, op_list, seed=None, cls=None):
batch_size = 3
out_channels = 32
in_channels = 3
if seed is None:
rand_seed = int(get_rand_seed())
print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
torch.manual_seed(rand_seed)
else:
print("{} rand sed: {}".format(sys._getframe().f_code.co_name, seed))
torch.manual_seed(seed)
for bias in [True, False]:
for eltwise in op_list:
input_size = [batch_size, in_channels]
unary_fusion_op = op_list[eltwise]
ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
bf16_supported = unary_fusion_op.bf16_supported
prec = unary_fusion_op.prec
op_input_list = unary_fusion_op.op_input_list
x = torch.randn(input_size)
_cls = cls if cls is not None else LinearDivEltwise
m = _cls(eltwise, in_channels, out_channels, bias, **op_input_list)
self._test_output(m, x, kind_in_graph="aten::linear")
self._test_mkl_fp32(m, x, kind_in_graph="ipex_prepack::mkl_sgemm_run")
self._test_dnnl_fp32(
m, x, kind_in_graph="ipex_prepack::linear_%s_run" % ipex_eltwise_op
)
if bf16_supported:
self._test_output_bf16(
m,
x,
kind_in_graph="ipex_prepack::linear_%s_run" % ipex_eltwise_op,
kind_not_in_graph="ipex_prepack::linear_prepack",
prec=prec,
)
def test_linear_unary_fusion(self):
self._test_linear_unary_fusion(unary_PyTorch_op_to_IPEX_op_map)
self._test_linear_unary_fusion(
PyTorch_op_to_IPEX_op_fixed_seed_map, 1654065112450588160
)
def test_linear_non_unary_fusion(self):
self._test_linear_unary_fusion(non_unary_PyTorch_op_to_IPEX_op_map)
def test_linear_fusion_unsupported_case(self):
batch_size = 3
out_channels = 32
in_channels = 3
bias = False
for eltwise in unsupported_PyTorch_op_to_IPEX_op_map:
input_size = [batch_size, in_channels]
unary_fusion_op = unsupported_PyTorch_op_to_IPEX_op_map[eltwise]
ipex_eltwise_op = unary_fusion_op.ipex_eltwise_op
bf16_supported = unary_fusion_op.bf16_supported
prec = unary_fusion_op.prec
op_input_list = unary_fusion_op.op_input_list
x = torch.randn(input_size)
m = LinearEltwise(eltwise, in_channels, out_channels, bias, **op_input_list)
self._test_fusion_unsupported_case(
m,
x,
auto_kernel_selection=True,
kind_not_in_graph="ipex_prepack::linear_%s_run" % ipex_eltwise_op,
)
def test_output_linear_add(self):
self._test_output(
LinearAdd(3, 32, bias=True), torch.rand(32, 3), kind_in_graph="aten::linear"
)
self._test_mkl_fp32(
LinearAdd(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::mkl_sgemm_run",
)
self._test_dnnl_fp32(
LinearAdd(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::linear_add_run",
)
self._test_output_bf16(
LinearAdd(3, 32, bias=True),
torch.rand(32, 3),
kind_not_in_graph="aten::linear",
kind_in_graph="ipex_prepack::linear_add_run",
prec=5e-2,
)
def test_output_linear_add_relu(self):
for inplace in [True, False]:
m = LinearAddRelu(3, 5, 8, inplace)
x = torch.randn(2, 3)
self._test_output(m, x, kind_in_graph="aten::linear")
self._test_mkl_fp32(m, x, kind_in_graph="ipex_prepack::mkl_sgemm_run")
self._test_dnnl_fp32(
m, x, kind_in_graph="ipex_prepack::linear_add_relu_run"
)
self._test_output_bf16(
m,
x,
kind_in_graph="ipex_prepack::linear_add_relu_run",
kind_not_in_graph="ipex_prepack::linear_add_run",
prec=5e-2,
)
def test_output_linear_reshape_relu(self):
self._test_output(
Linear_Reshape_Relu(3, 32, (64, 16), bias=True),
torch.rand(32, 3),
kind_in_graph="aten::linear",
)
def test_output_linear_bn(self):
self._test_output(
LinearBn(2, 32, 32, bias=True),
torch.rand(1, 1, 32, 32),
kind_in_graph="aten::linear",
)
def test_output_linear_reshape_bn(self):
self._test_output(
Linear_Reshape_Bn(2, 32, 32, (1, 1, 64, 16), bias=True),
torch.rand(1, 1, 32, 32),
kind_in_graph="aten::linear",
)
def test_output_linear_with_transposed_weight(self):
self._test_mkl_fp32(
Linear_With_Transposed_Weight(133, 133),
torch.randn(2, 133),
kind_in_graph="ipex_prepack::mkl_sgemm_run",
)
def test_output_linear_swish(self):
self._test_mkl_fp32(
LinearSigmoidMul(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::mkl_sgemm_run",
)
self._test_mkl_fp32(
LinearSigmoidMul(3, 32, bias=False),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::mkl_sgemm_run",
)
self._test_dnnl_fp32(
LinearSigmoidMul(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::linear_swish_run",
)
self._test_dnnl_fp32(
LinearSigmoidMul(3, 32, bias=False),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::linear_swish_run",
)
self._test_output_bf16(
LinearSigmoidMul(3, 32, bias=True),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::linear_swish_run",
prec=5e-3,
)
self._test_output_bf16(
LinearSigmoidMul(3, 32, bias=False),
torch.rand(32, 3),
kind_in_graph="ipex_prepack::linear_swish_run",
prec=5e-3,
)
def test_channel_shuffle(self):
self._test_output(
ChannelShuffle_with_Static_Shape(10, 16, 50, 50, 4),
torch.rand(10, 16, 50, 50),
kind_in_graph="ipex::shuffle_2d",
)
self._test_output(
ChannelShuffle_with_Dynamic_Shape(4),
torch.rand(10, 16, 50, 50),
kind_in_graph="ipex::shuffle_2d",
)
self._test_output(
NotChannelShuffle(4),
torch.rand(10, 16, 50, 60),
kind_not_in_graph="ipex::shuffle_2d",
)
def test_jit_function(self):
# test hool trace and script can works for function
def fn(input, weight, bias):
return F.linear(input, weight, bias)
input = torch.randn(2, 4)
weight = torch.randn(5, 4)
bias = torch.randn(5)
result = fn(input, weight, bias)
scripted_fn = torch.jit.script(fn)
traced_fn = torch.jit.trace(fn, (input, weight, bias))
self.assertEqual(scripted_fn(input, weight, bias), result)
self.assertEqual(traced_fn(input, weight, bias), result)
def test_matmul_div_or_mul(self):
inputs = [torch.randn(10, 3, 4), torch.randn(3, 4)]
for x in inputs:
self._test_output(
MatmulMul(mul_scalar=True, with_out=False),
x,
kind_in_graph="ipex::matmul_mul",
kind_not_in_graph=None,
)
self._test_output(
MatmulMul(mul_scalar=True, with_out=True),
x,
kind_in_graph="ipex::matmul_mul",
kind_not_in_graph=None,
)
self._test_output(
MatmulMul(mul_scalar=False, with_out=True),
x,
kind_in_graph=None,
kind_not_in_graph="ipex::matmul_mul",
)
self._test_output_bf16(
MatmulMul(mul_scalar=True, with_out=False),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_mul",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output_bf16(
MatmulMul(mul_scalar=True, with_out=True),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_mul",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output(
MatmulDivOutplace(div_scalar=True, with_out=True),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivOutplace(div_scalar=True, with_out=False),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivOutplace(div_scalar=False, with_out=False),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivOutplace(div_scalar=False, with_out=True),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output_bf16(
MatmulDivOutplace(div_scalar=True, with_out=True),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output_bf16(
MatmulDivOutplace(div_scalar=True, with_out=False),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output_bf16(
MatmulDivOutplace(div_scalar=False, with_out=True),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-3,
)
self._test_output_bf16(
MatmulDivOutplace(div_scalar=False, with_out=False),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-3,
)
self._test_output(
MatmulDivInplace(div_scalar=True, with_out=True),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivInplace(div_scalar=True, with_out=False),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivInplace(div_scalar=False, with_out=False),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output(
MatmulDivInplace(div_scalar=False, with_out=True),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
self._test_output_bf16(
MatmulDivInplace(div_scalar=True, with_out=True),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output_bf16(
MatmulDivInplace(div_scalar=True, with_out=False),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-2,
)
self._test_output_bf16(
MatmulDivInplace(div_scalar=False, with_out=True),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-3,
)
self._test_output_bf16(
MatmulDivInplace(div_scalar=False, with_out=False),
x.to(torch.bfloat16),
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
prec=5e-3,
)
# When the div is outplace and out parameter be modified with an inplace op not in this pattern,
# but we didn't observe it's value.
self._test_output(
MatmulDivOutplaceOutModifiedByOtherOP_v1(div_scalar=True),
x,
kind_in_graph="ipex::matmul_div",
kind_not_in_graph=None,
)
# When the div is outplace and out parameter be modified with an inplace op not in this pattern,
# and we observe it's value by some other op("==" -> aten::equl). In this case, jit.trace will treat
# out parameter that will modified by other ops as output of matmul, thus will not be matched by
# our pattern, and we can't observe our fused op's side effect after we modified out param by an inplace op.
self._test_output(
MatmulDivOutplaceOutModifiedByOtherOP_v2(div_scalar=False),
x,
kind_in_graph=None,
kind_not_in_graph="ipex::matmul_div",
)
def test_transposed_matmuldiv(self):
x1 = [
torch.randn(53, 23, 27, 25),
torch.randn(53, 27, 23, 25).transpose(1, 2),
torch.randn(53, 23, 25, 27).transpose(2, 3),
torch.randn(53, 25, 23, 27).transpose(2, 3).transpose(1, 3),
]
y1 = [
torch.randn(53, 23, 25, 27),
torch.randn(53, 25, 23, 27).transpose(1, 2),
torch.randn(53, 23, 27, 25).transpose(2, 3),
torch.randn(53, 27, 23, 25).transpose(2, 3).transpose(1, 3),
]
model = TransposedMatmulDiv().eval()
model_fp32 = ipex.optimize(model, dtype=torch.float32, level="O1")
model_bf16 = ipex.optimize(model, dtype=torch.bfloat16, level="O1")
for i in range(len(x1)):
for j in range(len(y1)):
with torch.no_grad():
traced_mod = torch.jit.trace(model, (x1[i], y1[j]))
fused_mod = traced_mod.graph_for(x1[i], y1[j])
out = traced_mod(x1[i], y1[j])
expected = model(x1[i], y1[j])
self.assertTrue(
any(n.kind() == "ipex::matmul_mul" for n in fused_mod.nodes())
)
self.assertEqual(out, expected, prec=1e-4)
with torch.cpu.amp.autocast(), torch.no_grad():
traced_mod = torch.jit.trace(
model, (x1[i].bfloat16(), y1[j].bfloat16())
)
fused_mod = traced_mod.graph_for(x1[i].bfloat16(), y1[j].bfloat16())
out = traced_mod(x1[i].bfloat16(), y1[j].bfloat16())
expected = model(x1[i].bfloat16(), y1[j].bfloat16())
self.assertTrue(
any(n.kind() == "ipex::matmul_mul" for n in fused_mod.nodes())
)
self.assertEqual(out, expected, prec=1e-1)
def test_bmm_add(self):
M = torch.randn(60, 30, 50)
x1 = [
torch.randn(60, 30, 40),
torch.randn(60, 40, 30).transpose(1, 2),
torch.randn(30, 60, 40).transpose(0, 1),
]
y1 = [
torch.randn(60, 40, 50),
torch.randn(60, 50, 40).transpose(1, 2),
torch.randn(50, 40, 60).transpose(0, 2),
]
model = BmmAdd().eval()
model_fp32 = ipex.optimize(model, dtype=torch.float32, level="O1")
model_bf16 = ipex.optimize(model, dtype=torch.bfloat16, level="O1")
for i in range(len(x1)):
for j in range(len(y1)):
with torch.no_grad():
traced_mod = torch.jit.trace(model, (M, x1[i], y1[j]))
fused_mod = traced_mod.graph_for(M, x1[i], y1[j])
out = traced_mod(M, x1[i], y1[j])
expected = torch.baddbmm(M, x1[i], y1[j])
self.assertTrue(
any(n.kind() == "ipex::bmm_add" for n in fused_mod.nodes())
)
self.assertEqual(out, expected, prec=1e-4)
with torch.cpu.amp.autocast(), torch.no_grad():
traced_mod = torch.jit.trace(
model, (M.bfloat16(), x1[i].bfloat16(), y1[j].bfloat16())
)
fused_mod = traced_mod.graph_for(
M.bfloat16(), x1[i].bfloat16(), y1[j].bfloat16()
)
out = traced_mod(M.bfloat16(), x1[i].bfloat16(), y1[j].bfloat16())
expected = torch.baddbmm(
M.bfloat16(), x1[i].bfloat16(), y1[j].bfloat16()
)
self.assertTrue(
any(n.kind() == "ipex::bmm_add" for n in fused_mod.nodes())
)
self.assertEqual(out, expected, prec=1e-1)
    def test_einsum_add(self):
        """torch.einsum followed by add folds into ipex::einsum_binary across a
        wide range of equations and operand shapes; also covers in-place adds,
        scalar operands, and runtime shape-mismatch error propagation."""
        def _test_fp32(
            model_test,
            input1,
            input2,
            bias=None,
            kind_in_graph="ipex::einsum_binary",
            prec=1e-3,
        ):
            # Trace+freeze an ipex-optimized copy, warm it up twice, then check
            # numerics against eager and that `kind_in_graph` appears in the graph.
            model = copy.deepcopy(model_test)
            model = model.eval()
            model = ipex.optimize(model, dtype=torch.float32)
            with torch.no_grad():
                tr_model = torch.jit.trace(model, (input1, input2, bias))
                tr_model = torch.jit.freeze(tr_model)
                tr_model(input1, input2, bias)
                tr_model(input1, input2, bias)
                trace_graph = tr_model.graph_for(input1, input2, bias)
                res_jit = tr_model(
                    input1,
                    input2,
                    bias,
                )
                res_ref = model(input1, input2, bias)
                self.assertEqual(res_ref, res_jit, prec)
                self.assertTrue(
                    any(n.kind() == kind_in_graph for n in trace_graph.nodes())
                )
        # --- basic matmul-style equations with broadcastable bias ---
        bias = torch.randn(2, 3, 2304)
        input1 = torch.randn(2, 3, 768)
        input2 = torch.randn(768, 2304)
        model_v1 = EinsumAdd("bsh,ho->bso")
        _test_fp32(model_v1, input1, input2, bias)
        bias = torch.randn(1, 1, 1, 4)
        input1 = torch.randn(12, 1, 4, 16)
        input2 = torch.randn(12, 4, 4, 16)
        model_v1 = EinsumAdd("bqhc,bkhc->bhqk")
        _test_fp32(model_v1, input1, input2, bias)
        # --- in-place add after einsum ---
        bias = torch.randn(2304)
        input1 = torch.randn(4, 3, 768)
        input2 = torch.randn(768, 2304)
        model_v1 = EinsumAddInplace("bsh,ho->bso")
        _test_fp32(model_v1, input1, input2, bias)
        # --- scalar addend: driven inline since the model takes no bias arg ---
        input1 = torch.randn(8, 3, 768)
        input2 = torch.randn(768, 2304)
        model = EinsumAddScalar("bsh,ho->bso").eval()
        res_ref = model(input1, input2)
        tr_model = torch.jit.trace(model, (input1, input2))
        tr_model = torch.jit.freeze(tr_model)
        tr_model(input1, input2)
        tr_model(input1, input2)
        trace_graph = tr_model.graph_for(input1, input2)
        res_jit = tr_model(input1, input2)
        self.assertEqual(res_ref, res_jit, prec=1e-3)
        self.assertTrue(
            any(n.kind() == "ipex::einsum_binary" for n in trace_graph.nodes())
        )
        # --- in-place variant that must NOT fuse: plain aten::einsum expected ---
        bias = torch.randn(4, 3, 2304)
        input1 = torch.randn(4, 3, 768)
        input2 = torch.randn(768, 2304)
        model_v1 = EinsumAddInplaceV1("bsh,ho->bso")
        _test_fp32(model_v1, input1, input2, bias, kind_in_graph="aten::einsum")
        # --- attention-style and batched equations ---
        bias1 = torch.randn(2, 4, 128, 128)
        input3 = torch.randn(2, 4, 128, 768)
        input4 = torch.randn(2, 4, 128, 768)
        model_v2 = EinsumAdd("bnqd,bnkd->bnqk")
        _test_fp32(model_v2, input3, input4, bias1)
        bias1 = torch.randn(8, 1, 1, 128)
        input3 = torch.randn(8, 4, 128, 768)
        input4 = torch.randn(8, 4, 128, 768)
        model_v2 = EinsumAdd("bnqd,bnkd->bnqk")
        _test_fp32(model_v2, input3, input4, bias1)
        bias1 = torch.randn(2, 4, 128, 768)
        input1 = torch.randn(2, 4, 128, 768)
        input2 = torch.randn(4, 768, 768)
        model_v2 = EinsumAdd("balh,ahr->balr")
        _test_fp32(model_v2, input1, input2, bias1)
        # --- 2D gemm-like equations ---
        bias1 = torch.randn(768)
        input1 = torch.randn(128, 1024)
        input2 = torch.randn(768, 1024)
        model_v2 = EinsumAdd("mc,nc->mn")
        _test_fp32(model_v2, input1, input2, bias1)
        bias1 = torch.randn(768)
        input1 = torch.randn(128, 1024)
        input2 = torch.randn(1024, 768)
        model_v2 = EinsumAdd("mc,cn->mn")
        _test_fp32(model_v2, input1, input2, bias1)
        # --- repeated / reduced subscripts and degenerate shapes ---
        bias1 = torch.randn(1)
        input1 = torch.randn(1024, 1)
        input2 = torch.randn(1024, 1024)
        model_v2 = EinsumAdd("mc,cc->mc")
        _test_fp32(model_v2, input1, input2, bias1)
        bias1 = torch.randn(1)
        input1 = torch.randn(1024, 1)
        input2 = torch.randn(1024)
        model_v2 = EinsumAdd("mc,c->mc")
        _test_fp32(model_v2, input1, input2, bias1)
        bias1 = torch.randn(1, 1)
        input1 = torch.randn(1, 1)
        input2 = torch.randn(1)
        model_v2 = EinsumAdd("mc,c->m")
        _test_fp32(model_v2, input1, input2, bias1)
        bias1 = torch.randn(2)
        input1 = torch.randn(2)
        input2 = torch.tensor(2)
        model_v2 = EinsumAdd("m,...->m")
        _test_fp32(model_v2, input1, input2, bias1)
        # this case is testing the repeated dim c meeting unmatched size during runtime
        # which is excepted as a RuntimeError
        try:
            bias1 = torch.randn(1)
            input1 = torch.randn(1024, 1)
            input2 = torch.randn(1024, 512)
            input2_fake = torch.randn(1024, 1024)
            model_v2 = EinsumAdd("mc,cc->mc").eval()
            model_v2 = ipex.optimize(model_v2, dtype=torch.float32)
            with torch.no_grad():
                tr_model = torch.jit.trace(model_v2, (input1, input2_fake, bias1))
                tr_model = torch.jit.freeze(tr_model)
                tr_model(input1, input2_fake, bias1)
                tr_model(input1, input2, bias1)
            # it is not excepted if no RuntimeError exception is found
            # so end with assert
            self.assertTrue(False)
        except RuntimeError as e:
            expected_error = (
                "subscript c is repeated for operand 1 but the sizes don't match"
            )
            self.assertTrue(expected_error in str(e))
            logging.info("expected RuntimeError is found")
        finally:
            pass
        # this case is testing the broadcast dim b meeting remapped shape during runtime
        # which is excepted as a RuntimeError
        try:
            bias1 = torch.randn(2)
            input1 = torch.randn(2)
            input2 = torch.randn(4, 4)
            input2_fake = torch.randn(2, 4)
            model_v2 = EinsumAdd("b,bj->b").eval()
            with torch.no_grad():
                tr_model = torch.jit.trace(model_v2, (input1, input2_fake, bias1))
                tr_model = torch.jit.freeze(tr_model)
                tr_model(input1, input2_fake, bias1)
                tr_model(input1, input2, bias1)
            # it is not excepted if no RuntimeError exception is found
            # so end with assert
            self.assertTrue(False)
        except RuntimeError as e:
            expected_error = (
                "operands do not broadcast with remapped shapes [original->remapped]"
            )
            self.assertTrue(expected_error in str(e))
            logging.info("expected RuntimeError is found")
        finally:
            pass
        # --- scalar-output equation: numerics only, no graph-kind check ---
        bias1 = torch.randn(2)
        input1 = torch.randn(2)
        input2 = torch.randn(2)
        model_v2 = EinsumAdd("i,j->").eval()
        model_ipex = ipex.optimize(model_v2, dtype=torch.float32)
        with torch.no_grad():
            res_ref = model_v2(input1, input2, bias1)
            tr_model = torch.jit.trace(model_ipex, (input1, input2, bias1))
            tr_model = torch.jit.freeze(tr_model)
            tr_model(input1, input2, bias1)
            res_jit = tr_model(input1, input2, bias1)
            self.assertEqual(res_ref, res_jit, prec=1e-3)
        # sum dims > 2
        bias = torch.randn(1, 7)
        input1 = torch.randn(3, 4, 6, 7)
        input2 = torch.randn(4, 6, 7)
        model_v2 = EinsumAdd("sho,ksho->ko")
        _test_fp32(model_v2, input2, input1, bias)
        bias = torch.randn(1, 7)
        input1 = torch.randn(3, 6, 7)
        input2 = torch.randn(6, 7)
        model_v2 = EinsumAdd("so,kso->ko")
        _test_fp32(model_v2, input2, input1, bias)
        # --- transposed output and multi-axis contractions ---
        bias1 = torch.randn(1024)
        input1 = torch.randn(1024, 1024)
        input2 = torch.randn(1024, 1024)
        model_v2 = EinsumAdd("mc,cn->nm")
        _test_fp32(model_v2, input1, input2, bias1)
        bias1 = torch.randn(768)
        input1 = torch.randn(2, 128, 1024)
        input2 = torch.randn(1024, 23, 768)
        model_v2 = EinsumAdd("bqc,chv->bqhv")
        _test_fp32(model_v2, input1, input2, bias1)
        bias = torch.randn(768)
        input1 = torch.randn(2, 128, 16, 64)
        input2 = torch.randn(16, 64, 768)
        model = EinsumAdd("bqhc,hco->bqo")
        _test_fp32(model, input1, input2, bias)
        bias = torch.randn(8)
        input1 = torch.randn(8)
        input2 = torch.randn(8)
        model = EinsumAdd("i,i->")
        _test_fp32(model, input1, input2, bias)
        # the output of torch.einsum("ij,j") is tensor([])
        bias = torch.randn(1)
        input1 = torch.randn(0, 3)
        input2 = torch.randn(3)
        model = EinsumAdd(("ij,j"))
        _test_fp32(model, input1, input2, bias)
        # --- shapes taken from ViT / AlphaFold2 workloads ---
        bias = torch.randn(1, 4, 49, 49)
        input1 = torch.randn(8, 4, 49, 32)
        input2 = torch.randn(8, 4, 49, 32)
        model_from_vit = EinsumAdd("bhid,bhjd->bhij")
        _test_fp32(model_from_vit, input1, input2, bias)
        bias = torch.randn(1, 1, 49, 49)
        input1 = torch.randn(8, 6, 49, 32)
        input2 = torch.randn(8, 6, 49, 32)
        model_from_vit_v2 = EinsumAdd("bhid,bhjd->bhij")
        _test_fp32(model_from_vit_v2, input1, input2, bias)
        bias = torch.randn(8, 1, 1, 49)
        input1 = torch.randn(8, 6, 49, 32)
        input2 = torch.randn(8, 6, 49, 32)
        model_from_vit_alphafold2_v1 = EinsumAdd("bhid,bhjd->bhij")
        _test_fp32(model_from_vit_alphafold2_v1, input1, input2, bias)
        bias = torch.randn(1, 1, 32)
        input1 = torch.randn(6, 50, 32)
        input2 = torch.randn(32, 32)
        model_from_vit_alphafold2_v2 = EinsumAdd("bsh,ho->bso")
        _test_fp32(model_from_vit_alphafold2_v2, input1, input2, bias)
        bias = torch.randn(6, 1, 50)
        input1 = torch.randn(6, 50, 32)
        input2 = torch.randn(6, 32, 50)
        model_from_vit_alphafold2_v3 = EinsumAdd("bsh,bho->bso")
        _test_fp32(model_from_vit_alphafold2_v3, input1, input2, bias)
def test_ipex_softmax(self):
self._test_output(
AtenSoftmaxRepalce(), torch.rand(3, 4, 4), kind_in_graph="ipex::softmax"
)
self._test_output_bf16(
AtenSoftmaxRepalce(),
torch.rand(3, 4, 4, dtype=torch.bfloat16),
kind_in_graph="ipex::softmax",
prec=5e-3,
)
def test_ipex_batch_norm(self):
self._test_output(
AtenBatchNormRepalce(),
torch.rand(10, 10, 4, 4),
kind_in_graph="ipex::batch_norm",
)
self._test_output_bf16(
AtenBatchNormRepalce(),
torch.rand(10, 10, 4, 4, dtype=torch.bfloat16),
kind_in_graph="ipex::batch_norm",
prec=5e-3,
)
def test_max_pool2d_int8(self):
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.pool = torch.nn.MaxPool2d(3, stride=2)
def forward(self, x):
x = torch.quantize_per_tensor(x, 0.1, 10, torch.quint8)
return self.pool(x)
model = Model().eval()
x = torch.randn(1, 3, 24, 24)
with torch.no_grad():
ref_out = model(x)
traced_model = torch.jit.trace(model, x)
traced_out = traced_model(x)
self.assertEqual(ref_out, traced_out)
trace_graph = traced_model.graph_for(x)
self.assertTrue(
any(n.kind() == "aten::max_pool2d" for n in trace_graph.nodes())
)
    def test_restore_inplace(self):
        """In-place eltwise ops that the JIT temporarily outplaces for fusion
        must be restored to their in-place form when no fusion applies."""
        class M(nn.Module):
            # Wraps an arbitrary eltwise callable between a multiply (so the
            # eltwise input is not the graph input) and a conv.
            def __init__(self, eltwise_fn, params_dict=None):
                if params_dict is None:
                    params_dict = {}
                super(M, self).__init__()
                self.conv = torch.nn.Conv2d(3, 5, 3, 3)
                self.eltwise = eltwise_fn
                self.params_dict = params_dict
            def forward(self, x):
                x = x * 3.1
                x = self.eltwise(x, **self.params_dict)
                x = self.conv(x)
                return x
        for eltwise in [
            "sigmoid",
            "tanh",
            "celu",
            "elu",
            "hardsigmoid",
            "hardswish",
            "hardtanh",
            "leaky_relu",
            "relu6",
            "relu",
            "rrelu",
            "selu",
            "silu",
            "clamp",
        ]:
            # the in-place spelling we expect to survive in the final graph
            eltwise_fn_name = eltwise + "_"
            if eltwise in ["sigmoid", "tanh", "celu", "relu", "rrelu", "selu"]:
                # use torch.sigmoid_(x)
                eltwise_fn = getattr(torch, eltwise_fn_name)
                m = M(eltwise_fn)
            elif eltwise == "clamp":
                eltwise_fn = getattr(torch, eltwise_fn_name)
                m = M(eltwise_fn, {"min": 0, "max": 2})
            else:
                # use F.elu(x, inplace = True)
                eltwise_fn = getattr(F, eltwise)
                m = M(eltwise_fn, {"inplace": True})
            with torch.no_grad():
                m.eval()
                x = torch.randn(1, 3, 16, 16)
                # test restore inplace
                # Since TE is with priority and it has not supported inplace op yet, we make inplace optimization after TE.
                # Some in place ops replaced by replaceInplaceOpsWithOutplaceOps will be optimized by TE and won't
                # resume by ApplyInplaceOptimization.
                # Thus we need to disable TE here.
                with self._texpr_enable(False):
                    traced = torch.jit.trace(m, x)
                    trace_graph = traced.graph_for(x)
                    # the in-place op (e.g. aten::relu_) must be back in the graph
                    self.assertTrue(
                        any(
                            n.kind() == "aten::" + eltwise_fn_name
                            for n in trace_graph.nodes()
                        )
                    )
                    y = m(x)
                    traced_y = traced(x)
                    self.assertEqual(y, traced_y)
    def test_enable_inplace(self):
        """The JIT may rewrite an out-of-place eltwise to its in-place form
        only when the op's input is not otherwise needed (not a graph input)."""
        # M_apply_inplace is for testing the successful inplace replacement condition
        class M_apply_inplace(nn.Module):
            def __init__(self, eltwise_fn, params_dict=None):
                if params_dict is None:
                    params_dict = {}
                super(M_apply_inplace, self).__init__()
                self.eltwise = eltwise_fn
                self.params_dict = params_dict
            def forward(self, x):
                # put a softmax here for the following reasons:
                # (1) x is the input; passing it to the eltwise op directly would make it unable to be inplace
                # (2) ipex::softmax will not be fused into TE with the following eltwise
                x1 = nn.Softmax(dim=-1)(x)
                x1 = self.eltwise(x1, **self.params_dict)
                return x1
        # M_remain_outplace is for testing the failed inplace replacement condition
        class M_remain_outplace(nn.Module):
            def __init__(self, eltwise_fn, params_dict=None):
                if params_dict is None:
                    params_dict = {}
                super(M_remain_outplace, self).__init__()
                self.eltwise = eltwise_fn
                self.params_dict = params_dict
            def forward(self, x):
                # eltwise applied straight to the graph input: must stay out-of-place
                x1 = self.eltwise(x, **self.params_dict)
                return x1
        for eltwise in [
            "sigmoid",
            "tanh",
            "celu",
            "elu",
            "hardsigmoid",
            "hardswish",
            "hardtanh",
            "leaky_relu",
            "relu6",
            "relu",
            "rrelu",
            "selu",
            "silu",
        ]:
            # in-place spelling expected when replacement succeeds
            eltwise_fn_name = eltwise + "_"
            if eltwise in ["sigmoid", "tanh", "celu", "relu", "rrelu", "selu"]:
                eltwise_fn_outplace = getattr(torch, eltwise)
                m_inplace = M_apply_inplace(eltwise_fn_outplace)
                m_outplace = M_remain_outplace(eltwise_fn_outplace)
            else:
                eltwise_fn = getattr(F, eltwise)
                m_inplace = M_apply_inplace(eltwise_fn)
                m_outplace = M_remain_outplace(eltwise_fn)
            with torch.no_grad():
                m_inplace.eval()
                m_outplace.eval()
                x = torch.randn(1, 3, 16, 16)
                # success path: graph ends up with the in-place op
                traced_inplace = torch.jit.trace(m_inplace, x)
                trace_graph_inplace = traced_inplace.graph_for(x)
                self.assertTrue(
                    any(
                        n.kind() == "aten::" + eltwise_fn_name
                        for n in trace_graph_inplace.nodes()
                    )
                )
                y_inplace = m_inplace(x)
                traced_y_inplace = traced_inplace(x)
                self.assertEqual(y_inplace, traced_y_inplace)
                # failure path: op stays out-of-place
                traced_outplace = torch.jit.trace(m_outplace, x)
                trace_graph_outplace = traced_outplace.graph_for(x)
                self.assertTrue(
                    any(
                        n.kind() == "aten::" + eltwise
                        for n in trace_graph_outplace.nodes()
                    )
                )
                y_outplace = m_outplace(x)
                traced_y_outplace = traced_outplace(x)
                self.assertEqual(y_outplace, traced_y_outplace)
@skipIfNoTorchVision
def test_conv_torchvision_bn_folding(self):
from torchvision.ops import misc as misc_nn_ops
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
norm_layer = misc_nn_ops.FrozenBatchNorm2d
self.inplanes = 64
self.dilation = 1
self.groups = 1
self.base_width = 64
self.conv1 = torch.nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = torch.nn.ReLU(inplace=True)
self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
return x
model = M().eval()
self._test_output(
model,
torch.randn(1, 3, 1200, 1200),
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="aten::add",
)
self._test_output(
model,
torch.randn(1, 3, 1200, 1200),
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="aten::mul",
)
self._test_output_bf16(
model,
torch.randn(1, 3, 1200, 1200),
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="aten::add",
prec=0.1,
)
self._test_output_bf16(
model,
torch.randn(1, 3, 1200, 1200),
kind_in_graph="ipex_prepack::convolution_relu_run",
kind_not_in_graph="aten::mul",
prec=0.1,
)
def test_TEfusion_with_dynamic_input(self):
model = AddMulDiv().eval()
with torch.no_grad():
traced_model = torch.jit.trace(model, torch.randn(11, 3, 20, 20)).eval()
traced_model = torch.jit.freeze(traced_model)
for i in range(5):
input = torch.randn(i, 3, 20, 20)
tresult = traced_model(input)
result = model(input)
self.assertEqual(tresult, result)
    def test_disable_linear_repack(self):
        """With ``ipex._C.disable_jit_linear_repack()``, JIT freezing must reuse
        the optimizer-prepacked linear weight buffer (same ``data_ptr``); with
        repack enabled (default) a new buffer is expected (different pointer).
        The packed weight is fetched from the ctx ScriptObject in the graph.
        """
        base = LinearRelu(10, 10).eval()
        input = torch.rand(10, 10).bfloat16()
        ipex._C.disable_jit_linear_repack()
        model = copy.deepcopy(base)
        model = ipex.optimize(model, dtype=torch.bfloat16)
        weight_ptr = model.linear.weight.data_ptr()
        trace_model = torch.jit.trace(model, input)
        trace_model = torch.jit.freeze(trace_model)
        trace_model(input)
        trace_graph = trace_model.graph_for(input)
        for n in trace_graph.nodes():
            if type(n.output().toIValue()) == torch.ScriptObject:
                # find ctx node
                jit_weight_ptr = n.output().toIValue().get_weight().data_ptr()
                # weight buffer should not be changed while not re-packing during jit optimization
                self.assertEqual(weight_ptr, jit_weight_ptr)
                break
        # NOTE(review): this call leaves repack enabled for subsequent tests;
        # if the first half raised, the flag would stay disabled.
        ipex._C.enable_jit_linear_repack()
        model = copy.deepcopy(base)
        model = ipex.optimize(model, dtype=torch.bfloat16)
        weight_ptr = model.linear.weight.data_ptr()
        trace_model = torch.jit.trace(model, input)
        trace_model = torch.jit.freeze(trace_model)
        trace_model(input)
        trace_graph = trace_model.graph_for(input)
        for n in trace_graph.nodes():
            if type(n.output().toIValue()) == torch.ScriptObject:
                # find ctx node
                jit_weight_ptr = n.output().toIValue().get_weight().data_ptr()
                # weight buffer should be changed while not re-packing during jit optimization
                self.assertNotEqual(weight_ptr, jit_weight_ptr)
                break
def test_linear_fusion_without_repack(self):
import contextlib
def disable_repack():
@contextlib.contextmanager
def ctx():
ipex._C.disable_jit_linear_repack()
try:
yield
finally:
ipex._C.enable_jit_linear_repack()
return ctx()
with contextlib.ExitStack() as stack:
stack.enter_context(disable_repack())
self._test_linear_unary_fusion(
unary_PyTorch_op_to_IPEX_op_map, cls=LinearEltwise
)
self._test_linear_unary_fusion(
PyTorch_op_to_IPEX_op_fixed_seed_map,
1654065112450588160,
cls=LinearEltwise,
)
self._test_linear_unary_fusion(
non_unary_PyTorch_op_to_IPEX_op_map, cls=LinearEltwise
)
self.test_linear_fusion_unsupported_case()
self.test_output_linear_swish()
self.test_output_linear_reshape_relu()
self.test_output_linear_add_relu()
self.test_output_linear_add()
def test_replace_PythonGELU_with_AtenGELU(self):
for i in range(5):
model_v1 = Python_GELU_Tanh_v1().eval()
input = torch.randn((1 + i) * 16, 16, 1024)
self._test_output(
model_v1,
input,
kind_in_graph="aten::gelu",
kind_not_in_graph="aten::tanh_",
)
self._test_output_bf16(
model_v1,
input.to(torch.bfloat16),
kind_in_graph="aten::gelu",
kind_not_in_graph="aten::tanh_",
prec=0.02,
)
model_v2 = Python_GELU_Tanh_v2().eval()
input = torch.randn((1 + i) * 16, 16, 1024)
self._test_output(
model_v2,
input,
kind_in_graph="aten::gelu",
kind_not_in_graph="aten::tanh_",
)
self._test_output_bf16(
model_v2,
input.to(torch.bfloat16),
kind_in_graph="aten::gelu",
kind_not_in_graph="aten::tanh_",
prec=0.02,
)
    def test_empty_weight_bias_inference(self):
        """After ipex.optimize + trace + freeze, the module's weight/bias
        attributes must be replaced by ``_ipex_module_empty`` placeholders in
        the frozen graph (real weights live in the prepacked ctx object), and
        the traced output must still match eager. Covers Conv2d / Linear /
        ConvTranspose2d, with and without a prior train-mode optimize pass.
        """
        class M(nn.Module):
            def __init__(self, module):
                super(M, self).__init__()
                self.module = module
            def forward(self, x):
                x = self.module(x)
                return x
        modules = [
            nn.Conv2d(3, 5, 3, bias=False),
            nn.Linear(3, 7),
            nn.ConvTranspose2d(3, 5, 3),
        ]
        inputs = [
            torch.randn(1, 3, 56, 56),
            torch.randn(2, 3),
            torch.randn(1, 3, 56, 56),
        ]
        for module, data in zip(modules, inputs):
            for auto_kernel_selection, train_and_eval in itertools.product(
                [True, False], [True, False]
            ):
                # Currently auto_kernel_selection only shows different behavior for nn.Linear
                if auto_kernel_selection and not isinstance(module, nn.Linear):
                    continue
                model = M(module)
                if train_and_eval:
                    # Optimize once in train mode first to exercise the
                    # train->eval re-optimization path.
                    model.train()
                    origin_optimizer1 = SGD(model.parameters(), lr=0.01, momentum=0.9)
                    model, _ = ipex.optimize(
                        model,
                        optimizer=origin_optimizer1,
                        auto_kernel_selection=auto_kernel_selection,
                    )
                model.eval()
                optimized = ipex.optimize(
                    model, auto_kernel_selection=auto_kernel_selection
                )
                with torch.no_grad():
                    traced_model = torch.jit.trace(optimized, data)
                    traced_model = torch.jit.freeze(traced_model)
                    traced_model(data)
                    graph = traced_model.graph
                    # Raw weight/bias attributes must be gone from the graph.
                    FileCheck().check_not("self.module.weight").check_not(
                        "self.module.bias"
                    ).check("_ipex_module_empty").run(graph)
                    y_ref = model(data)
                    y_traced = traced_model(data)
                    self.assertEqual(y_ref, y_traced)
if __name__ == "__main__":
    # Fixed seed keeps the randomized test inputs reproducible across runs.
    torch.manual_seed(2020)
    test = unittest.main()
| 201,624 | 36.002202 | 123 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ao_jit_llga_quantization_fuser.py | # This Python file uses the following encoding: utf-8
# !/usr/bin/env python
import unittest
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from test_ao_jit_llga_utils import (
JitLlgaTestCase,
LLGA_FUSION_GROUP,
get_eltwise_fn,
)
from torch.quantization.quantize_fx import prepare_fx, convert_fx
from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_qat_fx
from torch.ao.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
HistogramObserver,
QConfig,
)
# Weight observer shared by every static qconfig below: symmetric int8,
# observed per output channel.
default_weight_observer = PerChannelMinMaxObserver.with_args(
    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
)
# Activation-observer flavors exercised by the LLGA quantization tests:
# {MinMax, Histogram} x {per-tensor affine u8, per-tensor symmetric s8}.
static_qconfig = [
    QConfig(
        activation=MinMaxObserver.with_args(
            qscheme=torch.per_tensor_affine, dtype=torch.quint8
        ),
        weight=default_weight_observer,
    ),
    QConfig(
        activation=MinMaxObserver.with_args(
            qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
        ),
        weight=default_weight_observer,
    ),
    QConfig(
        activation=HistogramObserver.with_args(
            qscheme=torch.per_tensor_affine, dtype=torch.quint8, reduce_range=True
        ),
        weight=default_weight_observer,
    ),
    QConfig(
        activation=HistogramObserver.with_args(
            qscheme=torch.per_tensor_symmetric, dtype=torch.qint8, reduce_range=True
        ),
        weight=default_weight_observer,
    ),
]
try:
    import torchvision
    HAS_TORCHVISION = True
except (ImportError, RuntimeError):
    # torchvision may be absent, or may fail to load (e.g. an ABI mismatch
    # with the installed torch); in both cases just skip its tests.
    # (Was two identical except clauses; merged into one tuple handler.)
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class TestOp(JitLlgaTestCase):
    """Single-op int8 LLGA lowering checks.

    Each test quantizes a small module via ``checkQuantizeTrace`` and asserts
    on the number of LLGA fusion partitions and on the exact op patterns
    left in the traced JIT graph.
    """
    def test_conv_int8_in_f32_out(self):
        """Quantized Conv2d/Conv3d parameter sweep: one dequant+conv partition."""
        for [
            spatial,
            in_channels,
            out_channels,
            kernel,
            padding,
            stride,
            dilation,
            g,
            bias,
            memory_format,
            module,
        ] in itertools.product(
            [7],
            [8],
            [7],
            [3],
            [0, 2],
            [1, 2],
            [1, 2],
            [1, 2],
            [True, False],
            [torch.contiguous_format, torch.channels_last],
            [torch.nn.Conv2d, torch.nn.Conv3d],
        ):
            m = module(
                in_channels=in_channels * g,
                out_channels=out_channels * g,
                kernel_size=kernel,
                padding=padding,
                stride=stride,
                dilation=dilation,
                groups=g,
                bias=bias,
            )
            input_shape = [1, in_channels * g, spatial, spatial]
            if isinstance(m, torch.nn.Conv3d):
                input_shape.append(spatial)
                if memory_format == torch.channels_last:
                    memory_format = torch.channels_last_3d
            x = torch.rand(input_shape).to(memory_format=memory_format)
            patterns = [["aten::dequantize", "aten::_convolution"]]
            # TODO: enable more config case.
            for qconfig in static_qconfig:
                # x_var uses a different batch size (5) to exercise shape change.
                input_shape[0] = 5
                x_var = [torch.rand(input_shape, requires_grad=False)]
                graph = self.checkQuantizeTrace(
                    m, [x], x_var=x_var, atol=2e-1, qconfig=qconfig
                )
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(graph, ["aten::_convolution", "aten::dequantize"])
                self.checkPatterns(graph, patterns)
    def test_deconv_int8_in_f32_out(self):
        """Conv followed by the matching ConvTranspose: two dequant+conv partitions."""
        class M(nn.Module):
            def __init__(
                self,
                in_channels,
                out_channels,
                kernel_size,
                padding,
                stride,
                dilation,
                groups,
                bias,
                module,
            ):
                super(M, self).__init__()
                self.conv = module(
                    in_channels=in_channels * groups,
                    out_channels=out_channels * groups,
                    kernel_size=kernel_size,
                    padding=padding,
                    stride=stride,
                    dilation=dilation,
                    groups=groups,
                    bias=bias,
                )
                inverse_module = (
                    torch.nn.ConvTranspose2d
                    if (module == torch.nn.Conv2d)
                    else torch.nn.ConvTranspose3d
                )
                self.deconv = inverse_module(
                    in_channels=out_channels * groups,
                    out_channels=in_channels * groups,
                    kernel_size=kernel_size,
                    padding=padding,
                    stride=stride,
                    dilation=dilation,
                    groups=groups,
                    bias=bias,
                )
            def forward(self, x):
                y = self.conv(x)
                return self.deconv(y)
        for [
            spatial,
            in_channels,
            out_channels,
            kernel,
            padding,
            stride,
            dilation,
            g,
            bias,
            memory_format,
            module,
        ] in itertools.product(
            [7],
            [8],
            [7],
            [3],
            [0, 2],
            [1, 2],
            [1, 2],
            [1, 2],
            [True, False],
            [torch.contiguous_format, torch.channels_last],
            [torch.nn.Conv2d, torch.nn.Conv3d],
        ):
            m = M(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel,
                padding=padding,
                stride=stride,
                dilation=dilation,
                groups=g,
                bias=bias,
                module=module,
            )
            input_shape = [1, in_channels * g, spatial, spatial]
            if module == torch.nn.Conv3d:
                input_shape.append(spatial)
                if memory_format == torch.channels_last:
                    memory_format = torch.channels_last_3d
            x = torch.rand(input_shape).to(memory_format=memory_format)
            patterns = [
                ["aten::dequantize", "aten::_convolution"],
                ["aten::dequantize", "aten::_convolution"],
            ]
            # TODO: enable more config case.
            for qconfig in static_qconfig:
                input_shape[0] = 5
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                self.assertFused(graph, ["aten::_convolution", "aten::dequantize"])
                self.checkPatterns(graph, patterns)
    def test_conv_no_freeze(self):
        """Without torch.jit.freeze, the per-channel weight quantize stays
        in-graph and must fuse with dequant+conv."""
        m = nn.Conv2d(
            in_channels=3,
            out_channels=3,
            kernel_size=3,
            padding=1,
            stride=1,
            dilation=1,
            groups=1,
            bias=True,
        )
        x = torch.rand(1, 3, 5, 5)
        graph = self.checkQuantizeTrace(
            m, [x], atol=2e-1, qconfig=static_qconfig[0], freeze=False
        )
        patterns = [
            ["aten::dequantize", "aten::quantize_per_channel", "aten::_convolution"]
        ]
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(
            graph,
            ["aten::_convolution", "aten::quantize_per_channel", "aten::dequantize"],
        )
        self.checkPatterns(graph, patterns)
    def test_conv_share_dequant_weight(self):
        """One conv applied to several inputs: each use gets its own
        dequant+conv partition even though the weight dequant is shared."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = nn.Conv2d(32, 32, 3, padding=1, bias=True)
            def forward(self, x):
                # type: (List[Tensor]) -> Tensor
                all_logits = []
                for feature in x:
                    logits = self.conv(feature)
                    all_logits.append(logits)
                return torch.cat(all_logits, dim=1)
        for memory_format in [torch.contiguous_format, torch.channels_last]:
            patterns = [
                ["aten::dequantize", "aten::_convolution"],
                ["aten::dequantize", "aten::_convolution"],
                ["aten::dequantize", "aten::_convolution"],
            ]
            a = torch.randn(1, 32, 28, 28).to(memory_format=memory_format)
            b = torch.randn(1, 32, 28, 28).to(memory_format=memory_format)
            c = torch.randn(1, 32, 28, 28).to(memory_format=memory_format)
            x = [a, b, c]
            for qconfig in static_qconfig:
                m = M()
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
                self.assertFused(graph, ["aten::_convolution", "aten::dequantize"])
                self.checkPatterns(graph, patterns)
    def test_linear_int8_in_f32_out(self):
        """Single Linear: one dequant+linear partition (f32 output)."""
        for bias in [True, False]:
            x = torch.rand(32, 28)
            m = torch.nn.Linear(in_features=28, out_features=64, bias=bias)
            patterns = [
                ["aten::dequantize", "aten::linear"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=1e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(graph, ["aten::linear", "aten::dequantize"])
                self.checkPatterns(graph, patterns)
    def test_linear_int8_in_int8_out(self):
        """Two chained Linears: first partition re-quantizes its output so the
        second consumes int8."""
        class M(nn.Module):
            def __init__(self, bias):
                super(M, self).__init__()
                self.linear1 = nn.Linear(15, 20, bias=bias)
                self.linear2 = nn.Linear(20, 3, bias=bias)
            def forward(self, x, y):
                x = self.linear1(x)
                x = self.linear2(x)
                return x
        for bias in [True, False]:
            x = torch.randn(2, 15)
            y = torch.randn(2, 20)
            m = M(bias)
            patterns = [
                ["aten::dequantize", "aten::linear", "aten::quantize_per_tensor"],
                ["aten::dequantize", "aten::linear"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                self.assertFused(
                    graph,
                    ["aten::linear", "aten::quantize_per_channel", "aten::dequantize"],
                )
                self.checkPatterns(graph, patterns)
    def test_linear_int8_in_bf16_out(self):
        """int8 -> bf16 Linear: the dequant->to(bf16)->linear chain fuses."""
        class M(nn.Module):
            def __init__(self, bias):
                super(M, self).__init__()
                self.linear1 = nn.Linear(15, 20, bias=bias)
            def forward(self, x):
                x = self.linear1(x)
                return x
        for bias in [True]: # TODO:[True, False] when supported in backend
            x = torch.randn(2, 15)
            patterns = [
                ["aten::dequantize", "aten::to", "aten::linear"],
            ]
            for qconfig in static_qconfig:
                m = M(bias)
                graph = self.checkQuantizeTrace(
                    m, [x], atol=2e-1, qconfig=qconfig, int8_bf16=True
                )
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                # single aten::to won't be rewritten by llga backend
                self.assertFused(graph, ["aten::dequantize", "aten::linear"])
                self.checkPatterns(graph, patterns)
    def test_max_pool2d(self):
        """Conv + MaxPool2d sweep: pool gets its own dequant+pool+quant partition."""
        class M(nn.Module):
            def __init__(self, **kargs):
                super(M, self).__init__()
                self.conv = nn.Conv2d(3, 3, 1, 1)
                self.max_pool = nn.MaxPool2d(**kargs)
            def forward(self, x):
                x = self.conv(x)
                x = self.max_pool(x)
                return x
        for [
            spatial,
            kernel,
            padding,
            stride,
            dilation,
            ceil_mode,
            memory_format,
        ] in itertools.product(
            [15], # [15, 16], TODO: check backend
            [3, 5], # [3, 4, 5], TODO: check backend
            [0, 1],
            [1, 2], # [1, 2, 4], TODO: fix issue in pad calculation
            [1, 2],
            [True, False],
            [torch.contiguous_format, torch.channels_last],
        ):
            m = M(
                kernel_size=kernel,
                stride=stride,
                padding=padding,
                dilation=dilation,
                ceil_mode=ceil_mode,
            )
            x = torch.rand(1, 3, spatial, spatial).to(memory_format=memory_format)
            patterns = [
                [
                    "aten::dequantize",
                    "aten::dequantize",
                    "aten::_convolution",
                    "aten::quantize_per_tensor",
                ],
                ["aten::dequantize", "aten::max_pool2d", "aten::quantize_per_tensor"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=1e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                self.assertFused(graph, ["aten::max_pool2d"])
                self.checkPatterns(graph, patterns)
    def test_add_scalar_input(self):
        """Adds whose first input is a Python scalar stay un-fused aten::add."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, x):
                x_shape1 = x.size()[0]
                x_shape2 = x.size()[1]
                y1 = x_shape1 + 2
                y2 = x_shape2 + 3
                return y1 + y2
        # input[0] to add being scalar is unsupported
        x = torch.randn(3, 3)
        m = M()
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
        self.assertGraphContainsExactly(graph, "aten::add", 3)
    def test_reshape_6D_linear(self):
        """Linear fed by a 6-D reshape + transpose still fuses into one partition."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = torch.nn.Linear(
                    in_features=64, out_features=192, bias=True
                )
            def forward(self, x):
                x = x.reshape(4, 8, 7, 8, 8, 64).transpose(2, 3)
                x = self.linear(x)
                return x
        # NOTE(review): `bias` loop variable is unused by M below.
        for bias in [True, False]:
            x = torch.randn(4, 56, 64, 64)
            m = M()
            patterns = [["aten::dequantize", "aten::linear"]]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(graph, ["aten::linear", "aten::dequantize"])
                self.checkPatterns(graph, patterns)
    def test_3d_bmm_int8_in_f32_out(self):
        """torch.bmm on 3-D inputs: one dequant+bmm partition."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, x, y):
                return torch.bmm(x, y)
        x = torch.randn(128, 3, 4) * 0.1
        y = torch.randn(128, 4, 5) * 0.1
        patterns = [
            ["aten::dequantize", "aten::bmm"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::dequantize", "aten::bmm"])
        self.checkPatterns(graph, patterns)
    def test_bmm_int8_in_f32_out(self):
        """4-D torch.matmul (batched): one dequant+matmul partition."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, x, y):
                mm_res = torch.matmul(x, y)
                return mm_res
        x = torch.randn(128, 16, 384, 64) * 0.1
        y = torch.randn(128, 1, 64, 384) * 0.1
        patterns = [
            ["aten::dequantize", "aten::matmul"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::matmul"])
        self.checkPatterns(graph, patterns)
    def test_strided_bmm_int8_in_bf16_out(self):
        """matmul on strided (permuted/transposed) inputs in int8->bf16 mode."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.num_attention_heads = 16
                self.attention_head_size = 4
            def forward(self, x, y):
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                # inputs to matmul has been permuted or transposed, thus are strided tensor
                return torch.matmul(z1, z2.transpose(-1, -2))
        m = M()
        x = torch.randn(2, 3, 64)
        y = torch.randn(2, 3, 64)
        patterns = [
            ["aten::dequantize", "aten::to", "aten::matmul"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::matmul", "aten::dequantize"])
        self.checkPatterns(graph, patterns)
    def test_mixed_precision_softmax(self):
        """Attention-style matmul/div/add/softmax/matmul chain must fuse in
        both fp32-in and bf16-in int8 modes."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
            def forward(self, x, y, z, a):
                o = torch.matmul(x, y) / 8.0
                o = o + a.to(o.dtype)
                o = torch.softmax(o, -1)
                o = o.matmul(z)
                return o
        x = torch.randn(1, 16, 16, 64)
        y = torch.randn(1, 16, 64, 16)
        z = torch.randn(1, 16, 16, 64)
        a = torch.randn(1, 1, 1, 16)
        m = M()
        # fp32 in int8 out softmax
        graph = self.checkQuantizeTrace(m, [x, y, z, a], atol=2e-1, int8_bf16=False)
        self.assertFused(
            graph, ["aten::matmul", "aten::div", "aten::add", "aten::softmax"]
        )
        # bf16 in int8 out softmax
        graph = self.checkQuantizeTrace(m, [x, y, z, a], atol=2e-1, int8_bf16=True)
        self.assertFused(
            graph, ["aten::matmul", "aten::div", "aten::add", "aten::softmax"]
        )
class TestFusionPattern(JitLlgaTestCase):
def test_conv2d_eltwise(self):
class M(nn.Module):
def __init__(self, eltwise_fn):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.eltwise = eltwise_fn
def forward(self, x):
x = self.conv1(x)
x = self.eltwise(x)
x = self.conv2(x)
return x
for eltwise in [
"relu",
"leaky_relu",
"sigmoid",
"round",
"abs",
"square",
"abs",
"round",
"exp",
"hardswish",
"tanh",
"hardtanh",
"mish",
]:
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
eltwise_fn_name = eltwise + "_" if inplace else eltwise
eltwise_fn = get_eltwise_fn(eltwise_fn_name)
m = M(eltwise_fn)
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
patterns = [
[
"aten::dequantize",
"aten::_convolution",
"aten::" + eltwise,
"aten::quantize_per_tensor",
], # inplace op will become outplace op on the JIT graph
["aten::dequantize", "aten::_convolution"],
]
for qconfig in static_qconfig:
graph = self.checkQuantizeTrace(
m, [x], atol=2e-1, qconfig=qconfig
)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
self.assertFused(
graph,
[
"aten::_convolution",
"aten::" + eltwise,
"aten::quantize_per_channel",
"aten::dequantize",
],
)
self.checkPatterns(graph, patterns)
def test_conv2d_clamp(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv4 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
self.conv5 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
def forward(self, x):
x = self.conv1(x)
x = torch.clamp(x, min=float("-inf"))
x = self.conv2(x)
x = torch.clamp(x, min=-5)
x = self.conv3(x)
x = torch.clamp(x, min=0, max=float("inf"))
x = self.conv4(x)
x = torch.clamp(x, min=1, max=5)
x = self.conv5(x)
x = torch.clamp(x, max=2)
return x
for inplace in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
m = M()
for qconfig in static_qconfig:
graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 5)
self.assertFused(
graph,
[
"aten::_convolution",
"aten::" + "clamp",
"aten::quantize_per_channel",
"aten::dequantize",
],
)
    def test_conv2d_silu(self):
        """Conv2d + SiLU (in/out-of-place) + Conv2d: SiLU is decomposed by the
        bridge into sigmoid*mul inside the first conv partition."""
        class M(nn.Module):
            def __init__(self, inplace):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = nn.SiLU(inplace=inplace)
            def forward(self, x):
                x = self.conv1(x)
                x = self.eltwise(x)
                x = self.conv2(x)
                return x
        for inplace in [False, True]:
            for memory_format in [torch.contiguous_format, torch.channels_last]:
                m = M(inplace)
                x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                silu_op = "aten::silu_" if inplace else "aten::silu"
                # oneDNN graph does not have silu OP. The bridge will convert silu to sigmoid - mul
                patterns = [
                    [
                        "aten::dequantize",
                        "aten::_convolution",
                        "aten::sigmoid",
                        "aten::mul",
                        "aten::quantize_per_tensor",
                    ], # inplace op will become outplace op on the JIT graph
                    ["aten::dequantize", "aten::_convolution"],
                ]
                self.assertFused(
                    graph, ["aten::_convolution", silu_op, "aten::dequantize"]
                )
                self.checkPatterns(graph, patterns)
def test_deconv_silu(self):
class M(nn.Module):
def __init__(self, inplace):
super(M, self).__init__()
self.deconv = nn.ConvTranspose2d(3, 2, 3, stride=2)
self.eltwise = nn.SiLU(inplace=inplace)
def forward(self, x):
x = self.deconv(x)
x = self.eltwise(x)
return x
for inplace in [False, True]:
m = M(inplace)
x = torch.rand(1, 3, 28, 28)
graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
patterns = [
["aten::dequantize", "aten::_convolution", "aten::sigmoid", "aten::mul"]
]
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.checkPatterns(graph, patterns)
    def test_ensure_tensor_is_rewrapped(self):
        """The output of an LLGA partition fed into an unsupported op
        (adaptive_avg_pool2d) must be a plain tensor, not an LlgaTensorImpl."""
        class M(nn.Module):
            def __init__(self, eltwise_fn):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = eltwise_fn
                self.adaptive_avg_pool_2d = nn.AdaptiveAvgPool2d((5, 7))
            def forward(self, x, y):
                x = self.conv1(x)
                y = self.conv2(y)
                y = self.eltwise(y)
                x = torch.add(x, y)
                x = self.adaptive_avg_pool_2d(x)
                return x
        eltwise_fn_name = "relu"
        eltwise_fn = get_eltwise_fn(eltwise_fn_name)
        m = M(eltwise_fn)
        x = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
        y = torch.rand(1, 32, 28, 28).to(memory_format=torch.channels_last)
        for qconfig in static_qconfig:
            # The output of the fourth partition is input to adaptive_avg_pool2d, which is
            # unsupported by LLGA. In resnext101 32x16d, we had encountered an accuracy issue.
            # The UT checks that the input to adaptive_avg_pool_2d has not been wrapped by
            # LlgaTensorImpl (assertEqual would fail in that case).
            graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
def test_conv2d_bn(self):
class M(nn.Module):
def __init__(self, bias):
super(M, self).__init__()
self.conv1 = nn.Conv2d(32, 5, 3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(5)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
return x
for bias in [False, True]:
for memory_format in [torch.contiguous_format, torch.channels_last]:
m = M(bias).eval()
x = torch.rand(1, 32, 16, 16).to(memory_format=memory_format)
# TODO: This shape will fail
# x = torch.rand(1, 32, 28, 28)
patterns = [["aten::dequantize", "aten::_convolution"]]
# TODO: add torch.per_tensor_symmetric case.
for qconfig in static_qconfig:
graph = self.checkQuantizeTrace(m, [x], atol=1e-1, qconfig=qconfig)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(
graph,
[
"aten::_convolution",
"aten::quantize_per_channel",
"aten::dequantize",
],
)
self.checkPatterns(graph, patterns)
    def test_conv2d_bn_relu(self):
        """Conv2d + BatchNorm2d + ReLU: BN folds into conv, and relu joins the
        same dequant+conv partition."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.bn1 = nn.BatchNorm2d(32)
            def forward(self, x):
                x = self.conv1(x)
                x = self.bn1(x)
                x = F.relu(x)
                return x
        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = M().eval()
            x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
            patterns = [
                ["aten::dequantize", "aten::_convolution", "aten::relu"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=1e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(
                    graph,
                    ["aten::_convolution", "aten::relu", "aten::quantize_per_channel"],
                )
                self.checkPatterns(graph, patterns)
    def test_linear_bn(self):
        """Linear followed by BatchNorm1d/2d/3d: linear fuses into a
        dequant+linear partition and the BN is lowered to ipex::batch_norm."""
        class M(nn.Module):
            def __init__(self, dim):
                super(M, self).__init__()
                self.linear = nn.Linear(32, 32)
                # Input shape is chosen per BN dimensionality.
                if dim == 1:
                    self.input1 = torch.randn(1, 32)
                    self.bn = nn.BatchNorm1d(32)
                elif dim == 2:
                    self.input1 = torch.randn(1, 32, 32, 32)
                    self.bn = nn.BatchNorm2d(32)
                elif dim == 3:
                    self.input1 = torch.randn(1, 32, 32, 32, 32)
                    self.bn = nn.BatchNorm3d(32)
            def forward(self, x):
                x = self.linear(x)
                x = self.bn(x)
                return x
        for dim in [1, 2, 3]:
            m = M(dim=dim)
            x = m.input1
            patterns = [["aten::dequantize", "aten::linear"]]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(graph, ["ipex::batch_norm"])
                self.checkPatterns(graph, patterns)
    def test_conv_bn_linear_bn(self):
        """Conv+BN then Linear+BN: two fused partitions (dequant+conv and
        dequant+linear); both BNs are handled (lowered to ipex::batch_norm
        and/or folded)."""
        class M(nn.Module):
            def __init__(
                self,
            ):
                super(M, self).__init__()
                self.input1 = torch.randn(1, 32, 32, 32)
                self.conv = nn.Conv2d(32, 32, 1)
                self.bn1 = nn.BatchNorm2d(32)
                self.linear = nn.Linear(32, 32)
                self.bn2 = nn.BatchNorm2d(32)
            def forward(self, x):
                x = self.conv(x)
                x = self.bn1(x)
                x = self.linear(x)
                x = self.bn2(x)
                return x
        m = M()
        x = m.input1
        patterns = [
            ["aten::dequantize", "aten::_convolution"],
            ["aten::dequantize", "aten::linear"],
        ]
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
            self.assertFused(graph, ["ipex::batch_norm"])
            self.checkPatterns(graph, patterns)
    def test_linear_eltwise(self):
        """Linear + unary eltwise (relu/sigmoid, with/without bias) fuses into
        one dequant+linear+eltwise partition; also re-checked with a
        different batch size via x_var."""
        class M(nn.Module):
            def __init__(self, eltwise_fn, bias):
                super(M, self).__init__()
                self.linear = nn.Linear(28, 64, bias)
                self.eltwise = eltwise_fn
            def forward(self, x):
                x = self.linear(x)
                x = self.eltwise(x)
                return x
        # TODO: use itertools.product once all combinations is supported
        for [has_bias, eltwise] in [
            [True, "relu"],
            [False, "relu"],
            # [True, 'gelu'], # TODO: enable it once linear_gelu default recipe is fixed
            # [False, 'gelu'], # TODO: enable it once linear_gelu default recipe is fixed
            [True, "sigmoid"],
            [False, "sigmoid"],
        ]:
            eltwise_fn = get_eltwise_fn(eltwise)
            m = M(eltwise_fn, has_bias)
            x = torch.rand(32, 28, requires_grad=False)
            patterns = [
                ["aten::dequantize", "aten::linear", "aten::" + eltwise],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(
                    m,
                    [x],
                    x_var=[torch.rand(2, 28, requires_grad=False)],
                    atol=1e-1,
                    qconfig=qconfig,
                )
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(graph, ["aten::" + eltwise])
                self.checkPatterns(graph, patterns)
def test_linear_silu(self):
class M(nn.Module):
def __init__(self, inplace):
super(M, self).__init__()
self.linear = nn.Linear(28, 64)
self.eltwise = nn.SiLU(inplace=inplace)
def forward(self, x):
x = self.linear(x)
x = self.eltwise(x)
return x
for inplace in [False, True]:
m = M(inplace)
x = torch.rand(1, 28, requires_grad=False)
silu_op = "aten::silu_" if inplace else "aten::silu"
patterns = [
["aten::dequantize", "aten::linear", "aten::sigmoid", "aten::mul"],
]
graph = self.checkQuantizeTrace(m, [x], atol=1e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::linear", silu_op, "aten::dequantize"])
self.checkPatterns(graph, patterns)
    def test_conv_relu_sigmoid_mul(self):
        """A relu output consumed by both a later conv and a final mul: three
        partitions, the last fusing conv+sigmoid+mul (see diagram below)."""
        # dequant
        # |
        # conv
        # |
        # relu
        # / |
        # quant |
        # / |
        # dequant |
        # | |
        # conv |
        # | |
        # relu |
        # | |
        # quant |
        # | |
        # dequant |
        # | |
        # conv |
        # | |
        # sigmoid |
        # \ /
        # mul
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1)
                self.conv3 = nn.Conv2d(32, 32, 3, padding=1)
            def forward(self, x):
                x = self.conv1(x)
                # The output y of relu is used by mul
                y = x.relu()
                z = self.conv2(y)
                z = z.relu()
                z = self.conv3(z)
                z = z.sigmoid()
                z = z.mul(y)
                return z
        x = torch.rand(1, 32, 16, 16, requires_grad=False)
        m = M()
        graph = self.checkQuantizeTrace(m, [x], atol=1e-1)
        patterns = [
            ["aten::dequantize", "aten::_convolution", "aten::relu"],
            [
                "aten::dequantize",
                "aten::_convolution",
                "aten::relu",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::_convolution", "aten::sigmoid", "aten::mul"],
        ]
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
        self.assertFused(
            graph, ["aten::_convolution", "aten::relu", "aten::sigmoid", "aten::mul"]
        )
        self.checkPatterns(graph, patterns)
def test_conv_eltwise_tensor_method(self):
class ConvSigmoid(nn.Module):
def __init__(self):
super(ConvSigmoid, self).__init__()
self.conv = nn.Conv2d(32, 32, 3, padding=1)
def forward(self, x):
x = self.conv(x)
x = x.sigmoid()
return x
class ConvReLU(nn.Module):
def __init__(self):
super(ConvReLU, self).__init__()
self.conv = nn.Conv2d(32, 32, 3, padding=1)
def forward(self, x):
x = self.conv(x)
x = x.relu()
return x
m = ConvSigmoid().eval()
x = torch.rand(1, 32, 16, 16, requires_grad=False)
patterns = [["aten::dequantize", "aten::_convolution", "aten::sigmoid"]]
graph = self.checkQuantizeTrace(m, [x], atol=1e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::_convolution", "aten::sigmoid"])
self.checkPatterns(graph, patterns)
m = ConvReLU().eval()
x = torch.rand(1, 32, 16, 16, requires_grad=False)
patterns = [["aten::dequantize", "aten::_convolution", "aten::relu"]]
graph = self.checkQuantizeTrace(m, [x], atol=1e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::_convolution", "aten::relu"])
self.checkPatterns(graph, patterns)
    def test_conv2d_sum(self):
        """Residual pattern: two conv+BN branches added then relu'd, feeding a
        third conv+BN. Expect three partitions; the add+relu joins the second
        conv's partition."""
        class M(nn.Module):
            def __init__(self, bias=False):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn1 = nn.BatchNorm2d(32)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn2 = nn.BatchNorm2d(32)
                self.relu = nn.ReLU()
                self.conv3 = nn.Conv2d(32, 32, 3, padding=1, bias=bias)
                self.bn3 = nn.BatchNorm2d(32)
            def forward(self, x, y):
                x = self.conv1(x)
                x = self.bn1(x)
                y = self.conv2(y)
                y = self.bn2(y)
                z = self.relu(x + y)
                z = self.conv3(z)
                z = self.bn3(z)
                return z
        for bias in [True, False]:
            for memory_format in [torch.contiguous_format, torch.channels_last]:
                m = M(bias).eval()
                x = torch.rand(1, 32, 16, 16, requires_grad=False).to(
                    memory_format=memory_format
                )
                y = torch.rand(1, 32, 16, 16, requires_grad=False).to(
                    memory_format=memory_format
                )
                patterns = [
                    [
                        "aten::dequantize",
                        "aten::_convolution",
                        "aten::quantize_per_tensor",
                    ],
                    [
                        "aten::dequantize",
                        "aten::_convolution",
                        "aten::relu",
                        "aten::add",
                        "aten::quantize_per_tensor",
                    ],
                    ["aten::dequantize", "aten::_convolution"],
                ]
                for qconfig in static_qconfig:
                    graph = self.checkQuantizeTrace(
                        m, [x, y], atol=1e-1, qconfig=qconfig
                    )
                    self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
                    self.assertFused(
                        graph,
                        [
                            "aten::_convolution",
                            "aten::relu",
                            "aten::add",
                            "aten::quantize_per_channel",
                            "aten::dequantize",
                        ],
                    )
                    self.checkPatterns(graph, patterns)
def test_add_quantization(self):
class M(nn.Module):
def __init__(self, bias=False):
super(M, self).__init__()
self.conv1 = nn.Conv2d(16, 16, 1)
self.conv2 = nn.Conv2d(16, 16, 1)
def forward(self, x):
x = self.conv1(x)
y = self.conv2(x)
y = y.mul(10)
z = torch.add(x, y)
return z
m = M().eval()
x = torch.rand(1, 16, 16, 16, requires_grad=False)
x2 = torch.rand(1, 16, 16, 16, requires_grad=False)
patterns = [
["aten::dequantize", "aten::_convolution"],
["aten::dequantize", "aten::_convolution"],
]
graph = self.checkQuantizeTrace(m, [x], atol=1e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
self.assertFused(graph, ["aten::_convolution", "aten::quantize_per_channel"])
self.checkPatterns(graph, patterns)
    def test_conv2d_sigmoid_mul_(self):
        """Conv -> sigmoid -> in-place mul (swish): expect one fused INT8
        partition.  The M2 variant adds an alias of the conv output, which
        blocks the sigmoid/mul from fusing — only the conv fuses."""

        class M(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, image_size):
                super(M, self).__init__()
                # NOTE(review): the 4th positional Conv2d arg is `stride`;
                # passing image_size (=224) gives a 1x1 spatial output.
                # Possibly unintended, but the graph checks still hold — confirm.
                self.conv = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size, image_size
                )

            def forward(self, x):
                a = self.conv(x)
                b = torch.sigmoid(a)
                res = a.mul_(b)
                return res

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = M(3, 16, 3, 224).eval()
            x = torch.rand(1, 3, 224, 224, requires_grad=False).to(
                memory_format=memory_format
            )
            patterns = [
                [
                    "aten::dequantize",
                    "aten::_convolution",
                    "aten::sigmoid",
                    "aten::mul",
                ],
            ]
            # NOTE(review): `qscheme` is not forwarded to checkQuantizeTrace,
            # so this loop just repeats the identical trace twice — confirm intent.
            for qscheme in [torch.per_tensor_affine, torch.per_tensor_symmetric]:
                graph = self.checkQuantizeTrace(m, [x])
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(
                    graph,
                    [
                        "aten::_convolution",
                        "aten::sigmoid",
                        "aten::mul",
                        "aten::quantize_per_channel",
                        "aten::dequantize",
                    ],
                )
                self.checkPatterns(graph, patterns)

        # inplace mul_ cannot be replaced with mul
        class M2(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, image_size):
                super(M2, self).__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size, image_size
                )

            def forward(self, x):
                a = self.conv(x)
                b = torch.sigmoid(a)
                # c aliases a, so the in-place mul_ below also mutates c.
                c = a[0]
                res = a.mul_(b)
                c += 2
                return c

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = M2(3, 16, 3, 224).eval()
            x = torch.rand(1, 3, 224, 224, requires_grad=False).to(
                memory_format=memory_format
            )
            patterns = [
                ["aten::dequantize", "aten::_convolution"],
            ]
            for qscheme in [torch.per_tensor_affine, torch.per_tensor_symmetric]:
                graph = self.checkQuantizeTrace(m, [x])
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(
                    graph,
                    [
                        "aten::_convolution",
                        "aten::quantize_per_channel",
                        "aten::dequantize",
                    ],
                )
                self.checkPatterns(graph, patterns)

    def test_conv2d_hardsigmoid_mul_(self):
        """Conv -> hardsigmoid -> in-place mul (hard-swish): expect one
        fused INT8 partition containing the whole chain."""

        class M(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, image_size):
                super(M, self).__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size, image_size
                )
                self.activation = torch.nn.Hardsigmoid()

            def forward(self, x):
                a = self.conv(x)
                b = self.activation(a)
                res = a.mul_(b)
                return res

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = M(3, 16, 3, 224).eval()
            x = torch.rand(1, 3, 224, 224, requires_grad=False).to(
                memory_format=memory_format
            )
            patterns = [
                [
                    "aten::dequantize",
                    "aten::_convolution",
                    "aten::hardsigmoid",
                    "aten::mul",
                ],
            ]
            for qscheme in [torch.per_tensor_affine, torch.per_tensor_symmetric]:
                graph = self.checkQuantizeTrace(m, [x])
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
                self.assertFused(
                    graph,
                    [
                        "aten::_convolution",
                        "aten::hardsigmoid",
                        "aten::mul",
                        "aten::quantize_per_channel",
                        "aten::dequantize",
                    ],
                )
                self.checkPatterns(graph, patterns)
    def test_linear_dropout_sum(self):
        """linear -> dropout -> add -> linear: dropout is a no-op in eval,
        so two fused linear partitions are expected."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Linear(15, 20)
                self.dropout = nn.Dropout()
                self.linear2 = nn.Linear(20, 3)

            def forward(self, x, y):
                x = self.linear1(x)
                x = self.dropout(x)
                z = self.linear2(x + y)
                return z

        x = torch.randn(2, 15)
        y = torch.randn(2, 20)
        m = M()
        patterns = [
            [
                "aten::dequantize",
                "aten::linear",
                "aten::add",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::linear"],
        ]
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
            self.assertFused(
                graph,
                [
                    "aten::linear",
                    "aten::add",
                    "aten::quantize_per_channel",
                    "aten::dequantize",
                ],
            )
            self.checkPatterns(graph, patterns)

    def test_linear_sum_inplace(self):
        """linear followed by in-place += of another tensor: the linear
        still fuses into a single INT8 partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Linear(15, 20)

            def forward(self, x, y):
                x = self.linear1(x)
                x += y.clone()
                return x

        x = torch.randn(2, 15)
        y = torch.randn(2, 20)
        m = M()
        patterns = [
            ["aten::dequantize", "aten::linear", "aten::dequantize"],
        ]
        # HistogramObserver failed, need to do some checks?
        for qconfig in static_qconfig[:2]:
            graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, qconfig=qconfig)
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
            self.assertFused(
                graph,
                ["aten::linear", "aten::quantize_per_channel", "aten::dequantize"],
            )
            self.checkPatterns(graph, patterns)

    def test_linear_dropout_sum_bf16(self):
        """Same linear/dropout/add topology in INT8+BF16 mode; `aten::to`
        (bf16 casts) appear inside the fused patterns."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Linear(15, 20, bias=True)
                self.dropout = nn.Dropout()
                self.linear2 = nn.Linear(15, 20, bias=True)

            def forward(self, x, y):
                x = self.linear1(x)
                x = self.dropout(x)
                z = self.linear2(y) + x
                return z

        x = torch.randn(2, 15)
        y = torch.randn(2, 15)
        m = M()
        patterns = [
            [
                "aten::dequantize",
                "aten::to",
                "aten::linear",
                "aten::to",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::to", "aten::linear", "aten::add"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
        # TODO: oneDNN primitive raised more limitations to sum post-ops, it forced fusion changes on oneDNN graph side.
        # The dequant node connected to aten::add can't be fused into the INT8 linear-add partition any more.
        # oneDNN graph expects no end to end model performance impact.
        # Revisit this change if validation has found model level regression.
        self.assertFused(graph, ["aten::linear", "aten::add"])
        self.checkPatterns(graph, patterns)

    def test_linear_gelu_bf16(self):
        """linear -> GELU -> linear in INT8+BF16, also exercising a
        variable-batch re-trace via `x_var`."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(28, 64, bias=True)
                self.eltwise = nn.GELU()
                self.linear2 = nn.Linear(64, 1, bias=True)

            def forward(self, x):
                x = self.linear(x)
                x = self.eltwise(x)
                x = self.linear2(x)
                return x

        patterns = [
            [
                "aten::dequantize",
                "aten::to",
                "aten::linear",
                "aten::gelu",
                "aten::to",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::to", "aten::linear"],
        ]
        m = M()
        x = torch.rand(32, 28, requires_grad=False)
        # NOTE(review): `qscheme` is unused inside the loop body — confirm intent.
        for qscheme in [torch.per_tensor_affine]:
            graph = self.checkQuantizeTrace(
                m,
                [x],
                x_var=[torch.rand(2, 28, requires_grad=False)],
                atol=1e-1,
                int8_bf16=True,
            )
            self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
            self.assertFused(graph, ["aten::dequantize", "aten::linear", "aten::gelu"])
            self.checkPatterns(graph, patterns)
    def test_defer_size(self):
        """conv-relu followed by conv whose output is reshaped using a
        runtime size: both conv partitions should still fuse."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = nn.ReLU()

            def forward(self, x):
                x = self.conv1(x)
                x = self.eltwise(x)
                y = self.conv2(x)
                # Reshape driven by a runtime x.size(0) — exercises deferred size.
                y = y.reshape(x.size(0), -1)
                return y

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = M()
            x = torch.rand(1, 32, 28, 28).to(memory_format=memory_format)
            patterns = [
                [
                    "aten::dequantize",
                    "aten::_convolution",
                    "aten::relu",
                    "aten::quantize_per_tensor",
                ],
                ["aten::dequantize", "aten::_convolution"],
            ]
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
                self.assertFused(
                    graph,
                    [
                        "aten::_convolution",
                        "aten::relu",
                        "aten::quantize_per_channel",
                        "aten::dequantize",
                    ],
                )
                self.checkPatterns(graph, patterns)
    def test_lift_up_quant(self):
        """Two linears whose outputs are viewed/permuted before a matmul:
        quantize ops should be lifted above the view/permute so all three
        gemm-like ops form fused partitions."""

        class M(nn.Module):
            def __init__(self, bias):
                super(M, self).__init__()
                self.linear = nn.Linear(28, 64, bias)
                self.linear2 = nn.Linear(28, 64, bias=True)
                self.num_attention_heads = 16
                self.attention_head_size = 4

            def forward(self, x, y):
                x = self.linear(x)
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                y = self.linear2(y)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                return torch.matmul(z1, z2.transpose(-1, -2))

        m = M(bias=True)
        x = torch.randn(2, 3, 28)
        y = torch.randn(2, 3, 28)
        patterns = [
            ["aten::dequantize", "aten::linear", "aten::quantize_per_tensor"],
            ["aten::dequantize", "aten::linear", "aten::quantize_per_tensor"],
            ["aten::dequantize", "aten::matmul"],
        ]
        # TODO: test shape fallback
        graph = self.checkQuantizeTrace(m, [x, y], atol=1e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
        self.assertFused(graph, ["aten::dequantize", "aten::linear", "aten::matmul"])
        self.checkPatterns(graph, patterns)

    def test_lift_up_to_quant_bf16(self):
        """Same lift-up topology as test_lift_up_quant, in INT8+BF16 mode
        (patterns include the bf16 `aten::to` casts)."""

        class M(nn.Module):
            def __init__(self, bias):
                super(M, self).__init__()
                self.linear = nn.Linear(28, 64, bias)
                self.linear2 = nn.Linear(28, 64, bias=True)
                self.num_attention_heads = 16
                self.attention_head_size = 4

            def forward(self, x, y):
                x = self.linear(x)
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                y = self.linear2(y)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                return torch.matmul(z1, z2.transpose(-1, -2))

        m = M(bias=True)
        x = torch.randn(2, 3, 28)
        y = torch.randn(2, 3, 28)
        patterns = [
            [
                "aten::dequantize",
                "aten::to",
                "aten::linear",
                "aten::to",
                "aten::quantize_per_tensor",
            ],
            [
                "aten::dequantize",
                "aten::to",
                "aten::linear",
                "aten::to",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::to", "aten::matmul"],
        ]
        # TODO: test shape fallback
        graph = self.checkQuantizeTrace(m, [x, y], atol=1e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
        self.assertFused(graph, ["aten::dequantize", "aten::linear", "aten::matmul"])
        self.checkPatterns(graph, patterns)

    def test_lift_up_quant_unsupported(self):
        """A case where lifting quant above a view is invalid: the view
        output also feeds an fp32 add, so only two partitions form."""
        # Original graph:
        #            |
        #           view
        #      / (f32)\   /(f32)
        #   quant      add
        #     |
        # Lifting up in this case will raise:
        # promoteTypes with quantized numbers is not handled in aten::add;
        #     |
        #   quant
        #     |
        #    view
        # (int8)\   /(f32)
        #        add
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(3, 8, 1)
                self.conv2 = nn.Conv2d(8, 8, 1)

            def forward(self, x, y):
                x = self.conv1(x)
                z1 = x.permute(0, 3, 1, 2)
                z2 = self.conv2(z1)
                z = z1 + y
                output = z2 + z
                return output

        x = torch.randn(1, 3, 8, 8)
        y = torch.randn(1, 8, 8, 8)
        m = M()
        patterns = [
            ["aten::dequantize", "aten::_convolution"],
            ["aten::dequantize", "aten::_convolution", "aten::add"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
        # TODO: oneDNN primitive raised more limitations to sum post-ops, it forced fusion changes on oneDNN graph side.
        # The dequant node connected to aten::add can't be fused into the INT8 conv-add partition any more.
        # oneDNN graph expects no end to end model performance impact.
        # Revisit this change if validation has found model level regression.
        self.assertFused(graph, ["aten::_convolution"])
        self.checkPatterns(graph, patterns)
    def test_wildcard(self):
        """Conv output consumed by both relu and a ListConstruct (wildcard
        op): the conv-relu pair must NOT land in the same partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = nn.ReLU()

            def forward(self, x):
                x = self.conv1(x)
                y = self.eltwise(x)
                return [x, y]

        # The pattern is as the following:
        #      conv
        #     |    \
        # eltwise   \
        #    |       \
        #  ListConstruct
        #
        # The output of conv is used by a wildcard op: ListConstruct.
        # Thus conv-eltwise cannot be selected into the same Partition.
        m = M()
        x = torch.rand(1, 32, 28, 28)
        patterns = [
            ["aten::dequantize", "aten::_convolution"],
        ]
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertGraphContainsExactly(graph, "aten::relu", 1)
        self.assertFused(graph, ["aten::_convolution", "aten::quantize_per_channel"])
        self.checkPatterns(graph, patterns)
def test_bmm_div_scalar(self):
class M(nn.Module):
def __init__(self, div_value):
super(M, self).__init__()
self.div_value = div_value
def forward(self, x, y):
mm_res = torch.matmul(x, y)
return mm_res.div(self.div_value)
x = torch.randn(1, 16, 384, 64)
y = torch.randn(1, 1, 64, 384)
patterns = [
["aten::dequantize", "aten::matmul", "aten::div"],
]
m = M(8.0)
graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.assertFused(graph, ["aten::matmul", "aten::div"])
self.checkPatterns(graph, patterns)
    def test_bmm_div_identity(self):
        """Divide by 1.0 must be constant-folded away, leaving a plain
        fused matmul partition."""

        class M(nn.Module):
            def __init__(self, div_value):
                super(M, self).__init__()
                self.div_value = div_value

            def forward(self, x, y):
                mm_res = torch.matmul(x, y)
                return mm_res.div(self.div_value)

        x = torch.randn(1, 16, 384, 64) * 0.1
        y = torch.randn(1, 1, 64, 384) * 0.1
        patterns = [
            ["aten::dequantize", "aten::matmul"],
        ]
        m = M(1.0)
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
        # divide by 1 should be removed by Constant Propagation
        self.assertGraphContainsExactly(graph, "aten::div", 0, consider_subgraphs=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::matmul"])
        self.checkPatterns(graph, patterns)

    def test_bmm_div_tensor(self):
        """matmul divided by a runtime tensor (1-element) still fuses into
        one partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, x, y, z):
                mm_res = torch.matmul(x, y)
                return mm_res.div(z)

        x = torch.randn(1, 16, 384, 64) * 0.1
        y = torch.randn(1, 1, 64, 384) * 0.1
        z = torch.randn(
            1
        )  # TODO: enable torch.randn(20) and torch.randn(1, 1, 20, 20) once backend supported them
        patterns = [
            ["aten::dequantize", "aten::matmul", "aten::div"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y, z], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::matmul", "aten::div"])
        self.checkPatterns(graph, patterns)

    def test_bmm_div_int8_in_bf16_out(self):
        """matmul/div with INT8 inputs producing BF16 output: fused pattern
        includes the `aten::to` cast."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, x, y):
                mm_res = torch.matmul(x, y) / 2
                return mm_res

        x = torch.randn(1, 16, 384, 64) * 0.1
        y = torch.randn(1, 1, 64, 384) * 0.1
        patterns = [
            ["aten::dequantize", "aten::to", "aten::matmul", "aten::div"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        # single aten::to won't be rewritten by llga backend
        self.assertFused(graph, ["aten::dequantize", "aten::matmul", "aten::div"])
        self.checkPatterns(graph, patterns)

    def test_bmm_method_bf16(self):
        """Tensor.matmul method form (not torch.matmul) fuses in INT8+BF16."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, x, y):
                mm_res = x.matmul(y)
                return mm_res

        x = torch.randn(1, 16, 384, 64) * 0.1
        y = torch.randn(1, 1, 64, 384) * 0.1
        patterns = [
            ["aten::dequantize", "aten::to", "aten::matmul"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        # single aten::to won't be rewritten by llga backend
        self.assertFused(graph, ["aten::dequantize", "aten::matmul"])
        self.checkPatterns(graph, patterns)

    def test_bmm_method_fp32(self):
        """Tensor.matmul method form fuses in plain INT8+FP32 mode."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, x, y):
                mm_res = x.matmul(y)
                return mm_res

        x = torch.randn(1, 16, 384, 64) * 0.1
        y = torch.randn(1, 1, 64, 384) * 0.1
        patterns = [
            ["aten::dequantize", "aten::matmul"],
        ]
        m = M()
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::dequantize", "aten::matmul"])
        self.checkPatterns(graph, patterns)

    def test_strided_bmm_div_int8_in_bf16_out(self):
        """matmul on strided (viewed/permuted) inputs with div, INT8 in /
        BF16 out: the whole chain fuses into one partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.num_attention_heads = 16
                self.attention_head_size = 4

            def forward(self, x, y):
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                # inputs to matmul has been permuted or transposed, thus are strided tensor
                return torch.matmul(z1, z2.transpose(-1, -2)) / 0.4

        m = M()
        x = torch.randn(2, 3, 64)
        y = torch.randn(2, 3, 64)
        patterns = [
            ["aten::dequantize", "aten::to", "aten::matmul", "aten::div"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(graph, ["aten::matmul", "aten::dequantize"])
        self.checkPatterns(graph, patterns)
    def test_bmm_div_add_int8_fp32(self):
        """Attention-score shape: strided matmul -> div -> add (mask) fuses
        into one INT8+FP32 partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.num_attention_heads = 16
                self.attention_head_size = 4

            def forward(self, x, y, z):
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                # inputs to matmul has been permuted or transposed, thus are strided tensor
                s = torch.matmul(z1, z2.transpose(-1, -2)) / 0.4
                s = s + z
                return s

        m = M()
        x = torch.randn(2, 3, 64)
        y = torch.randn(2, 3, 64)
        z = torch.randn(2, 1, 1, 3)
        patterns = [
            ["aten::dequantize", "aten::matmul", "aten::div", "aten::add"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y, z], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(
            graph, ["aten::matmul", "aten::dequantize", "aten::div", "aten::add"]
        )
        self.checkPatterns(graph, patterns)
@unittest.skip("Graph Compiler unit-test")
def test_mha_pattern_int8_fp32(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.linear = nn.Linear(1024, 1024, False)
def forward(self, x, y, z, a):
x = x.permute(0, 2, 1, 3)
y = y.permute(0, 2, 1, 3)
y = y.transpose(-1, -2)
z = z.permute(0, 2, 1, 3)
tmp = torch.matmul(x, y) / 8.0 + a
tmp = torch.softmax(tmp, -1)
tmp = tmp.matmul(z)
tmp = tmp.permute(0, 2, 1, 3)
tmp = tmp.contiguous()
tmp = tmp.view(1, 16, 1024)
tmp = self.linear(tmp)
return tmp
x = torch.randn(1, 16, 16, 64)
y = torch.randn(1, 16, 16, 64)
z = torch.randn(1, 16, 16, 64)
m = M()
a = torch.randn(1, 1, 1, 16)
graph = self.checkQuantizeTrace(m, [x, y, z, a], atol=2e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
self.assertFused(
graph,
[
"aten::matmul",
"aten::div",
"aten:add",
"aten:softmax",
"aten::contiguous",
"aten::dequantize",
],
)
@unittest.skip("Graph Compiler unit-test")
def test_mha_pattern_int8_bf16(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.linear = nn.Linear(1024, 1024, False)
def forward(self, x, y, z, a):
x = x.permute(0, 2, 1, 3)
y = y.permute(0, 2, 1, 3)
y = y.transpose(-1, -2)
z = z.permute(0, 2, 1, 3)
tmp = torch.matmul(x, y) / 8.0 + a
tmp = torch.softmax(tmp, -1)
tmp = tmp.matmul(z)
tmp = tmp.permute(0, 2, 1, 3)
tmp = tmp.contiguous()
tmp = tmp.view(1, 16, 1024)
tmp = self.linear(tmp)
return tmp
x = torch.randn(1, 16, 16, 64)
y = torch.randn(1, 16, 16, 64)
z = torch.randn(1, 16, 16, 64)
m = M()
a = torch.randn(1, 1, 1, 16, dtype=torch.bfloat16)
graph = self.checkQuantizeTrace(
m,
[x, y, z, a],
atol=2e-1,
config_name="mha_pattern",
qscheme=torch.per_tensor_affine,
int8_bf16=True,
)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 5)
self.assertFused(
graph,
[
"aten::matmul",
"aten::div",
"aten:add",
"aten:softmax",
"aten::contiguous",
"aten::dequantize",
"aten::quantize_per_tensor",
],
)
    def test_bmm_div_add_int8_bf16(self):
        """Strided matmul -> div -> add with a dtype cast of the mask, in
        INT8+BF16: the full chain fuses into one partition."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.num_attention_heads = 16
                self.attention_head_size = 4

            def forward(self, x, y, z):
                new_x_shape = x.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                x = x.view(*new_x_shape)
                z1 = x.permute(0, 2, 1, 3)
                new_y_shape2 = y.size()[:-1] + (
                    self.num_attention_heads,
                    self.attention_head_size,
                )
                y = y.view(*new_y_shape2)
                z2 = y.permute(0, 2, 1, 3)
                # inputs to matmul has been permuted or transposed, thus are strided tensor
                s = torch.matmul(z1, z2.transpose(-1, -2)) / 0.4
                s = s + z.to(s.dtype)
                return s

        m = M()
        x = torch.randn(2, 3, 64)
        y = torch.randn(2, 3, 64)
        z = torch.randn(2, 1, 1, 3)
        patterns = [
            ["aten::dequantize", "aten::to", "aten::matmul", "aten::div", "aten::add"],
        ]
        graph = self.checkQuantizeTrace(m, [x, y, z], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
        self.assertFused(
            graph, ["aten::matmul", "aten::dequantize", "aten::div", "aten::add"]
        )
        self.checkPatterns(graph, patterns)

    def test_split_dequant_to(self):
        """A single quant/dequant/to feeding three linears must be split so
        each linear gets its own dequant+to and all three fuse."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear1 = nn.Linear(2, 1, bias=True)
                self.linear2 = nn.Linear(2, 1, bias=True)
                self.linear3 = nn.Linear(2, 1, bias=True)

            def forward(self, x):
                a = self.linear1(x)
                b = self.linear2(x)
                c = self.linear3(x)
                return torch.cat([a, b, c])

        # The below pattern:
        #      quant
        #        |
        #     dequant
        #        |
        #        to
        #     /  |  \
        # linear linear linear
        #    |     |      |
        #
        # should be transformed to:
        #        to
        #        |
        #      quant
        #     /  |  \
        # dequant dequant dequant
        #    |      |       |
        #    to     to      to
        #    |      |       |
        # linear linear linear
        #    |      |       |
        patterns = [
            ["aten::dequantize", "aten::to", "aten::linear"],
            ["aten::dequantize", "aten::to", "aten::linear"],
            ["aten::dequantize", "aten::to", "aten::linear"],
        ]
        m = M()
        x = torch.randn(2, 2)
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1, int8_bf16=True)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 3)
        # single aten::to won't be rewritten by llga backend
        self.assertFused(graph, ["aten::dequantize", "aten::linear"])
        self.checkPatterns(graph, patterns)
def test_dequant_remove_attr(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x):
x = torch.quantize_per_channel(
x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8
)
x = torch.dequantize(x)
return x
x = x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
m = M()
traced = torch.jit.trace(m, x)
traced(x)
graph = traced.graph_for(x)
self.checkAttr(graph, "aten::dequantize", "qtype")
def test_fx_converted_model(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.linear = nn.Linear(15, 20)
def forward(self, x):
x = self.linear(x)
return x
x = x = torch.randn(2, 15)
m = M()
m.eval()
qconfig_dict = {"": static_qconfig[0]}
m = prepare_fx(m, qconfig_dict, x)
m = convert_fx(m)
graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 0)
def test_fx_ao_qat_converted_model(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.linear = nn.Linear(15, 20)
def forward(self, x):
x = self.linear(x)
return x
x = x = torch.randn(2, 15)
m = M()
m.eval()
qconfig_dict = {"": static_qconfig[0]}
m = prepare_qat_fx(m, qconfig_dict, x)
m = convert_to_reference_fx(m)
graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
# dequant -> linear should be mapped to LLGA
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
    @unittest.skipIf(True, "Poor accuracy")
    @skipIfNoTorchVision
    def test_fx_ao_qat_model(self):
        """conv-relu-conv put through FX QAT prepare/convert, then traced:
        both conv partitions should fuse.  Skipped (poor accuracy)."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.eltwise = torch.nn.ReLU()

            def forward(self, x):
                x = self.conv1(x)
                x = self.eltwise(x)
                x = self.conv2(x)
                return x

        data = torch.randn(1, 32, 224, 224).to(memory_format=torch.channels_last)
        m = M()
        m.eval()
        #
        # quantization aware training for static quantization
        #
        qconfig_dict = {"": torch.quantization.get_default_qat_qconfig("fbgemm")}
        m.train()
        model_prepared = prepare_qat_fx(m, qconfig_dict, example_inputs=data)
        model_quantized = convert_to_reference_fx(model_prepared)
        model_quantized = model_quantized.eval()
        model = model_quantized.to(memory_format=torch.channels_last)
        graph = self.checkQuantizeTrace(model, [data], atol=2e-1)
        self.checkPatterns(
            graph,
            [
                [
                    "aten::dequantize",
                    "aten::quantize_per_channel",
                    "aten::_convolution",
                    "aten::relu",
                    "aten::quantize_per_tensor",
                ],
                [
                    "aten::dequantize",
                    "aten::quantize_per_channel",
                    "aten::_convolution",
                    "aten::quantize_per_tensor",
                ],
            ],
        )

    def test_ffn_residual(self):
        """Transformer FFN block with residual: linear-gelu fuses into one
        partition and linear-add into another."""

        class FFN_Residual(nn.Module):
            def __init__(self, hidden_size, intermediate_size):
                super(FFN_Residual, self).__init__()
                self.linear1 = nn.Linear(hidden_size, intermediate_size)
                self.linear2 = nn.Linear(intermediate_size, hidden_size)
                self.LayerNorm1 = nn.LayerNorm(hidden_size)
                self.LayerNorm2 = nn.LayerNorm(hidden_size)
                self.intermediate_act_fn = nn.functional.gelu

            def forward(self, x):
                x1 = self.LayerNorm1(x)
                x2 = self.linear1(x1)
                x3 = self.intermediate_act_fn(x2)
                x4 = self.linear2(x3)
                x5 = self.LayerNorm2(x4 + x)
                return x5

        patterns = [
            [
                "aten::dequantize",
                "aten::linear",
                "aten::gelu",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::linear", "aten::add"],
        ]
        m = FFN_Residual(1024, 4096).eval()
        x = torch.rand(128, 1024)
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
        self.assertFused(graph, ["aten::linear", "aten::gelu"])
        self.assertFused(graph, ["aten::linear", "aten::add"])
        self.checkPatterns(graph, patterns)
class TestShapeFallback(JitLlgaTestCase):
    """Tests around dynamic shapes: size-op peephole, re-trace fallback on
    shape changes, and the add-quantization recipe."""

    @unittest.skipIf(True, "Size peephole optimization not enabled yet")
    def test_view_permute(self):
        """view+permute driven by runtime sizes: size/ListConstruct nodes
        should be optimized away (skipped until peephole is enabled)."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()

            def forward(self, x):
                new_x_shape = x.size()[:-1] + (3, 5)
                x = x.view(*new_x_shape)
                return x.permute(0, 2, 1, 3)

        x = torch.randn(5, 10, 15)
        m = M()
        for qconfig in static_qconfig:
            graph = self.checkQuantizeTrace(m, [x], qconfig=qconfig)
            self.assertGraphContainsExactly(graph, "aten::size", 0)
            self.assertGraphContainsExactly(graph, "prim::ListConstruct", 0)

            # change the size of the input
            x2 = torch.randn(6, 4, 15)
            # Bailout get triggered here
            y2 = m(x2)

    def test_conv_reshape(self):
        """conv output reshaped with a runtime size; `x_var` re-traces with
        a different input size to exercise the fallback path."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv1 = nn.Conv2d(4, 4, 3, padding=1, bias=True)
                self.conv2 = nn.Conv2d(4, 32, 3, padding=1, bias=True)

            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x).reshape(x.size(0), 4, -1)
                return x

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            x = torch.randn(15, 4, 28, 28).to(memory_format=memory_format)
            # change the size of the input, check the fallback
            x_var = torch.randn(7, 4, 16, 16).to(memory_format=memory_format)
            m = M()
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(
                    m, [x], x_var=[x_var], atol=2e-1, qconfig=qconfig
                )

                # TODO: enable this check when size peephole optimization is enabled
                # self.assertGraphContainsExactly(graph, "aten::size", 0)

    def test_add_recipe(self):
        """conv+add+relu quantizes both add inputs (2 quant ops); a bare
        conv+add quantizes only one."""

        class ConvAddRelu(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, image_size):
                super(ConvAddRelu, self).__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size, image_size
                )

            def forward(self, x1, x2):
                return torch.relu(torch.add(self.conv(x1), x2))

        class ConvAdd(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, image_size):
                super(ConvAdd, self).__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels, out_channels, kernel_size, image_size
                )

            def forward(self, x1, x2):
                return torch.add(self.conv(x1), x2)

        for memory_format in [torch.contiguous_format, torch.channels_last]:
            conv_add_relu = ConvAddRelu(3, 16, 3, 2)
            conv_add = ConvAdd(3, 16, 3, 2)
            x1 = torch.rand(1, 3, 224, 224, requires_grad=False).to(
                memory_format=memory_format
            )
            x2 = torch.rand(1, 16, 111, 111, requires_grad=False).to(
                memory_format=memory_format
            )
            input = [x1, x2]
            graph1 = self.checkQuantizeTrace(conv_add_relu, input, atol=1e-2)
            self.assertGraphContainsExactly(graph1, "aten::quantize_per_tensor", 2)
            graph2 = self.checkQuantizeTrace(conv_add, input, atol=1e-2)
            self.assertGraphContainsExactly(graph2, "aten::quantize_per_tensor", 1)
class TestModel(JitLlgaTestCase):
    """End-to-end torchvision model check; concrete test_vision_* methods
    are attached by the setattr loop below this class."""

    @skipIfNoTorchVision
    def _test_vision(self, model_name):
        # model_name is looked up on torchvision.models (e.g. "resnet50").
        for memory_format in [torch.contiguous_format, torch.channels_last]:
            m = getattr(torchvision.models, model_name)().eval()
            x = (torch.rand(1, 3, 224, 224) / 10).to(memory_format=memory_format)
            for qconfig in static_qconfig:
                graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig)

                # TODO: aten::adaptive_avg_pool2d also need to be fused once backend supported it
                self.assertFused(
                    graph,
                    [
                        "aten::_convolution",
                        "aten::relu",
                        "aten::max_pool2d",
                        "aten::linear",
                        "aten::quantize_per_channel",
                    ],
                )
                # large partition: 7 fusion group in total
                self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 7)
# Dynamically attach one test_vision_<model> method per entry to TestModel.
# wrapper() binds the model name as a default-free closure argument so each
# generated test calls _test_vision with its own model.
for model_name, enabled in [
    ["resnet50", True],
]:

    def wrapper(mname):
        # skipIf is evaluated here, while `enabled` still holds this
        # iteration's value.
        @unittest.skipIf(not enabled, "Disabled")
        def test(self):
            return self._test_vision(mname)

        return test

    setattr(TestModel, "test_vision_%s" % model_name, wrapper(model_name))


if __name__ == "__main__":
    run_tests()
| 83,103 | 35.385289 | 120 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_toolkit.py | import torch
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
import sklearn.metrics
import numpy as np
class ToolkitTester(TestCase):
    """Validate ipex's multi-threaded ROC-AUC/accuracy kernels against
    sklearn's single-threaded reference implementations."""

    def test_multi_thread_sklearn_metric_eval_roc_auc_score(self):
        targets = np.random.randint(0, 2, size=10)
        # sklearn.metrics.roc_auc_score raises ValueError when y_true
        # contains a single class; random targets could occasionally be
        # all-0 or all-1, making this test flaky.  Force both classes.
        targets[0] = 0
        targets[1] = 1
        scores = torch.rand(10)
        roc_auc_st = sklearn.metrics.roc_auc_score(targets, scores.numpy())
        accuracy_st = sklearn.metrics.accuracy_score(
            y_true=targets, y_pred=np.round(scores.numpy())
        )
        roc_auc_mt, _, accuracy_mt = ipex._C.roc_auc_score_all(
            torch.Tensor(targets), scores
        )
        # For code coverage
        roc_auc_mt_2, _, _ = ipex._C.roc_auc_score(torch.Tensor(targets), scores)
        self.assertEqual(roc_auc_st, roc_auc_mt)
        self.assertEqual(roc_auc_st, roc_auc_mt_2)
        self.assertEqual(accuracy_st, accuracy_mt)
| 892 | 36.208333 | 81 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/code_free_optimization.py | import argparse
import torch
import torch.nn as nn
class ConvBatchNorm(torch.nn.Module):
    """A 7x7 stride-2 Conv2d followed by BatchNorm2d, used by the
    code-free-optimization test driver to exercise conv+bn handling."""

    def __init__(self):
        super(ConvBatchNorm, self).__init__()
        # Creation order (conv first, then bn) is kept stable so seeded
        # parameter initialization stays reproducible.
        self.conv = torch.nn.Conv2d(
            3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
        )
        self.bn = torch.nn.BatchNorm2d(
            64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
        )

    def forward(self, x):
        conv_out = self.conv(x)
        return self.bn(conv_out)
class ConvBatchNormSoftmax(torch.nn.Module):
    """Conv2d + BatchNorm2d followed by a Softmax that is instantiated
    inside forward() — the driver's
    --conv_bn_with_module_created_in_forward flag tests exactly that
    pattern, so the in-forward construction is deliberate."""

    def __init__(self):
        super(ConvBatchNormSoftmax, self).__init__()
        self.conv = torch.nn.Conv2d(
            3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
        )
        self.bn = torch.nn.BatchNorm2d(
            64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
        )

    def forward(self, x):
        normalized = self.bn(self.conv(x))
        # Module created in forward on purpose (see class docstring).
        return nn.Softmax(dim=-1)(normalized)
if __name__ == "__main__":
    # Simple driver: run each selected model 10 times in eval mode so an
    # attached profiler/optimizer can observe the conv+bn patterns.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--conv_bn", action="store_true", help="test conv_bn model", default=False
    )
    parser.add_argument(
        "--conv_bn_with_module_created_in_forward",
        action="store_true",
        help="test module created in forward",
        default=False,
    )
    args = parser.parse_args()
    if args.conv_bn:
        # Renamed from `input`, which shadowed the builtin.
        sample = torch.randn(1, 3, 224, 224)
        model = ConvBatchNorm().eval()
        for _ in range(10):
            model(sample)
    if args.conv_bn_with_module_created_in_forward:
        sample = torch.randn(1, 3, 224, 224)
        model = ConvBatchNormSoftmax().eval()
        for _ in range(10):
            model(sample)
| 1,700 | 27.35 | 82 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_quantization_default_recipe.py | import itertools
import tempfile
import torch
import torch.nn as nn
from torch.testing import FileCheck
from torch.ao.quantization import (
MinMaxObserver,
PerChannelMinMaxObserver,
QConfig,
QConfigMapping,
)
import copy
import intel_extension_for_pytorch as ipex
from test_ao_jit_llga_utils import JitLlgaTestCase, LLGA_FUSION_GROUP
from torch.testing._internal.common_utils import run_tests
from torch.ao.nn.quantized.modules.utils import _quantize_weight
from intel_extension_for_pytorch.quantization import prepare, convert
class TestDefaultRecipe(JitLlgaTestCase):
    def test_quantized_op_int8_int8(self):
        """conv -> max_pool: max_pool only supports INT8-in/INT8-out, so a
        fake quant must be inserted after its output even though its
        consumer is not quantifiable — expect two quantized partitions."""

        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = nn.Conv2d(2, 2, 1)
                self.pool = nn.MaxPool2d(1, 1)

            def forward(self, x):
                x = self.conv(x)
                x = self.pool(x)
                return x

        m = M()
        x = torch.rand(1, 2, 14, 14)
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
        patterns = [
            [
                "aten::dequantize",
                "aten::dequantize",
                "aten::_convolution",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::max_pool2d", "aten::quantize_per_tensor"],
        ]
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
        self.checkPatterns(graph, patterns)
def test_none_gemm_op_has_quantized_op_before(self):
# For none-gemm op, if it's pre op is quantifiable op, fake quant will be inserted.
# Given the following example, the quantization flow will be like:
# q->dq->quantized_module->q->dq->flatten->q->dq.
class M(nn.Module):
def __init__(self, quantized_module):
super(M, self).__init__()
self.quantized_module = quantized_module
def forward(self, x):
x = self.quantized_module(x)
x = x.flatten(1)
return x
class conv_swish(nn.Module):
def __init__(
self,
):
super(conv_swish, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 1)
def forward(self, x):
x = self.conv(x)
y = x.sigmoid()
z = torch.mul(x, y)
return z
class conv_eltwise(nn.Module):
def __init__(
self,
):
super(conv_eltwise, self).__init__()
self.conv = torch.nn.Conv2d(2, 2, 1)
def forward(self, x):
x = self.conv(x)
x = x.relu_()
return x
# TODO: test more quantized modules(especially for fused module).
quantized_modules = [conv_swish(), conv_eltwise()]
patterns = [
[
[
"aten::dequantize",
"aten::dequantize",
"aten::_convolution",
"aten::sigmoid",
"aten::mul",
"aten::quantize_per_tensor",
]
],
[
[
"aten::dequantize",
"aten::dequantize",
"aten::_convolution",
"aten::relu",
"aten::quantize_per_tensor",
]
],
]
for quantized_modules, pattern in zip(quantized_modules, patterns):
m = M(quantized_modules).eval()
x = torch.rand(1, 2, 14, 14)
graph = self.checkQuantizeTrace(m, [x], atol=2e-1)
self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 1)
self.checkPatterns(graph, pattern)
FileCheck().check("aten::dequantize").run(graph)
    def test_qconfig_mapping_for_static_quantization(self):
        """The default static QConfigMapping quantizes conv and max_pool
        into the expected LLGA fusion groups."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.conv = nn.Conv2d(2, 2, 1)
                self.pool = nn.MaxPool2d(1, 1)
            def forward(self, x):
                x = self.conv(x)
                x = self.pool(x)
                return x
        m = M()
        x = torch.rand(1, 2, 14, 14)
        qconfig_mapping = ipex.quantization.default_static_qconfig_mapping
        graph = self.checkQuantizeTrace(m, [x], atol=2e-1, qconfig=qconfig_mapping)
        patterns = [
            [
                "aten::dequantize",
                "aten::dequantize",
                "aten::_convolution",
                "aten::quantize_per_tensor",
            ],
            ["aten::dequantize", "aten::max_pool2d", "aten::quantize_per_tensor"],
        ]
        self.assertGraphContainsExactly(graph, LLGA_FUSION_GROUP, 2)
        self.checkPatterns(graph, patterns)
    def test_qconfig_mapping_for_dynamic_quantization(self):
        """The default dynamic QConfigMapping converts Linear into
        nn.quantized.dynamic.Linear."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(2, 2)
                self.relu = nn.ReLU()
            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x
        m = M()
        x = torch.rand(1, 2)
        qconfig_mapping = ipex.quantization.default_dynamic_qconfig_mapping
        prepared_model = ipex.quantization.prepare(m, qconfig_mapping, x)
        converted_model = ipex.quantization.convert(prepared_model)
        assert hasattr(converted_model, "linear")
        assert isinstance(converted_model.linear, nn.quantized.dynamic.Linear)
    def test_check_model_obsever_has_run(self):
        """Observer run-state is tracked: unset after prepare, set after
        calibration, and reset by save/load of the qconf summary.

        NOTE: "obsever" is a typo mirrored from the ipex internal API name
        (`ipex.quantization._utils.check_model_obsever_has_run`).
        """
        class Block(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linears = nn.ModuleList([nn.Linear(4, 4) for _ in range(2)])
            def forward(self, x):
                for _, l in enumerate(self.linears):
                    x = l(x)
                return x
        class Mod(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.blocks = nn.ModuleList([Block() for _ in range(2)])
            def forward(self, x):
                for _, b in enumerate(self.blocks):
                    x = b(x)
                return x
        check_model_obsever_has_run = (
            ipex.quantization._utils.check_model_obsever_has_run
        )
        m = Mod().eval()
        x = torch.rand(4, 4)
        qconfig_mapping = ipex.quantization.default_static_qconfig_mapping
        prepared_model = ipex.quantization.prepare(m, qconfig_mapping, x)
        assert not check_model_obsever_has_run(prepared_model)
        for _ in range(5):
            prepared_model(torch.rand(4, 4))
        assert check_model_obsever_has_run(prepared_model)
        with tempfile.NamedTemporaryFile() as fp:
            qconf_filename = fp.name
            prepared_model.save_qconf_summary(qconf_filename)
            # Observers are removed after save_qconf_summary
            assert not check_model_obsever_has_run(prepared_model)
            prepared_model.load_qconf_summary(qconf_filename)
            # Observers are added but not run yet after load_qconf_summary
            assert not check_model_obsever_has_run(prepared_model)
            for _ in range(5):
                prepared_model(torch.rand(4, 4))
            assert check_model_obsever_has_run(prepared_model)
    def test_smooth_quant(self):
        """SmoothQuant inserts a `mul` (activation scaling) before Linear and
        matches a reference model whose scaling factors are applied manually
        followed by default static quantization.  Runs with and without a
        bf16 autocast context."""
        N, IC, OC = 4, 4, 4
        # Deliberately skewed activations per input channel to give SmoothQuant
        # something to balance.
        x_data = [(i + 1) ** 3 for i in range(N)]
        x = torch.Tensor(x_data).repeat(N, 1)
        w_data = [(i + 1) for i in range(N)]
        w = torch.Tensor(w_data).repeat(OC, 1)
        class Mod(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.dense = nn.Linear(IC, OC)
                self.dense.weight = nn.Parameter(w)
                self.relu = nn.ReLU()
            def forward(self, x):
                x = self.dense(x)
                x = self.relu(x)
                return x
        for bf16_mixed in [False, True]:
            with torch.no_grad(), torch.autocast(
                device_type="cpu", enabled=bf16_mixed, dtype=torch.bfloat16
            ):
                m = Mod().eval()
                alpha = 0.5
                qconfig_mapping = ipex.quantization.get_smooth_quant_qconfig_mapping(
                    alpha=alpha
                )
                prepared_model = ipex.quantization.prepare(
                    copy.deepcopy(m), qconfig_mapping, example_inputs=x, inplace=False
                )
                prepared_model(x)
                converted_model = ipex.quantization.convert(prepared_model)
                traced_model = torch.jit.trace(converted_model, x)
                traced_model = torch.jit.freeze(traced_model)
                # Check graph
                # Do not run traced_model to fuse by LLGA because `mul`
                # may be fused to LLGA fusion group and cannot be found by the following code
                graph = traced_model.graph_for(x)
                found_mul = False
                for node in graph.nodes():
                    if node.kind() == "aten::mul":
                        found_mul = True
                assert (
                    found_mul
                ), "Failed to find the inserted `mul` before Linear for SmoothQuant"
                traced_model(x)
                result_sq = traced_model(x)
                # Check correctness with reference quantized model
                # Calculate and apply scaling factors manually to model and use default static quant
                x_max_per_ic = torch.max(x, 0)[0]
                w_max_per_ic = torch.max(w, 0)[0]
                act_scaling_factors = torch.pow(w_max_per_ic, 1 - alpha) / torch.pow(
                    x_max_per_ic, alpha
                )
                wei_scaling_factors = torch.pow(x_max_per_ic, alpha) / torch.pow(
                    w_max_per_ic, 1 - alpha
                )
                new_x = torch.mul(x, act_scaling_factors)
                new_w = torch.mul(w, wei_scaling_factors)
                m2 = copy.deepcopy(m)
                m2.dense.weight = nn.Parameter(new_w)
                # SmoothQuant uses MinMaxObserver for activation not histogram observer
                w_observer = PerChannelMinMaxObserver.with_args(
                    dtype=torch.qint8, qscheme=torch.per_channel_symmetric
                )
                static_qconfig = QConfig(
                    activation=MinMaxObserver.with_args(reduce_range=False),
                    weight=w_observer,
                )
                qconfig_mapping = QConfigMapping().set_global(static_qconfig)
                prepared_model2 = ipex.quantization.prepare(
                    m2, qconfig_mapping, example_inputs=new_x, inplace=False
                )
                prepared_model2(new_x)
                converted_model2 = ipex.quantization.convert(prepared_model2)
                traced_model2 = torch.jit.trace(converted_model2, new_x)
                traced_model2 = torch.jit.freeze(traced_model2)
                traced_model2(new_x)
                traced_model2(new_x)
                result_ref = traced_model2(new_x)
                assert torch.allclose(result_sq, result_ref)
    def test_smooth_quant_save_load_qconf_summary(self):
        """Saving and reloading the qconf summary preserves SmoothQuant
        results and (for a custom observer config) all observer metadata."""
        class Mod(nn.Module):
            def __init__(self):
                super().__init__()
                self.dense = nn.Linear(4, 4)
                self.relu = nn.ReLU()
            def forward(self, x):
                return self.relu(self.dense(x))
        m = Mod().eval()
        x = torch.rand(1, 4)
        calib_dataset = [torch.rand(1, 4) for _ in range(5)]
        per_channel_observer = (
            torch.ao.quantization.MovingAveragePerChannelMinMaxObserver
        )
        # Non-default SmoothQuant observers to verify they survive a
        # save/load round trip of the qconf JSON.
        custom_config = {
            "alpha": 0.75,
            "act_observer": torch.ao.quantization.MinMaxObserver(),
            "act_ic_observer": per_channel_observer(ch_axis=-1),
            "wei_observer": per_channel_observer(
                dtype=torch.qint8, qscheme=torch.per_channel_symmetric
            ),
            "wei_ic_observer": per_channel_observer(ch_axis=1),
        }
        for use_custom_config in [False, True]:
            kwargs = custom_config if use_custom_config else {}
            qconfig_mapping = ipex.quantization.get_smooth_quant_qconfig_mapping(
                **kwargs
            )
            prepared_model = ipex.quantization.prepare(
                m, qconfig_mapping, example_inputs=x, inplace=False
            )
            # Save observer info for comparison
            if use_custom_config:
                observer_info = {
                    **prepared_model._fqn_to_auto_quant_state_map[
                        " "
                    ].tensor_id_to_observer,
                    **prepared_model._fqn_to_auto_quant_state_map[
                        " "
                    ].weight_tensor_id_to_observer,
                }
                observer_info_dict = {}
                for key, obs in observer_info.items():
                    observer_info_dict[key] = {
                        "smooth_quant_enabled": obs.smooth_quant_enabled,
                        "alpha": obs.alpha,
                        "ic_obs": type(obs.ic_obs),
                        "act_obs": type(obs.act_obs),
                    }
            for data in calib_dataset:
                prepared_model(data)
            with tempfile.NamedTemporaryFile() as fp:
                qconf_filename = fp.name
                prepared_model.save_qconf_summary(qconf_summary=qconf_filename)
                q_model = ipex.quantization.convert(prepared_model)
                with torch.no_grad():
                    q_model = torch.jit.trace(q_model, x)
                    q_model = torch.jit.freeze(q_model)
                    out_ref = q_model(x)
                prepared_model_2 = ipex.quantization.prepare(
                    m, qconfig_mapping, example_inputs=x, inplace=False
                )
                prepared_model_2.load_qconf_summary(qconf_summary=qconf_filename)
                # Save observer info for comparison
                if use_custom_config:
                    observer_info_2 = {
                        **prepared_model_2._fqn_to_auto_quant_state_map[
                            " "
                        ].tensor_id_to_observer,
                        **prepared_model_2._fqn_to_auto_quant_state_map[
                            " "
                        ].weight_tensor_id_to_observer,
                    }
                    observer_info_dict_2 = {}
                    for key, obs in observer_info_2.items():
                        observer_info_dict_2[key] = {
                            "smooth_quant_enabled": obs.smooth_quant_enabled,
                            "alpha": obs.alpha,
                            "ic_obs": type(obs.ic_obs),
                            "act_obs": type(obs.act_obs),
                        }
                q_model_2 = ipex.quantization.convert(prepared_model_2)
                with torch.no_grad():
                    q_model_2 = torch.jit.trace(q_model_2, x)
                    q_model_2 = torch.jit.freeze(q_model_2)
                    out_2 = q_model_2(x)
                assert torch.allclose(out_ref, out_2)
                # Check observers
                if use_custom_config:
                    assert (
                        observer_info_dict == observer_info_dict_2
                    ), "Error: SmoothQuant observer info lost after saving/loading qconf JSON"
    def test_none_example_input_for_quantization(self):
        """prepare() without example inputs works for dynamic quantization
        but raises AssertionError for static quantization."""
        class M(nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.linear = nn.Linear(2, 2)
                self.relu = nn.ReLU()
            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x
        m = M()
        # Dynamic quant
        qconfig_mapping = ipex.quantization.default_dynamic_qconfig_mapping
        prepared_model = ipex.quantization.prepare(m, qconfig_mapping)
        converted_model = ipex.quantization.convert(prepared_model)
        assert hasattr(converted_model, "linear")
        assert isinstance(converted_model.linear, nn.quantized.dynamic.Linear)
        # Static quant
        qconfig_mapping = ipex.quantization.default_static_qconfig_mapping
        with self.assertRaises(AssertionError):
            prepared_model = ipex.quantization.prepare(m, qconfig_mapping)
    def test_weight_only_quantization(self):
        """Weight-only quantized Linear (IpexWoqLinear) matches a manual
        quantize-dequantize reference matmul for various shapes."""
        class M(nn.Module):
            def __init__(self, input_channel, output_channel, has_bias):
                super(M, self).__init__()
                self.linear = torch.nn.Linear(input_channel, output_channel, has_bias)
            def forward(self, x):
                return self.linear(x)
        def test(feature, has_bias):
            # feature = [batch, in_features, out_features]
            model = M(feature[1], feature[2], has_bias)
            m = model.eval()
            data = torch.rand(feature[0], feature[1])
            weight = model.linear.weight
            weight_observer = (
                ipex.quantization.get_weight_only_quant_qconfig_mapping().global_qconfig.weight()
            )
            weight_observer(weight)
            weight_int8 = _quantize_weight(weight, weight_observer)
            weight_fp32 = weight_int8.dequantize()
            if has_bias:
                bias = model.linear.bias
                output1 = torch.matmul(data, weight_fp32.T) + bias
            else:
                output1 = torch.matmul(data, weight_fp32.T)
            qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping()
            prepared_model = prepare(m, qconfig, example_inputs=data, inplace=False)
            with torch.no_grad():
                woq_model = convert(prepared_model)
                woq_linear_class = (
                    ipex.nn.modules.weight_only_quantization.IpexWoqLinear
                )
                assert isinstance(woq_model.linear, woq_linear_class)
                output2 = woq_model(data)
            torch.testing.assert_close(output1, output2, rtol=1e-04, atol=1e-05)
        case_list = [
            [3, 31, 31],
            [4, 4096, 4096],
            [9, 4095, 4095],
            [196, 4095, 16383],
        ]
        for case in case_list:
            test(case, True)
            test(case, False)
    def test_weight_only_quantization_autocast(self):
        """Weight-only quantization works inside a bf16 autocast context for
        both int8 and 4-bit weight dtypes, with/without bias."""
        class M(nn.Module):
            def __init__(self, use_bias):
                super(M, self).__init__()
                self.linear = torch.nn.Linear(4, 4, use_bias)
            def forward(self, x):
                return self.linear(x)
        with torch.autocast(device_type="cpu", enabled=True, dtype=torch.bfloat16):
            use_bias_list = [True, False]
            w_dtype_list = [torch.qint8, torch.quint4x2]
            cases = itertools.product(use_bias_list, w_dtype_list)
            for use_bias, w_dtype in cases:
                m = M(use_bias).eval()
                x = torch.rand(4, 4)
                qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping(
                    weight_dtype=w_dtype
                )
                prepared_model = ipex.quantization.prepare(
                    m, qconfig, example_inputs=x, inplace=False
                )
                woq_model = ipex.quantization.convert(prepared_model)
                woq_model(x)
                woq_linear_class = (
                    ipex.nn.modules.weight_only_quantization.IpexWoqLinear
                )
                assert isinstance(woq_model.linear, woq_linear_class)
    def test_weight_only_quantization_jit_save_load(self):
        """A traced/frozen weight-only quantized model survives a JIT
        save/load round trip with identical outputs."""
        class M(nn.Module):
            def __init__(self, input_channel, output_channel, has_bias):
                super(M, self).__init__()
                self.linear = torch.nn.Linear(input_channel, output_channel, has_bias)
            def forward(self, x):
                return self.linear(x)
        def test(feature, has_bias):
            # feature = [batch, in_features, out_features]
            model = M(feature[1], feature[2], has_bias)
            m = model.eval()
            example_inputs = torch.rand(feature[0], feature[1])
            qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping()
            prepared_model = prepare(
                m, qconfig, example_inputs=example_inputs, inplace=False
            )
            with torch.no_grad():
                converted_model = convert(prepared_model)
            with tempfile.NamedTemporaryFile() as fp:
                # save
                with torch.no_grad():
                    traced_model = torch.jit.trace(converted_model, example_inputs)
                    traced_model = torch.jit.freeze(traced_model)
                    traced_model.save(fp.name)
                # load
                loaded_model = torch.jit.load(fp.name)
            # Compare results of original model and loaded model
            output_ref = traced_model(example_inputs)
            output = loaded_model(example_inputs)
            torch.testing.assert_close(output_ref, output)
        case_list = [
            [3, 31, 31],
            [4, 4096, 4096],
            [9, 4095, 4095],
            [196, 4095, 16383],
        ]
        for case in case_list:
            test(case, True)
            test(case, False)
    def test_weight_only_quantization_quint4x2_weight(self):
        """4-bit (quint4x2) weight-only quantization produces an
        IpexWoqLinear and runs, with and without bias."""
        class M(nn.Module):
            def __init__(self, use_bias):
                super(M, self).__init__()
                self.linear = torch.nn.Linear(4, 4, use_bias)
            def forward(self, x):
                return self.linear(x)
        for use_bias in [True, False]:
            m = M(use_bias).eval()
            x = torch.rand(4, 4)
            qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping(
                weight_dtype=torch.quint4x2
            )
            prepared_model = ipex.quantization.prepare(
                m, qconfig, example_inputs=x, inplace=False
            )
            woq_model = ipex.quantization.convert(prepared_model)
            woq_model(x)
            woq_linear_class = ipex.nn.modules.weight_only_quantization.IpexWoqLinear
            assert isinstance(woq_model.linear, woq_linear_class)
if __name__ == "__main__":
run_tests()
| 22,852 | 38.198971 | 100 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_layer_norm.py | import unittest
import torch
from common_utils import TestCase
class M(torch.nn.Module):
    """Minimal module: a single LayerNorm over a trailing dimension of 10."""

    def __init__(self):
        super(M, self).__init__()
        self.layer_norm = torch.nn.LayerNorm(10)

    def forward(self, x):
        return self.layer_norm(x)
class LayerNormTester(TestCase):
    """Checks layer_norm under CPU autocast: eager and traced results agree
    and dtypes are preserved, for input ranks 2..7."""
    def test_layer_norm(self):
        # autocast inference path. layer_norm is fallthrough.
        for dim in [2, 3, 4, 5, 6, 7]:
            for full_bf16 in [False, True]:
                model = M().eval()
                if full_bf16: # support full bf16 mode for layer_norm
                    model = model.bfloat16()
                with torch.cpu.amp.autocast(), torch.no_grad():
                    # Input shape: (3, 10, 10, ..., 10) with `dim` dimensions.
                    input_size = [
                        3,
                    ]
                    for _ in range(dim - 1):
                        input_size.append(10)
                    x = torch.randn(input_size)
                    x_bf16 = x.bfloat16()
                    # layernorm input is bfloat16
                    trace_model = torch.jit.trace(model, x_bf16)
                    y1_bf16 = model(x_bf16)
                    y2_bf16 = trace_model(x_bf16)
                    self.assertEqual(y1_bf16.dtype, torch.bfloat16)
                    self.assertEqual(y2_bf16.dtype, torch.bfloat16)
                    self.assertEqual(y1_bf16, y2_bf16)
                    if not full_bf16:
                        # layernorm input is fp32
                        trace_model = torch.jit.trace(model, x)
                        y1_fp32 = model(x)
                        y2_fp32 = trace_model(x)
                        self.assertEqual(y1_fp32.dtype, torch.float32)
                        self.assertEqual(y2_fp32.dtype, torch.float32)
                        self.assertEqual(y1_fp32, y2_fp32)
if __name__ == "__main__":
test = unittest.main()
| 1,866 | 34.903846 | 70 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/common_utils.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import inspect
import argparse
import itertools
import unittest
import warnings
import random
import contextlib
import socket
import subprocess
import time
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import __main__
import errno
import expecttest
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch import inf
import torch.backends.cudnn
import torch.backends.mkl
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
import intel_extension_for_pytorch as ipex
torch.backends.disable_global_flags()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--subprocess", action="store_true", help="whether to run each test in a subprocess"
)
parser.add_argument("--seed", type=int, default=1234)
parser.add_argument("--accept", action="store_true")
args, remaining = parser.parse_known_args()
TEST_IN_SUBPROCESS = args.subprocess
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
def shell(command, cwd=None):
    """Run *command* (a list/tuple of tokens) as a subprocess and return its
    exit status, handling Ctrl-C gracefully."""
    sys.stdout.flush()
    sys.stderr.flush()
    # The following cool snippet is copied from Py3 core library subprocess.call
    # only the with
    # 1. `except KeyboardInterrupt` block added for SIGINT handling.
    # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
    # `p.wait()` in a `final` block for the code to be portable.
    #
    # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
    assert not isinstance(
        command, str
    ), "Command to shell should be a list or tuple of tokens"
    p = subprocess.Popen(command, universal_newlines=True, cwd=cwd)
    try:
        return p.wait()
    except KeyboardInterrupt:
        # Give `p` a chance to handle KeyboardInterrupt. Without this,
        # `pytest` can't print errors it collected so far upon KeyboardInterrupt.
        exit_status = p.wait(timeout=5)
        if exit_status is not None:
            return exit_status
        else:
            p.kill()
            raise
    except: # noqa E722, copied from python core library
        p.kill()
        raise
    finally:
        # Always call p.wait() to ensure exit
        p.wait()
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
    """Decorator factory: run the wrapped test once per dtype in *dtypes*.

    On Python >= 3.4 each dtype runs inside its own subTest so failures are
    reported per dtype.
    """

    def decorator(test_fn):
        @wraps(test_fn)
        def wrapper(self, *args):
            for dt in dtypes:
                if not PY34:
                    test_fn(self, *args, dtype=dt)
                    continue
                with TestCase.subTest(self, dtype=dt):
                    test_fn(self, *args, dtype=dt)

        return wrapper

    return decorator
def run_tests(argv=UNITTEST_ARGS):
    """Run this module's tests; with --subprocess, run each test case in its
    own child process and fail if any child fails."""
    if TEST_IN_SUBPROCESS:
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = []
        # Recursively flatten nested suites into individual TestCase objects.
        def add_to_test_cases(suite_or_case):
            if isinstance(suite_or_case, unittest.TestCase):
                test_cases.append(suite_or_case)
            else:
                for element in suite_or_case:
                    add_to_test_cases(element)
        add_to_test_cases(suite)
        failed_tests = []
        for case in test_cases:
            # case.id() is 'module.Class.method'; strip the module part.
            test_case_full_name = case.id().split(".", 1)[1]
            exitcode = shell([sys.executable] + argv + [test_case_full_name])
            if exitcode != 0:
                failed_tests.append(test_case_full_name)
        assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
            len(failed_tests), "\n\t".join(failed_tests)
        )
    else:
        unittest.main(argv=argv)
PY3 = sys.version_info > (3, 0)
PY34 = sys.version_info >= (3, 4)
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.
IS_PYTORCH_CI = bool(os.environ.get("IS_PYTORCH_CI", 0))
# Platform-specific context manager yielding a temporary file *name*.
if IS_WINDOWS:
    @contextmanager
    def TemporaryFileName():
        # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
        # close the file after creation and try to remove it manually
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.close()
            yield f.name
        finally:
            os.unlink(f.name)
else:
    @contextmanager # noqa: T484
    def TemporaryFileName():
        with tempfile.NamedTemporaryFile() as f:
            yield f.name
def _check_module_exists(name):
    r"""Returns if a top-level module with :attr:`name` exists *without*
    importing it. This is generally safer than try-catch block around a
    `import X`. It avoids third party libraries breaking assumptions of some of
    our tests, e.g., setting multiprocessing start method when imported
    (see librosa/#747, torchvision/#544).
    """
    if not PY3: # Python 2
        import imp
        try:
            imp.find_module(name)
            return True
        except ImportError:
            return False
    elif not PY34: # Python [3, 3.4)
        import importlib
        loader = importlib.find_loader(name)
        return loader is not None
    else: # Python >= 3.4
        import importlib
        import importlib.util
        spec = importlib.util.find_spec(name)
        return spec is not None
TEST_NUMPY = _check_module_exists("numpy")
TEST_SCIPY = _check_module_exists("scipy")
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists("numba")
# On Py2, importing librosa 0.6.1 triggers a TypeError (if using newest joblib)
# see librosa/librosa#729.
# TODO: allow Py2 when librosa 0.6.2 releases
TEST_LIBROSA = _check_module_exists("librosa") and PY3
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = (
os.environ.get("NO_MULTIPROCESSING_SPAWN", "0") == "1" or sys.version_info[0] == 2
)
TEST_WITH_ASAN = os.getenv("PYTORCH_TEST_WITH_ASAN", "0") == "1"
TEST_WITH_TSAN = os.getenv("PYTORCH_TEST_WITH_TSAN", "0") == "1"
TEST_WITH_UBSAN = os.getenv("PYTORCH_TEST_WITH_UBSAN", "0") == "1"
TEST_WITH_ROCM = os.getenv("PYTORCH_TEST_WITH_ROCM", "0") == "1"
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv("PYTORCH_TEST_WITH_SLOW", "0") == "1"
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.
TEST_SKIP_FAST = os.getenv("PYTORCH_TEST_SKIP_FAST", "0") == "1"
if TEST_NUMPY:
import numpy
ALL_TENSORTYPES = [torch.float, torch.double, torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float, torch.double, torch.half, torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
    """Decorator: skip the wrapped test when running on the ROCm stack."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_WITH_ROCM:
            fn(*args, **kwargs)
            return
        raise unittest.SkipTest("test doesn't currently work on the ROCm stack")

    return wrapper
def skipIfCompiledWithoutNumpy(fn):
    """Decorator: skip the test if PyTorch was built without numpy support."""
    # Even if the numpy module is present, if `USE_NUMPY=0` is used during the
    # build, numpy tests will fail
    numpy_support = TEST_NUMPY
    if numpy_support:
        try:
            # The numpy module is present, verify that PyTorch is compiled with
            # numpy support
            torch.from_numpy(numpy.array([2, 2]))
        except RuntimeError:
            numpy_support = False
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not numpy_support:
            raise unittest.SkipTest("PyTorch was compiled without numpy support")
        else:
            fn(*args, **kwargs)
    return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
    """Decorator: skip the wrapped test when PyTorch lacks LAPACK support."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if torch._C.has_lapack:
            fn(*args, **kwargs)
            return
        raise unittest.SkipTest("PyTorch compiled without Lapack")

    return wrapper
def skipIfNotRegistered(op_name, message):
    """Wraps the decorator to hide the import of the `core`.
    Args:
        op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
        message: message to fail with.
    Usage:
        @skipIfNotRegistered('MyOp', 'MyOp is not linked!')
    This will check if 'MyOp' is in the caffe2.python.core
    """
    try:
        from caffe2.python import core
        skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, message)
    except ImportError:
        # caffe2 not installed at all: always skip.
        skipper = unittest.skip("Cannot import `caffe2.python.core`")
    return skipper
def slowTest(fn):
    """Decorator: mark *fn* as slow; it only runs when PYTORCH_TEST_WITH_SLOW
    is enabled, and is tagged with ``slow_test`` for discovery."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_SLOW:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest(
                "test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test"
            )

    wrapper.__dict__["slow_test"] = True
    return wrapper
def skipCUDAMemoryLeakCheckIf(condition):
    """Decorator factory: disable the CUDA memory-leak check on the decorated
    test when *condition* is true (leaves an already-disabled flag alone)."""

    def dec(fn):
        currently_enabled = getattr(fn, "_do_cuda_memory_leak_check", True)
        if currently_enabled:
            fn._do_cuda_memory_leak_check = not condition
        return fn

    return dec
def skipCUDANonDefaultStreamIf(condition):
    """Decorator factory: disable the non-default-CUDA-stream policy on the
    decorated test when *condition* is true."""

    def dec(fn):
        currently_enabled = getattr(fn, "_do_cuda_non_default_stream", True)
        if currently_enabled:
            fn._do_cuda_non_default_stream = not condition
        return fn

    return dec
def suppress_warnings(fn):
    """Decorator: run *fn* with all warnings suppressed.

    Fix: the previous wrapper discarded ``fn``'s return value (it always
    returned None); the result is now propagated to the caller.
    """

    @wraps(fn)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return fn(*args, **kwargs)

    return wrapper
def get_cpu_type(type_name):
    """Map a ``torch.cuda.X`` tensor-type name to the CPU type ``torch.X``."""
    namespace, _, name = type_name.rpartition(".")
    assert namespace == "torch.cuda"
    return getattr(torch, name)
def get_gpu_type(type_name):
    """Map a CPU tensor type (class or ``torch.X`` name) to ``torch.cuda.X``."""
    if isinstance(type_name, type):
        type_name = "{}.{}".format(type_name.__module__, type_name.__name__)
    namespace, _, name = type_name.rpartition(".")
    assert namespace == "torch"
    return getattr(torch.cuda, name)
def to_gpu(obj, type_map=None):
    """Recursively copy *obj* (tensor, storage, list, or tuple) to the GPU.

    type_map optionally overrides the CPU->GPU type mapping per type name.
    Non-tensor leaves are deep-copied unchanged.
    """
    if type_map is None:
        type_map = {}
    if isinstance(obj, torch.Tensor):
        assert obj.is_leaf
        t = type_map.get(obj.type(), get_gpu_type(obj.type()))
        with torch.no_grad():
            res = obj.clone().type(t)
            res.requires_grad = obj.requires_grad
        return res
    elif torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    elif isinstance(obj, list):
        return [to_gpu(o, type_map) for o in obj]
    elif isinstance(obj, tuple):
        return tuple(to_gpu(o, type_map) for o in obj)
    else:
        return deepcopy(obj)
def get_function_arglist(func):
    """Return the positional-argument names of *func* (Py2/Py3 compatible)."""
    if sys.version_info <= (3,):
        return inspect.getargspec(func).args
    return inspect.getfullargspec(func).args
def set_rng_seed(seed):
    """Seed every RNG used by the tests: random, torch, and numpy (if available)."""
    random.seed(seed)
    torch.manual_seed(seed)
    if TEST_NUMPY:
        numpy.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
    """Context manager: snapshot the torch (and, if available, CUDA) RNG state
    and restore it on exit, so the wrapped code leaves the RNG untouched."""
    cpu_state = torch.get_rng_state()
    cuda_state = torch.cuda.get_rng_state() if torch.cuda.is_available() else None
    yield
    if cuda_state is not None:
        torch.cuda.set_rng_state(cuda_state)
    torch.set_rng_state(cpu_state)
def iter_indices(tensor):
    """Iterate over every index of *tensor*: plain ints for 1-D tensors,
    index tuples for higher ranks, and an empty range for 0-D tensors."""
    rank = tensor.dim()
    if rank == 0:
        return range(0)
    if rank == 1:
        return range(tensor.size(0))
    return product(*[range(extent) for extent in tensor.size()])
def is_iterable(obj):
    """Return True when *obj* supports iteration, False otherwise."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
class CudaNonDefaultStream:
    """Context manager that switches every CUDA device to a fresh non-default
    stream on entry and restores the previous streams on exit, so tests
    cannot accidentally rely on the default stream."""
    def __enter__(self):
        # Before starting CUDA test save currently active streams on all
        # CUDA devices and set new non default streams to all CUDA devices
        # to ensure CUDA tests do not use default stream by mistake.
        beforeDevice = torch.cuda.current_device()
        self.beforeStreams = []
        for d in range(torch.cuda.device_count()):
            self.beforeStreams.append(torch.cuda.current_stream(d))
            deviceStream = torch.cuda.Stream(device=d)
            torch._C._cuda_setStream(deviceStream._cdata)
        torch._C._cuda_setDevice(beforeDevice)
    def __exit__(self, exec_type, exec_value, traceback):
        # After completing CUDA test load previously active streams on all
        # CUDA devices.
        beforeDevice = torch.cuda.current_device()
        for d in range(torch.cuda.device_count()):
            torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
        torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck:
    """Context manager that records per-device CUDA memory usage on entry and
    asserts (or warns, on ROCm) on exit if any device's allocation grew —
    i.e. the wrapped test leaked CUDA tensors."""
    def __init__(self, testcase, name=None):
        self.name = testcase.id() if name is None else name
        self.testcase = testcase
        # initialize context & RNG to prevent false positive detections
        # when the test is the first to initialize those
        from common_cuda import initialize_cuda_context_rng
        initialize_cuda_context_rng()
    @staticmethod
    def get_cuda_memory_usage():
        # we don't need CUDA synchronize because the statistics are not tracked at
        # actual freeing, but at when marking the block as free.
        num_devices = torch.cuda.device_count()
        gc.collect()
        return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
    def __enter__(self):
        self.befores = self.get_cuda_memory_usage()
    def __exit__(self, exec_type, exec_value, traceback):
        # Don't check for leaks if an exception was thrown
        if exec_type is not None:
            return
        afters = self.get_cuda_memory_usage()
        for i, (before, after) in enumerate(zip(self.befores, afters)):
            if not TEST_WITH_ROCM:
                self.testcase.assertEqual(
                    before,
                    after,
                    "{} leaked {} bytes CUDA memory on device {}".format(
                        self.name, after - before, i
                    ),
                )
            else:
                # TODO: Investigate ROCm memory leaking.
                if before != after:
                    warnings.warn(
                        "{} leaked {} bytes ROCm memory on device {}".format(
                            self.name, after - before, i
                        ),
                        RuntimeWarning,
                    )
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
if hypothesis.version.__version_info__ >= (3, 56, 0):
hypothesis.settings.register_profile(
"pytorch_ci",
hypothesis.settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
verbosity=hypothesis.Verbosity.normal,
),
)
hypothesis.settings.register_profile(
"dev",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal,
),
)
hypothesis.settings.register_profile(
"debug",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose,
),
)
else:
hypothesis.settings.register_profile(
"pytorch_ci",
hypothesis.settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.normal,
),
)
hypothesis.settings.register_profile(
"dev",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.normal,
),
)
hypothesis.settings.register_profile(
"debug",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
min_satisfying_examples=1,
verbosity=hypothesis.Verbosity.verbose,
),
)
hypothesis.settings.load_profile(
"pytorch_ci"
if IS_PYTORCH_CI
else os.getenv("PYTORCH_HYPOTHESIS_PROFILE", "dev")
)
except ImportError:
print("Fail to import hypothesis in common_utils, tests are not derandomized")
class TestCase(expecttest.TestCase):
    """Base test class used across the test suite.

    Extends expecttest.TestCase with tensor-aware equality assertions
    (dense, sparse, and quantized), sparse-tensor construction helpers,
    optional CUDA memory-leak checking, non-default CUDA stream
    enforcement, warning assertions, and expect-file support.
    """

    # Default absolute tolerance used by assertEqual / assertNotEqual.
    precision = 1e-5
    # Disable unittest's diff truncation in failure messages.
    maxDiff = None
    # Class-level opt-in switches; a test method can opt out individually by
    # setting the attribute of the same name on itself (checked in __init__).
    _do_cuda_memory_leak_check = False
    _do_cuda_non_default_stream = False

    def __init__(self, method_name="runTest"):
        super(TestCase, self).__init__(method_name)
        test_method = getattr(self, method_name)
        # Wraps the tested method if we should do CUDA memory check.
        self._do_cuda_memory_leak_check &= getattr(
            test_method, "_do_cuda_memory_leak_check", True
        )
        # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
        if self._do_cuda_memory_leak_check and not IS_WINDOWS:
            self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
        # Wraps the tested method if we should enforce non default CUDA stream.
        self._do_cuda_non_default_stream &= getattr(
            test_method, "_do_cuda_non_default_stream", True
        )
        if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:
            self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)

    def assertLeaksNoCudaTensors(self, name=None):
        """Return a context manager asserting the wrapped code leaks no CUDA memory."""
        name = self.id() if name is None else name
        return CudaMemoryLeakCheck(self, name)

    def enforceNonDefaultStream(self):
        """Return a context manager that runs the wrapped code on a non-default CUDA stream."""
        return CudaNonDefaultStream()

    def wrap_with_cuda_policy(self, method_name, policy):
        """Wrap the named test method with `policy` when it looks like a CUDA test."""
        test_method = getattr(self, method_name)
        # the import below may initialize CUDA context, so we do it only if
        # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
        # is True.
        TEST_CUDA = torch.cuda.is_available()
        fullname = self.id().lower()  # class_name.method_name
        if TEST_CUDA and ("gpu" in fullname or "cuda" in fullname):
            setattr(
                self,
                method_name,
                self.wrap_method_with_cuda_policy(test_method, policy),
            )

    def wrap_method_with_cuda_policy(self, method, policy):
        """Return `method` wrapped so it runs inside the `policy` context manager."""
        # Assumes that `method` is the tested function in `self`.
        # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
        #       alive, so this cannot be done in setUp and tearDown because
        #       tearDown is run unconditionally no matter whether the test
        #       passes or not. For the same reason, we can't wrap the `method`
        #       call in try-finally and always do the check.
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with policy():
                method(*args, **kwargs)

        return types.MethodType(wrapper, self)

    def wrap_with_cuda_memory_check(self, method):
        """Convenience: wrap `method` with the CUDA memory-leak check policy."""
        return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)

    def setUp(self):
        """Skip fast tests when requested and seed all RNGs for determinism."""
        if TEST_SKIP_FAST:
            if not getattr(self, self._testMethodName).__dict__.get("slow_test", False):
                raise unittest.SkipTest(
                    "test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST"
                )
        set_rng_seed(SEED)

    def assertTensorsSlowEqual(self, x, y, prec=None, message=""):
        """Element-by-element tensor comparison (slow but index-precise)."""
        max_err = 0
        self.assertEqual(x.size(), y.size())
        for index in iter_indices(x):
            max_err = max(max_err, abs(x[index] - y[index]))
        self.assertLessEqual(max_err, prec, message)

    def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device="cpu"):
        """Build a random sparse COO tensor.

        Returns (tensor, indices_clone, values_clone); when `is_uncoalesced`
        the indices are duplicated so the tensor stays uncoalesced.
        """
        # Assert not given impossible combination, where the sparse dims have
        # empty numel, but nnz > 0 makes the indices containing values.
        assert (
            all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0
        ), "invalid arguments"
        v_size = [nnz] + list(size[sparse_dim:])
        v = torch.randn(*v_size, device=device)
        i = torch.rand(sparse_dim, nnz, device=device)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)
        if is_uncoalesced:
            v = torch.cat([v, torch.randn_like(v)], 0)
            i = torch.cat([i, i], 1)
        x = torch.sparse_coo_tensor(i, v, torch.Size(size))
        if not is_uncoalesced:
            x = x.coalesce()
        else:
            # FIXME: `x` is a sparse view of `v`. Currently rebase_history for
            #        sparse views is not implemented, so this workaround is
            #        needed for inplace operations done on `x`, e.g., copy_().
            #        Remove after implementing something equivalent to CopySlice
            #        for sparse views.
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
            x = x.detach().clone()
        return x, x._indices().clone(), x._values().clone()

    def safeToDense(self, t):
        """Coalesce (with verification) and densify a sparse tensor."""
        r = self.safeCoalesce(t)
        return r.to_dense()

    def safeCoalesce(self, t):
        """Coalesce `t` and cross-check against a Python-side reference coalescing."""
        tc = t.coalesce()
        self.assertEqual(tc.to_dense(), t.to_dense())
        self.assertTrue(tc.is_coalesced())
        # Our code below doesn't work when nnz is 0, because
        # then it's a 0D tensor, not a 2D tensor.
        if t._nnz() == 0:
            self.assertEqual(t._indices(), tc._indices())
            self.assertEqual(t._values(), tc._values())
            return tc
        # Reference coalescing: sum duplicate indices in a dict, then sort.
        value_map = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx.tolist())
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = (
                    val.clone() if isinstance(val, torch.Tensor) else val
                )
        new_indices = sorted(list(value_map.keys()))
        new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(new_values)
        else:
            new_values = torch.stack(new_values)
        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())
        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())
        if t.is_coalesced():
            self.assertEqual(tc._indices(), t._indices())
            self.assertEqual(tc._values(), t._values())
        return tg

    def assertEqual(self, x, y, prec=None, message="", allow_inf=False):
        """Tolerance-aware equality for tensors, numbers, and containers.

        Tensors are compared with max-abs-error <= prec; NaNs must match in
        location, and infs are allowed only when allow_inf=True. Sparse and
        quantized tensors are compared component-wise. Containers recurse.
        """
        # Legacy calling convention: assertEqual(x, y, "message").
        if isinstance(prec, str) and message == "":
            message = prec
            prec = None
        if prec is None:
            prec = self.precision
        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(
                x.item(), y, prec=prec, message=message, allow_inf=allow_inf
            )
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(
                x, y.item(), prec=prec, message=message, allow_inf=allow_inf
            )
        elif isinstance(x, torch.Tensor) and isinstance(y, numpy.bool_):
            self.assertEqual(
                x.item(), y, prec=prec, message=message, allow_inf=allow_inf
            )
        elif isinstance(y, torch.Tensor) and isinstance(x, numpy.bool_):
            self.assertEqual(
                x, y.item(), prec=prec, message=message, allow_inf=allow_inf
            )
        elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):

            def assertTensorsEqual(a, b):
                # Compares two dense tensors: same size, then max |a - b| <= prec.
                super(TestCase, self).assertEqual(a.size(), b.size(), message)
                if a.numel() > 0:
                    if a.device.type == "cpu" and (
                        a.dtype == torch.float16 or a.dtype == torch.bfloat16
                    ):
                        # CPU half and bfloat16 tensors don't have the methods we need below
                        a = a.to(torch.float32)
                    b = b.to(a)
                    if (a.dtype == torch.bool) != (b.dtype == torch.bool):
                        raise TypeError("Was expecting both tensors to be bool type.")
                    else:
                        if a.dtype == torch.bool and b.dtype == torch.bool:
                            # we want to respect precision but as bool doesn't support substraction,
                            # boolean tensor has to be converted to int
                            a = a.to(torch.int)
                            b = b.to(torch.int)
                        diff = a - b
                        if a.is_floating_point():
                            # check that NaNs are in the same locations
                            nan_mask = torch.isnan(a)
                            self.assertTrue(
                                torch.equal(nan_mask, torch.isnan(b)), message
                            )
                            diff[nan_mask] = 0
                            # inf check if allow_inf=True
                            if allow_inf:
                                inf_mask = torch.isinf(a)
                                inf_sign = inf_mask.sign()
                                self.assertTrue(
                                    torch.equal(inf_sign, torch.isinf(b).sign()),
                                    message,
                                )
                                diff[inf_mask] = 0
                        # TODO: implement abs on CharTensor (int8)
                        if diff.is_signed() and diff.dtype != torch.int8:
                            diff = diff.abs()
                        max_err = diff.max()
                        self.assertLessEqual(max_err, prec, message)

            super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message)
            super(TestCase, self).assertEqual(x.is_quantized, y.is_quantized, message)
            if x.is_sparse:
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                assertTensorsEqual(x._indices(), y._indices())
                assertTensorsEqual(x._values(), y._values())
            elif x.is_quantized and y.is_quantized:
                self.assertEqual(
                    x.qscheme(),
                    y.qscheme(),
                    prec=prec,
                    message=message,
                    allow_inf=allow_inf,
                )
                if x.qscheme() == torch.per_tensor_affine:
                    self.assertEqual(
                        x.q_scale(),
                        y.q_scale(),
                        prec=prec,
                        message=message,
                        allow_inf=allow_inf,
                    )
                    self.assertEqual(
                        x.q_zero_point(),
                        y.q_zero_point(),
                        prec=prec,
                        message=message,
                        allow_inf=allow_inf,
                    )
                elif x.qscheme() == torch.per_channel_affine:
                    self.assertEqual(
                        x.q_per_channel_scales(),
                        y.q_per_channel_scales(),
                        prec=prec,
                        message=message,
                        allow_inf=allow_inf,
                    )
                    self.assertEqual(
                        x.q_per_channel_zero_points(),
                        y.q_per_channel_zero_points(),
                        prec=prec,
                        message=message,
                        allow_inf=allow_inf,
                    )
                    self.assertEqual(
                        x.q_per_channel_axis(),
                        y.q_per_channel_axis(),
                        prec=prec,
                        message=message,
                    )
                self.assertEqual(x.dtype, y.dtype)
                self.assertEqual(
                    x.int_repr().to(torch.int32),
                    y.int_repr().to(torch.int32),
                    prec=prec,
                    message=message,
                    allow_inf=allow_inf,
                )
            else:
                assertTensorsEqual(x, y)
        elif isinstance(x, str) and isinstance(y, str):
            super(TestCase, self).assertEqual(x, y, message)
        elif type(x) == set and type(y) == set:
            super(TestCase, self).assertEqual(x, y, message)
        elif isinstance(x, dict) and isinstance(y, dict):
            if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
                self.assertEqual(
                    x.items(),
                    y.items(),
                    prec=prec,
                    message=message,
                    allow_inf=allow_inf,
                )
            else:
                self.assertEqual(
                    set(x.keys()),
                    set(y.keys()),
                    prec=prec,
                    message=message,
                    allow_inf=allow_inf,
                )
                key_list = list(x.keys())
                self.assertEqual(
                    [x[k] for k in key_list],
                    [y[k] for k in key_list],
                    prec=prec,
                    message=message,
                    allow_inf=allow_inf,
                )
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertEqual(len(x), len(y), message)
            for x_, y_ in zip(x, y):
                self.assertEqual(
                    x_, y_, prec=prec, message=message, allow_inf=allow_inf
                )
        elif isinstance(x, bool) and isinstance(y, bool):
            super(TestCase, self).assertEqual(x, y, message)
        elif isinstance(x, Number) and isinstance(y, Number):
            if abs(x) == inf or abs(y) == inf:
                if allow_inf:
                    super(TestCase, self).assertEqual(x, y, message)
                else:
                    self.fail(
                        "Expected finite numeric values - x={}, y={}".format(x, y)
                    )
                return
            super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
        else:
            super(TestCase, self).assertEqual(x, y, message)

    def assertAlmostEqual(
        self, x, y, places=None, msg=None, delta=None, allow_inf=None
    ):
        """unittest-style assertAlmostEqual routed through tensor-aware assertEqual."""
        prec = delta
        if places:
            prec = 10 ** (-places)
        self.assertEqual(x, y, prec, msg, allow_inf)

    def assertNotEqual(self, x, y, prec=None, message=""):
        """Tolerance-aware inequality: fails only when values agree within prec."""
        # Legacy calling convention: assertNotEqual(x, y, "message").
        if isinstance(prec, str) and message == "":
            message = prec
            prec = None
        if prec is None:
            prec = self.precision
        if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            self.assertGreater(x.numel(), 0)
            y = y.type_as(x)
            y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
            nan_mask = x != x
            if torch.equal(nan_mask, y != y):
                diff = x - y
                if diff.is_signed():
                    diff = diff.abs()
                diff[nan_mask] = 0
                # Use `item()` to work around:
                # https://github.com/pytorch/pytorch/issues/22301
                max_err = diff.max().item()
                self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except (TypeError, AssertionError):
                pass
            super(TestCase, self).assertNotEqual(x, y, message)

    def assertObjectIn(self, obj, iterable):
        """Assert `obj` (by identity, not equality) is an element of `iterable`."""
        for elem in iterable:
            if id(obj) == id(elem):
                return
        raise AssertionError("object not found in iterable")

    # TODO: Support context manager interface
    # NB: The kwargs forwarding to callable robs the 'subname' parameter.
    #     If you need it, manually apply your callable in a lambda instead.
    def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
        """Assert `callable` raises `exc_type` and its message matches the expect file."""
        subname = None
        if "subname" in kwargs:
            subname = kwargs["subname"]
            del kwargs["subname"]
        try:
            callable(*args, **kwargs)
        except exc_type as e:
            self.assertExpected(str(e), subname)
            return
        # Don't put this in the try block; the AssertionError will catch it
        self.fail(msg="Did not raise when expected to")

    def assertWarns(self, callable, msg=""):
        r"""
        Test if :attr:`callable` raises a warning.
        """
        with self._reset_warning_registry(), warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            callable()
            self.assertTrue(len(ws) > 0, msg)

    def assertWarnsRegex(self, callable, regex, msg=""):
        r"""
        Test if :attr:`callable` raises any warning with message that contains
        the regex pattern :attr:`regex`.
        """
        with self._reset_warning_registry(), warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            callable()
            self.assertTrue(len(ws) > 0, msg)
            found = any(re.search(regex, str(w.message)) is not None for w in ws)
            self.assertTrue(found, msg)

    @contextmanager
    def _reset_warning_registry(self):
        r"""
        warnings.catch_warnings() in Python 2 misses already registered
        warnings. We need to manually clear the existing warning registries to
        ensure catching warnings in a scope.
        """
        # Python 3 has no problem.
        if sys.version_info >= (3,):
            yield
            return
        # Backup and clear all existing warning registries.
        backup = {}
        for name, mod in list(sys.modules.items()):
            try:
                reg = mod.__warningregistry__
            except AttributeError:
                continue
            else:
                backup[name] = reg.copy()
                reg.clear()
        yield
        # Restore backed up warning registries.
        for name, reg_orig in backup.items():
            try:
                mod = sys.modules[name]
            except KeyError:
                continue
            try:
                reg = mod.__warningregistry__
            except AttributeError:
                mod.__warningregistry__ = reg_orig
            else:
                reg.clear()
                reg.update(reg_orig)

    def assertExpected(self, s, subname=None):
        r"""
        Test that a string matches the recorded contents of a file
        derived from the name of this test and subname. This file
        is placed in the 'expect' directory in the same directory
        as the test script. You can automatically update the recorded test
        output using --accept.

        If you call this multiple times in a single function, you must
        give a unique subname each time.
        """
        if not (
            isinstance(s, str) or (sys.version_info[0] == 2 and isinstance(s, unicode))
        ):
            raise TypeError("assertExpected is strings only")

        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix) :]
            return text

        # NB: we take __file__ from the module that defined the test
        # class, so we place the expect directory where the test script
        # lives, NOT where test/common_utils.py lives. This doesn't matter in
        # PyTorch where all test scripts are in the same directory as
        # test/common_utils.py, but it matters in onnx-pytorch
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        expected_file = os.path.join(os.path.dirname(test_file), "expect", munged_id)
        subname_output = ""
        if subname:
            expected_file += "-" + subname
            subname_output = " ({})".format(subname)
        expected_file += ".expect"
        expected = None

        def accept_output(update_type):
            # Writes the current output as the new expectation (--accept mode).
            print(
                "Accepting {} for {}{}:\n\n{}".format(
                    update_type, munged_id, subname_output, s
                )
            )
            with open(expected_file, "w") as f:
                f.write(s)

        try:
            with open(expected_file) as f:
                expected = f.read()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            elif expecttest.ACCEPT:
                return accept_output("output")
            else:
                raise RuntimeError(
                    (
                        "I got this output for {}{}:\n\n{}\n\n"
                        "No expect file exists; to accept the current output, run:\n"
                        "python {} {} --accept"
                    ).format(munged_id, subname_output, s, __main__.__file__, munged_id)
                )
        # a hack for JIT tests
        if IS_WINDOWS:
            expected = re.sub(r"CppOp\[(.+?)\]", "CppOp[]", expected)
            s = re.sub(r"CppOp\[(.+?)\]", "CppOp[]", s)
        if expecttest.ACCEPT:
            if expected != s:
                return accept_output("updated output")
        else:
            if hasattr(self, "assertMultiLineEqual"):
                # Python 2.7 only
                # NB: Python considers lhs "old" and rhs "new".
                self.assertMultiLineEqual(expected, s)
            else:
                self.assertEqual(s, expected)

    # returns captured stderr
    @staticmethod
    def runWithPytorchAPIUsageStderr(code):
        import subprocess

        env = os.environ.copy()
        env["PYTORCH_API_USAGE_STDERR"] = "1"
        pipes = subprocess.Popen(
            [sys.executable, "-c", code],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
        )
        return pipes.communicate()[1].decode("ascii")

    if sys.version_info < (3, 2):
        # assertRegexpMatches renamed to assertRegex in 3.2
        assertRegex = unittest.TestCase.assertRegexpMatches
        # assertRaisesRegexp renamed to assertRaisesRegex in 3.2
        assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
    if sys.version_info < (3, 5):
        # assertNotRegexpMatches renamed to assertNotRegex in 3.5
        assertNotRegex = unittest.TestCase.assertNotRegexpMatches
class VerboseTestCase(TestCase):
    """TestCase with helpers for parsing oneDNN (dnnl) verbose-mode output
    lines, used to assert which reorder primitives a run executed."""

    def __init__(self, method_name="runTest"):
        # Intentionally starts the super() chain above TestCase, skipping
        # TestCase.__init__ (and its CUDA test-method wrapping).
        super(TestCase, self).__init__(method_name)

    def is_dnnl_verbose(self, line):
        """Return True if `line` is a well-formed dnnl_verbose record (11 fields)."""
        tokens = line.strip().split(",")
        return tokens[0] == "dnnl_verbose" and len(tokens) == 11

    def is_dnnl_reorder(self, line):
        """Return True if the dnnl_verbose record describes a reorder primitive."""
        assert self.is_dnnl_verbose(line)
        return line.strip().split(",")[3] == "reorder"

    def get_reorder_info(self, line):
        """Split a reorder record into (src_dtype, src_format, dst_dtype, dst_format)."""
        assert self.is_dnnl_reorder(line)
        tokens = line.split(",")
        src_desc, dst_desc = tokens[6].split(" ")
        src_dtype = src_desc.split("::")[0].split("_")
        src_format = src_desc.split("::")[1]
        dst_dtype = dst_desc.split("::")[0].split("_")
        dst_format = dst_desc.split("::")[1]
        return src_dtype, src_format, dst_dtype, dst_format

    def isPlainFormat(self, check_format):
        """Return True if the memory descriptor uses a plain (non-blocked) layout tag."""
        format_index = 0
        for check in check_format.split(":"):
            if check == "blocked":
                break
            format_index = format_index + 1
        # The layout tag is the field right after the "blocked" marker.
        # (Renamed from `format`, which shadowed the builtin.)
        fmt = check_format.split(":")[format_index + 1]
        # ref to https://spec.oneapi.io/versions/latest/elements/oneDNN/source/data_model/memory/formats.html#
        format_list = [
            "a",
            "ab",
            "ba",
            "acb",
            "abc",
            "bac",
            "cba",
            "bca",
            "abcd",
            "abdc",
            "acdb",
            "bacd",
            "bcda",
            "cdba",
            "dcab",
            "abcde",
            "abdec",
            "acbde",
            "acdeb",
            "bacde",
            "bcdea",
            "cdeba",
            "decab",
            "abcdef",
            "acbdef",
            "defcab",
        ]
        return fmt in format_list

    def RedundantReorder(self, line):
        """True for a reorder that changes neither dtype nor format."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        return src_dtype[1] == dst_dtype[1] and src_format == dst_format

    def ReorderForPack(self, line):
        """True for a same-dtype reorder into a blocked (prepacked) layout."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        if self.isPlainFormat(src_format) and self.isPlainFormat(
            dst_format
        ):  # for prepack, at least dst should be blocked format and not in the format list
            return False
        return src_dtype[1] == dst_dtype[1]

    def OnlyReorderDtype(self, line):
        """True for a reorder that changes only the data type."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        return src_dtype[1] != dst_dtype[1] and src_format == dst_format

    def OnlyReorderFormat(self, line):
        """True for a reorder that changes only the layout (excluding prepack reorders)."""
        if not self.is_dnnl_reorder(line):
            return False
        src_dtype, src_format, dst_dtype, dst_format = self.get_reorder_info(line)
        if self.isPlainFormat(src_format) and not self.isPlainFormat(
            dst_format
        ):  # reorder from plain format to blocked, should be prepack reorder
            return False
        return src_dtype[1] == dst_dtype[1] and src_format != dst_format

    def assertOnlyReorderDtype(self, line):
        # Bug fix: the original called the bare name `OnlyReorderDtype(line)`,
        # which is not defined at module scope and raised NameError; these are
        # bound methods and must be reached through `self`.
        assert self.OnlyReorderDtype(line), "the verbose msg shows not only reorder dtype"

    def assertOnlyReorderFormat(self, line):
        # Bug fix: ditto — `OnlyReorderFormat` must be called via `self`.
        assert self.OnlyReorderFormat(line), "the verbose msg shows not only reorder format"

    def assertNotReorder(self, line):
        # Bug fix: ditto — `is_dnnl_reorder` must be called via `self`.
        assert not self.is_dnnl_reorder(line)
def download_file(url, binary=True):
    """Download `url` into the writable test data directory and return the path.

    The file is cached: if it already exists locally it is returned as-is.
    On a URL error the test is skipped (raises unittest.SkipTest) after
    emitting a RuntimeWarning.
    """
    # Python 2 vs 3 import shims for urlsplit/urlopen/URLError.
    if sys.version_info < (3,):
        from urlparse import urlsplit
        import urllib2

        request = urllib2
        error = urllib2
    else:
        from urllib.parse import urlsplit
        from urllib import request, error
    # Cache under <this file's dir>/data, using the URL's basename.
    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), "data"))
    path = os.path.join(data_dir, filename)
    if os.path.exists(path):
        return path
    try:
        data = request.urlopen(url, timeout=15).read()
        with open(path, "wb" if binary else "w") as f:
            f.write(data)
        return path
    except error.URLError:
        # Network problems shouldn't fail the suite — skip the test instead.
        msg = "could not download test file '{}'".format(url)
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg)
def find_free_port():
    """Ask the OS for an ephemeral localhost TCP port and return its number.

    The probe socket is closed before returning, so the port is free for the
    caller to bind (subject to the usual race with other processes).
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        probe.bind(("localhost", 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
def retry_on_address_already_in_use_error(func):
    """Reruns a test if it sees "Address already in use" error."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Up to 10 attempts total; only the exact matching RuntimeError is
        # retried, with a short random back-off between attempts.
        for attempt in range(10):
            try:
                return func(*args, **kwargs)
            except RuntimeError as error:
                if str(error) != "Address already in use":
                    raise
                if attempt == 9:
                    raise
                time.sleep(random.random())

    return wrapper
# Methods for matrix generation
# Used in test_autograd.py and test_torch.py
def prod_single_zero(dim_size):
    """Return a random square matrix whose [0, 1] entry is zeroed out.

    Useful for exercising products whose gradient must cope with exactly one
    zero factor.
    """
    mat = torch.randn(dim_size, dim_size)
    mat[0, 1] = 0
    return mat
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device="cpu"):
    """Return a random l x l matrix whose rank is exactly `rank`.

    Works by zeroing all singular values past `rank` (and bumping any
    accidentally-zero retained singular value to 1).
    """
    assert rank <= l
    u, s, v = torch.randn(l, l, dtype=dtype, device=device).svd()
    for idx in range(l):
        if idx >= rank:
            s[idx] = 0
        elif s[idx] == 0:
            s[idx] = 1
    return u.mm(torch.diag(s)).mm(v.transpose(0, 1))
def random_symmetric_matrix(l, *batches, **kwargs):
    """Return a random (optionally batched) symmetric l x l matrix."""
    dtype = kwargs.get("dtype", torch.double)
    device = kwargs.get("device", "cpu")
    raw = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    # Symmetrize: averaging a matrix with its transpose is exactly symmetric.
    return (raw + raw.transpose(-2, -1)) / 2
def random_symmetric_psd_matrix(l, *batches, **kwargs):
    """Return a random (optionally batched) symmetric positive semi-definite matrix.

    Built as A @ A^T, which is PSD by construction.
    """
    dtype = kwargs.get("dtype", torch.double)
    device = kwargs.get("device", "cpu")
    base = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    return base @ base.transpose(-2, -1)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
    """Return a random (optionally batched) symmetric positive definite matrix.

    Built as A @ A^T plus a tiny multiple of the identity to guarantee strict
    positive definiteness.
    """
    dtype = kwargs.get("dtype", torch.double)
    device = kwargs.get("device", "cpu")
    base = torch.randn(
        *(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device
    )
    jitter = torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
    return base @ base.transpose(-2, -1) + jitter
def make_nonzero_det(A, sign=None, min_singular_value=0.1):
    """Return a copy of `A` adjusted so every (batched) determinant is nonzero.

    Singular values are clamped up to `min_singular_value` so the determinant
    cannot vanish. If `sign` is given, the first row of each matrix whose
    determinant disagrees in sign is negated, flipping the determinant's sign.
    """
    u, s, v = A.svd()
    s.clamp_(min=min_singular_value)
    A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))
    det = A.det()
    if sign is not None:
        if A.dim() == 2:
            det = det.item()
            if (det < 0) ^ (sign < 0):
                A[0, :].neg_()
        else:
            cond = ((det < 0) ^ (sign < 0)).nonzero()
            if cond.size(0) > 0:
                for i in range(cond.size(0)):
                    # Bug fix: index with a tuple so basic indexing yields a
                    # *view* of A. The previous list index performed advanced
                    # indexing, which returns a copy, so the in-place neg_()
                    # never touched A and the requested sign was not enforced
                    # for batched inputs.
                    A[tuple(cond[i])][0, :].neg_()
    return A
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims, **kwargs):
    """Return a random (batched) square matrix with singular values
    1/(n+1), 2/(n+1), ..., n/(n+1) — full rank with all values distinct.

    With silent=True and no LAPACK available, falls back to a matrix of ones.
    """
    dtype = kwargs.get("dtype", torch.double)
    device = kwargs.get("device", "cpu")
    silent = kwargs.get("silent", False)
    if silent and not torch._C.has_lapack:
        return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
    gauss = torch.randn(
        batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device
    )
    u, _, v = gauss.svd()
    # Fixed, strictly increasing spectrum k/(n+1) for k = 1..n.
    spectrum = torch.arange(1.0, matrix_size + 1, dtype=dtype, device=device)
    sigma = torch.diag(spectrum * (1.0 / (matrix_size + 1)))
    sigma = sigma.expand(batch_dims + (matrix_size, matrix_size))
    return u.matmul(sigma.matmul(v.transpose(-2, -1)))
def random_matrix(rows, columns, *batch_dims, **kwargs):
    """Return a random (batched) rows x columns matrix with a fixed spectrum.

    Singular values are (i+1)/(k+1) for i < k = min(rows, columns). With
    singular=True the last (and, when k > 2, also the first) singular value is
    zeroed so LU pivoting is exercised non-trivially. With silent=True and no
    LAPACK available, falls back to a matrix of ones.
    """
    dtype = kwargs.get("dtype", torch.double)
    device = kwargs.get("device", "cpu")
    silent = kwargs.get("silent", False)
    singular = kwargs.get("singular", False)
    if silent and not torch._C.has_lapack:
        return torch.ones(rows, columns, dtype=dtype, device=device)
    gauss = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
    u, _, v = gauss.svd(some=False)
    k = min(rows, columns)
    sigma = torch.zeros(rows, columns, dtype=dtype, device=device)
    for i in range(k):
        sigma[i, i] = (i + 1) / (k + 1)
    if singular:
        # make matrix singular
        sigma[k - 1, k - 1] = 0
        if k > 2:
            # increase the order of singularity so that the pivoting
            # in LU factorization will be non-trivial
            sigma[0, 0] = 0
    return u.matmul(
        sigma.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1))
    )
def brute_pdist(inp, p=2):
    """Computes the same as torch.pdist using primitives"""
    n = inp.shape[-2]
    num_pairs = n * (n - 1) // 2
    if num_pairs == 0:
        # torch complains about empty indices
        return torch.empty(inp.shape[:-2] + (0,), dtype=inp.dtype, device=inp.device)
    # Full n x n pairwise distance matrix via broadcasting, flattened.
    all_dists = torch.norm(inp[..., None, :] - inp[..., None, :, :], p=p, dim=-1)
    flat = all_dists.reshape(all_dists.shape[:-2] + (n * n,))
    # Flat positions of the strict upper triangle (i < j), row-major —
    # the same ordering torch.pdist uses.
    upper = torch.tensor(
        [i * n + j for i in range(n) for j in range(i + 1, n)],
        device=inp.device,
    )
    return flat[..., upper]
def brute_cdist(x, y, p=2):
    """Reference implementation of torch.cdist built from broadcasting primitives."""
    rows_x = x.shape[-2]
    rows_y = y.shape[-2]
    if rows_x == 0 or rows_y == 0:
        # torch.norm on an empty broadcast is awkward; return the empty result directly.
        return torch.empty(rows_x, rows_y, device=x.device)
    return torch.norm(x[..., None, :] - y[..., None, :, :], p=p, dim=-1)
def do_test_dtypes(self, dtypes, layout, device):
    """For each non-half dtype, check torch.zeros honors dtype/layout/device.

    `self` is the running TestCase (supplies assertIs/assertEqual).
    """
    for dt in dtypes:
        if dt == torch.float16:
            continue
        out = torch.zeros((2, 3), dtype=dt, layout=layout, device=device)
        self.assertIs(dt, out.dtype)
        self.assertIs(layout, out.layout)
        self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
    """Exercise torch.empty/torch.full factory variants for each dtype.

    For every dtype (and both requires_grad settings where the dtype allows
    it) checks torch.empty, Tensor.new_empty, torch.empty_like, torch.full,
    Tensor.new_full and torch.full_like, verifying shape, dtype, layout,
    device, fill value, and requires_grad. `self` is the running TestCase.
    """
    shape = torch.Size([2, 3])

    def check_value(tensor, dtype, layout, device, value, requires_grad):
        # Verifies one produced tensor against the expected properties;
        # `value` is the expected fill value (None for empty tensors).
        self.assertEqual(shape, tensor.shape)
        self.assertIs(dtype, tensor.dtype)
        self.assertIs(layout, tensor.layout)
        self.assertEqual(tensor.requires_grad, requires_grad)
        if tensor.is_cuda and device is not None:
            self.assertEqual(device, tensor.device)
        if value is not None:
            fill = tensor.new(shape).fill_(value)
            self.assertEqual(tensor, fill)

    def get_int64_dtype(dtype):
        # Maps a dtype to the int64 dtype in the same torch submodule
        # (e.g. torch.cuda.float32 -> torch.cuda.int64), so the
        # dtype-override variants are tested with a matching namespace.
        module = ".".join(str(dtype).split(".")[1:-1])
        if not module:
            return torch.int64
        return operator.attrgetter(module)(torch).int64

    default_dtype = torch.get_default_dtype()
    check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
    check_value(torch.full(shape, -5), default_dtype, torch.strided, -1, None, False)
    for dtype in dtypes:
        # {dtype.is_floating_point, False}: floating dtypes are tested with
        # requires_grad both True and False; integral dtypes only with False.
        for rg in {dtype.is_floating_point, False}:
            int64_dtype = get_int64_dtype(dtype)
            v = torch.empty(
                shape, dtype=dtype, device=device, layout=layout, requires_grad=rg
            )
            check_value(v, dtype, layout, device, None, rg)
            out = v.new()
            check_value(
                torch.empty(
                    shape, out=out, device=device, layout=layout, requires_grad=rg
                ),
                dtype,
                layout,
                device,
                None,
                rg,
            )
            check_value(v.new_empty(shape), dtype, layout, device, None, False)
            check_value(
                v.new_empty(
                    shape, dtype=int64_dtype, device=device, requires_grad=False
                ),
                int64_dtype,
                layout,
                device,
                None,
                False,
            )
            check_value(torch.empty_like(v), dtype, layout, device, None, False)
            check_value(
                torch.empty_like(
                    v,
                    dtype=int64_dtype,
                    layout=layout,
                    device=device,
                    requires_grad=False,
                ),
                int64_dtype,
                layout,
                device,
                None,
                False,
            )
            # torch.full is not supported for float16 or sparse layouts here.
            if dtype is not torch.float16 and layout != torch.sparse_coo:
                fv = 3
                v = torch.full(
                    shape,
                    fv,
                    dtype=dtype,
                    layout=layout,
                    device=device,
                    requires_grad=rg,
                )
                check_value(v, dtype, layout, device, fv, rg)
                check_value(
                    v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False
                )
                out = v.new()
                check_value(
                    torch.full(
                        shape,
                        fv + 2,
                        out=out,
                        device=device,
                        layout=layout,
                        requires_grad=rg,
                    ),
                    dtype,
                    layout,
                    device,
                    fv + 2,
                    rg,
                )
                check_value(
                    v.new_full(
                        shape,
                        fv + 3,
                        dtype=int64_dtype,
                        device=device,
                        requires_grad=False,
                    ),
                    int64_dtype,
                    layout,
                    device,
                    fv + 3,
                    False,
                )
                check_value(
                    torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False
                )
                check_value(
                    torch.full_like(
                        v,
                        fv + 5,
                        dtype=int64_dtype,
                        layout=layout,
                        device=device,
                        requires_grad=False,
                    ),
                    int64_dtype,
                    layout,
                    device,
                    fv + 5,
                    False,
                )
# True when running under Facebook's Sandcastle CI environment.
IS_SANDCASTLE = (
    os.getenv("SANDCASTLE") == "1" or os.getenv("TW_JOB_USER") == "sandcastle"
)

# Test names known to be prohibitively slow; runners can consult this set to
# skip them in time-constrained environments.
THESE_TAKE_WAY_TOO_LONG = {
    "test_Conv3d_groups",
    "test_conv_double_backward",
    "test_conv_double_backward_groups",
    "test_Conv3d_dilated",
    "test_Conv3d_stride_padding",
    "test_Conv3d_dilated_strided",
    "test_Conv3d",
    "test_Conv2d_dilated",
    "test_ConvTranspose3d_dilated",
    "test_ConvTranspose2d_dilated",
    "test_snli",
    "test_Conv2d",
    "test_Conv2d_padding",
    "test_ConvTranspose2d_no_bias",
    "test_ConvTranspose2d",
    "test_ConvTranspose3d",
    "test_Conv2d_no_bias",
    "test_matmul_4d_4d",
    "test_multinomial_invalid_probs",
}
# Absolute path of the script being executed, or None if it wasn't a .py file.
running_script_path = None


def set_running_script_path():
    """Record the absolute, symlink-resolved path of the running script.

    Leaves the global untouched (best-effort) when sys.argv[0] is not a
    Python script or the path cannot be resolved.
    """
    global running_script_path
    try:
        candidate = os.path.abspath(os.path.realpath(sys.argv[0]))
        if candidate.endswith(".py"):  # skip if the running file is not a script
            running_script_path = candidate
    except Exception:
        pass
def check_test_defined_in_running_script(test_case):
    """Assert that `test_case`'s class is defined in the running script itself.

    No-op when no running script path was recorded. Guards against
    accidentally importing a TestCase from another file, which would make it
    run twice.
    """
    if running_script_path is None:
        return
    defining_file = os.path.abspath(
        os.path.realpath(inspect.getfile(test_case.__class__))
    )
    assert defining_file == running_script_path, (
        'Class of loaded TestCase "{}" '
        'is not defined in the running script "{}", but in "{}". Did you '
        "accidentally import a unittest.TestCase from another file?".format(
            test_case.id(), running_script_path, defining_file
        )
    )
def load_tests(loader, tests, pattern):
    """unittest `load_tests` hook: flatten test groups into one suite,
    verifying each test is defined in the running script."""
    set_running_script_path()
    suite = unittest.TestSuite()
    for group in tests:
        for case in group:
            check_test_defined_in_running_script(case)
            suite.addTest(case)
    return suite
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs))
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
# Maps dtype -> default comparison tolerance for legacy call sites.
dtype2prec_DONTUSE = {
    torch.float: 1e-5,
    torch.double: 1e-5,
    torch.half: 1e-2,
    torch.bfloat16: 1e-1,
}
# using data to do calibration for model and saving int8 configs at dir
def int8_calibration(model, data, dir):
    """Run `model` over `data` in ipex int8 calibration mode, then save the
    resulting quantization configuration to `dir`."""
    conf = ipex.AmpConf(torch.int8)
    with torch.no_grad():
        for sample in data:
            with ipex.AutoMixPrecision(conf, running_mode="calibration"):
                model(sample)
    conf.save(dir)
class TestModule(torch.nn.Module):
    """Small mixed model (conv + batchnorm + linear + embeddingbag) used to
    exercise optimizer/prepack paths; `self.input` holds a ready-made
    argument tuple for forward()."""

    def __init__(self, has_sparse_grad=False):
        super(TestModule, self).__init__()
        self.linear = torch.nn.Linear(5, 49)
        self.conv = torch.nn.Conv2d(1, 10, 5, 1)
        self.bn = torch.nn.BatchNorm2d(num_features=10)
        self.embeddingbag = torch.nn.EmbeddingBag(
            10, 49, mode="sum", sparse=has_sparse_grad
        )
        self.input = (
            torch.ones(10, 1, 5, 5),
            torch.ones(10, 5),
            torch.arange(0, 10).long(),
            torch.arange(0, 10).long(),
        )

    def forward(self, x, y, indices, offsets):
        conv_out = self.bn(self.conv(x))
        lin_out = self.linear(y)
        bag_out = self.embeddingbag(indices, offsets)
        return conv_out + lin_out + bag_out

    def attach_grad(self, dtype=torch.float):
        # Instead of .sum().backward(), attach grad to parameters directly can
        # help to check optimizer's correctness
        self.linear.weight.grad = torch.ones(self.linear.weight.shape).to(dtype)
        self.set_zeros_for_packed_weight(self.linear.weight)
        self.linear.bias.grad = torch.ones(self.linear.bias.shape).to(dtype)
        self.conv.weight.grad = torch.ones(self.conv.weight.shape).to(dtype)
        self.set_zeros_for_packed_weight(self.conv.weight)
        self.conv.bias.grad = torch.ones(self.conv.bias.shape).to(dtype)
        self.embeddingbag.weight.grad = torch.ones(self.embeddingbag.weight.shape).to(
            dtype
        )
        self.bn.weight.grad = torch.ones(self.bn.weight.shape)

    def set_zeros_for_packed_weight(self, weight):
        # Entries that are zero in a (possibly prepacked) weight are padding;
        # zero the corresponding grad entries so they stay untouched.
        storage = weight.storage()
        grad_storage = weight.grad.storage()
        for idx in range(len(storage)):
            if storage[idx] == 0:
                grad_storage[idx] = 0
def _empty_weight_bias_parameter_names(prefixes):
param_names = [
"_ipex_module_empty_weight_tensor",
"_ipex_module_empty_bias_tensor",
]
return [
f"{prefix}.{param_name}"
for prefix, param_name in itertools.product(prefixes, param_names)
]
| 61,950 | 34.000565 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_deepspeed.py | import sys
import os
import unittest
import torch
import torch.nn as nn
from torch.testing._internal.common_utils import TestCase
import intel_extension_for_pytorch as ipex
from intel_extension_for_pytorch.nn.utils._weight_prepack import (
may_import_deepspeed_modules,
_IPEXLinear,
_IPEXLinearAllreduce,
)
from intel_extension_for_pytorch.quantization import prepare, convert
from intel_extension_for_pytorch.quantization._quantize import (
DynamicQuantizedLinearLayer,
DynamicQuantizedLinearAllreduce,
)
from test_weight_prepack import module_found
class MyAttention(nn.Module):
    """Toy attention block: a 4->4 query projection followed by a 4->2 output
    projection. For deepspeed support, please do not change the names of the
    `q_proj` / `out_proj` attributes — deepspeed's auto-replacement matches
    on them."""

    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(4, 4)
        self.out_proj = nn.Linear(4, 2)

    def forward(self, x):
        projected = self.q_proj(x)
        return self.out_proj(projected)
class MyBlock(nn.Module):
    """Transformer-style block wrapping a single MyAttention submodule
    (the `attn` attribute name is relied on by deepspeed's matching)."""

    def __init__(self):
        super().__init__()
        self.attn = MyAttention()

    def forward(self, x):
        return self.attn(x)
# For deepspeed support, please do not change the name of the class.
class MyModel(nn.Module):
    """Applies each MyBlock in sequence; deepspeed's auto replacement relies
    on the ``linears`` ModuleList structure."""

    def __init__(self):
        super().__init__()
        # For deepspeed support, please do not change the ModuleList structure of the class.
        self.linears = nn.ModuleList([MyBlock()])

    def forward(self, x):
        for block in self.linears:
            x = block(x)
        return x
# The class DeepSpeedTestM is written for deepspeed to recognize the modules and to be functional.
# Please do not change it.
class DeepSpeedTestM(nn.Module):
    """Top-level model handed to deepspeed.init_inference in these tests."""

    def __init__(self):
        super().__init__()
        self.linear = MyModel()

    def forward(self, x):
        return self.linear(x)
class DeepspeedTester(TestCase):
    """Checks that deepspeed tensor-parallel replacement modules survive
    ipex.optimize and IPEX dynamic quantization.

    These tests are effectively no-ops unless deepspeed is installed and the
    file is launched through the deepspeed launcher with WORLD_SIZE > 1.
    """

    def _get_ds_model(self, m_linear):
        """Run deepspeed auto tensor-parallel replacement over ``m_linear``
        and return the replaced module."""
        # Imported lazily so this file stays importable without deepspeed.
        import deepspeed

        ds_world_size = int(os.getenv("WORLD_SIZE", "1"))
        assert (
            ds_world_size > 1
        ), "expect ds_world_size > 1, you could try launching the script with: \
            deepspeed --num_gpus 2 --bind_cores_to_rank tests/cpu/test_deepspeed.py"
        engine = deepspeed.init_inference(
            model=m_linear,
            mp_size=ds_world_size,
            dtype=torch.float32,
            replace_method="auto",
        )
        ds_model = engine.module
        return ds_model

    def test_ipex_optimize(self):
        """deepspeed's LinearLayer/LinearAllreduce must be rewrapped by
        ipex.optimize into _IPEXLinear/_IPEXLinearAllreduce with unchanged
        numerics (eager and jit-frozen)."""
        deepspeed_modules = may_import_deepspeed_modules()
        if deepspeed_modules is not None:
            LinearAllreduce, LinearLayer = deepspeed_modules
            x = torch.randn(2, 4)
            m_linear = DeepSpeedTestM().eval()
            # fp32 eager reference, computed before any replacement.
            y = m_linear(x)

            ds_model = self._get_ds_model(m_linear)
            # deepspeed auto replacement swapped in its tensor-parallel modules.
            self.assertTrue(module_found(ds_model, LinearLayer))
            self.assertTrue(module_found(ds_model, LinearAllreduce))

            optimized = ipex.optimize(ds_model.eval(), inplace=True)
            jit_optimized = torch.jit.trace(optimized, x)
            jit_optimized = torch.jit.freeze(jit_optimized)
            # ipex.optimize must rewrap the deepspeed modules with IPEX kernels.
            self.assertTrue(module_found(optimized, _IPEXLinear))
            self.assertTrue(module_found(optimized, _IPEXLinearAllreduce))

            optimized = optimized(x)
            jit_res = jit_optimized(x)
            # Both the eager-optimized and jit-frozen paths match the reference.
            self.assertEqual(y, jit_res)
            self.assertEqual(y, optimized)

    def test_dynamic_quantization(self):
        """Dynamic quantization of the deepspeed-replaced model must produce
        DynamicQuantizedLinearLayer/-Allreduce and stay numerically close."""
        deepspeed_modules = may_import_deepspeed_modules()
        if deepspeed_modules is not None:
            LinearAllreduce, LinearLayer = deepspeed_modules
            x = torch.randn(2, 4)
            m_linear = DeepSpeedTestM().eval()
            # fp32 eager reference, computed before any replacement.
            y = m_linear(x)

            ds_model = self._get_ds_model(m_linear)
            self.assertTrue(module_found(ds_model, LinearLayer))
            self.assertTrue(module_found(ds_model, LinearAllreduce))

            dynamic_qconfig = ipex.quantization.default_dynamic_qconfig
            # bn_folding=False: the model has no conv+bn pairs to fold.
            prepared_model = prepare(
                ds_model,
                dynamic_qconfig,
                example_inputs=(x),
                inplace=True,
                bn_folding=False,
            )
            converted = convert(prepared_model, inplace=True)
            self.assertTrue(module_found(converted, DynamicQuantizedLinearLayer))
            self.assertTrue(module_found(converted, DynamicQuantizedLinearAllreduce))

            quantized = converted(x)
            # Loose tolerance: dynamic int8 quantization perturbs the result.
            self.assertEqual(y, quantized, atol=0.005, rtol=1.3e-6)
if __name__ == "__main__":
    deepspeed_modules = may_import_deepspeed_modules()
    # The deepspeed launcher invokes this file as
    # `python -u tests/cpu/test_deepspeed.py --local_rank=xx`; strip the
    # trailing --local_rank argument so unittest.main() can parse argv.
    if deepspeed_modules is not None and len(sys.argv) > 1:
        local_rank = sys.argv.pop()
    test = unittest.main()
| 4,913 | 32.202703 | 110 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_weight_cast.py | import unittest
import copy
import torch
from intel_extension_for_pytorch.nn.utils._weight_cast import (
weight_dtype_convert_with_ipex as cast,
)
from intel_extension_for_pytorch.nn.utils._parameter_wrapper import (
IPEX_WEIGHT_CONVERT_MODULE_CPU as IPEX_WEIGHT_CONVERT_MODULE_CPU,
)
from intel_extension_for_pytorch.optim._optimizer_utils import (
IPEX_FUSED_OPTIMIZER_LIST_CPU as IPEX_FUSED_OPTIMIZER_LIST,
)
from intel_extension_for_pytorch.nn.modules import MergedEmbeddingBag
from torch.testing._internal.common_utils import TestCase
from torch.optim import (
Adadelta,
Adagrad,
Adam,
AdamW,
Adamax,
ASGD,
RMSprop,
Rprop,
SGD,
)
class TestModule(torch.nn.Module):
    """Holds one instance of every layer type the IPEX weight-cast pass may
    touch; forward only exercises conv2d — the other layers exist purely so
    the cast pass sees their parameters."""

    def __init__(self):
        super(TestModule, self).__init__()
        self.linear = torch.nn.Linear(5, 10)
        self.conv1d = torch.nn.Conv1d(1, 10, 5, 1)
        self.conv2d = torch.nn.Conv2d(1, 10, 5, 1)
        self.conv3d = torch.nn.Conv3d(1, 10, 5, 1)
        self.transpose_conv1d = torch.nn.ConvTranspose1d(1, 10, 5, 1)
        self.transpose_conv2d = torch.nn.ConvTranspose2d(1, 10, 5, 1)
        self.transpose_conv3d = torch.nn.ConvTranspose3d(1, 10, 5, 1)
        self.bn = torch.nn.BatchNorm2d(num_features=10)
        self.embeddingbag = torch.nn.EmbeddingBag(10, 3, mode="sum")
        self.embedding = torch.nn.Embedding(10, 3)
        eb_mean = torch.nn.EmbeddingBag(100, 16, mode="mean", sparse=False)
        eb_sum = torch.nn.EmbeddingBag(50, 32, mode="sum", sparse=False)
        self.merged = MergedEmbeddingBag.from_embeddingbag_list([eb_mean, eb_sum])

    def forward(self, x):
        self.conv2d(x)
        return None
class TestWeightCastCases(TestCase):
    """Verifies weight_dtype_convert_with_ipex: modules on the IPEX cast list
    get low-precision (bf16/fp16) working weights backed either by an fp32
    master copy or a split bf16 trail, while every other module keeps plain
    fp32 parameters that are shared with the optimizer."""

    def is_master_weight_solution(self, module, dtype, split_master_weight):
        # Module uses the "master weight" scheme: low-precision working copy
        # plus a full fp32 master parameter held by the optimizer.
        return (
            type(module) in IPEX_WEIGHT_CONVERT_MODULE_CPU(False, dtype)
            and not split_master_weight
        )

    def is_master_weight_split_solution(self, module, split_master_weight):
        # bf16-only scheme: the fp32 master is split into the bf16 parameter
        # (top half) and a bf16 "trail" carrying the low-order bits.
        return (
            type(module) in IPEX_WEIGHT_CONVERT_MODULE_CPU(False, torch.bfloat16)
            and split_master_weight
        )

    def is_fp32_weight_solution(self, module, dtype):
        # Module is not on the cast list; its parameters must stay fp32.
        return type(module) not in IPEX_WEIGHT_CONVERT_MODULE_CPU(False, dtype)

    def master_weight_test(
        self, m, param_id, cast_dtype, optimizer_params_list, params_attr
    ):
        """Check the master-weight invariants for every parameter of ``m``.

        ``param_id`` indexes into the optimizer's (flat) parameter list and is
        advanced for each checked parameter; the updated value is returned so
        the caller can continue the walk.
        """

        def found_wrapper(parameter, params_attr):
            # Locate the ParameterWrapper whose low-precision tensor is
            # exactly this parameter (identity, not equality).
            for _, v in params_attr.items():
                if parameter is v.parameter:
                    return v
            # not found
            self.assertTrue(False)

        for name, param in m.named_parameters():
            # Only direct attributes of m (skip parameters of child modules,
            # whose dotted names fail hasattr).
            if hasattr(m, name):
                param_wrapper = found_wrapper(param, params_attr)
                # The optimizer holds the fp32 master; the module holds the cast copy.
                self.assertTrue(param_wrapper.master_parameter.dtype == torch.float)
                self.assertTrue(
                    param_wrapper.master_parameter is optimizer_params_list[param_id]
                )
                self.assertTrue(param_wrapper.parameter.dtype == cast_dtype)
                self.assertTrue(param_wrapper.parameter is getattr(m, name))
                param_id += 1
        return param_id

    def master_weight_split_test(
        self, m, param_id, cast_dtype, optimizer_params_list, params_attr
    ):
        """Check the split-master-weight invariants (bf16 parameter + bf16
        trail) for every direct parameter of ``m``; returns the advanced
        ``param_id``."""
        for name, param in m.named_parameters():
            if hasattr(m, name):
                param_wrapper = params_attr[param]
                # In the split scheme the optimizer holds the bf16 parameter
                # itself; the trail keeps the discarded low-order bits.
                self.assertTrue(param_wrapper.parameter.dtype == torch.bfloat16)
                self.assertTrue(
                    param_wrapper.parameter is optimizer_params_list[param_id]
                )
                self.assertTrue(param_wrapper.parameter_trail.dtype == torch.bfloat16)
                self.assertTrue(param_wrapper.parameter is getattr(m, name))
                param_id += 1
        return param_id

    def test_weight_cast(self):
        """Cast TestModule under every optimizer x dtype x split combination
        and verify which scheme each submodule ends up in, plus that
        state_dict() still round-trips as fp32."""
        M = TestModule()
        for pt_opt in [
            Adagrad,
            Adadelta,
            Adam,
            AdamW,
            Adamax,
            ASGD,
            RMSprop,
            Rprop,
            SGD,
        ]:
            for cast_dtype in [torch.bfloat16, torch.float16]:
                for split_master_weight_for_bf16 in [True, False]:
                    # Splitting is only supported for fused optimizers and bf16.
                    if (
                        pt_opt not in IPEX_FUSED_OPTIMIZER_LIST
                        or cast_dtype == torch.float16
                    ):
                        split_master_weight_for_bf16 = False
                    model = copy.deepcopy(M)
                    optimizer = pt_opt(model.parameters(), lr=0.01)
                    model, opt, params_attr = cast(
                        model, optimizer, {}, split_master_weight_for_bf16, cast_dtype
                    )
                    optimizer_params_list = opt.param_groups[0]["params"]
                    # Walk the children in order; param_id tracks the position
                    # in the optimizer's flat parameter list.
                    param_id = 0
                    for _, sub_m in model.named_children():
                        if self.is_master_weight_solution(
                            sub_m, cast_dtype, split_master_weight_for_bf16
                        ):
                            param_id = self.master_weight_test(
                                sub_m,
                                param_id,
                                cast_dtype,
                                optimizer_params_list,
                                params_attr,
                            )
                            # ParameterList children (e.g. inside MergedEmbeddingBag)
                            # carry their own parameters and are checked too.
                            for name, ssub_m in sub_m.named_children():
                                if isinstance(ssub_m, torch.nn.ParameterList):
                                    param_id = self.master_weight_test(
                                        ssub_m,
                                        param_id,
                                        cast_dtype,
                                        optimizer_params_list,
                                        params_attr,
                                    )
                        elif self.is_master_weight_split_solution(
                            sub_m, split_master_weight_for_bf16
                        ):
                            param_id = self.master_weight_split_test(
                                sub_m,
                                param_id,
                                cast_dtype,
                                optimizer_params_list,
                                params_attr,
                            )
                            for name, ssub_m in sub_m.named_children():
                                if isinstance(ssub_m, torch.nn.ParameterList):
                                    param_id = self.master_weight_split_test(
                                        ssub_m,
                                        param_id,
                                        cast_dtype,
                                        optimizer_params_list,
                                        params_attr,
                                    )
                        else:
                            self.assertTrue(
                                self.is_fp32_weight_solution(sub_m, cast_dtype)
                            )
                            # Untouched modules share their parameters with the
                            # optimizer directly, in order.
                            for i, p in enumerate(sub_m.parameters()):
                                self.assertTrue(p is optimizer_params_list[param_id])
                                param_id += 1
                    # For resume training, state_dict() should always return fp32 dtype
                    origin_model_state = M.state_dict()
                    ipex_model_state = model.state_dict()
                    self.assertEqual(origin_model_state, ipex_model_state)
                    origin_opt_state = optimizer.state_dict()
                    ipex_opt_state = opt.state_dict()
                    self.assertEqual(ipex_opt_state["state"], origin_opt_state["state"])
if __name__ == "__main__":
    # Run all TestWeightCastCases via unittest's CLI entry point.
    test = unittest.main()
| 7,898 | 40.793651 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_weight_prepack.py | import unittest
import itertools
import copy
import os
import time
import sys
from intel_extension_for_pytorch.utils.channels_last_1d import (
to_channels_last_1d,
is_contiguous_channels_last_1d,
)
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
import torch
import intel_extension_for_pytorch as ipex
import intel_extension_for_pytorch._C as core
from torch.testing._internal.common_utils import TestCase
from torch.optim import (
Adadelta,
Adagrad,
Adam,
AdamW,
Adamax,
ASGD,
RMSprop,
Rprop,
SGD,
)
from intel_extension_for_pytorch.optim._lamb import Lamb
# Map spatial dimensionality (1/2/3) to the matching torch convolution class.
conv_module = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
def module_found(model, type):
    """Return True if any descendant module of ``model`` is an instance of ``type``.

    Note: the root ``model`` itself is deliberately not checked, matching the
    behavior callers rely on.

    Fix: previously, direct members of an ``nn.ModuleList`` were only searched
    through (their children were inspected) but never isinstance-checked
    themselves, so e.g. ``ModuleList([LinearLayer()])`` was missed. The uniform
    recursion below checks every descendant and is a strict superset of the
    old behavior.
    """
    for child in model.children():
        if isinstance(child, type):
            return True
        if module_found(child, type):
            return True
    return False
def get_rand_seed():
    """Return an integer seed derived from the current wall-clock time,
    scaled to nanosecond magnitude."""
    now_seconds = time.time()
    return int(now_seconds * 1000000000)
class TestPrepackCases(TestCase):
def test_channels_last_1d_forward(self):
    """ipex.optimize on a Conv1d must give identical results for contiguous
    and channels-last-1d inputs, and its outputs must themselves be
    channels-last-1d contiguous."""

    class Conv1d(torch.nn.Module):
        def __init__(
            self, in_channels, out_channels, kernel_size, stride, padding, bias
        ):
            super(Conv1d, self).__init__()
            self.conv = torch.nn.Conv1d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=bias,
            )

        def forward(self, x):
            return self.conv(x)

    # N/C/W combinations, including degenerate size-1 dimensions.
    input_shapes = [
        (2, 2, 3),
        (4, 4, 4),
        (4, 4, 1),
        (4, 1, 4),
        (4, 1, 1),
        (1, 4, 4),
        (1, 4, 1),
        (1, 1, 4),
    ]
    for x_shape in input_shapes:
        M = 5
        C = x_shape[1]
        x = torch.randn(x_shape, dtype=torch.float32)
        model = (
            Conv1d(
                in_channels=C,
                out_channels=M,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            )
            .float()
            .eval()
        )
        x_nwc = to_channels_last_1d(copy.deepcopy(x))
        model = to_channels_last_1d(model)
        ipex_model = ipex.optimize(model, dtype=torch.float32, level="O1")
        # ipex vs stock output on a contiguous input.
        y_ipex = ipex_model(x)
        y = model(x)
        self.assertEqual(y, y_ipex)
        # ipex vs stock output on a channels-last-1d input.
        y_ipex_nwc = ipex_model(x_nwc)
        y_nwc = model(x_nwc)
        self.assertEqual(y_nwc, y_ipex_nwc)
        self.assertEqual(y_ipex, y_ipex_nwc)
        # Outputs keep the channels-last-1d layout regardless of input layout.
        self.assertTrue(is_contiguous_channels_last_1d(y_ipex))
        self.assertTrue(is_contiguous_channels_last_1d(y_ipex_nwc))
def _test_convolution_base(self, dim, dtype, is_train, rtol=None, atol=None):
    """Compare stock PyTorch vs ipex.optimize (both inplace=False and
    inplace=True) for 1d/2d/3d convolution.

    Sweeps bias x dilation x groups x memory-format x sample-input x
    padding-mode; ``is_train`` selects training (SGD, one backward step) or
    inference; ``rtol``/``atol`` loosen comparisons for low-precision dtypes.
    """

    class ConvNd(torch.nn.Module):
        def __init__(
            self,
            dim,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            bias,
            groups,
            padding_mode,
        ):
            super(ConvNd, self).__init__()
            self.conv = conv_module[dim](
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                bias=bias,
                groups=groups,
                padding_mode=padding_mode,
            )

        def forward(self, x):
            return self.conv(x)

    input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
    channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
    padding_modes = ["zeros", "reflect"]
    # Currently, there is no channels_last_1d format for 1d input in IPEX.
    # After adding a python API named `to_channels_last_1d` which is to convert 1d input to channels last,
    # we will make some changes to fully support channels_last_1d.
    if dim == 1:
        options = itertools.product(
            [True, False],
            [1, 2],
            [1, 4],
            [torch.contiguous_format],
            [True, False],
            padding_modes,
        )
    else:
        options = itertools.product(
            [True, False],
            [1, 2],
            [1, 4],
            [torch.contiguous_format, channels_last],
            [True, False],
            padding_modes,
        )
    for (
        bias,
        dilation,
        groups,
        memory_format,
        feed_sample_input,
        padding_mode,
    ) in options:
        # Random batch size and channel counts (channels kept divisible by groups).
        N = torch.randint(3, 10, (1,)).item()
        M = torch.randint(3, 10, (1,)).item() * groups
        C = torch.randint(3, 10, (1,)).item() * groups
        x_shape = (N, C) + input_shapes[dim]
        # Round-trip through `dtype` so the fp32 reference starts from the same
        # (already-truncated) values the low-precision path will see.
        x = torch.randn(x_shape, dtype=torch.float32).to(dtype=dtype).float()
        model = (
            ConvNd(
                dim=dim,
                in_channels=C,
                out_channels=M,
                kernel_size=3,
                stride=2,
                padding=1,
                dilation=dilation,
                bias=bias,
                groups=groups,
                padding_mode=padding_mode,
            )
            .to(dtype=dtype)
            .float()
        )
        model = model.to(memory_format=memory_format)
        x = x.to(memory_format=memory_format)
        # Independent input clones for the three model variants.
        x1 = x.clone().requires_grad_()
        x2 = x.clone().requires_grad_()
        x3 = x.clone().requires_grad_()
        if is_train:
            origin_model1 = copy.deepcopy(model).train()
            origin_optimizer1 = SGD(
                origin_model1.parameters(), lr=0.01, momentum=0.9
            )
            origin_model2 = copy.deepcopy(model).train()
            origin_optimizer2 = SGD(
                origin_model2.parameters(), lr=0.01, momentum=0.9
            )
        else:
            origin_model1 = copy.deepcopy(model).eval()
            origin_model2 = copy.deepcopy(model).eval()
        # Build the two ipex models (inplace=False / inplace=True). fp16 goes
        # through level O0 with explicit weights_prepack; bf16/fp32 use O1.
        if feed_sample_input:
            if dtype == torch.float16:
                if is_train:
                    ipex_model1, ipex_optimizer1 = ipex.optimize(
                        origin_model1,
                        dtype=dtype,
                        optimizer=origin_optimizer1,
                        level="O0",
                        weights_prepack=True,
                        sample_input=x,
                    )
                    ipex_model2, ipex_optimizer2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        optimizer=origin_optimizer2,
                        level="O0",
                        weights_prepack=True,
                        inplace=True,
                        sample_input=x,
                    )
                else:
                    ipex_model1 = ipex.optimize(
                        origin_model1,
                        dtype=dtype,
                        level="O0",
                        weights_prepack=True,
                        sample_input=x,
                    )
                    ipex_model2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        level="O0",
                        weights_prepack=True,
                        inplace=True,
                        sample_input=x,
                    )
            else:
                if is_train:
                    ipex_model1, ipex_optimizer1 = ipex.optimize(
                        origin_model1,
                        dtype=dtype,
                        optimizer=origin_optimizer1,
                        level="O1",
                        sample_input=x,
                    )
                    ipex_model2, ipex_optimizer2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        optimizer=origin_optimizer2,
                        level="O1",
                        inplace=True,
                        sample_input=x,
                    )
                else:
                    ipex_model1 = ipex.optimize(
                        origin_model1, dtype=dtype, level="O1", sample_input=x
                    )
                    ipex_model2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        level="O1",
                        inplace=True,
                        sample_input=x,
                    )
        else:
            if dtype == torch.float16:
                if is_train:
                    ipex_model1, ipex_optimizer1 = ipex.optimize(
                        origin_model1,
                        dtype=dtype,
                        optimizer=origin_optimizer1,
                        level="O0",
                        weights_prepack=True,
                    )
                    ipex_model2, ipex_optimizer2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        optimizer=origin_optimizer2,
                        level="O0",
                        weights_prepack=True,
                        inplace=True,
                    )
                else:
                    ipex_model1 = ipex.optimize(
                        origin_model1, dtype=dtype, level="O0", weights_prepack=True
                    )
                    ipex_model2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        level="O0",
                        weights_prepack=True,
                        inplace=True,
                    )
            else:
                if is_train:
                    ipex_model1, ipex_optimizer1 = ipex.optimize(
                        origin_model1,
                        dtype=dtype,
                        optimizer=origin_optimizer1,
                        level="O1",
                    )
                    ipex_model2, ipex_optimizer2 = ipex.optimize(
                        origin_model2,
                        dtype=dtype,
                        optimizer=origin_optimizer2,
                        level="O1",
                        inplace=True,
                    )
                else:
                    ipex_model1 = ipex.optimize(
                        origin_model1, dtype=dtype, level="O1"
                    )
                    ipex_model2 = ipex.optimize(
                        origin_model2, dtype=dtype, level="O1", inplace=True
                    )
        if is_train or dtype == torch.float16:
            # Weights should have been cast to the low-precision dtype.
            self.assertTrue(ipex_model1.conv.weight.dtype == dtype)
            self.assertTrue(ipex_model2.conv.weight.dtype == dtype)
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            # original fp32 path
            y1 = origin_model1(x1)
            # ipex path with inplace=False
            y2 = ipex_model1(x2)
            # ipex path with inplace=True
            y3 = ipex_model2(x3)
            if dim == 1:
                # channels-last-1d input must match and the outputs must keep the layout.
                x4 = to_channels_last_1d(copy.deepcopy(x2))
                y4 = ipex_model1(x4)
                self.assertEqual(y1.float(), y4.float(), rtol=rtol, atol=atol)
                self.assertTrue(is_contiguous_channels_last_1d(y2))
                self.assertTrue(is_contiguous_channels_last_1d(y3))
                self.assertTrue(is_contiguous_channels_last_1d(y4))
            if is_train:
                # One backward + optimizer step per model, all driven by the
                # same (dtype-truncated) upstream gradient.
                grad_x = (
                    torch.randn(y1.shape, dtype=torch.float32).to(dtype=dtype).float()
                )
                origin_optimizer1.zero_grad()
                y1.backward(grad_x)
                origin_optimizer1.step()
                ipex_optimizer1.zero_grad()
                y2.backward(grad_x.to(dtype=dtype))
                ipex_optimizer1.step()
                ipex_optimizer2.zero_grad()
                y3.backward(grad_x.to(dtype=dtype))
                ipex_optimizer2.step()
        # The fp16 + conv1d + training combination is excluded from the
        # comparisons below (its output dtype differs — see the dtype asserts).
        if not (is_train and dtype == torch.float16 and dim == 1):
            self.assertTrue(y2.dtype == dtype)
            self.assertTrue(y3.dtype == dtype)
            self.assertEqual(y1.float(), y2.float(), rtol=rtol, atol=atol)
            self.assertEqual(y1.float(), y3.float(), rtol=rtol, atol=atol)
            if is_train:
                self.assertEqual(x1.grad, x2.grad, rtol=rtol, atol=atol)
                self.assertEqual(x1.grad, x3.grad, rtol=rtol, atol=atol)
            if bias and is_train:
                self.assertEqual(
                    origin_model1.conv.bias.grad.float(),
                    ipex_model1.conv.bias.grad.float(),
                    rtol=rtol,
                    atol=atol,
                )
                self.assertEqual(
                    origin_model1.conv.bias.grad.float(),
                    ipex_model2.conv.bias.grad.float(),
                    rtol=rtol,
                    atol=atol,
                )
            # compare origin_model parameters with origin_model parameters after grad update
            origin_model_state = origin_model1.state_dict()
            ipex_model_state1 = ipex_model1.state_dict()
            ipex_model_state2 = ipex_model2.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state1[var_name],
                    rtol=rtol,
                    atol=atol,
                )
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state2[var_name],
                    rtol=rtol,
                    atol=atol,
                )
            # compare momentum_buffer in optimizer's state(sgd)
            # TODO: other optimizer.
            if is_train:
                origin_optimizer_state = origin_optimizer1.state_dict()
                ipex_optimizer_state1 = ipex_optimizer1.state_dict()
                ipex_optimizer_state2 = ipex_optimizer2.state_dict()
                for var_name in origin_optimizer_state:
                    if var_name == "state":
                        self.assertEqual(
                            origin_optimizer_state[var_name],
                            ipex_optimizer_state1[var_name],
                            rtol=rtol,
                            atol=atol,
                        )
                        self.assertEqual(
                            origin_optimizer_state[var_name],
                            ipex_optimizer_state2[var_name],
                            rtol=rtol,
                            atol=atol,
                        )
def test_conv1d_training_inference(self):
    """Exercise 1d convolution prepack in training and inference modes for
    fp32, plus bf16/fp16 when the hardware supports them."""
    for training in (True, False):
        self._test_convolution_base(dim=1, dtype=torch.float, is_train=training)
    if core.onednn_has_bf16_support():
        for training in (True, False):
            self._test_convolution_base(
                dim=1, dtype=torch.bfloat16, is_train=training, rtol=1e-2, atol=1e-03
            )
    if core.onednn_has_fp16_support():
        for training in (True, False):
            self._test_convolution_base(
                dim=1, dtype=torch.float16, is_train=training, rtol=5e-4, atol=5e-04
            )
def test_conv2d_training_inference(self):
    """Exercise 2d convolution prepack in training and inference modes for
    fp32, plus bf16/fp16 when the hardware supports them."""
    for training in (True, False):
        self._test_convolution_base(dim=2, dtype=torch.float, is_train=training)
    if core.onednn_has_bf16_support():
        for training in (True, False):
            self._test_convolution_base(
                dim=2, dtype=torch.bfloat16, is_train=training, rtol=1e-2, atol=1e-03
            )
    if core.onednn_has_fp16_support():
        for training in (True, False):
            self._test_convolution_base(
                dim=2, dtype=torch.float16, is_train=training, rtol=5e-3, atol=5e-03
            )
@unittest.skipIf(
    True,
    "temporary disable before https://github.com/pytorch/pytorch/pull/74023 merged",
)
def test_conv3d_training(self):
    # NOTE(review): this calls _test_convolution_training_base, which is not
    # defined in the visible portion of this file — the helper here is
    # _test_convolution_base(..., is_train=...). Confirm the target exists
    # before removing the unconditional skip above.
    self._test_convolution_training_base(dim=3, dtype=torch.float)
    if core.onednn_has_bf16_support():
        self._test_convolution_training_base(
            dim=3, dtype=torch.bfloat16, rtol=1e-2, atol=1e-03
        )
# TODO: add inference case.
def _test_conv_nc11_base(self, dim):
    """Train one step on N x C x 1 x 1(x 1) convolution shapes and compare
    stock vs ipex.optimize (inplace=False and inplace=True): outputs, input
    grads, model state and SGD momentum buffers must agree."""
    # related issue: https://github.com/intel-innersource/frameworks.ai.pytorch.ipex-cpu/pull/86.
    channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
    test_dtypes = [torch.float]
    if core.onednn_has_bf16_support():
        test_dtypes.append(torch.bfloat16)
    options = itertools.product(
        test_dtypes,
        [1, 256],
        [1, 324],
        [torch.contiguous_format, channels_last],
        [True, False],
    )
    for (
        dtype,
        in_channels,
        out_channels,
        memory_format,
        feed_sample_input,
    ) in options:
        model = conv_module[dim](
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            padding=1,
            bias=False,
        )
        model = (
            model.to(memory_format=memory_format).to(dtype=dtype).float().train()
        )
        # Spatial dims are all 1 — this is the nc11 case under test.
        input_shape = [32, in_channels, 1, 1]
        if dim == 3:
            input_shape.append(1)
        x = (
            torch.randn(input_shape)
            .to(memory_format=memory_format)
            .to(dtype=dtype)
            .float()
        )
        # Independent input clones for the three model variants.
        x1 = x.clone().requires_grad_()
        x2 = x.clone().requires_grad_()
        x3 = x.clone().requires_grad_()
        origin_model1 = copy.deepcopy(model).train()
        origin_optimizer1 = SGD(origin_model1.parameters(), lr=0.01, momentum=0.9)
        origin_model2 = copy.deepcopy(model).train()
        origin_optimizer2 = SGD(origin_model2.parameters(), lr=0.01, momentum=0.9)
        if feed_sample_input:
            ipex_model1, ipex_optimizer1 = ipex.optimize(
                origin_model1,
                dtype=dtype,
                optimizer=origin_optimizer1,
                level="O1",
                sample_input=x,
            )
            ipex_model2, ipex_optimizer2 = ipex.optimize(
                origin_model2,
                dtype=dtype,
                optimizer=origin_optimizer2,
                level="O1",
                inplace=True,
                sample_input=x,
            )
        else:
            ipex_model1, ipex_optimizer1 = ipex.optimize(
                origin_model1, dtype=dtype, optimizer=origin_optimizer1, level="O1"
            )
            ipex_model2, ipex_optimizer2 = ipex.optimize(
                origin_model2,
                dtype=dtype,
                optimizer=origin_optimizer2,
                level="O1",
                inplace=True,
            )
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            # train one step for origin.
            y1 = origin_model1(x1)
            loss1 = y1.sum()
            origin_optimizer1.zero_grad()
            loss1.backward()
            origin_optimizer1.step()
            # train one step for ipex with inplace=False
            y2 = ipex_model1(x2)
            loss2 = y2.sum()
            ipex_optimizer1.zero_grad()
            loss2.backward()
            ipex_optimizer1.step()
            # train one step for ipex with inplace=True
            y3 = ipex_model2(x3)
            loss3 = y3.sum()
            ipex_optimizer2.zero_grad()
            loss3.backward()
            ipex_optimizer2.step()
        self.assertEqual(y1, y2, rtol=1e-2, atol=1e-03)
        self.assertEqual(y1, y3, rtol=1e-2, atol=1e-03)
        self.assertEqual(x1.grad, x2.grad, rtol=1e-2, atol=1e-03)
        self.assertEqual(x1.grad, x3.grad, rtol=1e-2, atol=1e-03)
        # compare origin_model parameters with origin_model parameters after grad update
        origin_model_state = origin_model1.state_dict()
        ipex_model_state1 = ipex_model1.state_dict()
        ipex_model_state2 = ipex_model2.state_dict()
        for var_name in origin_model_state:
            self.assertEqual(
                origin_model_state[var_name],
                ipex_model_state1[var_name],
                rtol=1e-2,
                atol=1e-03,
            )
            self.assertEqual(
                origin_model_state[var_name],
                ipex_model_state2[var_name],
                rtol=1e-2,
                atol=1e-03,
            )
        # compare momentum_buffer in optimizer's state(sgd)
        # TODO: other optimizer.
        origin_optimizer_state = origin_optimizer1.state_dict()
        ipex_optimizer_state1 = ipex_optimizer1.state_dict()
        ipex_optimizer_state2 = ipex_optimizer2.state_dict()
        for var_name in origin_optimizer_state:
            if var_name == "state":
                self.assertEqual(
                    origin_optimizer_state[var_name],
                    ipex_optimizer_state1[var_name],
                    rtol=1e-2,
                    atol=1e-03,
                )
                self.assertEqual(
                    origin_optimizer_state[var_name],
                    ipex_optimizer_state2[var_name],
                    rtol=1e-2,
                    atol=1e-03,
                )
def test_conv2d_nc11(self):
    # Regression coverage for N x C x 1 x 1 conv2d shapes (see _test_conv_nc11_base).
    self._test_conv_nc11_base(dim=2)
def test_conv3d_nc11(self):
    # Regression coverage for N x C x 1 x 1 x 1 conv3d shapes (see _test_conv_nc11_base).
    self._test_conv_nc11_base(dim=3)
def _test_conv_serialization_base(self, dim):
    """Checkpoint round-trip for prepacked convolution: train one step,
    save both stock and ipex checkpoints, reload them into fresh models,
    re-run ipex.optimize, train a second step, and compare outputs/state.

    Sweeps dtype x optimizer class x sample-input x padding-mode.
    """
    channels_last = torch.channels_last if dim == 2 else torch.channels_last_3d
    optimizer_options = [
        Lamb,
        Adadelta,
        Adagrad,
        Adam,
        AdamW,
        Adamax,
        ASGD,
        RMSprop,
        Rprop,
        SGD,
    ]
    padding_modes = ["zeros", "reflect"]
    test_dtypes = [torch.float]
    if core.onednn_has_bf16_support():
        test_dtypes.append(torch.bfloat16)
    options = itertools.product(
        test_dtypes, optimizer_options, [True, False], padding_modes
    )
    input_shape = [8, 3, 56, 56]
    if dim == 3:
        input_shape.append(56)
    for dtype, optimizer, feed_sample_input, padding_mode in options:
        model = conv_module[dim](
            3,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            padding_mode=padding_mode,
        )
        x = (
            torch.randn(input_shape)
            .to(dtype=dtype)
            .float()
            .to(memory_format=channels_last)
        )
        model = (
            model.to(dtype=dtype).float().to(memory_format=channels_last).train()
        )
        origin_x = x.clone()
        ipex_x = x.clone()
        origin_model = copy.deepcopy(model).train()
        lr = 1e-2
        origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
        if feed_sample_input:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model,
                dtype=dtype,
                optimizer=origin_optimizer,
                level="O1",
                sample_input=x,
            )
        else:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, dtype=dtype, optimizer=origin_optimizer, level="O1"
            )
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            # train one step for origin.
            y1 = origin_model(origin_x)
            loss1 = y1.sum()
            origin_optimizer.zero_grad()
            loss1.backward()
            torch.nn.utils.clip_grad_value_(origin_model.parameters(), 10)
            origin_optimizer.step()
            # train one step for ipex.
            y2 = ipex_model(ipex_x)
            loss2 = y2.sum()
            ipex_optimizer.zero_grad()
            loss2.backward()
            torch.nn.utils.clip_grad_value_(ipex_model.parameters(), 10)
            ipex_optimizer.step()
        # Persist both variants; the ipex state_dict must serialize the
        # unpacked (fp32-compatible) representation.
        torch.save(
            {
                "model_state_dict": origin_model.state_dict(),
                "optimizer_state_dict": origin_optimizer.state_dict(),
            },
            "origin_checkpoint.pth",
        )
        torch.save(
            {
                "model_state_dict": ipex_model.state_dict(),
                "optimizer_state_dict": ipex_optimizer.state_dict(),
            },
            "ipex_checkpoint.pth",
        )
        self.assertEqual(y1, y2, rtol=1e-4, atol=5e-02)
        origin_model_state = origin_model.state_dict()
        ipex_model_state = ipex_model.state_dict()
        for var_name in origin_model_state:
            self.assertEqual(
                origin_model_state[var_name],
                ipex_model_state[var_name],
                rtol=1e-2,
                atol=1e-03,
            )
        # check state_buffer works.
        origin_optimizer_state = origin_optimizer.state_dict()
        ipex_optimizer_state = ipex_optimizer.state_dict()
        for var_name in origin_optimizer_state:
            if var_name == "state":
                self.assertEqual(
                    origin_optimizer_state[var_name],
                    ipex_optimizer_state[var_name],
                    rtol=1e-2,
                    atol=5e-02,
                )
        # Reload the stock checkpoint into a fresh model/optimizer.
        origin_model = copy.deepcopy(model).train()
        origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
        origin_checkpoint = torch.load("origin_checkpoint.pth")
        origin_model.load_state_dict(origin_checkpoint["model_state_dict"])
        origin_optimizer.load_state_dict(origin_checkpoint["optimizer_state_dict"])
        # load ipex model state
        # NOTE(review): origin_ipex_model/-optimizer are loaded but never used
        # afterwards in the visible code — presumably this only verifies the
        # ipex checkpoint loads into a stock model without error.
        origin_ipex_model = copy.deepcopy(model)
        origin_ipex_optimizer = optimizer(origin_ipex_model.parameters(), lr=lr)
        ipex_checkpoint = torch.load("ipex_checkpoint.pth")
        origin_ipex_model.load_state_dict(ipex_checkpoint["model_state_dict"])
        origin_ipex_optimizer.load_state_dict(
            ipex_checkpoint["optimizer_state_dict"]
        )
        if feed_sample_input:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model,
                dtype=dtype,
                optimizer=origin_optimizer,
                level="O1",
                sample_input=x,
            )
        else:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, dtype=dtype, optimizer=origin_optimizer, level="O1"
            )
        # train second step for origin.
        y1 = origin_model(origin_x)
        loss = y1.sum()
        origin_optimizer.zero_grad()
        loss.backward()
        origin_optimizer.step()
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            # training second step for ipex model.
            y3 = ipex_model(ipex_x)
            loss3 = y3.sum()
            ipex_optimizer.zero_grad()
            loss3.backward()
            ipex_optimizer.step()
        self.assertEqual(y1, y3.float(), rtol=1e-2, atol=5e-02)
        origin_model_state = origin_model.state_dict()
        ipex_model_state = ipex_model.state_dict()
        for var_name in origin_model_state:
            self.assertEqual(
                origin_model_state[var_name],
                ipex_model_state[var_name],
                rtol=1e-2,
                atol=5e-02,
            )
        os.remove("origin_checkpoint.pth")
        os.remove("ipex_checkpoint.pth")
def test_conv2d_serialization(self):
    # Checkpoint save/load round-trip for prepacked 2d conv (see _test_conv_serialization_base).
    self._test_conv_serialization_base(dim=2)
def test_conv3d_serialization(self):
    # Checkpoint save/load round-trip for prepacked 3d conv (see _test_conv_serialization_base).
    self._test_conv_serialization_base(dim=3)
def _test_imagenet_model(self, model):
    """Run an imagenet-style (torchvision) model through ipex.optimize in
    inference mode (conv+bn folding at level O1) and training mode (ASGD,
    two steps) and compare against the stock eager results."""
    model = model.to(memory_format=torch.channels_last)
    test_dtypes = [torch.float]
    if core.onednn_has_bf16_support():
        test_dtypes.append(torch.bfloat16)
    for dtype, feed_sample_input in itertools.product(test_dtypes, [True, False]):
        model = model.to(dtype).float()
        x = (
            torch.randn(1, 3, 224, 224)
            .to(dtype=dtype)
            .float()
            .to(memory_format=torch.channels_last)
        )
        # inference case, will do conv+bn folding 'O1'. do nothing for 'O0'.
        if feed_sample_input:
            ipex_model2 = ipex.optimize(
                model.eval(), dtype=dtype, level="O1", sample_input=x
            )
        else:
            ipex_model2 = ipex.optimize(model.eval(), dtype=dtype, level="O1")
        y1 = model(x)
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            y2 = ipex_model2(x)
        self.assertEqual(y1, y2.float(), rtol=1e-2, atol=5e-2)
        # training case.
        origin_model = copy.deepcopy(model).train()
        origin_optimizer = ASGD(origin_model.parameters(), lr=0.01)
        # do weight prepack for 'O1'
        if feed_sample_input:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model,
                dtype=dtype,
                optimizer=origin_optimizer,
                level="O1",
                sample_input=x,
            )
        else:
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, dtype=dtype, optimizer=origin_optimizer, level="O1"
            )
        # run two iterations, and then compare the results.
        xx = [
            torch.randn(1, 3, 224, 224)
            .to(dtype=dtype)
            .float()
            .to(memory_format=torch.channels_last),
            torch.randn(1, 3, 224, 224)
            .to(dtype=dtype)
            .float()
            .to(memory_format=torch.channels_last),
        ]
        for i in range(2):
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                x = xx[i]
                # original case
                y1 = origin_model(x.clone())
                loss1 = y1.sum()
                origin_optimizer.zero_grad()
                loss1.backward()
                origin_optimizer.step()
                # ipex case
                y2 = ipex_model(x.clone())
                loss2 = y2.sum()
                ipex_optimizer.zero_grad()
                loss2.backward()
                ipex_optimizer.step()
            self.assertEqual(y1, y2, rtol=6e-2, atol=1e-2)
@skipIfNoTorchVision
def test_resnet18(self):
    """End-to-end ipex.optimize check on torchvision ResNet-18."""
    model = torchvision.models.resnet.resnet18(pretrained=False)
    self._test_imagenet_model(model)
@skipIfNoTorchVision
def test_resnext50_32x4d(self):
    """End-to-end ipex.optimize check on torchvision ResNeXt-50 32x4d."""
    model = torchvision.models.resnet.resnext50_32x4d(pretrained=False)
    self._test_imagenet_model(model)
def test_blas_backend(self):
    """Verify linear-layer backend selection in the traced graph:

    - default prepack          -> mkl sgemm kernel (ipex_prepack::mkl_sgemm_run)
    - auto_kernel_selection    -> oneDNN kernel (ipex_prepack::linear_run)
    - weights_prepack=False    -> plain aten::linear (either backend)

    Order matters: auto_kernel_selection flips a global backend flag, so the
    mkl path is re-checked after ipex._disable_dnnl().
    """

    class L(torch.nn.Module):
        def __init__(self, in_f, out_f, bias):
            super(L, self).__init__()
            self.linear = torch.nn.Linear(in_f, out_f, bias=bias)

        def forward(self, x):
            return self.linear(x)

    out_features = torch.randint(3, 10, (1,)).item()
    in_features = torch.randint(3, 10, (1,)).item()

    input_shape = (8, in_features)
    x = torch.randn(input_shape, dtype=torch.float32)
    model = L(in_features, out_features, True)
    origin_model = copy.deepcopy(model).eval()

    def test_mkl():
        # With dnnl disabled, default prepack must route through MKL sgemm.
        self.assertTrue(ipex._using_dnnl() is False)
        ipex_model_mkl = ipex.optimize(
            origin_model, dtype=torch.float32, level="O1"
        )
        with torch.no_grad():
            graph = torch.jit.trace(ipex_model_mkl.eval(), x)
            graph = torch.jit.freeze(graph)
            graph(x)
            trace_graph = graph.graph_for(x)
            self.assertTrue(
                any(
                    n.kind() == "ipex_prepack::mkl_sgemm_run"
                    for n in trace_graph.nodes()
                )
            )

    test_mkl()

    # auto_kernel_selection=True switches the global backend to oneDNN.
    ipex_model_dnnl = ipex.optimize(
        origin_model, dtype=torch.float32, level="O1", auto_kernel_selection=True
    )
    self.assertTrue(ipex._using_dnnl())
    with torch.no_grad():
        dnnl_graph = torch.jit.trace(ipex_model_dnnl.eval(), x)
        dnnl_graph = torch.jit.freeze(dnnl_graph)
        dnnl_graph(x)
        trace_graph = dnnl_graph.graph_for(x)
        self.assertTrue(
            any(n.kind() == "ipex_prepack::linear_run" for n in trace_graph.nodes())
        )

    # Restore the default backend and confirm the MKL path still works.
    ipex._disable_dnnl()
    test_mkl()

    # Without weight prepack the graph keeps the plain aten::linear op.
    ipex_model = ipex.optimize(
        origin_model, dtype=torch.float32, level="O1", weights_prepack=False
    )
    self.assertTrue(ipex._using_dnnl() is False)
    with torch.no_grad():
        graph = torch.jit.trace(ipex_model.eval(), x)
        graph = torch.jit.freeze(graph)
        graph(x)
        trace_graph = graph.graph_for(x)
        self.assertTrue(
            any(n.kind() == "aten::linear" for n in trace_graph.nodes())
        )

    # Same without prepack but with the oneDNN backend selected.
    ipex_model = ipex.optimize(
        origin_model,
        dtype=torch.float32,
        level="O1",
        auto_kernel_selection=True,
        weights_prepack=False,
    )
    self.assertTrue(ipex._using_dnnl())
    with torch.no_grad():
        graph = torch.jit.trace(ipex_model.eval(), x)
        graph = torch.jit.freeze(graph)
        graph(x)
        trace_graph = graph.graph_for(x)
        self.assertTrue(
            any(n.kind() == "aten::linear" for n in trace_graph.nodes())
        )
def test_linear_inference(self):
    """ipex.optimize inference on a single Linear across bias x input-rank x
    sample-input x dtype combinations; the weight must carry the target
    dtype and outputs must match the stock fp32 model within tolerance."""

    class L(torch.nn.Module):
        def __init__(self, in_f, out_f, bias):
            super(L, self).__init__()
            self.linear = torch.nn.Linear(in_f, out_f, bias=bias)

        def forward(self, x):
            return self.linear(x)

    out_features = torch.randint(3, 10, (1,)).item()
    in_features = torch.randint(3, 10, (1,)).item()

    # 2d, 3d and 4d inputs (linear applies over the last dim).
    input_shapes = [(8, in_features), (2, 4, in_features), (2, 2, 2, in_features)]
    test_dtypes = [torch.float]
    if core.onednn_has_bf16_support():
        test_dtypes.append(torch.bfloat16)
    options = itertools.product(
        [True, False], input_shapes, [True, False], test_dtypes
    )
    for bias, x_shape, feed_sample_input, dtype in options:
        # Round-trip through `dtype` so the fp32 reference starts from the
        # same truncated values the low-precision path will see.
        x = torch.randn(x_shape, dtype=torch.float32).to(dtype=dtype).float()
        model = L(in_features, out_features, bias).to(dtype=dtype).float().eval()
        x1 = x.clone().requires_grad_(False)
        x2 = x.clone().requires_grad_(False)
        origin_model = copy.deepcopy(model).eval()
        if feed_sample_input:
            ipex_model = ipex.optimize(
                origin_model, dtype=dtype, level="O1", sample_input=x
            )
        else:
            ipex_model = ipex.optimize(origin_model, dtype=dtype, level="O1")
        self.assertEqual(ipex_model.linear.weight.dtype, dtype)
        y1 = origin_model(x1)
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            # ipex path
            y2 = ipex_model(x2)
        self.assertEqual(y1, y2.float(), rtol=1e-2, atol=1e-3)
    @unittest.skipIf(
        not core.onednn_has_bf16_support(),
        "ipex linear bf16 is not supported on this CPU device",
    )
    def test_linear_unpack(self):
        """Regression test: weight prepack must handle a non-contiguous
        (transpose-strided) Linear weight, as loaded from a GPT-J
        state_dict, and the frozen JIT graph must still match eager."""
        class L(torch.nn.Module):
            def __init__(self, ic, oc):
                super(L, self).__init__()
                self.linear = torch.nn.Linear(ic, oc, bias=True)
            def forward(self, x):
                return self.linear(x)
        ic = 5
        oc = 10
        input_shapes = (4, 32, ic)
        x = torch.randn(input_shapes)
        dtype = torch.bfloat16
        m = L(ic, oc).eval()
        # Example taken from GPT-J. The weight loaded from the state_dict is non-contiguous with the below size and stride:
        m.linear.weight = torch.nn.Parameter(
            copy.deepcopy(m.linear.weight).as_strided([oc, ic], [1, oc])
        )
        optimized_m = ipex.optimize(m, dtype=dtype, inplace=False)
        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
            jit_m = torch.jit.trace(optimized_m, x)
            jit_m = torch.jit.freeze(jit_m)
            # warm up to trigger the JIT fusion pass
            jit_m(x)
            jit_m(x)
            res = jit_m(x)
        ref_result = m(x)
        self.assertEqual(res, ref_result)
    def _test_linear_base(self, dtype, is_train, rtol, atol):
        """Shared driver comparing eager-FP32 Linear against the
        ipex.optimize'd model at the given low-precision ``dtype``.

        Covers a grid of output features, bias on/off, input ranks and the
        sample_input prepack hint; in training mode it also checks grads,
        updated parameters and the SGD momentum buffers.

        NOTE: float16 uses level "O0" with explicit weights_prepack=True,
        while other dtypes use level "O1" — presumably because fp16 fusion
        at O1 is not supported; confirm against ipex.optimize docs.
        """
        linear_module = torch.nn.Linear
        out_feature = [1024, 256, 1, torch.randint(3, 10, (1,)).item()]
        in_feature = [128, 479, torch.randint(3, 10, (1,)).item()]
        input_shapes = []
        for s in in_feature:
            input_shapes += [(128, s), (2, 64, s), (2, 2, 32, s)]
        options = itertools.product(
            out_feature, [True, False], input_shapes, [dtype], [True, False]
        )
        for out_features, bias, x_shape, dtype, feed_sample_input in options:
            in_features = x_shape[-1]
            # round-trip through dtype so both paths see identical values
            model = (
                torch.nn.Linear(in_features, out_features, bias=bias)
                .to(dtype=dtype)
                .float()
                .train()
            )
            x = torch.randn(x_shape, dtype=torch.float32).to(dtype=dtype).float()
            x1 = x.clone().requires_grad_()
            x2 = x.clone().requires_grad_()
            if is_train:
                origin_model = copy.deepcopy(model).train()
                origin_optimizer = SGD(origin_model.parameters(), lr=0.01, momentum=0.9)
            else:
                origin_model = copy.deepcopy(model).eval()
            if feed_sample_input:
                if dtype == torch.float16:
                    if is_train:
                        ipex_model, ipex_optimizer = ipex.optimize(
                            origin_model,
                            dtype=dtype,
                            optimizer=origin_optimizer,
                            level="O0",
                            weights_prepack=True,
                            sample_input=x,
                        )
                    else:
                        ipex_model = ipex.optimize(
                            origin_model,
                            dtype=dtype,
                            level="O0",
                            weights_prepack=True,
                            sample_input=x,
                        )
                else:
                    if is_train:
                        ipex_model, ipex_optimizer = ipex.optimize(
                            origin_model,
                            dtype=dtype,
                            optimizer=origin_optimizer,
                            level="O1",
                            sample_input=x,
                        )
                    else:
                        ipex_model = ipex.optimize(
                            origin_model, dtype=dtype, level="O1", sample_input=x
                        )
            else:
                if dtype == torch.float16:
                    if is_train:
                        ipex_model, ipex_optimizer = ipex.optimize(
                            origin_model,
                            dtype=dtype,
                            optimizer=origin_optimizer,
                            level="O0",
                            weights_prepack=True,
                        )
                    else:
                        ipex_model = ipex.optimize(
                            origin_model, dtype=dtype, level="O0", weights_prepack=True
                        )
                else:
                    if is_train:
                        ipex_model, ipex_optimizer = ipex.optimize(
                            origin_model,
                            dtype=dtype,
                            optimizer=origin_optimizer,
                            level="O1",
                        )
                    else:
                        ipex_model = ipex.optimize(
                            origin_model, dtype=dtype, level="O1"
                        )
            if is_train or dtype == torch.float16:
                self.assertTrue(ipex_model.weight.dtype == dtype)
            for i in range(1):
                # original fp32 path
                y1 = origin_model(x1)
                if is_train:
                    loss1 = y1.sum()
                    origin_optimizer.zero_grad()
                    loss1.backward()
                    origin_optimizer.step()
                with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                    # ipex path
                    y2 = ipex_model(x2)
                    if is_train:
                        loss2 = y2.sum()
                        ipex_optimizer.zero_grad()
                        loss2.backward()
                        ipex_optimizer.step()
            self.assertTrue(y2.dtype == dtype)
            self.assertEqual(y1, y2.float(), rtol=rtol, atol=atol)
            if is_train:
                self.assertEqual(x1.grad, x2.grad, rtol=rtol, atol=atol)
            if bias and is_train:
                self.assertEqual(
                    origin_model.bias.grad,
                    ipex_model.bias.grad.float(),
                    rtol=rtol,
                    atol=atol,
                )
            # compare the original model's parameters with the ipex model's
            # parameters after the gradient update
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state[var_name],
                    rtol=rtol,
                    atol=atol,
                )
            # compare momentum_buffer in optimizer's state(sgd)
            # TODO: other optimizer.
            if is_train:
                origin_optimizer_state = origin_optimizer.state_dict()
                ipex_optimizer_state = ipex_optimizer.state_dict()
                for var_name in origin_optimizer_state:
                    if var_name == "state":
                        self.assertEqual(
                            origin_optimizer_state[var_name],
                            ipex_optimizer_state[var_name],
                            rtol=rtol,
                            atol=atol,
                        )
def test_linear_training_inference(self):
if core.onednn_has_bf16_support():
self._test_linear_base(
dtype=torch.bfloat16, is_train=True, rtol=1e-2, atol=1e-03
)
self._test_linear_base(
dtype=torch.bfloat16, is_train=False, rtol=1e-2, atol=2e-03
)
if core.onednn_has_fp16_support():
self._test_linear_base(
dtype=torch.float16, is_train=True, rtol=5e-4, atol=5e-04
)
self._test_linear_base(
dtype=torch.float16, is_train=False, rtol=5e-4, atol=5e-04
)
def _deconv_params_list(self):
# shapes that works:
params_dict = {
"input_height": [12],
"input_width": [12],
"input_depth": [12],
"input_channel_per_group": [15],
"output_channel_per_group": [3],
"kernel_size": [3],
"bias": [True, False],
"stride": [1, 2],
"padding": [1, 2],
"output_padding": [0], # TODO: fix output_padding == 2 and etc.
"groups": [1, 2],
"dilation": [1, 2],
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
def _deconv_with_output_padding(self):
params_dict = {
"input_height": 8,
"input_width": 8,
"input_depth": 8,
"input_channel_per_group": 10,
"output_channel_per_group": 10,
"kernel_size": 3,
"bias": False,
"stride": 2,
"padding": 1,
"output_padding": 2,
"groups": 1,
"dilation": 3,
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
# mkldnn does not support the case where:
# padding - output_padding + stride <= 0
# while PyTorch supports this case, need to fallback in this case
def _deconv_fallback_shape(self):
params_dict = {
"input_height": 8,
"input_width": 8,
"input_depth": 8,
"input_channel_per_group": 10,
"output_channel_per_group": 10,
"kernel_size": 4,
"bias": False,
"stride": 1,
"padding": 1,
"output_padding": 2,
"groups": 1,
"dilation": 3,
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
    def _test_deconv(self, dims, inference):
        """Shared driver comparing eager ConvTranspose{2,3}d against the
        ipex.optimize'd model over the option grid from
        ``_deconv_params_list`` plus two special-case configurations.

        Configurations mkldnn cannot handle
        (padding - output_padding + stride <= 0) must keep the stock
        ConvTranspose module (fallback); all others must be replaced by
        the ipex prepacked module and match eager numerically.
        """
        class Deconv2d(torch.nn.Module):
            def __init__(
                self,
                ic,
                oc,
                kernel_size,
                stride,
                padding,
                output_padding,
                groups,
                bias,
                dilation,
            ):
                super(Deconv2d, self).__init__()
                self.deconv = torch.nn.ConvTranspose2d(
                    ic,
                    oc,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                    output_padding=output_padding,
                    groups=groups,
                    bias=bias,
                    dilation=dilation,
                )
            def forward(self, x):
                return self.deconv(x)
        class Deconv3d(torch.nn.Module):
            def __init__(
                self,
                ic,
                oc,
                kernel_size,
                stride,
                padding,
                output_padding,
                groups,
                bias,
                dilation,
            ):
                super(Deconv3d, self).__init__()
                self.deconv = torch.nn.ConvTranspose3d(
                    ic,
                    oc,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                    output_padding=output_padding,
                    groups=groups,
                    bias=bias,
                    dilation=dilation,
                )
            def forward(self, x):
                return self.deconv(x)
        params_list = self._deconv_params_list()
        torch.manual_seed(0)
        for (
            input_height,
            input_width,
            input_depth,
            input_channel_per_group,
            output_channel_per_group,
            kernel_size,
            bias,
            stride,
            padding,
            output_padding,
            groups,
            dilation,
        ) in (
            list(itertools.product(*params_list))
            + [self._deconv_with_output_padding()]
            + [self._deconv_fallback_shape()]
        ):
            # skip configurations whose computed output size would be
            # non-positive (per the ConvTranspose output-size formula)
            if (
                (output_padding < stride or output_padding < dilation)
                and (
                    (input_height - 1) * stride
                    - 2 * padding
                    + dilation * (kernel_size - 1)
                    + output_padding
                    + 1
                    > 0
                )
                and (
                    (input_width - 1) * stride
                    - 2 * padding
                    + dilation * (kernel_size - 1)
                    + output_padding
                    + 1
                    > 0
                )
                and (
                    (input_depth - 1) * stride
                    - 2 * padding
                    + dilation * (kernel_size - 1)
                    + output_padding
                    + 1
                    > 0
                )
            ):
                ic = input_channel_per_group * groups
                oc = output_channel_per_group * groups
                if dims == 2:
                    model = Deconv2d(
                        ic,
                        oc,
                        kernel_size,
                        stride,
                        padding,
                        output_padding,
                        groups,
                        bias,
                        dilation,
                    ).to(memory_format=torch.channels_last)
                    x = torch.rand((2, ic, input_height, input_width)).to(
                        memory_format=torch.channels_last
                    )
                elif dims == 3:
                    model = Deconv3d(
                        ic,
                        oc,
                        kernel_size,
                        stride,
                        padding,
                        output_padding,
                        groups,
                        bias,
                        dilation,
                    ).to(memory_format=torch.channels_last_3d)
                    x = torch.rand((2, ic, input_depth, input_height, input_width)).to(
                        memory_format=torch.channels_last_3d
                    )
                test_dtypes = [torch.float]
                if core.onednn_has_bf16_support():
                    test_dtypes.append(torch.bfloat16)
                for dtype, feed_sample_input in itertools.product(
                    test_dtypes, [True, False]
                ):
                    # round-trip through dtype so both paths start from
                    # identical (already-rounded) values
                    x = x.to(dtype=dtype).float()
                    model = model.to(dtype=dtype).float()
                    if inference:
                        model.eval()
                        origin_model = copy.deepcopy(model).eval()
                        if feed_sample_input:
                            ipex_model = ipex.optimize(
                                origin_model, dtype=dtype, level="O1", sample_input=x
                            )
                        else:
                            ipex_model = ipex.optimize(
                                origin_model, dtype=dtype, level="O1"
                            )
                        if padding - output_padding + stride <= 0:
                            # unsupported in mkldnn, should not replace the original ConvTranspose module
                            self.assertTrue(
                                module_found(
                                    ipex_model,
                                    torch.nn.ConvTranspose2d
                                    if dims == 2
                                    else torch.nn.ConvTranspose3d,
                                )
                            )
                            continue
                        else:
                            self.assertFalse(
                                module_found(
                                    ipex_model,
                                    torch.nn.ConvTranspose2d
                                    if dims == 2
                                    else torch.nn.ConvTranspose3d,
                                )
                            )
                        self.assertEqual(ipex_model.deconv.weight.dtype, dtype)
                        y_origin = origin_model(x)
                        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                            y_ipex = ipex_model(x)
                        self.assertEqual(
                            y_origin, y_ipex.float(), rtol=1e-2, atol=1e-03
                        )
                    else:
                        model.train()
                        origin_model = copy.deepcopy(model).train()
                        origin_optimizer = SGD(
                            origin_model.parameters(), lr=0.01, momentum=0.9
                        )
                        if feed_sample_input:
                            ipex_model, ipex_optimizer = ipex.optimize(
                                origin_model,
                                dtype=dtype,
                                optimizer=origin_optimizer,
                                level="O1",
                                sample_input=x,
                            )
                        else:
                            ipex_model, ipex_optimizer = ipex.optimize(
                                origin_model,
                                dtype=dtype,
                                optimizer=origin_optimizer,
                                level="O1",
                            )
                        if padding - output_padding + stride <= 0:
                            # unsupported in mkldnn, should not replace the original ConvTranspose module
                            self.assertTrue(
                                module_found(
                                    ipex_model,
                                    torch.nn.ConvTranspose2d
                                    if dims == 2
                                    else torch.nn.ConvTranspose3d,
                                )
                            )
                            continue
                        else:
                            self.assertFalse(
                                module_found(
                                    ipex_model,
                                    torch.nn.ConvTranspose2d
                                    if dims == 2
                                    else torch.nn.ConvTranspose3d,
                                )
                            )
                        x1 = x.clone().requires_grad_()
                        x2 = x.clone().requires_grad_()
                        y1 = origin_model(x1)
                        loss1 = y1.sum()
                        origin_optimizer.zero_grad()
                        loss1.backward()
                        origin_optimizer.step()
                        with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                            y2 = ipex_model(x2)
                            loss2 = y2.sum()
                            ipex_optimizer.zero_grad()
                            loss2.backward()
                            ipex_optimizer.step()
                        self.assertEqual(y1, y2.float(), rtol=1e-2, atol=1e-3)
                        self.assertEqual(x1.grad, x2.grad, rtol=1e-2, atol=1e-3)
                        if bias:
                            self.assertEqual(
                                origin_model.deconv.bias.grad,
                                ipex_model.deconv.bias.grad.float(),
                                rtol=1e-2,
                                atol=1e-3,
                            )
                        # compare the original model's parameters with the
                        # ipex model's parameters after the gradient update
                        origin_model_state = origin_model.state_dict()
                        ipex_model_state = ipex_model.state_dict()
                        for var_name in origin_model_state:
                            self.assertEqual(
                                origin_model_state[var_name],
                                ipex_model_state[var_name],
                                rtol=1e-2,
                                atol=1e-3,
                            )
                        # compare momentum_buffer in optimizer's state(sgd)
                        # TODO: other optimizer.
                        origin_optimizer_state = origin_optimizer.state_dict()
                        ipex_optimizer_state = ipex_optimizer.state_dict()
                        for var_name in origin_optimizer_state:
                            if var_name == "state":
                                self.assertEqual(
                                    origin_optimizer_state[var_name],
                                    ipex_optimizer_state[var_name],
                                    rtol=1e-2,
                                    atol=1e-03,
                                )
def test_deconv_2d_inference(self):
self._test_deconv(dims=2, inference=True)
def test_deconv_2d_training(self):
self._test_deconv(dims=2, inference=False)
def test_deconv_3d_inference(self):
self._test_deconv(dims=3, inference=True)
def test_deconv_3d_training(self):
self._test_deconv(dims=3, inference=False)
    def test_hook(self):
        """Verify ipex's module-replacement policy in the presence of hooks.

        A Conv module is normally replaced by the ipex prepacked version.
        If a hook with a ``name`` attribute of "weight" or "bias" is
        registered on the module (e.g. torch.nn.utils.weight_norm),
        prepacking must be skipped and the stock Conv kept; any other
        hook must not prevent replacement.
        """
        class ConvNd(torch.nn.Module):
            def __init__(
                self,
                dim,
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                dilation,
                bias,
                groups,
            ):
                super(ConvNd, self).__init__()
                self.conv = conv_module[dim](
                    in_channels,
                    out_channels,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                    dilation=dilation,
                    bias=bias,
                    groups=groups,
                )
            def forward(self, x):
                return self.conv(x)
        input_shapes = {1: (224,), 2: (224, 224), 3: (55, 55, 55)}
        options = itertools.product([True, False], [1, 2], [1, 4])
        for dim in [1, 2, 3]:
            for bias, dilation, groups in options:
                N = torch.randint(3, 10, (1,)).item()
                M = torch.randint(1, 3, (1,)).item() * groups
                C = torch.randint(1, 3, (1,)).item() * groups
                x_shape = (N, C) + input_shapes[dim]
                x = torch.randn(x_shape, dtype=torch.float32)
                model_base = (
                    ConvNd(
                        dim=dim,
                        in_channels=C,
                        out_channels=M,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        dilation=dilation,
                        bias=bias,
                        groups=groups,
                    )
                    .float()
                    .eval()
                )
                module_type = torch.nn.Conv1d
                if dim == 2:
                    module_type = torch.nn.Conv2d
                if dim == 3:
                    module_type = torch.nn.Conv3d
                # IPEX will replace Conv with IPEX Conv
                model = copy.deepcopy(model_base)
                ipex_model = ipex.optimize(model, dtype=torch.float32, level="O1")
                ipex_model(x)
                self.assertFalse(isinstance(ipex_model.conv, module_type))
                # Use torch.nn.utils.weight_norm to hook weight in model.conv,
                # hook function here is WeightNorm, it has 'name' attribute,
                # IPEX will not do prepack and not replace Conv with IPEX Conv.
                hook_weight_model = copy.deepcopy(model_base)
                hook_weight_model.conv = torch.nn.utils.weight_norm(
                    hook_weight_model.conv, name="weight"
                )
                hook_weight_model = ipex.optimize(
                    hook_weight_model, dtype=torch.float32, level="O1", inplace=True
                )
                hook_weight_model(x)
                self.assertTrue(isinstance(hook_weight_model.conv, module_type))
                # User-defined hook function, it maybe has 'name' attribute or not,
                # hook.name maybe is 'weight' or 'bias' or not. Only when hook function
                # has 'name' attribute and hook on 'weight' or 'bias', IPEX will
                # not do prepack. In other situations, IPEX will do prepack as usual.
                dict_features = {}
                options = itertools.product(
                    ["pre", "forward", "backward"],
                    [True, False],
                    ["weight", "bias", "others"],
                )
                for hook_type, has_name_attr, name in options:
                    hook_model = copy.deepcopy(model_base)
                    if hook_type == "pre":
                        def forward_pre_hook(self, input):
                            dict_features["input"] = input
                        if has_name_attr:
                            forward_pre_hook.name = name
                        hook_model.conv.register_forward_pre_hook(forward_pre_hook)
                    elif hook_type == "forward":
                        def forward_hook(self, input, output):
                            dict_features["input"] = input
                            dict_features["output"] = output
                        if has_name_attr:
                            forward_hook.name = name
                        hook_model.conv.register_forward_hook(forward_hook)
                    else:
                        def backward_hook(self, grad_input, grad_output):
                            dict_features["grad_input"] = grad_input
                            dict_features["grad_output"] = grad_output
                        if has_name_attr:
                            backward_hook.name = name
                        hook_model.conv.register_backward_hook(backward_hook)
                    hook_model = ipex.optimize(
                        hook_model, dtype=torch.float32, level="O1"
                    )
                    hook_model(x)
                    if has_name_attr and (name == "weight" or name == "bias"):
                        self.assertTrue(isinstance(hook_model.conv, module_type))
                    else:
                        self.assertFalse(isinstance(hook_model.conv, module_type))
def _lstm_params_list(self):
params_dict = {
"input_size": [1, 2],
"hidden_size": [5, 16],
"num_layers": [1, 3],
"bidirectional": [False, True],
"bias": [False, True],
"empty_state": [False, True],
"batch_first": [False, True],
"dropout": [0, 0.4, 0.7, 1],
"batch_size": [1, 2],
"seq_len": [1, 3],
}
params_list = []
for key, value in params_dict.items():
params_list.append(value)
return params_list
    def _test_lstm(self, inference):
        """Shared driver comparing an eager LSTM against the ipex.optimize'd
        model over the option grid from ``_lstm_params_list``, with and
        without an explicit initial (h, c) state.

        Training mode also compares per-parameter gradients, updated model
        state and SGD optimizer state; the RNG seed is re-applied before
        each forward so dropout draws identical masks on both paths.
        """
        class Lstm(torch.nn.Module):
            def __init__(
                self,
                input_size,
                hidden_size,
                num_layers,
                bidirectional,
                bias,
                dropout,
                batch_first,
            ):
                super(Lstm, self).__init__()
                self.lstm = torch.nn.LSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    bidirectional=bidirectional,
                    bias=bias,
                    dropout=dropout,
                    batch_first=batch_first,
                )
            def forward(self, x, h=None):
                x, h = self.lstm(x, h)
                return x, h
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        params_list = self._lstm_params_list()
        for (
            input_size,
            hidden_size,
            num_layers,
            bidirectional,
            bias,
            empty_state,
            batch_first,
            dropout,
            batch_size,
            seq_len,
        ) in itertools.product(*params_list):
            # dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1
            if dropout > 0 and num_layers == 1:
                continue
            num_directions = 2 if bidirectional else 1
            if batch_first:
                input = torch.randn(batch_size, seq_len, input_size)
            else:
                input = torch.randn(seq_len, batch_size, input_size)
            h = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            c = torch.randn(num_layers * num_directions, batch_size, hidden_size)
            model = Lstm(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                bidirectional=bidirectional,
                bias=bias,
                dropout=dropout,
                batch_first=batch_first,
            )
            test_dtypes = [torch.float]
            if core.onednn_has_bf16_support():
                test_dtypes.append(torch.bfloat16)
            for dtype in test_dtypes:
                # fp32 cannot afford default tolerance. After debug, weight grad after
                # backward has error, but they are in the tolerance range. If replacing
                # with same weight grad in origin_model and ipex_model, model state
                # and optimizer state will be the same. Therefore, optimizer update is
                # correct. Before optimizer update, it is backward, its input x, weight
                # are the same, y(hy) has error, but they are in tolerance range. So
                # that the difference of lstm kernel between pytorch and oneDNN will lead
                # to model state diff.
                rtol = 1e-5
                atol = 1e-5
                if dtype == torch.bfloat16:
                    # align atol with that in _test_lstm in test_autocast.py of bf16
                    rtol = 2e-2
                    atol = 3e-2
                x = input.to(dtype=dtype).float()
                h = h.to(dtype=dtype).float()
                c = c.to(dtype=dtype).float()
                model = model.to(dtype=dtype).float()
                if inference:
                    model.eval()
                    origin_model = copy.deepcopy(model).eval()
                    ipex_model = ipex.optimize(origin_model, dtype=dtype, level="O1")
                    self.assertEqual(ipex_model.lstm.weight_ih_l0.dtype, dtype)
                    self.assertEqual(ipex_model.lstm.weight_hh_l0.dtype, dtype)
                    if empty_state:
                        y_origin, hy_origin = origin_model(x)
                    else:
                        y_origin, hy_origin = origin_model(x, (h, c))
                    with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                        if empty_state:
                            y_ipex, hy_ipex = ipex_model(x)
                        else:
                            y_ipex, hy_ipex = ipex_model(x, (h, c))
                    self.assertEqual(y_origin, y_ipex.float(), rtol=rtol, atol=atol)
                    self.assertEqual(
                        hy_origin[0], hy_ipex[0].float(), rtol=rtol, atol=atol
                    )
                    self.assertEqual(
                        hy_origin[1], hy_ipex[1].float(), rtol=rtol, atol=atol
                    )
                else:
                    model.train()
                    origin_model = copy.deepcopy(model).train()
                    origin_optimizer = SGD(origin_model.parameters(), lr=0.01)
                    ipex_model, ipex_optimizer = ipex.optimize(
                        origin_model,
                        dtype=dtype,
                        optimizer=origin_optimizer,
                        level="O1",
                    )
                    x1 = x.clone().requires_grad_()
                    x2 = x.clone().requires_grad_()
                    h1 = h.clone().requires_grad_()
                    h2 = h.clone().requires_grad_()
                    c1 = c.clone().requires_grad_()
                    c2 = c.clone().requires_grad_()
                    # re-seed before each forward so dropout masks match
                    if empty_state:
                        torch.manual_seed(rand_seed)
                        y1, hy1 = origin_model(x1)
                    else:
                        torch.manual_seed(rand_seed)
                        y1, hy1 = origin_model(x1, (h1, c1))
                    loss1 = y1.sum()
                    if not empty_state:
                        loss1_hy1_0 = hy1[0].sum()
                        loss1_hy1_1 = hy1[1].sum()
                    origin_optimizer.zero_grad()
                    loss1.backward(retain_graph=True)
                    if not empty_state:
                        loss1_hy1_0.backward(retain_graph=True)
                        loss1_hy1_1.backward(retain_graph=True)
                    origin_optimizer.step()
                    with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                        if empty_state:
                            torch.manual_seed(rand_seed)
                            y2, hy2 = ipex_model(x2)
                        else:
                            torch.manual_seed(rand_seed)
                            y2, hy2 = ipex_model(x2, (h2, c2))
                        loss2 = y2.sum()
                        if not empty_state:
                            loss2_hy2_0 = hy2[0].sum()
                            loss2_hy2_1 = hy2[1].sum()
                        ipex_optimizer.zero_grad()
                        loss2.backward(retain_graph=True)
                        if not empty_state:
                            loss2_hy2_0.backward(retain_graph=True)
                            loss2_hy2_1.backward(retain_graph=True)
                        # per-parameter gradient comparison before the
                        # optimizer applies the update
                        for name, para in origin_model.lstm.named_parameters():
                            self.assertEqual(
                                para.grad,
                                getattr(ipex_model.lstm, name).grad.float(),
                                rtol=rtol,
                                atol=atol,
                            )
                        ipex_optimizer.step()
                    self.assertEqual(y1, y2.float(), rtol=rtol, atol=atol)
                    self.assertEqual(hy1[0], hy2[0].float(), rtol=rtol, atol=atol)
                    self.assertEqual(hy1[1], hy2[1].float(), rtol=rtol, atol=atol)
                    self.assertEqual(x1.grad, x2.grad, rtol=rtol, atol=atol)
                    if not empty_state:
                        self.assertEqual(h1.grad, h2.grad, rtol=rtol, atol=atol)
                        self.assertEqual(c1.grad, c2.grad, rtol=rtol, atol=atol)
                    origin_model_state = origin_model.state_dict()
                    ipex_model_state = ipex_model.state_dict()
                    for var_name in origin_model_state:
                        self.assertEqual(
                            origin_model_state[var_name],
                            ipex_model_state[var_name],
                            rtol=rtol,
                            atol=atol,
                        )
                    origin_optimizer_state = origin_optimizer.state_dict()
                    ipex_optimizer_state = ipex_optimizer.state_dict()
                    for var_name in origin_optimizer_state:
                        if var_name == "state":
                            self.assertEqual(
                                origin_optimizer_state[var_name],
                                ipex_optimizer_state[var_name],
                                rtol=rtol,
                                atol=atol,
                            )
def test_lstm_inference(self):
self._test_lstm(inference=True)
def test_lstm_training(self):
self._test_lstm(inference=False)
    def test_lstm_serialization(self):
        """Checkpoint round-trip test for an ipex-optimized LSTM.

        Trains one step on both the eager and the ipex model, saves model
        and optimizer state_dicts for each, reloads both checkpoints into
        fresh models, re-optimizes, trains a second step, and asserts the
        two paths still agree — for every supported optimizer and dtype.
        """
        class Lstm(torch.nn.Module):
            def __init__(
                self,
                input_size,
                hidden_size,
                num_layers,
                bidirectional,
                bias,
                dropout,
                batch_first,
            ):
                super(Lstm, self).__init__()
                self.lstm = torch.nn.LSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    bidirectional=bidirectional,
                    bias=bias,
                    dropout=dropout,
                    batch_first=batch_first,
                )
            def forward(self, x, h=None):
                x, h = self.lstm(x, h)
                return x, h
        rand_seed = int(get_rand_seed())
        print("{} rand sed: {}".format(sys._getframe().f_code.co_name, rand_seed))
        torch.manual_seed(rand_seed)
        optimizer_options = [
            Lamb,
            Adadelta,
            Adagrad,
            Adam,
            AdamW,
            Adamax,
            ASGD,
            Rprop,
            SGD,
        ]
        input_size = 1
        hidden_size = 5
        num_layers = 3
        bidirectional = True
        bias = True
        empty_state = False
        batch_first = True
        dropout = 0.2
        batch_size = 2
        seq_len = 3
        num_directions = 2 if bidirectional else 1
        input = torch.randn(batch_size, seq_len, input_size)
        h = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        c = torch.randn(num_layers * num_directions, batch_size, hidden_size)
        model = Lstm(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            bias=bias,
            dropout=dropout,
            batch_first=batch_first,
        )
        test_dtypes = [torch.float]
        if core.onednn_has_bf16_support():
            test_dtypes.append(torch.bfloat16)
        options = itertools.product(test_dtypes, optimizer_options)
        for dtype, optimizer in options:
            rtol = 1.3e-6
            atol = 1e-5
            if dtype == torch.bfloat16:
                rtol = 2e-2
                atol = 3e-2
            x = input.to(dtype=dtype).float()
            h = h.to(dtype=dtype).float()
            c = c.to(dtype=dtype).float()
            model = model.to(dtype=dtype).float()
            model.train()
            lr = 1e-2
            origin_model = copy.deepcopy(model).train()
            origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, dtype=dtype, optimizer=origin_optimizer, level="O1"
            )
            x1 = x.clone().requires_grad_()
            x2 = x.clone().requires_grad_()
            h1 = h.clone().requires_grad_()
            h2 = h.clone().requires_grad_()
            c1 = c.clone().requires_grad_()
            c2 = c.clone().requires_grad_()
            # re-seed before each forward so dropout masks match
            if empty_state:
                torch.manual_seed(rand_seed)
                y1, hy1 = origin_model(x1)
            else:
                torch.manual_seed(rand_seed)
                y1, hy1 = origin_model(x1, (h1, c1))
            loss1 = y1.sum()
            loss1_hy1_0 = hy1[0].sum()
            loss1_hy1_1 = hy1[1].sum()
            origin_optimizer.zero_grad()
            loss1.backward(retain_graph=True)
            if not empty_state:
                loss1_hy1_0.backward(retain_graph=True)
                loss1_hy1_1.backward(retain_graph=True)
            torch.nn.utils.clip_grad_value_(origin_model.parameters(), 10)
            origin_optimizer.step()
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                if empty_state:
                    torch.manual_seed(rand_seed)
                    y2, hy2 = ipex_model(x2)
                else:
                    torch.manual_seed(rand_seed)
                    y2, hy2 = ipex_model(x2, (h2, c2))
                loss2 = y2.sum()
                loss2_hy2_0 = hy2[0].sum()
                loss2_hy2_1 = hy2[1].sum()
                ipex_optimizer.zero_grad()
                loss2.backward(retain_graph=True)
                if not empty_state:
                    loss2_hy2_0.backward(retain_graph=True)
                    loss2_hy2_1.backward(retain_graph=True)
                torch.nn.utils.clip_grad_value_(ipex_model.parameters(), 10)
                ipex_optimizer.step()
            # checkpoint both paths after the first training step
            torch.save(
                {
                    "model_state_dict": origin_model.state_dict(),
                    "optimizer_state_dict": origin_optimizer.state_dict(),
                },
                "origin_checkpoint.pth",
            )
            torch.save(
                {
                    "model_state_dict": ipex_model.state_dict(),
                    "optimizer_state_dict": ipex_optimizer.state_dict(),
                },
                "ipex_checkpoint.pth",
            )
            self.assertEqual(y1, y2.float(), rtol=rtol, atol=atol)
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state[var_name],
                    rtol=rtol,
                    atol=atol,
                )
            origin_optimizer_state = origin_optimizer.state_dict()
            ipex_optimizer_state = ipex_optimizer.state_dict()
            for var_name in origin_optimizer_state:
                if var_name == "state":
                    self.assertEqual(
                        origin_optimizer_state[var_name],
                        ipex_optimizer_state[var_name],
                        rtol=rtol,
                        atol=atol,
                    )
            # reload the eager checkpoint into a fresh model/optimizer
            origin_model = copy.deepcopy(model).train()
            origin_optimizer = optimizer(origin_model.parameters(), lr=lr)
            origin_checkpoint = torch.load("origin_checkpoint.pth")
            origin_model.load_state_dict(origin_checkpoint["model_state_dict"])
            origin_optimizer.load_state_dict(origin_checkpoint["optimizer_state_dict"])
            # load ipex model state
            origin_ipex_model = copy.deepcopy(model)
            origin_ipex_optimizer = optimizer(origin_ipex_model.parameters(), lr=lr)
            ipex_checkpoint = torch.load("ipex_checkpoint.pth")
            origin_ipex_model.load_state_dict(ipex_checkpoint["model_state_dict"])
            origin_ipex_optimizer.load_state_dict(
                ipex_checkpoint["optimizer_state_dict"]
            )
            ipex_model, ipex_optimizer = ipex.optimize(
                origin_model, dtype=dtype, optimizer=origin_optimizer, level="O1"
            )
            # train second step for origin.
            if empty_state:
                torch.manual_seed(rand_seed)
                y1, hy1 = origin_model(x1)
            else:
                torch.manual_seed(rand_seed)
                y1, hy1 = origin_model(x1, (h1, c1))
            loss1 = y1.sum()
            loss1_hy1_0 = hy1[0].sum()
            loss1_hy1_1 = hy1[1].sum()
            origin_optimizer.zero_grad()
            loss1.backward(retain_graph=True)
            if not empty_state:
                loss1_hy1_0.backward(retain_graph=True)
                loss1_hy1_1.backward(retain_graph=True)
            origin_optimizer.step()
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                # train the second step for the ipex model.
                if empty_state:
                    torch.manual_seed(rand_seed)
                    y3, hy3 = ipex_model(x2)
                else:
                    torch.manual_seed(rand_seed)
                    y3, hy3 = ipex_model(x2, (h2, c2))
                loss3 = y3.sum()
                loss3_hy3_0 = hy3[0].sum()
                loss3_hy3_1 = hy3[1].sum()
                ipex_optimizer.zero_grad()
                loss3.backward(retain_graph=True)
                if not empty_state:
                    loss3_hy3_0.backward(retain_graph=True)
                    loss3_hy3_1.backward(retain_graph=True)
                ipex_optimizer.step()
            self.assertEqual(y1, y3.float(), rtol=rtol, atol=atol)
            origin_model_state = origin_model.state_dict()
            ipex_model_state = ipex_model.state_dict()
            for var_name in origin_model_state:
                self.assertEqual(
                    origin_model_state[var_name],
                    ipex_model_state[var_name],
                    rtol=rtol,
                    atol=atol,
                )
            os.remove("origin_checkpoint.pth")
            os.remove("ipex_checkpoint.pth")
    def test_lstm_weight_reorder(self):
        """Run the ipex LSTM with two different input shapes to verify
        weight prepack still produces correct results when the packed
        weight format changes between calls."""
        class Lstm(torch.nn.Module):
            def __init__(
                self,
                input_size,
                hidden_size,
                num_layers,
                bidirectional,
                bias,
                dropout,
                batch_first,
            ):
                super(Lstm, self).__init__()
                self.lstm = torch.nn.LSTM(
                    input_size=input_size,
                    hidden_size=hidden_size,
                    num_layers=num_layers,
                    bidirectional=bidirectional,
                    bias=bias,
                    dropout=dropout,
                    batch_first=batch_first,
                )
            def forward(self, x, h=None):
                x, h = self.lstm(x, h)
                return x, h
        test_dtypes = []
        if core.onednn_has_bf16_support():
            test_dtypes.append(torch.bfloat16)
        for dtype in test_dtypes:
            m = Lstm(2, 3, 1, False, False, 0, False)
            # same feature size, different sequence lengths
            x = torch.randn(2, 1, 2)
            x_var = torch.randn(5, 1, 2)
            origin_model = copy.deepcopy(m).eval()
            ipex_model = ipex.optimize(origin_model, dtype=dtype)
            with torch.cpu.amp.autocast(enabled=True, dtype=dtype):
                # run with 2 different shapes to verify weight prepack works when weight format changes
                y = ipex_model(x)
                y_var = ipex_model(x_var)
                y_ref = origin_model(x_var)
                self.assertEqual(y_var, y_ref)
if __name__ == "__main__":
    # fixed seed so the randomized shapes/values above are reproducible
    torch.manual_seed(2020)
    test = unittest.main()
| 85,990 | 38.19371 | 131 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_instance_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import unittest
from common_utils import TestCase
from torch.nn import InstanceNorm2d, InstanceNorm3d, BatchNorm2d, BatchNorm3d
# spatial-dimensionality -> norm-module lookup tables; the tests check
# InstanceNorm over (N, C, ...) against BatchNorm applied to a
# (1, N*C, ...) reshape of the same data
bn_m = {2: BatchNorm2d, 3: BatchNorm3d}
inst_m = {2: InstanceNorm2d, 3: InstanceNorm3d}
class InstanceNormTester(TestCase):
    """Checks InstanceNorm{2,3}d against the equivalent BatchNorm applied
    to a (1, N*C, ...) reshape, in fp32 and bf16, contiguous and
    channels-last, forward and backward."""
    def test_instance_norm(self):
        """fp32: instance norm must match the batch-norm reshape
        equivalent for both values and input gradients."""
        for dim in [2, 3]:
            batch = 10
            channel = 100
            input_size = [batch, channel]
            # instance norm over (N, C, ...) equals batch norm over
            # (1, N*C, ...) — each (n, c) slice normalized independently
            bn_size = [1, batch * channel]
            if dim == 2:
                memory_format = torch.channels_last
            else:
                memory_format = torch.channels_last_3d
            if dim == 2:
                input_size += [45, 35]
                bn_size += [45, 35]
            if dim == 3:
                input_size += [45, 35, 100]
                bn_size += [45, 35, 100]
            input = torch.randn(input_size)
            x = input.clone().detach().requires_grad_()
            x1 = input.clone().detach().requires_grad_()
            x1r = x1.reshape(bn_size)
            m = inst_m[dim](channel, affine=True)
            m1 = bn_m[dim](batch * channel, affine=True)
            y = m(x)
            y1 = m1(x1r).reshape_as(x1)
            self.assertTrue(y.dtype == torch.float32)
            self.assertEqual(y, y1)
            # backward
            y.mean().backward()
            y1.mean().backward()
            self.assertTrue(x.grad.dtype == torch.float32)
            self.assertEqual(x.grad, x1.grad)
            # test channels last
            x2 = input.clone().detach().to(memory_format=memory_format).requires_grad_()
            y2 = m(x2)
            self.assertTrue(y2.dtype == torch.float32)
            self.assertEqual(y2, y1)
            self.assertTrue(y2.is_contiguous(memory_format=memory_format))
            y2.mean().backward()
            self.assertTrue(x2.grad.dtype == torch.float32)
            self.assertEqual(x2.grad, x1.grad)
            self.assertTrue(x2.grad.is_contiguous(memory_format=memory_format))
    def test_instance_norm_bfloat16(self):
        """bf16 under autocast: same equivalence, with a looser tolerance
        for the forward values."""
        for dim in [2, 3]:
            batch = 10
            channel = 100
            input_size = [batch, channel]
            bn_size = [1, batch * channel]
            if dim == 2:
                memory_format = torch.channels_last
            else:
                memory_format = torch.channels_last_3d
            if dim == 2:
                input_size += [45, 35]
                bn_size += [45, 35]
            if dim == 3:
                input_size += [45, 35, 100]
                bn_size += [45, 35, 100]
            m = inst_m[dim](channel, affine=True)
            m1 = bn_m[dim](batch * channel, affine=True)
            with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
                input = torch.randn(input_size).bfloat16()
                x = input.clone().detach().requires_grad_()
                x1 = input.clone().detach().requires_grad_()
                x1r = x1.reshape(bn_size)
                y = m(x)
                y1 = m1(x1r).reshape_as(x1)
                self.assertTrue(y.dtype == torch.bfloat16)
                self.assertEqual(y, y1, prec=0.1)
                # backward
                y.mean().backward()
                y1.mean().backward()
                self.assertTrue(x.grad.dtype == torch.bfloat16)
                self.assertEqual(x.grad, x1.grad)
                # test channels last
                x2 = (
                    input.clone()
                    .detach()
                    .to(memory_format=memory_format)
                    .requires_grad_()
                )
                y2 = m(x2)
                self.assertTrue(y2.dtype == torch.bfloat16)
                self.assertTrue(y2.is_contiguous(memory_format=memory_format))
                self.assertEqual(y2, y1, prec=0.1)
                y2.mean().backward()
                self.assertTrue(x2.grad.dtype == torch.bfloat16)
                self.assertTrue(x2.grad.is_contiguous(memory_format=memory_format))
                self.assertEqual(x2.grad, x1.grad)
if __name__ == "__main__":
    # run the InstanceNorm test suite when executed directly
    test = unittest.main()
| 4,197 | 33.130081 | 88 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/common_methods_invocations.py | """
From PyTorch:
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
From Caffe2:
Copyright (c) 2016-present, Facebook Inc. All rights reserved.
All contributions by Facebook:
Copyright (c) 2016 Facebook Inc.
All contributions by Google:
Copyright (c) 2015 Google Inc.
All rights reserved.
All contributions by Yangqing Jia:
Copyright (c) 2015 Yangqing Jia
All rights reserved.
All contributions from Caffe:
Copyright(c) 2013, 2014, 2015, the respective contributors
All rights reserved.
All other contributions:
Copyright(c) 2015, 2016 the respective contributors
All rights reserved.
Caffe2 uses a copyright model similar to Caffe: each contributor holds
copyright over their contributions to Caffe2. The project versioning records
all such contribution and copyright details. If a contributor wants to further
mark their specific copyright on a particular contribution, they should
indicate their copyright solely in the commit message of the change when it is
committed.
All rights reserved.
"""
import torch
from torch._six import inf, istuple
from functools import reduce
from operator import mul, itemgetter
import collections
from torch.autograd import Variable
from torch.testing import make_non_contiguous
from common_utils import (
skipIfNoLapack,
prod_single_zero,
random_square_matrix_of_rank,
random_symmetric_matrix,
random_symmetric_psd_matrix,
random_symmetric_pd_matrix,
make_nonzero_det,
random_fullrank_matrix_distinct_singular_value,
set_rng_seed,
)
def index_variable(shape, max_indices):
    """Return a random ``int64`` index tensor of the given shape.

    Every entry is drawn uniformly from ``[0, max_indices)``.  A bare int
    ``shape`` is treated as a 1-D shape of that length.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    # rand() lies in [0, 1), so scaling and truncating stays < max_indices.
    return (torch.rand(*dims) * max_indices).floor().long()
def index_perm_variable(shape, max_indices):
    """Return an index tensor of ``shape`` with *distinct* values.

    The values are the first ``prod(shape)`` entries of a random permutation
    of ``range(max_indices)``, so no index repeats.  A bare int ``shape`` is
    treated as a 1-D shape of that length.
    """
    dims = shape if isinstance(shape, tuple) else (shape,)
    count = reduce(mul, dims)
    return torch.randperm(max_indices)[:count].view(dims)
def gather_variable(shape, index_dim, max_indices, duplicate=False):
    """Build a 2-D ``int64`` index tensor suitable for ``gather``/``scatter``.

    Each slice taken along ``index_dim`` is filled with a prefix of a fresh
    random permutation of ``range(max_indices)``, so indices within a slice
    are distinct and all lie in ``[0, max_indices)``.

    Args:
        shape: 2-element shape of the returned tensor.
        index_dim: dimension (0 or 1) along which slices get permuted indices.
        max_indices: exclusive upper bound for the index values.
        duplicate: if True, copy slice 1 of the *other* dimension over slice 0,
            forcing a duplicated row/column (used to exercise scatter with
            repeated indices).

    Returns:
        A ``torch.int64`` tensor of the requested shape.
    """
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    # torch.empty replaces the deprecated, legacy torch.LongTensor(*shape)
    # constructor; both produce an uninitialized tensor that the loop below
    # fills completely.
    index = torch.empty(shape, dtype=torch.long)
    for i in range(shape[index_dim]):
        index.select(index_dim, i).copy_(
            torch.randperm(max_indices)[: shape[batch_dim]]
        )
    if duplicate:
        index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
    return index
def bernoulli_scalar():
    """Return a 0-dim boolean tensor drawn from Bernoulli(p=0.5)."""
    flag = torch.tensor(False)
    return flag.bernoulli_()
def mask_not_all_zeros(shape):
    """Return a random boolean mask of ``shape`` with at least one True entry.

    Redraws until the mask is non-empty; with p=0.5 per element this almost
    always succeeds on the first draw.
    """
    assert len(shape) > 0
    mask = torch.randn(shape).gt(0)
    while not mask.sum() > 0:
        mask = torch.randn(shape).gt(0)
    return mask
def uniform_scalar(offset=0, requires_grad=False):
    """Return a 0-dim tensor uniform in ``[offset, offset + 1)``.

    ``requires_grad`` is applied to the returned (leaf) tensor.
    """
    return (torch.rand(()) + offset).requires_grad_(requires_grad)
def normal_scalar_clamp(amin, amax, requires_grad=False):
    """Return a 0-dim standard-normal sample clamped to ``[amin, amax]``.

    ``requires_grad`` is applied to the returned (leaf) tensor.
    """
    return torch.randn(()).clamp(amin, amax).requires_grad_(requires_grad)
def prod_zeros(dim_size, dim_select):
    """Return a random cube with a few slices zeroed out.

    Builds a ``(dim_size, dim_size, dim_size)`` normal tensor and zeroes the
    intersections of positions (0,1), (2,3) and (4,3) along the two selected
    dimensions — exercising prod/cumprod gradients through exact zeros.
    """
    assert len(dim_select) == 2
    cube = torch.randn(dim_size, dim_size, dim_size)
    first, second = dim_select
    for a, b in ((0, 1), (2, 3), (4, 3)):
        cube.narrow(first, a, 1).narrow(second, b, 1).zero_()
    return cube
# Single-field wrapper used in method_tests() to tag a test argument whose
# tensor should not participate in gradient checking.
non_differentiable = collections.namedtuple("non_differentiable", ["tensor"])
class dont_convert(tuple):
    """Marker subclass of tuple used in method_tests() argument specs.

    Consumers can distinguish these tuples via isinstance checks; the name
    suggests the wrapped tuple is passed through without the usual argument
    conversion (e.g. ``dont_convert(())`` for scalar-shape args) — confirm
    against the test harness that consumes method_tests().
    """
    pass
class NoArgsClass(object):
    """Empty, zero-length iterable used as the NO_ARGS sentinel below.

    Iterating it yields nothing and ``len()`` is 0, so test machinery can
    treat "no arguments" uniformly with real argument tuples.
    """

    def __iter__(self):
        # The instance is its own (exhausted) iterator.
        return self

    def __next__(self):
        raise StopIteration()

    next = __next__  # Python 2 compatibility

    def __len__(self):
        return 0
# Singleton sentinel meaning "this test case passes no arguments".
NO_ARGS = NoArgsClass()

# Canonical tensor dimension sizes used throughout method_tests():
# large, medium, small.
L = 20
M = 10
S = 5
def ident(x):
    """Identity: return *x* unchanged.

    Used in method_tests() entries as the default "fn mapping output to part
    that should be gradcheck'ed" slot (see the tuple-format comment below).
    """
    return x
# (
# method name,
# input size/constructing fn,
# args (tuple represents shape of a tensor arg),
# test variant name (will be used at test name suffix), // optional
# (True, nonfusible_nodes, fusible_nodes) for autodiff, // optional
# indices for possible dim arg, // optional
# fn mapping output to part that should be gradcheck'ed, // optional
# kwargs // optional
# )
# Note: some functions have separate schema for (Tensor other) and (Scalar other),
# and it's possible that we only support AD for Scalar version but not Tensor
# version, and vice versa.
# When writing tests, only scalar(float/int) input triggers the Scalar schema.
# uniform_scalar produces a scalar **Tensor** which won't match Scalar input.
def method_tests():
set_rng_seed(0)
return [
("add", (S, S, S), ((S, S, S),), "", (True,)),
("add", (S, S, S), ((S, S),), "broadcast_rhs", (True,)),
("add", (S, S), ((S, S, S),), "broadcast_lhs", (True,)),
("add", (S, 1, S), ((M, S),), "broadcast_all", (True,)),
("add", (), ((),), "scalar", (True,)),
("add", (S, S, S), ((),), "scalar_broadcast_rhs", (True,)),
("add", (), ((S, S, S),), "scalar_broadcast_lhs", (True,)),
("add", (S, S, S), (3.14,), "constant", (True,)),
("add", (), (3.14,), "scalar_constant", (True,)),
("__radd__", (S, S, S), (3.14,), "constant", (True, "aten::add")),
("__radd__", (), (3.14,), "scalar_constant", (True, "aten::add")),
("sub", (S, S, S), ((S, S, S),), "", (True,)),
("sub", (S, S, S), ((S, S),), "broadcast_rhs", (True,)),
("sub", (S, S), ((S, S, S),), "broadcast_lhs", (True,)),
("sub", (S, 1, S), ((M, S),), "broadcast_all", (True,)),
("sub", (S, S, S), ((),), "scalar_broadcast_rhs", (True,)),
("sub", (), ((S, S, S),), "scalar_broadcast_lhs", (True,)),
("sub", (S, S, S), (3.14,), "constant", (True,)),
("sub", (), (3.14,), "scalar_constant", (True,)),
("__rsub__", (S, S, S), (3.14,), "constant", (True, "aten::rsub")),
("__rsub__", (), (3.14,), "scalar_constant", (True, "aten::rsub")),
("mul", (S, S, S), ((S, S, S),), "", (True,)),
("mul", (), ((),), "scalar", (True,)),
("mul", (S, S, S), ((S, S),), "broadcast_rhs", (True,)),
("mul", (S, S), ((S, S, S),), "broadcast_lhs", (True,)),
("mul", (S, 1, S), ((M, S),), "broadcast_all", (True,)),
("mul", (S, S, S), ((),), "scalar_broadcast_rhs", (True,)),
("mul", (), ((S, S, S),), "scalar_broadcast_lhs", (True,)),
("mul", (S, S, S), (3.14,), "constant", (True,)),
("mul", (), (3.14,), "scalar_constant", (True,)),
("__rmul__", (S, S, S), (3.14,), "constant", (True, "aten::mul")),
("__rmul__", (), (3.14,), "scalar_constant", (True, "aten::mul")),
("div", (S, S, S), (torch.rand(S, S, S) + 0.1,), "", (True,)),
("div", (S, S, S), (torch.rand(S, S) + 0.1,), "broadcast_rhs", (True,)),
("div", (S, S), (torch.rand(S, S, S) + 0.1,), "broadcast_lhs", (True,)),
("div", (S, 1, S), (torch.rand(M, S) + 0.1,), "broadcast_all", (True,)),
("div", (), (uniform_scalar(0.1),), "scalar", (True,)),
("div", (S, S, S), (uniform_scalar(0.1),), "scalar_broadcast_rhs", (True,)),
("div", (), (uniform_scalar(0.1),), "scalar_broadcast_lhs", (True,)),
("div", torch.rand(S, S, S) + 1e-1, (3.14,), "constant", (True,)),
(
"__rdiv__",
torch.rand(S, S, S) + 1e-1,
(3.14,),
"constant",
(True, [], ["aten::mul", "aten::reciprocal"]),
),
(
"div",
uniform_scalar(1e-1, requires_grad=True),
(3.14,),
"scalar_constant",
(True,),
),
(
"__rdiv__",
uniform_scalar(1e-1, requires_grad=True),
(3.14,),
"scalar_constant",
(True, [], ["aten::mul", "aten::reciprocal"]),
),
("pow", torch.rand(S, S, S) + 1e-3, (torch.rand(S, S, S) + 0.1,), "", (True,)),
(
"pow",
torch.rand(S, S, S) + 1e-3,
(
torch.rand(
1,
)
+ 0.1,
),
"broadcast_rhs",
(True,),
),
(
"pow",
torch.rand(
1,
)
+ 1e-3,
(torch.rand(S, S, S) + 0.1,),
"broadcast_lhs",
(True,),
),
(
"pow",
torch.rand(S, 1, S) + 1e-3,
(torch.rand(1, S, 1) + 0.1,),
"broadcast_all",
(True,),
),
(
"pow",
uniform_scalar(1e-3, requires_grad=True),
(uniform_scalar(0.1),),
"scalar",
(True,),
),
(
"pow",
torch.rand(S, S, S) + 1e-3,
(uniform_scalar(0.1),),
"scalar_broadcast_rhs",
(True,),
),
(
"pow",
uniform_scalar(1e-3, requires_grad=True),
(torch.rand(S, S, S) + 0.1,),
"scalar_broadcast_lhs",
(True,),
),
("pow", torch.rand(S, S, S) + 1e-3, (3.14,), "constant", (True,)),
(
"__rpow__",
torch.rand(S, S, S) + 1e-3,
(3.14,),
"constant",
(True, "aten::pow"),
),
(
"pow",
uniform_scalar(1e-3, requires_grad=True),
(3.14,),
"scalar_constant",
(True,),
),
(
"__rpow__",
uniform_scalar(1e-3, requires_grad=True),
(3.14,),
"scalar_constant",
(True, "aten::pow"),
),
("transpose", (1, 2, 3), (1, 2), "dim", (True,), [0, 1]),
("transpose", (), (0, 0), "scalar", (True,)),
("transpose", (1,), (0, 0), "1d", (True,)),
("transpose", torch.rand(L, L), (0, 1), "2d", (True,)),
("transpose", torch.rand(S, S, S), (2, 0), "3d", (True,)),
("t", (1, 2), NO_ARGS, "", (True,)),
("view", (S, S, S), (S * S, S), "", (True,)),
("view", (S, S, S), (torch.Size([S * S, S]),), "size", (True,)),
("view", (S,), (S,), "1d", (True,)),
("view", (), (dont_convert(()),), "scalar_to_scalar", (True,)),
("view", (), (1,), "scalar_to_1d", (True,)),
("reshape", (S, S, S), (S * S, S), "", (True,)),
("reshape", (S, S, S), (torch.Size([S * S, S]),), "size", (True,)),
("reshape", (S,), (S,), "1d", (True,)),
("reshape", (), (dont_convert(()),), "scalar_to_scalar", (True,)),
("reshape", (), (1,), "scalar_to_1d", (True,)),
("reshape_as", (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
("reshape_as", (), (non_differentiable(torch.tensor(42.0)),), "scalar"),
("reshape_as", (), (non_differentiable(torch.rand(1, 1)),), "scalar_to_dims"),
("flip", (S, S, S), ([0],), "d0"),
("flip", (S, S, S), ([0, 1, 2],), "d012"),
("flip", (S, S, S), ([0, 2],), "d02"),
("flip", (S, S, S), ([2, 0],), "d20"),
("flip", (S, S, S), ([-1],), "neg_d"),
("roll", (S, S, S), (0, 0), "d0"),
("roll", (S, S, S), (1, 2), "d12"),
(
"roll",
(S, S, S),
(
0,
2,
),
"d02",
),
(
"roll",
(S, S, S),
(
2,
0,
),
"d20",
),
("roll", (S, S, S), (-1, 0), "neg_shift"),
("roll", (S, S, S), (10000, 1), "loop_shift"),
("roll", (S, S, S), (2,), "flattened"),
("roll", (S, S, S), ([1, 2, -1], [0, 1, 2]), "three_dims"),
(
"rot90",
(S, S, S),
(
1,
[0, 1],
),
"k1_d01",
),
(
"rot90",
(S, S, S),
(
1,
[1, 2],
),
"k1_d12",
),
(
"rot90",
(S, S, S),
(
1,
[1, -1],
),
"k1_neg_d",
),
("rot90", (S, S, S), (), "default"),
("view_as", (S, S, S), (non_differentiable(torch.rand(S * S, S)),)),
("view_as", (), (non_differentiable(torch.tensor(5.5)),), "scalar"),
("view_as", (), (non_differentiable(torch.rand(1, 1)),), "scalar_to_dims"),
("expand", (S, 1, 1), (S, S, S), "", (True,)),
("expand", (torch.Size([S, 1, S]),), (S, S, S), "size", (True,)),
("expand", (S, 1), (S, S, S), "new_dim", (True,)),
("expand", (1,), (S, S, S), "1_element", (True,)),
("expand", (1, S), (1, 1, S), "new_dim_front_old_front_1", (True,)),
("expand", (), (dont_convert(()),), "scalar_to_scalar"),
("expand", (), (1, 3, 2), "scalar_to_dims", (True,)),
("expand_as", (S, 1, 1), (torch.rand(S, S, S),), "", (True,)),
("exp", (S, S, S), NO_ARGS, "", (True,)),
("exp", (), NO_ARGS, "scalar", (True,)),
("expm1", (S, S, S), NO_ARGS, "", (True,)),
("expm1", (), NO_ARGS, "scalar", (True,)),
("erf", torch.rand(S, S, S), NO_ARGS, "", (True,)),
("erf", uniform_scalar(requires_grad=True), NO_ARGS, "scalar", (True,)),
("erfc", torch.rand(S, S, S), NO_ARGS, "", (True,)),
("erfc", uniform_scalar(requires_grad=True), NO_ARGS, "scalar", (True,)),
("erfinv", torch.rand(S, S, S).clamp(-0.9, 0.9), NO_ARGS),
(
"erfinv",
normal_scalar_clamp(-0.9, 0.9, requires_grad=True),
NO_ARGS,
"scalar",
),
("log", torch.rand(S, S, S) + 1e-2, NO_ARGS, "", (True,)),
("log", uniform_scalar(1e-2, requires_grad=True), NO_ARGS, "scalar", (True,)),
("log10", torch.rand(S, S, S) + 1e-2, NO_ARGS, "", (True,)),
("log10", uniform_scalar(1e-2, requires_grad=True), NO_ARGS, "scalar", (True,)),
("log1p", torch.rand(S, S, S), NO_ARGS, "", (True,)),
("log1p", uniform_scalar(requires_grad=True), NO_ARGS, "scalar", (True,)),
("log2", torch.rand(S, S, S) + 1e-2, NO_ARGS, "", (True,)),
("log2", uniform_scalar(1e-2, requires_grad=True), NO_ARGS, "scalar", (True,)),
("tanh", (S, S, S), NO_ARGS, "", (True,)),
("tanh", (), NO_ARGS, "scalar", (True,)),
("sigmoid", (S, S, S), NO_ARGS, "", (True,)),
("sigmoid", (), NO_ARGS, "scalar", (True,)),
("sinh", (S, S, S), NO_ARGS, "", (True,)),
("sinh", (), NO_ARGS, "scalar", (True,)),
("cosh", (S, S, S), NO_ARGS, "", (True,)),
("cosh", (), NO_ARGS, "scalar", (True,)),
("abs", (S, S, S), NO_ARGS, "", (True,)),
("abs", (), NO_ARGS, "scalar", (True,)),
("clamp", (S, S, S), (0, 1), "", (True,)),
("clamp", (S, S, S), (None, 0.5), "min", (True,)),
("clamp", (S, S, S), (0.5, None), "max", (True,)),
("clamp", (), (0, 1), "scalar", (True,)),
("clamp", (), (None, 0.5), "min_scalar", (True,)),
("clamp", (), (0.5, None), "max_scalar", (True,)),
("clamp", (S, S), (), "max_scalar_kwarg", (True,), (), (), ident, {"max": 1}),
("sqrt", torch.rand(S, S, S) + 5e-4, NO_ARGS, "", (True,)),
("sqrt", uniform_scalar(5e-4, requires_grad=True), NO_ARGS, "scalar", (True,)),
("sin", (S, S, S), NO_ARGS, "", (True,)),
("sin", (), NO_ARGS, "scalar", (True,)),
("cos", (S, S, S), NO_ARGS, "", (True,)),
("cos", (), NO_ARGS, "scalar", (True,)),
("tan", torch.randn(S, S, S).clamp(-1, 1), NO_ARGS, "", (True,)),
("asin", torch.randn(S, S, S).clamp(-0.9, 0.9), NO_ARGS, "", (True,)),
("acos", torch.randn(S, S, S).clamp(-0.9, 0.9), NO_ARGS, "", (True,)),
("atan", (S, S, S), NO_ARGS, "", (True,)),
("atan", (), NO_ARGS, "scalar", (True,)),
("atan2", (S, S, S), ((S, S, S),)),
("atan2", (), ((),), "scalar"),
("atan2", (S, S, S), ((S,),), "broadcast_rhs"),
("atan2", (S,), ((S, S, S),), "broadcast_lhs"),
("atan2", (S, 1, S), ((S, S),), "broadcast_all"),
("reciprocal", torch.rand(S, S, S) + 0.1, NO_ARGS, "", (True,)),
(
"reciprocal",
uniform_scalar(0.1, requires_grad=True),
NO_ARGS,
"scalar",
(True,),
),
("round", (S, S, S), NO_ARGS, "", (True,)),
("round", (), NO_ARGS, "scalar", (True,)),
("sign", (S, S, S), NO_ARGS),
("sign", (), NO_ARGS, "scalar"),
("trunc", (S, S, S), NO_ARGS, "", (True,)),
("trunc", (), NO_ARGS, "scalar", (True,)),
("floor", (S, S, S), NO_ARGS, "", (True,)),
("floor", (), NO_ARGS, "scalar", (True,)),
("ceil", (S, S, S), NO_ARGS, "", (True,)),
("ceil", (), NO_ARGS, "scalar", (True,)),
("rsqrt", torch.rand(S, S, S) + 1e-2, NO_ARGS, "", (True,)),
("rsqrt", uniform_scalar(1e-2, requires_grad=True), NO_ARGS, "scalar", (True,)),
("frac", (S, S, S), NO_ARGS, "", (True,)),
("frac", (), NO_ARGS, "scalar", (True,)),
("fmod", (S, S, S), (1.5,), "", (True,)),
("fmod", (), (1.5,), "scalar", (True,)),
("fmod", (S, S, S), (non_differentiable(torch.rand(S, S, S) + 1.5),), "tensor"),
(
"fmod",
(S,),
(non_differentiable(torch.rand(S, S, S) + 1.5),),
"tensor_broadcast_lhs",
),
(
"fmod",
(S, S, S),
(non_differentiable(torch.rand(S) + 1.5),),
"tensor_broadcast_rhs",
),
(
"fmod",
(S, 1, S),
(non_differentiable(torch.rand(S, S) + 1.5),),
"tensor_broadcast_all",
),
("fmod", (), (non_differentiable(uniform_scalar(1.5)),), "scalar_tensor"),
(
"fmod",
(),
(non_differentiable(torch.rand(S, S, S) + 1.5),),
"scalar_tensor_broadcast_lhs",
),
(
"fmod",
(S, S, S),
(non_differentiable(uniform_scalar(1.5)),),
"scalar_tensor_broadcast_rhs",
),
("remainder", (S, S, S), (1.5,), "", (True,)),
("remainder", (), (1.5,), "scalar", (True,)),
(
"remainder",
(S, S, S),
(non_differentiable(torch.rand(S, S, S) + 1.5),),
"tensor",
),
(
"remainder",
(S,),
(non_differentiable(torch.rand(S, S, S) + 1.5),),
"tensor_broadcast_lhs",
),
(
"remainder",
(S, 1, S),
(non_differentiable(torch.rand(S, S) + 1.5),),
"tensor_broadcast_all",
),
("remainder", (), (non_differentiable(uniform_scalar(1.5)),), "scalar_tensor"),
(
"remainder",
(),
(non_differentiable(torch.rand(S, S, S) + 1.5),),
"scalar_tensor_broadcast_lhs",
),
("lerp", (S, S, S), ((S, S, S), 0.4), "scalar_no_broadcast", (True,)),
("lerp", (S, S, S), ((S,), 0.4), "broadcast_rhs", (True,)),
("lerp", (S,), ((S, S, S), 0.4), "broadcast_lhs", (True,)),
("lerp", (S, 1, S), ((S, S), 0.4), "broadcast_all", (True,)),
("lerp", (), ((), 0.4), "scalar", (True,)),
("lerp", (S, S, S), ((), 0.4), "scalar_broadcast_rhs", (True,)),
("lerp", (), ((S, S, S), 0.4), "scalar_broadcast_lhs", (True,)),
("max", (S, S, S), NO_ARGS),
("max", (S, S, S), (1,), "dim", (), [0]),
(
"max",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("max", (), NO_ARGS, "scalar"),
("max", (), (0,), "scalar_dim", (), [0]),
(
"max",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("max", (S, S, S), ((S, S, S),), "elementwise", (True,)),
("max", (S, S, S), ((S,),), "elementwise_broadcast_rhs", (True,)),
("max", (S,), ((S, S, S),), "elementwise_broadcast_lhs", (True,)),
("max", (S, 1, S), ((S, S),), "elementwise_broadcast_all", (True,)),
("max", (), ((),), "scalar_elementwise", (True,)),
("max", (S, S, S), ((),), "scalar_elementwise_broadcast_rhs", (True,)),
("max", (), ((S, S, S),), "scalar_elementwise_broadcast_lhs", (True,)),
(
"min",
(S, S, S),
NO_ARGS,
),
("min", (S, S, S), (1,), "dim", (), [0]),
(
"min",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("min", (), NO_ARGS, "scalar"),
("min", (), (0,), "scalar_dim", (), [0]),
(
"min",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("min", (S, S, S), ((S, S, S),), "elementwise", (True,)),
("min", (S, S, S), ((S,),), "elementwise_broadcast_rhs", (True,)),
("min", (S,), ((S, S, S),), "elementwise_broadcast_lhs", (True,)),
("min", (S, 1, S), ((S, S),), "elementwise_broadcast_all", (True,)),
("min", (), ((),), "scalar_elementwise", (True,)),
("min", (S, S, S), ((),), "scalar_elementwise_broadcast_rhs", (True,)),
("min", (), ((S, S, S),), "scalar_elementwise_broadcast_lhs", (True,)),
("mean", (S, S, S), NO_ARGS, "", (True,)),
("mean", (S, S, S), (1,), "dim", (True,), [0]),
(
"mean",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(True,),
[0],
),
("mean", (), NO_ARGS, "scalar", (True,)),
("mean", (), (0,), "scalar_dim", (True,), [0]),
(
"mean",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(True,),
[0],
),
(
"mean",
(S, S, S),
(),
"dtype",
(True,),
(),
(),
ident,
{"dtype": torch.float64},
),
("kthvalue", (S, S, S), (2,)),
("kthvalue", (), (1,), "scalar"),
(
"kthvalue",
(S, S, S),
(
2,
1,
),
"dim",
(),
[1],
),
(
"kthvalue",
(),
(
1,
0,
),
"scalar_dim",
(),
[1],
),
(
"kthvalue",
(S, S, S),
(
2,
1,
True,
),
"keepdim_dim",
(),
[1],
),
("kthvalue", (), (1, 0, True), "scalar_keepdim_dim", (), [1]),
(
"kthvalue",
(S,),
(
2,
0,
),
"dim_1d",
(),
[1],
),
(
"kthvalue",
(S,),
(
2,
0,
True,
),
"keepdim_dim_1d",
(),
[1],
),
("median", (S, S, S), NO_ARGS),
("median", (S, S, S), (1,), "dim", (), [0]),
(
"median",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("median", (), NO_ARGS, "scalar"),
("median", (), (0,), "scalar_dim", (), [0]),
(
"median",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("mode", (S, S, S), NO_ARGS),
("mode", (S, S, S), (1,), "dim", (), [0]),
(
"mode",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("mode", (), NO_ARGS, "scalar"),
("mode", (), (0,), "scalar_dim", (), [0]),
(
"mode",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("sum", (S, S, S), NO_ARGS),
("sum", (S, S, S), (1,), "dim", (), [0]),
(
"sum",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("sum", (), NO_ARGS, "scalar"),
("sum", (), (0,), "scalar_dim", (), [0]),
(
"sum",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("sum", (S, S, S), ([1, 2],), "multi_dim"),
(
"sum",
(S, S, S),
(
[1, 2],
True,
),
"multi_dim_keepdim",
),
("prod", (S, S, S), NO_ARGS),
("prod", (S, S, S), (1,), "dim", (), [0]),
(
"prod",
(S, S, S),
(
1,
True,
),
"keepdim_dim",
(),
[0],
),
("prod", (), NO_ARGS, "scalar"),
("prod", (), (0,), "scalar_dim", (), [0]),
(
"prod",
(),
(
0,
True,
),
"scalar_keepdim_dim",
(),
[0],
),
("prod", prod_zeros(S, [0, 1]), NO_ARGS, "zerodims2"),
("prod", prod_zeros(S, [0, 2]), NO_ARGS, "zerodims1"),
("prod", prod_zeros(S, [1, 2]), NO_ARGS, "zerodims0"),
("prod", prod_zeros(S, [0, 1]), (1,), "zeros_dims2", (), [0]),
("prod", prod_zeros(S, [0, 2]), (1,), "zeros_dims1", (), [0]),
("prod", prod_zeros(S, [1, 2]), (1,), "zeros_dims0", (), [0]),
("prod", prod_zeros(S, [0, 1]), (1, True), "keepdim_zeros_dims2", (), [0]),
("prod", prod_zeros(S, [0, 2]), (1, True), "keepdim_zeros_dims1", (), [0]),
("prod", prod_zeros(S, [1, 2]), (1, True), "keepdim_zeros_dims0", (), [0]),
("prod", prod_single_zero(S), NO_ARGS, "single_zero"),
("prod", (torch.tensor(0.0, requires_grad=True)), NO_ARGS, "scalar_zero"),
(
"prod",
(torch.tensor(0.0, requires_grad=True)),
(0,),
"scalar_dim_zero",
(),
[0],
),
(
"prod",
(torch.tensor(0.0, requires_grad=True)),
(
0,
True,
),
"scalar_keepdim_dim_zero",
(),
[0],
),
("var", (S, S, S), NO_ARGS, "", (True,)),
("var", (S, S, S), (1,), "dim", (True,), [0]),
("var", (S, S, S), (1, True, True), "keepdim_dim", (True,), [0]),
("var", (S,), (0,), "dim_1d", (True,), [0]),
("var", (S,), (0, True, True), "keepdim_dim_1d", (True,), [0]),
("std", (S, S, S), NO_ARGS, "", (True,)),
("std", (S, S, S), (1,), "dim", (True,), [0]),
("std", (S, S, S), (1, True, True), "keepdim_dim", (True,), [0]),
("std", (S,), (0,), "dim_1d", (True,), [0]),
("std", (S,), (0, True, True), "keepdim_dim_1d", (True,), [0]),
("var_mean", (S, S, S), NO_ARGS, ""),
("var_mean", (S, S, S), (1,), "dim", [0]),
("var_mean", (S, S, S), (1, True, True), "keepdim_dim", [0]),
("var_mean", (S,), (0,), "dim_1d", [0]),
("var_mean", (S,), (0, True, True), "keepdim_dim_1d", [0]),
("std_mean", (S, S, S), NO_ARGS, ""),
("std_mean", (S, S, S), (1,), "dim", [0]),
("std_mean", (S, S, S), (1, True, True), "keepdim_dim", [0]),
("std_mean", (S,), (0,), "dim_1d", [0]),
("std_mean", (S,), (0, True, True), "keepdim_dim_1d", [0]),
("renorm", (S, S, S), (2, 1, 0.5), "dim", (), [1]),
("renorm", (S, S, S), (1, 2, 3), "norm_1"),
("renorm", (S, S, S), (inf, 2, 0.5), "norm_inf"),
("repeat", (S,), (2,), "single_number"),
("repeat", (), (2, 3), "scalar"),
("repeat", (2, 2), (3, 2)),
("repeat", (2, 2), (1, 3, 1, 2), "unsqueeze"),
("cumsum", (S, S, S), (0,), "dim0", (), [0]),
("cumsum", (S, S, S), (1,), "dim1", (), [0]),
(
"cumsum",
(S, S, S),
(1,),
"dim1_cast",
(),
[0],
(),
ident,
{"dtype": torch.float64},
),
("cumsum", (), (0,), "dim0_scalar", (), [0]),
("cumprod", (S, S, S), (0,)),
("cumprod", (S, S, S), (1,), "dim1", (), [0]),
("cumprod", (), (0,), "scalar"),
("cumprod", (torch.tensor(0.0, requires_grad=True)), (0,), "scalar_zeros"),
("cumprod", prod_zeros(S, [0, 1]), (1,), "zeros_dim2", (), [0]),
("cumprod", prod_zeros(S, [0, 2]), (1,), "zeros_dim1", (), [0]),
("cumprod", prod_zeros(S, [1, 2]), (1,), "zeros_dim0", (), [0]),
(
"cumprod",
prod_zeros(S, [1, 2]),
(1,),
"zeros_dim0_cast",
(),
[0],
(),
ident,
{"dtype": torch.float64},
),
(
"log_softmax",
(S, S, S),
(
1,
torch.float64,
),
"kwarg_dtype_would_break_jit_loader",
(True,),
),
("unfold", (), (0, 1, 1), "scalar", (), [0]),
("unfold", (S, S, S, S), (1, 3, 1), "", (), [0]),
("unfold", (S, S, S), (2, 3, 2), "lastdim", (), [0]),
("addmm", (S, M), ((S, S), (S, M)), "", (True, ["aten::add", "aten::mm"])),
(
"addmm",
(1,),
((S, S), (S, M)),
"broadcast_lhs",
(True, ["aten::add", "aten::mm"]),
),
(
"addmm",
(S, M),
((S, S), (S, M)),
"coef",
(True,),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addmm",
(1,),
((S, S), (S, M)),
"broadcast_lhs_coef",
(True,),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addmm",
(),
((S, S), (S, M)),
"scalar_broadcast_lhs",
(True, ["aten::add", "aten::mm"]),
),
(
"addmm",
(),
((S, S), (S, M)),
"scalar_broadcast_lhs_coef",
(True,),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addbmm",
(S, M),
((S, S, S), (S, S, M)),
),
("addbmm", (1,), ((S, S, S), (S, S, M)), "broadcast_lhs"),
(
"addbmm",
(S, M),
((S, S, S), (S, S, M)),
"coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addbmm",
(1,),
((S, S, S), (S, S, M)),
"broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
("addbmm", (), ((S, S, S), (S, S, M)), "scalar_broadcast_lhs"),
(
"addbmm",
(),
((S, S, S), (S, S, M)),
"scalar_broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"baddbmm",
(S, S, M),
((S, S, S), (S, S, M)),
),
("baddbmm", (1,), ((S, S, S), (S, S, M)), "broadcast_lhs"),
(
"baddbmm",
(S, S, M),
((S, S, S), (S, S, M)),
"coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"baddbmm",
(1,),
((S, S, S), (S, S, M)),
"broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
("baddbmm", (), ((S, S, S), (S, S, M)), "scalar_broadcast_lhs"),
(
"baddbmm",
(),
((S, S, S), (S, S, M)),
"scalar_broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addmv",
(S,),
((S, M), (M,)),
),
("addmv", (1,), ((S, M), (M,)), "broadcast_lhs"),
(
"addmv",
(S,),
((S, M), (M,)),
"coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addmv",
(1,),
((S, M), (M,)),
"broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
("addmv", (), ((S, M), (M,)), "scalar_broadcast_lhs"),
(
"addmv",
(),
((S, M), (M,)),
"scalar_broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addr",
(S, M),
((S,), (M,)),
),
("addr", (), ((S,), (M,)), "broadcast_lhs"),
(
"addr",
(S, M),
((S,), (M,)),
"coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
(
"addr",
(),
((S,), (M,)),
"broadcast_lhs_coef",
(),
(),
(),
ident,
{"beta": 0.2, "alpha": 0.6},
),
("dot", (L,), ((L,),), "", (True,)),
("mm", (S, M), ((M, S),), "", (True,)),
("bmm", (M, S, M), ((M, M, S),), "", (True,)),
("mv", (S, M), ((M,),), "", (True,)),
("ger", (S,), ((M,),)),
("matmul", (L,), ((L,),), "", (True,)),
("matmul", (S, M), ((M,),), "2d_1d", (True,)),
("matmul", (M,), ((M, S),), "1d_2d", (True,)),
("matmul", (S, M), ((M, S),), "2d_2d", (True,)),
("matmul", (S, S, M), ((M,),), "3d_1d", (True,)),
("matmul", (S, S, M), ((M, S),), "3d_2d", (True,)),
("matmul", (M,), ((S, M, S),), "1d_3d", (True,)),
("matmul", (S, M), ((S, M, S),), "2d_3d", (True,)),
("matmul", (S, S, M, M), ((S, S, M, S),), "4d_4d", (True,)),
("matmul", (S, S, M, M), ((M,),), "4d_1d", (True,)),
("matmul", (M,), ((S, S, M, S),), "1d_4d", (True,)),
("matrix_power", (S, S), [2], "n=2"),
("matrix_power", (S, S, S), [3], "n=3"),
("matrix_power", (S, S, S), [1], "n=1"),
("matrix_power", (S, S, S), [0], "n=0"),
(
"matrix_power",
lambda: random_fullrank_matrix_distinct_singular_value(S),
[-1],
"n=-1",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"matrix_power",
lambda: random_fullrank_matrix_distinct_singular_value(S),
[-3],
"n=-3",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"matrix_power",
lambda: random_fullrank_matrix_distinct_singular_value(S, S),
[-2],
"n=-2",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"mvlgamma",
torch.empty(
S,
).uniform_(0.5, 1),
[1],
"p=1",
),
(
"mvlgamma",
torch.empty(
S,
).uniform_(1, 2),
[2],
"p=2",
),
("mvlgamma", torch.empty(S, S).uniform_(1.5, 3), [3], "p=3"),
("mvlgamma", torch.empty(S, S).uniform_(2.5, 5), [5], "p=5"),
("addcmul", (S, S), ((S, S), (S, S)), "", (True,)),
("addcmul", (S, S), ((S, 1), (1, S)), "broadcast_rhs", (True,)),
("addcmul", (1,), ((S, S, 1), (1, S)), "broadcast_all", (True,)),
(
"addcmul",
(S, S),
((S, S), (S, S)),
"scale",
(True,),
(),
(),
ident,
{"value": 0.5},
),
(
"addcmul",
(S, S),
((S, 1), (1, S)),
"scale_broadcast_rhs",
(True,),
(),
(),
ident,
{"value": 0.5},
),
(
"addcmul",
(1,),
((S, S, 1), (1, S)),
"scale_broadcast_all",
(True,),
(),
(),
ident,
{"value": 0.5},
),
("addcmul", (), ((), ()), "scalar", (True,)),
("addcmul", (S, S), ((), ()), "scalar_broadcast_rhs", (True,)),
("addcmul", (), ((S, S, 1), (1, S)), "scalar_broadcast_lhs", (True,)),
(
"addcmul",
(),
((), ()),
"scalar_scale",
(True,),
(),
(),
ident,
{"value": 0.5},
),
(
"addcmul",
(S, S),
((), ()),
"scalar_scale_broadcast_rhs",
(True,),
(),
(),
ident,
{"value": 0.5},
),
(
"addcmul",
(),
((S, S, 1), (1, S)),
"scalar_scale_broadcast_lhs",
(True,),
(),
(),
ident,
{"value": 0.5},
),
("addcdiv", (S, S), ((S, S), (S, S))),
("addcdiv", (S, S), ((S, 1), (1, S)), "broadcast_rhs"),
("addcdiv", (1,), ((S, S, 1), (1, S)), "broadcast_all"),
(
"addcdiv",
(S, S),
((S, S), (S, S)),
"scale",
(),
(),
(),
ident,
{"value": 0.5},
),
(
"addcdiv",
(S, S),
((S, 1), (1, S)),
"scale_broadcast_rhs",
(),
(),
(),
ident,
{"value": 0.5},
),
(
"addcdiv",
(1,),
((S, S, 1), (1, S)),
"scale_broadcast_all",
(),
(),
(),
ident,
{"value": 0.5},
),
("addcdiv", (), ((), ()), "scalar"),
("addcdiv", (S, S), ((), ()), "scalar_broadcast_rhs"),
("addcdiv", (), ((S, S, 1), (1, S)), "scalar_broadcast_lhs"),
("addcdiv", (), ((), ()), "scalar_scale", (), (), (), ident, {"value": 0.5}),
(
"addcdiv",
(S, S),
((), ()),
"scalar_scale_broadcast_rhs",
(),
(),
(),
ident,
{"value": 0.5},
),
(
"addcdiv",
(),
((S, S, 1), (1, S)),
"scalar_scale_broadcast_lhs",
(),
(),
(),
ident,
{"value": 0.5},
),
("zero_", (S, S, S), NO_ARGS),
("zero_", (), NO_ARGS, "scalar"),
("logsumexp", (S, S), (1,), "", (True,)),
("logsumexp", (), (0,), "scalar", (True,)),
("norm", (S, S), (), "default"),
("norm", (S, S), (2,), "2"),
("norm", (S, S), (0,), "0"),
("norm", (S, S), (0.5,), "0_5"),
("norm", (S, S), (1,), "1"),
("norm", (S, S), (3,), "3"),
("norm", (S, S), (inf,), "inf"),
("norm", (S, S), (-inf,), "-inf"),
("norm", (S, S), ("fro",), "fro_default"),
(
"norm",
(S, S),
(
"fro",
[0, 1],
),
"fro",
),
("norm", (S, S), ("nuc",), "nuc", (), NO_ARGS, [skipIfNoLapack]),
(
"norm",
(S, S, S),
("nuc", [1, 2]),
"nuc_batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
("norm", (S, S), (-1,), "neg_1"),
("norm", (S, S), (-2,), "neg_2"),
("norm", (S, S), (-0.5,), "neg_0_5"),
("norm", (S, S), (-1.5,), "neg_1_5"),
(
"norm",
(S, S),
(
-2,
1,
),
"neg_2_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
-1,
1,
),
"neg_1_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
0,
1,
),
"0_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
1,
1,
),
"1_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
2,
1,
),
"2_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
3,
1,
),
"3_2_dim",
(),
[1],
),
(
"norm",
(S, S),
(
inf,
1,
),
"inf_2_dim",
),
("norm", torch.rand(S, S, S) + 5e-2, (1.5,), "1_5_default"),
("norm", (S, S, S), (2, 1), "2_dim", (), [1]),
("norm", (S, S, S), (3, 1), "3_dim", (), [1]),
("norm", torch.rand(S, S, S) + 5e-2, (1.5, 1), "1_5_dim", (), [1]),
("norm", (S, S, S), (2, 1, True), "keepdim_2_dim", (), [1]),
("norm", (S, S, S), (3, 1, True), "keepdim_3_dim", (), [1]),
(
"norm",
torch.rand(S, S, S) + 5e-2,
(1.5, 1, True),
"keepdim_1_5_dim",
(),
[1],
),
("norm", (), (2, 0), "2_dim_scalar", (), [1]),
("norm", (), (3, 0), "3_dim_scalar", (), [1]),
("norm", (), (2, 0, True), "keepdim_2_dim_scalar", (), [1]),
("norm", (), (3, 0, True), "keepdim_3_dim_scalar", (), [1]),
("clone", (S, M, S), NO_ARGS),
("clone", (), NO_ARGS, "scalar"),
("contiguous", (S, S), NO_ARGS, "", (True,)),
(
"contiguous",
torch.randn(S, S).transpose(0, 1),
NO_ARGS,
"not_contiguous",
(True,),
),
("dist", (S, S, S), ((S, S, S),)),
("dist", (S, S, S), ((S,),), "broadcast_rhs"),
("dist", (S,), ((S, S, S),), "broadcast_lhs"),
("dist", (S, 1, S), ((S, S),), "broadcast_all"),
("dist", (), ((),), "scalar"),
("dist", (S, S, S), ((),), "scalar_broadcast_rhs"),
("dist", (), ((S, S, S),), "scalar_broadcast_lhs"),
("dist", (S, S, S), ((S, S, S), 4), "4"),
("dist", (S, S, S), ((S,), 4), "4_broadcast_rhs"),
("dist", (S,), ((S, S, S), 4), "4_broadcast_lhs"),
("dist", (S, 1, S), ((S, S), 4), "4_broadcast_all"),
("dist", (), ((), 4), "scalar_4"),
("dist", (S, S, S), ((), 4), "scalar_4_broadcast_rhs"),
("dist", (), ((S, S, S), 4), "scalar_4_broadcast_lhs"),
("diag", (M, M), NO_ARGS, "2d"),
("diag", (3, 5), NO_ARGS, "2d_wide"),
("diag", (3, 5), (2,), "2d_wide_pos"),
("diag", (3, 5), (-2,), "2d_wide_neg"),
("diag", (5, 3), NO_ARGS, "2d_tall"),
("diag", (5, 3), (2,), "2d_tall_pos"),
("diag", (5, 3), (-2,), "2d_tall_neg"),
("diag", (M,), NO_ARGS, "1d"),
("diag", (M, M), (1,), "2d_1"),
("diag", (M, M), (2,), "2d_2"),
("diag_embed", (S, S), NO_ARGS),
("diagonal", (M, M), NO_ARGS, "2d"),
("diagonal", (3, 5), NO_ARGS, "2d_wide"),
("diagonal", (3, 5), (2,), "2d_wide_pos"),
("diagonal", (3, 5), (-2,), "2d_wide_neg"),
("diagonal", (5, 3), NO_ARGS, "2d_tall"),
("diagonal", (5, 3), (2,), "2d_tall_pos"),
("diagonal", (5, 3), (-2,), "2d_tall_neg"),
("diagonal", (M, M), (1,), "2d_1"),
("diagonal", (M, M), (2,), "2d_2"),
("diagonal", (M, M, M), (1, 1, 2), "3d_1"),
("diagonal", (M, M, M), (2, 0, 1), "3d_2"),
("diagonal", (M, M, M), (-2, 0, 1), "3d_3"),
("tril", (M, M), NO_ARGS),
("tril", (M, M), (2,), "idx"),
("tril", (S, M, M), NO_ARGS, "batched"),
("tril", (S, M, M), (2,), "batched_idx"),
("tril", (3, 3, S, S), NO_ARGS, "more_batched"),
("triu", (M, M), NO_ARGS),
("triu", (M, M), (2,), "idx"),
("triu", (S, M, M), NO_ARGS, "batched"),
("triu", (S, M, M), (2,), "batched_idx"),
("triu", (3, 3, S, S), NO_ARGS, "more_batched"),
("trace", (M, M), NO_ARGS),
("cross", (S, 3), ((S, 3),)),
("cross", (S, 3, S), ((S, 3, S), 1), "dim"),
("index_select", (S, S, S), (0, index_variable(2, S)), "dim", (), [0]),
(
"index_select",
(),
(0, torch.tensor([0], dtype=torch.int64)),
"scalar_mixed_dim",
(),
[0],
),
(
"index_select",
(),
(0, torch.tensor(0, dtype=torch.int64)),
"scalar_dim",
(),
[0],
),
("index_add", (S, S), (0, index_variable(2, S), (2, S)), "dim", (), [0]),
(
"index_add",
(),
(0, torch.tensor([0], dtype=torch.int64), (1,)),
"scalar_input_dim",
(),
[0],
),
(
"index_add",
(),
(0, torch.tensor(0, dtype=torch.int64), ()),
"scalar_all_dim",
(),
[0],
),
("index_copy", (S, S), (0, index_perm_variable(2, S), (2, S)), "dim", (), [0]),
(
"index_copy",
(),
(0, torch.tensor([0], dtype=torch.int64), (1,)),
"scalar_input_dim",
(),
[0],
),
(
"index_copy",
(),
(0, torch.tensor(0, dtype=torch.int64), ()),
"scalar_all_dim",
(),
[0],
),
("index_fill", (S, S), (0, index_variable(2, S), 2), "dim", (), [0]),
("index_fill", (S, S), (0, index_variable(2, S), ()), "variable_dim", (), [0]),
(
"index_fill",
(S, S),
(0, torch.tensor(0, dtype=torch.int64), 2),
"scalar_index_dim",
(),
[0],
),
(
"index_fill",
(),
(0, torch.tensor([0], dtype=torch.int64), 2),
"scalar_input_dim",
(),
[0],
),
(
"index_fill",
(),
(0, torch.tensor(0, dtype=torch.int64), 2),
"scalar_both_dim",
(),
[0],
),
(
"inverse",
lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS,
"",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"inverse",
lambda: random_fullrank_matrix_distinct_singular_value(S, 2, 3),
NO_ARGS,
"batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
("det", (S, S), NO_ARGS, "", (), NO_ARGS, [skipIfNoLapack]),
("det", (1, 1), NO_ARGS, "1x1", (), NO_ARGS, [skipIfNoLapack]),
(
"det",
lambda: random_symmetric_matrix(S),
NO_ARGS,
"symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_symmetric_psd_matrix(S),
NO_ARGS,
"symmetric_psd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_symmetric_pd_matrix(S),
NO_ARGS,
"symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_square_matrix_of_rank(S, S - 2),
NO_ARGS,
"dim2_null",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_square_matrix_of_rank(S, 1),
NO_ARGS,
"rank1",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_square_matrix_of_rank(S, 2),
NO_ARGS,
"rank2",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS,
"distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
),
("det", (3, 3, S, S), NO_ARGS, "batched", (), NO_ARGS, [skipIfNoLapack]),
("det", (3, 3, 1, 1), NO_ARGS, "batched_1x1", (), NO_ARGS, [skipIfNoLapack]),
(
"det",
lambda: random_symmetric_matrix(S, 3),
NO_ARGS,
"batched_symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_symmetric_psd_matrix(S, 3),
NO_ARGS,
"batched_symmetric_psd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_symmetric_pd_matrix(S, 3),
NO_ARGS,
"batched_symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"det",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3),
NO_ARGS,
"batched_distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
),
# For `logdet` and `slogdet`, the function at det=0 is not smooth.
# We need to exclude tests with det=0 (e.g. dim2_null, rank1, rank2) and use
# `make_nonzero_det` to make the random matrices have nonzero det. For
# `logdet`, we also set `make_nonzero_det(matrix, sign=1)` to make the
# matrix have positive det.
(
"logdet",
lambda: make_nonzero_det(torch.randn(S, S), 1),
NO_ARGS,
"",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(torch.randn(1, 1), 1),
NO_ARGS,
"1x1",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(random_symmetric_matrix(S), 1),
NO_ARGS,
"symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(random_symmetric_pd_matrix(S), 1),
NO_ARGS,
"symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(
random_fullrank_matrix_distinct_singular_value(S), 1, 0
),
NO_ARGS,
"distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(torch.randn(3, 3, S, S), 1),
NO_ARGS,
"batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(torch.randn(3, 3, 1, 1), 1),
NO_ARGS,
"batched_1x1",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(random_symmetric_matrix(S, 3), 1),
NO_ARGS,
"batched_symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(random_symmetric_pd_matrix(S, 3), 1),
NO_ARGS,
"batched_symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"logdet",
lambda: make_nonzero_det(
random_fullrank_matrix_distinct_singular_value(S, 3), 1, 0
),
NO_ARGS,
"batched_distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(1, 1), 1),
NO_ARGS,
"1x1_pos_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(1, 1), -1),
NO_ARGS,
"1x1_neg_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(S, S), 1),
NO_ARGS,
"pos_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(S, S), -1),
NO_ARGS,
"neg_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(random_symmetric_matrix(S)),
NO_ARGS,
"symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: random_symmetric_pd_matrix(S),
NO_ARGS,
"symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS,
"distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(3, 3, 1, 1), -1),
NO_ARGS,
"batched_1x1_neg_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(torch.randn(3, 3, S, S), 1),
NO_ARGS,
"batched_pos_det",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: make_nonzero_det(random_symmetric_matrix(S, 3)),
NO_ARGS,
"batched_symmetric",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: random_symmetric_pd_matrix(S, 3),
NO_ARGS,
"batched_symmetric_pd",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"slogdet",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3),
NO_ARGS,
"batched_distinct_singular_values",
(),
NO_ARGS,
[skipIfNoLapack],
itemgetter(1),
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S),
NO_ARGS,
"",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S)[: (S - 2)],
NO_ARGS,
"wide",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S)[:, : (S - 2)],
NO_ARGS,
"tall",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S)[: (S - 2)],
(False,),
"wide_all",
(),
NO_ARGS,
[skipIfNoLapack],
lambda usv: (usv[0], usv[1], usv[2][:, : (S - 2)]),
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S)[:, : (S - 2)],
(False,),
"tall_all",
(),
NO_ARGS,
[skipIfNoLapack],
lambda usv: (usv[0][:, : (S - 2)], usv[1], usv[2]),
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(M),
NO_ARGS,
"large",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3),
NO_ARGS,
"batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3)[
..., : (S - 2), :
],
NO_ARGS,
"wide_batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3)[
..., :, : (S - 2)
],
NO_ARGS,
"tall_batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3)[
..., : (S - 2), :
],
(False,),
"wide_all_batched",
(),
NO_ARGS,
[skipIfNoLapack],
lambda usv: (usv[0], usv[1], usv[2][..., :, : (S - 2)]),
),
(
"svd",
lambda: random_fullrank_matrix_distinct_singular_value(S, 3, 3)[
..., :, : (S - 2)
],
(False,),
"tall_all_batched",
(),
NO_ARGS,
[skipIfNoLapack],
lambda usv: (usv[0][..., :, : (S - 2)], usv[1], usv[2]),
),
("qr", (S, S), (False,), "square_single", (), NO_ARGS, [skipIfNoLapack]),
("qr", (S, S - 2), (True,), "tall_single", (), NO_ARGS, [skipIfNoLapack]),
("qr", (3, S, S), (False,), "square_batched", (), NO_ARGS, [skipIfNoLapack]),
("qr", (3, S, S - 2), (True,), "tall_batched", (), NO_ARGS, [skipIfNoLapack]),
(
"qr",
(3, 2, S, S),
(False,),
"square_many_batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"qr",
(3, 2, S, S - 2),
(True,),
"tall_many_batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"solve",
(S, S),
(random_fullrank_matrix_distinct_singular_value(S, silent=True),),
"",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"solve",
(S, S, S),
(random_fullrank_matrix_distinct_singular_value(S, S, silent=True),),
"batched",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"solve",
(2, 3, S, S),
(random_fullrank_matrix_distinct_singular_value(S, 2, 3, silent=True),),
"batched_dims",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"solve",
(2, 2, S, S),
(random_fullrank_matrix_distinct_singular_value(S, 1, silent=True),),
"batched_broadcast_A",
(),
NO_ARGS,
[skipIfNoLapack],
),
(
"solve",
(1, S, S),
(random_fullrank_matrix_distinct_singular_value(S, 2, 2, silent=True),),
"batched_broadcast_b",
(),
NO_ARGS,
[skipIfNoLapack],
),
("fill_", (S, S, S), (1,), "number"),
("fill_", (), (1,), "number_scalar"),
("fill_", (S, S, S), ((),), "variable"),
("eq_", (S, S, S), ((S, S, S),)),
("eq_", (S, S, S), ((1,),), "broadcast_rhs"),
("eq_", (), ((),), "scalar"),
("eq_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("ne_", (S, S, S), ((S, S, S),)),
("ne_", (S, S, S), ((1,),), "broadcast_rhs"),
("ne_", (), ((),), "scalar"),
("ne_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("gt_", (S, S, S), ((S, S, S),)),
("gt_", (S, S, S), ((1,),), "broadcast_rhs"),
("gt_", (), ((),), "scalar"),
("gt_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("ge_", (S, S, S), ((S, S, S),)),
("ge_", (S, S, S), ((1,),), "broadcast_rhs"),
("ge_", (), ((),), "scalar"),
("ge_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("lt_", (S, S, S), ((S, S, S),)),
("lt_", (S, S, S), ((1,),), "broadcast_rhs"),
("lt_", (), ((),), "scalar"),
("lt_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("le_", (S, S, S), ((S, S, S),)),
("le_", (S, S, S), ((1,),), "broadcast_rhs"),
("le_", (), ((),), "scalar"),
("le_", (S, S, S), ((),), "scalar_broadcast_rhs"),
("eq_", (S, S, S), (0,), "pyscalar"),
("ne_", (S, S, S), (0,), "pyscalar"),
("gt_", (S, S, S), (0,), "pyscalar"),
("ge_", (S, S, S), (0,), "pyscalar"),
("le_", (S, S, S), (0,), "pyscalar"),
("lt_", (), (0,), "pyscalar"),
("eq_", (), (0,), "pyscalar_scalar"),
("ne_", (), (0,), "pyscalar_scalar"),
("gt_", (), (0,), "pyscalar_scalar"),
("ge_", (), (0,), "pyscalar_scalar"),
("lt_", (), (0,), "pyscalar_scalar"),
("le_", (), (0,), "pyscalar_scalar"),
("permute", (1, 2, 3, 4), (0, 2, 3, 1), "", (True,)),
("permute", (1, 2, 3, 4), (0, -2, -1, 1), "neg_dim", (True,)),
("permute", (), (dont_convert(()),), "scalar", (True,)),
("select", (S, S, S), (1, 2), "dim", (), [0]),
("select", (S, S, S), (1, -1), "wrap_dim", (), [0]),
("select", (S,), (0, 2), "1d"),
("narrow", (S, S, S), (1, 2, 2), "dim", (), [0]),
("narrow", (S, S, S), (1, 0, 0), "empty_dim", (), [0]),
("squeeze", (S, 1, S, 1), NO_ARGS, "", (True,)),
("squeeze", (1, 1, 1, 1), NO_ARGS, "input_sizes_are_ones", (True,)),
("squeeze", (S, 1, S, 1), (1,), "1_dim", (True,), [0]),
("squeeze", (S, 1, S, 1), (2,), "not_1_dim", (True,), [0]),
("squeeze", (), (0,), "scalar", (True,), [0]),
("unsqueeze", (S, S, S), (0,), "first", (True,), [0]),
("unsqueeze", (S, S, S), (1,), "middle", (True,), [0]),
("unsqueeze", (S, S, S), (3,), "last", (True,), [0]),
("unsqueeze", (), (0,), "scalar", (True,), [0]),
("chunk", (S, S, S), (2,), "", (True, "prim::ConstantChunk")),
("chunk", (S, S, S), (S, 1), "dim", (True, "prim::ConstantChunk"), [1]),
("split", (S, S, S), (2,), "", (True,)),
("split", (S, S, S), (S, 1), "dim", (True,), [1]),
(
"split",
(S, S, S),
([int(S / 3), S - int(S / 3) * 2, int(S / 3)],),
"size_list",
(True, "aten::split_with_sizes"),
),
(
"split",
(S, S, S),
([int(S / 2), S - int(S / 2) * 2, int(S / 2)], 2),
"size_list_dim",
(True, "aten::split_with_sizes"),
[1],
),
(
"split_with_sizes",
(S, S, S),
([int(S / 3), S - int(S / 3) * 2, int(S / 3)],),
"",
(True,),
),
(
"split_with_sizes",
(S, S, S),
([int(S / 3), S - int(S / 3), 0],),
"size_0",
(True,),
),
(
"split_with_sizes",
(S, S, S),
([int(S / 3), S - int(S / 3) * 2, int(S / 3)],),
"dim",
(True,),
[1],
),
("gather", (M, S), (0, gather_variable((S, S), 1, M, True)), "dim0", (), [0]),
(
"gather",
(M, S),
(1, gather_variable((M, S // 2), 0, S, True)),
"dim1",
(),
[0],
),
(
"gather",
(),
(0, torch.tensor([0], dtype=torch.int64)),
"scalar_input",
(),
[0],
),
(
"gather",
(S,),
(0, torch.tensor(0, dtype=torch.int64)),
"scalar_index",
(),
[0],
),
("gather", (), (0, torch.tensor(0, dtype=torch.int64)), "scalar_both", (), [0]),
(
"scatter",
(M, S),
(0, gather_variable((S, S), 1, M), (S, S)),
"dim0",
(),
[0],
),
(
"scatter",
(M, S),
(1, gather_variable((M, S // 2), 0, S), (M, S // 2)),
"dim1",
(),
[0],
),
(
"scatter",
(),
(0, torch.tensor(0, dtype=torch.int64), ()),
"scalartensor_all_dim0",
(),
[0],
),
(
"scatter",
(),
(0, torch.tensor(0, dtype=torch.int64), 2.5),
"scalar_all_dim0",
(),
[0],
),
(
"scatter_add",
(M, S),
(0, gather_variable((S, S), 1, M), (S, S)),
"dim0",
(),
[0],
),
(
"scatter_add",
(M, S),
(1, gather_variable((M, S // 2), 0, S), (M, S // 2)),
"dim1",
(),
[0],
),
(
"scatter_add",
(),
(0, torch.tensor(0, dtype=torch.int64), ()),
"scalar_all_dim0",
(),
[0],
),
("masked_select", (M, M), (mask_not_all_zeros((M, M)),)),
("masked_select", (M, M), (mask_not_all_zeros((M,)),), "broadcast_rhs"),
("masked_select", (M,), (mask_not_all_zeros((M, M)),), "broadcast_lhs"),
("masked_select", (M, 1, M), (mask_not_all_zeros((M, M)),), "broadcast_all"),
("masked_select", (), (torch.tensor(1, dtype=torch.bool),), "scalar"),
(
"masked_select",
(M, M),
(torch.tensor(1, dtype=torch.bool),),
"scalar_broadcast_rhs",
),
("masked_select", (), (mask_not_all_zeros((M, M)),), "scalar_broadcast_lhs"),
("masked_fill", (M, M), (torch.BoolTensor(M, M).bernoulli_(), 10)),
("masked_fill", (M, M), (torch.BoolTensor(M, M).bernoulli_(), ()), "tensor"),
(
"masked_fill",
(M,),
(torch.BoolTensor(M, M).bernoulli_(), 10),
"broadcast_lhs",
),
(
"masked_fill",
(M, M),
(
torch.BoolTensor(
M,
).bernoulli_(),
10,
),
"broadcast_rhs",
),
(
"masked_fill",
(),
(torch.tensor(0, dtype=torch.bool).bernoulli_(), 10),
"scalar",
),
(
"masked_fill",
(),
(torch.tensor(0, dtype=torch.bool).bernoulli_(), ()),
"scalar_variable",
),
(
"masked_fill",
(M, M),
(torch.tensor(0, dtype=torch.bool).bernoulli_(), 10),
"scalar_broadcast_rhs",
),
("masked_scatter", (M, M), (torch.BoolTensor(M, M).bernoulli_(), (M, M))),
(
"masked_scatter",
(M,),
(torch.BoolTensor(M, M).bernoulli_(), (M, M)),
"broadcast_lhs",
),
(
"masked_scatter",
(M, M),
(
torch.BoolTensor(
M,
).bernoulli_(),
(M, M),
),
"broadcast_rhs",
),
("masked_scatter", (M, M), (bernoulli_scalar(), (M, M)), "scalar"),
(
"masked_scatter",
(M, M),
(bernoulli_scalar(), (M, M)),
"scalar_broadcast_rhs",
),
("resize_", (S, S, S), (torch.Size([S * S, S])), "fewer_dims"),
("resize_", (), (dont_convert(()),), "scalar"),
("resize_", (), (torch.Size([1, 1, 1])), "scalar_to_dims"),
("resize_as_", (), (non_differentiable(torch.tensor(5.0)),), "scalar"),
(
"resize_as_",
(),
(non_differentiable(torch.randn((1, 1, 1))),),
"scalar_to_dims",
),
("resize_as_", (S, S, S), (non_differentiable(torch.randn(S * S, S)),)),
("sort", (S, M, S), NO_ARGS),
("sort", (S, M, S), (1,), "dim"),
("sort", (S, M, S), (1, True), "dim_desc"),
("sort", (), NO_ARGS, "scalar"),
("sort", (), (0,), "dim_scalar"),
("sort", (), (0, True), "dim_desc_scalar"),
("topk", (S, M, S), (3,)),
("topk", (S, M, S), (3, 1), "dim", (), [1]),
("topk", (S, M, S), (3, 1, True), "dim_desc", (), [1]),
("topk", (S, M, S), (3, 1, True, True), "dim_desc_sort", (), [1]),
("topk", (), (1,), "scalar"),
("topk", (), (1, 0), "dim_scalar", (), [1]),
("topk", (), (1, 0, True), "dim_desc_scalar", (), [1]),
("topk", (), (1, 0, True, True), "dim_desc_sort_scalar", (), [1]),
("take", (S, S, S), (torch.LongTensor([[-3, 2], [20, 2]]),)),
("take", (S, S, S), (torch.tensor(0, dtype=torch.int64),), "scalar_index"),
("take", (), (torch.LongTensor([0]),), "scalar_data"),
("take", (), (torch.tensor(0, dtype=torch.int64),), "scalar_both"),
("where", (M, M), (mask_not_all_zeros((M, M)), (M, M)), "", (True,)),
(
"where",
(M, 1, M),
(mask_not_all_zeros((M, M)), (M, M, 1)),
"broadcast_all",
(True,),
),
("where", (), (bernoulli_scalar(), ()), "scalar", (True,)),
(
"where",
(M, 1, M),
(bernoulli_scalar(), (M, M, 1)),
"scalar_broadcast_mask",
(True,),
),
(
"where",
(),
(mask_not_all_zeros((M, M)), ()),
"scalar_broadcast_non_mask",
(True,),
),
("__getitem__", torch.randn(S, S, S), (dont_convert([1, 2]),)),
("__getitem__", torch.randn(S, S, S), (slice(0, 3),), "slice"),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([slice(0, 3), 1]),),
"slice_index",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 2, 3], [1, 3, 3], [0, 0, 2]]),),
"adv_index",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 0, 3], [1, 1, 3], [0, 0, 2]]),),
"adv_index_dup",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([slice(None), slice(None), [0, 3]]),),
"adv_index_end",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([slice(None), [0, 3], slice(None)]),),
"adv_index_mid",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 3], slice(None), slice(None)]),),
"adv_index_beg",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 3], [1, 2], slice(None)]),),
"adv_index_comb",
),
(
"__getitem__",
torch.randn(S, S, S),
(
dont_convert(
[
[0, 3],
]
),
),
"adv_index_sub",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 3], slice(None)]),),
"adv_index_sub_2",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 3], Ellipsis]),),
"adv_index_sub_3",
),
(
"__getitem__",
torch.randn(S, S, S),
(dont_convert([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])]),),
"adv_index_var",
),
("to_sparse", (S, S), (), "", (), (), (), lambda x: x.to_dense()),
]
def create_input(call_args, requires_grad=True, non_contiguous=False, call_kwargs=None):
    """Materialize the declarative argument specs used by the method-test table.

    Each spec in ``call_args`` is converted into a concrete argument:
      * ``torch.Size`` and ``dont_convert`` wrappers pass through untouched,
      * an empty tuple ``()`` becomes a random scalar (0-dim) double tensor,
      * any other non-tensor tuple is treated as a shape and becomes a random
        double tensor of that shape,
      * a ``non_differentiable`` wrapper yields its payload with no grad set,
      * an existing tensor is detached and cloned (float32 promoted to double),
      * a callable is invoked and its result converted recursively,
      * anything else (ints, floats, ...) is returned as-is.

    Args:
        call_args: a single spec or a tuple of specs (a lone spec is wrapped).
        requires_grad: whether created floating-point tensors require grad.
        non_contiguous: if True, created/cloned tensors are made non-contiguous.
        call_kwargs: optional dict of keyword-arg specs, converted the same way.

    Returns:
        ``(args, kwargs)`` ready to be passed to the method under test.
    """
    if not isinstance(call_args, tuple):
        call_args = (call_args,)

    def map_arg(arg):
        def maybe_non_contig(tensor):
            return tensor if not non_contiguous else make_non_contiguous(tensor)

        if isinstance(arg, (torch.Size, dont_convert)):
            # Explicitly opted out of conversion.
            return arg
        elif isinstance(arg, tuple) and len(arg) == 0:
            var = torch.randn((), dtype=torch.double)
            var.requires_grad = requires_grad
            return var
        elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor):
            return Variable(
                maybe_non_contig(torch.randn(*arg, dtype=torch.double)),
                requires_grad=requires_grad,
            )
        elif isinstance(arg, non_differentiable):
            # The wrapped payload is used verbatim and never gets grad enabled.
            # (The original had an if/else here whose two branches were
            # byte-identical — collapsed to a single return.)
            return maybe_non_contig(arg.tensor)
        elif isinstance(arg, torch.Tensor):
            if arg.dtype == torch.float:
                arg = arg.double()
            # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards
            v = maybe_non_contig(arg).detach().clone()
            v.requires_grad = requires_grad and v.is_floating_point()
            return v
        elif callable(arg):
            return map_arg(arg())
        else:
            return arg

    args_out = tuple(map_arg(arg) for arg in call_args)
    kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {}
    return args_out, kwargs_out
def _compare_trilu_indices(self, row, col, offset=0, dtype=torch.long, device="cpu"):
if row == 0 or col == 0:
# have to handle this separately as tril and triu does not take
# empty matrix as input
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.tril_indices(row, col, offset, dtype=dtype, device=device),
)
self.assertEqual(
torch.empty(0, 2, dtype=dtype, device=device).transpose(0, 1),
torch.triu_indices(row, col, offset, dtype=dtype, device=device),
)
else:
self.assertEqual(
torch.ones(row, col, dtype=dtype, device="cpu")
.tril(offset)
.nonzero()
.transpose(0, 1)
.to(device),
torch.tril_indices(row, col, offset, dtype=dtype, device=device),
)
self.assertEqual(
torch.ones(row, col, dtype=dtype, device="cpu")
.tril(offset)
.nonzero()
.transpose(0, 1)
.to(device),
torch.tril_indices(row, col, offset, dtype=dtype, device=device),
)
def _compare_large_trilu_indices(
self, row, col, offset=0, dtype=torch.long, device="cpu"
):
l = (
torch.ones(row, col, dtype=dtype, device="cpu")
.tril(offset)
.nonzero()[-100:-1, :]
.transpose(0, 1)
.to(device)
)
torch.cuda.empty_cache()
r = torch.tril_indices(row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
l = (
torch.ones(row, col, dtype=dtype, device="cpu")
.triu(offset)
.nonzero()[-100:-1, :]
.transpose(0, 1)
.to(device)
)
torch.cuda.empty_cache()
r = torch.triu_indices(row, col, offset, dtype=dtype, device=device)[:, -100:-1]
self.assertEqual(l, r)
torch.cuda.empty_cache()
# Argument tuples fed to the tril_indices/triu_indices comparison tests.
# Each entry is:
# (
#     row
#     col
#     offset (optional)
#     dtype (optional)
# )
tri_tests_args = [
    # small square matrices, varying offsets (including out-of-range ones)
    (1, 1),
    (3, 3),
    (3, 3, 1),
    (3, 3, 2),
    (3, 3, 200),
    (3, 3, -1),
    (3, 3, -2),
    (3, 3, -200),
    # degenerate shapes: zero rows and/or zero columns
    (0, 3, 0),
    (0, 3, 1),
    (0, 3, -1),
    (3, 0, 0),
    (3, 0, 1),
    (3, 0, -1),
    (0, 0, 0),
    (0, 0, 1),
    (0, 0, -1),
    # wide matrices
    (3, 6, 0),
    (3, 6, 1),
    (3, 6, 3),
    (3, 6, 9),
    (3, 6, -1),
    (3, 6, -3),
    (3, 6, -9),
    # tall matrices
    (6, 3, 0),
    (6, 3, 1),
    (6, 3, 3),
    (6, 3, 9),
    (6, 3, -1),
    (6, 3, -3),
    (6, 3, -9),
    # explicit dtype coverage
    (258, 253, 1, torch.float32),
    (257, 258, 1, torch.float64),
    (258, 258, 1, torch.short),
    (3, 513, 1, torch.long),
    (513, 3, 1, torch.int),
    (513, 0, 1, torch.double),
    # larger shapes with big positive/negative offsets
    (1024, 1024),
    (1024, 1024, 500, torch.float32),
    (1024, 1024, 1023),
    (1024, 1024, -500),
    (1023, 1025),
    (1025, 1023, 1022),
    (1024, 1024, -500),  # NOTE(review): duplicate of the entry 3 lines above — possibly unintentional; confirm before removing
    # extreme aspect ratios
    (3, 2028),
    (3, 2028, 1),
    (3, 2028, -1),
    (2028, 3),
    (2028, 1),
    (2028, 1, -1),
]
# Very large (row, col[, offset]) cases for the same tril/triu index tests.
# The list is intentionally empty by default; see the comment below.
tri_large_tests_args = [
    # Large test cases below are deliberately commented out to speed up CI
    # tests and to avoid OOM error. When modifying implementations of
    # tril_indices and triu_indices, please enable these tests and make sure
    # they pass.
    #
    # (1, 268435455),
    # (5000, 5000),
    # (10000, 10000),
    # (268435455, 1),
    # (134217727, 2, 1),
    # (2, 134217727, 1),
    # (536870901, 1),
    # (1, 536870901),
    # (268435455, 2, 1),
    # (2, 268435455, 1)
]
def run_additional_tri_tests(self, device):
    """Layout-related checks for tril_indices/triu_indices on *device*.

    Verifies that the default layout and an explicit ``torch.strided`` layout
    both match a mask-based reference, and that a ``torch.sparse_coo`` layout
    is rejected with ``RuntimeError``.
    """
    ones = torch.ones(3, 3, dtype=torch.long, device=device, layout=torch.strided)
    expected_lower = ones.tril(0).nonzero().transpose(0, 1)
    expected_upper = ones.triu(0).nonzero().transpose(0, 1)

    self.assertEqual(expected_lower, torch.tril_indices(3, 3, device=device))
    self.assertEqual(
        expected_lower, torch.tril_indices(3, 3, device=device, layout=torch.strided)
    )
    self.assertEqual(expected_upper, torch.triu_indices(3, 3, device=device))
    self.assertEqual(
        expected_upper, torch.triu_indices(3, 3, device=device, layout=torch.strided)
    )

    # Sparse layouts are not supported by either factory.
    for factory in (torch.triu_indices, torch.tril_indices):
        self.assertRaises(
            RuntimeError,
            lambda fn=factory: fn(1, 1, device=device, layout=torch.sparse_coo),
        )
def unpack_variables(args):
    """Recursively rebuild nested tuples element-wise; non-tuple leaves pass
    through unchanged. (Historical helper — the per-leaf unwrapping it once
    did is gone, so today it only normalizes the tuple structure.)
    """
    return tuple(map(unpack_variables, args)) if istuple(args) else args
# Method names for which no functional-variant (torch.xxx) test is generated.
# `where` is skipped because its functional form takes arguments in a
# different order; the others are presumably excluded for similar signature
# mismatches — confirm against the test generator.
EXCLUDE_FUNCTIONAL = {
    "addmm",
    "addmm_",
    "addbmm",
    "baddbmm",
    "addmv",
    "addmv_",
    "addr",
    "addr_",
    "reshape",
    "where",  # argument order
}
# Test names excluded from gradcheck (first-order numeric gradient check) —
# currently empty.
EXCLUDE_GRADCHECK = {}
# Test names excluded from gradgradcheck (second-order check) — currently
# empty; per-test-name exclusions live in EXCLUDE_GRADGRADCHECK_BY_TEST_NAME.
EXCLUDE_GRADGRADCHECK = {}
# Generated test names for which the second-order gradient check
# (gradgradcheck) is skipped; reasons are explained inline below.
EXCLUDE_GRADGRADCHECK_BY_TEST_NAME = {
    # *det methods uses svd in backward when matrix is not invertible. However,
    # svd backward is unstable unless the matrix has positive distinct singular
    # values. Generated random matrices satisfy this with high probability, but
    # we can't rely on it. So only test gradgrad on invertible test cases and
    # _distinct_singular_values.
    "test_det",
    "test_det_1x1",
    "test_det_symmetric",
    "test_det_symmetric_psd",
    "test_det_dim2_null",
    "test_det_rank1",
    "test_det_rank2",
    "test_det_batched",
    "test_det_batched_1x1",
    "test_det_batched_symmetric",
    "test_det_batched_symmetric_psd",
    # `other` expand_as(self, other) is not used in autograd.
    "test_expand_as",
    "test_logdet",
    "test_logdet_1x1",
    "test_logdet_symmetric",
    "test_logdet_batched",
    "test_logdet_batched_1x1",
    "test_logdet_batched_symmetric",
    "test_slogdet_1x1_neg_det",
    "test_slogdet_neg_det",
    "test_slogdet_symmetric",
    "test_slogdet_batched_1x1_neg_det",
    "test_slogdet_batched_symmetric",
    "test_cdist",
}
def exclude_tensor_method(name, test_name):
    """Return True if no Tensor-method test should be generated for *name*.

    Two exclusion rules apply:
      1. some generated tests have no tensor equivalent at all (matched by
         *test_name*);
      2. some operations only exist as in-place tensor methods, so the
         out-of-place method form is excluded (matched by *name*, unless the
         name itself denotes the in-place variant, i.e. ends with ``_`` and
         is not a ``__dunder__``).
    """
    # there are no tensor equivalents for these (inplace or out)
    exclude_all_tensor_method_by_test_name = {
        "test_clamp_min",
        "test_clamp_max",
        "test_clamp_min_scalar",
        "test_clamp_max_scalar",
        "test_slice",
        "test_where",
        "test_where_broadcast_all",
        "test_where_scalar",
        "test_where_scalar_broadcast_mask",
        "test_where_scalar_broadcast_non_mask",
        "test_var_mean_keepdim_dim_1d",
        "test_var_mean_keepdim_dim",
        "test_var_mean_dim_1d",
        "test_var_mean_dim",
        "test_var_mean",
        "test_std_mean_keepdim_dim_1d",
        "test_std_mean_keepdim_dim",
        "test_std_mean_dim_1d",
        "test_std_mean_dim",
        "test_std_mean",
    }
    # there are no out-of-place tensor equivalents for these
    exclude_outplace_tensor_method = {
        "index_add",
        "index_copy",
        "index_fill",
        "masked_fill",
        "masked_scatter",
        "scatter",
        "scatter_add",
        "det",
    }
    if test_name in exclude_all_tensor_method_by_test_name:
        return True
    # A trailing underscore marks an in-place method, except on dunders
    # (e.g. "__add__"), which are magic methods, not in-place ops.
    is_magic_method = name.startswith("__") and name.endswith("__")
    is_inplace = name.endswith("_") and not is_magic_method
    return not is_inplace and name in exclude_outplace_tensor_method
| 84,592 | 29.330943 | 119 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/runtime.py | import argparse
import intel_extension_for_pytorch as ipex
def create_cpu_pool(args):
    """Build an IPEX runtime CPUPool pinned to cores 1 and 2 and print it.

    ``args`` is accepted for CLI-dispatch symmetry but is not used.
    """
    pool = ipex.cpu.runtime.CPUPool([1, 2])
    print("The created CPUPool has core is: {}".format(pool.core_ids), flush=True)
if __name__ == "__main__":
    # CLI entry point: dispatch on --case-name ("create_cpu_pool" is the only
    # known case; anything else is silently ignored).
    parser = argparse.ArgumentParser()
    parser.add_argument("--case-name", default="create_cpu_pool", type=str)
    args = parser.parse_args()
    if args.case_name == "create_cpu_pool":
        create_cpu_pool(args)
| 496 | 28.235294 | 86 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/common_ipex_conf.py | import torch
import intel_extension_for_pytorch as ipex
from functools import wraps
class AutoMixPrecision(object):
    """Context manager toggling IPEX auto mixed precision (bfloat16) for its
    scope and restoring the previous global state on exit."""

    def __init__(self, enable_or_not=False, train=False):
        # Snapshot the current global settings so __exit__ can restore them.
        self.old_value = ipex.get_auto_mix_precision()
        self.train_old_value = ipex.get_train()
        self.enable_or_not = enable_or_not
        self.train = train

    def __enter__(self):
        if self.enable_or_not:
            call_kwargs = {"mixed_dtype": torch.bfloat16, "train": self.train}
        else:
            call_kwargs = {"mixed_dtype": None}
        ipex.enable_auto_mixed_precision(**call_kwargs)

    def __exit__(self, *args, **kwargs):
        if not self.old_value:
            ipex.enable_auto_mixed_precision(mixed_dtype=None)
            return
        ipex.enable_auto_mixed_precision(
            mixed_dtype=torch.bfloat16, train=self.train_old_value
        )
class AutoDNNL(object):
    """Context manager forcing IPEX auto-DNNL on or off within its scope and
    restoring the previous setting on exit."""

    def __init__(self, enable_or_not=False):
        # Snapshot the current setting so __exit__ can restore it.
        self.old_value = ipex._get_auto_optimization()
        self.enable_or_not = enable_or_not

    @staticmethod
    def _apply(enabled):
        # Single switch point shared by the enter and exit paths.
        if enabled:
            ipex.core.enable_auto_dnnl()
        else:
            ipex.core.disable_auto_dnnl()

    def __enter__(self):
        self._apply(self.enable_or_not)

    def __exit__(self, *args, **kwargs):
        self._apply(self.old_value)
def runtime_thread_affinity_test_env(func):
    """Decorator that restores the main thread's CPU pool after *func* runs.

    Some tests (e.g. MultiStreamModule with one stream) change the main
    thread's affinity as a side effect; this saves the current pool before
    the call and re-applies it afterwards.

    NOTE: the pool is only restored when *func* returns normally — if it
    raises, the restore call is skipped (matches the original behavior).
    """
    @wraps(func)
    def wrapTheFunction(*args):
        saved_pool = ipex._C.get_current_cpu_pool()
        func(*args)
        ipex._C.set_cpu_pool(saved_pool)
    return wrapTheFunction
| 1,776 | 29.637931 | 98 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_ao_jit_llga_throughput_benchmark.py | import torch
from torch.utils import ThroughputBenchmark
from torch.testing import assert_allclose
import intel_extension_for_pytorch as ipex
from test_ao_jit_llga_utils import JitLlgaTestCase
class LinearEltwise(torch.nn.Module):
    """linear -> ReLU -> linear MLP used to exercise LLGA linear+eltwise fusion.

    Shapes: (*, D_in) -> (*, H) -> ReLU -> (*, D_out).
    """

    def __init__(self, D_in, H, D_out):
        super(LinearEltwise, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.eltwise = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        return self.linear2(self.eltwise(self.linear1(x)))
def freeze(model):
    """Freeze a scripted module via the low-level C++ API.

    Unlike ``torch.jit.freeze`` this calls ``torch._C._freeze_module``
    directly with ``preserveParameters=True`` (parameters are kept rather
    than inlined as constants), then re-wraps the frozen C++ module as a
    Python ScriptModule.
    """
    frozen_cpp_module = torch._C._freeze_module(model._c, preserveParameters=True)
    return torch.jit._recursive.wrap_cpp_module(frozen_cpp_module)
class TestThroughputBenchmark(JitLlgaTestCase):
    """Check that an LLGA-prepared module run through
    torch.utils.ThroughputBenchmark matches the unfused eager result."""

    def test_linear_eltwise(self):
        with torch.no_grad():
            batch, d_in, hidden, d_out = 8, 10, 5, 15
            model = LinearEltwise(d_in, hidden, d_out)
            x = torch.randn(batch, d_in)

            graph, m_llga, m_cpu = self.prepareModel(model, [x])

            # Compute the reference result with oneDNN fusion disabled, then
            # re-enable fusion for the benchmarked path.
            ipex.enable_onednn_fusion(False)
            module_result = m_cpu(x)
            ipex.enable_onednn_fusion(True)

            bench = ThroughputBenchmark(m_llga)
            bench.add_input(x)
            bench_result = bench.run_once(x)
            assert_allclose(bench_result, module_result, atol=1e-1, rtol=1e-2)

            stats = bench.benchmark(
                num_calling_threads=4, num_warmup_iters=100, num_iters=1000
            )
            print(stats)
if __name__ == "__main__":
    # NOTE(review): `run_tests` is not imported anywhere in this file, so
    # running it as a script would raise NameError. It likely needs
    # `from torch.testing._internal.common_utils import run_tests` — confirm.
    run_tests()
| 1,600 | 25.245902 | 78 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/linear_reorder.py | import torch
import intel_extension_for_pytorch as ipex
import torch.nn as nn
import itertools
class Model(nn.Module):
    """Thin wrapper around a single Linear(ic -> oc) layer, used to exercise
    IPEX weight reordering paths."""

    def __init__(self, ic, oc, bias):
        super().__init__()
        self.linear = nn.Linear(ic, oc, bias=bias)

    def forward(self, input):
        out = self.linear(input)
        return out
def run_model(dtype=None):
    """Run one optimizer step of `Model` over many configurations.

    Sweeps (out_features, bias, input_shape) combinations, applying
    ``ipex.optimize`` and an autocast region before forward/backward/step.

    Args:
        dtype: 0 -> float32 path, 1 -> bfloat16 path. Any other value is
            reserved: no forward is run, and the subsequent ``run_mod``
            access would raise NameError (original behavior preserved).
    """
    out_feature = [1024, 256, 1, torch.randint(3, 10, (1,)).item()]
    in_feature = [128, 479, torch.randint(3, 10, (1,)).item()]
    input_shapes = []
    for s in in_feature:
        # 2-D, 3-D and 4-D inputs with the same trailing feature dim.
        input_shapes += [(128, s), (2, 64, s), (2, 2, 32, s)]
    options = itertools.product(out_feature, [True, False], input_shapes)
    for out_features, bias, x_shape in options:
        in_features = x_shape[-1]
        x = torch.randn(x_shape, dtype=torch.float32).requires_grad_()
        model = Model(in_features, out_features, bias)
        optimizer = torch.optim.Adagrad(model.parameters(), lr=0.1)
        if dtype in (0, 1):
            # The original duplicated this branch for fp32/bf16 with only the
            # dtype differing — unified here.
            run_dtype = torch.float32 if dtype == 0 else torch.bfloat16
            conf = ipex.AmpConf(run_dtype)
            model, optimizer = ipex.optimize(
                model, dtype=run_dtype, optimizer=optimizer, level="O1"
            )
            with ipex.amp.autocast(enabled=True, configure=conf):
                run_mod = model.forward(x).sum()
        else:  # reserved
            pass
        optimizer.zero_grad()
        run_mod.backward()
        optimizer.step()
if __name__ == "__main__":
    # Run the sweep once per supported dtype path (0 = fp32, 1 = bf16),
    # printing a banner before each.
    print(f"fp32, {'*' * 50}")
    run_model(0)
    print(f"bf16, {'*' * 50}")
    run_model(1)
| 1,829 | 32.272727 | 76 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/test_compile.py | import unittest
import itertools
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import intel_extension_for_pytorch as ipex
from common_utils import TestCase
class Conv_Bn_Relu(nn.Module):
    """Conv2d(6->3, kernel 3) -> BatchNorm2d(eps=1e-3) -> ReLU test module."""

    def __init__(self):
        super(Conv_Bn_Relu, self).__init__()
        self.conv = nn.Conv2d(6, 3, 3)
        self.bn = nn.BatchNorm2d(3, eps=0.001)

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return F.relu(y)
class TestCompile(TestCase):
    """Verify ipex.compile output matches eager execution for Conv_Bn_Relu."""

    def test_inference(self):
        base_model = Conv_Bn_Relu().to(memory_format=torch.channels_last).eval()
        x = torch.randn(3, 6, 10, 10).to(memory_format=torch.channels_last)
        # Sweep dtype x (with/without ipex.optimize) combinations.
        for dtype, use_ipex_optimize in itertools.product(
            [torch.float32, torch.bfloat16], [True, False]
        ):
            model = copy.deepcopy(base_model)
            if use_ipex_optimize:
                model = ipex.optimize(model, dtype=dtype)
            with torch.cpu.amp.autocast(
                enabled=(dtype == torch.bfloat16), dtype=torch.bfloat16
            ), torch.no_grad():
                eager_out = model(x)
                fx_model = torch.fx.symbolic_trace(model)
                compiled_model = ipex.compile(fx_model, [x])
                for _ in range(2):  # warm up
                    compiled_model(x)
                compiled_out = compiled_model(x)
            self.assertEqual(eager_out, compiled_out)
            self.assertTrue(compiled_out.dtype == dtype)
if __name__ == "__main__":
    # Standard unittest entry point (the return value is kept but unused).
    test = unittest.main()
| 1,493 | 28.88 | 76 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/fpmath_mode.py | import argparse
import torch
import torch.nn as nn
from torch.optim import SGD
import intel_extension_for_pytorch as ipex
class TestModel(torch.nn.Module):
    """Toy network exercising conv, matmul and linear in one forward pass.

    forward: conv (3->64, k7 s2 p3) -> matmul of the conv output with its
    dims 0 and 1 transposed (broadcasting over the leading dims) -> linear.
    For the linear to apply, ``ic`` must equal the conv output's last
    spatial dimension (e.g. 112 for a 224x224 input).
    """

    def __init__(self, ic, oc, bias):
        super(TestModel, self).__init__()
        self.conv = torch.nn.Conv2d(
            3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=bias
        )
        self.linear = nn.Linear(ic, oc, bias=bias)

    def forward(self, x):
        conv_out = self.conv(x)
        prod = torch.matmul(conv_out, torch.transpose(conv_out, 0, 1))
        return self.linear(prod)
class TestLSTM(torch.nn.Module):
    """Single LSTM layer (1024 -> 1024); forward returns only the output
    sequence and discards the final hidden/cell states."""

    def __init__(self):
        super(TestLSTM, self).__init__()
        self.lstm = torch.nn.LSTM(input_size=1024, hidden_size=1024)

    def forward(self, x):
        output, _states = self.lstm(x)
        return output
def run_model(args):
    """Exercise conv/matmul/linear and LSTM paths under an FP32 math mode.

    Args:
        args: parsed CLI namespace with attributes ``mode`` ("imperative" or
            "jit"), ``fpmath`` ("BF32" selects BF32, anything else FP32),
            ``env`` (skip the explicit set_fp32_math_mode call — presumably
            the mode is then driven by an environment variable; TODO confirm
            against the calling harness), and ``bias`` (whether TestModel's
            layers carry bias terms).
    """
    mode = args.mode
    # Map the CLI string onto the IPEX math-mode enum; only the exact string
    # "BF32" selects BF32, everything else falls back to FP32.
    fpmath_mode = (
        ipex.FP32MathMode.BF32 if args.fpmath == "BF32" else ipex.FP32MathMode.FP32
    )
    if not args.env:
        # Program the FP32 math mode explicitly; with --env it is assumed to
        # be configured externally.
        ipex.set_fp32_math_mode(mode=fpmath_mode, device="cpu")
    inputs = torch.randn(1, 3, 224, 224).requires_grad_()
    inputs2 = torch.randn(50, 50, 1024).requires_grad_()
    if args.bias:
        model = TestModel(112, 10, True)
    else:
        model = TestModel(112, 10, False)
    # The LSTM model is always exercised in training mode.
    model2 = TestLSTM().train()
    if mode == "jit":
        # Inference path: optimize for eval, then trace/freeze below.
        model = model.eval()
        model = ipex.optimize(
            model, dtype=torch.float32, level="O1", auto_kernel_selection=True
        )
    else:
        # Training path: optimize the model together with its SGD optimizer.
        model = model.train()
        origin_optimizer1 = SGD(model.parameters(), lr=0.01, momentum=0.9)
        model, _ = ipex.optimize(
            model,
            dtype=torch.float32,
            optimizer=origin_optimizer1,
            level="O1",
            auto_kernel_selection=True,
        )
    # Swap the stock nn.LSTM for the IPEX fused LSTM implementation in place.
    ipex.nn.utils._lstm_convert.replace_lstm_with_ipex_lstm(model2, None)
    if mode == "jit":
        model = torch.jit.trace(model, inputs).eval()
        model = torch.jit.freeze(model)
    output = model(inputs)
    output2 = model2(inputs2)
    if mode == "imperative":
        # Only the imperative path runs backward; the jit path is
        # inference-only (the model was frozen above).
        output.sum().backward()
        output2.sum().backward()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--mode", default="imperative", type=str)
parser.add_argument("--fpmath", default="BF32", type=str)
parser.add_argument("--env", action="store_true", default=False)
parser.add_argument("--bias", default=False, type=bool)
args = parser.parse_args()
run_model(args)
| 2,511 | 30.012346 | 83 | py |
intel-extension-for-pytorch | intel-extension-for-pytorch-master/tests/cpu/override.py | import torch
import intel_extension_for_pytorch as ipex
# Names of tensor-creation / factory functions in the top-level ``torch``
# namespace that are monkey-patched below (via make_hooked_func) so that,
# unless the caller passes an explicit ``device=``, their results are moved
# to ``ipex.DEVICE``.
torch_function = [
    "rand",
    "randint",
    "arange",
    "bartlett_window",
    "blackman_window",
    "empty",
    "_empty_affine_quantized",
    "_empty_per_channel_affine_quantized",
    "empty_strided",
    "eye",
    "full",
    "from_file",
    "from_numpy",
    "hann_window",
    "hamming_window",
    "linspace",
    "logspace",
    "ones",
    "scalar_tensor",
    "randn",
    "randperm",
    "range",
    "zeros",
    "sparse_coo_tensor",
    "tril_indices",
    "triu_indices",
    "normal",
    "tensor",
]
def make_hooked_func(torch_func):
    """Wrap *torch_func* so its result lands on ``ipex.DEVICE`` by default.

    If the caller passes an explicit ``device=`` keyword the original
    function is invoked untouched; otherwise the result is moved to
    ``ipex.DEVICE`` via ``.to(...)``.

    Args:
        torch_func: the original ``torch.*`` factory function to wrap.

    Returns:
        The wrapping function, carrying the original's metadata
        (``__name__``, ``__doc__``, ...) so introspection and debugging
        still identify the underlying factory.
    """
    import functools

    @functools.wraps(torch_func)
    def hooked_func(*args, **kwargs):
        if "device" in kwargs:
            # Caller chose a device explicitly -- honor it.
            return torch_func(*args, **kwargs)
        else:
            return torch_func(*args, **kwargs).to(ipex.DEVICE)

    return hooked_func
# Monkey-patch each listed factory so results are routed to ipex.DEVICE
# whenever the caller did not specify a device explicitly.
for _name in torch_function:
    setattr(torch, _name, make_hooked_func(getattr(torch, _name)))
| 1,016 | 19.34 | 62 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.