id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
16,097 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
The provided code snippet includes necessary dependencies for implementing the `graph` function. Write a Python function `def graph(anim)` to solve the following problem:
Generates a weighted adjacency matrix using local joint distances along the skeletal structure. Joints which are not connected are assigned the weight `0`. Joints which actually have zero distance between them, but are still connected, are perturbed by some minimal amount. The output of this routine can be used with the `scipy.sparse.csgraph` routines for graph analysis. Parameters ---------- anim : Animation input animation Returns ------- graph : (N, N) ndarray weight adjacency matrix using local distances along the skeletal structure from joint N to joint M. Joints which are not directly connected are assigned the weight `0`.
Here is the function:
def graph(anim):
    """
    Generates a weighted adjacency matrix
    using local joint distances along
    the skeletal structure.

    Joints which are not connected
    are assigned the weight `0`.

    Joints which actually have zero distance
    between them, but are still connected, are
    perturbed by some minimal amount.

    The output of this routine can be used
    with the `scipy.sparse.csgraph`
    routines for graph analysis.

    Parameters
    ----------
    anim : Animation
        input animation

    Returns
    -------
    graph : (N, N) ndarray
        weight adjacency matrix using
        local distances along the
        skeletal structure from joint
        N to joint M. Joints which are
        not directly connected are
        assigned the weight `0`.
    """
    # BUG FIX: np.zeros takes the shape as a single tuple argument;
    # the original passed two positional ints, which raises a TypeError
    # (the second positional argument of np.zeros is the dtype).
    graph = np.zeros((anim.shape[1], anim.shape[1]))

    # Bone lengths, perturbed by a small epsilon so that connected joints
    # with zero offset still receive a non-zero edge weight.
    lengths = np.sum(anim.offsets**2.0, axis=1)**0.5 + 0.001

    for i, p in enumerate(anim.parents):
        if p == -1: continue  # root joint has no parent edge
        # NOTE(review): the edge weight uses the parent's offset length
        # (lengths[p]); the child's offset (lengths[i]) would arguably be
        # the bone length from p to i — kept as in the original.
        graph[i, p] = lengths[p]
        graph[p, i] = lengths[p]

    return graph
16,098 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def parents_list(parents):
    """
    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    parents : [ndarray]
        List of single-element arrays giving
        the parent joint index of each joint
    """
    # Reshape to (J, 1) so that iterating yields one length-1 array
    # per joint, matching the shape produced by children_list.
    column = parents.reshape(-1, 1)
    return [row for row in column]
def children_list(parents):
    """
    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    children : [ndarray]
        List of arrays of joint indices for
        the children of each joint
    """
    def joint_children(i):
        # all joints whose recorded parent is joint i
        return [j for j, p in enumerate(parents) if p == i]

    # `joints(parents)` in the original is simply the joint index range
    # 0..J-1, so iterate indices directly and drop the map/lambda
    # indirection on the undefined helper.
    return [np.array(joint_children(i)) for i in range(len(parents))]
The provided code snippet includes necessary dependencies for implementing the `distances` function. Write a Python function `def distances(anim)` to solve the following problem:
Generates a distance matrix for pairwise joint distances along the skeletal structure Parameters ---------- anim : Animation input animation Returns ------- distances : (N, N) ndarray array of pairwise distances along skeletal structure from some joint N to some joint M
Here is the function:
def distances(anim):
    """
    Generates a distance matrix for
    pairwise joint distances along
    the skeletal structure

    Parameters
    ----------
    anim : Animation
        input animation

    Returns
    -------
    distances : (N, N) ndarray
        array of pairwise distances
        along skeletal structure
        from some joint N to some
        joint M
    """
    distances = np.zeros((anim.shape[1], anim.shape[1]))
    generated = distances.copy().astype(bool)

    joint_lengths = np.sum(anim.offsets**2.0, axis=1)**0.5
    # BUG FIX: children_list / parents_list take a parents array,
    # not an Animation object.
    joint_children = children_list(anim.parents)
    joint_parents = parents_list(anim.parents)

    def find_distance(distances, generated, prev, i, j):
        """ If root, identity, or already generated, return """
        if j == -1: return (0.0, True)
        if j == i: return (0.0, True)
        if generated[i, j]: return (distances[i, j], True)

        """ Find best distances along parents and children """
        par_dists = [(joint_lengths[j], find_distance(distances, generated, j, i, p)) for p in joint_parents[j] if p != prev]
        out_dists = [(joint_lengths[c], find_distance(distances, generated, j, i, c)) for c in joint_children[j] if c != prev]

        """ Check valid distance and not dead end """
        par_dists = [a + d for (a, (d, f)) in par_dists if f]
        out_dists = [a + d for (a, (d, f)) in out_dists if f]

        """ All dead ends """
        if (out_dists + par_dists) == []: return (0.0, False)

        """ Get minimum path """
        dist = min(out_dists + par_dists)
        distances[i, j] = dist; distances[j, i] = dist
        generated[i, j] = True; generated[j, i] = True
        # BUG FIX: the original fell off the end here and implicitly
        # returned None, which crashes the tuple unpacking `(d, f)`
        # in the recursive callers above.
        return (dist, True)

    # BUG FIX: xrange is Python 2 only.
    for i in range(anim.shape[1]):
        for j in range(anim.shape[1]):
            find_distance(distances, generated, -1, i, j)

    return distances
16,099 | import numpy as np
import scipy.sparse as sparse
import Animation as Animation
def edges(parents):
    """
    Animation structure edges

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    edges : (M, 2) ndarray
        array of pairs where each
        pair contains two indices of joints
        which corresponds to an edge in the
        joint structure going from parent to child.
    """
    # `joints(parents)` in the original is simply the joint index range
    # 0..J-1. Pair each joint with its parent and drop the root's
    # (-1, 0) entry with the [1:] slice.
    return np.array(list(zip(parents, range(len(parents))))[1:])
The provided code snippet includes necessary dependencies for implementing the `incidence` function. Write a Python function `def incidence(parents)` to solve the following problem:
Incidence Matrix Parameters ---------- parents : (J) ndarray parents array Returns ------- incidence : (N, M) ndarray Matrix of N joint positions by M edges where each entry is either 1 or -1 and multiplication by the joint positions returns an array of vectors along each edge of the structure
Here is the function:
def incidence(parents):
    """
    Incidence Matrix

    Parameters
    ----------
    parents : (J) ndarray
        parents array

    Returns
    -------
    incidence : (N, M) ndarray
        Matrix of N joint positions by
        M edges where each entry is either
        1 or -1 and multiplication by the
        joint positions returns an
        array of vectors along each edge
        of the structure
    """
    es = edges(parents)

    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    inc = np.zeros((len(parents)-1, len(parents))).astype(int)
    for i, e in enumerate(es):
        inc[i, e[0]] = 1   # edge start (parent)
        inc[i, e[1]] = -1  # edge end (child)

    # Transpose so rows are joints and columns are edges.
    return inc.T
import operator
import re

import numpy as np

from Animation import Animation
from Quaternions import Quaternions
# Maps BVH rotation channel names to the single-letter axis codes used
# to build the Euler rotation order string (e.g. 'zyx').
channelmap = {
    'Xrotation' : 'x',
    'Yrotation' : 'y',
    'Zrotation' : 'z'
}
class Animation:
    """
    Animation is a numpy-like wrapper for animation data

    Animation data consists of several arrays consisting
    of F frames and J joints.

    The animation is specified by

        rotations : (F, J) Quaternions | Joint Rotations
        positions : (F, J, 3) ndarray  | Joint Positions

    The base pose is specified by

        orients : (J) Quaternions | Joint Orientations
        offsets : (J, 3) ndarray  | Joint Offsets

    And the skeletal structure is specified by

        parents : (J) ndarray | Joint Parents
    """

    def __init__(self, rotations, positions, orients, offsets, parents):
        self.rotations = rotations
        self.positions = positions
        self.orients = orients
        self.offsets = offsets
        self.parents = parents

    def __op__(self, op, other):
        # Apply a binary operator component-wise between two Animations.
        return Animation(
            op(self.rotations, other.rotations),
            op(self.positions, other.positions),
            op(self.orients, other.orients),
            op(self.offsets, other.offsets),
            op(self.parents, other.parents))

    def __iop__(self, op, other):
        # BUG FIX: the original read `self.roations` (typo) for both the
        # rotations and positions updates, combining rotations with
        # other.positions; operate on matching components instead.
        self.rotations = op(self.rotations, other.rotations)
        self.positions = op(self.positions, other.positions)
        self.orients = op(self.orients, other.orients)
        self.offsets = op(self.offsets, other.offsets)
        self.parents = op(self.parents, other.parents)
        return self

    def __sop__(self, op):
        # Apply a unary operator to every component.
        return Animation(
            op(self.rotations),
            op(self.positions),
            op(self.orients),
            op(self.offsets),
            op(self.parents))

    def __add__(self, other): return self.__op__(operator.add, other)
    def __sub__(self, other): return self.__op__(operator.sub, other)
    def __mul__(self, other): return self.__op__(operator.mul, other)
    # BUG FIX: operator.div does not exist in Python 3; truediv is the
    # `/` operator, and __truediv__ is the Python 3 hook for it.
    def __div__(self, other): return self.__op__(operator.truediv, other)
    __truediv__ = __div__

    def __abs__(self): return self.__sop__(operator.abs)
    def __neg__(self): return self.__sop__(operator.neg)

    def __iadd__(self, other): return self.__iop__(operator.iadd, other)
    def __isub__(self, other): return self.__iop__(operator.isub, other)
    def __imul__(self, other): return self.__iop__(operator.imul, other)
    # BUG FIX: operator.idiv does not exist in Python 3.
    def __idiv__(self, other): return self.__iop__(operator.itruediv, other)
    __itruediv__ = __idiv__

    def __len__(self): return len(self.rotations)

    def __getitem__(self, k):
        # A tuple key slices (frames, joints); the joint part of the key
        # (k[1:]) also slices the per-joint base-pose arrays.
        if isinstance(k, tuple):
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients[k[1:]],
                self.offsets[k[1:]],
                self.parents[k[1:]])
        else:
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients,
                self.offsets,
                self.parents)

    def __setitem__(self, k, v):
        if isinstance(k, tuple):
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k[1:], v.orients)
            self.offsets.__setitem__(k[1:], v.offsets)
            self.parents.__setitem__(k[1:], v.parents)
        else:
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k, v.orients)
            self.offsets.__setitem__(k, v.offsets)
            self.parents.__setitem__(k, v.parents)

    @property
    def shape(self):
        # BUG FIX: accessed as `anim.shape[0]` / `anim.shape[1]`
        # throughout this library, so it must be a property. (F, J).
        return (self.rotations.shape[0], self.rotations.shape[1])

    def copy(self):
        """Deep-ish copy: each component array is copied."""
        return Animation(
            self.rotations.copy(), self.positions.copy(),
            self.orients.copy(), self.offsets.copy(),
            self.parents.copy())

    def repeat(self, *args, **kw):
        # Only the per-frame arrays are repeated; the base pose and
        # skeleton are shared with the original.
        return Animation(
            self.rotations.repeat(*args, **kw),
            self.positions.repeat(*args, **kw),
            self.orients, self.offsets, self.parents)

    def ravel(self):
        """Flatten to a single vector (rotations/orients as log-quaternions)."""
        return np.hstack([
            self.rotations.log().ravel(),
            self.positions.ravel(),
            self.orients.log().ravel(),
            self.offsets.ravel()])

    @classmethod
    def unravel(cls, anim, shape, parents):
        """
        Inverse of ravel: reconstruct an Animation from a flat vector.

        BUG FIX: the original misspelled `cls` as `clas` in the
        signature but used `cls` in the body (NameError), and was
        missing the @classmethod decorator.
        """
        nf, nj = shape
        rotations = anim[nf*nj*0:nf*nj*3]
        positions = anim[nf*nj*3:nf*nj*6]
        orients = anim[nf*nj*6+nj*0:nf*nj*6+nj*3]
        offsets = anim[nf*nj*6+nj*3:nf*nj*6+nj*6]
        return cls(
            Quaternions.exp(rotations), positions,
            Quaternions.exp(orients), offsets,
            parents.copy())
class Quaternions:
    """
    Quaternions is a wrapper around a numpy ndarray
    that allows it to act as if it were an narray of
    a quaternion data type.

    Therefore addition, subtraction, multiplication,
    division, negation, absolute, are all defined
    in terms of quaternion operations such as quaternion
    multiplication.

    This allows for much neater code and many routines
    which conceptually do the same thing to be written
    in the same way for point data and for rotation data.

    The Quaternions class has been designed such that it
    should support broadcasting and slicing in all of the
    usual ways.

    Quaternions are stored in `self.qs` as an ndarray of
    shape (..., 4) with the scalar (w) component first.
    """

    def __init__(self, qs):
        if isinstance(qs, np.ndarray):
            # Promote a single quaternion (4,) to an array of one (1, 4).
            if len(qs.shape) == 1: qs = np.array([qs])
            self.qs = qs
            return
        if isinstance(qs, Quaternions):
            # BUG FIX: the original stored the wrapper object itself
            # (self.qs = qs); unwrap to the underlying ndarray instead.
            self.qs = qs.qs
            return
        raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))

    def __str__(self): return "Quaternions("+ str(self.qs) + ")"
    def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"

    """ Helper Methods for Broadcasting and Data extraction """

    @classmethod
    def _broadcast(cls, sqs, oqs, scalar=False):
        # BUG FIX: called as cls._broadcast(...) / Quaternions._broadcast(...)
        # so it must be a classmethod.
        if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])

        ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
        os = np.array(oqs.shape)

        if len(ss) != len(os):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        if np.all(ss == os): return sqs, oqs

        if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))

        # Repeat size-1 axes on either side until the shapes agree.
        sqsn, oqsn = sqs.copy(), oqs.copy()

        for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
        for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)

        return sqsn, oqsn

    """ Adding Quaternions is just Defined as Multiplication """

    def __add__(self, other): return self * other
    def __sub__(self, other): return self / other

    """ Quaternion Multiplication """

    def __mul__(self, other):
        """
        Quaternion multiplication has three main methods.

        When multiplying a Quaternions array by Quaternions
        normal quaternion multiplication is performed.

        When multiplying a Quaternions array by a vector
        array of the same shape, where the last axis is 3,
        it is assumed to be a Quaternion by 3D-Vector
        multiplication and the 3D-Vectors are rotated
        in space by the Quaternions.

        When multiplying a Quaternions array by a scalar
        or vector of different shape it is assumed to be
        a Quaternions by Scalars multiplication and the
        Quaternions are scaled using Slerp and the identity
        quaternions.
        """

        """ If Quaternions type do Quaternions * Quaternions """
        if isinstance(other, Quaternions):
            sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
            q0 = sqs[...,0]; q1 = sqs[...,1]
            q2 = sqs[...,2]; q3 = sqs[...,3]
            r0 = oqs[...,0]; r1 = oqs[...,1]
            r2 = oqs[...,2]; r3 = oqs[...,3]
            qs = np.empty(sqs.shape)
            qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
            qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
            qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
            qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
            return Quaternions(qs)

        """ If array type do Quaternions * Vectors """
        if isinstance(other, np.ndarray) and other.shape[-1] == 3:
            # Embed vectors as pure quaternions and conjugate: q v q^-1.
            vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
            return (self * (vs * -self)).imaginaries

        """ If float do Quaternions * Scalars """
        if isinstance(other, np.ndarray) or isinstance(other, float):
            return Quaternions.slerp(Quaternions.id_like(self), self, other)

        raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))

    def __div__(self, other):
        """
        When a Quaternion type is supplied, division is defined
        as multiplication by the inverse of that Quaternion.

        When a scalar or vector is supplied it is defined
        as multiplication of one over the supplied value.
        Essentially a scaling.
        """
        if isinstance(other, Quaternions): return self * (-other)
        if isinstance(other, np.ndarray): return self * (1.0 / other)
        if isinstance(other, float): return self * (1.0 / other)
        raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))

    # BUG FIX: Python 3 dispatches `/` to __truediv__, not __div__.
    __truediv__ = __div__

    # NOTE(review): __eq__ returns an elementwise ndarray (numpy-style)
    # rather than a bool, and defining __eq__ makes instances
    # unhashable — kept as in the original.
    def __eq__(self, other): return self.qs == other.qs
    def __ne__(self, other): return self.qs != other.qs

    def __neg__(self):
        """ Invert Quaternions """
        return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))

    def __abs__(self):
        """ Unify Quaternions To Single Pole """
        qabs = self.normalized().copy()
        top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
        bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
        qabs.qs[top < bot] = -qabs.qs[top < bot]
        return qabs

    def __iter__(self): return iter(self.qs)
    def __len__(self): return len(self.qs)

    def __getitem__(self, k): return Quaternions(self.qs[k])
    def __setitem__(self, k, v): self.qs[k] = v.qs

    @property
    def lengths(self):
        # BUG FIX: accessed as `self.lengths[..., np.newaxis]` in
        # normalized(), so this must be a property, not a method.
        return np.sum(self.qs**2.0, axis=-1)**0.5

    @property
    def reals(self):
        # Scalar (w) components; used as an attribute in log()/constrained().
        return self.qs[...,0]

    @property
    def imaginaries(self):
        # Vector (x, y, z) components; used as an attribute in __mul__.
        return self.qs[...,1:4]

    @property
    def shape(self):
        # Shape of the quaternion array without the trailing 4-axis.
        return self.qs.shape[:-1]

    def repeat(self, n, **kwargs):
        return Quaternions(self.qs.repeat(n, **kwargs))

    def normalized(self):
        return Quaternions(self.qs / self.lengths[...,np.newaxis])

    def log(self):
        """Quaternion logarithm: (..., 3) rotation vectors (angle * axis / 2)."""
        norm = abs(self.normalized())
        imgs = norm.imaginaries
        lens = np.sqrt(np.sum(imgs**2, axis=-1))
        lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
        return imgs * lens[...,np.newaxis]

    def constrained(self, axis):
        """Closest rotations to self that are pure rotations about `axis`."""
        rl = self.reals
        im = np.sum(axis * self.imaginaries, axis=-1)

        t1 = -2 * np.arctan2(rl, im) + np.pi
        t2 = -2 * np.arctan2(rl, im) - np.pi

        top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
        bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
        img = self.dot(top) > self.dot(bot)

        ret = top.copy()
        ret[ img] = top[ img]
        ret[~img] = bot[~img]
        return ret

    def constrained_x(self): return self.constrained(np.array([1,0,0]))
    def constrained_y(self): return self.constrained(np.array([0,1,0]))
    def constrained_z(self): return self.constrained(np.array([0,0,1]))

    def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)

    def copy(self): return Quaternions(np.copy(self.qs))

    def reshape(self, s):
        # BUG FIX: ndarray.reshape returns a new array; the original
        # discarded the result, leaving the quaternions unchanged.
        self.qs = self.qs.reshape(s)
        return self

    def interpolate(self, ws):
        # BUG FIX: log is a method; the original referenced `.log`
        # (the bound method object) without calling it.
        return Quaternions.exp(np.average(abs(self).log(), axis=0, weights=ws))

    def euler(self, order='xyz'):
        """
        Convert to Euler angles (radians) in the given order.
        Only 'xyz' is implemented; earlier variants of these
        conversions were incorrect and have been removed.
        Reference matrices: OgreMatrix3.cpp (see repository notes).
        """
        q = self.normalized().qs
        q0 = q[...,0]
        q1 = q[...,1]
        q2 = q[...,2]
        q3 = q[...,3]
        es = np.zeros(self.shape + (3,))

        if order == 'xyz':
            es[..., 2] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
            es[..., 1] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
            es[..., 0] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
        else:
            raise NotImplementedError('Cannot convert from ordering %s' % order)

        return es

    def average(self):
        """Single average rotation via the eigenvector method."""
        if len(self.shape) == 1:
            # BUG FIX: numpy.core.umath_tests is deprecated/removed in
            # modern NumPy; np.matmul of (N,4,1) @ (N,1,4) computes the
            # same per-quaternion outer products.
            system = np.matmul(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
            w, v = np.linalg.eigh(system)
            qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
            return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
        else:
            raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')

    def angle_axis(self):
        """Decompose into rotation angles and unit axes."""
        norm = self.normalized()
        s = np.sqrt(1 - (norm.reals**2.0))
        s[s == 0] = 0.001  # avoid division by zero for identity rotations

        angles = 2.0 * np.arccos(norm.reals)
        axis = norm.imaginaries / s[...,np.newaxis]

        return angles, axis

    def transforms(self):
        """Convert to (..., 3, 3) rotation matrices."""
        qw = self.qs[...,0]
        qx = self.qs[...,1]
        qy = self.qs[...,2]
        qz = self.qs[...,3]

        x2 = qx + qx; y2 = qy + qy; z2 = qz + qz
        xx = qx * x2; yy = qy * y2; wx = qw * x2
        xy = qx * y2; yz = qy * z2; wy = qw * y2
        xz = qx * z2; zz = qz * z2; wz = qw * z2

        m = np.empty(self.shape + (3,3))
        m[...,0,0] = 1.0 - (yy + zz)
        m[...,0,1] = xy - wz
        m[...,0,2] = xz + wy
        m[...,1,0] = xy + wz
        m[...,1,1] = 1.0 - (xx + zz)
        m[...,1,2] = yz - wx
        m[...,2,0] = xz - wy
        m[...,2,1] = yz + wx
        m[...,2,2] = 1.0 - (xx + yy)

        return m

    def ravel(self):
        return self.qs.ravel()

    @classmethod
    def id(cls, n):
        """Identity quaternions: n may be an int count or a shape tuple."""
        if isinstance(n, tuple):
            qs = np.zeros(n + (4,))
            qs[...,0] = 1.0
            return Quaternions(qs)

        # BUG FIX: `long` is Python 2 only; Python 3 ints are unbounded.
        if isinstance(n, int):
            qs = np.zeros((n,4))
            qs[:,0] = 1.0
            return Quaternions(qs)

        raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))

    @classmethod
    def id_like(cls, a):
        qs = np.zeros(a.shape + (4,))
        qs[...,0] = 1.0
        return Quaternions(qs)

    @classmethod
    def exp(cls, ws):
        """Quaternion exponential of (..., 3) rotation vectors."""
        ts = np.sum(ws**2.0, axis=-1)**0.5
        ts[ts == 0] = 0.001  # avoid division by zero; normalized below
        ls = np.sin(ts) / ts

        qs = np.empty(ws.shape[:-1] + (4,))
        qs[...,0] = np.cos(ts)
        qs[...,1] = ws[...,0] * ls
        qs[...,2] = ws[...,1] * ls
        qs[...,3] = ws[...,2] * ls

        return Quaternions(qs).normalized()

    @classmethod
    def slerp(cls, q0s, q1s, a):
        """Spherical linear interpolation between q0s and q1s by amount a."""
        fst, snd = cls._broadcast(q0s.qs, q1s.qs)
        fst, a = cls._broadcast(fst, a, scalar=True)
        snd, a = cls._broadcast(snd, a, scalar=True)

        # Cosine of the angle between quaternions; flip the second
        # quaternion to take the shorter arc. (Renamed from `len`,
        # which shadowed the builtin.)
        cos = np.sum(fst * snd, axis=-1)
        neg = cos < 0.0
        cos[neg] = -cos[neg]
        snd[neg] = -snd[neg]

        amount0 = np.zeros(a.shape)
        amount1 = np.zeros(a.shape)

        # Fall back to linear interpolation when nearly parallel.
        linear = (1.0 - cos) < 0.01
        omegas = np.arccos(cos[~linear])
        sinoms = np.sin(omegas)

        amount0[ linear] = 1.0 - a[linear]
        amount1[ linear] = a[linear]
        amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
        amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms

        return Quaternions(
            amount0[...,np.newaxis] * fst +
            amount1[...,np.newaxis] * snd)

    @classmethod
    def between(cls, v0s, v1s):
        """Quaternions rotating vectors v0s onto vectors v1s."""
        a = np.cross(v0s, v1s)
        w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
        return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()

    @classmethod
    def from_angle_axis(cls, angles, axis):
        axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
        sines = np.sin(angles / 2.0)[...,np.newaxis]
        cosines = np.cos(angles / 2.0)[...,np.newaxis]
        return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))

    @classmethod
    def from_euler(cls, es, order='xyz', world=False):
        """Build quaternions from (..., 3) Euler angles in radians."""
        axis = {
            'x' : np.array([1,0,0]),
            'y' : np.array([0,1,0]),
            'z' : np.array([0,0,1]),
        }

        q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
        q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
        q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])

        return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))

    @classmethod
    def from_transforms(cls, ts):
        """Build quaternions from (..., 3, 3) rotation matrices."""
        d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]

        q0 = ( d0 + d1 + d2 + 1.0) / 4.0
        q1 = ( d0 - d1 - d2 + 1.0) / 4.0
        q2 = (-d0 + d1 - d2 + 1.0) / 4.0
        q3 = (-d0 - d1 + d2 + 1.0) / 4.0

        q0 = np.sqrt(q0.clip(0,None))
        q1 = np.sqrt(q1.clip(0,None))
        q2 = np.sqrt(q2.clip(0,None))
        q3 = np.sqrt(q3.clip(0,None))

        # Select per element the numerically largest component and fix
        # the signs of the remaining components relative to it.
        c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
        c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
        c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
        c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)

        q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
        q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
        q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])

        q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
        q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
        q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])

        q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
        q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
        q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])

        q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
        q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
        q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])

        qs = np.empty(ts.shape[:-2] + (4,))
        qs[...,0] = q0
        qs[...,1] = q1
        qs[...,2] = q2
        qs[...,3] = q3

        return cls(qs)
The provided code snippet includes necessary dependencies for implementing the `load` function. Write a Python function `def load(filename, start=None, end=None, order=None, world=False, need_quater=False)` to solve the following problem:
Reads a BVH file and constructs an animation Parameters ---------- filename: str File to be opened start : int Optional Starting Frame end : int Optional Ending Frame order : str Optional Specifier for joint order. Given as string E.G 'xyz', 'zxy' world : bool If set to true euler angles are applied together in world space rather than local space Returns ------- (animation, joint_names, frametime) Tuple of loaded animation and joint names
Here is the function:
def load(filename, start=None, end=None, order=None, world=False, need_quater=False):
    """
    Reads a BVH file and constructs an animation

    Parameters
    ----------
    filename: str
        File to be opened

    start : int
        Optional Starting Frame

    end : int
        Optional Ending Frame

    order : str
        Optional Specifier for joint order.
        Given as string E.G 'xyz', 'zxy'

    world : bool
        If set to true euler angles are applied
        together in world space rather than local
        space

    need_quater : bool
        If True the returned rotations are Quaternions;
        otherwise they are Euler angles in degrees,
        converted to 'xyz' order when necessary.

    Returns
    -------
    (animation, joint_names, frametime)
        Tuple of loaded animation and joint names
    """

    f = open(filename, "r")

    i = 0            # current motion-frame line index
    active = -1      # index of the joint currently being parsed
    end_site = False # inside an "End Site" block (its OFFSET is ignored)

    names = []
    orients = Quaternions.id(0)
    offsets = np.array([]).reshape((0,3))
    parents = np.array([], dtype=int)

    for line in f:

        if "HIERARCHY" in line: continue
        if "MOTION" in line: continue

        """ Modified line read to handle mixamo data """
        # rmatch = re.match(r"ROOT (\w+)", line)
        # mixamo joint names contain a namespace prefix like "mixamorig:Hips"
        rmatch = re.match(r"ROOT (\w+:?\w+)", line)
        if rmatch:
            names.append(rmatch.group(1))
            offsets = np.append(offsets, np.array([[0,0,0]]), axis=0)
            orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0)
            parents = np.append(parents, active)
            active = (len(parents)-1)
            continue

        if "{" in line: continue

        if "}" in line:
            # closing brace pops back up to the parent joint,
            # unless it closes an End Site block
            if end_site: end_site = False
            else: active = parents[active]
            continue

        offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
        if offmatch:
            if not end_site:
                offsets[active] = np.array([list(map(float, offmatch.groups()))])
            continue

        chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
        if chanmatch:
            channels = int(chanmatch.group(1))
            if order is None:
                # Derive the Euler order from the rotation channel names,
                # skipping the 3 position channels when there are 6.
                channelis = 0 if channels == 3 else 3
                channelie = 3 if channels == 3 else 6
                parts = line.split()[2+channelis:2+channelie]
                if any([p not in channelmap for p in parts]):
                    continue
                order = "".join([channelmap[p] for p in parts])
            continue

        """ Modified line read to handle mixamo data """
        # jmatch = re.match("\s*JOINT\s+(\w+)", line)
        jmatch = re.match("\s*JOINT\s+(\w+:?\w+)", line)
        if jmatch:
            names.append(jmatch.group(1))
            offsets = np.append(offsets, np.array([[0,0,0]]), axis=0)
            orients.qs = np.append(orients.qs, np.array([[1,0,0,0]]), axis=0)
            parents = np.append(parents, active)
            active = (len(parents)-1)
            continue

        if "End Site" in line:
            end_site = True
            continue

        fmatch = re.match("\s*Frames:\s+(\d+)", line)
        if fmatch:
            # NOTE(review): with start/end the allocated frame count is
            # (end - start) - 1, matching the `i >= end-1` cutoff below.
            if start and end:
                fnum = (end - start)-1
            else:
                fnum = int(fmatch.group(1))
            jnum = len(parents)
            # pre-allocate per-frame arrays now that the skeleton is known
            positions = offsets[np.newaxis].repeat(fnum, axis=0)
            rotations = np.zeros((fnum, len(orients), 3))
            continue

        fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line)
        if fmatch:
            frametime = float(fmatch.group(1))
            continue

        # skip motion lines outside the requested [start, end) window
        if (start and end) and (i < start or i >= end-1):
            i += 1
            continue

        # dmatch = line.strip().split(' ')
        dmatch = line.strip().split()
        if dmatch:
            data_block = np.array(list(map(float, dmatch)))
            N = len(parents)
            fi = i - start if start else i
            # NOTE(review): `channels` holds the last CHANNELS count seen,
            # assumed uniform across joints here.
            if channels == 3:
                # root position followed by per-joint rotations
                positions[fi,0:1] = data_block[0:3]
                rotations[fi, : ] = data_block[3: ].reshape(N,3)
            elif channels == 6:
                # per-joint position and rotation
                data_block = data_block.reshape(N,6)
                positions[fi,:] = data_block[:,0:3]
                rotations[fi,:] = data_block[:,3:6]
            elif channels == 9:
                positions[fi,0] = data_block[0:3]
                data_block = data_block[3:].reshape(N-1,9)
                rotations[fi,1:] = data_block[:,3:6]
                positions[fi,1:] += data_block[:,0:3] * data_block[:,6:9]
            else:
                raise Exception("Too many channels! %i" % channels)

            i += 1

    f.close()

    if need_quater:
        rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world)
    elif order != 'xyz':
        # round-trip through quaternions to re-express Euler angles in 'xyz' order
        rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world)
        rotations = np.degrees(rotations.euler())

    return (Animation(rotations, positions, orients, offsets, parents), names, frametime)
16,101 | import re
import numpy as np
from Animation import Animation
from Quaternions import Quaternions
# Inverse of the axis-letter map: turns an order string like 'zyx'
# back into BVH channel names when writing files.
channelmap_inv = {
    'x': 'Xrotation',
    'y': 'Yrotation',
    'z': 'Zrotation',
}

# Maps an axis letter to its column index in a (..., 3) Euler-angle array.
ordermap = {
    'x' : 0,
    'y' : 1,
    'z' : 2,
}
def save_joint(f, anim, names, t, i, order='zyx', positions=False):
    """
    Recursively writes the BVH HIERARCHY entry for joint `i` and all
    of its descendants to the open file `f`.

    `t` is the current indentation string (tabs); the indentation at
    the point where this subtree closes is returned so the caller can
    continue writing at the correct depth.
    """
    f.write("%sJOINT %s\n" % (t, names[i]))
    f.write("%s{\n" % t)
    t += '\t'

    f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[i,0], anim.offsets[i,1], anim.offsets[i,2]))

    if positions:
        f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" % (t,
            channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))
    else:
        f.write("%sCHANNELS 3 %s %s %s\n" % (t,
            channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))

    # Recurse into every child joint; a joint with no children is a
    # leaf and gets an End Site block instead.
    children = [j for j in range(anim.shape[1]) if anim.parents[j] == i]

    for child in children:
        t = save_joint(f, anim, names, t, child, order=order, positions=positions)

    if not children:
        f.write("%sEnd Site\n" % t)
        f.write("%s{\n" % t)
        t += '\t'
        f.write("%sOFFSET %f %f %f\n" % (t, 0.0, 0.0, 0.0))
        t = t[:-1]
        f.write("%s}\n" % t)

    t = t[:-1]
    f.write("%s}\n" % t)

    return t
The provided code snippet includes necessary dependencies for implementing the `save` function. Write a Python function `def save(filename, anim, names=None, frametime=1.0/24.0, order='zyx', positions=False, orients=True, mask=None, quater=False)` to solve the following problem:
Saves an Animation to file as BVH Parameters ---------- filename: str File to be saved to anim : Animation Animation to save names : [str] List of joint names order : str Optional Specifier for joint order. Given as string E.G 'xyz', 'zxy' frametime : float Optional Animation Frame time positions : bool Optional specifier to save bone positions for each frame orients : bool Multiply joint orients to the rotations before saving.
Here is the function:
def save(filename, anim, names=None, frametime=1.0/24.0, order='zyx', positions=False, orients=True, mask=None, quater=False):
    """
    Saves an Animation to file as BVH

    Parameters
    ----------
    filename: str
        File to be saved to

    anim : Animation
        Animation to save

    names : [str]
        List of joint names

    order : str
        Optional Specifier for joint order.
        Given as string E.G 'xyz', 'zxy'

    frametime : float
        Optional Animation Frame time

    positions : bool
        Optional specifier to save bone
        positions for each frame

    orients : bool
        Multiply joint orients to the rotations
        before saving.
        NOTE(review): currently unused; the orient-multiplying code
        path was removed upstream.

    mask : (J) array-like or None
        Optional per-joint mask; joints whose mask value is not 1
        are written with zero rotations.

    quater : bool
        If True, anim.rotations is a Quaternions object and is
        converted to Euler degrees; otherwise it is assumed to
        already be an Euler-angle array.
    """
    if names is None:
        names = ["joint_" + str(i) for i in range(len(anim.parents))]

    with open(filename, 'w') as f:

        t = ""
        f.write("%sHIERARCHY\n" % t)
        f.write("%sROOT %s\n" % (t, names[0]))
        f.write("%s{\n" % t)
        t += '\t'

        f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[0,0], anim.offsets[0,1], anim.offsets[0,2]) )
        f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" %
            (t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))

        # Write each direct child of the root recursively.
        for i in range(anim.shape[1]):
            if anim.parents[i] == 0:
                t = save_joint(f, anim, names, t, i, order=order, positions=positions)

        t = t[:-1]
        f.write("%s}\n" % t)

        f.write("MOTION\n")
        f.write("Frames: %i\n" % anim.shape[0])
        f.write("Frame Time: %f\n" % frametime)

        if quater:
            # rotations stored as Quaternions; convert to Euler degrees
            rots = np.degrees(anim.rotations.euler(order=order[::-1]))
        else:
            # rotations already stored as an Euler-angle array
            rots = anim.rotations
        poss = anim.positions

        for i in range(anim.shape[0]):
            for j in range(anim.shape[1]):

                if positions or j == 0:
                    f.write("%f %f %f %f %f %f " % (
                        poss[i,j,0], poss[i,j,1], poss[i,j,2],
                        rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]]))
                else:
                    # BUG FIX: `mask == None` compares elementwise when mask
                    # is an ndarray (and raises on truth-testing); identity
                    # comparison is the correct None check.
                    if mask is None or mask[j] == 1:
                        f.write("%f %f %f " % (
                            rots[i,j,ordermap[order[0]]], rots[i,j,ordermap[order[1]]], rots[i,j,ordermap[order[2]]]))
                    else:
                        f.write("%f %f %f " % (0, 0, 0))

            f.write("\n")
16,102 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
class Quaternions:
    """
    Quaternions is a wrapper around a numpy ndarray
    that allows it to act as if it were an narray of
    a quaternion data type.

    Therefore addition, subtraction, multiplication,
    division, negation, absolute, are all defined
    in terms of quaternion operations such as quaternion
    multiplication.

    This allows for much neater code and many routines
    which conceptually do the same thing to be written
    in the same way for point data and for rotation data.

    The Quaternions class has been designed such that it
    should support broadcasting and slicing in all of the
    usual ways.

    NOTE(review): the @classmethod / @property decorators had been
    stripped from this copy of the class; they are restored below
    because the code itself relies on them (e.g.
    ``self.lengths[..., np.newaxis]`` in ``normalized`` and
    ``Quaternions._broadcast(a, b)`` in ``__mul__``).
    """

    def __init__(self, qs):
        if isinstance(qs, np.ndarray):
            # Promote a single quaternion (4,) to an array of one (1, 4).
            if len(qs.shape) == 1: qs = np.array([qs])
            self.qs = qs
            return
        if isinstance(qs, Quaternions):
            self.qs = qs.qs
            return
        raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))

    def __str__(self): return "Quaternions("+ str(self.qs) + ")"
    def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"

    """ Helper Methods for Broadcasting and Data extraction """

    @classmethod
    def _broadcast(cls, sqs, oqs, scalar=False):
        # Eagerly repeat length-1 axes so both raw arrays end up with
        # identical shapes (numpy-style broadcasting rules).
        if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
        ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
        os = np.array(oqs.shape)
        if len(ss) != len(os):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
        if np.all(ss == os): return sqs, oqs
        if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
            raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
        sqsn, oqsn = sqs.copy(), oqs.copy()
        for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
        for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
        return sqsn, oqsn

    """ Adding Quaterions is just Defined as Multiplication """

    def __add__(self, other): return self * other
    def __sub__(self, other): return self / other

    """ Quaterion Multiplication """

    def __mul__(self, other):
        """
        Quaternion multiplication has three main methods.

        When multiplying a Quaternions array by Quaternions
        normal quaternion multiplication is performed.

        When multiplying a Quaternions array by a vector
        array of the same shape, where the last axis is 3,
        it is assumed to be a Quaternion by 3D-Vector
        multiplication and the 3D-Vectors are rotated
        in space by the Quaternions.

        When multipplying a Quaternions array by a scalar
        or vector of different shape it is assumed to be
        a Quaternions by Scalars multiplication and the
        Quaternions are scaled using Slerp and the identity
        quaternions.
        """

        """ If Quaternions type do Quaternions * Quaternions """
        if isinstance(other, Quaternions):
            sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
            q0 = sqs[...,0]; q1 = sqs[...,1]
            q2 = sqs[...,2]; q3 = sqs[...,3]
            r0 = oqs[...,0]; r1 = oqs[...,1]
            r2 = oqs[...,2]; r3 = oqs[...,3]
            qs = np.empty(sqs.shape)
            qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
            qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
            qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
            qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
            return Quaternions(qs)

        """ If array type do Quaternions * Vectors """
        if isinstance(other, np.ndarray) and other.shape[-1] == 3:
            vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
            return (self * (vs * -self)).imaginaries

        """ If float do Quaternions * Scalars """
        if isinstance(other, np.ndarray) or isinstance(other, float):
            return Quaternions.slerp(Quaternions.id_like(self), self, other)

        raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))

    def __div__(self, other):
        """
        When a Quaternion type is supplied, division is defined
        as multiplication by the inverse of that Quaternion.

        When a scalar or vector is supplied it is defined
        as multiplicaion of one over the supplied value.
        Essentially a scaling.
        """
        if isinstance(other, Quaternions): return self * (-other)
        if isinstance(other, np.ndarray): return self * (1.0 / other)
        if isinstance(other, float): return self * (1.0 / other)
        raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))

    # Python 3 dispatches the `/` operator to __truediv__, not __div__.
    __truediv__ = __div__

    def __eq__(self, other): return self.qs == other.qs
    def __ne__(self, other): return self.qs != other.qs

    def __neg__(self):
        """ Invert Quaternions """
        return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))

    def __abs__(self):
        """ Unify Quaternions To Single Pole """
        qabs = self.normalized().copy()
        top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
        bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
        qabs.qs[top < bot] = -qabs.qs[top < bot]
        return qabs

    def __iter__(self): return iter(self.qs)
    def __len__(self): return len(self.qs)

    def __getitem__(self, k): return Quaternions(self.qs[k])
    def __setitem__(self, k, v): self.qs[k] = v.qs

    @property
    def lengths(self):
        return np.sum(self.qs**2.0, axis=-1)**0.5

    @property
    def reals(self):
        return self.qs[...,0]

    @property
    def imaginaries(self):
        return self.qs[...,1:4]

    @property
    def shape(self): return self.qs.shape[:-1]

    def repeat(self, n, **kwargs):
        return Quaternions(self.qs.repeat(n, **kwargs))

    def normalized(self):
        return Quaternions(self.qs / self.lengths[...,np.newaxis])

    def log(self):
        norm = abs(self.normalized())
        imgs = norm.imaginaries
        lens = np.sqrt(np.sum(imgs**2, axis=-1))
        lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
        return imgs * lens[...,np.newaxis]

    def constrained(self, axis):
        """ Closest rotation about the single given axis. """
        rl = self.reals
        im = np.sum(axis * self.imaginaries, axis=-1)

        t1 = -2 * np.arctan2(rl, im) + np.pi
        t2 = -2 * np.arctan2(rl, im) - np.pi

        top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
        bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
        img = self.dot(top) > self.dot(bot)

        ret = top.copy()
        ret[ img] = top[ img]
        ret[~img] = bot[~img]
        return ret

    def constrained_x(self): return self.constrained(np.array([1,0,0]))
    def constrained_y(self): return self.constrained(np.array([0,1,0]))
    def constrained_z(self): return self.constrained(np.array([0,0,1]))

    def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)

    def copy(self): return Quaternions(np.copy(self.qs))

    def reshape(self, s):
        # FIX: ndarray.reshape returns a new array; the old code discarded
        # the result, silently making this method a no-op.
        self.qs = self.qs.reshape(s)
        return self

    def interpolate(self, ws):
        # FIX: `log` is a method and must be called; previously the bound
        # method object itself was handed to np.average.
        return Quaternions.exp(np.average(abs(self).log(), axis=0, weights=ws))

    def euler(self, order='xyz'):
        # Only the 'xyz' and 'yzx' orderings are implemented; for other
        # orders convert via rotation matrices instead (see e.g.
        # OgreMatrix3.cpp in the Ogre source tree).
        q = self.normalized().qs
        q0 = q[...,0]
        q1 = q[...,1]
        q2 = q[...,2]
        q3 = q[...,3]
        es = np.zeros(self.shape + (3,))

        if order == 'xyz':
            es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
            es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
            es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
        elif order == 'yzx':
            es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
            es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
            es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
        else:
            raise NotImplementedError('Cannot convert from ordering %s' % order)

        return es

    def average(self):
        # Eigenvector-based averaging of unit quaternions.
        if len(self.shape) == 1:
            import numpy.core.umath_tests as ut
            system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
            w, v = np.linalg.eigh(system)
            qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
            return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
        else:
            raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')

    def angle_axis(self):
        norm = self.normalized()
        s = np.sqrt(1 - (norm.reals**2.0))
        s[s == 0] = 0.001

        angles = 2.0 * np.arccos(norm.reals)
        axis = norm.imaginaries / s[...,np.newaxis]

        return angles, axis

    def transforms(self):
        # Convert to 3x3 rotation matrices, shape (..., 3, 3).
        qw = self.qs[...,0]
        qx = self.qs[...,1]
        qy = self.qs[...,2]
        qz = self.qs[...,3]

        x2 = qx + qx; y2 = qy + qy; z2 = qz + qz
        xx = qx * x2; yy = qy * y2; wx = qw * x2
        xy = qx * y2; yz = qy * z2; wy = qw * y2
        xz = qx * z2; zz = qz * z2; wz = qw * z2

        m = np.empty(self.shape + (3,3))
        m[...,0,0] = 1.0 - (yy + zz)
        m[...,0,1] = xy - wz
        m[...,0,2] = xz + wy
        m[...,1,0] = xy + wz
        m[...,1,1] = 1.0 - (xx + zz)
        m[...,1,2] = yz - wx
        m[...,2,0] = xz - wy
        m[...,2,1] = yz + wx
        m[...,2,2] = 1.0 - (xx + yy)

        return m

    def ravel(self):
        return self.qs.ravel()

    @classmethod
    def id(cls, n):
        if isinstance(n, tuple):
            qs = np.zeros(n + (4,))
            qs[...,0] = 1.0
            return Quaternions(qs)

        # FIX: dropped the Python 2 `long` check, a NameError on Python 3.
        if isinstance(n, int):
            qs = np.zeros((n,4))
            qs[:,0] = 1.0
            return Quaternions(qs)

        raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))

    @classmethod
    def id_like(cls, a):
        qs = np.zeros(a.shape + (4,))
        qs[...,0] = 1.0
        return Quaternions(qs)

    @classmethod
    def exp(cls, ws):
        """ Exponential map: rotation vectors -> unit quaternions. """
        ts = np.sum(ws**2.0, axis=-1)**0.5
        ts[ts == 0] = 0.001
        ls = np.sin(ts) / ts

        qs = np.empty(ws.shape[:-1] + (4,))
        qs[...,0] = np.cos(ts)
        qs[...,1] = ws[...,0] * ls
        qs[...,2] = ws[...,1] * ls
        qs[...,3] = ws[...,2] * ls

        return Quaternions(qs).normalized()

    @classmethod
    def slerp(cls, q0s, q1s, a):
        """ Spherical linear interpolation between q0s and q1s by amount a. """
        fst, snd = cls._broadcast(q0s.qs, q1s.qs)
        fst, a = cls._broadcast(fst, a, scalar=True)
        snd, a = cls._broadcast(snd, a, scalar=True)

        # (renamed from `len`, which shadowed the builtin)
        dots = np.sum(fst * snd, axis=-1)

        neg = dots < 0.0
        dots[neg] = -dots[neg]
        snd[neg] = -snd[neg]

        amount0 = np.zeros(a.shape)
        amount1 = np.zeros(a.shape)

        # Fall back to linear interpolation when nearly parallel.
        linear = (1.0 - dots) < 0.01
        omegas = np.arccos(dots[~linear])
        sinoms = np.sin(omegas)

        amount0[ linear] = 1.0 - a[linear]
        amount1[ linear] = a[linear]
        amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
        amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms

        return Quaternions(
            amount0[...,np.newaxis] * fst +
            amount1[...,np.newaxis] * snd)

    @classmethod
    def between(cls, v0s, v1s):
        """ Shortest-arc rotations taking each vector in v0s onto v1s. """
        a = np.cross(v0s, v1s)
        w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
        return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()

    @classmethod
    def from_angle_axis(cls, angles, axis):
        axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
        sines = np.sin(angles / 2.0)[...,np.newaxis]
        cosines = np.cos(angles / 2.0)[...,np.newaxis]
        return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))

    @classmethod
    def from_euler(cls, es, order='xyz', world=False):

        axis = {
            'x' : np.array([1,0,0]),
            'y' : np.array([0,1,0]),
            'z' : np.array([0,0,1]),
        }

        q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
        q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
        q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])

        return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))

    @classmethod
    def from_transforms(cls, ts):
        """ Build quaternions from 3x3 rotation matrices. """
        d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]

        q0 = ( d0 + d1 + d2 + 1.0) / 4.0
        q1 = ( d0 - d1 - d2 + 1.0) / 4.0
        q2 = (-d0 + d1 - d2 + 1.0) / 4.0
        q3 = (-d0 - d1 + d2 + 1.0) / 4.0

        q0 = np.sqrt(q0.clip(0,None))
        q1 = np.sqrt(q1.clip(0,None))
        q2 = np.sqrt(q2.clip(0,None))
        q3 = np.sqrt(q3.clip(0,None))

        # Branch on the largest component for numerical stability.
        c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
        c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
        c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
        c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)

        q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
        q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
        q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])

        q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
        q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
        q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])

        q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
        q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
        q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])

        q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
        q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
        q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])

        qs = np.empty(ts.shape[:-2] + (4,))
        qs[...,0] = q0
        qs[...,1] = q1
        qs[...,2] = q2
        qs[...,3] = q3

        return cls(qs)
The provided code snippet includes necessary dependencies for implementing the `forward_rotations` function. Write a Python function `def forward_rotations(skel, rotations, rtpos=None, trim=True)` to solve the following problem:
input: rotations [T, J, 4], rtpos [T, 3] output: positions [T, J, 3]
Here is the function:
def forward_rotations(skel, rotations, rtpos=None, trim=True):
    """
    Forward kinematics: accumulate local joint rotations down the
    skeleton hierarchy to produce global joint positions.

    input: rotations [T, J, 4], rtpos [T, 3]
    output: positions [T, J, 3]
    """
    transforms = Quaternions(rotations).transforms() # [..., J, 3, 3]
    glb = np.zeros(rotations.shape[:-1] + (3,)) # [T, J, 3]
    if rtpos is not None:
        glb[..., 0, :] = rtpos
    # NOTE(review): assumes skel.topology lists each parent before its
    # children (root parent == -1) -- TODO confirm against Skel.
    for i, pi in enumerate(skel.topology):
        if pi == -1:
            continue
        # Child position = parent's accumulated rotation applied to the
        # bone offset, translated by the parent's global position.
        glb[..., i, :] = np.matmul(transforms[..., pi, :, :],
                                   skel.offset[i])
        glb[..., i, :] += glb[..., pi, :]
        # Accumulate this joint's global rotation in place.
        transforms[..., i, :, :] = np.matmul(transforms[..., pi, :, :],
                                             transforms[..., i, :, :])
    if trim:
        glb = glb[..., skel.chosen_joints, :]
    return glb | input: rotations [T, J, 4], rtpos [T, 3] output: positions [T, J, 3] |
16,103 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
def get_local3d(local_x, view_angle=None):
    """
    Build the unit basis vectors of the local rectangular coordinate frame.

    :param local_x: local x axis, (B *) [*, 0, *]
    :param view_angle: optional angles used to rotate the resulting frame.
    :return: numpy array of stacked basis vectors, shape (..., 3, 3).
    """
    # The local y axis is fixed to the world up direction.
    y_axis = np.zeros_like(local_x)  # [(B,) 3]
    y_axis[..., :] = np.array([0, 1, 0])
    # z completes the right-handed frame; normalize it to unit length.
    z_axis = np.cross(local_x, y_axis)
    z_axis = z_axis / np.linalg.norm(z_axis, axis=-1, keepdims=True)
    frame = np.stack([local_x, y_axis, z_axis], axis=-2)
    if view_angle is None:
        return frame
    return rotate_coordinates(frame, view_angle)
The provided code snippet includes necessary dependencies for implementing the `motion_projection` function. Write a Python function `def motion_projection(motion, local_x, view_angle=None)` to solve the following problem:
motion: motion in relative joint positions & global root positions [(B,) T, (J - 1) + 1, 3] local_x: [(B,) 3], local x-axis view_angle: [3], the angles to rotate output: motion_proj [(B,) J * 2, T]
Here is the function:
def motion_projection(motion, local_x, view_angle=None):
"""
motion: motion in relative joint positions & global root positions
[(B,) T, (J - 1) + 1, 3]
local_x: [(B,) 3], local x-axis
view_angle: [3], the angles to rotate
output: motion_proj [(B,) J * 2, T]
"""
local = get_local3d(local_x, view_angle) # [(B,) 3, 3]
T = motion.shape[-1]
# proj on xy-plane
# motion_proj = (local[[0, 1], :] @ motion) this used to be [2, 3] @ [J, 3, T]
# but T doesn't matter here ... what we care is the "3", using [T, J, 3, 1] would also be OK
motion = motion[..., np.newaxis] # [(B,) T, J, 3, 1]
motion_proj = local[..., np.newaxis, np.newaxis, [0, 1], :] @ motion # [(B,), 1, 1, 2, 3] @ [(B,), T, J, 3, 1] => [(B,), T, J, 2, 1]
motion_proj = motion_proj.reshape(motion_proj.shape[:-3] + (-1, )) # [(B,) T, -1]
motion_proj = motion_proj.swapaxes(-1, -2) # [(B,) J * 2, T]
return motion_proj | motion: motion in relative joint positions & global root positions [(B,) T, (J - 1) + 1, 3] local_x: [(B,) 3], local x-axis view_angle: [3], the angles to rotate output: motion_proj [(B,) J * 2, T] |
16,104 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
The provided code snippet includes necessary dependencies for implementing the `foot_contact_from_positions` function. Write a Python function `def foot_contact_from_positions(positions, fid_l=(3, 4), fid_r=(7, 8))` to solve the following problem:
positions: [T, J, 3], trimmed (only "chosen_joints") fid_l, fid_r: indices of feet joints (in "chosen_joints")
Here is the function:
def foot_contact_from_positions(positions, fid_l=(3, 4), fid_r=(7, 8)):
    """
    Detect per-frame foot contacts from joint positions.

    positions: [T, J, 3], trimmed (only "chosen_joints")
    fid_l, fid_r: indices of feet joints (in "chosen_joints")
    returns: [T, 4] float array, 1.0 where a foot joint's squared
        velocity is below threshold (foot considered planted).
    """
    fid_l, fid_r = np.array(fid_l), np.array(fid_r)
    velfactor = np.array([0.05, 0.05])  # squared-velocity threshold per foot joint
    feet_contact = []
    for fid_index in [fid_l, fid_r]:
        foot_vel = (positions[1:, fid_index] - positions[:-1, fid_index]) ** 2  # [T - 1, 2, 3]
        foot_vel = np.sum(foot_vel, axis=-1)  # [T - 1, 2]
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        foot_contact = (foot_vel < velfactor).astype(float)
        feet_contact.append(foot_contact)
    feet_contact = np.concatenate(feet_contact, axis=-1)  # [T - 1, 4]
    # Duplicate the first frame so the output length matches the input.
    feet_contact = np.concatenate((feet_contact[0:1].copy(), feet_contact), axis=0)
    return feet_contact # [T, 4] | positions: [T, J, 3], trimmed (only "chosen_joints") fid_l, fid_r: indices of feet joints (in "chosen_joints") |
16,105 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
def across_from_glb(positions, hips=(2, 6), sdrs=(14, 18)):
    """
    positions: positions [T, J, 3], trimmed (only "chosen_joints")
    hips, sdrs: left/right hip joints, left/right shoulder joints
    output: local x-axis for each frame [T, 3]
    """
    # Sum of hip-to-hip and shoulder-to-shoulder directions, then
    # normalized to unit length per frame.
    hip_dir = positions[..., hips[0], :] - positions[..., hips[1], :]
    sdr_dir = positions[..., sdrs[0], :] - positions[..., sdrs[1], :]
    across = hip_dir + sdr_dir  # [T, 3]
    norm = np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis]
    return across / norm
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been desgined such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self): return "Quaternions("+ str(self.qs) + ")"
def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other): return self * other
def __sub__(self, other): return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[...,0]; q1 = sqs[...,1];
q2 = sqs[...,2]; q3 = sqs[...,3];
r0 = oqs[...,0]; r1 = oqs[...,1];
r2 = oqs[...,2]; r3 = oqs[...,3];
qs = np.empty(sqs.shape)
qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other): return self.qs == other.qs
def __ne__(self, other): return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self): return iter(self.qs)
def __len__(self): return len(self.qs)
def __getitem__(self, k): return Quaternions(self.qs[k])
def __setitem__(self, k, v): self.qs[k] = v.qs
def lengths(self):
return np.sum(self.qs**2.0, axis=-1)**0.5
def reals(self):
return self.qs[...,0]
def imaginaries(self):
return self.qs[...,1:4]
def shape(self): return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[...,np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs**2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[...,np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[ img] = top[ img]
ret[~img] = bot[~img]
return ret
def constrained_x(self): return self.constrained(np.array([1,0,0]))
def constrained_y(self): return self.constrained(np.array([0,1,0]))
def constrained_z(self): return self.constrained(np.array([0,0,1]))
def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
def copy(self): return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
q = self.normalized().qs
q0 = q[...,0]
q1 = q[...,1]
q2 = q[...,2]
q3 = q[...,3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es
def average(self):
if len(self.shape) == 1:
import numpy.core.umath_tests as ut
system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
w, v = np.linalg.eigh(system)
qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
else:
raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')
def angle_axis(self):
norm = self.normalized()
s = np.sqrt(1 - (norm.reals**2.0))
s[s == 0] = 0.001
angles = 2.0 * np.arccos(norm.reals)
axis = norm.imaginaries / s[...,np.newaxis]
return angles, axis
def transforms(self):
qw = self.qs[...,0]
qx = self.qs[...,1]
qy = self.qs[...,2]
qz = self.qs[...,3]
x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
xx = qx * x2; yy = qy * y2; wx = qw * x2;
xy = qx * y2; yz = qy * z2; wy = qw * y2;
xz = qx * z2; zz = qz * z2; wz = qw * z2;
m = np.empty(self.shape + (3,3))
m[...,0,0] = 1.0 - (yy + zz)
m[...,0,1] = xy - wz
m[...,0,2] = xz + wy
m[...,1,0] = xy + wz
m[...,1,1] = 1.0 - (xx + zz)
m[...,1,2] = yz - wx
m[...,2,0] = xz - wy
m[...,2,1] = yz + wx
m[...,2,2] = 1.0 - (xx + yy)
return m
def ravel(self):
return self.qs.ravel()
def id(cls, n):
if isinstance(n, tuple):
qs = np.zeros(n + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
if isinstance(n, int) or isinstance(n, long):
qs = np.zeros((n,4))
qs[:,0] = 1.0
return Quaternions(qs)
raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
def id_like(cls, a):
qs = np.zeros(a.shape + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
def exp(cls, ws):
ts = np.sum(ws**2.0, axis=-1)**0.5
ts[ts == 0] = 0.001
ls = np.sin(ts) / ts
qs = np.empty(ws.shape[:-1] + (4,))
qs[...,0] = np.cos(ts)
qs[...,1] = ws[...,0] * ls
qs[...,2] = ws[...,1] * ls
qs[...,3] = ws[...,2] * ls
return Quaternions(qs).normalized()
def slerp(cls, q0s, q1s, a):
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
fst, a = cls._broadcast(fst, a, scalar=True)
snd, a = cls._broadcast(snd, a, scalar=True)
len = np.sum(fst * snd, axis=-1)
neg = len < 0.0
len[neg] = -len[neg]
snd[neg] = -snd[neg]
amount0 = np.zeros(a.shape)
amount1 = np.zeros(a.shape)
linear = (1.0 - len) < 0.01
omegas = np.arccos(len[~linear])
sinoms = np.sin(omegas)
amount0[ linear] = 1.0 - a[linear]
amount1[ linear] = a[linear]
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms
return Quaternions(
amount0[...,np.newaxis] * fst +
amount1[...,np.newaxis] * snd)
def between(cls, v0s, v1s):
a = np.cross(v0s, v1s)
w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()
def from_angle_axis(cls, angles, axis):
    """Build quaternions rotating by `angles` radians about `axis`."""
    norm = (np.sqrt(np.sum(axis ** 2, axis=-1)) + 1e-10)[..., np.newaxis]
    unit = axis / norm
    half = angles / 2.0
    w = np.cos(half)[..., np.newaxis]
    xyz = unit * np.sin(half)[..., np.newaxis]
    return Quaternions(np.concatenate([w, xyz], axis=-1))
def from_euler(cls, es, order='xyz', world=False):
    """Compose per-axis Euler angles es[..., 0..2] (radians) into quaternions."""
    basis = {
        'x': np.array([1, 0, 0]),
        'y': np.array([0, 1, 0]),
        'z': np.array([0, 0, 1]),
    }
    parts = [Quaternions.from_angle_axis(es[..., i], basis[ax])
             for i, ax in enumerate(order)]
    if world:
        return parts[2] * (parts[1] * parts[0])
    return parts[0] * (parts[1] * parts[2])
def from_transforms(cls, ts):
    # Convert stacked 3x3 rotation matrices ts (..., 3, 3) to quaternions.
    # Shepperd-style: compute all four component magnitudes from the trace
    # terms, then fix signs using the branch with the largest component
    # (most numerically stable).
    d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]
    q0 = ( d0 + d1 + d2 + 1.0) / 4.0
    q1 = ( d0 - d1 - d2 + 1.0) / 4.0
    q2 = (-d0 + d1 - d2 + 1.0) / 4.0
    q3 = (-d0 - d1 + d2 + 1.0) / 4.0
    # Clip before sqrt: rounding can push these slightly negative.
    q0 = np.sqrt(q0.clip(0,None))
    q1 = np.sqrt(q1.clip(0,None))
    q2 = np.sqrt(q2.clip(0,None))
    q3 = np.sqrt(q3.clip(0,None))
    # One mask per "largest component" case.
    c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
    c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
    c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
    c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
    # Sign recovery from the off-diagonal matrix entries.
    q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
    q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
    q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])
    q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
    q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
    q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])
    q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
    q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
    q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])
    q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
    q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
    q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])
    qs = np.empty(ts.shape[:-2] + (4,))
    qs[...,0] = q0
    qs[...,1] = q1
    qs[...,2] = q2
    qs[...,3] = q3
    return cls(qs)
class Pivots:
    """
    Pivots is an ndarray of angular rotations.

    This wrapper provides some functions for working with pivots.

    These are particularly useful as a number of atomic operations
    (such as adding or subtracting) cannot be achieved using the
    standard arithmetic and need to be defined differently to work
    correctly.
    """
    def __init__(self, ps): self.ps = np.array(ps)
    def __str__(self): return "Pivots("+ str(self.ps) + ")"
    def __repr__(self): return "Pivots("+ repr(self.ps) + ")"
    # Addition/subtraction combine angles and re-wrap into (-pi, pi].
    def __add__(self, other): return Pivots(np.arctan2(np.sin(self.ps + other.ps), np.cos(self.ps + other.ps)))
    def __sub__(self, other): return Pivots(np.arctan2(np.sin(self.ps - other.ps), np.cos(self.ps - other.ps)))
    def __mul__(self, other): return Pivots(self.ps * other.ps)
    def __div__(self, other): return Pivots(self.ps / other.ps)
    # Fixed: Python 3 dispatches `/` to __truediv__, which was missing.
    __truediv__ = __div__
    def __mod__(self, other): return Pivots(self.ps % other.ps)
    def __pow__(self, other): return Pivots(self.ps ** other.ps)
    def __lt__(self, other): return self.ps < other.ps
    def __le__(self, other): return self.ps <= other.ps
    def __eq__(self, other): return self.ps == other.ps
    def __ne__(self, other): return self.ps != other.ps
    def __ge__(self, other): return self.ps >= other.ps
    def __gt__(self, other): return self.ps > other.ps
    def __abs__(self): return Pivots(abs(self.ps))
    def __neg__(self): return Pivots(-self.ps)
    def __iter__(self): return iter(self.ps)
    def __len__(self): return len(self.ps)
    def __getitem__(self, k): return Pivots(self.ps[k])
    def __setitem__(self, k, v): self.ps[k] = v.ps

    def _ellipsis(self):
        # Full-slice index tuple matching self.shape, e.g. (slice(None),).
        return tuple(map(lambda x: slice(None), self.shape))

    def quaternions(self, plane='xz'):
        """Convert to Quaternions rotating about the axis normal to `plane`."""
        fa = self._ellipsis()
        axises = np.ones(self.ps.shape + (3,))
        axises[fa + ("xyz".index(plane[0]),)] = 0.0
        axises[fa + ("xyz".index(plane[1]),)] = 0.0
        return Quaternions.from_angle_axis(self.ps, axises)

    def directions(self, plane='xz'):
        """Unit direction vectors in `plane` per angle (assumes self.ps is 1-D)."""
        dirs = np.zeros((len(self.ps), 3))
        dirs[..., "xyz".index(plane[0])] = np.sin(self.ps)
        dirs[..., "xyz".index(plane[1])] = np.cos(self.ps)
        return dirs

    def normalized(self):
        """Angles wrapped into [-pi, pi]."""
        xs = np.copy(self.ps)
        while np.any(xs > np.pi): xs[xs > np.pi] = xs[xs > np.pi] - 2 * np.pi
        while np.any(xs < -np.pi): xs[xs < -np.pi] = xs[xs < -np.pi] + 2 * np.pi
        return Pivots(xs)

    def interpolate(self, ws):
        # Weighted circular mean via averaged direction vectors.
        # Fixed: `directions` is a method and must be called; previously the
        # bound-method object itself was passed to np.average (TypeError).
        dir = np.average(self.directions(), weights=ws, axis=0)
        # NOTE(review): atan2(dir[2], dir[0]) is the reverse argument order of
        # from_directions' atan2(x-component, z-component); preserved as-is.
        return np.arctan2(dir[2], dir[0])

    def copy(self):
        return Pivots(np.copy(self.ps))

    @property
    def shape(self):
        # Restored @property: _ellipsis() iterates self.shape as a tuple.
        return self.ps.shape

    @classmethod
    def from_quaternions(cls, qs, forward='z', plane='xz'):
        """Pivot angles of the `forward` axis after rotation by quaternions `qs`."""
        # Restored @classmethod: callers invoke Pivots.from_quaternions(q).
        ds = np.zeros(qs.shape + (3,))
        ds[...,'xyz'.index(forward)] = 1.0
        return Pivots.from_directions(qs * ds, plane=plane)

    @classmethod
    def from_directions(cls, ds, plane='xz'):
        """Pivot angles of direction vectors `ds` projected onto `plane`."""
        ys = ds[...,'xyz'.index(plane[0])]
        xs = ds[...,'xyz'.index(plane[1])]
        return Pivots(np.arctan2(ys, xs))
The provided code snippet includes necessary dependencies for implementing the `y_rotation_from_positions` function. Write a Python function `def y_rotation_from_positions(positions, hips=(2, 6), sdrs=(14, 18))` to solve the following problem:
input: positions [T, J, 3] output: quaters: [T, 1, 4], quaternions that rotate the character around the y-axis to face [0, 0, 1] pivots: [T, 1] in [0, 2pi], the angle from [0, 0, 1] to the current facing direction
Here is the function:
def y_rotation_from_positions(positions, hips=(2, 6), sdrs=(14, 18)):
    """
    input: positions [T, J, 3]
    output: quaters: [T, 1, 4], quaternions that rotate the character around the y-axis to face [0, 0, 1]
    pivots: [T, 1] in [0, 2pi], the angle from [0, 0, 1] to the current facing direction
    """
    # Body "across" vector derived from hip and shoulder joints.
    across = across_from_glb(positions, hips=hips, sdrs=sdrs)
    direction_filterwidth = 20
    # Forward = horizontal vector perpendicular to `across`, smoothed over
    # time to suppress frame-to-frame jitter, then normalized.
    forward = np.cross(across, np.array([[0, 1, 0]]))
    forward = filters.gaussian_filter1d(forward, direction_filterwidth, axis=0, mode='nearest')
    forward = forward / np.sqrt((forward ** 2).sum(axis=-1))[..., np.newaxis]
    # Rotation taking the current forward direction onto world +z.
    target = np.tile(np.array([0, 0, 1]), forward.shape[:-1] + (1, ))
    quaters = Quaternions.between(forward, target)[..., np.newaxis, :] # [T, 4] -> [T, 1, 4]
    pivots = Pivots.from_quaternions(-quaters).ps # from "target"[0, 0, 1] to current facing direction "forward"
    return quaters, pivots | input: positions [T, J, 3] output: quaters: [T, 1, 4], quaternions that rotate the character around the y-axis to face [0, 0, 1] pivots: [T, 1] in [0, 2pi], the angle from [0, 0, 1] to the current facing direction |
16,106 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
def parse_args():
    # Command-line options for this probe script: a BVH file to inspect
    # and/or a packed dataset path; both default to None.
    parser = argparse.ArgumentParser("test")
    parser.add_argument('--bvh_in', type=str, default=None)
    parser.add_argument('--dataset', type=str, default=None)
    return parser.parse_args() | null |
16,107 | import sys
import os
from os.path import join as pjoin
import argparse
import numpy as np
import scipy.ndimage.filters as filters
from load_skeleton import Skel
from Quaternions_old import Quaternions
from Pivots import Pivots
import BVH
from probe.anim_view import visualize
def phase_from_ft(foot_contact, is_debug=False):
    """
    foot_contact: [T, 4] -> take joints 0, 2 as standards
    phase = left foot in contact (0) --> right foot in contact (pi) --> left foot in contact (2pi),
    in range [0, 2pi)

    Returns an array of shape [T, 1] with the per-frame phase in radians.
    """
    num_circles = 0
    circle_length = 0
    total_length = len(foot_contact)
    # Fixed: np.int was removed in numpy >= 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    ft = foot_contact[:, [0, 2]].astype(int)
    ft_start = np.zeros((total_length, 2))
    phases = np.zeros((total_length, 1))
    # Mark 0 -> 1 transitions (foot-strike onsets) for each of the two feet.
    for j in range(2):
        for i in range(1, total_length):
            ft_start[i, j] = (ft[i - 1, j] == 0 and ft[i, j] == 1)
    if is_debug:
        print('ft_start,', ft_start)
    # Collect all onset frames and the average gap between consecutive
    # onsets ("half-phase length": one left-right alternation = half cycle).
    last, beg_i = -1, -1
    starts = []
    for i in range(total_length):
        if ft_start[i, 0] or ft_start[i, 1]:
            if last != -1:
                num_circles += 1
                circle_length += i - last
            else:
                beg_i = i
            last = i
            starts.append(i)
    avg_circle = 0 if num_circles == 0 else circle_length * 1.0 / num_circles
    if is_debug:
        print("%d circles, total length = %d, avg length = %.3lf" % (num_circles, circle_length, avg_circle))
    if len(starts) == 0:  # phase never changed: no foot-strikes at all
        return phases
    # Frames [0, beg_i - 1] belong to a first, incomplete half-cycle; anchor
    # it at a virtual onset before frame 0 using the average gap.
    # NOTE(review): min(0, ...) keeps the anchor at <= 0 — presumably
    # intentional so the leading ramp never starts mid-range.
    prev_pos = min(0, beg_i - avg_circle)
    prev_val = 0 if ft_start[beg_i, 1] == 1 else 1  # 0 if next step is on the right
    cir_i = 0
    next_pos = starts[cir_i]
    # Linear ramp between consecutive onsets; prev_val alternates 0/1 so the
    # phase sweeps 0 -> 1 -> 2 (then wraps) in half-cycle units.
    for i in range(total_length):
        if i == next_pos:
            prev_pos = next_pos
            prev_val = 1 - prev_val
            cir_i += 1
            if cir_i >= len(starts):
                # Past the last onset: extrapolate with the average gap.
                next_pos = max(total_length + 1, next_pos + avg_circle)
            else:
                next_pos = starts[cir_i]
        phases[i] = prev_val + (i - prev_pos) * 1.0 / (next_pos - prev_pos)
    phases *= np.pi  # half-cycle units -> radians, range [0, 2pi)
    if is_debug:
        print('phases:', phases)
    return phases
class AnimationData:
    """
    Canonical Representation:
        Skeleton
        [T, Jo * 4 + 4 global params + 4 foot_contact]

    Per frame the feature vector is: joint-local quaternions (Jo * 4),
    root position (3), root y-rotation pivot (1), foot contacts (4).
    """
    def __init__(self, full, skel=None, frametime=1/30):
        if skel is None:
            skel = Skel()
        self.skel = skel
        self.frametime = frametime
        self.len = len(full)
        self.rotations = full[:, :-8].reshape(self.len, -1, 4) # [T, Jo, 4]
        assert self.rotations.shape[1] == len(self.skel.topology), "Rotations do not match the skeleton."
        # Re-normalize quaternions (inputs may be slightly off unit length).
        self.rotations /= np.sqrt(np.sum(self.rotations ** 2, axis=-1))[..., np.newaxis]
        self.rt_pos = full[:, -8:-5] # [T, 3]
        self.rt_rot = full[:, -5:-4] # [T, 1]
        self.foot_contact = full[:, -4:] # [T, 4]
        # Rebuild the canonical feature matrix from the normalized parts.
        self.full = np.concatenate([self.rotations.reshape(self.len, -1), self.rt_pos, self.rt_rot, self.foot_contact], axis=-1)
        # Lazily-computed caches:
        self.phases = None # [T, 1]
        self.local_x = None # [3]
        self.positions_for_proj = None # [T, (J - 1) + 1, 3], trimmed and not forward facing
        self.global_positions = None

    def get_full(self):
        # Canonical [T, Jo*4 + 4 + 4] representation.
        return self.full

    def get_root_positions(self):
        return self.rt_pos

    def get_original_rotations(self, rt_rot=None):
        # Undo the forward-facing canonicalization: re-apply the stored
        # y-axis pivot to the root joint's quaternion.
        if rt_rot is None:
            rt_rot = self.rt_rot
        yaxis_rotations = Quaternions(np.array(Pivots(rt_rot).quaternions()))
        rt_rotations = Quaternions(self.rotations[:, :1]) # [T, 1, 4]
        rt_rotations = np.array(yaxis_rotations * rt_rotations)
        rt_rotations /= np.sqrt((rt_rotations ** 2).sum(axis=-1))[..., np.newaxis]
        return np.concatenate((rt_rotations, self.rotations[:, 1:]), axis=1) # [T, J, 4]

    def get_foot_contact(self, transpose=False):
        if transpose:
            return self.foot_contact.transpose(1, 0) # [4, T]
        else:
            return self.foot_contact

    def get_phases(self):
        # Gait phase derived from foot contacts (cached).
        if self.phases is None:
            self.phases = phase_from_ft(self.foot_contact)
        return self.phases

    def get_local_x(self):
        # Mean sideways (local x) direction in the ground plane (cached).
        if self.local_x is None:
            forward_pivot = np.mean(self.rt_rot, axis=0) # [T, 1] -> [1]
            forward_dir = Pivots(forward_pivot).directions()
            self.local_x = np.cross(np.array((0, 1, 0)), forward_dir).reshape(-1)
        return self.local_x

    def get_content_input(self):
        # Quaternion-based features, channels-first for the network.
        rotations = self.rotations.reshape(self.len, -1) # [T, Jo * 4]
        return np.concatenate((rotations, self.rt_pos, self.rt_rot), axis=-1).transpose(1, 0) # [Jo * 4 + 3 + 1, T]

    def get_style3d_input(self):
        # 3D joint-position features (root joint dropped), channels-first.
        pos3d = forward_rotations(self.skel, self.rotations, trim=True)[:, 1:] # [T, J - 1, 3]
        pos3d = pos3d.reshape((len(pos3d), -1)) # [T, (J - 1) * 3]
        return np.concatenate((pos3d, self.rt_pos, self.rt_rot), axis=-1).transpose(1, 0) # [(J - 1) * 3 + 3 + 1, T]

    def get_projections(self, view_angles, scales=None):
        # 2D projections of the (original-facing) positions from several
        # camera angles, each optionally scaled; result is [V, J * 2, T].
        if self.positions_for_proj is None:
            rotations = self.get_original_rotations()
            positions = forward_rotations(self.skel, rotations, trim=True)[:, 1:] # [T, J - 1, 3]
            # Root position appended as the last "joint".
            positions = np.concatenate((positions, self.rt_pos[:, np.newaxis, :]), axis=1) # [T, J, 3]
            self.positions_for_proj = positions.copy()
        else:
            positions = self.positions_for_proj.copy()
        projections = []
        if scales is None:
            scales = np.ones((len(view_angles)))
        for angle, scale in zip(view_angles, scales):
            projections.append(motion_projection(positions, self.get_local_x(), angle) * scale)
        projections = np.stack(projections, axis=-3) # [V, J * 2, T]
        return projections

    def get_global_positions(self, trim=True): # for visualization
        if not trim:
            return forward_rotations(self.skel, self.get_original_rotations(), rtpos=self.rt_pos, trim=False)
        if self.global_positions is None:
            rotations = self.get_original_rotations()
            positions = forward_rotations(self.skel, rotations, rtpos=self.rt_pos, trim=True)
            self.global_positions = positions
        return self.global_positions

    def get_velocity_factor(self):
        # Mean over time of the max per-joint speed — a scalar "how fast
        # does this clip move" summary.
        positions = forward_rotations(self.skel, self.get_original_rotations(), trim=True)[:, 1:] # [T, J - 1, 3]
        velocity = positions[1:] - positions[:-1] # [T - 1, J - 1, 3]
        velocity = np.sqrt(np.sum(velocity ** 2, axis=-1)) # [T - 1, J - 1]
        max_velocity = np.max(velocity, axis=-1) # [T - 1]
        velocity_factor = np.mean(max_velocity)
        return velocity_factor

    def get_BVH(self, forward=True):
        # Export as a BVH-ready (anim, names, frametime) tuple.
        # NOTE(review): when forward=True this mutates self.rt_rot /
        # self.rt_pos in place (rt_rot -= ..., rt_pos column swaps).
        rt_pos = self.rt_pos # [T, 3]
        rt_rot = self.rt_rot # [T, 1]
        if forward: # choose a direction in [z+, x+, z-, x-], which is closest to "forward", as the new z+
            directions = np.array(range(4)) * np.pi * 0.5 # [0, 1, 2, 3] * 0.5pi
            diff = rt_rot[np.newaxis, :] - directions[:, np.newaxis, np.newaxis] # [1, T, 1] - [4, 1, 1]
            diff = np.minimum(np.abs(diff), 2.0 * np.pi - np.abs(diff))
            diff = np.sum(diff, axis=(-1, -2)) # [4, T, 1] -> [4]
            new_forward = np.argmin(diff)
            rt_rot -= new_forward * np.pi * 0.5
            # Rotate root positions by new_forward * 90 degrees about y.
            for d in range(new_forward):
                tmp = rt_pos[..., 0].copy()
                rt_pos[..., 0] = -rt_pos[..., 2].copy()
                rt_pos[..., 2] = tmp
        rotations = self.get_original_rotations(rt_rot=rt_rot)
        rest, names, _ = self.skel.rest_bvh
        anim = rest.copy()
        anim.positions = anim.positions.repeat(self.len, axis=0)
        anim.positions[:, 0, :] = rt_pos
        anim.rotations.qs = rotations
        return (anim, names, self.frametime)

    def from_network_output(cls, input):
        # Alternate constructor from a channels-first network output; pads
        # the missing foot-contact columns with zeros.
        # NOTE(review): uses `cls` — presumably @classmethod, stripped here.
        input = input.transpose(1, 0)
        input = np.concatenate((input, np.zeros((len(input), 4))), axis=-1)
        return cls(input)

    def from_rotations_and_root_positions(cls, rotations, root_positions, skel=None, frametime=1/30):
        """
        rotations: [T, J, 4]
        root_positions: [T, 3]

        Builds the canonical representation: derives foot contacts and the
        facing direction, and canonicalizes the root rotation to face +z.
        NOTE(review): uses `cls` — presumably @classmethod, stripped here.
        """
        if skel is None:
            skel = Skel()
        rotations /= np.sqrt(np.sum(rotations ** 2, axis=-1))[..., np.newaxis]
        global_positions = forward_rotations(skel, rotations, root_positions, trim=True)
        foot_contact = foot_contact_from_positions(global_positions, fid_l=skel.fid_l, fid_r=skel.fid_r)
        quaters, pivots = y_rotation_from_positions(global_positions, hips=skel.hips, sdrs=skel.sdrs)
        root_rotations = Quaternions(rotations[:, 0:1, :].copy()) # [T, 1, 4]
        root_rotations = quaters * root_rotations # facing [0, 0, 1]
        root_rotations = np.array(root_rotations).reshape((-1, 1, 4)) # [T, 1, 4]
        rotations[:, 0:1, :] = root_rotations
        full = np.concatenate([rotations.reshape((len(rotations), -1)), root_positions, pivots, foot_contact], axis=-1)
        return cls(full, skel, frametime)

    def from_BVH(cls, filename, downsample=4, skel=None, trim_scale=None):
        # Alternate constructor from a BVH file, downsampled in time and
        # optionally trimmed to a multiple of trim_scale frames.
        # NOTE(review): uses `cls` — presumably @classmethod, stripped here.
        anim, names, frametime = BVH.load(filename)
        anim = anim[::downsample]
        if trim_scale is not None:
            length = (len(anim) // trim_scale) * trim_scale
            anim = anim[:length]
        rotations = np.array(anim.rotations) # [T, J, 4]
        root_positions = anim.positions[:, 0, :]
        return cls.from_rotations_and_root_positions(rotations, root_positions, skel=skel, frametime=frametime * downsample)
class Skel:
    """Skeleton description loaded from a YAML config file."""
    def __init__(self, filename=os.path.join(BASEPATH, "..", "style_transfer", "global_info", "skeleton_CMU.yml")):
        # NOTE: the default path is built at import time from a module-level
        # BASEPATH defined elsewhere in this module.
        f = open(filename, "r")  # NOTE(review): handle is never closed
        skel = yaml.load(f, Loader=yaml.Loader)
        # Rest-pose BVH referenced by the config, relative to the config dir.
        self.bvh_name = os.path.join(os.path.dirname(filename), skel['BVH'])
        self.rest_bvh = BVH.load(self.bvh_name)
        self.offset = np.array(skel['offsets'])    # per-joint offset vectors
        self.topology = np.array(skel['parents'])  # parent index per joint
        self.chosen_joints = np.array(skel['chosen_joints'])
        self.chosen_parents = np.array(skel['chosen_parents'])
        # Key joint ids: feet, hips/shoulders (for facing), head.
        self.fid_l, self.fid_r = skel['left_foot'], skel['right_foot']
        self.hips, self.sdrs = skel['hips'], skel['shoulders']
        self.head = skel['head']
        self.visualization = skel['visualization']
def visualize(data, save=False, save_path=None):
    """data: dict {title: {motion:xxx, foot_contact:xxx}}"""
    clips = []
    for idx, (title, entry) in enumerate(data.items()):
        frames = to_float(entry['motion']).copy()
        frames = rotate_motion(frames)  # [T, J, 2/3]
        contacts = entry['foot_contact']  # [T, 4]
        clip = Motion4Anim(title,
                           frames,
                           contacts,
                           limb_colors[idx],
                           joint_colors[idx]
                           )
        clips.append(clip)
    plot_motions(clips, save=save, save_path=save_path)
def test_all(args):
    # Ad-hoc probe/regression helpers for the AnimationData pipeline.
    def mse(a, b):
        # Sum of squared differences (not a mean, despite the name).
        return np.sum((a - b) ** 2)
    def test_phase_from_ft():
        # Synthetic alternating gait: left/right strikes every 8 frames,
        # offset by 4; prints the derived phases for inspection.
        pace = np.zeros((100, 1), dtype=np.int)
        pace[::8] = 1
        left = pace[:-4]
        right = pace[4:]
        phase_from_ft(np.concatenate([left, left, right, right], axis=-1), is_debug=True)
    def BVH_and_back(filename):
        # Round-trip a BVH file through AnimationData: compare recovered
        # rotations/root positions, exercise the feature extractors and
        # projections, then export back to BVH.
        anim, names, frametime = BVH.load(filename)
        anim = anim[::4]
        rotations = np.array(anim.rotations) # [T, J, 4]
        root_positions = anim.positions[:, 0, :]
        anim_a = AnimationData.from_BVH(filename)
        rotations = rotations / np.sqrt(np.sum(rotations ** 2, axis=-1))[..., np.newaxis]
        print(f'rotations: {mse(anim_a.get_original_rotations(), rotations)}')
        print(f'root_positions: {mse(anim_a.get_root_positions(), root_positions)}')
        content_input = anim_a.get_content_input()
        style3d_input = anim_a.get_style3d_input()
        # Random camera angles and scales for the 2D projections.
        view_points = ()
        for i in range(7):
            view_points += ((0, -np.pi / 2 + i * np.pi / 6, 0), )
        view_points = ()
        scales = ()
        for i in range(4):
            view_points += ((0, -np.pi / 2 + float(np.random.rand(1)) * np.pi, 0), )
            scales += (float(np.random.rand(1)) * 0.4 + 0.8, )
        style2d_input = anim_a.get_projections(view_points, scales)
        print(f'content {content_input.shape}, style3d {style3d_input.shape}, style2d {style2d_input.shape}')
        foot_contact = anim_a.get_foot_contact()
        T = content_input.shape[-1]
        # Rebuild joint positions in several reference frames for viewing.
        inplace_no_rot = style3d_input.transpose(1, 0)[:, :-4].reshape(T, -1, 3)
        inplace_no_rot = np.concatenate((np.zeros((T, 1, 3)), inplace_no_rot), axis=1)
        inplace = anim_a.positions_for_proj[:, :-1, :]
        inplace = np.concatenate((np.zeros((T, 1, 3)), inplace), axis=1)
        original = anim_a.get_global_positions()
        print(f'inplace no rot {inplace_no_rot.shape}, inplace {inplace.shape}, original {original.shape}')
        """
        visualize({
            "inplace_no_rot": {"motion": inplace_no_rot, "foot_contact": foot_contact},
            "inplace": {"motion": inplace, "foot_contact": foot_contact},
            "original": {"motion": original, "foot_contact": foot_contact},
        })
        """
        motion_proj = {}
        for (view_point, scale, proj) in zip(view_points, scales, style2d_input): # [V, J * 2, T]
            proj = proj.copy().transpose(1, 0).reshape(T, -1, 2) # [T, J, 2]
            # Move the root (last joint) to the front, then offset the rest
            # by it to get absolute 2D positions.
            proj = np.concatenate([proj[:, -1:], proj[:, :-1]], axis=1)
            ori_proj = np.concatenate([proj[:, :1], proj[:, 1:] + proj[:, :1].copy()], axis=1)
            proj[:, :1] = 0
            motion_proj[f'angle: {(view_point[1] / np.pi * 180):3f} scale: {scale:3f}'] = {"motion": ori_proj, "foot_contact": foot_contact}
            """
            visualize({
                "inplace_proj": {"motion": proj, "foot_contact": foot_contact},
                "original_proj": {"motion": ori_proj, "foot_contact": foot_contact}
            })
            """
        visualize(motion_proj)
        BVH.save("bla.bvh", *anim_a.get_BVH())
    def check_velocity(dataset):
        # Tabulate the mean velocity factor per (content, style) pair and
        # dump it as a CSV.
        skel = Skel()
        motions, labels, metas = dataset["motion"], dataset["style"], dataset["meta"]
        style_names = list(set(metas["style"]))
        content_names = list(set(metas["content"]))
        info = {content: {style: [] for style in style_names} for content in content_names}
        for i, motion in enumerate(motions):
            anim = AnimationData(motion, skel=skel)
            vel = anim.get_velocity_factor()
            info[metas["content"][i]][metas["style"][i]].append(vel)
        for content in info:
            all = []
            for style in info[content]:
                all += info[content][style]
                info[content][style] = np.mean(info[content][style])
            info[content]["all"] = np.mean(all)
        with open("probe_velocity.csv", "w") as f:
            columns = ['all'] + style_names
            f.write(',' + ','.join(columns) + '\n')
            for content in info:
                values = [f'{info[content][key]}' for key in columns]
                f.write(','.join([content] + values) + '\n')
    dataset = np.load(args.dataset, allow_pickle=True)["trainfull"].item()
    check_velocity(dataset)
    # BVH_and_back(args.bvh_in) | null |
16,108 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been desgined such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self): return "Quaternions("+ str(self.qs) + ")"
def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other): return self * other
def __sub__(self, other): return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[...,0]; q1 = sqs[...,1];
q2 = sqs[...,2]; q3 = sqs[...,3];
r0 = oqs[...,0]; r1 = oqs[...,1];
r2 = oqs[...,2]; r3 = oqs[...,3];
qs = np.empty(sqs.shape)
qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other): return self.qs == other.qs
def __ne__(self, other): return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self): return iter(self.qs)
def __len__(self): return len(self.qs)
def __getitem__(self, k): return Quaternions(self.qs[k])
def __setitem__(self, k, v): self.qs[k] = v.qs
def lengths(self):
return np.sum(self.qs**2.0, axis=-1)**0.5
def reals(self):
return self.qs[...,0]
def imaginaries(self):
return self.qs[...,1:4]
def shape(self): return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[...,np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs**2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[...,np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[ img] = top[ img]
ret[~img] = bot[~img]
return ret
def constrained_x(self): return self.constrained(np.array([1,0,0]))
def constrained_y(self): return self.constrained(np.array([0,1,0]))
def constrained_z(self): return self.constrained(np.array([0,0,1]))
def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
def copy(self): return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
q = self.normalized().qs
q0 = q[...,0]
q1 = q[...,1]
q2 = q[...,2]
q3 = q[...,3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es
def average(self):
if len(self.shape) == 1:
import numpy.core.umath_tests as ut
system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
w, v = np.linalg.eigh(system)
qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
else:
raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')
def angle_axis(self):
norm = self.normalized()
s = np.sqrt(1 - (norm.reals**2.0))
s[s == 0] = 0.001
angles = 2.0 * np.arccos(norm.reals)
axis = norm.imaginaries / s[...,np.newaxis]
return angles, axis
def transforms(self):
qw = self.qs[...,0]
qx = self.qs[...,1]
qy = self.qs[...,2]
qz = self.qs[...,3]
x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
xx = qx * x2; yy = qy * y2; wx = qw * x2;
xy = qx * y2; yz = qy * z2; wy = qw * y2;
xz = qx * z2; zz = qz * z2; wz = qw * z2;
m = np.empty(self.shape + (3,3))
m[...,0,0] = 1.0 - (yy + zz)
m[...,0,1] = xy - wz
m[...,0,2] = xz + wy
m[...,1,0] = xy + wz
m[...,1,1] = 1.0 - (xx + zz)
m[...,1,2] = yz - wx
m[...,2,0] = xz - wy
m[...,2,1] = yz + wx
m[...,2,2] = 1.0 - (xx + yy)
return m
def ravel(self):
return self.qs.ravel()
def id(cls, n):
if isinstance(n, tuple):
qs = np.zeros(n + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
if isinstance(n, int) or isinstance(n, long):
qs = np.zeros((n,4))
qs[:,0] = 1.0
return Quaternions(qs)
raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
def id_like(cls, a):
qs = np.zeros(a.shape + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
def exp(cls, ws):
ts = np.sum(ws**2.0, axis=-1)**0.5
ts[ts == 0] = 0.001
ls = np.sin(ts) / ts
qs = np.empty(ws.shape[:-1] + (4,))
qs[...,0] = np.cos(ts)
qs[...,1] = ws[...,0] * ls
qs[...,2] = ws[...,1] * ls
qs[...,3] = ws[...,2] * ls
return Quaternions(qs).normalized()
def slerp(cls, q0s, q1s, a):
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
fst, a = cls._broadcast(fst, a, scalar=True)
snd, a = cls._broadcast(snd, a, scalar=True)
len = np.sum(fst * snd, axis=-1)
neg = len < 0.0
len[neg] = -len[neg]
snd[neg] = -snd[neg]
amount0 = np.zeros(a.shape)
amount1 = np.zeros(a.shape)
linear = (1.0 - len) < 0.01
omegas = np.arccos(len[~linear])
sinoms = np.sin(omegas)
amount0[ linear] = 1.0 - a[linear]
amount1[ linear] = a[linear]
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms
return Quaternions(
amount0[...,np.newaxis] * fst +
amount1[...,np.newaxis] * snd)
def between(cls, v0s, v1s):
a = np.cross(v0s, v1s)
w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()
def from_angle_axis(cls, angles, axis):
axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
sines = np.sin(angles / 2.0)[...,np.newaxis]
cosines = np.cos(angles / 2.0)[...,np.newaxis]
return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))
def from_euler(cls, es, order='xyz', world=False):
axis = {
'x' : np.array([1,0,0]),
'y' : np.array([0,1,0]),
'z' : np.array([0,0,1]),
}
q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])
return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
    # NOTE(review): takes `cls` (and the tail calls `cls(qs)`) but no
    # @classmethod decorator is visible in this chunk -- presumably stripped
    # during extraction; confirm upstream.
    def from_transforms(cls, ts):
        """Quaternions from (..., 3, 3) rotation matrices.

        Computes all four component magnitudes from the matrix diagonal, then
        fixes signs branch-wise relative to whichever component is largest
        (c0..c3 masks) for numerical stability.
        """
        d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]
        # Squared component magnitudes from the trace combinations.
        q0 = ( d0 + d1 + d2 + 1.0) / 4.0
        q1 = ( d0 - d1 - d2 + 1.0) / 4.0
        q2 = (-d0 + d1 - d2 + 1.0) / 4.0
        q3 = (-d0 - d1 + d2 + 1.0) / 4.0
        # Clip guards tiny negatives caused by floating-point error.
        q0 = np.sqrt(q0.clip(0,None))
        q1 = np.sqrt(q1.clip(0,None))
        q2 = np.sqrt(q2.clip(0,None))
        q3 = np.sqrt(q3.clip(0,None))
        # One mask per "largest component" case.
        c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
        c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
        c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
        c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
        # Resolve the sign ambiguity from off-diagonal matrix entries.
        q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
        q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
        q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])
        q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
        q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
        q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])
        q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
        q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
        q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])
        q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
        q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
        q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])
        qs = np.empty(ts.shape[:-2] + (4,))
        qs[...,0] = q0
        qs[...,1] = q1
        qs[...,2] = q2
        qs[...,3] = q3
        return cls(qs)
The provided code snippet includes necessary dependencies for implementing the `load_to_maya` function. Write a Python function `def load_to_maya(anim, names=None, radius=0.5)` to solve the following problem:
Load Animation Object into Maya as Joint Skeleton; loads each frame as a new keyframe in Maya. If the animation is too slow or too fast, perhaps the framerate needs adjusting before being loaded such that it matches the Maya scene framerate. Parameters ---------- anim : Animation Animation to load into Scene names : [str] Optional list of Joint names for Skeleton Returns ------- List of Maya Joint Nodes loaded into scene
Here is the function:
def load_to_maya(anim, names=None, radius=0.5):
    """
    Load Animation Object into Maya as Joint Skeleton;
    loads each frame as a new keyframe in Maya.
    If the animation is too slow or too fast perhaps
    the framerate needs adjusting before being loaded
    such that it matches the maya scene framerate.

    Parameters
    ----------
    anim : Animation
        Animation to load into Scene
    names : [str]
        Optional list of Joint names for Skeleton
    radius : float
        Display radius passed to `pm.joint`

    Returns
    -------
    List of Maya Joint Nodes loaded into scene
    """
    import pymel.core as pm
    joints = []
    # Maya keyframes are 1-based.
    frames = range(1, len(anim)+1)
    if names is None: names = ["joint_" + str(i) for i in range(len(anim.parents))]
    for i, offset, orient, parent, name in zip(range(len(anim.offsets)), anim.offsets, anim.orients, anim.parents, names):
        # Select the parent so the new joint is created under it (roots under nothing).
        if parent < 0:
            pm.select(d=True)
        else:
            pm.select(joints[parent])
        joint = pm.joint(n=name, p=offset, relative=True, radius=radius)
        # Maya takes (x, y, z, w); `orient` is stored (w, x, y, z).
        joint.setOrientation([orient[1], orient[2], orient[3], orient[0]])
        # Key per-frame joint rotations (pre-multiplied by the inverse orient).
        curvex = pm.nodetypes.AnimCurveTA(n=name + "_rotateX")
        curvey = pm.nodetypes.AnimCurveTA(n=name + "_rotateY")
        curvez = pm.nodetypes.AnimCurveTA(n=name + "_rotateZ")
        jrotations = (-Quaternions(orient[np.newaxis]) * anim.rotations[:,i]).euler()
        curvex.addKeys(frames, jrotations[:,0])
        curvey.addKeys(frames, jrotations[:,1])
        curvez.addKeys(frames, jrotations[:,2])
        pm.connectAttr(curvex.output, joint.rotateX)
        pm.connectAttr(curvey.output, joint.rotateY)
        pm.connectAttr(curvez.output, joint.rotateZ)
        # Key per-frame joint translations.
        offsetx = pm.nodetypes.AnimCurveTU(n=name + "_translateX")
        offsety = pm.nodetypes.AnimCurveTU(n=name + "_translateY")
        offsetz = pm.nodetypes.AnimCurveTU(n=name + "_translateZ")
        offsetx.addKeys(frames, anim.positions[:,i,0])
        offsety.addKeys(frames, anim.positions[:,i,1])
        offsetz.addKeys(frames, anim.positions[:,i,2])
        pm.connectAttr(offsetx.output, joint.translateX)
        pm.connectAttr(offsety.output, joint.translateY)
        pm.connectAttr(offsetz.output, joint.translateZ)
        joints.append(joint)
    return joints
16,109 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
class Animation:
    """
    Animation is a numpy-like wrapper for animation data

    Animation data consists of several arrays consisting
    of F frames and J joints.

    The animation is specified by

        rotations : (F, J) Quaternions | Joint Rotations
        positions : (F, J, 3) ndarray  | Joint Positions

    The base pose is specified by

        orients   : (J) Quaternions    | Joint Orientations
        offsets   : (J, 3) ndarray     | Joint Offsets

    And the skeletal structure is specified by

        parents   : (J) ndarray        | Joint Parents (-1 for roots)
    """

    def __init__(self, rotations, positions, orients, offsets, parents):
        self.rotations = rotations
        self.positions = positions
        self.orients = orients
        self.offsets = offsets
        self.parents = parents

    def __op__(self, op, other):
        # Apply a binary operator component-wise against another Animation.
        return Animation(
            op(self.rotations, other.rotations),
            op(self.positions, other.positions),
            op(self.orients, other.orients),
            op(self.offsets, other.offsets),
            op(self.parents, other.parents))

    def __iop__(self, op, other):
        # In-place component-wise binary operator.
        # FIX: the original read `self.roations` (typo) on both lines, and
        # assigned rotation data into `positions`.
        self.rotations = op(self.rotations, other.rotations)
        self.positions = op(self.positions, other.positions)
        self.orients = op(self.orients, other.orients)
        self.offsets = op(self.offsets, other.offsets)
        self.parents = op(self.parents, other.parents)
        return self

    def __sop__(self, op):
        # Apply a unary operator component-wise.
        return Animation(
            op(self.rotations),
            op(self.positions),
            op(self.orients),
            op(self.offsets),
            op(self.parents))

    def __add__(self, other): return self.__op__(operator.add, other)
    def __sub__(self, other): return self.__op__(operator.sub, other)
    def __mul__(self, other): return self.__op__(operator.mul, other)
    # FIX: `operator.div` does not exist on Python 3; use truediv and keep the
    # old __div__ name for backward compatibility.
    def __div__(self, other): return self.__op__(operator.truediv, other)
    __truediv__ = __div__

    def __abs__(self): return self.__sop__(operator.abs)
    def __neg__(self): return self.__sop__(operator.neg)

    def __iadd__(self, other): return self.__iop__(operator.iadd, other)
    def __isub__(self, other): return self.__iop__(operator.isub, other)
    def __imul__(self, other): return self.__iop__(operator.imul, other)
    # FIX: `operator.idiv` does not exist on Python 3.
    def __idiv__(self, other): return self.__iop__(operator.itruediv, other)
    __itruediv__ = __idiv__

    def __len__(self): return len(self.rotations)

    def __getitem__(self, k):
        # A tuple index (frames, joints) slices the base pose/hierarchy by the
        # joint component; any other index selects frames only.
        if isinstance(k, tuple):
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients[k[1:]],
                self.offsets[k[1:]],
                self.parents[k[1:]])
        else:
            return Animation(
                self.rotations[k],
                self.positions[k],
                self.orients,
                self.offsets,
                self.parents)

    def __setitem__(self, k, v):
        if isinstance(k, tuple):
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k[1:], v.orients)
            self.offsets.__setitem__(k[1:], v.offsets)
            self.parents.__setitem__(k[1:], v.parents)
        else:
            self.rotations.__setitem__(k, v.rotations)
            self.positions.__setitem__(k, v.positions)
            self.orients.__setitem__(k, v.orients)
            self.offsets.__setitem__(k, v.offsets)
            self.parents.__setitem__(k, v.parents)

    @property
    def shape(self):
        """(F, J) tuple of frame and joint counts.

        FIX: restored as a property -- callers index `anim.shape[1]` directly
        (e.g. AnimationStructure.graph and rotations_global).
        """
        return (self.rotations.shape[0], self.rotations.shape[1])

    def copy(self):
        """Deep-ish copy: every component array is copied."""
        return Animation(
            self.rotations.copy(), self.positions.copy(),
            self.orients.copy(), self.offsets.copy(),
            self.parents.copy())

    def repeat(self, *args, **kw):
        # Repeat frame data; base pose and hierarchy are shared, not copied.
        return Animation(
            self.rotations.repeat(*args, **kw),
            self.positions.repeat(*args, **kw),
            self.orients, self.offsets, self.parents)

    def ravel(self):
        """Flatten to one vector: [log rotations, positions, log orients, offsets]."""
        return np.hstack([
            self.rotations.log().ravel(),
            self.positions.ravel(),
            self.orients.log().ravel(),
            self.offsets.ravel()])

    @classmethod
    def unravel(cls, anim, shape, parents):
        """Inverse of `ravel`: rebuild an Animation from a flat vector.

        FIX: the original signature used `clas` while the body referenced the
        undefined name `cls`, so the method could never run.
        """
        nf, nj = shape
        rotations = anim[nf*nj*0:nf*nj*3]
        positions = anim[nf*nj*3:nf*nj*6]
        orients = anim[nf*nj*6+nj*0:nf*nj*6+nj*3]
        offsets = anim[nf*nj*6+nj*3:nf*nj*6+nj*6]
        # NOTE(review): these slices are 1-D, but Quaternions.exp expects a
        # trailing axis of length 3 -- a reshape to (nf, nj, 3) / (nj, 3) is
        # presumably intended; TODO confirm against callers of unravel.
        return cls(
            Quaternions.exp(rotations), positions,
            Quaternions.exp(orients), offsets,
            parents.copy())
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been desgined such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self): return "Quaternions("+ str(self.qs) + ")"
def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other): return self * other
def __sub__(self, other): return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[...,0]; q1 = sqs[...,1];
q2 = sqs[...,2]; q3 = sqs[...,3];
r0 = oqs[...,0]; r1 = oqs[...,1];
r2 = oqs[...,2]; r3 = oqs[...,3];
qs = np.empty(sqs.shape)
qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other): return self.qs == other.qs
def __ne__(self, other): return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self): return iter(self.qs)
def __len__(self): return len(self.qs)
def __getitem__(self, k): return Quaternions(self.qs[k])
def __setitem__(self, k, v): self.qs[k] = v.qs
def lengths(self):
return np.sum(self.qs**2.0, axis=-1)**0.5
def reals(self):
return self.qs[...,0]
def imaginaries(self):
return self.qs[...,1:4]
def shape(self): return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[...,np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs**2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[...,np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[ img] = top[ img]
ret[~img] = bot[~img]
return ret
def constrained_x(self): return self.constrained(np.array([1,0,0]))
def constrained_y(self): return self.constrained(np.array([0,1,0]))
def constrained_z(self): return self.constrained(np.array([0,0,1]))
def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
def copy(self): return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
q = self.normalized().qs
q0 = q[...,0]
q1 = q[...,1]
q2 = q[...,2]
q3 = q[...,3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es
def average(self):
if len(self.shape) == 1:
import numpy.core.umath_tests as ut
system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
w, v = np.linalg.eigh(system)
qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
else:
raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')
def angle_axis(self):
norm = self.normalized()
s = np.sqrt(1 - (norm.reals**2.0))
s[s == 0] = 0.001
angles = 2.0 * np.arccos(norm.reals)
axis = norm.imaginaries / s[...,np.newaxis]
return angles, axis
def transforms(self):
qw = self.qs[...,0]
qx = self.qs[...,1]
qy = self.qs[...,2]
qz = self.qs[...,3]
x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
xx = qx * x2; yy = qy * y2; wx = qw * x2;
xy = qx * y2; yz = qy * z2; wy = qw * y2;
xz = qx * z2; zz = qz * z2; wz = qw * z2;
m = np.empty(self.shape + (3,3))
m[...,0,0] = 1.0 - (yy + zz)
m[...,0,1] = xy - wz
m[...,0,2] = xz + wy
m[...,1,0] = xy + wz
m[...,1,1] = 1.0 - (xx + zz)
m[...,1,2] = yz - wx
m[...,2,0] = xz - wy
m[...,2,1] = yz + wx
m[...,2,2] = 1.0 - (xx + yy)
return m
def ravel(self):
return self.qs.ravel()
def id(cls, n):
if isinstance(n, tuple):
qs = np.zeros(n + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
if isinstance(n, int) or isinstance(n, long):
qs = np.zeros((n,4))
qs[:,0] = 1.0
return Quaternions(qs)
raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
def id_like(cls, a):
qs = np.zeros(a.shape + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
def exp(cls, ws):
ts = np.sum(ws**2.0, axis=-1)**0.5
ts[ts == 0] = 0.001
ls = np.sin(ts) / ts
qs = np.empty(ws.shape[:-1] + (4,))
qs[...,0] = np.cos(ts)
qs[...,1] = ws[...,0] * ls
qs[...,2] = ws[...,1] * ls
qs[...,3] = ws[...,2] * ls
return Quaternions(qs).normalized()
def slerp(cls, q0s, q1s, a):
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
fst, a = cls._broadcast(fst, a, scalar=True)
snd, a = cls._broadcast(snd, a, scalar=True)
len = np.sum(fst * snd, axis=-1)
neg = len < 0.0
len[neg] = -len[neg]
snd[neg] = -snd[neg]
amount0 = np.zeros(a.shape)
amount1 = np.zeros(a.shape)
linear = (1.0 - len) < 0.01
omegas = np.arccos(len[~linear])
sinoms = np.sin(omegas)
amount0[ linear] = 1.0 - a[linear]
amount1[ linear] = a[linear]
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms
return Quaternions(
amount0[...,np.newaxis] * fst +
amount1[...,np.newaxis] * snd)
def between(cls, v0s, v1s):
a = np.cross(v0s, v1s)
w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()
def from_angle_axis(cls, angles, axis):
axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
sines = np.sin(angles / 2.0)[...,np.newaxis]
cosines = np.cos(angles / 2.0)[...,np.newaxis]
return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))
def from_euler(cls, es, order='xyz', world=False):
axis = {
'x' : np.array([1,0,0]),
'y' : np.array([0,1,0]),
'z' : np.array([0,0,1]),
}
q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])
return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
def from_transforms(cls, ts):
d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]
q0 = ( d0 + d1 + d2 + 1.0) / 4.0
q1 = ( d0 - d1 - d2 + 1.0) / 4.0
q2 = (-d0 + d1 - d2 + 1.0) / 4.0
q3 = (-d0 - d1 + d2 + 1.0) / 4.0
q0 = np.sqrt(q0.clip(0,None))
q1 = np.sqrt(q1.clip(0,None))
q2 = np.sqrt(q2.clip(0,None))
q3 = np.sqrt(q3.clip(0,None))
c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])
q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])
q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])
q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])
qs = np.empty(ts.shape[:-2] + (4,))
qs[...,0] = q0
qs[...,1] = q1
qs[...,2] = q2
qs[...,3] = q3
return cls(qs)
The provided code snippet includes necessary dependencies for implementing the `load_from_maya` function. Write a Python function `def load_from_maya(root, start, end)` to solve the following problem:
Load Animation Object from Maya Joint Skeleton Parameters ---------- root : PyNode Root Joint of Maya Skeleton start, end : int, int Start and End frame index of Maya Animation Returns ------- animation : Animation Loaded animation from maya names : [str] Joint names from maya
Here is the function:
def load_from_maya(root, start, end):
    """
    Load Animation Object from Maya Joint Skeleton

    Parameters
    ----------
    root : PyNode
        Root Joint of Maya Skeleton
    start, end : int, int
        Start and End frame index of Maya Animation

    Returns
    -------
    animation : Animation
        Loaded animation from maya
    names : [str]
        Joint names from maya
    """
    import pymel.core as pm
    # Remember the scene time so it can be restored afterwards.
    original_time = pm.currentTime(q=True)
    pm.currentTime(start)
    """ Build Structure """
    names, parents = AnimationStructure.load_from_maya(root)
    descendants = AnimationStructure.descendants_list(parents)
    orients = Quaternions.id(len(names))
    offsets = np.array([pm.xform(j, q=True, translation=True) for j in names])
    # Bake each joint's scale into the offsets of all of its descendants.
    for j, name in enumerate(names):
        scale = pm.xform(pm.PyNode(name), q=True, scale=True, relative=True)
        if len(descendants[j]) == 0: continue
        offsets[descendants[j]] *= scale
    """ Load Animation """
    eulers = np.zeros((end-start, len(names), 3))
    positions = np.zeros((end-start, len(names), 3))
    rotations = Quaternions.id((end-start, len(names)))
    for i in range(end-start):
        pm.currentTime(start+i+1, u=True)
        scales = {}
        for j, name, parent in zip(range(len(names)), names, parents):
            node = pm.PyNode(name)
            # Joint orients are constant, so only read them on the first frame.
            if i == 0 and pm.hasAttr(node, 'jointOrient'):
                ort = node.getOrientation()
                # Maya returns (x, y, z, w); reorder to (w, x, y, z).
                orients[j] = Quaternions(np.array([ort[3], ort[0], ort[1], ort[2]]))
            if pm.hasAttr(node, 'rotate'): eulers[i,j] = np.radians(pm.xform(node, q=True, rotation=True))
            if pm.hasAttr(node, 'translate'): positions[i,j] = pm.xform(node, q=True, translation=True)
            if pm.hasAttr(node, 'scale'): scales[j] = pm.xform(node, q=True, scale=True, relative=True)
        # Bake this frame's scales into descendant positions.
        for j in scales:
            if len(descendants[j]) == 0: continue
            positions[i,descendants[j]] *= scales[j]
        # The root position is sampled in world space.
        positions[i,0] = pm.xform(root, q=True, translation=True, worldSpace=True)
    # Compose joint orients with the sampled per-frame Euler rotations.
    rotations = orients[np.newaxis] * Quaternions.from_euler(eulers, order='xyz', world=True)
    """ Done """
    pm.currentTime(original_time)
    return Animation(rotations, positions, orients, offsets, parents), names
16,110 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
def rotations_global(anim):
    """
    Global Animation Rotations

    Accumulates each joint's local rotation down the skeletal
    hierarchy to produce world-space rotations.

    This relies on joint ordering
    being incremental. That means a joint
    J1 must not be an ancestor of J0 if
    J0 appears before J1 in the joint
    ordering.

    Parameters
    ----------
    anim : Animation
        Input animation

    Returns
    -------
    rotations : (F, J) Quaternions
        global rotations for every frame F
        and joint J
    """
    # FIX: removed the unused locals `joints` and `parents` (the loop reads
    # `anim.parents` directly) and renamed `locals`/`globals`, which shadowed
    # the builtins of the same name.
    local_rotations = anim.rotations
    global_rotations = Quaternions.id(anim.shape)

    # The root joint's local rotation is already global.
    global_rotations[:,0] = local_rotations[:,0]

    # Joint ordering is incremental, so every parent is finalized before
    # its children are visited.
    for i in range(1, anim.shape[1]):
        global_rotations[:,i] = global_rotations[:,anim.parents[i]] * local_rotations[:,i]

    return global_rotations
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been desgined such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self): return "Quaternions("+ str(self.qs) + ")"
def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other): return self * other
def __sub__(self, other): return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[...,0]; q1 = sqs[...,1];
q2 = sqs[...,2]; q3 = sqs[...,3];
r0 = oqs[...,0]; r1 = oqs[...,1];
r2 = oqs[...,2]; r3 = oqs[...,3];
qs = np.empty(sqs.shape)
qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other): return self.qs == other.qs
def __ne__(self, other): return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self): return iter(self.qs)
def __len__(self): return len(self.qs)
def __getitem__(self, k): return Quaternions(self.qs[k])
def __setitem__(self, k, v): self.qs[k] = v.qs
def lengths(self):
return np.sum(self.qs**2.0, axis=-1)**0.5
def reals(self):
return self.qs[...,0]
def imaginaries(self):
return self.qs[...,1:4]
def shape(self): return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[...,np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs**2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[...,np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[ img] = top[ img]
ret[~img] = bot[~img]
return ret
def constrained_x(self): return self.constrained(np.array([1,0,0]))
def constrained_y(self): return self.constrained(np.array([0,1,0]))
def constrained_z(self): return self.constrained(np.array([0,0,1]))
def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
def copy(self): return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
    """
    Convert to Euler angles (radians) using rotation order `order`.

    Only the 'xyz' and 'yzx' orderings are implemented; any other order
    raises NotImplementedError. Returns an ndarray of shape
    `self.shape + (3,)` with one angle triple per quaternion.
    """
    q = self.normalized().qs
    q0 = q[...,0]
    q1 = q[...,1]
    q2 = q[...,2]
    q3 = q[...,3]
    es = np.zeros(self.shape + (3,))

    # arcsin arguments are clipped to [-1, 1] to guard against small
    # floating-point excursions outside the valid domain.
    if order == 'xyz':
        es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
        es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
        es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
    elif order == 'yzx':
        es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
        es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
        es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
    else:
        raise NotImplementedError('Cannot convert from ordering %s' % order)

    # A large dead string literal of alternative per-ordering conversions
    # (from http://bediyap.com/programming/convert-quaternion-to-euler-rotations/)
    # was removed here: the original author noted it did not work
    # correctly for Maya. For a matrix-based alternative see
    # https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
    return es
def average(self):
    """
    Average of a 1-D array of quaternions: the eigenvector of the summed
    outer-product matrix closest to the input set. Only 1-D inputs are
    supported.
    """
    if len(self.shape) == 1:

        # NOTE(review): numpy.core.umath_tests is deprecated and removed
        # from modern NumPy; np.matmul is the supported replacement —
        # confirm the target NumPy version.
        import numpy.core.umath_tests as ut
        system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
        w, v = np.linalg.eigh(system)
        qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
        return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])

    else:
        raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')

def angle_axis(self):
    # Decompose each quaternion into (angle, axis).
    norm = self.normalized()
    s = np.sqrt(1 - (norm.reals**2.0))
    s[s == 0] = 0.001  # avoid divide-by-zero at the identity rotation
    angles = 2.0 * np.arccos(norm.reals)
    axis = norm.imaginaries / s[...,np.newaxis]
    return angles, axis
def transforms(self):
    """
    Convert each quaternion to a 3x3 rotation matrix.

    Returns an ndarray of shape `self.shape + (3, 3)` built with the
    standard quaternion-to-matrix formula.
    """
    qw = self.qs[...,0]
    qx = self.qs[...,1]
    qy = self.qs[...,2]
    qz = self.qs[...,3]

    # Precompute the doubled products used by the matrix entries.
    x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
    xx = qx * x2; yy = qy * y2; wx = qw * x2;
    xy = qx * y2; yz = qy * z2; wy = qw * y2;
    xz = qx * z2; zz = qz * z2; wz = qw * z2;

    m = np.empty(self.shape + (3,3))
    m[...,0,0] = 1.0 - (yy + zz)
    m[...,0,1] = xy - wz
    m[...,0,2] = xz + wy
    m[...,1,0] = xy + wz
    m[...,1,1] = 1.0 - (xx + zz)
    m[...,1,2] = yz - wx
    m[...,2,0] = xz - wy
    m[...,2,1] = yz + wx
    m[...,2,2] = 1.0 - (xx + yy)

    return m

def ravel(self):
    # Flattened view of the raw component array.
    return self.qs.ravel()
def id(cls, n):
    """
    Construct identity quaternions (w=1, x=y=z=0).

    `n` may be a tuple (output shape `n + (4,)`) or an int (output shape
    `(n, 4)`); anything else raises TypeError.
    """
    # NOTE(review): presumably a @classmethod in the original source
    # (decorators appear stripped from this dump).
    if isinstance(n, tuple):
        qs = np.zeros(n + (4,))
        qs[...,0] = 1.0
        return Quaternions(qs)

    # The original also tested the Python 2 `long` type, which is a
    # NameError under Python 3 for any non-int, non-tuple input; in
    # Python 3 all integers are `int`, so checking `int` alone suffices.
    if isinstance(n, int):
        qs = np.zeros((n,4))
        qs[:,0] = 1.0
        return Quaternions(qs)

    raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
def id_like(cls, a):
    # Identity quaternions matching the shape of array `a`.
    qs = np.zeros(a.shape + (4,))
    qs[...,0] = 1.0
    return Quaternions(qs)

def exp(cls, ws):
    # Quaternion exponential of 3-vectors `ws` (rotation axis scaled by
    # half the rotation angle), returning unit quaternions.
    ts = np.sum(ws**2.0, axis=-1)**0.5
    ts[ts == 0] = 0.001  # avoid 0/0 at the identity
    ls = np.sin(ts) / ts

    qs = np.empty(ws.shape[:-1] + (4,))
    qs[...,0] = np.cos(ts)
    qs[...,1] = ws[...,0] * ls
    qs[...,2] = ws[...,1] * ls
    qs[...,3] = ws[...,2] * ls

    return Quaternions(qs).normalized()
def slerp(cls, q0s, q1s, a):
    """
    Spherical linear interpolation between quaternion arrays `q0s` and
    `q1s` by amount(s) `a` in [0, 1], with broadcasting. Nearly-parallel
    pairs fall back to plain linear interpolation for stability.
    """
    fst, snd = cls._broadcast(q0s.qs, q1s.qs)
    fst, a = cls._broadcast(fst, a, scalar=True)
    snd, a = cls._broadcast(snd, a, scalar=True)

    # Per-pair dot products. The original stored these in a local named
    # `len`, shadowing the builtin; renamed for clarity.
    dots = np.sum(fst * snd, axis=-1)

    # Take the shorter arc: flip the second quaternion where needed.
    neg = dots < 0.0
    dots[neg] = -dots[neg]
    snd[neg] = -snd[neg]

    amount0 = np.zeros(a.shape)
    amount1 = np.zeros(a.shape)

    # Where the quaternions are almost identical, sin(omega) ~ 0 and the
    # slerp formula is ill-conditioned; interpolate linearly instead.
    linear = (1.0 - dots) < 0.01
    omegas = np.arccos(dots[~linear])
    sinoms = np.sin(omegas)

    amount0[ linear] = 1.0 - a[linear]
    amount1[ linear] = a[linear]
    amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
    amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms

    return Quaternions(
        amount0[...,np.newaxis] * fst +
        amount1[...,np.newaxis] * snd)
def between(cls, v0s, v1s):
    # Rotation taking each vector in `v0s` onto the corresponding vector
    # in `v1s` (cross product gives the axis, the w term the half-angle).
    a = np.cross(v0s, v1s)
    w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
    return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()

def from_angle_axis(cls, angles, axis):
    # Quaternion for rotation by `angles` (radians) about `axis`.
    # The 1e-10 term guards normalization of zero-length axes.
    axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
    sines = np.sin(angles / 2.0)[...,np.newaxis]
    cosines = np.cos(angles / 2.0)[...,np.newaxis]
    return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))

def from_euler(cls, es, order='xyz', world=False):
    # Compose per-axis rotations from Euler angles `es[...,0:3]` in the
    # given `order`; `world` selects the multiplication order
    # (q2*q1*q0 versus q0*q1*q2).
    axis = {
        'x' : np.array([1,0,0]),
        'y' : np.array([0,1,0]),
        'z' : np.array([0,0,1]),
    }

    q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
    q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
    q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])

    return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
def from_transforms(cls, ts):
    """
    Build quaternions from an array of 3x3 rotation matrices `ts`.
    Leading axes are preserved: output shape is `ts.shape[:-2] + (4,)`.
    """
    d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]

    # Candidate squared magnitudes of each component, from the diagonal.
    q0 = ( d0 + d1 + d2 + 1.0) / 4.0
    q1 = ( d0 - d1 - d2 + 1.0) / 4.0
    q2 = (-d0 + d1 - d2 + 1.0) / 4.0
    q3 = (-d0 - d1 + d2 + 1.0) / 4.0

    # clip(0, None) guards sqrt against small negative round-off.
    q0 = np.sqrt(q0.clip(0,None))
    q1 = np.sqrt(q1.clip(0,None))
    q2 = np.sqrt(q2.clip(0,None))
    q3 = np.sqrt(q3.clip(0,None))

    # Branch per matrix on the largest component, then recover the signs
    # of the other three from the off-diagonal terms (numerically stable
    # variant of the matrix-to-quaternion conversion).
    c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
    c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
    c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
    c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)

    q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
    q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
    q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])

    q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
    q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
    q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])

    q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
    q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
    q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])

    q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
    q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
    q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])

    qs = np.empty(ts.shape[:-2] + (4,))
    qs[...,0] = q0
    qs[...,1] = q1
    qs[...,2] = q2
    qs[...,3] = q3

    return cls(qs)
def rotations_parents_global(anim):
    """
    Global rotation of each joint's parent.

    Looks up the global rotations from `rotations_global` at each joint's
    parent index; joint 0 (the root, which has no parent) is assigned the
    identity rotation instead.
    """
    rotations = rotations_global(anim)
    rotations = rotations[:,anim.parents]
    rotations[:,0] = Quaternions.id(len(anim))
    return rotations
16,111 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
The provided code snippet includes necessary dependencies for implementing the `rotations_load_to_maya` function. Write a Python function `def rotations_load_to_maya(rotations, positions, names=None)` to solve the following problem:
Load Rotations into Maya. Loads a Quaternions array into the scene via the representation of axes. Parameters ---------- rotations : (F, J) Quaternions array of rotations to load into the scene, where F = number of frames and J = number of joints positions : (F, J, 3) ndarray array of positions to load rotation axes at, where F = number of frames and J = number of joints names : [str] List of joint names Returns ------- maxis : Group Grouped Maya Node of all Axis nodes
Here is the function:
def rotations_load_to_maya(rotations, positions, names=None):
    """
    Load Rotations into Maya

    Loads a Quaternions array into the scene
    via the representation of axes.

    Parameters
    ----------

    rotations : (F, J) Quaternions
        array of rotations to load
        into the scene where
            F = number of frames
            J = number of joints

    positions : (F, J, 3) ndarray
        array of positions to load
        rotation axes at where:
            F = number of frames
            J = number of joints

    names : [str]
        List of joint names

    Returns
    -------

    maxis : Group
        Grouped Maya Node of all Axis nodes
    """

    import pymel.core as pm

    if names is None: names = ["joint_" + str(i) for i in range(rotations.shape[1])]

    maxis = []
    frames = range(1, len(positions)+1)
    for i, name in enumerate(names):

        name = name + "_axis"
        # One group per joint holding three unit-length curves that draw
        # the local X/Y/Z axes of that joint's rotation frame.
        axis = pm.group(
            pm.curve(p=[(0,0,0), (1,0,0)], d=1, n=name+'_axis_x'),
            pm.curve(p=[(0,0,0), (0,1,0)], d=1, n=name+'_axis_y'),
            pm.curve(p=[(0,0,0), (0,0,1)], d=1, n=name+'_axis_z'),
            n=name)

        axis.rotatePivot.set((0,0,0))
        axis.scalePivot.set((0,0,0))
        # Color-code the three axis curves via display overrides.
        axis.childAtIndex(0).overrideEnabled.set(1); axis.childAtIndex(0).overrideColor.set(13)
        axis.childAtIndex(1).overrideEnabled.set(1); axis.childAtIndex(1).overrideColor.set(14)
        axis.childAtIndex(2).overrideEnabled.set(1); axis.childAtIndex(2).overrideColor.set(15)

        # Key this joint's per-frame Euler rotation onto the group.
        # NOTE(review): .euler() yields radians while AnimCurveTA keys are
        # angle-typed — confirm Maya's unit handling here.
        curvex = pm.nodetypes.AnimCurveTA(n=name + "_rotateX")
        curvey = pm.nodetypes.AnimCurveTA(n=name + "_rotateY")
        curvez = pm.nodetypes.AnimCurveTA(n=name + "_rotateZ")

        arotations = rotations[:,i].euler()
        curvex.addKeys(frames, arotations[:,0])
        curvey.addKeys(frames, arotations[:,1])
        curvez.addKeys(frames, arotations[:,2])

        pm.connectAttr(curvex.output, axis.rotateX)
        pm.connectAttr(curvey.output, axis.rotateY)
        pm.connectAttr(curvez.output, axis.rotateZ)

        # Key this joint's per-frame position onto the group.
        offsetx = pm.nodetypes.AnimCurveTU(n=name + "_translateX")
        offsety = pm.nodetypes.AnimCurveTU(n=name + "_translateY")
        offsetz = pm.nodetypes.AnimCurveTU(n=name + "_translateZ")

        offsetx.addKeys(frames, positions[:,i,0])
        offsety.addKeys(frames, positions[:,i,1])
        offsetz.addKeys(frames, positions[:,i,2])

        pm.connectAttr(offsetx.output, axis.translateX)
        pm.connectAttr(offsety.output, axis.translateY)
        pm.connectAttr(offsetz.output, axis.translateZ)

        maxis.append(axis)

    return pm.group(*maxis, n='RotationAnimation')
16,112 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
class Quaternions:
"""
Quaternions is a wrapper around a numpy ndarray
that allows it to act as if it were an narray of
a quaternion data type.
Therefore addition, subtraction, multiplication,
division, negation, absolute, are all defined
in terms of quaternion operations such as quaternion
multiplication.
This allows for much neater code and many routines
which conceptually do the same thing to be written
in the same way for point data and for rotation data.
The Quaternions class has been desgined such that it
should support broadcasting and slicing in all of the
usual ways.
"""
def __init__(self, qs):
if isinstance(qs, np.ndarray):
if len(qs.shape) == 1: qs = np.array([qs])
self.qs = qs
return
if isinstance(qs, Quaternions):
self.qs = qs.qs
return
raise TypeError('Quaternions must be constructed from iterable, numpy array, or Quaternions, not %s' % type(qs))
def __str__(self): return "Quaternions("+ str(self.qs) + ")"
def __repr__(self): return "Quaternions("+ repr(self.qs) + ")"
""" Helper Methods for Broadcasting and Data extraction """
def _broadcast(cls, sqs, oqs, scalar=False):
if isinstance(oqs, float): return sqs, oqs * np.ones(sqs.shape[:-1])
ss = np.array(sqs.shape) if not scalar else np.array(sqs.shape[:-1])
os = np.array(oqs.shape)
if len(ss) != len(os):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
if np.all(ss == os): return sqs, oqs
if not np.all((ss == os) | (os == np.ones(len(os))) | (ss == np.ones(len(ss)))):
raise TypeError('Quaternions cannot broadcast together shapes %s and %s' % (sqs.shape, oqs.shape))
sqsn, oqsn = sqs.copy(), oqs.copy()
for a in np.where(ss == 1)[0]: sqsn = sqsn.repeat(os[a], axis=a)
for a in np.where(os == 1)[0]: oqsn = oqsn.repeat(ss[a], axis=a)
return sqsn, oqsn
""" Adding Quaterions is just Defined as Multiplication """
def __add__(self, other): return self * other
def __sub__(self, other): return self / other
""" Quaterion Multiplication """
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multipplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[...,0]; q1 = sqs[...,1];
q2 = sqs[...,2]; q3 = sqs[...,3];
r0 = oqs[...,0]; r1 = oqs[...,1];
r2 = oqs[...,2]; r3 = oqs[...,3];
qs = np.empty(sqs.shape)
qs[...,0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[...,1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[...,2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[...,3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other)))
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplicaion of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' + str(type(other)))
def __eq__(self, other): return self.qs == other.qs
def __ne__(self, other): return self.qs != other.qs
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]]))
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum(( qabs.qs) * np.array([1,0,0,0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1,0,0,0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs
def __iter__(self): return iter(self.qs)
def __len__(self): return len(self.qs)
def __getitem__(self, k): return Quaternions(self.qs[k])
def __setitem__(self, k, v): self.qs[k] = v.qs
def lengths(self):
return np.sum(self.qs**2.0, axis=-1)**0.5
def reals(self):
return self.qs[...,0]
def imaginaries(self):
return self.qs[...,1:4]
def shape(self): return self.qs.shape[:-1]
def repeat(self, n, **kwargs):
return Quaternions(self.qs.repeat(n, **kwargs))
def normalized(self):
return Quaternions(self.qs / self.lengths[...,np.newaxis])
def log(self):
norm = abs(self.normalized())
imgs = norm.imaginaries
lens = np.sqrt(np.sum(imgs**2, axis=-1))
lens = np.arctan2(lens, norm.reals) / (lens + 1e-10)
return imgs * lens[...,np.newaxis]
def constrained(self, axis):
rl = self.reals
im = np.sum(axis * self.imaginaries, axis=-1)
t1 = -2 * np.arctan2(rl, im) + np.pi
t2 = -2 * np.arctan2(rl, im) - np.pi
top = Quaternions.exp(axis[np.newaxis] * (t1[:,np.newaxis] / 2.0))
bot = Quaternions.exp(axis[np.newaxis] * (t2[:,np.newaxis] / 2.0))
img = self.dot(top) > self.dot(bot)
ret = top.copy()
ret[ img] = top[ img]
ret[~img] = bot[~img]
return ret
def constrained_x(self): return self.constrained(np.array([1,0,0]))
def constrained_y(self): return self.constrained(np.array([0,1,0]))
def constrained_z(self): return self.constrained(np.array([0,0,1]))
def dot(self, q): return np.sum(self.qs * q.qs, axis=-1)
def copy(self): return Quaternions(np.copy(self.qs))
def reshape(self, s):
self.qs.reshape(s)
return self
def interpolate(self, ws):
return Quaternions.exp(np.average(abs(self).log, axis=0, weights=ws))
def euler(self, order='xyz'):
q = self.normalized().qs
q0 = q[...,0]
q1 = q[...,1]
q2 = q[...,2]
q3 = q[...,3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[...,0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[...,1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1,1))
es[...,2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[...,0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[...,1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[...,2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1,1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es
def average(self):
if len(self.shape) == 1:
import numpy.core.umath_tests as ut
system = ut.matrix_multiply(self.qs[:,:,np.newaxis], self.qs[:,np.newaxis,:]).sum(axis=0)
w, v = np.linalg.eigh(system)
qiT_dot_qref = (self.qs[:,:,np.newaxis] * v[np.newaxis,:,:]).sum(axis=1)
return Quaternions(v[:,np.argmin((1.-qiT_dot_qref**2).sum(axis=0))])
else:
raise NotImplementedError('Cannot average multi-dimensionsal Quaternions')
def angle_axis(self):
norm = self.normalized()
s = np.sqrt(1 - (norm.reals**2.0))
s[s == 0] = 0.001
angles = 2.0 * np.arccos(norm.reals)
axis = norm.imaginaries / s[...,np.newaxis]
return angles, axis
def transforms(self):
qw = self.qs[...,0]
qx = self.qs[...,1]
qy = self.qs[...,2]
qz = self.qs[...,3]
x2 = qx + qx; y2 = qy + qy; z2 = qz + qz;
xx = qx * x2; yy = qy * y2; wx = qw * x2;
xy = qx * y2; yz = qy * z2; wy = qw * y2;
xz = qx * z2; zz = qz * z2; wz = qw * z2;
m = np.empty(self.shape + (3,3))
m[...,0,0] = 1.0 - (yy + zz)
m[...,0,1] = xy - wz
m[...,0,2] = xz + wy
m[...,1,0] = xy + wz
m[...,1,1] = 1.0 - (xx + zz)
m[...,1,2] = yz - wx
m[...,2,0] = xz - wy
m[...,2,1] = yz + wx
m[...,2,2] = 1.0 - (xx + yy)
return m
def ravel(self):
return self.qs.ravel()
def id(cls, n):
if isinstance(n, tuple):
qs = np.zeros(n + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
if isinstance(n, int) or isinstance(n, long):
qs = np.zeros((n,4))
qs[:,0] = 1.0
return Quaternions(qs)
raise TypeError('Cannot Construct Quaternion from %s type' % str(type(n)))
def id_like(cls, a):
qs = np.zeros(a.shape + (4,))
qs[...,0] = 1.0
return Quaternions(qs)
def exp(cls, ws):
ts = np.sum(ws**2.0, axis=-1)**0.5
ts[ts == 0] = 0.001
ls = np.sin(ts) / ts
qs = np.empty(ws.shape[:-1] + (4,))
qs[...,0] = np.cos(ts)
qs[...,1] = ws[...,0] * ls
qs[...,2] = ws[...,1] * ls
qs[...,3] = ws[...,2] * ls
return Quaternions(qs).normalized()
def slerp(cls, q0s, q1s, a):
fst, snd = cls._broadcast(q0s.qs, q1s.qs)
fst, a = cls._broadcast(fst, a, scalar=True)
snd, a = cls._broadcast(snd, a, scalar=True)
len = np.sum(fst * snd, axis=-1)
neg = len < 0.0
len[neg] = -len[neg]
snd[neg] = -snd[neg]
amount0 = np.zeros(a.shape)
amount1 = np.zeros(a.shape)
linear = (1.0 - len) < 0.01
omegas = np.arccos(len[~linear])
sinoms = np.sin(omegas)
amount0[ linear] = 1.0 - a[linear]
amount1[ linear] = a[linear]
amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
amount1[~linear] = np.sin( a[~linear] * omegas) / sinoms
return Quaternions(
amount0[...,np.newaxis] * fst +
amount1[...,np.newaxis] * snd)
def between(cls, v0s, v1s):
a = np.cross(v0s, v1s)
w = np.sqrt((v0s**2).sum(axis=-1) * (v1s**2).sum(axis=-1)) + (v0s * v1s).sum(axis=-1)
return Quaternions(np.concatenate([w[...,np.newaxis], a], axis=-1)).normalized()
def from_angle_axis(cls, angles, axis):
axis = axis / (np.sqrt(np.sum(axis**2, axis=-1)) + 1e-10)[...,np.newaxis]
sines = np.sin(angles / 2.0)[...,np.newaxis]
cosines = np.cos(angles / 2.0)[...,np.newaxis]
return Quaternions(np.concatenate([cosines, axis * sines], axis=-1))
def from_euler(cls, es, order='xyz', world=False):
axis = {
'x' : np.array([1,0,0]),
'y' : np.array([0,1,0]),
'z' : np.array([0,0,1]),
}
q0s = Quaternions.from_angle_axis(es[...,0], axis[order[0]])
q1s = Quaternions.from_angle_axis(es[...,1], axis[order[1]])
q2s = Quaternions.from_angle_axis(es[...,2], axis[order[2]])
return (q2s * (q1s * q0s)) if world else (q0s * (q1s * q2s))
def from_transforms(cls, ts):
d0, d1, d2 = ts[...,0,0], ts[...,1,1], ts[...,2,2]
q0 = ( d0 + d1 + d2 + 1.0) / 4.0
q1 = ( d0 - d1 - d2 + 1.0) / 4.0
q2 = (-d0 + d1 - d2 + 1.0) / 4.0
q3 = (-d0 - d1 + d2 + 1.0) / 4.0
q0 = np.sqrt(q0.clip(0,None))
q1 = np.sqrt(q1.clip(0,None))
q2 = np.sqrt(q2.clip(0,None))
q3 = np.sqrt(q3.clip(0,None))
c0 = (q0 >= q1) & (q0 >= q2) & (q0 >= q3)
c1 = (q1 >= q0) & (q1 >= q2) & (q1 >= q3)
c2 = (q2 >= q0) & (q2 >= q1) & (q2 >= q3)
c3 = (q3 >= q0) & (q3 >= q1) & (q3 >= q2)
q1[c0] *= np.sign(ts[c0,2,1] - ts[c0,1,2])
q2[c0] *= np.sign(ts[c0,0,2] - ts[c0,2,0])
q3[c0] *= np.sign(ts[c0,1,0] - ts[c0,0,1])
q0[c1] *= np.sign(ts[c1,2,1] - ts[c1,1,2])
q2[c1] *= np.sign(ts[c1,1,0] + ts[c1,0,1])
q3[c1] *= np.sign(ts[c1,0,2] + ts[c1,2,0])
q0[c2] *= np.sign(ts[c2,0,2] - ts[c2,2,0])
q1[c2] *= np.sign(ts[c2,1,0] + ts[c2,0,1])
q3[c2] *= np.sign(ts[c2,2,1] + ts[c2,1,2])
q0[c3] *= np.sign(ts[c3,1,0] - ts[c3,0,1])
q1[c3] *= np.sign(ts[c3,2,0] + ts[c3,0,2])
q2[c3] *= np.sign(ts[c3,2,1] + ts[c3,1,2])
qs = np.empty(ts.shape[:-2] + (4,))
qs[...,0] = q0
qs[...,1] = q1
qs[...,2] = q2
qs[...,3] = q3
return cls(qs)
def orients_global(anim):
    """
    Global joint orientations for `anim`, accumulated down the skeletal
    hierarchy (each joint's orientation is its parent's global orientation
    composed with its own local one). Relies on joints being ordered so
    that every parent precedes its children.
    """
    # The original also built unused `joints`/`parents` index arrays and
    # shadowed the builtins `locals`/`globals`; both cleaned up here.
    local_rots = anim.orients
    global_rots = Quaternions.id(anim.shape[1])

    # NOTE(review): `Quaternions.id(anim.shape[1])` has no frame axis, yet
    # the loop indexes `[:,i]` as if one existed — confirm the intended
    # shape of `anim.orients` before relying on this function.
    global_rots[:,0] = local_rots[:,0]
    for i in range(1, anim.shape[1]):
        global_rots[:,i] = global_rots[:,anim.parents[i]] * local_rots[:,i]
    return global_rots
16,113 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
def offset_lengths(anim):
    """Bone length of every non-root joint: Euclidean norm of its offset."""
    return np.linalg.norm(anim.offsets[1:], axis=1)
16,114 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
def position_lengths(anim):
    """Per-frame distance of every non-root joint position from the origin."""
    return np.linalg.norm(anim.positions[:, 1:], axis=2)
16,115 | import operator
import numpy as np
import numpy.core.umath_tests as ut
import AnimationStructure
from Quaternions_old import Quaternions
def transforms_multiply(t0s, t1s):
    """
    Transforms Multiply

    Multiplies two arrays of animation transforms

    Parameters
    ----------

    t0s, t1s : (F, J, 4, 4) ndarray
        Two arrays of transforms
        for each frame F and each
        joint J

    Returns
    -------

    transforms : (F, J, 4, 4) ndarray
        Array of transforms for each
        frame F and joint J multiplied
        together
    """
    # np.matmul broadcasts over the leading (frame, joint) axes and
    # replaces numpy.core.umath_tests.matrix_multiply, which was
    # deprecated and later removed from NumPy.
    return np.matmul(t0s, t1s)
def transforms_inv(ts):
    """
    Invert an array of 4x4 transforms, preserving the input shape.

    np.linalg.inv operates directly on stacks of matrices (all leading
    axes are treated as a batch), which replaces the original per-matrix
    Python loop over a reshaped copy.
    """
    return np.linalg.inv(ts)
def transforms_global(anim):
    """
    Global Animation Transforms

    This relies on joint ordering
    being incremental. That means a joint
    J1 must not be a ancestor of J0 if
    J0 appears before J1 in the joint
    ordering.

    Parameters
    ----------

    anim : Animation
        Input animation

    Returns
    ------

    transforms : (F, J, 4, 4) ndarray
        Array of global transforms for
        each frame F and joint J
    """
    # The original also built unused `joints`/`parents` index arrays and
    # shadowed the builtins `locals`/`globals`; both cleaned up here.
    local_ts = transforms_local(anim)
    global_ts = transforms_blank(anim)

    # Accumulate each joint's local transform onto its parent's global
    # one; valid because parents always precede children (see above).
    global_ts[:,0] = local_ts[:,0]
    for i in range(1, anim.shape[1]):
        global_ts[:,i] = transforms_multiply(global_ts[:,anim.parents[i]], local_ts[:,i])

    return global_ts
def skin(anim, rest, weights, mesh, maxjoints=4):
    # Linear-blend skinning: deform `mesh` vertices by the animation's
    # joint transforms expressed relative to the rest pose (frame 0 of
    # `rest`). `weights` is indexed as (vertex, joint) — presumably a
    # (V, J) skinning-weight matrix; confirm against callers.
    full_transforms = transforms_multiply(
        transforms_global(anim),
        transforms_inv(transforms_global(rest[0:1])))  # anim pose * inverse rest pose

    # Keep only the `maxjoints` largest weights per vertex and renormalise
    # them so each vertex's kept weights sum to 1.
    weightids = np.argsort(-weights, axis=1)[:,:maxjoints]
    weightvls = np.array(list(map(lambda w, i: w[i], weights, weightids)))
    weightvls = weightvls / weightvls.sum(axis=1)[...,np.newaxis]

    # Promote vertices to homogeneous coordinates and transform each one
    # by every influencing joint's transform.
    verts = np.hstack([mesh, np.ones((len(mesh), 1))])
    verts = verts[np.newaxis,:,np.newaxis,:,np.newaxis]
    verts = transforms_multiply(full_transforms[:,weightids], verts)
    verts = (verts[:,:,:,:3] / verts[:,:,:,3:4])[:,:,:,:,0]  # divide w back out to 3D

    # Blend the per-joint deformed positions by the skinning weights.
    return np.sum(weightvls[np.newaxis,:,:,np.newaxis] * verts, axis=2)
16,116 | from typing import Dict, List, Type
from helm.benchmark.model_metadata_registry import ALL_MODELS_METADATA, TEXT_MODEL_TAG, CODE_MODEL_TAG, ModelMetadata
from helm.benchmark.run_expander import RUN_EXPANDERS, RunExpander
class ModelMetadata:
def creator_organization(self) -> str:
def engine(self) -> str:
ALL_MODELS_METADATA: List[ModelMetadata] = []
class RunExpander(ABC):
def expand(self, run_spec: RunSpec) -> List[RunSpec]:
RUN_EXPANDERS = dict((expander.name, expander) for expander in RUN_EXPANDER_SUBCLASSES)
def define_env(env):
    """Register the macros used by the documentation templates."""

    @env.macro
    def models_by_organization_with_tag(tag: str) -> Dict[str, List[ModelMetadata]]:
        """Group all non-"simple" models carrying `tag` by creator organization."""
        grouped: Dict[str, List[ModelMetadata]] = {}
        for metadata in ALL_MODELS_METADATA:
            # Skip models without the requested tag, and the internal
            # "simple" test models.
            if tag not in metadata.tags or metadata.creator_organization == "simple":
                continue
            grouped.setdefault(metadata.creator_organization_name, []).append(metadata)
        return grouped

    @env.macro
    def run_expanders() -> Dict[str, Type[RunExpander]]:
        """Expose the registry of run expanders."""
        return RUN_EXPANDERS
16,117 | from helm.benchmark.config_registry import register_builtin_configs_from_helm_package
def register_builtin_configs_from_helm_package() -> None:
package_path = str(resources.files(CONFIG_PACKAGE))
register_configs_from_directory(package_path)
def on_startup(command: str, dirty: bool):
    """
    Startup hook: register HELM's built-in model/tokenizer configs before
    anything else runs. `command` and `dirty` are accepted to satisfy the
    hook signature but are unused here.
    """
    register_builtin_configs_from_helm_package()
16,118 | import argparse
from typing import List, Dict
import re
import sys
from helm.common.hierarchical_logger import hlog
from helm.common.authentication import Authentication
from .accounts import Usage, Account
from .services.remote_service import RemoteService, add_service_args, create_authentication
def render_header(show_model_groups: List[str]) -> List[str]:
def render_account(account: Account) -> Dict[str, str]:
def print_table(header: List[str], items: List[Dict[str, str]]):
class Authentication:
class RemoteService(Service):
def __init__(self, base_url):
def _check_response(response: Any, request: Optional[str] = None):
def get_general_info(self) -> GeneralInfo:
def get_window_service_info(self, model_name) -> WindowServiceInfo:
def expand_query(self, query: Query) -> QueryResult:
def make_request(self, auth: Authentication, request: Request) -> RequestResult:
def tokenize(self, auth: Authentication, request: TokenizationRequest) -> TokenizationRequestResult:
def decode(self, auth: Authentication, request: DecodeRequest) -> DecodeRequestResult:
def upload(self, auth: Authentication, request: FileUploadRequest) -> FileUploadResult:
def check_nudity(self, auth: Authentication, request: NudityCheckRequest) -> NudityCheckResult:
def compute_clip_score(self, auth: Authentication, request: CLIPScoreRequest) -> CLIPScoreResult:
def get_toxicity_scores(self, auth: Authentication, request: PerspectiveAPIRequest) -> PerspectiveAPIRequestResult:
def get_moderation_results(self, auth: Authentication, request: ModerationAPIRequest) -> ModerationAPIRequestResult:
def make_critique_request(self, auth: Authentication, request: CritiqueRequest) -> CritiqueRequestResult:
def create_account(self, auth: Authentication) -> Account:
def delete_account(self, auth: Authentication, api_key: str) -> Account:
def get_accounts(self, auth: Authentication) -> List[Account]:
def get_account(self, auth: Authentication) -> Account:
def update_account(self, auth: Authentication, account: Account) -> Account:
def rotate_api_key(self, auth: Authentication, account: Account) -> Account:
def shutdown(self, auth: Authentication):
def get_cache_config(self, shard_name: str) -> CacheConfig:
def do_list_command(service: RemoteService, auth: Authentication, args):
    """Print a table of accounts, optionally restricted to one group."""
    header = render_header(args.show_model_groups)
    rows = [
        render_account(account)
        for account in service.get_accounts(auth)
        if args.group is None or args.group in account.groups
    ]
    print_table(header, rows)
16,119 | import argparse
from typing import List, Dict
import re
import sys
from helm.common.hierarchical_logger import hlog
from helm.common.authentication import Authentication
from .accounts import Usage, Account
from .services.remote_service import RemoteService, add_service_args, create_authentication
UNLIMITED_QUOTA = "unlimited"
def render_header(show_model_groups: List[str]) -> List[str]:
def render_account(account: Account) -> Dict[str, str]:
def print_item(header: List[str], item: Dict[str, str]):
def hlog(x: Any) -> None:
class Authentication:
class Usage:
def update_period(self, period: str):
def can_use(self):
class RemoteService(Service):
def __init__(self, base_url):
def _check_response(response: Any, request: Optional[str] = None):
def get_general_info(self) -> GeneralInfo:
def get_window_service_info(self, model_name) -> WindowServiceInfo:
def expand_query(self, query: Query) -> QueryResult:
def make_request(self, auth: Authentication, request: Request) -> RequestResult:
def tokenize(self, auth: Authentication, request: TokenizationRequest) -> TokenizationRequestResult:
def decode(self, auth: Authentication, request: DecodeRequest) -> DecodeRequestResult:
def upload(self, auth: Authentication, request: FileUploadRequest) -> FileUploadResult:
def check_nudity(self, auth: Authentication, request: NudityCheckRequest) -> NudityCheckResult:
def compute_clip_score(self, auth: Authentication, request: CLIPScoreRequest) -> CLIPScoreResult:
def get_toxicity_scores(self, auth: Authentication, request: PerspectiveAPIRequest) -> PerspectiveAPIRequestResult:
def get_moderation_results(self, auth: Authentication, request: ModerationAPIRequest) -> ModerationAPIRequestResult:
def make_critique_request(self, auth: Authentication, request: CritiqueRequest) -> CritiqueRequestResult:
def create_account(self, auth: Authentication) -> Account:
def delete_account(self, auth: Authentication, api_key: str) -> Account:
def get_accounts(self, auth: Authentication) -> List[Account]:
def get_account(self, auth: Authentication) -> Account:
def update_account(self, auth: Authentication, account: Account) -> Account:
def rotate_api_key(self, auth: Authentication, account: Account) -> Account:
def shutdown(self, auth: Authentication):
def get_cache_config(self, shard_name: str) -> CacheConfig:
def do_create_update_command(service: RemoteService, auth: Authentication, args):
    """Create a new account or update an existing one, then print the result.

    For `update`, the target account is looked up by `args.api_key`; the
    process exits with status 1 if no account matches. The description,
    emails, groups and is_admin fields are overwritten only when the
    corresponding argument was provided. Quotas are supplied as
    `<model_group>.<granularity>=<quota>` strings (quota may be a number or
    the literal "unlimited").
    """
    if args.command == "create":
        account = service.create_account(auth)
    elif args.command == "update":
        # TODO: add additional arguments to `get_accounts` to select a single account based on api key
        # https://github.com/stanford-crfm/benchmarking/issues/693
        accounts = [account for account in service.get_accounts(auth) if account.api_key == args.api_key]
        if len(accounts) == 0:
            hlog(f"No account found with API key {args.api_key}")
            sys.exit(1)
        else:
            assert len(accounts) == 1
            account = accounts[0]
    else:
        raise Exception(f"Invalid command: {args.command}")
    # Update fields (only those explicitly passed on the command line)
    if args.description is not None:
        account.description = args.description
    if args.emails is not None:
        account.emails = args.emails
    if args.groups is not None:
        account.groups = args.groups
    if args.is_admin is not None:
        account.is_admin = bool(args.is_admin)
    # Update quotas
    for quota_str in args.quotas:
        # Raw f-string: \w and \d are regex escapes, not string escapes.
        # (The previous non-raw literal emitted SyntaxWarning on Python >= 3.12.)
        m = re.match(rf"(\w+)\.(\w+)=(\d+|{UNLIMITED_QUOTA})", quota_str)
        if not m:
            raise Exception(
                f"Invalid format: {quota_str}, expect <model_group>.<granularity>=<quota> "
                f"(e.g., gpt3.daily=10000 or gpt3.daily={UNLIMITED_QUOTA})"
            )
        model_group, granularity, quota = m.group(1), m.group(2), m.group(3)
        # Materialize the nested usages entry on demand.
        if model_group not in account.usages:
            usages = account.usages[model_group] = {}
        else:
            usages = account.usages[model_group]
        if granularity not in usages:
            usage = usages[granularity] = Usage()
        else:
            usage = usages[granularity]
        usage.quota = None if quota == UNLIMITED_QUOTA else int(quota)
    # Commit changes
    account = service.update_account(auth, account)
    # Print out created/updated account information
    header = render_header(show_model_groups=args.show_model_groups)
    item = render_account(account)
    print_item(header, item)
16,120 | import argparse
from typing import List, Dict
import re
import sys
from helm.common.hierarchical_logger import hlog
from helm.common.authentication import Authentication
from .accounts import Usage, Account
from .services.remote_service import RemoteService, add_service_args, create_authentication
def render_header(show_model_groups: List[str]) -> List[str]:
    """Return list of column headers related to an account."""
    columns = ["api_key", "description", "emails", "groups", "is_admin"]
    # One usage column per (model group, granularity) pair, in request order.
    columns.extend(
        f"{model_group}.{granularity}"
        for model_group in show_model_groups
        for granularity in GRANULARITIES
    )
    return columns
def render_account(account: Account) -> Dict[str, str]:
    """Flatten `account` into a {column name: display string} table row."""
    row: Dict[str, str] = dict(
        api_key=account.api_key,
        description=account.description,
        emails=",".join(account.emails),
        groups=",".join(account.groups),
        is_admin="admin" if account.is_admin else "-",
    )
    # One extra column per (model group, granularity) usage entry.
    for group_name, per_granularity in account.usages.items():
        for granularity, usage in per_granularity.items():
            row[f"{group_name}.{granularity}"] = render_usage(usage)
    return row
def print_item(header: List[str], item: Dict[str, str]):
    """Print a single rendered account as a one-row table."""
    # In the future, might want to print one line per item
    print_table(header, [item])
def hlog(x: Any) -> None:
    """Log `x` through the module-level hierarchical-logger singleton."""
    singleton.log(x)
class Authentication:
    """Needed to authenticate with the proxy server to make requests, etc."""
    # NOTE(review): instances are serialized with asdict(auth) elsewhere in
    # this file, so a @dataclass decorator was presumably stripped from this
    # listing — confirm against the original module.
    # The account's secret API key, sent with every authenticated call.
    api_key: str
class RemoteService(Service):
    """Service implementation that forwards every call over HTTP to a remote
    proxy server, encoding requests as JSON query/form parameters and decoding
    the JSON responses back into dataclasses via `from_dict`."""
    # Message raised by the endpoints this HTTP transport cannot serve.
    NOT_SUPPORTED_ERROR: str = "Not supported through the remote service."
    def __init__(self, base_url):
        # Base URL of the remote proxy server (no trailing slash).
        self.base_url: str = base_url
    # NOTE(review): defined without @staticmethod (no `self`) yet always
    # invoked as RemoteService._check_response(...) on the class, which works
    # in Python 3 — presumably the decorator was intended; confirm.
    def _check_response(response: Any, request: Optional[str] = None):
        """Raise `RemoteServiceError` when a JSON response reports an error,
        appending the originating request (if given) for context."""
        if type(response) is dict and "error" in response and response["error"]:
            error_message: str = response["error"]
            if request:
                error_message += f" Request: {request}"
            raise RemoteServiceError(error_message)
    def get_general_info(self) -> GeneralInfo:
        """Fetch server-wide metadata from /api/general_info."""
        response = requests.get(f"{self.base_url}/api/general_info").json()
        return from_dict(GeneralInfo, response)
    def get_window_service_info(self, model_name) -> WindowServiceInfo:
        """Fetch window-service information for `model_name`."""
        params = {"model_name": model_name}
        response = requests.get(f"{self.base_url}/api/window_service_info?{urllib.parse.urlencode(params)}").json()
        return from_dict(WindowServiceInfo, response)
    def expand_query(self, query: Query) -> QueryResult:
        """Expand a query via the server's /api/query endpoint."""
        params = asdict(query)
        response = requests.get(f"{self.base_url}/api/query?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return from_dict(QueryResult, response)
    def make_request(self, auth: Authentication, request: Request) -> RequestResult:
        """Submit a model request on behalf of `auth` via /api/request."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/request?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(RequestResult, response)
    def tokenize(self, auth: Authentication, request: TokenizationRequest) -> TokenizationRequestResult:
        """Tokenize text remotely via /api/tokenize."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/tokenize?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(TokenizationRequestResult, response)
    def decode(self, auth: Authentication, request: DecodeRequest) -> DecodeRequestResult:
        """Decode token ids back into text via /api/decode."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/decode?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(DecodeRequestResult, response)
    def upload(self, auth: Authentication, request: FileUploadRequest) -> FileUploadResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def check_nudity(self, auth: Authentication, request: NudityCheckRequest) -> NudityCheckResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def compute_clip_score(self, auth: Authentication, request: CLIPScoreRequest) -> CLIPScoreResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def get_toxicity_scores(self, auth: Authentication, request: PerspectiveAPIRequest) -> PerspectiveAPIRequestResult:
        """Request toxicity scores via the server's /api/toxicity endpoint."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/toxicity?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(PerspectiveAPIRequestResult, response)
    def get_moderation_results(self, auth: Authentication, request: ModerationAPIRequest) -> ModerationAPIRequestResult:
        """Request moderation results via the server's /api/moderation endpoint."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/moderation?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(ModerationAPIRequestResult, response)
    def make_critique_request(self, auth: Authentication, request: CritiqueRequest) -> CritiqueRequestResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError("make_critique_request is not supported by RemoteServer")
    def create_account(self, auth: Authentication) -> Account:
        """Create a new account via POST /api/account."""
        data = {"auth": json.dumps(asdict(auth))}
        response = requests.post(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def delete_account(self, auth: Authentication, api_key: str) -> Account:
        """Delete the account identified by `api_key`; returns the removed account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "api_key": api_key,
        }
        response = requests.delete(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def get_accounts(self, auth: Authentication) -> List[Account]:
        """Fetch the list of accounts (passes all=true; cf. `get_account`)."""
        params = {"auth": json.dumps(asdict(auth)), "all": "true"}
        response = requests.get(f"{self.base_url}/api/account?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return [from_dict(Account, account_response) for account_response in response]
    def get_account(self, auth: Authentication) -> Account:
        """Fetch the single account for `auth` (server returns a one-element list)."""
        params = {"auth": json.dumps(asdict(auth))}
        response = requests.get(f"{self.base_url}/api/account?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return from_dict(Account, response[0])
    def update_account(self, auth: Authentication, account: Account) -> Account:
        """Persist modified `account` fields on the server via PUT /api/account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "account": json.dumps(asdict(account)),
        }
        response = requests.put(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def rotate_api_key(self, auth: Authentication, account: Account) -> Account:
        """Generate a new API key for this account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "account": json.dumps(asdict(account)),
        }
        response = requests.put(f"{self.base_url}/api/account/api_key", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def shutdown(self, auth: Authentication):
        """Shutdown server (admin-only)."""
        params = {"auth": json.dumps(asdict(auth))}
        try:
            response = requests.get(f"{self.base_url}/api/shutdown?{urllib.parse.urlencode(params)}").json()
            RemoteService._check_response(response)
        except requests.exceptions.ConnectionError:
            # A ConnectionError is expected when shutting down the server.
            pass
    def get_cache_config(self, shard_name: str) -> CacheConfig:
        """Returns a CacheConfig"""
        return BlackHoleCacheBackendConfig().get_cache_config(shard_name)
def do_delete_command(service: RemoteService, auth: Authentication, args):
    """Delete the account with `args.api_key` and echo the removed record."""
    removed = service.delete_account(auth, args.api_key)
    hlog("Deleted account:")
    print_item(
        render_header(show_model_groups=args.show_model_groups),
        render_account(removed),
    )
16,121 | import argparse
from typing import List, Dict
import re
import sys
from helm.common.hierarchical_logger import hlog
from helm.common.authentication import Authentication
from .accounts import Usage, Account
from .services.remote_service import RemoteService, add_service_args, create_authentication
DEFAULT_SERVER_URL = "https://crfm-models.stanford.edu"
class RemoteService(Service):
    """Service implementation that forwards every call over HTTP to a remote
    proxy server, encoding requests as JSON query/form parameters and decoding
    the JSON responses back into dataclasses via `from_dict`."""
    # Message raised by the endpoints this HTTP transport cannot serve.
    NOT_SUPPORTED_ERROR: str = "Not supported through the remote service."
    def __init__(self, base_url):
        # Base URL of the remote proxy server (no trailing slash).
        self.base_url: str = base_url
    # NOTE(review): defined without @staticmethod (no `self`) yet always
    # invoked as RemoteService._check_response(...) on the class, which works
    # in Python 3 — presumably the decorator was intended; confirm.
    def _check_response(response: Any, request: Optional[str] = None):
        """Raise `RemoteServiceError` when a JSON response reports an error,
        appending the originating request (if given) for context."""
        if type(response) is dict and "error" in response and response["error"]:
            error_message: str = response["error"]
            if request:
                error_message += f" Request: {request}"
            raise RemoteServiceError(error_message)
    def get_general_info(self) -> GeneralInfo:
        """Fetch server-wide metadata from /api/general_info."""
        response = requests.get(f"{self.base_url}/api/general_info").json()
        return from_dict(GeneralInfo, response)
    def get_window_service_info(self, model_name) -> WindowServiceInfo:
        """Fetch window-service information for `model_name`."""
        params = {"model_name": model_name}
        response = requests.get(f"{self.base_url}/api/window_service_info?{urllib.parse.urlencode(params)}").json()
        return from_dict(WindowServiceInfo, response)
    def expand_query(self, query: Query) -> QueryResult:
        """Expand a query via the server's /api/query endpoint."""
        params = asdict(query)
        response = requests.get(f"{self.base_url}/api/query?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return from_dict(QueryResult, response)
    def make_request(self, auth: Authentication, request: Request) -> RequestResult:
        """Submit a model request on behalf of `auth` via /api/request."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/request?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(RequestResult, response)
    def tokenize(self, auth: Authentication, request: TokenizationRequest) -> TokenizationRequestResult:
        """Tokenize text remotely via /api/tokenize."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/tokenize?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(TokenizationRequestResult, response)
    def decode(self, auth: Authentication, request: DecodeRequest) -> DecodeRequestResult:
        """Decode token ids back into text via /api/decode."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/decode?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(DecodeRequestResult, response)
    def upload(self, auth: Authentication, request: FileUploadRequest) -> FileUploadResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def check_nudity(self, auth: Authentication, request: NudityCheckRequest) -> NudityCheckResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def compute_clip_score(self, auth: Authentication, request: CLIPScoreRequest) -> CLIPScoreResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError(self.NOT_SUPPORTED_ERROR)
    def get_toxicity_scores(self, auth: Authentication, request: PerspectiveAPIRequest) -> PerspectiveAPIRequestResult:
        """Request toxicity scores via the server's /api/toxicity endpoint."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/toxicity?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(PerspectiveAPIRequestResult, response)
    def get_moderation_results(self, auth: Authentication, request: ModerationAPIRequest) -> ModerationAPIRequestResult:
        """Request moderation results via the server's /api/moderation endpoint."""
        request_json: str = json.dumps(asdict(request))
        params = {
            "auth": json.dumps(asdict(auth)),
            "request": request_json,
        }
        response = requests.get(f"{self.base_url}/api/moderation?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response, request_json)
        return from_dict(ModerationAPIRequestResult, response)
    def make_critique_request(self, auth: Authentication, request: CritiqueRequest) -> CritiqueRequestResult:
        """Not available over this HTTP transport; always raises."""
        raise NotImplementedError("make_critique_request is not supported by RemoteServer")
    def create_account(self, auth: Authentication) -> Account:
        """Create a new account via POST /api/account."""
        data = {"auth": json.dumps(asdict(auth))}
        response = requests.post(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def delete_account(self, auth: Authentication, api_key: str) -> Account:
        """Delete the account identified by `api_key`; returns the removed account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "api_key": api_key,
        }
        response = requests.delete(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def get_accounts(self, auth: Authentication) -> List[Account]:
        """Fetch the list of accounts (passes all=true; cf. `get_account`)."""
        params = {"auth": json.dumps(asdict(auth)), "all": "true"}
        response = requests.get(f"{self.base_url}/api/account?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return [from_dict(Account, account_response) for account_response in response]
    def get_account(self, auth: Authentication) -> Account:
        """Fetch the single account for `auth` (server returns a one-element list)."""
        params = {"auth": json.dumps(asdict(auth))}
        response = requests.get(f"{self.base_url}/api/account?{urllib.parse.urlencode(params)}").json()
        RemoteService._check_response(response)
        return from_dict(Account, response[0])
    def update_account(self, auth: Authentication, account: Account) -> Account:
        """Persist modified `account` fields on the server via PUT /api/account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "account": json.dumps(asdict(account)),
        }
        response = requests.put(f"{self.base_url}/api/account", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def rotate_api_key(self, auth: Authentication, account: Account) -> Account:
        """Generate a new API key for this account."""
        data = {
            "auth": json.dumps(asdict(auth)),
            "account": json.dumps(asdict(account)),
        }
        response = requests.put(f"{self.base_url}/api/account/api_key", data=data).json()
        RemoteService._check_response(response)
        return from_dict(Account, response)
    def shutdown(self, auth: Authentication):
        """Shutdown server (admin-only)."""
        params = {"auth": json.dumps(asdict(auth))}
        try:
            response = requests.get(f"{self.base_url}/api/shutdown?{urllib.parse.urlencode(params)}").json()
            RemoteService._check_response(response)
        except requests.exceptions.ConnectionError:
            # A ConnectionError is expected when shutting down the server.
            pass
    def get_cache_config(self, shard_name: str) -> CacheConfig:
        """Returns a CacheConfig"""
        return BlackHoleCacheBackendConfig().get_cache_config(shard_name)
def create_remote_service(args) -> RemoteService:
    """Construct a RemoteService for args.server_url, falling back to the default server."""
    target_url = args.server_url or DEFAULT_SERVER_URL
    return RemoteService(target_url)
16,122 | from typing import Callable, Union
from retrying import Retrying
from helm.common.request import RequestResult
from helm.common.tokenization_request import TokenizationRequestResult
from helm.common.hierarchical_logger import hlog
import traceback
import threading
print_lock: threading.Lock = threading.Lock()
class NonRetriableException(Exception):
    """Raising this aborts retrying: `get_retry_decorator`'s exception handler
    returns False for it, so the operation is not re-attempted."""
    pass
def hlog(x: Any) -> None:
    """Log `x` through the module-level hierarchical-logger singleton."""
    singleton.log(x)
The provided code snippet includes necessary dependencies for implementing the `get_retry_decorator` function. Write a Python function `def get_retry_decorator( operation: str, max_attempts: int, wait_exponential_multiplier_seconds: int, retry_on_result: Callable ) -> Callable` to solve the following problem:
Create a decorator that will retry with exponential backoff.
Here is the function:
def get_retry_decorator(
    operation: str, max_attempts: int, wait_exponential_multiplier_seconds: int, retry_on_result: Callable
) -> Callable:
    """
    Create a decorator that will retry with exponential backoff.

    :param operation: human-readable name used in the retry log message.
    :param max_attempts: total number of attempts before giving up.
    :param wait_exponential_multiplier_seconds: base multiplier (seconds) for
        the 2**attempts exponential backoff computed in `wait` below.
    :param retry_on_result: callback given the wrapped function's return value;
        a truthy return triggers a retry.
    """
    def wait(attempts: int, delay: float) -> float:
        """
        Wait function to pass into `Retrying` that logs and returns the amount of time to sleep
        depending on the number of attempts and delay (in milliseconds).
        """
        del delay # unused
        next_delay = 2**attempts * wait_exponential_multiplier_seconds * 1000
        hlog(
            f"{operation} failed. Retrying (attempt #{attempts + 1}) in {next_delay // 1000} seconds... "
            "(See above for error details)"
        )
        return next_delay
    def print_exception_and_traceback(exception: Exception) -> bool:
        """
        This function always return True, as the exception should always be retried.
        It is used to print the exception and traceback (HACK).
        TODO: Should not retry on keyboard interrupt. (right now it is inconsistent)
        """
        # Serialize output so concurrent threads don't interleave tracebacks.
        with print_lock:
            hlog("")
            hlog("".join(traceback.format_exception(type(exception), exception, exception.__traceback__)))
        return not isinstance(exception, KeyboardInterrupt) and not isinstance(exception, NonRetriableException)
    _retrying = Retrying(
        retry_on_result=retry_on_result,
        wait_func=wait,
        stop_max_attempt_number=max_attempts,
        # Used to calculate the wait between retries (2^r * wait_exponential_multiplier_seconds seconds)
        # NOTE(review): `wait_func` above replaces the built-in wait strategy,
        # so this multiplier is presumably ignored by `retrying` — confirm.
        wait_exponential_multiplier=wait_exponential_multiplier_seconds * 1000,
        retry_on_exception=print_exception_and_traceback,
    )
    return lambda f: lambda *args, **kwargs: _retrying.call(f, *args, **kwargs)
16,123 | from typing import Callable, Union
from retrying import Retrying
from helm.common.request import RequestResult
from helm.common.tokenization_request import TokenizationRequestResult
from helm.common.hierarchical_logger import hlog
import traceback
import threading
class RequestResult:
    """What comes back due to a `Request`."""
    # NOTE(review): instances are built with `from_dict` and serialized with
    # `asdict` elsewhere in this file; a @dataclass decorator appears to have
    # been stripped from this listing — confirm against the original module.
    success: bool
    """Whether the request was successful"""
    embedding: List[float]
    """Fixed dimensional embedding corresponding to the entire prompt"""
    completions: List[Sequence]
    """List of completion"""
    cached: bool
    """Whether the request was actually cached"""
    request_time: Optional[float] = None
    """How long did the request take?"""
    request_datetime: Optional[int] = None
    """When was the request sent?
    We keep track of when the request was made because the underlying model or inference procedure backing the API
    might change over time. The integer represents the current time in seconds since the Epoch (January 1, 1970)."""
    error: Optional[str] = None
    """If `success` is false, what was the error?"""
    error_flags: Optional[ErrorFlags] = None
    """Describes how to treat errors in the request."""
    batch_size: Optional[int] = None
    """Batch size (`TogetherClient` only)"""
    batch_request_time: Optional[float] = None
    """How long it took to process the batch? (`TogetherClient` only)"""
    def render_lines(self) -> List[str]:
        """Render a human-readable summary: status fields first, then the
        completions as an indented block."""
        output = [
            f"success: {self.success}",
            f"cached: {self.cached}",
        ]
        if self.request_time:
            output.append(f"request_time: {self.request_time}")
        if self.request_datetime:
            output.append(f"request_datetime: {self.request_datetime}")
        if self.error:
            output.append(f"error: {self.error}")
        output.append("completions {")
        for completion in self.completions:
            output.extend(indent_lines(completion.render_lines()))
        output.append("}")
        return output
class TokenizationRequestResult:
    """Result after sending a `TokenizationRequest`."""
    # NOTE(review): used with from_dict/asdict elsewhere in this file, so a
    # @dataclass decorator was presumably stripped from this listing — confirm.
    # Whether the request was successful
    success: bool
    # Whether the request was cached
    cached: bool
    # The input text transformed by the tokenizer for tokenization.
    # e.g. The AI21 tokenizer replaces "½" with "1/2" before tokenization.
    text: str
    # The list of tokens
    tokens: List[TokenizationToken]
    # How long did the tokenization take?
    request_time: Optional[float] = None
    # If `success` is false, what was the error?
    error: Optional[str] = None
    def num_tokens(self) -> int:
        """Number of tokens produced for `text`."""
        return len(self.tokens)
    def raw_tokens(self) -> List[Union[str, int]]:
        """The bare token values (strings or integer ids), without metadata."""
        return [token.value for token in self.tokens]
def hlog(x: Any) -> None:
    """Log `x` through the module-level hierarchical-logger singleton."""
    singleton.log(x)
The provided code snippet includes necessary dependencies for implementing the `retry_if_request_failed` function. Write a Python function `def retry_if_request_failed(result: Union[RequestResult, TokenizationRequestResult]) -> bool` to solve the following problem:
Fails if `success` of `RequestResult` or `TokenizationRequestResult` is false.
Here is the function:
def retry_if_request_failed(result: Union[RequestResult, TokenizationRequestResult]) -> bool:
    """Decide whether a failed result should trigger a retry.

    Fails (and logs the error) only when `result.success` is false; a
    `RequestResult` can additionally opt out of retries through
    `error_flags.is_retriable`.
    """
    if result.success:
        return False
    hlog(result.error)
    should_retry = True
    if isinstance(result, RequestResult):
        flags = result.error_flags
        should_retry = flags is None or flags.is_retriable is None or flags.is_retriable
    return should_retry
16,124 | import textwrap
from .query import Query
def dedent(text: str) -> str:
    """Strip the common leading indentation from `text`, also dropping a
    single leading and a single trailing newline if present."""
    trimmed = text.removeprefix("\n")
    trimmed = textwrap.dedent(trimmed)
    return trimmed.removesuffix("\n")
16,125 | import copy
import datetime
import random
import string
from typing import Dict, Optional, Callable, List
from dacite import from_dict
from dataclasses import asdict, dataclass, field
from sqlitedict import SqliteDict
from helm.common.authentication import Authentication
from helm.common.general import hlog
DEFAULT_QUOTAS = {
# model group -> {granularity -> quota}
"gpt3": {"daily": 10000},
"gpt4": {"daily": 10000},
"codex": {"daily": 10000},
"jurassic": {"daily": 10000},
"gooseai": {"daily": 10000},
"cohere": {"daily": 10000},
"dall_e": {"daily": 5}, # In terms of the number of generated images
"together_vision": {"daily": 30},
"simple": {"daily": 10000},
}
class Usage:
    """Usage information (for a given account, model group, and granularity)."""
    # NOTE(review): fields are bare annotations with defaults and the class is
    # (de)serialized elsewhere; a @dataclass decorator was presumably stripped
    # from this listing — confirm.
    # What period it is (so we know when to reset `used`) - for example, for
    # daily granularity, period might be 2021-12-30
    period: Optional[str] = None
    # How many tokens was used
    used: int = 0
    # How much quota do we have (None means unlimited)
    quota: Optional[int] = None
    def update_period(self, period: str):
        """Roll over to `period`, zeroing `used` whenever the period changes."""
        if self.period != period:
            self.period = period
            self.used = 0 # Reset in a new period
    def can_use(self):
        """True when the quota is unlimited or not yet exhausted."""
        return self.quota is None or self.used < self.quota
class Account:
    """An `Account` provides access to the API."""
    # NOTE(review): fields use dataclasses.field(...) defaults, so a @dataclass
    # decorator was presumably stripped from this listing — confirm.
    # Unique API key that is used both for authentication and for identification.
    # Like credit card numbers, this is a bit of a shortcut since we're trying
    # to avoid building out a full-blown system. If an API key needs to be
    # replaced, we can simply change it and keep the other data the same.
    api_key: str
    # What this account is used for (can include the user names)
    description: str = ""
    # Emails associated this account
    emails: List[str] = field(default_factory=list)
    # What groups this account is associated with
    groups: List[str] = field(default_factory=list)
    # Whether this account has admin access (e.g., ability to modify accounts)
    is_admin: bool = False
    # Usage is tracked and limited at different granularities
    # `usages`: model group -> granularity -> Usage
    usages: Dict[str, Dict[str, Usage]] = field(default_factory=dict)
The provided code snippet includes necessary dependencies for implementing the `set_default_quotas` function. Write a Python function `def set_default_quotas(account: Account)` to solve the following problem:
Impose the `DEFAULT_QUOTAS` on the `account` if they don't exist, but don't override anything.
Here is the function:
def set_default_quotas(account: Account):
    """Impose the `DEFAULT_QUOTAS` on the `account` if they don't exist, but don't override anything."""
    for model_group, default_quotas in DEFAULT_QUOTAS.items():
        # Materialize the nested usage dicts on demand.
        model_group_usages = account.usages.get(model_group)
        if model_group_usages is None:
            model_group_usages = account.usages[model_group] = {}
        for granularity, quota in default_quotas.items():
            usage = model_group_usages.get(granularity)
            if usage is None:
                # Only a brand-new entry receives the default quota. (The
                # previous code assigned `usage.quota = quota` unconditionally,
                # clobbering quotas an admin had already customized —
                # contradicting the docstring's "don't override anything".)
                usage = model_group_usages[granularity] = Usage()
                usage.quota = quota
16,126 | import copy
import datetime
import random
import string
from typing import Dict, Optional, Callable, List
from dacite import from_dict
from dataclasses import asdict, dataclass, field
from sqlitedict import SqliteDict
from helm.common.authentication import Authentication
from helm.common.general import hlog
def compute_daily_period():
    """Return today's date as a "Y-M-D" period key (no zero padding)."""
    today = datetime.datetime.now()
    return "-".join(str(part) for part in (today.year, today.month, today.day))
16,127 | import copy
import datetime
import random
import string
from typing import Dict, Optional, Callable, List
from dacite import from_dict
from dataclasses import asdict, dataclass, field
from sqlitedict import SqliteDict
from helm.common.authentication import Authentication
from helm.common.general import hlog
def compute_monthly_period():
    """Return the current month as a "Y-M" period key (no zero padding)."""
    today = datetime.datetime.now()
    return "-".join(str(part) for part in (today.year, today.month))
16,128 | import copy
import datetime
import random
import string
from typing import Dict, Optional, Callable, List
from dacite import from_dict
from dataclasses import asdict, dataclass, field
from sqlitedict import SqliteDict
from helm.common.authentication import Authentication
from helm.common.general import hlog
def compute_total_period():
    """Return the constant period key used for all-time usage tracking."""
    all_time_key = "all"
    return all_time_key
16,129 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
def handle_root():
    """Send visitors of the bare root URL to the static landing page."""
    landing_page = "/static/index.html"
    return bottle.redirect(landing_page)
16,130 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
def handle_static_filename(filename):
    """Serve `filename` from the package's static/ directory, disabling caching."""
    static_root = os.path.join(os.path.dirname(__file__), "static")
    response = bottle.static_file(filename, root=static_root)
    response.add_header("Cache-Control", "no-store, must-revalidate ")
    return response
16,131 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
app = bottle.default_app()
def handle_output_filename(filename):
resp = bottle.static_file(filename, root=app.config["crfm.proxy.outputpath"])
return resp | null |
16,132 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
def handle_get_general_info():
def perform(args):
global service
return dataclasses.asdict(service.get_general_info())
return safe_call(perform) | null |
16,133 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
def handle_get_window_service_info():
def perform(args):
global service
return dataclasses.asdict(service.get_window_service_info(args["model_name"]))
return safe_call(perform) | null |
16,134 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
class Authentication:
def handle_create_account():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
return dataclasses.asdict(service.create_account(auth))
return safe_call(perform) | null |
16,135 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
class Authentication:
def handle_delete_account():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
api_key = args["api_key"]
return dataclasses.asdict(service.delete_account(auth, api_key))
return safe_call(perform) | null |
16,136 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
def handle_get_account():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
if "all" in args and args["all"].lower() == "true":
return [dataclasses.asdict(account) for account in service.get_accounts(auth)]
else:
return [dataclasses.asdict(service.get_account(auth))]
return safe_call(perform) | null |
16,137 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class Account:
"""An `Account` provides access to the API."""
# Unique API key that is used both for authentication and for identification.
# Like credit card numbers, this is a bit of a shortcut since we're trying
# to avoid building out a full-blown system. If an API key needs to be
# replaced, we can simply change it and keep the other data the same.
api_key: str
# What this account is used for (can include the user names)
description: str = ""
# Emails associated this account
emails: List[str] = field(default_factory=list)
# What groups this account is associated with
groups: List[str] = field(default_factory=list)
# Whether this account has admin access (e.g., ability to modify accounts)
is_admin: bool = False
# Usage is tracked and limited at different granularities
# `usages`: model group -> granularity -> Usage
usages: Dict[str, Dict[str, Usage]] = field(default_factory=dict)
def handle_update_account():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
account = from_dict(Account, json.loads(args["account"]))
return dataclasses.asdict(service.update_account(auth, account))
return safe_call(perform) | null |
16,138 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
class Authentication:
class Account:
def handle_update_api_key():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
account = from_dict(Account, json.loads(args["account"]))
return dataclasses.asdict(service.rotate_api_key(auth, account))
return safe_call(perform) | null |
16,139 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
class Query:
def handle_query():
def perform(args):
global service
query = Query(**args)
return dataclasses.asdict(service.expand_query(query))
return safe_call(perform) | null |
16,140 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
def get_default_model_deployment_for_model(
model_name: str, warn_arg_deprecated: bool = False, ignore_deprecated: bool = False
) -> Optional[str]:
"""Returns a valid model deployment name corresponding to the given model arg.
This is used as a backwards compatibility layer for model names that are now moved to model deployments.
Example: "anthropic/claude-v1.3" => "anthropic/claude-v1.3"
Example: "meta/llama-7b" => "together/llama-7b"
The process to find a model deployment name is as follows:
1. If there is a model deployment with the same name as the model arg, use it.
2. If there is at least one deployment for the model, use the first one that is available.
3. If there are no deployments for the model, returns None.
This function will also try to find a model deployment name that is not deprecated.
If there are no non-deprecated deployments, it will return the first deployment (even if it's deprecated).
If ignore_deprecated is True, this function will return None if the model deployment is deprecated.
If warn_arg_deprecated is True, this function will print a warning if the model deployment name is not the same
as the model arg. This is to remind the user that the model name is deprecated and should be replaced with
the model deployment name (in their config).
Args:
model_arg: The model arg to convert to a model deployment name.
warn_arg_deprecated: Whether to print a warning if the model deployment name is not the same as the model arg.
ignore_deprecated: Whether to return None if the model deployment is deprecated.
"""
# If there is a model deployment with the same name as the model arg, use it.
if model_name in DEPLOYMENT_NAME_TO_MODEL_DEPLOYMENT:
deployment: ModelDeployment = DEPLOYMENT_NAME_TO_MODEL_DEPLOYMENT[model_name]
if deployment.deprecated and ignore_deprecated:
if warn_arg_deprecated:
hlog(f"WARNING: Model deployment {model_name} is deprecated")
return None
return deployment.name
# If there is at least one deployment for the model, use the first one that is available.
available_deployments: List[ModelDeployment] = [
deployment for deployment in ALL_MODEL_DEPLOYMENTS if deployment.model_name == model_name
]
if len(available_deployments) > 0:
available_deployment_names: List[str] = [deployment.name for deployment in available_deployments]
if warn_arg_deprecated:
hlog("WARNING: Model name is deprecated. Please use the model deployment name instead.")
hlog(f"Available model deployments for model {model_name}: {available_deployment_names}")
# Additionally, if there is a non-deprecated deployment, use it.
non_deprecated_deployments: List[ModelDeployment] = [
deployment for deployment in available_deployments if not deployment.deprecated
]
if len(non_deprecated_deployments) > 0:
chosen_deployment = non_deprecated_deployments[0]
# There are no non-deprecated deployments, so there are two options:
# 1. If we can return an empty string, return it. (no model deployment is available)
# 2. If we can't return an empty string, return the first deployment (even if it's deprecated).
elif ignore_deprecated:
return None
else:
chosen_deployment = available_deployments[0]
if warn_arg_deprecated:
hlog(f"WARNING: All model deployments for model {model_name} are deprecated.")
if warn_arg_deprecated:
hlog(
f"Choosing {chosen_deployment.name} (the first one) as "
f"the default model deployment for model {model_name}"
)
hlog("If you want to use a different model deployment, please specify it explicitly.")
return chosen_deployment.name
# Some models are added but have no deployments yet.
# In this case, we return None.
return None
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class Request:
"""
A `Request` specifies how to query a language model (given a prompt,
complete it). It is the unified representation for communicating with
various APIs (e.g., GPT-3, Jurassic).
"""
model_deployment: str = ""
"""Which model deployment to query -> Determines the Client.
Refers to a deployment in the model deployment registry."""
model: str = ""
"""Which model to use -> Determines the Engine.
Refers to a model metadata in the model registry."""
embedding: bool = False
"""Whether to query embedding instead of text response"""
prompt: str = ""
"""What prompt do condition the language model on"""
temperature: float = 1.0
"""Temperature parameter that governs diversity"""
num_completions: int = 1
"""Generate this many completions (by sampling from the model)"""
top_k_per_token: int = 1
"""Take this many highest probability candidates per token in the completion"""
max_tokens: int = 100
"""Maximum number of tokens to generate (per completion)"""
stop_sequences: List[str] = field(default_factory=list)
"""Stop generating once we hit one of these strings."""
echo_prompt: bool = False
"""Should `prompt` be included as a prefix of each completion? (e.g., for
evaluating perplexity of the prompt)"""
top_p: float = 1
"""Same from tokens that occupy this probability mass (nucleus sampling)"""
presence_penalty: float = 0
"""Penalize repetition (OpenAI & Writer only)"""
frequency_penalty: float = 0
"""Penalize repetition (OpenAI & Writer only)"""
random: Optional[str] = None
"""Used to control randomness. Expect different responses for the same
request but with different values for `random`."""
messages: Optional[List[Dict[str, str]]] = None
"""Used for chat models. (OpenAI only for now).
if messages is specified for a chat model, the prompt is ignored.
Otherwise, the client should convert the prompt into a message."""
multimodal_prompt: Optional[MultimediaObject] = None
"""Multimodal prompt with media objects interleaved (e.g., text, video, image, text, ...)"""
image_generation_parameters: Optional[ImageGenerationParameters] = None
"""Parameters for image generation."""
def model_host(self) -> str:
"""Returns the model host (referring to the deployment).
Not to be confused with the model creator organization (referring to the model).
Example: 'openai/davinci' => 'openai'
'together/bloom' => 'together'"""
return self.model_deployment.split("/")[0]
def model_engine(self) -> str:
"""Returns the model engine (referring to the model).
This is often the same as self.model_deploymentl.split("/")[1], but not always.
For example, one model could be served on several servers (each with a different model_deployment)
In that case we would have for example:
'aws/bloom-1', 'aws/bloom-2', 'aws/bloom-3' => 'bloom'
This is why we need to keep track of the model engine with the model metadata.
Example: 'openai/davinci' => 'davinci'"""
return self.model.split("/")[1]
def handle_request():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
request = Request(**json.loads(args["request"]))
# Hack to maintain reverse compatibility with clients with version <= 0.3.0.
# Clients with version <= 0.3.0 do not set model_deployment, but this is now
# required by Request.
if not request.model_deployment:
model_deployment = get_default_model_deployment_for_model(request.model)
if model_deployment is None:
raise ValueError(f"Unknown model '{request.model}'")
request = dataclasses.replace(request, model_deployment=model_deployment)
raw_response = dataclasses.asdict(service.make_request(auth, request))
# Hack to maintain reverse compatibility with clients with version <= 1.0.0.
# Clients with version <= 1.0.0 expect each token to contain a `top_logprobs`
# field of type dict.
for completion in raw_response["completions"]:
for token in completion["tokens"]:
token["top_logprobs"] = {}
return raw_response
return safe_call(perform) | null |
16,141 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class TokenizationRequest:
"""A `TokenizationRequest` specifies how to tokenize some text."""
# Text to tokenize
text: str
# Which tokenizer we should use
tokenizer: str
# Whether to encode tokens
#
# If true, the response's TokenizationToken should contain integers.
# Otherwise, the response's TokenizationToken should contain strings.
encode: bool = False
# Whether to truncate
truncation: bool = False
# Maximum length when encoding
max_length: int = 2048
def tokenizer_organization(self):
"""Example: 'huggingface/gpt2' => 'huggingface'"""
return self.tokenizer.split("/")[0]
def tokenizer_name(self):
"""Example: 'huggingface/gpt2' => 'gpt2'"""
return self.tokenizer.split("/")[1]
def handle_tokenization():
def perform(args):
global service
auth = Authentication(**json.loads(args["auth"]))
request = TokenizationRequest(**json.loads(args["request"]))
return dataclasses.asdict(service.tokenize(auth, request))
return safe_call(perform) | null |
16,142 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
try:
if to_json:
bottle.response.content_type = "application/json"
if bottle.request.method in ["DELETE", "POST", "PUT"]:
if bottle.request.content_type == "application/json":
params = bottle.request.json
else:
params = bottle.request.forms
else:
# bottle.request.query doesn't decode unicode properly, so do it ourselves
params = {}
if bottle.request.query_string != "":
for item in bottle.request.query_string.split("&"):
key, value = item.split("=", 1)
# Replaces "+" with " " and then unquote
params[key] = unquote_plus(value)
start_time = time.time()
result = func(params)
end_time = time.time()
result = json.dumps(result) if to_json else result
hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
return result
except Exception as e:
import traceback
if not isinstance(e, ValueError):
traceback.print_exc()
exc_type, exc_value, exc_traceback = sys.exc_info()
error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class DecodeRequest:
"""For HuggingFace tokenizers. How to decode tokens and convert it to text."""
# Tokens
tokens: List[int]
# Which tokenizer we should use
tokenizer: str
# Whether to clean up the tokenization spaces. Setting to False preserves the original text.
clean_up_tokenization_spaces: bool = False
def tokenizer_organization(self):
"""Example: 'huggingface/gpt2' => 'huggingface'"""
return self.tokenizer.split("/")[0]
def tokenizer_name(self):
"""Example: 'huggingface/gpt2' => 'gpt2'"""
return self.tokenizer.split("/")[1]
def handle_decode():
    """Bottle handler: decode token ids back into text via the proxy service."""
    def perform(args):
        global service
        # Authenticate the caller, then forward the decode request.
        auth = Authentication(**json.loads(args["auth"]))
        request = DecodeRequest(**json.loads(args["request"]))
        return dataclasses.asdict(service.decode(auth, request))
    # NOTE(review): the trailing "| null" is dataset-extraction residue, not code.
    return safe_call(perform) | null
16,143 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
    """Run `func(params)` for the current bottle request and return its result.
    Parameters are pulled from the JSON body, the form data, or the query
    string depending on the HTTP method and content type. Unless `to_json` is
    False, the result (or any error) is serialized to a JSON string.
    Exceptions are caught and returned as an {"error": ...} payload rather
    than propagated to bottle.
    """
    try:
        if to_json:
            bottle.response.content_type = "application/json"
        # Mutating methods carry their parameters in the request body.
        if bottle.request.method in ["DELETE", "POST", "PUT"]:
            if bottle.request.content_type == "application/json":
                params = bottle.request.json
            else:
                params = bottle.request.forms
        else:
            # bottle.request.query doesn't decode unicode properly, so do it ourselves
            params = {}
            if bottle.request.query_string != "":
                for item in bottle.request.query_string.split("&"):
                    key, value = item.split("=", 1)
                    # Replaces "+" with " " and then unquote
                    params[key] = unquote_plus(value)
        start_time = time.time()
        result = func(params)
        end_time = time.time()
        result = json.dumps(result) if to_json else result
        hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
        return result
    except Exception as e:
        import traceback
        # ValueError is treated as an expected, user-facing error; anything
        # else also gets a server-side traceback printed for debugging.
        if not isinstance(e, ValueError):
            traceback.print_exc()
        exc_type, exc_value, exc_traceback = sys.exc_info()
        error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
        return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class PerspectiveAPIRequest:
"""
A `PerspectiveAPIRequest` specifies how to calculate toxicity scores using PerspectiveAPI for a batch of text.
Note: not all attributes are supported across all the different languages.
For more information on the languages and attributes, see the following documentation:
https://developers.perspectiveapi.com/s/about-the-api-attributes-and-languages
"""
# Batch of text to calculate toxicity scores.
text_batch: List[str] = field(default_factory=list)
# Attributes to calculate scores for.
attributes: List[str] = field(default_factory=lambda: DEFAULT_ATTRIBUTES)
# Languages to use.
languages: List[str] = field(default_factory=lambda: ["en"])
def handle_toxicity_request():
    """Bottle handler: score a batch of text for toxicity via the proxy service."""
    def perform(args):
        global service
        # Authenticate the caller, then forward the scoring request.
        auth = Authentication(**json.loads(args["auth"]))
        request = PerspectiveAPIRequest(**json.loads(args["request"]))
        return dataclasses.asdict(service.get_toxicity_scores(auth, request))
    # NOTE(review): the trailing "| null" is dataset-extraction residue, not code.
    return safe_call(perform) | null
16,144 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
    """Run `func(params)` for the current bottle request and return its result.
    Parameters are pulled from the JSON body, the form data, or the query
    string depending on the HTTP method and content type. Unless `to_json` is
    False, the result (or any error) is serialized to a JSON string.
    Exceptions are caught and returned as an {"error": ...} payload rather
    than propagated to bottle.
    """
    try:
        if to_json:
            bottle.response.content_type = "application/json"
        # Mutating methods carry their parameters in the request body.
        if bottle.request.method in ["DELETE", "POST", "PUT"]:
            if bottle.request.content_type == "application/json":
                params = bottle.request.json
            else:
                params = bottle.request.forms
        else:
            # bottle.request.query doesn't decode unicode properly, so do it ourselves
            params = {}
            if bottle.request.query_string != "":
                for item in bottle.request.query_string.split("&"):
                    key, value = item.split("=", 1)
                    # Replaces "+" with " " and then unquote
                    params[key] = unquote_plus(value)
        start_time = time.time()
        result = func(params)
        end_time = time.time()
        result = json.dumps(result) if to_json else result
        hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
        return result
    except Exception as e:
        import traceback
        # ValueError is treated as an expected, user-facing error; anything
        # else also gets a server-side traceback printed for debugging.
        if not isinstance(e, ValueError):
            traceback.print_exc()
        exc_type, exc_value, exc_traceback = sys.exc_info()
        error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
        return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
class ModerationAPIRequest:
# Text to check against OpenAI's content policy
text: str
# From https://beta.openai.com/docs/api-reference/moderations/create,
# "the default is text-moderation-latest which will be automatically upgraded over time.
# This ensures you are always using our most accurate model. If you use text-moderation-stable,
# we will provide advanced notice before updating the model. Accuracy of text-moderation-stable
# may be slightly lower than for text-moderation-latest."
use_latest_model: bool = False
def handle_moderation_request():
    """Bottle handler: check text against the moderation API via the proxy service."""
    def perform(args):
        global service
        # Authenticate the caller, then forward the moderation request.
        auth = Authentication(**json.loads(args["auth"]))
        request = ModerationAPIRequest(**json.loads(args["request"]))
        return dataclasses.asdict(service.get_moderation_results(auth, request))
    # NOTE(review): the trailing "| null" is dataset-extraction residue, not code.
    return safe_call(perform) | null
16,145 | from urllib.parse import unquote_plus
import argparse
import dataclasses
import json
import os
import sys
import time
from dacite import from_dict
import bottle
from helm.benchmark.config_registry import (
register_configs_from_directory,
register_builtin_configs_from_helm_package,
)
from helm.benchmark.model_deployment_registry import get_default_model_deployment_for_model
from helm.common.authentication import Authentication
from helm.common.cache_backend_config import CacheBackendConfig, MongoCacheBackendConfig, SqliteCacheBackendConfig
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import Request
from helm.common.perspective_api_request import PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest
from helm.common.tokenization_request import TokenizationRequest, DecodeRequest
from helm.proxy.services.service import CACHE_DIR
from .accounts import Account
from .services.server_service import ServerService
from .query import Query
service: ServerService
def safe_call(func, to_json=True):
    """Run `func(params)` for the current bottle request and return its result.
    Parameters are pulled from the JSON body, the form data, or the query
    string depending on the HTTP method and content type. Unless `to_json` is
    False, the result (or any error) is serialized to a JSON string.
    Exceptions are caught and returned as an {"error": ...} payload rather
    than propagated to bottle.
    """
    try:
        if to_json:
            bottle.response.content_type = "application/json"
        # Mutating methods carry their parameters in the request body.
        if bottle.request.method in ["DELETE", "POST", "PUT"]:
            if bottle.request.content_type == "application/json":
                params = bottle.request.json
            else:
                params = bottle.request.forms
        else:
            # bottle.request.query doesn't decode unicode properly, so do it ourselves
            params = {}
            if bottle.request.query_string != "":
                for item in bottle.request.query_string.split("&"):
                    key, value = item.split("=", 1)
                    # Replaces "+" with " " and then unquote
                    params[key] = unquote_plus(value)
        start_time = time.time()
        result = func(params)
        end_time = time.time()
        result = json.dumps(result) if to_json else result
        hlog("REQUEST {}: {} seconds, {} bytes".format(bottle.request, end_time - start_time, len(result)))
        return result
    except Exception as e:
        import traceback
        # ValueError is treated as an expected, user-facing error; anything
        # else also gets a server-side traceback printed for debugging.
        if not isinstance(e, ValueError):
            traceback.print_exc()
        exc_type, exc_value, exc_traceback = sys.exc_info()
        error_str = "EXCEPTION: " + str(e) + "\n" + "\n".join(traceback.format_tb(exc_traceback))
        return json.dumps({"error": error_str}) if to_json else error_str
class Authentication:
"""Needed to authenticate with the proxy server to make requests, etc.."""
api_key: str
def handle_shutdown():
    """Bottle handler: shut down the proxy server (caller must authenticate)."""
    def perform(args):
        global service
        # Only validates auth; shutdown returns nothing.
        auth = Authentication(**json.loads(args["auth"]))
        service.shutdown(auth)
    # NOTE(review): the trailing "| null" is dataset-extraction residue, not code.
    return safe_call(perform) | null
16,146 | import csv
from datetime import datetime
import os
from threading import Lock
from typing import Dict, List, Sequence
import textwrap
import re
from helm.common.critique_request import CritiqueQuestionTemplate, CritiqueRequest, CritiqueTaskTemplate, QuestionType
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.proxy.critique.mechanical_turk_utils import replace_emoji_characters
def _indent_to_level(text: str, level: int) -> str:
"""Helper for indenting XML to the same level as the external template."""
return textwrap.indent(text, " " * 4 * level).lstrip()
def _format_template_tags(raw_text: str) -> str:
"""Convert from Surge AI template tag format to Mechanical Turk template tag format.
{{field}} -> ${field}"""
return re.sub(r"{{([^{}]+)}}", "${\\1}", raw_text)
def _render_question_crowd_html(question_template: CritiqueQuestionTemplate) -> str:
    """Render the Crowd HTML for a question.
    Dispatches on the question type to build the matching <crowd-*> input
    element, then wraps it together with the question text (with template
    tags converted to Mechanical Turk syntax) in a preformatted paragraph.
    Raises:
        ValueError: if the question type is not one of the supported types.
    """
    question_input_crowd_html: str
    if question_template.question_type == QuestionType.FREE_RESPONSE:
        question_input_crowd_html = textwrap.dedent(
            f"""\
            <crowd-text-area name="{question_template.name}" required></crowd-text-area>"""
        )
    elif question_template.question_type == QuestionType.MULTIPLE_CHOICE:
        question_input_crowd_html = _render_multiple_choice_options_crowd_html(
            question_template.name, question_template.options
        )
    elif question_template.question_type == QuestionType.CHECKBOX:
        question_input_crowd_html = _render_checkbox_options_crowd_html(
            question_template.name, question_template.options
        )
    else:
        # Fail fast with a clear error instead of an UnboundLocalError below;
        # mirrors the unknown-question-type handling in the results importer.
        raise ValueError(f"Unknown question_type: {question_template.question_type}")
    return textwrap.dedent(
        f"""\
        <p style=\"white-space: pre-wrap;\">
        {_format_template_tags(question_template.text)}
        </p>
        {_indent_to_level(question_input_crowd_html, 2)}"""
    )
class CritiqueTaskTemplate:
"""The template for a critique task."""
name: str
"""Name of the template."""
instructions: str
"""HTML-formatted instructions that will be displayed before all the questions.
Can contain placeholders like {{placeholder}} that will be interpolated using the fields in CritiqueRequest."""
num_respondents: int
"""Requested number of respondents."""
questions: List[CritiqueQuestionTemplate]
"""List of templates for the questions."""
The provided code snippet includes necessary dependencies for implementing the `_render_template_crowd_html` function. Write a Python function `def _render_template_crowd_html(task_template: CritiqueTaskTemplate) -> str` to solve the following problem:
Render the Crowd HTML for the template.
Here is the function:
def _render_template_crowd_html(task_template: CritiqueTaskTemplate) -> str:
    """Render the Crowd HTML for the template.
    The page is: validation <script>, then a <crowd-form> holding the
    instructions and every rendered question, separated by <hr> dividers.
    """
    # Client-side validation injected verbatim into the page.
    validation_crowd_html = textwrap.dedent(
        """\
        <script>
        // Validates that an option is selected for each radio group
        // because Mechanical Turk Crowd HTML does not do so automatically.
        // Source: https://stackoverflow.com/a/71064873
        function validateForm() {
            var valid = true;
            var radioGroups = document.querySelectorAll("crowd-radio-group");
            for (var i = 0; i < radioGroups.length; i++) {
                var validGroup = false;
                var radioButtons = radioGroups[i].children;
                for (var j = 0; j < radioButtons.length; j++) {
                    validGroup = validGroup || radioButtons[j].checked;
                }
                valid = valid && validGroup;
            }
            return valid;
        }
        document.addEventListener("DOMContentLoaded", function(event) {
            document.querySelector('crowd-form').onsubmit = function(e) {
                if (!validateForm()) {
                    alert("Please answer all the questions in order to submit.");
                    e.preventDefault();
                }
            }
        });
        </script>"""
    )
    # Instructions may contain {{placeholders}}; convert them to ${...} syntax.
    instructions_crowd_html = (
        f'<p style="white-space: pre-wrap;">{_format_template_tags(task_template.instructions)}</p>'
    )
    divider_html = "\n<hr>"
    questions_crowd_html = "\n<hr>\n".join(
        [_render_question_crowd_html(question) for question in task_template.questions]
    )
    # NOTE(review): the trailing "| Render the Crowd HTML..." on the last line
    # is dataset-extraction residue (the docstring column), not code.
    return textwrap.dedent(
        f"""\
        <script src="https://assets.crowd.aws/crowd-html-elements.js"></script>
        {_indent_to_level(validation_crowd_html, 2)}
        <crowd-form answer-format="flatten-objects">
            {_indent_to_level(instructions_crowd_html, 3)}
            {_indent_to_level(divider_html, 3)}
            {_indent_to_level(questions_crowd_html, 3)}
            {_indent_to_level(divider_html, 3)}
        </crowd-form>"""
    ) | Render the Crowd HTML for the template.
16,147 | import csv
from datetime import datetime
import os
from threading import Lock
from typing import Dict, List, Sequence
import textwrap
import re
from helm.common.critique_request import CritiqueQuestionTemplate, CritiqueRequest, CritiqueTaskTemplate, QuestionType
from helm.common.general import ensure_directory_exists
from helm.common.hierarchical_logger import hlog
from helm.proxy.critique.mechanical_turk_utils import replace_emoji_characters
class _MechanicalTurkCritiqueRequestExporter:
    """Exports critique requests.
    - The requests will be exported to mturk/{template.name}/requests_{timestamp}.csv
    - The template Crowd HTML will be exported to mturk/{template.name}/layout_{timestamp}.html"""
    def __init__(self, template: CritiqueTaskTemplate):
        self._template: CritiqueTaskTemplate = template
        # Serializes lazy initialization and CSV appends across threads.
        self._lock: Lock = Lock()
        # NOTE(review): duplicates _get_directory_path() — keep the two in sync.
        self._directory_path = os.path.join("mturk", self._template.name)
        # Timestamped filenames so repeated exports never overwrite each other.
        timestamp = datetime.now().isoformat()
        self._template_filename = os.path.join(self._directory_path, f"layout_{timestamp}.html")
        self._requests_filename = os.path.join(self._get_directory_path(), f"requests_{timestamp}.csv")
        # Protected by `_lock`.
        # Populated by `_initialize()`.
        self._field_names: Sequence[str] = []
    def _get_directory_path(self):
        """Directory that holds this template's exported files."""
        # TODO: Make this configurable.
        return os.path.join("mturk", self._template.name)
    def _initialize(self, field_names: Sequence[str]) -> None:
        """Write the Crowd HTML layout and the CSV header row (first export only)."""
        # self._lock must be held when calling this.
        ensure_directory_exists(self._get_directory_path())
        hlog(f"Exporting Mechanical Turk layout to {self._template_filename}")
        with open(self._template_filename, "w") as f:
            f.write(_render_template_crowd_html(self._template))
        hlog(f"Exporting Mechanical Turk requests to {self._requests_filename}")
        with open(self._requests_filename, "w") as f:
            self._field_names = field_names
            dict_writer: csv.DictWriter = csv.DictWriter(f, fieldnames=field_names)
            dict_writer.writeheader()
    def export(self, fields: Dict[str, str]):
        """Export a single critique request.
        - The request will be written as a row to mturk/{template.name}/requests_{timestamp}.csv
        - The template Crowd HTML will be written to mturk/{template.name}/layout_{timestamp}.html
        when this is called for the first time"""
        with self._lock:
            if not self._field_names:
                self._initialize(list(fields.keys()))
            assert self._field_names
            # Unfortunately, we have to re-open and close the file every time.
            # TODO: Support exporting batches of requests.
            with open(self._requests_filename, "a") as f:
                dict_writer = csv.DictWriter(f, fieldnames=self._field_names)
                dict_writer.writerow(fields)
_exporters_lock: Lock = Lock()
_exporters: Dict[str, _MechanicalTurkCritiqueRequestExporter] = {}
class CritiqueTaskTemplate:
"""The template for a critique task."""
name: str
"""Name of the template."""
instructions: str
"""HTML-formatted instructions that will be displayed before all the questions.
Can contain placeholders like {{placeholder}} that will be interpolated using the fields in CritiqueRequest."""
num_respondents: int
"""Requested number of respondents."""
questions: List[CritiqueQuestionTemplate]
"""List of templates for the questions."""
class CritiqueRequest:
"""Request for a critique."""
template: CritiqueTaskTemplate
"""Template for the instructions and questions.
The fields will be interpolated into the placeholders in this template."""
fields: Dict[str, str]
"""Fields to be interpolated into the template.
Mapping of placeholder names to the field value to be interpolated into the placeholders in the template."""
def replace_emoji_characters(s: str) -> str:
    """Replace 4-byte Unicode characters with HTML character references.
    Takes a Unicode string that may contain 4-byte characters, e.g.
    "hi😀😀", and replaces each such character with its HTML entity,
    e.g. "hi&#128512;&#128512;".
    Args:
        s (Unicode string): String that may contain emojis e.g. "hi😀😀"
    Returns:
        Unicode string with all 4-byte Unicode characters replaced with
        HTML entities e.g. "hi&#128512;&#128512;"
    """
    def _encode(match: "Match") -> str:
        # xmlcharrefreplace turns each non-ASCII code point into &#NNNN;
        return match.group().encode("ascii", "xmlcharrefreplace").decode()
    # Pick the pattern that matches characters outside the BMP for this build.
    # The procedure for stripping Emoji characters is based on this
    # StackOverflow post:
    # http://stackoverflow.com/questions/12636489/python-convert-4-byte-char-to-avoid-mysql-error-incorrect-string-value
    if sys.maxunicode == 1114111:
        # Python was built with '--enable-unicode=ucs4'
        pattern = re.compile("[\U00010000-\U0010ffff]")
    elif sys.maxunicode == 65535:
        # Python was built with '--enable-unicode=ucs2'
        pattern = re.compile("[\uD800-\uDBFF][\uDC00-\uDFFF]")
    else:
        raise UnicodeError("Unable to determine if Python was built using UCS-2 or UCS-4")
    return pattern.sub(_encode, s)
The provided code snippet includes necessary dependencies for implementing the `export_request` function. Write a Python function `def export_request(request: CritiqueRequest)` to solve the following problem:
Exports critique requests. After the calling this, the user should manually upload the generated CSV and Crowd HTML files to the Mechanical Turk web UI. - The requests will be exported to mturk/{template.name}/requests_{timestamp}.csv - The template Crowd HTML will be exported to mturk/{template.name}/layout_{timestamp}.html
Here is the function:
def export_request(request: CritiqueRequest):
    """Exports critique requests.
    After the calling this, the user should manually upload the generated CSV
    and Crowd HTML files to the Mechanical Turk web UI.
    - The requests will be exported to mturk/{template.name}/requests_{timestamp}.csv
    - The template Crowd HTML will be exported to mturk/{template.name}/layout_{timestamp}.html"""
    template: CritiqueTaskTemplate = request.template
    # Lazily create (at most) one exporter per template, under the module-level lock.
    with _exporters_lock:
        if template.name not in _exporters:
            _exporters[template.name] = _MechanicalTurkCritiqueRequestExporter(template)
    # Encode 4-byte characters as HTML entities before writing to the CSV.
    encoded_fields = {
        field_name: replace_emoji_characters(field_value) for field_name, field_value in request.fields.items()
    }
    # NOTE(review): the trailing "| Exports critique requests..." below is
    # dataset-extraction residue (the docstring column), not code.
    _exporters[template.name].export(encoded_fields) | Exports critique requests. After the calling this, the user should manually upload the generated CSV and Crowd HTML files to the Mechanical Turk web UI. - The requests will be exported to mturk/{template.name}/requests_{timestamp}.csv - The template Crowd HTML will be exported to mturk/{template.name}/layout_{timestamp}.html
16,148 | from collections import defaultdict
import csv
import os
from threading import Lock
from typing import Dict, List, Optional, Tuple, Union
import re
import sys
from helm.common.critique_request import (
CritiqueRequest,
CritiqueResponse,
CritiqueTaskTemplate,
QuestionType,
CritiqueRequestResult,
)
from helm.common.hierarchical_logger import hlog
from helm.proxy.critique.mechanical_turk_utils import replace_emoji_characters
class _MechanicalTurkRequestImporter:
    """Exports critique request results.
    The request results will be imported from all files matching
    mturk/{template.name}/Batch_{batch_number}_batch_results.csv"""
    def __init__(self, template: CritiqueTaskTemplate):
        self._template: CritiqueTaskTemplate = template
        # Maps a request's (sorted) input fields to its aggregated responses.
        self._request_key_to_results: Dict[_CritiqueRequestKey, CritiqueRequestResult] = {}
    def _get_directory_path(self):
        """Directory that holds this template's downloaded result files."""
        return os.path.join("mturk", self._template.name)
    def _make_request_key(self, fields: Dict[str, str]) -> _CritiqueRequestKey:
        """Make a request key from request fields."""
        # Sort so that key equality is independent of field insertion order.
        return tuple((k, v) for k, v in sorted(fields.items()))
    def _import_from_file_path(self, file_path: str) -> None:
        """Import all rows from the CSV and store them in `self._request_key_to_results`."""
        request_key_to_responses: Dict[_CritiqueRequestKey, List[CritiqueResponse]] = defaultdict(list)
        with open(file_path) as f:
            dict_reader = csv.DictReader(f)
            for row in dict_reader:
                # Each CSV row is one worker's response to one request.
                request_key = self._make_request_key(self._get_fields_from_row(row))
                response = self._get_response_from_row(row)
                request_key_to_responses[request_key].append(response)
        for request_key, responses in request_key_to_responses.items():
            self._request_key_to_results[request_key] = CritiqueRequestResult(responses)
    def _get_fields_from_row(self, row: Dict[str, str]) -> Dict[str, str]:
        """Extract the original request fields (the "Input.*" columns) from a result row."""
        fields = {}
        for key, value in row.items():
            if key.startswith("Input."):
                field_key = key[len("Input.") :]
                fields[field_key] = value
        return fields
    def _get_response_from_row(self, row: Dict[str, str]) -> CritiqueResponse:
        """Decode one worker's answers (the "Answer.*" columns) into a CritiqueResponse."""
        answers: Dict[str, Union[str, List[str]]] = {}
        for question in self._template.questions:
            if question.question_type == QuestionType.MULTIPLE_CHOICE:
                # The single selected option has "Answer.{name}.{i}.on" == "true".
                for option_index, option in enumerate(question.options):
                    raw_answer = row[f"Answer.{question.name}.{option_index}.on"]
                    if raw_answer == "true":
                        answers[question.name] = option
                        break
            elif question.question_type == QuestionType.CHECKBOX:
                # Collect every checked option.
                checkbox_options: List[str] = []
                for option_index, option in enumerate(question.options):
                    raw_answer = row[f"Answer.{question.name}.{option_index}.on"]
                    if raw_answer == "true":
                        checkbox_options.append(option)
                answers[question.name] = checkbox_options
            elif question.question_type == QuestionType.FREE_RESPONSE:
                answers[question.name] = row[f"Answer.{question.name}"]
            else:
                raise ValueError(f"Unknown question_type: {question.question_type}")
        return CritiqueResponse(
            id=row["AssignmentId"],
            respondent_id=row["WorkerId"],
            answers=answers,
        )
    def initialize(self) -> None:
        """Initialize the instance.
        Thread-hostile.
        Must be called exactly once per instance.
        Must be called before `import_request_result()`."""
        # A missing results directory means no batches have been downloaded yet.
        if not os.path.exists(self._get_directory_path()) or not os.path.isdir(self._get_directory_path()):
            return
        for file_name in os.listdir(self._get_directory_path()):
            if re.match("Batch_\\d+_batch_results.csv", file_name):
                file_path = os.path.join(self._get_directory_path(), file_name)
                hlog(f"Importing Mechanical Turk results from {file_path}")
                self._import_from_file_path(file_path)
    def import_request_result(self, fields: Dict[str, str]) -> Optional[CritiqueRequestResult]:
        """Import the request result.
        `initialize()` must be called before calling this."""
        return self._request_key_to_results.get(self._make_request_key(fields))
_importers_lock: Lock = Lock()
_importer: Dict[str, _MechanicalTurkRequestImporter] = {}
class CritiqueTaskTemplate:
"""The template for a critique task."""
name: str
"""Name of the template."""
instructions: str
"""HTML-formatted instructions that will be displayed before all the questions.
Can contain placeholders like {{placeholder}} that will be interpolated using the fields in CritiqueRequest."""
num_respondents: int
"""Requested number of respondents."""
questions: List[CritiqueQuestionTemplate]
"""List of templates for the questions."""
class CritiqueRequest:
"""Request for a critique."""
template: CritiqueTaskTemplate
"""Template for the instructions and questions.
The fields will be interpolated into the placeholders in this template."""
fields: Dict[str, str]
"""Fields to be interpolated into the template.
Mapping of placeholder names to the field value to be interpolated into the placeholders in the template."""
class CritiqueRequestResult:
"""List of answers from each respondent."""
responses: List[CritiqueResponse]
"""List of respondents' responses."""
def replace_emoji_characters(s: str) -> str:
    """Replace 4-byte characters with HTML spans with bytes as JSON array
    This function takes a Unicode string that may contain 4-byte Unicode
    characters, e.g. "hi😀😀", and replaces each 4-byte character with an
    HTML span with the 4 bytes encoded as a HTML entity,
    e.g. "hi&#128512;&#128512;"
    Args:
        s (Unicode string): String that may contain emojis e.g. "hi😀😀"
    Returns:
        Unicode string with all 4-byte Unicode characters in the source
        string replaced with HTML entities e.g. "hi&#128512;&#128512;"
    """
    def _emoji_match_to_span(emoji_match: Match) -> str:
        """
        Args:
            emoji_match (Match): match containing a single group
            with a single emoji e.g. "😀"
        Returns:
            Unicode string with the emoji encoded a HTML entity e.g. "&#128512;"
        """
        # xmlcharrefreplace encodes each non-ASCII code point as &#NNNN;
        return emoji_match.group().encode("ascii", "xmlcharrefreplace").decode()
    # The procedure for stripping Emoji characters is based on this
    # StackOverflow post:
    # http://stackoverflow.com/questions/12636489/python-convert-4-byte-char-to-avoid-mysql-error-incorrect-string-value
    if sys.maxunicode == 1114111:
        # Python was built with '--enable-unicode=ucs4'
        highpoints = re.compile("[\U00010000-\U0010ffff]")
    elif sys.maxunicode == 65535:
        # Python was built with '--enable-unicode=ucs2'
        highpoints = re.compile("[\uD800-\uDBFF][\uDC00-\uDFFF]")
    else:
        raise UnicodeError("Unable to determine if Python was built using UCS-2 or UCS-4")
    return highpoints.sub(_emoji_match_to_span, s)
The provided code snippet includes necessary dependencies for implementing the `import_request_result` function. Write a Python function `def import_request_result(request: CritiqueRequest) -> Optional[CritiqueRequestResult]` to solve the following problem:
Imports a request result from CSV files. Before calling this, the user should manually download the response CSV files from the Mechanical Turk web UI and place them at turk/{template.name}/Batch_{batch_number}_batch_results.csv
Here is the function:
def import_request_result(request: CritiqueRequest) -> Optional[CritiqueRequestResult]:
    """Imports a request result from CSV files.
    Before calling this, the user should manually download the response CSV files from the
    Mechanical Turk web UI and place them at
    turk/{template.name}/Batch_{batch_number}_batch_results.csv"""
    template: CritiqueTaskTemplate = request.template
    # Lazily create and initialize one importer per template, under the module lock.
    with _importers_lock:
        if template.name not in _importer:
            _importer[template.name] = _MechanicalTurkRequestImporter(template)
            _importer[template.name].initialize()
    # Fields were emoji-encoded on export; encode the same way so lookup keys match.
    encoded_fields = {
        field_name: replace_emoji_characters(field_value) for field_name, field_value in request.fields.items()
    }
    # NOTE(review): the trailing "| Imports a request result..." below is
    # dataset-extraction residue (the docstring column), not code.
    return _importer[template.name].import_request_result(encoded_fields) | Imports a request result from CSV files. Before calling this, the user should manually download the response CSV files from the Mechanical Turk web UI and place them at turk/{template.name}/Batch_{batch_number}_batch_results.csv
16,149 | from hashlib import sha512
import json
import threading
from typing import Dict, List, Union, Set, Any
from cattrs import unstructure
from helm.common.hierarchical_logger import hlog
from helm.common.cache import Cache, CacheConfig
from helm.common.critique_request import (
CritiqueQuestionTemplate,
CritiqueRequest,
CritiqueRequestResult,
CritiqueTaskTemplate,
CritiqueResponse,
)
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.proxy.critique.critique_client import CritiqueClient
try:
import scaleapi
from scaleapi.tasks import TaskType, TaskStatus
from scaleapi.exceptions import ScaleDuplicateResource
except ModuleNotFoundError as e:
handle_module_not_found_error(e, ["human-evaluation"])
class ScaleCritiqueClientError(Exception):
    """Raised for Scale critique client misconfigurations (e.g. mismatched project or task type)."""
    pass
def _ensure_project_exists(client: scaleapi.ScaleClient, project_name: str):
    """Ensure that the Scale project exists, creating it if necessary."""
    with _scale_projects_lock:
        if project_name not in _scale_projects:
            try:
                # Optimistically create; Scale raises ScaleDuplicateResource if it exists.
                client.create_project(
                    project_name=project_name,
                    task_type=TaskType.TextCollection,
                    rapid=True,
                    params={},
                )
                hlog(f"Created new Scale project: {project_name}")
                hlog(
                    "IMPORTANT: Run scripts/scale/create_and_setup_project.py to set up a "
                    "calibration batch in your project."
                )
            except ScaleDuplicateResource:
                # Reuse the existing project, but only if its task type matches.
                existing_project = client.get_project(project_name=project_name)
                if existing_project.type != TaskType.TextCollection.value:
                    raise ScaleCritiqueClientError(
                        f"The existing project with name '{project_name}' has a task type of "
                        f"'{existing_project.type}' instead of '{TaskType.TextCollection.value}'. "
                        "Rename the existing batch to a different name to allow HELM to create a new project "
                        "with the correct task type."
                    )
                hlog(f"Reusing existing Scale project: {project_name}")
            # Remember the project so later calls skip the API round trip.
            _scale_projects.add(project_name)
_scale_batches: Set[str] = set()
_scale_batches_lock: threading.Lock = threading.Lock()
def hlog(x: Any) -> None:
    """Log `x` via the module-level `singleton` logger."""
    singleton.log(x)
The provided code snippet includes necessary dependencies for implementing the `_ensure_batch_exists` function. Write a Python function `def _ensure_batch_exists(client: scaleapi.ScaleClient, project_name: str, batch_name: str) -> None` to solve the following problem:
Ensure that the Scale batch exists, creating it if necessary.
Here is the function:
def _ensure_batch_exists(client: scaleapi.ScaleClient, project_name: str, batch_name: str) -> None:
    """Ensure that the Scale batch exists, creating it if necessary."""
    # The batch's project must exist before the batch can.
    _ensure_project_exists(client, project_name)
    with _scale_batches_lock:
        if batch_name not in _scale_batches:
            try:
                # Optimistically create; Scale raises ScaleDuplicateResource if it exists.
                client.create_batch(
                    project=project_name,
                    batch_name=batch_name,
                    calibration_batch=False,
                    self_label_batch=False,
                )
                hlog(f"Created new Scale batch: {batch_name}")
            except ScaleDuplicateResource:
                existing_batch = client.get_batch(batch_name=batch_name)
                # Refuse to reuse a batch that belongs to a different project.
                if existing_batch.project != project_name:
                    raise ScaleCritiqueClientError(
                        f"A batch named '{batch_name}' already exists in a project '{existing_batch.project}' "
                        f"but credentials.conf has scaleProject set to a different project '{project_name}'. "
                        "Either rename the existing batch to a different name to allow HELM to create a new batch, or "
                        f"change scaleProject in credentials.conf to '{existing_batch.project}'. {existing_batch}"
                    )
                if existing_batch.status != "staging":
                    hlog("Important: The batch is not in staging status. New tasks cannot be added to it.")
                hlog(f"Reusing existing Scale batch: {batch_name}")
            # NOTE(review): the trailing "| Ensure that..." below is
            # dataset-extraction residue (the docstring column), not code.
            _scale_batches.add(batch_name) | Ensure that the Scale batch exists, creating it if necessary.
16,150 | import argparse
import json
import requests
import urllib.parse
from dataclasses import asdict
from typing import Any, List, Optional
from helm.common.cache import CacheConfig
from helm.common.cache_backend_config import BlackHoleCacheBackendConfig
from helm.common.authentication import Authentication
from helm.common.moderations_api_request import ModerationAPIRequest, ModerationAPIRequestResult
from helm.common.critique_request import CritiqueRequest, CritiqueRequestResult
from helm.common.nudity_check_request import NudityCheckRequest, NudityCheckResult
from helm.common.file_upload_request import FileUploadRequest, FileUploadResult
from helm.common.perspective_api_request import PerspectiveAPIRequest, PerspectiveAPIRequestResult
from helm.common.clip_score_request import CLIPScoreRequest, CLIPScoreResult
from helm.common.tokenization_request import (
WindowServiceInfo,
TokenizationRequest,
TokenizationRequestResult,
DecodeRequestResult,
DecodeRequest,
)
from helm.common.request import Request, RequestResult
from dacite import from_dict
from helm.proxy.accounts import Account
from helm.proxy.query import Query, QueryResult
from .service import Service, GeneralInfo
The provided code snippet includes necessary dependencies for implementing the `add_service_args` function. Write a Python function `def add_service_args(parser: argparse.ArgumentParser)` to solve the following problem:
Add command-line arguments to enable command-line utilities to specify how to connect to a remote server.
Here is the function:
def add_service_args(parser: argparse.ArgumentParser):
    """Add command-line arguments to enable command-line utilities to specify how to connect to a remote server."""
    # (flag, default, help) triples; every flag is a plain string option.
    option_specs = (
        ("--server-url", None, "URL of proxy server to connect to"),
        ("--api-key-path", "proxy_api_key.txt", "Path to a file containing the API key"),
    )
    for flag, default_value, help_text in option_specs:
        parser.add_argument(flag, type=str, default=default_value, help=help_text)
16,151 | import argparse
import json
import requests
import urllib.parse
from dataclasses import asdict
from typing import Any, List, Optional
from helm.common.cache import CacheConfig
from helm.common.cache_backend_config import BlackHoleCacheBackendConfig
from helm.common.authentication import Authentication
from helm.common.moderations_api_request import ModerationAPIRequest, ModerationAPIRequestResult
from helm.common.critique_request import CritiqueRequest, CritiqueRequestResult
from helm.common.nudity_check_request import NudityCheckRequest, NudityCheckResult
from helm.common.file_upload_request import FileUploadRequest, FileUploadResult
from helm.common.perspective_api_request import PerspectiveAPIRequest, PerspectiveAPIRequestResult
from helm.common.clip_score_request import CLIPScoreRequest, CLIPScoreResult
from helm.common.tokenization_request import (
WindowServiceInfo,
TokenizationRequest,
TokenizationRequestResult,
DecodeRequestResult,
DecodeRequest,
)
from helm.common.request import Request, RequestResult
from dacite import from_dict
from helm.proxy.accounts import Account
from helm.proxy.query import Query, QueryResult
from .service import Service, GeneralInfo
class Authentication:
    # NOTE(review): `create_authentication` takes parsed CLI `args` rather than
    # `self`, so it behaves like a free function despite being nested in this
    # class — presumably an extraction artifact or a missing @staticmethod;
    # confirm against the original module.
    def create_authentication(args) -> Authentication:
        # Read the API key from the file named by --api-key-path, stripping
        # surrounding whitespace/newlines.
        with open(args.api_key_path) as f:
            api_key = f.read().strip()
        return Authentication(api_key=api_key)
16,152 | import mako.template
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, List, Tuple, Any
from helm.common.general import parse_hocon
from helm.common.critique_request import CritiqueRequest, CritiqueRequestResult
from helm.common.clip_score_request import CLIPScoreRequest, CLIPScoreResult
from helm.common.file_upload_request import FileUploadResult, FileUploadRequest
from helm.common.nudity_check_request import NudityCheckRequest, NudityCheckResult
from helm.common.perspective_api_request import PerspectiveAPIRequestResult, PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest, ModerationAPIRequestResult
from helm.common.tokenization_request import (
WindowServiceInfo,
TokenizationRequest,
TokenizationRequestResult,
DecodeRequest,
DecodeRequestResult,
)
from helm.common.request import Request, RequestResult
from helm.benchmark.model_metadata_registry import ModelMetadata
from helm.proxy.query import Query, QueryResult
from helm.proxy.accounts import Authentication, Account
from helm.common.cache import CacheConfig
# Hard cap on the number of environment combinations produced by `expand_environments`.
MAX_EXPANSION = 1000
The provided code snippet includes necessary dependencies for implementing the `expand_environments` function. Write a Python function `def expand_environments(environments: Dict[str, List[str]])` to solve the following problem:
`environments` is a map from variable names to a list of strings. Return: a list of environments, where for each variable, we choose one of its strings.
Here is the function:
def expand_environments(environments: Dict[str, List[str]]):
    """
    `environments` maps each variable name to a list of candidate strings.
    Return a list of environments (dicts), one per combination in which each
    variable is bound to one of its strings, capped at MAX_EXPANSION entries.
    """
    variable_names = list(environments.keys())
    candidate_lists = [environments[name] for name in variable_names]
    expanded: List[Dict[str, str]] = []

    def build(depth: int, chosen: List[str]) -> None:
        # Stop descending as soon as the global cap is reached.
        if len(expanded) >= MAX_EXPANSION:
            return
        if depth == len(variable_names):
            expanded.append(dict(zip(variable_names, chosen)))
            return
        for candidate in candidate_lists[depth]:
            build(depth + 1, chosen + [candidate])

    build(0, [])
    return expanded
16,153 | import mako.template
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, List, Tuple, Any
from helm.common.general import parse_hocon
from helm.common.critique_request import CritiqueRequest, CritiqueRequestResult
from helm.common.clip_score_request import CLIPScoreRequest, CLIPScoreResult
from helm.common.file_upload_request import FileUploadResult, FileUploadRequest
from helm.common.nudity_check_request import NudityCheckRequest, NudityCheckResult
from helm.common.perspective_api_request import PerspectiveAPIRequestResult, PerspectiveAPIRequest
from helm.common.moderations_api_request import ModerationAPIRequest, ModerationAPIRequestResult
from helm.common.tokenization_request import (
WindowServiceInfo,
TokenizationRequest,
TokenizationRequestResult,
DecodeRequest,
DecodeRequestResult,
)
from helm.common.request import Request, RequestResult
from helm.benchmark.model_metadata_registry import ModelMetadata
from helm.proxy.query import Query, QueryResult
from helm.proxy.accounts import Authentication, Account
from helm.common.cache import CacheConfig
def substitute_text(text: str, environment: Dict[str, str]) -> str:
    """
    Render `text` as a Mako template with `environment` as its variables.

    Example:
        text = "Hello ${name}"
        environment = {"name": "Sue"}
    Return "Hello Sue"
    """
    # Mako substitutes ${...} expressions; plain {name} braces are left as-is.
    return mako.template.Template(text).render(**environment)
def parse_hocon(text: str):
    """Parse `text` (in HOCON format) into a dict-like object (a pyhocon ConfigTree)."""
    # NOTE(review): relies on `pyhocon` being imported at module level — the
    # import is not visible in this excerpt; confirm it exists.
    return pyhocon.ConfigFactory.parse_string(text)
class Request:
    """
    A `Request` specifies how to query a language model (given a prompt,
    complete it). It is the unified representation for communicating with
    various APIs (e.g., GPT-3, Jurassic).

    NOTE(review): the fields use `field(default_factory=...)`, which only works
    on a dataclass — presumably a `@dataclass` decorator is applied where this
    class is declared; confirm in the original module.
    """
    model_deployment: str = ""
    """Which model deployment to query -> Determines the Client.
    Refers to a deployment in the model deployment registry."""
    model: str = ""
    """Which model to use -> Determines the Engine.
    Refers to a model metadata in the model registry."""
    embedding: bool = False
    """Whether to query embedding instead of text response"""
    prompt: str = ""
    """What prompt do condition the language model on"""
    temperature: float = 1.0
    """Temperature parameter that governs diversity"""
    num_completions: int = 1
    """Generate this many completions (by sampling from the model)"""
    top_k_per_token: int = 1
    """Take this many highest probability candidates per token in the completion"""
    max_tokens: int = 100
    """Maximum number of tokens to generate (per completion)"""
    stop_sequences: List[str] = field(default_factory=list)
    """Stop generating once we hit one of these strings."""
    echo_prompt: bool = False
    """Should `prompt` be included as a prefix of each completion? (e.g., for
    evaluating perplexity of the prompt)"""
    top_p: float = 1
    """Same from tokens that occupy this probability mass (nucleus sampling)"""
    presence_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""
    frequency_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""
    random: Optional[str] = None
    """Used to control randomness. Expect different responses for the same
    request but with different values for `random`."""
    messages: Optional[List[Dict[str, str]]] = None
    """Used for chat models. (OpenAI only for now).
    if messages is specified for a chat model, the prompt is ignored.
    Otherwise, the client should convert the prompt into a message."""
    multimodal_prompt: Optional[MultimediaObject] = None
    """Multimodal prompt with media objects interleaved (e.g., text, video, image, text, ...)"""
    image_generation_parameters: Optional[ImageGenerationParameters] = None
    """Parameters for image generation."""
    def model_host(self) -> str:
        """Returns the model host (referring to the deployment).
        Not to be confused with the model creator organization (referring to the model).
        Example: 'openai/davinci' => 'openai'
                 'together/bloom' => 'together'"""
        return self.model_deployment.split("/")[0]
    def model_engine(self) -> str:
        """Returns the model engine (referring to the model).
        This is often the same as self.model_deployment.split("/")[1], but not always.
        For example, one model could be served on several servers (each with a different model_deployment)
        In that case we would have for example:
        'aws/bloom-1', 'aws/bloom-2', 'aws/bloom-3' => 'bloom'
        This is why we need to keep track of the model engine with the model metadata.
        Example: 'openai/davinci' => 'davinci'

        Raises IndexError if `model` contains no '/'."""
        return self.model.split("/")[1]
The provided code snippet includes necessary dependencies for implementing the `synthesize_request` function. Write a Python function `def synthesize_request(prompt: str, settings: str, environment: Dict[str, str]) -> Request` to solve the following problem:
Substitute `environment` into `prompt` and `settings`.
Here is the function:
def synthesize_request(prompt: str, settings: str, environment: Dict[str, str]) -> Request:
    """Substitute `environment` into `prompt` and `settings`, then build a `Request`."""
    request_kwargs: Dict[str, Any] = {"prompt": substitute_text(prompt, environment)}
    # Settings are rendered with the environment first, then parsed as HOCON
    # into request fields.
    request_kwargs.update(parse_hocon(substitute_text(settings, environment)))
    # Fall back to a default deployment when neither a model nor a model
    # deployment was specified in the settings.
    if "model" not in request_kwargs and "model_deployment" not in request_kwargs:
        request_kwargs["model_deployment"] = "openai/text-davinci-002"
    return Request(**request_kwargs)
16,154 | from abc import ABC, abstractmethod
from typing import List, Optional
from helm.common.tokenization_request import (
TokenizationRequest,
TokenizationRequestResult,
DecodeRequest,
DecodeRequestResult,
)
def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
    """
    Strip the tokenizer-specific space markers from `token`.

    Some tokenizers encode a space with a special character ("▁" or "Ġ");
    this replaces that marker with a regular space for the given tokenizer.
    """
    # Tokenizers that mark spaces with "▁".
    underscore_space_tokenizers = {
        "TsinghuaKEG/ice",
        "bigscience/T0pp",
        "google/t5-11b",
        "google/flan-t5-xxl",
        "google/ul2",
        "Yandex/yalm",
        "ai21/j1",
        "together",
    }
    if tokenizer_name in underscore_space_tokenizers:
        return token.replace("▁", " ")
    # Hugging Face byte-level tokenizers mark spaces with "Ġ".
    if tokenizer_name is not None and tokenizer_name.startswith("huggingface"):
        return token.replace("Ġ", " ")
    return token
The provided code snippet includes necessary dependencies for implementing the `cleanup_tokens` function. Write a Python function `def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]` to solve the following problem:
Applies `cleanup_str` to each token in `tokens`.
Here is the function:
def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
    """Return `tokens` with `cleanup_str` applied to each element, preserving order."""
    return list(map(lambda token: cleanup_str(token, tokenizer_name), tokens))
16,155 | import os
from typing import Any, Dict, Optional, cast
from threading import Lock
from helm.common.cache import CacheConfig
from helm.common.concurrency import ThreadSafeWrapper
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from helm.common.hierarchical_logger import htrack_block, hlog
from .caching_tokenizer import CachingTokenizer
from .tokenizer import cleanup_tokens
# Maps HELM model names to the Hugging Face pretrained model name to load.
# Names absent from this map are used as-is (see `resolve_alias`).
_MODEL_NAME_ALIASES: Dict[str, str] = {
    "google/t5-11b": "t5-11b",
    "huggingface/gpt2": "gpt2",
    "huggingface/santacoder": "bigcode/santacoder",
    "huggingface/starcoder": "bigcode/starcoder",
    "writer/gpt2": "gpt2",  # Palmyra models do not support echo
    # So they have a different TokenizerConfig called "writer/gpt2"
    # when in reality they use the same tokenizer as "huggingface/gpt2"
    "microsoft/gpt2": "gpt2",  # Same as above
}
The provided code snippet includes necessary dependencies for implementing the `resolve_alias` function. Write a Python function `def resolve_alias(model_name: str) -> str` to solve the following problem:
Resolve some HELM model names to Hugging Face pretrained model name.
Here is the function:
def resolve_alias(model_name: str) -> str:
    """Map a HELM model name to its Hugging Face pretrained model name, or return it unchanged."""
    try:
        return _MODEL_NAME_ALIASES[model_name]
    except KeyError:
        return model_name
16,156 | from abc import abstractmethod
from dataclasses import asdict
from typing import Any, Dict, List, Optional
from helm.common.cache import Cache, CacheConfig
from helm.common.request import wrap_request_time
from helm.common.tokenization_request import (
TokenizationRequest,
TokenizationRequestResult,
DecodeRequest,
DecodeRequestResult,
TokenizationToken,
)
from .tokenizer import Tokenizer
def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
    """
    Remove the special space-marker character ("▁" or "Ġ") that certain
    tokenizers use to represent spaces, replacing it with a real space.
    """
    if tokenizer_name is None:
        return token
    # Tokenizers whose space marker is "▁".
    if tokenizer_name in (
        "TsinghuaKEG/ice",
        "bigscience/T0pp",
        "google/t5-11b",
        "google/flan-t5-xxl",
        "google/ul2",
        "Yandex/yalm",
        "ai21/j1",
        "together",
    ):
        return token.replace("▁", " ")
    # Hugging Face byte-level tokenizers use "Ġ" for spaces.
    if tokenizer_name.startswith("huggingface"):
        return token.replace("Ġ", " ")
    return token
The provided code snippet includes necessary dependencies for implementing the `cleanup_tokens` function. Write a Python function `def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]` to solve the following problem:
Applies `cleanup_str` to each token in `tokens`.
Here is the function:
def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
    """Run `cleanup_str` over every element of `tokens` and return the results in order."""
    cleaned: List[str] = []
    for raw_token in tokens:
        cleaned.append(cleanup_str(raw_token, tokenizer_name))
    return cleaned
16,157 | import importlib_resources as resources
from helm.common.optional_dependencies import handle_module_not_found_error
import torch
The provided code snippet includes necessary dependencies for implementing the `convert_to_unicode` function. Write a Python function `def convert_to_unicode(text)` to solve the following problem:
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Here is the function:
def convert_to_unicode(text):
    """Return `text` as a `str`, decoding from UTF-8 when it is `bytes`.

    Raises:
        TypeError: if `text` is neither `str` nor `bytes`.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8")
    raise TypeError(f"Unexpected type {type(text)}")
16,158 | from threading import Lock
from typing import Optional
from transformers import AutoConfig, AutoModelForCausalLM
from helm.common.cache import CacheConfig
from helm.common.optional_dependencies import OptionalDependencyNotInstalled
from helm.clients.huggingface_client import HuggingFaceClient
# Guards one-time registration of OpenLM classes with the Hugging Face Auto* factories.
_register_open_lm_lock = Lock()
# Set to True once registration has completed (read and written under the lock).
_register_open_lm_done = False
class OptionalDependencyNotInstalled(Exception):
    """Raised when an optional dependency required by a feature is not installed."""
    pass
The provided code snippet includes necessary dependencies for implementing the `_register_open_lm_for_auto_model` function. Write a Python function `def _register_open_lm_for_auto_model()` to solve the following problem:
Register OpenLMForCausalLM for AutoModelForCausalLM.
Here is the function:
def _register_open_lm_for_auto_model():
    """Register OpenLMForCausalLM for AutoModelForCausalLM.

    Idempotent and thread-safe: the registration runs at most once per process,
    guarded by `_register_open_lm_lock` and the `_register_open_lm_done` flag.

    Raises:
        OptionalDependencyNotInstalled: if the `open_lm` package is missing.
    """
    try:
        from open_lm.utils.transformers.hf_model import OpenLMforCausalLM
        from open_lm.utils.transformers.hf_config import OpenLMConfig
    except ModuleNotFoundError as e:
        # Provide manual instructions for installing open_lm from GitHub
        # because PyPI does not allow installing dependencies directly from GitHub.
        raise OptionalDependencyNotInstalled(
            f"Optional dependency {e.name} is not installed. "
            "Please run `pip install open_lm@git+https://github.com/mlfoundations/open_lm.git@main` to install it."
        ) from e
    with _register_open_lm_lock:
        global _register_open_lm_done
        if not _register_open_lm_done:
            # Map the "openlm" model_type to OpenLM's config/model classes.
            AutoConfig.register("openlm", OpenLMConfig)
            AutoModelForCausalLM.register(OpenLMConfig, OpenLMforCausalLM)
            _register_open_lm_done = True
16,159 | import os
from typing import Optional
from helm.common.hierarchical_logger import hlog
from helm.common.optional_dependencies import handle_module_not_found_error
try:
import boto3
from botocore.config import Config
except ModuleNotFoundError as e:
handle_module_not_found_error(e, ["aws"])
def hlog(x: Any) -> None:
    # Log `x` via the module-level logger instance; `singleton` is defined
    # elsewhere in the module (presumably the hierarchical logger).
    singleton.log(x)
The provided code snippet includes necessary dependencies for implementing the `get_bedrock_client` function. Write a Python function `def get_bedrock_client( assumed_role: Optional[str] = None, region: Optional[str] = None, runtime: Optional[bool] = True, )` to solve the following problem:
Create a boto3 client for Amazon Bedrock, with optional configuration overrides Parameters ---------- assumed_role : Optional ARN of an AWS IAM role to assume for calling the Bedrock service. If not specified, the current active credentials will be used. region : Optional name of the AWS Region in which the service should be called (e.g. "us-east-1"). If not specified, AWS_REGION or AWS_DEFAULT_REGION environment variable will be used. runtime : Optional choice of getting different client to perform operations with the Amazon Bedrock service.
Here is the function:
def get_bedrock_client(
    assumed_role: Optional[str] = None,
    region: Optional[str] = None,
    runtime: Optional[bool] = True,
):
    """Create a boto3 client for Amazon Bedrock, with optional configuration overrides
    Parameters
    ----------
    assumed_role :
        Optional ARN of an AWS IAM role to assume for calling the Bedrock service. If not
        specified, the current active credentials will be used.
    region :
        Optional name of the AWS Region in which the service should be called (e.g. "us-east-1").
        If not specified, AWS_REGION or AWS_DEFAULT_REGION environment variable will be used.
    runtime :
        Optional choice of getting different client to perform operations with the Amazon Bedrock service.
    """
    # Region: the explicit argument wins; otherwise fall back to the AWS
    # environment variables (AWS_REGION, then AWS_DEFAULT_REGION).
    if region is None:
        target_region = os.environ.get("AWS_REGION", os.environ.get("AWS_DEFAULT_REGION"))
    else:
        target_region = region
    session_kwargs = {"region_name": target_region}
    # NOTE(review): client_kwargs is snapshotted before profile_name is added
    # below, so the profile only affects the Session, not the client kwargs —
    # presumably intentional (the session carries the profile); confirm.
    client_kwargs = {**session_kwargs}
    profile_name = os.environ.get("AWS_PROFILE")
    if profile_name:
        session_kwargs["profile_name"] = profile_name
    # Retry transient failures up to 10 times using botocore's "standard" mode.
    retry_config = Config(
        region_name=target_region,
        retries={
            "max_attempts": 10,
            "mode": "standard",
        },
    )
    session = boto3.Session(**session_kwargs)
    if assumed_role:
        # Exchange the current credentials for temporary ones by assuming the
        # given IAM role via STS; the temporary credentials go to the client.
        sts = session.client("sts")
        response = sts.assume_role(RoleArn=str(assumed_role), RoleSessionName="crfm-helm")
        client_kwargs["aws_access_key_id"] = response["Credentials"]["AccessKeyId"]
        client_kwargs["aws_secret_access_key"] = response["Credentials"]["SecretAccessKey"]
        client_kwargs["aws_session_token"] = response["Credentials"]["SessionToken"]
    # "bedrock-runtime" is the data-plane (model invocation) service;
    # "bedrock" is the control-plane service.
    if runtime:
        service_name = "bedrock-runtime"
    else:
        service_name = "bedrock"
    bedrock_client = session.client(service_name=service_name, config=retry_config, **client_kwargs)
    hlog(f"Amazon Bedrock client successfully created with endpoint {bedrock_client._endpoint}")
    return bedrock_client
16,160 | from urllib.parse import urljoin
def get_cohere_url(endpoint: str) -> str:
    """Return the full Cohere API URL for the given `endpoint` path."""
    base_url = "https://api.cohere.ai"
    return urljoin(base_url, endpoint)
16,161 | from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM
from transformers.generation.stopping_criteria import (
StoppingCriteria,
StoppingCriteriaList,
)
from typing import Any, Dict, List, Optional, TypedDict
from helm.common.cache import CacheConfig
from helm.common.hierarchical_logger import htrack_block, hlog
from helm.common.request import (
wrap_request_time,
EMBEDDING_UNAVAILABLE_REQUEST_RESULT,
Request,
RequestResult,
Sequence,
Token,
)
from .client import CachingClient, truncate_sequence
from helm.tokenizers.huggingface_tokenizer import HuggingFaceTokenizer, WrappedPreTrainedTokenizer, resolve_alias
from threading import Lock
# Kwarg key whose value names a torch dtype (e.g. "torch.bfloat16").
TORCH_DTYPE_KEY = "torch_dtype"
# Valid dtype strings start with this prefix; the remainder is the torch attribute name.
TORCH_DTYPE_VALUE_PREFIX = "torch."
The provided code snippet includes necessary dependencies for implementing the `_process_huggingface_client_kwargs` function. Write a Python function `def _process_huggingface_client_kwargs(raw_kwargs: Dict[str, Any])` to solve the following problem:
Process the kwargs for HuggingFaceClient. The kwargs passed to HuggingFaceClient will eventually be passed to AutoModel.from_pretrained(). Since the kwargs from HuggingFaceClient may be derived from configuration YAML, they may contain primitive types instead of the unserializable types that AutoModel.from_pretrained() expects (e.g. torch_dtype). This function converts values of primitive types to values of the unserializable types.
Here is the function:
def _process_huggingface_client_kwargs(raw_kwargs: Dict[str, Any]):
    """Prepare HuggingFaceClient kwargs for AutoModel.from_pretrained().

    Kwargs derived from configuration YAML may encode unserializable values as
    strings (e.g. "torch.bfloat16" for `torch_dtype`). Return a deep copy of
    `raw_kwargs` with such strings converted to the real objects.

    Raises:
        ValueError: if a torch_dtype string does not start with "torch.".
    """
    processed_kwargs = deepcopy(raw_kwargs)
    dtype_spec = processed_kwargs.get(TORCH_DTYPE_KEY)
    # Only non-empty string specs need conversion; actual dtype objects (or a
    # missing key) pass through untouched.
    if isinstance(dtype_spec, str) and dtype_spec:
        if not dtype_spec.startswith(TORCH_DTYPE_VALUE_PREFIX):
            raise ValueError(f'Unknown dtype "{dtype_spec}"; expected a string such as "torch.bfloat16"')
        attribute_name = dtype_spec[len(TORCH_DTYPE_VALUE_PREFIX) :]
        processed_kwargs[TORCH_DTYPE_KEY] = getattr(torch, attribute_name)
    return processed_kwargs
16,162 | from typing import Any, Dict, List, Optional, TypedDict, Union, cast
import json
import requests
import time
import urllib.parse
from helm.common.cache import CacheConfig
from helm.common.hierarchical_logger import htrack_block, hlog
from helm.common.media_object import IMAGE_TYPE, TEXT_TYPE
from helm.common.optional_dependencies import handle_module_not_found_error
from helm.common.request import (
wrap_request_time,
EMBEDDING_UNAVAILABLE_REQUEST_RESULT,
Request,
RequestResult,
Sequence,
Token,
ErrorFlags,
)
from helm.common.tokenization_request import (
TokenizationRequest,
TokenizationRequestResult,
)
from helm.proxy.retry import NonRetriableException
from helm.tokenizers.tokenizer import Tokenizer
from helm.clients.client import CachingClient, truncate_sequence, truncate_and_tokenize_response_text
def hlog(x: Any) -> None:
    # Delegate to the module-level logger instance; `singleton` is defined
    # elsewhere in the module (presumably the hierarchical logger).
    singleton.log(x)
The provided code snippet includes necessary dependencies for implementing the `_is_content_moderation_failure` function. Write a Python function `def _is_content_moderation_failure(response: Dict) -> bool` to solve the following problem:
Return whether a response failed because of the content moderation filter.
Here is the function:
def _is_content_moderation_failure(response: Dict) -> bool:
"""Return whether a a response failed because of the content moderation filter."""
if response["error"]["message"] == "Output blocked by content filtering policy":
hlog(f"Anthropic - output blocked by content filtering policy: {response}")
return True
return False | Return whether a a response failed because of the content moderation filter. |
16,163 | import json
import requests
from typing import Any, Dict, List
from helm.common.cache import CacheConfig
from helm.common.hierarchical_logger import hlog
from helm.common.request import wrap_request_time, Request, RequestResult, Sequence, Token, ErrorFlags
from helm.common.tokenization_request import (
TokenizationRequest,
TokenizationRequestResult,
)
from helm.tokenizers.tokenizer import Tokenizer
from .client import CachingClient, truncate_sequence
# Error key the upstream API uses to signal that output was blocked by content moderation.
_CONTENT_MODERATION_KEY = "fail.content.moderation.failed"
The provided code snippet includes necessary dependencies for implementing the `_is_content_moderation_failure` function. Write a Python function `def _is_content_moderation_failure(response: Dict) -> bool` to solve the following problem:
Return whether a response failed because of the content moderation filter.
Here is the function:
def _is_content_moderation_failure(response: Dict) -> bool:
    """Return whether a response failed because of the content moderation filter.

    True exactly when the response carries a single error whose key is the
    content-moderation failure key.
    """
    error_list = response.get("errors") or []
    return len(error_list) == 1 and error_list[0].get("key") == _CONTENT_MODERATION_KEY
16,164 | from typing import List, Optional
import torch
The provided code snippet includes necessary dependencies for implementing the `generate` function. Write a Python function `def generate( model: torch.nn.Module, idx: torch.Tensor, max_returned_tokens: int, *, temperature: float = 1.0, top_k: Optional[int] = None, stop_tokens: List[torch.Tensor] = [], ) -> torch.Tensor` to solve the following problem:
Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested. The implementation of this function is modified from A. Karpathy's nanoGPT. Args: model: The model to use. idx: Tensor of shape (T) with indices of the prompt sequence. max_returned_tokens: The maximum number of tokens to return (given plus generated). temperature: Scales the predicted logits by 1 / temperature. top_k: If specified, only sample among the tokens with the k highest probabilities. stop_tokens: If specified, stop generating any more token once the stop token is triggered. Returns: Tuple containing a list of token indexes, id of the top log probability, and the actual log probability of the selected token.
Here is the function:
def generate(
    model: torch.nn.Module,
    idx: torch.Tensor,
    max_returned_tokens: int,
    *,
    temperature: float = 1.0,
    top_k: Optional[int] = None,
    stop_tokens: List[torch.Tensor] = [],
) -> torch.Tensor:
    """Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.
    The implementation of this function is modified from A. Karpathy's nanoGPT.
    Args:
        model: The model to use.
        idx: Tensor of shape (T) with indices of the prompt sequence.
        max_returned_tokens: The maximum number of tokens to return (given plus generated).
        temperature: Scales the predicted logits by 1 / temperature.
        top_k: If specified, only sample among the tokens with the k highest probabilities.
        stop_tokens: If specified, stop generating any more token once the stop token is triggered.
            (Mutable default is safe here: the list is only iterated, never mutated.)
    Returns:
        1-D tensor of token indices: the prompt followed by the generated tokens.
        If a stop token is produced, the returned sequence is truncated before it.
        (Log probabilities are not yet returned — see the TODO below.)
    """
    T = idx.size(0)
    assert max_returned_tokens > T
    if model.max_seq_length < max_returned_tokens - 1:
        # rolling the kv cache based on the `input_pos` value would be necessary. However, doing so would introduce a
        # data dependency on the `input_pos` tensor and impact model compilation. Since this setting is uncommon, we do
        # not support it to avoid negatively impacting the overall speed
        raise NotImplementedError(f"max_seq_length {model.max_seq_length} needs to be >= {max_returned_tokens - 1}")
    device, dtype = idx.device, idx.dtype
    # stop_tokens = [torch.tensor(tokens, device=device) for tokens in stop_tokens]
    # create an empty tensor of the expected final shape and fill in the current tokens
    empty = torch.empty(max_returned_tokens, dtype=dtype, device=device)
    empty[:T] = idx
    idx = empty
    input_pos = torch.arange(0, T, device=device)
    # generate up to a fixed number of tokens
    for _ in range(max_returned_tokens - T):
        # Gather the tokens at `input_pos`: the full prompt on the first step,
        # then just the newest token on subsequent steps.
        x = idx.index_select(0, input_pos).view(1, -1)
        # forward
        logits = model(x, input_pos)
        # Take the logits for the last position and apply temperature scaling.
        logits = logits[0, -1] / temperature
        # optionally crop the logits to only the top k options
        if top_k is not None:
            v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
            # Everything below the k-th largest logit is masked to -inf.
            logits = torch.where(logits < v[[-1]], -float("Inf"), logits)
        probs = torch.nn.functional.softmax(logits, dim=-1)
        idx_next = torch.multinomial(probs, num_samples=1).to(dtype=dtype)
        # advance
        input_pos = input_pos[-1:] + 1
        # concatenate the new generation
        idx = idx.index_copy(0, input_pos, idx_next)
        # if <eos> token is triggered, return the output (stop generation)
        for eos_id in stop_tokens:
            if torch.equal(idx_next, eos_id):
                # NOTE(review): `idx[: input_pos - 1]` drops the token at
                # `input_pos - 1` as well as the stop token at `input_pos` —
                # confirm whether `idx[: input_pos]` was intended.
                return idx[: input_pos - 1]  # exclude the EOS token
    # TODO: implement logprob in a future PR
    return idx
16,165 | import json
from abc import ABC, abstractmethod
from typing import List, Mapping, Optional, cast
from helm.common.hierarchical_logger import hlog
from helm.common.media_object import MultimediaObject, TEXT_TYPE
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.cache import Cache, CacheConfig
from helm.common.tokenization_request import DecodeRequest, TokenizationRequest
from helm.tokenizers.tokenizer import Tokenizer
def hlog(x: Any) -> None:
    # Log `x` via the module-level logger instance; `singleton` is defined
    # elsewhere in the module (presumably the hierarchical logger).
    singleton.log(x)
class Request:
    """
    A `Request` specifies how to query a language model (given a prompt,
    complete it). It is the unified representation for communicating with
    various APIs (e.g., GPT-3, Jurassic).
    """

    # NOTE(review): the fields below use dataclasses-style annotations and
    # `field(default_factory=list)`; the `@dataclass` decorator and its import
    # appear stripped from this snippet — confirm against the original source.

    model_deployment: str = ""
    """Which model deployment to query -> Determines the Client.
    Refers to a deployment in the model deployment registry."""

    model: str = ""
    """Which model to use -> Determines the Engine.
    Refers to a model metadata in the model registry."""

    embedding: bool = False
    """Whether to query embedding instead of text response"""

    prompt: str = ""
    """What prompt do condition the language model on"""

    temperature: float = 1.0
    """Temperature parameter that governs diversity"""

    num_completions: int = 1
    """Generate this many completions (by sampling from the model)"""

    top_k_per_token: int = 1
    """Take this many highest probability candidates per token in the completion"""

    max_tokens: int = 100
    """Maximum number of tokens to generate (per completion)"""

    stop_sequences: List[str] = field(default_factory=list)
    """Stop generating once we hit one of these strings."""

    echo_prompt: bool = False
    """Should `prompt` be included as a prefix of each completion? (e.g., for
    evaluating perplexity of the prompt)"""

    top_p: float = 1
    """Same from tokens that occupy this probability mass (nucleus sampling)"""

    presence_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""

    frequency_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""

    random: Optional[str] = None
    """Used to control randomness. Expect different responses for the same
    request but with different values for `random`."""

    messages: Optional[List[Dict[str, str]]] = None
    """Used for chat models. (OpenAI only for now).
    if messages is specified for a chat model, the prompt is ignored.
    Otherwise, the client should convert the prompt into a message."""

    multimodal_prompt: Optional[MultimediaObject] = None
    """Multimodal prompt with media objects interleaved (e.g., text, video, image, text, ...)"""

    image_generation_parameters: Optional[ImageGenerationParameters] = None
    """Parameters for image generation."""

    # NOTE(review): likely a @property in the original source — decorator
    # appears stripped in this snippet; confirm before calling as a method.
    def model_host(self) -> str:
        """Returns the model host (referring to the deployment).
        Not to be confused with the model creator organization (referring to the model).
        Example: 'openai/davinci' => 'openai'
        'together/bloom' => 'together'"""
        return self.model_deployment.split("/")[0]

    # NOTE(review): likely a @property in the original source as well.
    def model_engine(self) -> str:
        """Returns the model engine (referring to the model).
        This is often the same as self.model_deployment.split("/")[1], but not always.
        For example, one model could be served on several servers (each with a different model_deployment)
        In that case we would have for example:
        'aws/bloom-1', 'aws/bloom-2', 'aws/bloom-3' => 'bloom'
        This is why we need to keep track of the model engine with the model metadata.
        Example: 'openai/davinci' => 'davinci'"""
        return self.model.split("/")[1]
class Token:
    """
    A `Token` represents one token position in a `Sequence`, which has the
    chosen `text` as well as the top probabilities under the model.
    """

    # Text that was chosen
    text: str

    # Log probability of generating that
    logprob: float

    def render_lines(self) -> List[str]:
        """Render this token as a single human-readable display line."""
        # NOTE(review): `format_text` is defined elsewhere in the original
        # module; it is not visible in this snippet.
        return [
            f"{format_text(self.text)} logprob={self.logprob}",
        ]
class Sequence:
    """A `Sequence` is a sequence of tokens."""

    # The concatenation of all the tokens
    text: str

    # The sum of the log probabilities of all tokens
    logprob: float

    # The tokens
    tokens: List[Token]

    # Why did the sequence finish?
    finish_reason: Optional[Dict[str, Any]] = None

    # Could be a sequence made up of multimedia content
    multimodal_content: Optional[MultimediaObject] = None

    def __add__(self, other: "Sequence") -> "Sequence":
        """Concatenate two sequences: texts and token lists are joined and
        log probabilities summed. Note that `finish_reason` and
        `multimodal_content` are dropped from the result."""
        return Sequence(self.text + other.text, self.logprob + other.logprob, self.tokens + other.tokens)

    def render_lines(self) -> List[str]:
        """Render the sequence (text, logprob, per-token lines, and the
        finish reason if present) as a list of display lines."""
        result = [
            f"text: {self.text}",
            f"log_prob: {self.logprob}",
            "tokens {",
        ]
        # NOTE(review): `indent_lines` comes from the original module; not
        # visible in this snippet.
        for token in self.tokens:
            result.extend(indent_lines(token.render_lines(), 2))
        result.append("}")
        if self.finish_reason:
            result.append(f"finish_reason: {self.finish_reason}")
        return result
The provided code snippet includes necessary dependencies for implementing the `truncate_sequence` function. Write a Python function `def truncate_sequence(sequence: Sequence, request: Request, print_warning: bool = True) -> Sequence` to solve the following problem:
Certain providers have bugs where they aren't respecting max_tokens, stop_sequences and the end of text token, so as a hack, we have to manually truncate the suffix of `sequence` and `tokens` as a post-hoc process. This method is unsafe and may produce warnings or incorrect results. Prefer using the safer truncate_and_tokenize_response_text() method instead if your use case satisfies its requirements.
Here is the function:
def truncate_sequence(sequence: Sequence, request: Request, print_warning: bool = True) -> Sequence:
    """
    Certain providers have bugs where they aren't respecting max_tokens,
    stop_sequences and the end of text token, so as a hack, we have to manually
    truncate the suffix of `sequence` and `tokens` as a post-hoc process.
    This method is unsafe and may produce warnings or incorrect results.
    Prefer using the safer truncate_and_tokenize_response_text() method instead
    if your use case satisfies its requirements.
    """
    # TODO: if echo_prompt, then we should only ignore the prompt, but we don't
    # know how many tokens the prompt takes up.
    # In the benchmark, usually echo_prompt is only used for language modeling,
    # where max_tokens = 0, so there's nothing to truncate.
    if request.echo_prompt:
        if request.max_tokens != 0:
            hlog("WARNING: don't know how to handle echo_prompt and max_tokens > 0, not truncating")
        return sequence

    for stop in request.stop_sequences:
        # Find `stop` in the text
        try:
            new_text = sequence.text[: sequence.text.index(stop)]
        except ValueError:
            # The stop sequence doesn't exist, but it might exist in the list of tokens.
            new_text = sequence.text

        # Strip `stop` off the tokens
        new_tokens: List[Token] = []
        # Need to start
        for token in sequence.tokens:
            # Note: we can only strip at token boundaries, so a stop sequence
            # that begins mid-token is NOT removed from the token list.
            if token.text.startswith(stop):
                break
            new_tokens.append(token)

        # Text was cut but no token started with `stop`: text and tokens are
        # now inconsistent; warn rather than fail.
        if len(new_text) < len(sequence.text) and len(new_tokens) == len(sequence.tokens):
            hlog(
                f"WARNING: Stripped characters from text ({len(sequence.text)} -> {len(new_text)}), "
                f"but wasn't able to strip the tokens"
            )

        # Recompute log probability
        new_logprob = sum(token.logprob for token in new_tokens)

        if print_warning:
            hlog(f"WARNING: truncate_sequence needs to strip {json.dumps(stop)}")
        sequence = Sequence(text=new_text, logprob=new_logprob, tokens=new_tokens)

    # Truncate based on the max number of tokens.
    if len(sequence.tokens) > request.max_tokens:
        if print_warning:
            hlog(f"WARNING: truncate_sequence needs to truncate {len(sequence.tokens)} down to {request.max_tokens}")
        new_tokens = sequence.tokens[: request.max_tokens]

        # This is imperfect stitching together of tokens, so just to make sure this is okay
        # TODO: should use the proper detokenizer since T5-style models.
        # Usually, in our benchmark, max_tokens is active when it's 1, so hopefully this isn't an issue.
        new_text = "".join(token.text for token in new_tokens)
        if not sequence.text.startswith(new_text):
            hlog(f"WARNING: {json.dumps(sequence.text)} does not start with truncated text {json.dumps(new_text)}")

        new_logprob = sum(token.logprob for token in new_tokens)
        sequence = Sequence(text=new_text, logprob=new_logprob, tokens=new_tokens)
return sequence | Certain providers have bugs where they aren't respecting max_tokens, stop_sequences and the end of text token, so as a hack, we have to manually truncate the suffix of `sequence` and `tokens` as a post-hoc process. This method is unsafe and may produce warnings or incorrect results. Prefer using the safer truncate_and_tokenize_response_text() method instead if your use case satisfies its requirements. |
16,166 | import json
from abc import ABC, abstractmethod
from typing import List, Mapping, Optional, cast
from helm.common.hierarchical_logger import hlog
from helm.common.media_object import MultimediaObject, TEXT_TYPE
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.cache import Cache, CacheConfig
from helm.common.tokenization_request import DecodeRequest, TokenizationRequest
from helm.tokenizers.tokenizer import Tokenizer
class Request:
    """
    A `Request` specifies how to query a language model (given a prompt,
    complete it). It is the unified representation for communicating with
    various APIs (e.g., GPT-3, Jurassic).
    """

    # NOTE(review): the fields below use dataclasses-style annotations and
    # `field(default_factory=list)`; the `@dataclass` decorator and its import
    # appear stripped from this snippet — confirm against the original source.

    model_deployment: str = ""
    """Which model deployment to query -> Determines the Client.
    Refers to a deployment in the model deployment registry."""

    model: str = ""
    """Which model to use -> Determines the Engine.
    Refers to a model metadata in the model registry."""

    embedding: bool = False
    """Whether to query embedding instead of text response"""

    prompt: str = ""
    """What prompt do condition the language model on"""

    temperature: float = 1.0
    """Temperature parameter that governs diversity"""

    num_completions: int = 1
    """Generate this many completions (by sampling from the model)"""

    top_k_per_token: int = 1
    """Take this many highest probability candidates per token in the completion"""

    max_tokens: int = 100
    """Maximum number of tokens to generate (per completion)"""

    stop_sequences: List[str] = field(default_factory=list)
    """Stop generating once we hit one of these strings."""

    echo_prompt: bool = False
    """Should `prompt` be included as a prefix of each completion? (e.g., for
    evaluating perplexity of the prompt)"""

    top_p: float = 1
    """Same from tokens that occupy this probability mass (nucleus sampling)"""

    presence_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""

    frequency_penalty: float = 0
    """Penalize repetition (OpenAI & Writer only)"""

    random: Optional[str] = None
    """Used to control randomness. Expect different responses for the same
    request but with different values for `random`."""

    messages: Optional[List[Dict[str, str]]] = None
    """Used for chat models. (OpenAI only for now).
    if messages is specified for a chat model, the prompt is ignored.
    Otherwise, the client should convert the prompt into a message."""

    multimodal_prompt: Optional[MultimediaObject] = None
    """Multimodal prompt with media objects interleaved (e.g., text, video, image, text, ...)"""

    image_generation_parameters: Optional[ImageGenerationParameters] = None
    """Parameters for image generation."""

    # NOTE(review): likely a @property in the original source — decorator
    # appears stripped in this snippet; confirm before calling as a method.
    def model_host(self) -> str:
        """Returns the model host (referring to the deployment).
        Not to be confused with the model creator organization (referring to the model).
        Example: 'openai/davinci' => 'openai'
        'together/bloom' => 'together'"""
        return self.model_deployment.split("/")[0]

    # NOTE(review): likely a @property in the original source as well.
    def model_engine(self) -> str:
        """Returns the model engine (referring to the model).
        This is often the same as self.model_deployment.split("/")[1], but not always.
        For example, one model could be served on several servers (each with a different model_deployment)
        In that case we would have for example:
        'aws/bloom-1', 'aws/bloom-2', 'aws/bloom-3' => 'bloom'
        This is why we need to keep track of the model engine with the model metadata.
        Example: 'openai/davinci' => 'davinci'"""
        return self.model.split("/")[1]
class Token:
    """
    A `Token` represents one token position in a `Sequence`, which has the
    chosen `text` as well as the top probabilities under the model.
    """

    # Text that was chosen
    text: str

    # Log probability of generating that
    logprob: float

    def render_lines(self) -> List[str]:
        """Render this token as a single human-readable display line."""
        # NOTE(review): `format_text` is defined elsewhere in the original
        # module; it is not visible in this snippet.
        return [
            f"{format_text(self.text)} logprob={self.logprob}",
        ]
class Sequence:
    """A `Sequence` is a sequence of tokens."""

    # The concatenation of all the tokens
    text: str

    # The sum of the log probabilities of all tokens
    logprob: float

    # The tokens
    tokens: List[Token]

    # Why did the sequence finish?
    finish_reason: Optional[Dict[str, Any]] = None

    # Could be a sequence made up of multimedia content
    multimodal_content: Optional[MultimediaObject] = None

    def __add__(self, other: "Sequence") -> "Sequence":
        """Concatenate two sequences: texts and token lists are joined and
        log probabilities summed. Note that `finish_reason` and
        `multimodal_content` are dropped from the result."""
        return Sequence(self.text + other.text, self.logprob + other.logprob, self.tokens + other.tokens)

    def render_lines(self) -> List[str]:
        """Render the sequence (text, logprob, per-token lines, and the
        finish reason if present) as a list of display lines."""
        result = [
            f"text: {self.text}",
            f"log_prob: {self.logprob}",
            "tokens {",
        ]
        # NOTE(review): `indent_lines` comes from the original module; not
        # visible in this snippet.
        for token in self.tokens:
            result.extend(indent_lines(token.render_lines(), 2))
        result.append("}")
        if self.finish_reason:
            result.append(f"finish_reason: {self.finish_reason}")
        return result
class TokenizationRequest:
    """A `TokenizationRequest` specifies how to tokenize some text."""

    # Text to tokenize
    text: str

    # Which tokenizer we should use
    tokenizer: str

    # Whether to encode tokens
    #
    # If true, the response's TokenizationToken should contain integers.
    # Otherwise, the response's TokenizationToken should contain strings.
    encode: bool = False

    # Whether to truncate
    truncation: bool = False

    # Maximum length when encoding
    max_length: int = 2048

    # NOTE(review): these two helpers are likely @property in the original
    # source — decorators appear stripped in this snippet.
    def tokenizer_organization(self):
        """Example: 'huggingface/gpt2' => 'huggingface'"""
        return self.tokenizer.split("/")[0]

    def tokenizer_name(self):
        """Example: 'huggingface/gpt2' => 'gpt2'"""
        return self.tokenizer.split("/")[1]
class DecodeRequest:
    """For HuggingFace tokenizers. How to decode tokens and convert it to text."""

    # Tokens
    tokens: List[int]

    # Which tokenizer we should use
    tokenizer: str

    # Whether to clean up the tokenization spaces. Setting to False preserves the original text.
    clean_up_tokenization_spaces: bool = False

    # These helpers mirror TokenizationRequest.tokenizer_organization/name.
    def tokenizer_organization(self):
        """Example: 'huggingface/gpt2' => 'huggingface'"""
        return self.tokenizer.split("/")[0]

    def tokenizer_name(self):
        """Example: 'huggingface/gpt2' => 'gpt2'"""
        return self.tokenizer.split("/")[1]
class Tokenizer(ABC):
    """Abstract interface for tokenizers that can tokenize and decode text."""

    # NOTE(review): these methods are presumably @abstractmethod in the
    # original source (abstractmethod is imported at the top of the file);
    # the decorators appear stripped in this snippet.
    def tokenize(self, request: TokenizationRequest) -> TokenizationRequestResult:
        """Tokenizes `request.text` using `request.tokenizer`.
        Returns a `TokenizationRequestResult` object.
        """
        pass

    def decode(self, request: DecodeRequest) -> DecodeRequestResult:
        """Decodes `request.tokens` using `request.tokenizer`.
        Returns a `DecodeRequestResult` object.
        """
        pass
The provided code snippet includes necessary dependencies for implementing the `truncate_and_tokenize_response_text` function. Write a Python function `def truncate_and_tokenize_response_text( text: str, request: Request, tokenizer: Tokenizer, tokenizer_name: str, original_finish_reason: str = "endoftext" ) -> Sequence` to solve the following problem:
Truncate a string-only response to respect stop_sequences and max_tokens. This can only be used if all of the following conditions are true: - You have access to the tokenizer. - The request has echo_prompt = False. - The tokenizer supports encoding and decoding. - The tokenizer's tokenize() method supports truncation. - The model's response is text-only. - The model's response does not already provide the tokenized text. - The model's response does not provide logprobs. This method is safer than truncate_sequence() and should be preferred if the above conditions are met. Unlike truncate_sequence(), this method will not produce warnings or incorrect results. This is because the tokens are derived from the truncated text using the tokenizer, so the text and the tokens in the resulting sequence are guaranteed to match.
Here is the function:
def truncate_and_tokenize_response_text(
    text: str, request: Request, tokenizer: Tokenizer, tokenizer_name: str, original_finish_reason: str = "endoftext"
) -> Sequence:
    """Truncate a string-only response to respect stop_sequences and max_tokens.

    This can only be used if all of the following conditions are true:
    - You have access to the tokenizer.
    - The request has echo_prompt = False.
    - The tokenizer supports encoding and decoding.
    - The tokenizer's tokenize() method supports truncation.
    - The model's response is text-only.
    - The model's response does not already provide the tokenized text.
    - The model's response does not provide logprobs.

    This method is safer than truncate_sequence() and should be preferred when
    the above conditions are met. Unlike truncate_sequence(), it will not
    produce warnings or incorrect results: the tokens are derived from the
    truncated text using the tokenizer, so the text and the tokens in the
    resulting sequence are guaranteed to match."""
    # Finish reason strings are token from basic_metrics._compute_finish_reason_metrics()
    finish_reason: str = original_finish_reason

    if request.echo_prompt:
        raise Exception("truncate_and_tokenize_response_text() does not support requests with echo_prompt = True")

    # Cut the text at each stop sequence that occurs in it; each cut applies
    # to the already-truncated text.
    for stop_sequence in request.stop_sequences:
        cut_at = text.find(stop_sequence)
        if cut_at != -1:
            text = text[:cut_at]
            finish_reason = "stop"

    def _tokenize_to_strings(current_text: str) -> List[str]:
        # One round-trip through the tokenizer, returning string tokens.
        return cast(
            List[str],
            tokenizer.tokenize(TokenizationRequest(text=current_text, tokenizer=tokenizer_name)).raw_tokens,
        )

    token_strings = _tokenize_to_strings(text)
    if len(token_strings) > request.max_tokens:
        # Re-encode with truncation enabled, then decode so that the text
        # matches exactly the first max_tokens tokens.
        encoded_ints = cast(
            List[int],
            tokenizer.tokenize(
                TokenizationRequest(
                    text=text, tokenizer=tokenizer_name, encode=True, truncation=True, max_length=request.max_tokens
                )
            ).raw_tokens,
        )
        text = tokenizer.decode(DecodeRequest(encoded_ints, tokenizer_name)).text
        token_strings = _tokenize_to_strings(text)
        finish_reason = "length"

    tokens = [Token(text=token_string, logprob=0.0) for token_string in token_strings]
    return Sequence(text=text, logprob=0.0, tokens=tokens, finish_reason={"reason": finish_reason})
16,167 | import json
from abc import ABC, abstractmethod
from typing import List, Mapping, Optional, cast
from helm.common.hierarchical_logger import hlog
from helm.common.media_object import MultimediaObject, TEXT_TYPE
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.cache import Cache, CacheConfig
from helm.common.tokenization_request import DecodeRequest, TokenizationRequest
from helm.tokenizers.tokenizer import Tokenizer
def cleanup_str(token: str, tokenizer_name: Optional[str] = None) -> str:
    """
    Certain tokenizers introduce special characters to represent spaces, such as
    "Ġ" or "▁". This function removes those characters.
    """
    # Tokenizers that use the SentencePiece-style "▁" space marker.
    sentencepiece_tokenizers = {
        "TsinghuaKEG/ice",
        "bigscience/T0pp",
        "google/t5-11b",
        "google/flan-t5-xxl",
        "google/ul2",
        "Yandex/yalm",
        "ai21/j1",
        "together",
    }
    if tokenizer_name in sentencepiece_tokenizers:
        return token.replace("▁", " ")
    # GPT-2-style tokenizers use "Ġ" as the space marker.
    if tokenizer_name is not None and (tokenizer_name.startswith("huggingface") or tokenizer_name.endswith("gpt2")):
        return token.replace("Ġ", " ")
    return token
The provided code snippet includes necessary dependencies for implementing the `cleanup_tokens` function. Write a Python function `def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]` to solve the following problem:
Applies `cleanup_str` to each token in `tokens`.
Here is the function:
def cleanup_tokens(tokens: List[str], tokenizer_name: Optional[str] = None) -> List[str]:
    """Return a new list with `cleanup_str` applied to every token in `tokens`."""
    return [cleanup_str(raw_token, tokenizer_name) for raw_token in tokens]
16,168 | import json
from abc import ABC, abstractmethod
from typing import List, Mapping, Optional, cast
from helm.common.hierarchical_logger import hlog
from helm.common.media_object import MultimediaObject, TEXT_TYPE
from helm.common.request import Request, RequestResult, Sequence, Token
from helm.common.cache import Cache, CacheConfig
from helm.common.tokenization_request import DecodeRequest, TokenizationRequest
from helm.tokenizers.tokenizer import Tokenizer
# Media-type tag used by MediaObject.is_type() to mark plain-text content.
TEXT_TYPE = "text"
class MultimediaObject:
    """Represents a sequence of `MediaObject`s."""

    # NOTE(review): uses `field(default_factory=list)` and `replace(...)`
    # (dataclasses.replace), so this is presumably a @dataclass in the
    # original source; the decorator and the dataclasses/copy imports are
    # not visible in this snippet.

    media_objects: List[MediaObject] = field(default_factory=list)
    """The sequence of `MediaObject`s."""

    def add_textual_prefix(self, prefix: str) -> "MultimediaObject":
        """
        Returns a new `MultimediaObject` with a textual prefix added to the beginning of the multimodal sequence.
        :param prefix: The prefix to add.
        :return: New multimodal object with prefix.
        """
        result: MultimediaObject = deepcopy(self)
        if not prefix:
            return result
        start: MediaObject = result.media_objects[0]

        # Merge into an existing leading text object; otherwise insert a new one.
        if start.is_type(TEXT_TYPE) and start.text:
            result.media_objects[0] = replace(result.media_objects[0], text=prefix + start.text)
        else:
            result.media_objects.insert(0, MediaObject(text=prefix, content_type="text/plain"))
        return result

    def add_textual_suffix(self, suffix: str) -> "MultimediaObject":
        """
        Returns a new `MultimediaObject` with a textual suffix added to the end of the multimodal sequence.
        :param suffix: The suffix to add.
        :return: New multimodal content with suffix.
        """
        result: MultimediaObject = deepcopy(self)
        if not suffix:
            return result
        end: MediaObject = result.media_objects[-1]

        # Merge into an existing trailing text object; otherwise append a new one.
        if end.is_type(TEXT_TYPE) and end.text:
            result.media_objects[-1] = replace(result.media_objects[-1], text=end.text + suffix)
        else:
            result.media_objects.append(MediaObject(text=suffix, content_type="text/plain"))
        return result

    def combine(self, other: "MultimediaObject") -> "MultimediaObject":
        """
        Return a new `MultimediaObject` that contains the contents of this object and the other object.
        :param other: The other multimodal content.
        :return: The combined multimodal content.
        """
        return MultimediaObject(media_objects=self.media_objects + other.media_objects)

    # NOTE(review): `size` and `text` are likely @property in the original
    # source — decorators appear stripped in this snippet; confirm.
    def size(self) -> int:
        """
        Get the number of `MediaObject`s in this multimodal content.
        :return: The number of `MediaObject`s .
        """
        return len(self.media_objects)

    def text(self) -> str:
        """
        Get the text-only part of this multimodal content.
        :return: The text-only representation.
        """
        return "".join(item.text for item in self.media_objects if item.is_type(TEXT_TYPE) and item.text)
The provided code snippet includes necessary dependencies for implementing the `generate_uid_for_multimodal_prompt` function. Write a Python function `def generate_uid_for_multimodal_prompt(prompt: MultimediaObject) -> str` to solve the following problem:
Generates a unique identifier for a given multimodal prompt.
Here is the function:
def generate_uid_for_multimodal_prompt(prompt: MultimediaObject) -> str:
    """Generates a unique identifier for a given multimodal prompt.

    Text objects contribute their text; every other media object
    contributes the string form of its location."""
    parts = []
    for media_object in prompt.media_objects:
        if media_object.is_type(TEXT_TYPE) and media_object.text:
            parts.append(media_object.text)
        else:
            parts.append(str(media_object.location))
    return "".join(parts)
16,169 | from copy import deepcopy
from typing import List, Dict, Any, Optional, Union
import requests
from retrying import retry
from helm.common.cache import CacheConfig
from helm.common.request import wrap_request_time, Request, RequestResult, Sequence, Token
from .client import CachingClient, truncate_sequence, cleanup_str
class _RewriteRequestTags:
    """Tags that indicate that the request for the model must be rewritten before sending to Together."""

    # TODO: Convert to StrEnum after upgrading to Python 3.11
    ADD_EOS_TOKEN_AS_STOP_SEQUENCE = "ADD_EOS_TOKEN_AS_STOP_SEQUENCE"
    """Indicates that the EOS token should be added as an extra stop sequence.
    This prevents the model from incorrectly returning the EOS token as part of the generation."""

    SET_DETAILS_TO_TRUE = "SET_DETAILS_TO_TRUE"
    """Indicates that the `details` field should be set to `true`.
    This indicates that Together should return logprobs for models that do not return logprobs by default."""
# Maps a Together model engine to the rewrite tags that must be applied to
# its raw requests (see _rewrite_raw_request_for_model_tags).
_MODEL_TO_TAGS: Dict[str, List[str]] = {
    "alpaca-7b": [_RewriteRequestTags.ADD_EOS_TOKEN_AS_STOP_SEQUENCE],
    "vicuna-7b-v1.3": [_RewriteRequestTags.ADD_EOS_TOKEN_AS_STOP_SEQUENCE],
    "llama-65b": [_RewriteRequestTags.SET_DETAILS_TO_TRUE],
    "llama-2-70b": [_RewriteRequestTags.SET_DETAILS_TO_TRUE],
    "vicuna-13b-v1.3": [_RewriteRequestTags.ADD_EOS_TOKEN_AS_STOP_SEQUENCE],
}

# Maps a model engine to its end-of-sequence token; consulted when the
# ADD_EOS_TOKEN_AS_STOP_SEQUENCE tag applies to the model.
_MODEL_TO_EOS_TOKEN: Dict[str, str] = {
    "alpaca-7b": "</s>",
    "vicuna-7b-v1.3": "</s>",
    "vicuna-13b-v1.3": "</s>",
}
The provided code snippet includes necessary dependencies for implementing the `_rewrite_raw_request_for_model_tags` function. Write a Python function `def _rewrite_raw_request_for_model_tags(raw_request: Dict[str, Any], model_engine: str) -> Dict[str, Any]` to solve the following problem:
Rewrite the raw request given the model.
Here is the function:
def _rewrite_raw_request_for_model_tags(raw_request: Dict[str, Any], model_engine: str) -> Dict[str, Any]:
    """Rewrite the raw request given the model.

    Returns a new request dict; `raw_request` itself is never mutated.

    Raises:
        ValueError: if a model is tagged ADD_EOS_TOKEN_AS_STOP_SEQUENCE but has
            no known EOS token, or if an unknown rewrite tag is encountered.
    """
    # Make a deepcopy to avoid mutating the input in unexpected ways
    # (e.g. raw_request["stop"] can be a mutable list)
    rewritten_request = deepcopy(raw_request)
    model_tags = _MODEL_TO_TAGS.get(model_engine, [])
    for model_tag in model_tags:
        if model_tag == _RewriteRequestTags.ADD_EOS_TOKEN_AS_STOP_SEQUENCE:
            eos_token = _MODEL_TO_EOS_TOKEN.get(model_engine)
            if not eos_token:
                raise ValueError(f"Unknown EOS token for: {model_engine}")
            # Use .get() so a request without a "stop" key is handled
            # gracefully instead of raising KeyError (previously this indexed
            # rewritten_request["stop"] directly).
            existing_stop = rewritten_request.get("stop")
            if isinstance(existing_stop, list):
                existing_stop.append(eos_token)
            else:
                rewritten_request["stop"] = [eos_token]
        elif model_tag == _RewriteRequestTags.SET_DETAILS_TO_TRUE:
            rewritten_request["details"] = True
        else:
            raise ValueError(f"Unknown `_RewriteRequestTags`: {model_tag}")
    return rewritten_request
def getattr_recursive(obj, att):
    """
    Return nested attribute of obj
    Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
    """
    target = obj
    # Walk the dotted path one segment at a time; empty segments (from an
    # empty path, leading/trailing/doubled dots) are skipped, matching the
    # original recursive behavior.
    for segment in att.split("."):
        if segment:
            target = getattr(target, segment)
    return target
The provided code snippet includes necessary dependencies for implementing the `setattr_recursive` function. Write a Python function `def setattr_recursive(obj, att, val)` to solve the following problem:
Set nested attribute of obj Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
Here is the function:
def setattr_recursive(obj, att, val):
    """
    Set nested attribute of obj
    Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
    """
    parent_path, dot, leaf_name = att.rpartition(".")
    # Resolve the parent object when the path is nested; otherwise set on obj.
    target = getattr_recursive(obj, parent_path) if dot else obj
    setattr(target, leaf_name, val)
16,171 |
def apply_with_stopping_condition(module, apply_fn, apply_condition=None, stopping_condition=None, **other_args):
    """Recursively apply `apply_fn` to `module` and its descendants.

    Traversal is pre-order over `module.children()`. A subtree is skipped
    entirely (including its root) when `stopping_condition(node)` is true;
    `apply_fn(node, **other_args)` runs only when `apply_condition(node)` is
    true.

    Previously the None defaults crashed with TypeError when either condition
    was omitted; now None means "never stop" / "always apply", which is
    backward compatible with all callers that pass both conditions.
    """
    if stopping_condition is not None and stopping_condition(module):
        return
    if apply_condition is None or apply_condition(module):
        apply_fn(module, **other_args)
    for child in module.children():
        apply_with_stopping_condition(
            child, apply_fn, apply_condition=apply_condition, stopping_condition=stopping_condition, **other_args
        )
16,172 | import torch
from einops import rearrange, repeat
from einops_exts import rearrange_many
from torch import einsum, nn
def exists(val):
    """Return True when `val` holds a value, i.e. is not None."""
    return val is not None
16,173 | import torch
from einops import rearrange, repeat
from einops_exts import rearrange_many
from torch import einsum, nn
def FeedForward(dim, mult=4):
    """Build a pre-norm two-layer MLP: LayerNorm -> Linear(dim, dim*mult)
    -> GELU -> Linear(dim*mult, dim), with bias-free linear layers."""
    hidden_dim = int(dim * mult)
    layers = [
        nn.LayerNorm(dim),
        nn.Linear(dim, hidden_dim, bias=False),
        nn.GELU(),
        nn.Linear(hidden_dim, dim, bias=False),
    ]
    return nn.Sequential(*layers)
16,174 | from typing import Optional
from transformers import AutoModelForCausalLM, AutoTokenizer
from helm.common.general import handle_module_not_found_error
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
def _infer_decoder_layers_attr_name(model):
    """Infer which attribute of `model` holds the decoder's transformer-block
    nn.ModuleList, by substring-matching the model class name against the
    known names table."""
    model_class_name = model.__class__.__name__.lower()
    for known_name, layers_attr in __KNOWN_DECODER_LAYERS_ATTR_NAMES.items():
        if known_name.lower() in model_class_name:
            return layers_attr
    raise ValueError(
        "We require the attribute name for the nn.ModuleList in the decoder storing the transformer block layers. "
        "Please supply this string manually."
    )
class Flamingo(nn.Module):
def __init__(
self,
vision_encoder: nn.Module,
lang_encoder: nn.Module,
eoc_token_id: int,
media_token_id: int,
vis_dim: int,
cross_attn_every_n_layers: int = 1,
gradient_checkpointing: bool = False,
):
"""
Args:
vision_encoder (nn.Module): HF CLIPModel
lang_encoder (nn.Module): HF causal language model
eoc_token_id (int): Token id for <|endofchunk|>
media_token_id (int): Token id for <image>
vis_dim (int): Dimension of the visual features.
Visual features are projected to match this shape along the last dimension.
cross_attn_every_n_layers (int, optional): How often to apply cross attention after transformer layer. Defaults to 1.
"""
super().__init__()
self.eoc_token_id = eoc_token_id
self.media_token_id = media_token_id
self.vis_dim = vis_dim
if hasattr(lang_encoder.config, "d_model"):
self.lang_dim = lang_encoder.config.d_model # mpt uses d_model
else:
self.lang_dim = lang_encoder.config.hidden_size
self.vision_encoder = vision_encoder.visual
self.perceiver = PerceiverResampler(dim=self.vis_dim)
self.lang_encoder = lang_encoder
self.lang_encoder.init_flamingo(
media_token_id=media_token_id,
lang_hidden_size=self.lang_dim,
vis_hidden_size=self.vis_dim,
cross_attn_every_n_layers=cross_attn_every_n_layers,
gradient_checkpointing=gradient_checkpointing,
)
self._use_gradient_checkpointing = gradient_checkpointing
self.perceiver._use_gradient_checkpointing = gradient_checkpointing
def forward(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: torch.Tensor = None,
labels: torch.Tensor = None,
clear_conditioned_layers: bool = True,
past_key_values=None,
use_cache: bool = False,
):
"""
Forward pass of Flamingo.
Args:
vision_x (torch.Tensor): Vision input
shape (B, T_img, F, C, H, W) with F=1
lang_x (torch.Tensor): Language input ids
shape (B, T_txt)
attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
labels (torch.Tensor, optional): Labels. Defaults to None.
clear_conditioned_layers: if True, clear the conditioned layers
once the foward pass is completed. Set this to false if the
same set of images will be reused in another subsequent
forward pass.
past_key_values: pre-computed values to pass to language model.
See past_key_values documentation in Hugging Face
CausalLM models.
use_cache: whether to use cached key values. See use_cache
documentation in Hugging Face CausalLM models.
"""
assert (
self.lang_encoder.initialized_flamingo
), "Flamingo layers are not initialized. Please call `init_flamingo` first."
assert (
self.lang_encoder._use_cached_vision_x or vision_x is not None
), "Must provide either vision_x or have precached media using cache_media()."
if self.lang_encoder._use_cached_vision_x:
# Case: use cached; vision_x should be cached and other
# vision-related inputs should not be provided.
assert (
vision_x is None
), "Expect vision_x to be None when media has been cached using cache_media(). Try uncache_media() first."
assert self.lang_encoder.is_conditioned()
else:
# Case: do not use caching (i.e. this is a standard forward pass);
self._encode_vision_x(vision_x=vision_x)
self._condition_media_locations(input_ids=lang_x)
output = self.lang_encoder(
input_ids=lang_x,
attention_mask=attention_mask,
labels=labels,
past_key_values=past_key_values,
use_cache=use_cache,
)
if clear_conditioned_layers:
self.lang_encoder.clear_conditioned_layers()
return output
def generate(
    self,
    vision_x: torch.Tensor,
    lang_x: torch.Tensor,
    attention_mask: torch.Tensor = None,
    **kwargs,
):
    """
    Generate text conditioned on vision and language inputs.

    Args:
        vision_x (torch.Tensor): Vision input
            shape (B, T_img, F, C, H, W)
            images in the same chunk are collated along T_img, and frames are collated along F
            currently only F=1 is supported (single-frame videos)
        lang_x (torch.Tensor): Language input
            shape (B, T_txt)
        attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
        **kwargs: see generate documentation in Hugging Face CausalLM models.
            Notable kwargs: max_length, num_beams, max_new_tokens, temperature,
            top_k, top_p, no_repeat_ngram_size, length_penalty,
            num_return_sequences, do_sample, early_stopping.
    Returns:
        torch.Tensor: lang_x with generated tokens appended to it
    """
    num_beams = kwargs.pop("num_beams", 1)
    if num_beams > 1:
        # Beam search expands the batch to B * num_beams hypotheses, so the
        # vision features must be replicated to match.
        vision_x = vision_x.repeat_interleave(num_beams, dim=0)

    self.lang_encoder._use_cached_vision_x = True
    self._encode_vision_x(vision_x=vision_x)

    # Default to the <|endofchunk|> token unless the caller overrides it.
    eos_token_id = kwargs.pop("eos_token_id", self.eoc_token_id)
    try:
        output = self.lang_encoder.generate(
            input_ids=lang_x,
            attention_mask=attention_mask,
            eos_token_id=eos_token_id,
            num_beams=num_beams,
            **kwargs,
        )
    finally:
        # Fix: restore state even when generation raises. Previously an
        # exception left _use_cached_vision_x=True and stale conditioning
        # on the layers, silently corrupting subsequent forward() calls.
        self.lang_encoder.clear_conditioned_layers()
        self.lang_encoder._use_cached_vision_x = False
    return output
def _encode_vision_x(self, vision_x: torch.Tensor):
    """
    Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
    Args:
        vision_x (torch.Tensor): Vision input
            shape (B, T_img, F, C, H, W)
            Images in the same chunk are collated along T_img, and frames are collated along F
            Currently only F=1 is supported (single-frame videos)
    rearrange code based on https://github.com/dhansmair/flamingo-mini
    """
    assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
    b, T, F = vision_x.shape[:3]
    assert F == 1, "Only single frame supported"
    # Flatten batch/time/frame dims so the vision encoder sees plain images.
    vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
    # Vision backbone is frozen: no gradients through the encoder.
    with torch.no_grad():
        # Index [1] selects the token features — presumably the encoder
        # returns (pooled, tokens) when output_tokens=True; confirm against
        # the configured vision encoder.
        vision_x = self.vision_encoder(vision_x)[1]
    vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
    vision_x = self.perceiver(vision_x)
    # Hand the visual features to every Flamingo decoder layer.
    for layer in self.lang_encoder._get_decoder_layers():
        layer.condition_vis_x(vision_x)
def wrap_fsdp(self, wrapper_kwargs, device_id):
    """
    Manually wraps submodules for FSDP and move other parameters to device_id.

    Why manually wrap?
    - all parameters within the FSDP wrapper must have the same requires_grad.
      We have a mix of frozen and unfrozen parameters.
    - model.vision_encoder.visual needs to be individually wrapped or encode_vision_x errors
      See: https://github.com/pytorch/pytorch/issues/82461#issuecomment-1269136344

    The rough wrapping structure is:
    - FlamingoModel
        - FSDP(FSDP(vision_encoder))
        - FSDP(FSDP(perceiver))
        - lang_encoder
            - FSDP(FSDP(input_embeddings))
            - FlamingoLayers
                - FSDP(FSDP(gated_cross_attn_layer))
                - FSDP(FSDP(decoder_layer))
            - FSDP(FSDP(output_embeddings))
            - other parameters

    Known issues:
    - Our FSDP strategy is not compatible with tied embeddings. If the LM embeddings are tied,
      train with DDP or set the --freeze_lm_embeddings flag to true.
    - With FSDP + gradient ckpting, one can increase the batch size with seemingly no upper bound.
      Although the training curves look okay, we found that downstream performance dramatically
      degrades if the batch size is unreasonably large (e.g., 100 MMC4 batch size for OPT-125M).

    FAQs about our FSDP wrapping strategy:
    Why double wrap?
        As of torch==2.0.1, FSDP's _post_forward_hook and _post_backward_hook
        only free gathered parameters if the module is NOT FSDP root.

    Why unfreeze the decoder_layers?
        See https://github.com/pytorch/pytorch/issues/95805
        As of torch==2.0.1, FSDP's _post_backward_hook is only registered if the flat param
        requires_grad=True. We need the post-backward hook to fire to avoid OOM.
        To effectively freeze the decoder layers, we exclude them from the optimizer.

    What is assumed to be frozen v. unfrozen?
        We assume that the model is being trained under normal Flamingo settings
        with these lines being called in factory.py:
            ```
            # Freeze all parameters
            model.requires_grad_(False)
            assert sum(p.numel() for p in model.parameters() if p.requires_grad) == 0

            # Unfreeze perceiver, gated_cross_attn_layers, and LM input embeddings
            model.perceiver.requires_grad_(True)
            model.lang_encoder.gated_cross_attn_layers.requires_grad_(True)
            [optional] model.lang_encoder.get_input_embeddings().requires_grad_(True)
            ```
    """
    # unfreeze the decoder layers so FSDP registers post-backward hooks on
    # their flat params (see docstring); excluded from the optimizer below
    for block in self.lang_encoder.old_decoder_blocks:
        block.requires_grad_(True)

    # wrap in FSDP (double wrap — see docstring FAQ)
    with enable_wrap(wrapper_cls=FSDP, **wrapper_kwargs):
        self.perceiver = wrap(wrap(self.perceiver))
        self.lang_encoder.old_decoder_blocks = nn.ModuleList(
            wrap(wrap(block)) for block in self.lang_encoder.old_decoder_blocks
        )
        self.lang_encoder.gated_cross_attn_layers = nn.ModuleList(
            wrap(wrap(layer)) if layer is not None else None for layer in self.lang_encoder.gated_cross_attn_layers
        )
        # rebuild the FlamingoLayers so they hold the wrapped modules
        self.lang_encoder.init_flamingo_layers(self._use_gradient_checkpointing)
        self.lang_encoder.set_input_embeddings(wrap(wrap(self.lang_encoder.get_input_embeddings())))
        self.lang_encoder.set_output_embeddings(wrap(wrap(self.lang_encoder.get_output_embeddings())))
        self.vision_encoder = wrap(wrap(self.vision_encoder))  # frozen

    # manually move non-FSDP managed parameters to device_id
    # these are all in lang_encoder; stop descending at FSDP-wrapped modules
    apply_with_stopping_condition(
        module=self.lang_encoder,
        apply_fn=lambda m: m.to(device_id),
        apply_condition=lambda m: len(list(m.children())) == 0,
        stopping_condition=lambda m: isinstance(m, FSDP),
    )

    # exclude the original decoder layers from the optimizer (they were only
    # unfrozen above for FSDP hook registration, not for training)
    for block in self.lang_encoder.old_decoder_blocks:
        for p in block.parameters():
            p.exclude_from_optimizer = True

    # set up clip_grad_norm_ function: clip each FSDP instance separately
    # (NOTE(review): presumably because FSDP shards gradients, so a single
    # global clip_grad_norm_ call would be incorrect — confirm)
    def clip_grad_norm_(max_norm):
        self.perceiver.clip_grad_norm_(max_norm)
        for layer in self.lang_encoder.gated_cross_attn_layers:
            if layer is not None:
                layer.clip_grad_norm_(max_norm)
        self.lang_encoder.get_input_embeddings().clip_grad_norm_(max_norm)

    self.clip_grad_norm_ = clip_grad_norm_
def _condition_media_locations(self, input_ids: torch.Tensor):
    """Locate <image> tokens in input_ids and condition every decoder layer on them.

    Args:
        input_ids (torch.Tensor): language token ids, shape (B, T_txt)
    """
    media_locations = input_ids.eq(self.media_token_id)
    for decoder_layer in self.lang_encoder._get_decoder_layers():
        decoder_layer.condition_media_locations(media_locations)
def cache_media(self, input_ids: torch.Tensor, vision_x: torch.Tensor):
    """Precompute and pin media conditioning for log-likelihood evaluations.

    After this call every forward() attends to the LAST image in vision_x,
    until uncache_media() is invoked.  Not meant for generate().

    Args:
        input_ids (torch.Tensor): language token ids, shape (B, T_txt)
        vision_x (torch.Tensor): vision input, shape (B, T_img, F, C, H, W);
            images in a chunk collate along T_img, frames along F
            (only F=1, i.e. single-frame videos, is supported)
    """
    # Mark <image> positions and install visual features on the layers
    # (independent steps), then flip the cache flag.
    self._condition_media_locations(input_ids=input_ids)
    self._encode_vision_x(vision_x=vision_x)
    self.lang_encoder._use_cached_vision_x = True
def uncache_media(self):
    """Drop all cached media conditioning and reset the cache flag."""
    self.lang_encoder._use_cached_vision_x = False
    self.lang_encoder.clear_conditioned_layers()
class FlamingoLMMixin(nn.Module):
    """
    Mixin to add cross-attention layers to a language model.
    """

    def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
        # Dotted attribute path to the decoder layer list, e.g. "model.layers".
        self.decoder_layers_attr_name = decoder_layers_attr_name

    def _get_decoder_layers(self):
        # Resolve the dotted path set above on this model instance.
        return getattr_recursive(self, self.decoder_layers_attr_name)

    def _set_decoder_layers(self, value):
        setattr_recursive(self, self.decoder_layers_attr_name, value)

    def init_flamingo(
        self,
        media_token_id,
        lang_hidden_size,
        vis_hidden_size,
        cross_attn_every_n_layers,
        gradient_checkpointing,
    ):
        """
        Initialize Flamingo by adding a new gated cross attn to the decoder. Store the media token id for computing the media locations.
        """
        self.old_decoder_blocks = self._get_decoder_layers()
        # One gated cross-attention block every cross_attn_every_n_layers
        # decoder layers; the remaining slots hold None placeholders.
        self.gated_cross_attn_layers = nn.ModuleList(
            [
                GatedCrossAttentionBlock(dim=lang_hidden_size, dim_visual=vis_hidden_size)
                if (layer_idx + 1) % cross_attn_every_n_layers == 0
                else None
                for layer_idx, _ in enumerate(self._get_decoder_layers())
            ]
        )
        self.init_flamingo_layers(gradient_checkpointing)
        self.media_token_id = media_token_id
        self.initialized_flamingo = True
        self._use_cached_vision_x = False

    def init_flamingo_layers(self, gradient_checkpointing):
        """
        Re-initializes the FlamingoLayers.
        Propagates any changes made to self.gated_cross_attn_layers or self.old_decoder_blocks.
        """
        self._set_decoder_layers(
            nn.ModuleList(
                [
                    FlamingoLayer(gated_cross_attn_layer, decoder_layer, gradient_checkpointing)
                    for gated_cross_attn_layer, decoder_layer in zip(
                        self.gated_cross_attn_layers, self.old_decoder_blocks
                    )
                ]
            )
        )

    def forward(self, input_ids, attention_mask, **kwargs):
        """Condition the Flamingo layers on the media locations before forward()"""
        if not self.initialized_flamingo:
            raise ValueError("Flamingo layers are not initialized. Please call `init_flamingo` first.")
        media_locations = input_ids == self.media_token_id
        # if there are media already cached and we're generating and there are no media tokens in the input,
        # we'll assume that ALL input tokens should attend to the last previous media that is cached.
        # this is especially important for HF generate() compatibility, since generate() calls forward()
        # repeatedly one token at a time (with no media tokens).
        # without this check, the model would not attend to any images when generating (after the first token)
        use_cached_media_locations = self._use_cached_vision_x and self.is_conditioned() and not media_locations.any()
        for layer in self._get_decoder_layers():
            if not use_cached_media_locations:
                layer.condition_media_locations(media_locations)
            layer.condition_use_cached_media(use_cached_media_locations)
        # package arguments for the other parent's forward. since we don't know the order of the arguments,
        # make them all kwargs
        kwargs["input_ids"] = input_ids
        kwargs["attention_mask"] = attention_mask
        return super().forward(**kwargs)  # Call the other parent's forward method

    def is_conditioned(self) -> bool:
        """Check whether all decoder layers are already conditioned."""
        return all(l.is_conditioned() for l in self._get_decoder_layers())

    def clear_conditioned_layers(self):
        # Drop all cached conditioning: visual features, media locations,
        # and the use-cached-media flag on every decoder layer.
        for layer in self._get_decoder_layers():
            layer.condition_vis_x(None)
            layer.condition_media_locations(None)
            layer.condition_use_cached_media(None)
def extend_instance(obj, mixin):
    """Apply mixins to a class instance after creation."""
    # Build a subclass on the fly whose MRO puts the mixin ahead of the
    # instance's original class, then rebind the instance to it.  The mixin
    # must come first so its method overrides (e.g. forward()) win.
    original_cls = obj.__class__
    obj.__class__ = type(original_cls.__name__, (mixin, original_cls), {})
The provided code snippet includes necessary dependencies for implementing the `create_model_and_transforms` function. Write a Python function `def create_model_and_transforms( clip_vision_encoder_path: str, clip_vision_encoder_pretrained: str, lang_encoder_path: str, tokenizer_path: str, cross_attn_every_n_layers: int = 1, use_local_files: bool = False, decoder_layers_attr_name: str = None, freeze_lm_embeddings: bool = False, cache_dir: Optional[str] = None, **flamingo_kwargs, )` to solve the following problem:
Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver. cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model
Here is the function:
def create_model_and_transforms(
    clip_vision_encoder_path: str,
    clip_vision_encoder_pretrained: str,
    lang_encoder_path: str,
    tokenizer_path: str,
    cross_attn_every_n_layers: int = 1,
    use_local_files: bool = False,
    decoder_layers_attr_name: str = None,
    freeze_lm_embeddings: bool = False,
    cache_dir: Optional[str] = None,
    **flamingo_kwargs,
):
    """
    Initialize a Flamingo model from a pretrained vision encoder and language encoder.
    Appends special tokens to the tokenizer and freezes backbones.

    Args:
        clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
        clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
        lang_encoder_path (str): path to pretrained language encoder
        tokenizer_path (str): path to pretrained tokenizer
        cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
        use_local_files (bool, optional): whether to use local files. Defaults to False.
        decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
        freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver.
        cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights.
    Returns:
        Flamingo: Flamingo model from pretrained vision and language encoders
        Image processor: Pipeline to preprocess input images
        Tokenizer: A tokenizer for the language model
    """
    try:
        import open_clip
    except ModuleNotFoundError as e:
        handle_module_not_found_error(e, ["vlm"])

    vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
        clip_vision_encoder_path,
        pretrained=clip_vision_encoder_pretrained,
        cache_dir=cache_dir,
    )
    # set the vision encoder to output the visual features (per-token output)
    vision_encoder.visual.output_tokens = True

    text_tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_path,
        local_files_only=use_local_files,
        trust_remote_code=True,
        cache_dir=cache_dir,
    )
    # add Flamingo special tokens to the tokenizer
    text_tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>"]})
    if text_tokenizer.pad_token is None:
        # Issue: GPT models don't have a pad token, which we use to
        # modify labels for the loss.
        text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})

    lang_encoder = AutoModelForCausalLM.from_pretrained(
        lang_encoder_path,
        local_files_only=use_local_files,
        trust_remote_code=True,
        cache_dir=cache_dir,
    )

    # hacks for MPT-1B, which doesn't have a get_input_embeddings method
    if "mpt-1b-redpajama-200b" in lang_encoder_path:

        class EmbeddingFnMixin:
            def get_input_embeddings(self):
                return self.transformer.wte

            def set_input_embeddings(self, new_embeddings):
                self.transformer.wte = new_embeddings

        extend_instance(lang_encoder, EmbeddingFnMixin)

    # convert LM to FlamingoLM (adds gated cross-attention machinery)
    extend_instance(lang_encoder, FlamingoLMMixin)
    if decoder_layers_attr_name is None:
        decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
    lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
    # account for the special tokens added above
    lang_encoder.resize_token_embeddings(len(text_tokenizer))

    model = Flamingo(
        vision_encoder,
        lang_encoder,
        # [-1] picks the token id itself (encode may prepend BOS)
        text_tokenizer.encode("<|endofchunk|>")[-1],
        text_tokenizer.encode("<image>")[-1],
        vis_dim=open_clip.get_model_config(clip_vision_encoder_path)["vision_cfg"]["width"],
        cross_attn_every_n_layers=cross_attn_every_n_layers,
        **flamingo_kwargs,
    )

    # Freeze all parameters
    model.requires_grad_(False)
    assert sum(p.numel() for p in model.parameters() if p.requires_grad) == 0

    # Unfreeze perceiver, gated_cross_attn_layers, and LM input embeddings
    model.perceiver.requires_grad_(True)
    model.lang_encoder.gated_cross_attn_layers.requires_grad_(True)
    if not freeze_lm_embeddings:
        model.lang_encoder.get_input_embeddings().requires_grad_(True)
        # TODO: investigate also training the output embeddings when untied

    print(
        f"Flamingo model initialized with {sum(p.numel() for p in model.parameters() if p.requires_grad)} trainable parameters"
    )
return model, image_processor, text_tokenizer | Initialize a Flamingo model from a pretrained vision encoder and language encoder. Appends special tokens to the tokenizer and freezes backbones. Args: clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32") clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k") lang_encoder_path (str): path to pretrained language encoder tokenizer_path (str): path to pretrained tokenizer cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1. use_local_files (bool, optional): whether to use local files. Defaults to False. decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None. freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver. cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights. Returns: Flamingo: Flamingo model from pretrained vision and language encoders Image processor: Pipeline to preprocess input images Tokenizer: A tokenizer for the language model |
16,175 | import os
from functools import partial
from helm.common.optional_dependencies import handle_module_not_found_error
def handle_module_not_found_error(e: ModuleNotFoundError, suggestions: Optional[List[str]] = None):
    """Re-raise a missing optional dependency as an install hint for the user."""
    # TODO: suggest more specific optional dependencies
    # e.g. crfm-helm[plots] or crfm-helm[server]
    extras = (suggestions or []) + ["all"]
    suggested_commands = " or ".join(f"`pip install crfm-helm[{suggestion}]`" for suggestion in extras)
    # Chain the original error so the missing module name stays in the traceback.
    raise OptionalDependencyNotInstalled(
        f"Optional dependency {e.name} is not installed. Please run {suggested_commands} to install it."
    ) from e
def build_tokenizer(path: str, context_length: int = 64, *args, **kwargs):
    """Load a char-BPE tokenizer from `path`, padded/truncated to `context_length` tokens."""
    try:
        from tokenizers import CharBPETokenizer
    except ModuleNotFoundError as e:
        handle_module_not_found_error(e, ["heim"])
    # Bind the vocab/merges files so any extra args/kwargs pass straight through.
    from_file = partial(
        CharBPETokenizer.from_file,
        vocab_filename=os.path.join(path, "bpe-16k-vocab.json"),
        merges_filename=os.path.join(path, "bpe-16k-merges.txt"),
        unk_token="[UNK]",
    )
    tokenizer = from_file(*args, **kwargs)
    tokenizer.add_special_tokens(["[PAD]"])
    # Fix every encoding to exactly `context_length` tokens.
    tokenizer.enable_padding(length=context_length, pad_id=tokenizer.token_to_id("[PAD]"))
    tokenizer.enable_truncation(max_length=context_length)
    print(f"{path} successfully restored..")
return tokenizer | null |
16,176 | import torch
import torch.nn as nn
from typing import Tuple, Optional
def nonlinearity(x):
    # swish / SiLU activation: x * sigmoid(x)
    # NOTE(review): the trailing "| null" below is a dataset-extraction
    # artifact fused onto the return line; confirm against the original.
    return x * torch.sigmoid(x) | null
16,177 | import torch
import torch.nn as nn
from typing import Tuple, Optional
def Normalize(in_channels):
    # 32-group GroupNorm over `in_channels` channels (eps/affine fixed here).
    # NOTE(review): the trailing "| null" below is a dataset-extraction
    # artifact fused onto the return line; confirm against the original.
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) | null
16,178 | import torch
from typing import Optional
from tqdm import tqdm
from torch.nn import functional as F
def cutoff_topk_logits(logits: torch.FloatTensor, k: int) -> torch.FloatTensor:
    """Mask every logit outside the top-k per row to -inf; pass through when k is None."""
    if k is None:
        return logits
    kth_values, _ = torch.topk(logits, k)
    filtered = logits.clone()
    # kth_values[:, [-1]] keeps a trailing dim so the threshold broadcasts per row.
    filtered[filtered < kth_values[:, [-1]]] = -float("Inf")
    return filtered
def cutoff_topp_probs(probs: torch.FloatTensor, p: float) -> torch.FloatTensor:
    """Nucleus (top-p) filtering: zero out the distribution tail and renormalize; pass through when p is None."""
    if p is None:
        return probs
    sorted_probs, sorted_indices = torch.sort(probs, dim=-1, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    # Drop tokens once cumulative mass reaches p, but always keep the single
    # most probable token: shift the mask right and clear position 0.
    drop_sorted = cumulative >= p
    drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
    drop_sorted[..., 0] = 0
    # Map the mask from sorted order back to the original token order.
    drop_mask = drop_sorted.scatter(-1, sorted_indices, drop_sorted)
    kept = probs.masked_fill(drop_mask, 0.0)
    return kept / torch.sum(kept, dim=-1, keepdim=True)
def get_positional_encoding(inputs: torch.LongTensor, mode: str = "1d") -> torch.LongTensor:
    """Build index tensors used as positional ids for 1d sequences or 2d grids.

    mode="1d": returns a (B, N) tensor of 0..N-1 per row.
    mode="2d": returns a (rows, cols) pair of (B, H, W) index tensors.
    """
    device = inputs.device
    if mode == "1d":
        batch, length = inputs.shape
        return torch.arange(length, device=device).repeat((batch, 1))
    if mode == "2d":
        batch, height, width = inputs.shape
        rows = torch.arange(height, device=device).repeat(batch, width, 1).transpose(1, 2)
        cols = torch.arange(width, device=device).repeat(batch, height, 1)
        return (rows, cols)
    raise ValueError("%s positional encoding invalid" % mode)
def sampling(
    model: torch.nn.Module,
    tokens: torch.LongTensor,
    top_k: Optional[float] = None,
    top_p: Optional[float] = None,
    softmax_temperature: float = 1.0,
    is_tqdm: bool = True,
    use_fp16: bool = True,
    max_seq_len: int = 256,
) -> torch.LongTensor:
    """Autoregressively sample max_seq_len image tokens conditioned on text tokens.

    Returns the sampled image-token ids, shape (B, max_seq_len).
    """
    code = None  # image tokens sampled so far
    past = None  # stacked key/value states, one entry appended per step
    pbar = tqdm(range(max_seq_len), total=max_seq_len) if is_tqdm else range(max_seq_len)
    pos_enc_tokens = get_positional_encoding(tokens, mode="1d")
    for cnt, h in enumerate(pbar):
        if code is None:
            # First step: no image tokens yet, condition on text only.
            code_ = None
            pos_enc_code_ = None
        else:
            # Feed only the most recently sampled token; `past` carries the rest.
            code_ = code.clone().detach()
            pos_enc_code_ = get_positional_encoding(code_, mode="1d")
            code_ = code_[:, cnt - 1].unsqueeze(-1)
            pos_enc_code_ = pos_enc_code_[:, cnt - 1].unsqueeze(-1)
        logits, present = model.sampling(
            images=code_, texts=tokens, pos_images=pos_enc_code_, pos_texts=pos_enc_tokens, use_fp16=use_fp16, past=past
        )
        # Sample in fp32 regardless of the model's compute dtype.
        logits = logits.to(dtype=torch.float32)
        logits = logits / softmax_temperature
        present = torch.stack(present).clone().detach()
        if past is None:
            past = [present]
        else:
            past.append(present)
        # top-k then top-p filtering, then one multinomial draw per batch row.
        logits = cutoff_topk_logits(logits, top_k)
        probs = F.softmax(logits, dim=-1)
        probs = cutoff_topp_probs(probs, top_p)
        idx = torch.multinomial(probs, num_samples=1).clone().detach()
        code = idx if code is None else torch.cat([code, idx], axis=1)
    del past
    return code
def sampling_igpt(
    model: torch.nn.Module,
    sos: torch.FloatTensor,
    top_k: Optional[float] = None,
    top_p: Optional[float] = None,
    softmax_temperature: float = 1.0,
    is_tqdm: bool = True,
    use_fp16: bool = True,
    max_seq_len: int = 256,
) -> torch.LongTensor:
    """Unconditional (iGPT-style) sampling: generate max_seq_len codes starting from an SOS embedding."""
    code = None  # codes sampled so far
    past = None  # stacked key/value states, one entry appended per step
    pbar = tqdm(range(max_seq_len), total=max_seq_len) if is_tqdm else range(max_seq_len)
    for cnt, h in enumerate(pbar):
        if code is None:
            # First step: condition only on the SOS embedding.
            code_ = None
            pos_enc_code_ = None
        else:
            # Feed only the most recently sampled code; `past` carries the rest.
            code_ = code.clone().detach()
            pos_enc_code_ = get_positional_encoding(code_, mode="1d")
            code_ = code_[:, cnt - 1].unsqueeze(-1)
            pos_enc_code_ = pos_enc_code_[:, cnt - 1].unsqueeze(-1)
        logits, present = model.sampling(sos=sos, codes=code_, pos_codes=pos_enc_code_, use_fp16=use_fp16, past=past)
        # Sample in fp32 regardless of the model's compute dtype.
        logits = logits.to(dtype=torch.float32)
        logits = logits / softmax_temperature
        present = torch.stack(present).clone().detach()
        if past is None:
            past = [present]
        else:
            past.append(present)
        # top-k then top-p filtering, then one multinomial draw per batch row.
        logits = cutoff_topk_logits(logits, top_k)
        probs = F.softmax(logits, dim=-1)
        probs = cutoff_topp_probs(probs, top_p)
        idx = torch.multinomial(probs, num_samples=1).clone().detach()
        code = idx if code is None else torch.cat([code, idx], axis=1)
    del past
return code | null |
16,179 | import os
import random
import urllib
import hashlib
import tarfile
import torch
import numpy as np
from torch.nn import functional as F
from tqdm import tqdm
from helm.common.optional_dependencies import handle_module_not_found_error
def set_seed(seed: int):
    """Seed the Python, NumPy and Torch RNGs for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) | null |
16,180 | import os
import random
import urllib
import hashlib
import tarfile
import torch
import numpy as np
from torch.nn import functional as F
from tqdm import tqdm
from helm.common.optional_dependencies import handle_module_not_found_error
def handle_module_not_found_error(e: ModuleNotFoundError, suggestions: Optional[List[str]] = None):
    """Re-raise a missing optional dependency as an install hint for the user."""
    # TODO: Ask user to install more specific optional dependencies
    # e.g. crfm-helm[plots] or crfm-helm[server]
    suggested_commands = " or ".join(
        [f"`pip install crfm-helm[{suggestion}]`" for suggestion in (suggestions or []) + ["all"]]
    )
    # Chain the original error so the missing module name stays in the traceback.
    raise OptionalDependencyNotInstalled(
        f"Optional dependency {e.name} is not installed. Please run {suggested_commands} to install it."
    ) from e
def clip_score(
    prompt: str, images: np.ndarray, model_clip: torch.nn.Module, preprocess_clip, device: str
) -> np.ndarray:
    """Rank `images` by CLIP similarity to `prompt` (computes indices, best first)."""
    try:
        import clip
        from PIL import Image
    except ModuleNotFoundError as e:
        handle_module_not_found_error(e, ["heim"])
    # Images are floats — presumably in [0, 1] given the *255 conversion; verify.
    images = [preprocess_clip(Image.fromarray((image * 255).astype(np.uint8))) for image in images]
    images = torch.stack(images, dim=0).to(device=device)
    texts = clip.tokenize(prompt).to(device=device)
    # One copy of the tokenized prompt per image so similarity is pairwise.
    texts = torch.repeat_interleave(texts, images.shape[0], dim=0)
    image_features = model_clip.encode_image(images)
    text_features = model_clip.encode_text(texts)
    scores = F.cosine_similarity(image_features, text_features).squeeze()
    rank = torch.argsort(scores, descending=True).cpu().numpy()
return rank | null |
16,181 | import os
import random
import urllib
import hashlib
import tarfile
import torch
import numpy as np
from torch.nn import functional as F
from tqdm import tqdm
from helm.common.optional_dependencies import handle_module_not_found_error
def download(url: str, root: str) -> str:
# NOTE(review): download()'s body is missing here — only the signature
# survives (presumably lost in extraction); confirm against the original.
def realpath_url_or_path(url_or_path: str, root: str = None) -> str:
    # Resolve to a local path: fetch http(s) URLs, pass local paths through.
    if urllib.parse.urlparse(url_or_path).scheme in ("http", "https"):
        return download(url_or_path, root)
return url_or_path | null |
16,182 | from typing import Optional, List
from dataclasses import dataclass, field
from helm.common.optional_dependencies import handle_module_not_found_error
class DefaultConfig:
    # NOTE(review): DataConfig/Stage1Config/Stage2Config definitions (and the
    # @dataclass decorators these class-level defaults suggest) are not
    # visible here — presumably lost in extraction; confirm upstream.
    dataset: DataConfig = DataConfig()
    stage1: Stage1Config = Stage1Config()
    stage2: Stage2Config = Stage2Config()


class FineTuningConfig:
    # Same schema as DefaultConfig plus optimizer/experiment settings.
    dataset: DataConfig = DataConfig()
    stage1: Stage1Config = Stage1Config()
    stage2: Stage2Config = Stage2Config()
    optimizer: OptConfig = OptConfig()
    experiment: ExpConfig = ExpConfig()


def get_base_config(use_default=True):
    # Build an OmegaConf structured config from the chosen schema.
    # NOTE(review): the trailing "| null" is a dataset-extraction artifact.
    return OmegaConf.structured(DefaultConfig if use_default else FineTuningConfig) | null
16,183 | import torch
from helm.common.optional_dependencies import handle_module_not_found_error
def get_masks_and_position_ids_coglm(seq, context_length):
    """Build (tokens, attention_mask, position_ids) for CogLM-style prefix decoding.

    The first `context_length` positions form the prefix: every position may
    attend to them; the remainder is causally (lower-triangular) masked.
    """
    tokens = seq.unsqueeze(0)
    attention_mask = torch.ones((1, len(seq), len(seq)), device=tokens.device)
    attention_mask.tril_()
    # Full attention over the prefix, on top of the causal mask.
    attention_mask[..., :context_length] = 1
    attention_mask.unsqueeze_(1)
    position_ids = torch.zeros(len(seq), device=tokens.device, dtype=torch.long)
    torch.arange(0, context_length, out=position_ids[:context_length])
    # Generated tokens restart at position 512 — presumably a fixed offset in
    # CogLM's position-embedding layout; confirm against the model.
    torch.arange(512, 512 + len(seq) - context_length, out=position_ids[context_length:])
    position_ids = position_ids.unsqueeze(0)
return tokens, attention_mask, position_ids | null |
16,184 | import torch
from helm.common.optional_dependencies import handle_module_not_found_error
def get_recipe(name):
    """Return sampling hyperparameters and a query template for a named style preset.

    The dict holds temperatures / top-k values for generation ("gen"), direct
    super-resolution ("dsr") and iterative super-resolution ("itersr"), plus
    `attn_plus` and a `query_template` (Chinese style keywords followed by
    the <start_of_image> control token).
    """
    # Baseline values; each named preset below overrides a subset.
    r = {
        "attn_plus": 1.4,
        "temp_all_gen": 1.15,
        "topk_gen": 16,
        "temp_cluster_gen": 1.0,
        "temp_all_dsr": 1.5,
        "topk_dsr": 100,
        "temp_cluster_dsr": 0.89,
        "temp_all_itersr": 1.3,
        "topk_itersr": 16,
        "query_template": "{}<start_of_image>",
    }
    if name == "none":
        pass
    elif name == "mainbody":
        r["query_template"] = "{} 高清摄影 隔绝<start_of_image>"
    elif name == "photo":
        r["query_template"] = "{} 高清摄影<start_of_image>"
    elif name == "flat":
        r["query_template"] = "{} 平面风格<start_of_image>"
        # r['attn_plus'] = 1.8
        # r['temp_cluster_gen'] = 0.75
        r["temp_all_gen"] = 1.1
        r["topk_dsr"] = 5
        r["temp_cluster_dsr"] = 0.4
        r["temp_all_itersr"] = 1
        r["topk_itersr"] = 5
    elif name == "comics":
        r["query_template"] = "{} 漫画 隔绝<start_of_image>"
        r["topk_dsr"] = 5
        r["temp_cluster_dsr"] = 0.4
        r["temp_all_gen"] = 1.1
        r["temp_all_itersr"] = 1
        r["topk_itersr"] = 5
    elif name == "oil":
        r["query_template"] = "{} 油画风格<start_of_image>"
        pass
    elif name == "sketch":
        r["query_template"] = "{} 素描风格<start_of_image>"
        r["temp_all_gen"] = 1.1
    elif name == "isometric":
        r["query_template"] = "{} 等距矢量图<start_of_image>"
        r["temp_all_gen"] = 1.1
    elif name == "chinese":
        r["query_template"] = "{} 水墨国画<start_of_image>"
        r["temp_all_gen"] = 1.12
    elif name == "watercolor":
        r["query_template"] = "{} 水彩画风格<start_of_image>"
return r | null |
16,185 | import math
import torch
import torch.nn.functional as F
from helm.common.optional_dependencies import handle_module_not_found_error
The provided code snippet includes necessary dependencies for implementing the `sparse_attention_2d_text` function. Write a Python function `def sparse_attention_2d_text( q0, k0, v0, q1, k1, v1, attention_mask, n_head, text_len, kernel_size=9, attention_dropout=None, log_attention_weights=None, **kwargs, )` to solve the following problem:
q0, k0, v0: [batch_size, 16, hidden_size] q1, k1, v1: [batch_size, 3600, hidden_size] n_head: int attention_mask: [batch_size, 16]
Here is the function:
def sparse_attention_2d_text(
    q0,
    k0,
    v0,
    q1,
    k1,
    v1,
    attention_mask,
    n_head,
    text_len,
    kernel_size=9,
    attention_dropout=None,
    log_attention_weights=None,
    **kwargs,
):
    """
    Two-level sparse attention (CogView2-style): full attention over the text
    tokens (level 0) plus local windowed attention over the image tokens
    (level 1), with cross attention from image tokens to text tokens.

    q0, k0, v0: [batch_size, 16, hidden_size]
    q1, k1, v1: [batch_size, 3600, hidden_size]
    n_head: int
    attention_mask: [batch_size, 16]
    """
    b, s0, h0 = q0.shape
    b, s1, h1 = q1.shape
    # NOTE(review): bare `sqrt` is not imported in this module (only `math`);
    # upstream presumably uses int(math.sqrt(s1)) for the grid side — confirm.
    h, l1 = h0 // n_head, sqrt(s1)
    assert attention_mask.shape[-1] == s0, f"Mask Shape: {attention_mask.shape}"
    q0 = q0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    v0 = v0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    k0T = k0.reshape(b, s0, n_head, h).permute(0, 2, 3, 1)
    # standard attention for level 0 (additive -10000 masking of padded text)
    attention_scores = torch.matmul(q0 / math.sqrt(q0.shape[-1]), k0T)
    attention_scores = torch.mul(attention_scores, attention_mask) - 10000.0 * (1.0 - attention_mask)
    attention_probs0 = F.softmax(attention_scores, dim=-1)
    # local attention for level 1: reshape tokens to an (l1 x l1) grid
    q1 = (
        (q1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1) / math.sqrt(h1 // n_head))
        .contiguous()
        .view(b * n_head, h1 // n_head, l1, l1)
    )
    k1 = k1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    v1 = v1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    scores_1_to_1 = f_similar(q1, k1, kernel_size * 2 - 1, kernel_size, False)
    # cross attention: image queries against text keys
    scores_1_to_0 = torch.matmul(q1.view(b, n_head, h, s1).transpose(-1, -2), k0T)
    if log_attention_weights is not None:
        scores_1_to_0 += log_attention_weights
    scores_1_to_0 = torch.mul(scores_1_to_0, attention_mask) - 10000.0 * (1.0 - attention_mask)
    # joint softmax over [text scores | local window scores]
    scores_1 = torch.cat(
        (scores_1_to_0.view(b * n_head, s1, s0), scores_1_to_1.view(b * n_head, -1, scores_1_to_1.shape[3])), dim=-1
    )
    attention_probs1 = F.softmax(scores_1, dim=-1)
    if attention_dropout is not None:
        with get_cuda_rng_tracker().fork():
            attention_probs1 = attention_dropout(attention_probs1)
    # weighting for level 0
    context0 = torch.matmul(attention_probs0, v0)  # [b, n_head, s0, h]
    # weighting for level 1 (local window part of the joint softmax)
    probs_1_to_1 = attention_probs1[:, :, -scores_1_to_1.shape[3] :].view_as(scores_1_to_1)
    context1_to_1 = f_weighting(v1, probs_1_to_1.contiguous(), kernel_size * 2 - 1, kernel_size, False)
    context1 = context1_to_1.view(b, n_head, h, l1**2)
    # weighting for cross attention (text part of the joint softmax)
    probs_1_to_0 = attention_probs1[:, :, : scores_1_to_0.shape[3]].view(b, n_head, -1, scores_1_to_0.shape[3])
    context1_to_0 = torch.matmul(probs_1_to_0, v0)
    context1 = context1.transpose(-1, -2) + context1_to_0
    # concatenate text and image contexts back into one sequence
    output = torch.cat((context0, context1), dim=2).transpose(1, 2).reshape(b, s0 + s1, h0)
return output | q0, k0, v0: [batch_size, 16, hidden_size] q1, k1, v1: [batch_size, 3600, hidden_size] n_head: int attention_mask: [batch_size, 16] |
16,186 | import math
import torch
import torch.nn.functional as F
from helm.common.optional_dependencies import handle_module_not_found_error
The provided code snippet includes necessary dependencies for implementing the `sparse_attention_2d_notext` function. Write a Python function `def sparse_attention_2d_notext( q0, k0, v0, q1, k1, v1, attention_mask, n_head, text_len, kernel_size=9, attention_dropout=None, log_attention_weights=None, **kwargs, )` to solve the following problem:
q0, k0, v0: [batch_size, 16, hidden_size] q1, k1, v1: [batch_size, 3600, hidden_size] n_head: int attention_mask: [batch_size, 16]
Here is the function:
def sparse_attention_2d_notext(
    q0,
    k0,
    v0,
    q1,
    k1,
    v1,
    attention_mask,
    n_head,
    text_len,
    kernel_size=9,
    attention_dropout=None,
    log_attention_weights=None,
    **kwargs,
):
    """
    Two-level sparse 2D attention with no text-to-image cross attention:
    dense attention over the small level-0 sequence, windowed local
    attention over the level-1 image-token grid.

    q0, k0, v0: [batch_size, 16, hidden_size]
    q1, k1, v1: [batch_size, 3600, hidden_size]
    n_head: int
    attention_mask: [batch_size, 16]

    Returns the concatenated context, shape [batch_size, s0 + s1, hidden_size].
    """
    b, s0, h0 = q0.shape
    b, s1, h1 = q1.shape
    # h: per-head hidden size; l1: edge length of the square level-1 grid.
    # NOTE(review): `sqrt` is a module-level helper (presumably an integer
    # square root), not math.sqrt -- TODO confirm against the full module.
    h, l1 = h0 // n_head, sqrt(s1)
    assert len(attention_mask.shape) == 4 and attention_mask.shape[-1] == s0, f"Mask Shape: {attention_mask.shape}"
    q0 = q0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    v0 = v0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    k0T = k0.reshape(b, s0, n_head, h).permute(0, 2, 3, 1)
    # standard (dense) attention for level 0, scaled by sqrt(head dim)
    attention_scores = torch.matmul(q0 / math.sqrt(q0.shape[-1]), k0T)
    # masked positions get a large negative score so softmax ignores them
    attention_scores = torch.mul(attention_scores, attention_mask) - 10000.0 * (1.0 - attention_mask)
    attention_probs0 = F.softmax(attention_scores, dim=-1)
    # local attention for level 1: reshape heads into [b*n_head, h, l1, l1] feature maps
    q1 = (
        (q1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1) / math.sqrt(h1 // n_head))
        .contiguous()
        .view(b * n_head, h1 // n_head, l1, l1)
    )
    k1 = k1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    v1 = v1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    # f_similar / f_weighting are local-window attention kernels defined elsewhere
    scores_1_to_1 = f_similar(q1, k1, kernel_size * 2 - 1, kernel_size, False)
    attention_probs1 = F.softmax(scores_1_to_1, dim=-1)
    if attention_dropout is not None:
        with get_cuda_rng_tracker().fork():
            attention_probs1 = attention_dropout(attention_probs1)
    # weighting for level 0
    context0 = torch.matmul(attention_probs0, v0)  # [b, n_head, s0, h]
    # weighting for level 1 (local window)
    probs_1_to_1 = attention_probs1
    context1_to_1 = f_weighting(v1, probs_1_to_1.contiguous(), kernel_size * 2 - 1, kernel_size, False)
    context1 = context1_to_1.view(b, n_head, h, l1**2)
    context1 = context1.transpose(-1, -2)
    output = torch.cat((context0, context1), dim=2).transpose(1, 2).reshape(b, s0 + s1, h0)
    # fixed: dataset-extraction junk was fused onto this return line
    return output
16,187 | import math
import torch
import torch.nn.functional as F
from helm.common.optional_dependencies import handle_module_not_found_error
The provided code snippet includes necessary dependencies for implementing the `sparse_attention_2d_light` function. Write a Python function `def sparse_attention_2d_light( q0, k0, v0, q1, k1, v1, attention_mask, n_head, text_len, kernel_size=9, kernel_size2=7, attention_dropout=None, log_attention_weights=None, add_scalar=0, **kwargs )` to solve the following problem:
q0, k0, v0: [batch_size, 1088, hidden_size] q1, k1, v1: [batch_size, 4096, h2] n_head: int attention_mask: [batch_size, 1088, 1088]
Here is the function:
def sparse_attention_2d_light(
    q0,
    k0,
    v0,
    q1,
    k1,
    v1,
    attention_mask,
    n_head,
    text_len,
    kernel_size=9,
    kernel_size2=7,
    attention_dropout=None,
    log_attention_weights=None,
    add_scalar=0,
    **kwargs
):
    """
    Two-level sparse 2D attention: dense attention for level 0, and for
    level 1 both local (windowed) self attention and local cross attention
    onto the image part of level 0.

    q0, k0, v0: [batch_size, 1088, hidden_size]
    q1, k1, v1: [batch_size, 4096, h2]
    n_head: int
    attention_mask: [batch_size, 1088, 1088]

    Returns (context0 [b, s0, h0], context1 [b, s1, h0]).
    """
    b, s0, h0 = q0.shape
    b, s1, h1 = q1.shape
    # NOTE(review): `sqrt` is a module-level helper (integer square root
    # presumably) -- TODO confirm; l0/l1 are edge lengths of the token grids.
    h, l0, l1 = h0 // n_head, sqrt(s0 - text_len), sqrt(s1)
    q0 = q0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    v0 = v0.reshape(b, s0, n_head, h).permute(0, 2, 1, 3)
    k0T = k0.reshape(b, s0, n_head, h).permute(0, 2, 3, 1)
    # standard attention for level 0
    attention_scores = torch.matmul(q0 / math.sqrt(q0.shape[-1]), k0T)
    if log_attention_weights is not None:
        attention_scores += log_attention_weights
    attention_scores = torch.mul(attention_scores, attention_mask) - 10000.0 * (1.0 - attention_mask)
    attention_probs0 = F.softmax(attention_scores, dim=-1)
    # local attention for level 1: fold heads into [b*n_head, h, l1, l1] maps
    q1 = (
        (q1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1) / math.sqrt(h1 // n_head))
        .contiguous()
        .view(b * n_head, h1 // n_head, l1, l1)
    )
    k1 = k1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    v1 = v1.view(b, s1, n_head, h1 // n_head).permute(0, 2, 3, 1).contiguous().view(b * n_head, h1 // n_head, l1, l1)
    # scores_1_to_1 = f_similar(q1, k1, kernel_size*2-1, kernel_size, True)
    scores_1_to_1 = f_similar(q1, k1, kernel_size * 2 - 1, kernel_size, False)
    # cross attention: level-1 queries against the image part of level 0
    k0T = k0T[..., -(l0**2) :].reshape(b * n_head, h, l0, l0).contiguous()
    scores_1_to_0 = f_similar(q1, k0T, kernel_size2, kernel_size2, False)  # [b*n_head, l1, l1, field]
    # joint softmax over cross scores (optionally biased by add_scalar) and local scores
    scores_1 = torch.cat(
        (
            scores_1_to_0.view(b * n_head, -1, scores_1_to_0.shape[3]) + add_scalar,
            scores_1_to_1.view(b * n_head, -1, scores_1_to_1.shape[3]),
        ),
        dim=-1,
    )
    attention_probs1 = F.softmax(scores_1, dim=-1)
    if attention_dropout is not None:
        # with get_cuda_rng_tracker().fork():
        attention_probs0 = attention_dropout(attention_probs0)
        attention_probs1 = attention_dropout(attention_probs1)
    # weighting for level 0
    context0 = torch.matmul(attention_probs0, v0)  # [b, n_head, s0, h]
    # weighting for level 1 (local window)
    probs_1_to_1 = attention_probs1[:, :, -scores_1_to_1.shape[3] :].view_as(scores_1_to_1)
    # context1_to_1 = f_weighting(v1, probs_1_to_1.contiguous(), kernel_size*2-1, kernel_size, True)
    context1_to_1 = f_weighting(v1, probs_1_to_1.contiguous(), kernel_size * 2 - 1, kernel_size, False)
    context1 = context1_to_1.view(b, n_head * h, l1**2)
    # weighting for cross attention
    probs_1_to_0 = attention_probs1[:, :, : scores_1_to_0.shape[3]].view_as(scores_1_to_0)
    v0_part = v0[:, :, -(l0**2) :].transpose(-1, -2).contiguous().view(b * n_head, h, l0, l0)
    context1_to_0 = f_weighting(v0_part, probs_1_to_0.contiguous(), kernel_size2, kernel_size2, False)
    context1_to_0 = context1_to_0.view(b, n_head * h, l1**2)
    context1 = context1 + context1_to_0
    # fixed: dataset-extraction junk was fused onto this return line
    return context0.transpose(1, 2).reshape(b, s0, h0), context1.transpose(-1, -2)
16,188 | import os
import math
import torch
import torch.nn.functional as F
import numpy as np
def top_k_logits_(logits, top_k=0, filter_value=-float("Inf")):
    """In-place top-k filtering: set every logit below the top_k-th to filter_value.

    Fixes: with the default top_k=0 the original indexed into an empty
    torch.topk result and raised IndexError; now top_k=0 is a no-op.
    Mutates and returns `logits` (any shape; filtering is over the last dim).
    """
    if top_k > 0:
        # threshold = value of the k-th largest logit along the last dim
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    return logits
16,189 | import os
import math
import torch
import torch.nn.functional as F
import numpy as np
class IterativeEntfilterStrategy:
    """Cluster-filtered sampling strategy for iterative refinement.

    First samples one of the `topk` most probable token *clusters*
    (cluster assignments loaded from ``cluster_label.npy``), masks out all
    tokens outside the chosen cluster, then samples the token itself at
    ``temperature2``.
    """
    def __init__(self, invalid_slices=[], temperature=1.0, topk=6, temperature2=0.9):
        # NOTE(review): mutable default argument; safe only as long as the
        # list is never mutated (it is only iterated here).
        self.invalid_slices = invalid_slices
        self.temperature = temperature
        self.topk = topk
        # Per-token cluster id tensor. Loaded eagerly at construction: does
        # file I/O and may allocate on CUDA, so constructing an instance as a
        # default argument runs this at module import time.
        self.cluster_labels = torch.tensor(
            np.load(f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}/cluster_label.npy"),
            device="cuda" if torch.cuda.is_available() else "cpu",
            dtype=torch.long,
        )
        self.temperature2 = temperature2
    def forward(self, logits_, tokens, temperature=None):
        """Sample one token per position from `logits_`.

        logits_: [batch_size, seq_length, vocab]. Returns `tokens` with the
        first column kept and the rest replaced by the new samples (hence
        the shape assertion below).
        """
        # In iterative strategy, logits are of shape [batch_size, seq_length, hidden_size]
        if temperature is None:
            temperature = self.temperature
        logits = logits_.float() / temperature
        for invalid_slice in self.invalid_slices:
            logits[..., invalid_slice] = -float("Inf")
        logits = logits.view(-1, logits.shape[-1])
        rprobs = F.softmax(logits.float(), dim=-1)
        c = self.cluster_labels.expand(*rprobs.shape)
        # Sum per-token probabilities into their clusters (500 buckets
        # hard-coded -- presumably the number of clusters in the label file).
        cprobs = torch.zeros(logits.shape[0], 500, device=logits.device).scatter_add_(1, c, rprobs)
        best_scores, best_clusters = cprobs.topk(self.topk)
        bz = logits.shape[0]
        # renormalize cluster scores and sample one cluster per position
        best_scores = best_scores / best_scores.sum(dim=-1, keepdim=True)
        sampled_ids = torch.multinomial(best_scores, num_samples=1)
        selected_clusters = torch.gather(best_clusters, dim=1, index=sampled_ids)
        # mask (to fp16-min) every token that is not in the sampled cluster
        selected_mask = (
            self.cluster_labels.unsqueeze(0).expand(bz, -1) != selected_clusters
        )  # cluster_labels [1, 20000] \in [0,500)
        logits[selected_mask] = -65504
        # for i in range(bz):
        #     selected_cluster = \
        #         best_clusters[i][torch.multinomial(best_scores[i] / best_scores[i].sum(), num_samples=1)]
        #     logits[i, self.cluster_labels != selected_cluster] = -65504
        # logits = top_k_logits(logits, self.topk, self.top_p)
        probs = F.softmax(logits.float() / self.temperature2, dim=-1)  # float is essential, due to a bug in Pytorch
        pred = torch.multinomial(probs, num_samples=1).view(*logits_.shape[:2])
        assert tokens.shape[1] == pred.shape[1] + 1
        tokens = torch.cat((tokens[:, :1], pred), dim=1)
        return tokens
The provided code snippet includes necessary dependencies for implementing the `filling_sequence_dsr` function. Write a Python function `def filling_sequence_dsr( model, seq0, seq1, warmup_steps=3, block_hw=(4, 4), strategy=IterativeEntfilterStrategy(topk=10), )` to solve the following problem:
seq: [PAD]... [ROI1] text ... [BOI1] {layout[0]} 1024 {layout[1]} [EOI1] 4095 {layout[2]} final_token. Attention: The sampling temperatures are changing; temporarily we hard-code them here. The temperature in the strategy is not used.
Here is the function:
def filling_sequence_dsr(
    model,
    seq0,
    seq1,
    warmup_steps=3,
    block_hw=(4, 4),
    strategy=IterativeEntfilterStrategy(topk=10),
):
    """
    seq: [PAD]... [ROI1] text ... [BOI1] {layout[0]} 1024 {layout[1]} [EOI1]
    4095 {layout[2]} final_token.
    Attention:
        The sampling temperatures are changing; temporarily we hard-code them
        here. The temperature in the strategy is not used.

    Returns a list of token tensors (one snapshot per refinement step).
    NOTE(review): the default `strategy` is constructed at def time (loads a
    .npy file, may touch CUDA). Also, the post-warmup call below passes
    entfilter/filter_topk/temperature2 kwargs -- confirm the strategy's
    forward() accepts them.
    """
    assert hasattr(model, "layout")
    layout = model.layout
    assert len(seq0.shape) == 2 and len(seq1.shape) == 2 and seq0.shape[0] == seq1.shape[0]
    assert len(layout) == 3
    assert seq1.shape[1] == layout[-1] - layout[-2] + 1
    assert (seq1 >= 0).all() and (seq0 >= 0).all()
    device = seq0.device
    # concat and pad sequences
    batch_size = seq0.shape[0]
    n_pad = layout[1] - seq0.shape[1]
    assert n_pad > 0, "You should truncate long input before filling."
    seq = torch.cat(
        (torch.tensor([0] * n_pad, device=device, dtype=seq0.dtype).unsqueeze(0).expand(batch_size, n_pad), seq0, seq1),
        dim=1,
    )  # [b, layout[-1]+1]
    assert seq.shape[1] == layout[-1] + 1
    # build initial tokens, attention_mask, and position_ids
    tokens = seq.clone()
    attention_mask = torch.ones(layout[1], layout[1]).to(device)
    attention_mask[: layout[0], layout[0] :] = 0
    attention_mask[n_pad:, :n_pad] = 0
    attention_mask = attention_mask.type_as(next(model.parameters()))  # if fp16
    position_ids = torch.cat(
        (
            torch.zeros(n_pad, dtype=torch.long),
            torch.arange(0, layout[0] - n_pad),
            torch.arange(513, 513 + layout[1] - layout[0]),
            torch.arange(1024, 1024 + layout[2] - layout[1]),
        )
    ).to(device)
    log_attention_weights = torch.zeros(layout[1], layout[1], device=device).type_as(next(model.parameters()))
    log_attention_weights[layout[0] :, n_pad : layout[0]] = 0.0
    # prepare for iteration: mark the final image segment as not-yet-fixed
    unfixed = tokens < 0  # just init an all-False tensor
    unfixed[:, -layout[-1] + layout[-2] :] = True
    ll, rr = block_hw
    edge_len = int(math.sqrt(layout[-1] - layout[-2]) + 1e-4)
    num_steps = warmup_steps + ll - 1 + rr
    # iterative refining
    # unfixed[..., -(layout[-1] - layout[-2]):].view(
    #     batch_size, edge_len//ll, ll, edge_len//rr, rr)[:, :, :, :, -1] = False
    ret = []
    ret.append(tokens[:, layout[-2] + 1 :].clone())
    for step_cnt in range(1, num_steps + 1):
        if step_cnt <= warmup_steps:
            # warmup: resample every unfixed position
            logits, *_dump = model(
                tokens[:, :-1], position_ids, attention_mask, log_attention_weights=log_attention_weights
            )
            real_temp = 1.0
            new_tokens = strategy.forward(logits, tokens, real_temp)
            tokens[unfixed] = new_tokens[unfixed]
        else:
            logits, *_dump = model(
                tokens[:, :-1], position_ids, attention_mask, log_attention_weights=log_attention_weights
            )
            real_temp = 1.0
            new_tokens = strategy.forward(logits, tokens, real_temp, entfilter=1.3, filter_topk=5, temperature2=0.6)
            # tokens[unfixed] = new_tokens[unfixed]
            # fix tokens along an anti-diagonal sweep of each (ll, rr) block
            unfixed2 = tokens > 10000000  # all-False init
            for x in range(min(ll, step_cnt - warmup_steps)):
                y = step_cnt - warmup_steps - x - 1
                if y < rr:
                    unfixed[..., -(layout[-1] - layout[-2]) :].view(batch_size, edge_len // ll, ll, edge_len // rr, rr)[
                        :, :, x, :, y
                    ] = False
                    unfixed2[..., -(layout[-1] - layout[-2]) :].view(
                        batch_size, edge_len // ll, ll, edge_len // rr, rr
                    )[:, :, x, :, y] = True
            tokens[unfixed2] = new_tokens[unfixed2]
        ret.append(tokens[:, layout[-2] + 1 :].clone())
    # fixed: dataset-extraction junk was fused onto this return line
    return ret
16,190 | import torch
import torch.nn.functional as F
from icetk import icetk as tokenizer
def top_k_logits_(logits, top_k=0, filter_value=-float("Inf")):
    """In-place top-k filtering: set every logit below the top_k-th to filter_value.

    Fixes: with the default top_k=0 the original indexed into an empty
    torch.topk result and raised IndexError; now top_k=0 is a no-op.
    Mutates and returns `logits` (any shape; filtering is over the last dim).
    """
    if top_k > 0:
        # threshold = value of the k-th largest logit along the last dim
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    return logits
16,191 | import torch
import torch.nn.functional as F
from icetk import icetk as tokenizer
class IterativeEntfilterStrategy:
    """Top-k sampling strategy used by the iterative super-resolution filler."""
    def __init__(self, invalid_slices=[], temperature=1.0, topk=10):
        # NOTE(review): mutable default argument; safe only as long as the
        # list is never mutated (it is only iterated here).
        self.invalid_slices = invalid_slices
        self.temperature = temperature
        self.topk = topk
    def forward(self, logits, tokens, temperature=None, entfilter=None, filter_topk=5, temperature2=None):
        """Sample one token per position from top-k-filtered logits.

        logits: [batch_size, seq_length, vocab]; returns [batch_size, seq_length].
        `entfilter`, `filter_topk` and `temperature2` are accepted for
        call-site compatibility but are unused here -- TODO confirm intended.
        """
        # In iterative strategy, logits are of shape [batch_size, seq_length, hidden_size]
        if temperature is None:
            temperature = self.temperature
        logits = logits.float() / temperature
        for invalid_slice in self.invalid_slices:
            logits[..., invalid_slice] = -float("Inf")
        # debiased topk
        # probs = F.softmax(logits, dim=-1)
        # tk_value, tk_idx = torch.topk(probs, self.topk, dim=-1)
        # pred = torch.multinomial(probs.view(-1, logits.shape[-1]), num_samples=1).view(*logits.shape[:2], 1)
        # edge_idx = tk_idx[:, :, -1:]
        # edge_value = tk_value[:, :, -1:]
        # edge_mask = probs.gather(dim=-1, index=pred) < edge_value
        # pred[edge_mask] = edge_idx[edge_mask]  # replace outliers as the "filter_topk"-th token
        # pred.squeeze_(-1)  # [batch_size, seq_length]
        # in-place top-k filter (module-level helper), then sample
        top_k_logits_(logits, self.topk)
        probs = F.softmax(logits, dim=-1)
        pred = torch.multinomial(probs.view(-1, logits.shape[-1]), num_samples=1).view(*logits.shape[:2], 1)
        pred.squeeze_(-1)
        assert tokens.shape[1] == pred.shape[1]
        tokens = pred
        return tokens
The provided code snippet includes necessary dependencies for implementing the `filling_sequence_itersr` function. Write a Python function `def filling_sequence_itersr( model, seq0, seq1, warmup_steps=3, block_hw=(4, 4), strategy=IterativeEntfilterStrategy(topk=10), )` to solve the following problem:
seq: [PAD]... [ROI1] text ... [BOI1] {layout[0]} 1024 {layout[1]} [EOI1] 4095 {layout[2]} final_token. Attention: The sampling temperatures are changing; temporarily we hard-code them here. The temperature in the strategy is not used.
Here is the function:
def filling_sequence_itersr(
    model,
    seq0,
    seq1,
    warmup_steps=3,
    block_hw=(4, 4),
    strategy=IterativeEntfilterStrategy(topk=10),
):
    """
    seq: [PAD]... [ROI1] text ... [BOI1] {layout[0]} 1024 {layout[1]} [EOI1]
    4095 {layout[2]} final_token.
    Attention:
        The sampling temperatures are changing; temporarily we hard-code them
        here. The temperature in the strategy is not used.

    Single-step variant: fills every `<start_of_image>` placeholder in one
    pass and returns the image segment, shape [batch_size*num_steps, ...].
    """
    assert hasattr(model, "layout")
    layout = model.layout
    device = seq0.device
    # concat and pad sequences
    batch_size = seq0.shape[0]
    n_pad = layout[0] - seq0.shape[1]
    assert n_pad >= 0, "You should truncate long input before filling."
    seq = torch.cat(
        (torch.tensor([0] * n_pad, device=device, dtype=seq0.dtype).unsqueeze(0).expand(batch_size, n_pad), seq0, seq1),
        dim=1,
    )  # [b, layout[-1]+1]
    assert seq.shape[1] == layout[-1]
    # build initial tokens, attention_mask, and position_ids
    tokens = seq.clone()
    attention_mask = torch.ones(layout[0]).to(device)
    attention_mask[:n_pad] = 0
    attention_mask = attention_mask.unsqueeze(0).type_as(next(model.parameters()))  # if fp16
    position_ids = torch.cat(
        (
            torch.zeros(n_pad, dtype=torch.long),
            torch.arange(0, layout[0] - n_pad),
            torch.arange(1024, 1024 + layout[1] - layout[0]),
        )
    ).to(device)
    log_attention_weights = torch.zeros(layout[0], device=device).type_as(next(model.parameters()))
    log_attention_weights[n_pad : layout[0]] = 0.0
    log_attention_weights = log_attention_weights.unsqueeze(0)
    # prepare for iteration: only placeholder positions are resampled
    unfixed = tokens == tokenizer["<start_of_image>"]
    ll, rr = block_hw  # unused in this single-step variant; kept for parity with filling_sequence_dsr
    # edge_len = int(math.sqrt(layout[-1] - layout[-2]) + 1e-4)
    num_steps = 1
    # iterative refining
    # unfixed[..., -(layout[-1] - layout[-2]):].view(
    #     batch_size, edge_len//ll, ll, edge_len//rr, rr)[:, :, :, :, -1] = False
    ret = []
    # ret.append(tokens[:, layout[-2]:-1].clone())
    for step_cnt in range(1, num_steps + 1):
        logits, *_dump = model(tokens, position_ids, attention_mask, log_attention_weights=log_attention_weights)
        real_temp = 1.0
        new_tokens = strategy.forward(logits, tokens, real_temp)
        tokens[unfixed] = new_tokens[unfixed]
        ret.append(tokens[:, layout[-2] :].clone())
    # fixed: dataset-extraction junk was fused onto this return line
    return torch.cat(ret, dim=0)
16,192 | import os
import torch
import numpy as np
import torch.nn.functional as F
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-65504):
    """Filter a logits tensor with top-k and/or nucleus (top-p) filtering.

    Sets filtered logits to `filter_value` (fp16 min by default) in place and
    returns the tensor.
    NOTE(review): the top-p branch flattens with `view(logits.size()[1])` and
    reshapes back with `view(1, -1)`, so it only supports batch size 1 --
    confirm callers never pass larger batches with top_p > 0.
    """
    # This function has been mostly taken from huggingface conversational ai code at
    # https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        # convert to 1D
        logits = logits.view(logits.size()[1]).contiguous()
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
        # going back to 2D
        logits = logits.view(1, -1).contiguous()
    # fixed: dataset-extraction junk was fused onto this return line
    return logits
16,193 | import re
import torch
from .modeling_flax_vqgan import VQModel
from .configuration_vqgan import VQGANConfig
from helm.common.optional_dependencies import handle_module_not_found_error
def rename_key(key):
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
class VQModel(VQGANPreTrainedModel):
class VQGANConfig(PretrainedConfig):
def __init__(
self,
ch: int = 128,
out_ch: int = 3,
in_channels: int = 3,
num_res_blocks: int = 2,
resolution: int = 256,
z_channels: int = 256,
ch_mult: Tuple = (1, 1, 2, 2, 4),
attn_resolutions: int = (16,),
n_embed: int = 1024,
embed_dim: int = 256,
dropout: float = 0.0,
double_z: bool = False,
resamp_with_conv: bool = True,
give_pre_end: bool = False,
**kwargs,
):
def convert_model(config_path, pt_state_dict_path, save_path):
    """Convert a PyTorch VQGAN checkpoint to a Flax VQModel and save it.

    config_path: path/identifier understood by VQGANConfig.from_pretrained.
    pt_state_dict_path: PyTorch checkpoint containing a "state_dict" entry.
    save_path: directory the converted Flax model is written to.
    Returns the populated VQModel.
    """
    config = VQGANConfig.from_pretrained(config_path)
    model = VQModel(config)
    state_dict = torch.load(pt_state_dict_path, map_location="cpu")["state_dict"]
    # snapshot keys first: we mutate the dict while iterating
    keys = list(state_dict.keys())
    for key in keys:
        # drop the training-only loss sub-module weights
        if key.startswith("loss"):
            state_dict.pop(key)
            continue
        # translate PyTorch parameter names to the Flax naming scheme
        renamed_key = rename_key(key)
        state_dict[renamed_key] = state_dict.pop(key)
    state = convert_pytorch_state_dict_to_flax(state_dict, model)
    model.params = state
    model.save_pretrained(save_path)
    # fixed: dataset-extraction junk was fused onto this return line
    return model
16,194 | import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
import numpy as np
from datasets import Dataset, load_dataset
from .model.text import TextNormalizer
from helm.common.optional_dependencies import handle_module_not_found_error
def blank_caption_function(example, text_column, blank_caption_prob, rng=None):
    """With probability `blank_caption_prob`, blank out the example's caption.

    Uses `rng.random()` when an RNG is supplied, otherwise the global numpy
    RNG. A falsy probability (0 or None) leaves the example untouched.
    Mutates and returns `example`.
    """
    if blank_caption_prob and (rng.random() if rng is not None else np.random.random()) < blank_caption_prob:
        example[text_column] = ""
    # fixed: dataset-extraction junk was fused onto this return line
    return example
16,195 | import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
import numpy as np
from datasets import Dataset, load_dataset
from .model.text import TextNormalizer
from helm.common.optional_dependencies import handle_module_not_found_error
def normalize_function(example, text_column, text_normalizer):
    """Apply `text_normalizer` to the example's text column in place.

    Mutates and returns `example`.
    """
    example[text_column] = text_normalizer(example[text_column])
    # fixed: dataset-extraction junk was fused onto this return line
    return example
16,196 | import random
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
import numpy as np
from datasets import Dataset, load_dataset
from .model.text import TextNormalizer
from helm.common.optional_dependencies import handle_module_not_found_error
def filter_function(
    example,
    min_clip_score,
    max_clip_score,
    clip_score_column,
    filter_column,
    filter_value,
):
    """Return True iff `example` passes the CLIP-score and metadata filters.

    Any filter set to None is skipped. An example fails if its clip score is
    below `min_clip_score` or above `max_clip_score`, or if
    `example[filter_column] != filter_value`.
    """
    if min_clip_score is not None and example[clip_score_column] < min_clip_score:
        return False
    if max_clip_score is not None and example[clip_score_column] > max_clip_score:
        return False
    if filter_column is not None and example[filter_column] != filter_value:
        return False
    # fixed: dataset-extraction junk was fused onto this return line
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.