code stringlengths 101 5.91M |
|---|
def test_partial_max():
    """Batch several per-item coroutines through TorchBatcher and verify that
    the wrapped torch.max is invoked only twice for the whole batch (one call
    per distinct call site), while per-item results match unbatched math.
    """
    import functools
    batch_size = 3
    inp = torch.autograd.Variable(torch.rand(batch_size, 8))
    # Mock wraps torch.max so its call count can be asserted below;
    # dim=-1 reduces over the last axis, returning (values, indices).
    torch_max = functools.partial(unittest.mock.Mock(wraps=torch.max), dim=(- 1))
    # Gather the element at index i from each row of x.
    torch_get = (lambda x, i: x[(range(x.shape[0]), i.view((- 1)))])
    double = (lambda x: (x * 2))
    batcher = torch_batcher.TorchBatcher()

    async def process(item):
        # split(2): the batched torch.max future yields a (values, indices) pair.
        (max_value, max_idx) = batcher(torch_max, item).split(2)
        max_idx = max_idx.with_shape(1)
        doubled_idx = batcher(double, max_idx)
        max_value2 = batcher(torch_get, item, max_idx)
        max_value = (await max_value)
        max_idx = (await max_idx)
        doubled_idx = (await doubled_idx)
        max_value2 = (await max_value2)
        # Indexing the input with the argmax must reproduce the max value.
        assert (max_value.data[0] == max_value2.data[0])
        # Second torch_max call site: should be batched into one extra call.
        (max_value3, _) = batcher(torch_max, item).split(2)
        max_value3 = (await max_value3)
        assert (max_value.data[0] == max_value3.data[0])
        return (max_value, max_idx, doubled_idx)

    results = batcher.run([process(inp[i]) for i in range(batch_size)])
    # Two distinct call sites -> exactly two underlying torch.max invocations.
    assert (torch_max.func.call_count == 2)
    for i in range(batch_size):
        (max_value, max_idx) = torch_max(inp[i])
        doubled_idx = double(max_idx)
        assert (results[i][0].data.numpy() == pytest.approx(max_value.data.numpy(), abs=1e-06))
        assert (results[i][1].data.numpy() == pytest.approx(max_idx.data.numpy(), abs=1e-06))
        assert (results[i][2].data.numpy() == pytest.approx(doubled_idx.data.numpy(), abs=1e-06))
def run(task: Task, num_samples: int, **kwargs: Any) -> torch.Tensor:
    """Baseline "inference" method: draw samples directly from the task prior.

    Parameters
    ----------
    task
        Task providing ``get_prior()``.
    num_samples
        Number of prior samples to draw.
    **kwargs
        Ignored; ``num_simulations`` in particular triggers a warning since a
        prior baseline performs no simulations.

    Returns
    -------
    torch.Tensor
        Samples from the task prior.
    """
    log = sbibm.get_logger(__name__)
    if 'num_simulations' in kwargs:
        # Logger.warn is a deprecated alias of Logger.warning.
        log.warning('`num_simulations` was passed as a keyword but will be ignored, since this is a baseline method.')
    prior = task.get_prior()
    return prior(num_samples=num_samples)
def raw_reward_threshold(threshold):
    """Build a reward-shaping function keyed on ``metadata['raw_reward']``.

    The returned function maps a metadata dict to:
      *  1.0 when raw_reward exceeds ``threshold``,
      * -1.0 when raw_reward is positive but does not exceed the threshold,
      * the raw reward itself otherwise (zero or negative).

    Parameters
    ----------
    threshold : float
        Strict lower bound for full credit.

    Returns
    -------
    callable
        ``fn(metadata) -> float``.
    """
    def fn(metadata):
        raw = metadata['raw_reward']
        if raw > threshold:
            return 1.0
        elif raw > 0:
            # Was `-1` (int); use a float for a consistent return type.
            return -1.0
        return raw
    return fn
def async_execution(fn):
    """Decorator that tags ``fn`` for asynchronous RPC execution.

    The wrapper forwards all arguments unchanged; the original callable is
    exposed on ``wrapper._wrapped_async_rpc_function`` so the RPC layer can
    retrieve it.  ``functools.wraps`` preserves the wrapped function's name,
    docstring, and signature metadata (the original code also contained a
    no-op bare expression ``(fn)``, removed here).
    """
    import functools

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)

    wrapper._wrapped_async_rpc_function = fn
    return wrapper
# NOTE(review): this bare call looks like a mangled class decorator
# (presumably ``@six.add_metaclass(ABCMeta)`` or similar applied to Model
# below) — as written its result is discarded; confirm against the original.
_metaclass(ABCMeta)


class Model(object):
    """Abstract interface for a model over fixed-horizon rollouts.

    Every method is a stub to be overridden by subclasses; only the
    constructor arguments and pickling support are concrete.
    """

    def __init__(self, do, du, horizon):
        # do/du: presumably observation and control dimensionalities, and
        # horizon the rollout length — TODO confirm naming against callers.
        self.do = do
        self.du = du
        self.horizon = horizon

    def train(self, rollouts):
        """Fit the model to a batch of rollouts (stub)."""
        pass

    def encode(self, y, a):
        """Encode observations ``y`` and actions ``a`` (stub)."""
        pass

    def decode(self, x):
        """Decode internal state ``x`` back to observation space (stub)."""
        pass

    def get_dynamics(self):
        """Return the model's dynamics representation (stub)."""
        pass

    def has_dynamics(self):
        """Report whether explicit dynamics are available (stub)."""
        pass

    def forward(self, state, action, t):
        """Predict from (state, action) at step ``t`` (stub)."""
        pass

    def make_summaries(self, env):
        """Emit logging/monitoring summaries for ``env`` (stub)."""
        pass

    def __getstate__(self):
        # Only constructor arguments are pickled; any learned state in a
        # subclass must be handled by the subclass itself.
        return {'do': self.do, 'du': self.du, 'horizon': self.horizon}
class FiniteWordPath_triangle_grid_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_triangle_grid, FiniteWord_class):
    """Finite word path on the triangle grid backed by a cached-iterator
    datatype.  Behaviour comes entirely from the three base classes; this
    combination class adds no overrides of its own.
    """
    pass
class ByteMaskedArray(ByteMaskedMeta[Content], Content):
    """Option-type array layout: a byte-per-element mask over ``content``.

    Element ``i`` is valid when ``mask[i] == valid_when``; otherwise it reads
    as None.  ``len(mask)`` may be <= ``len(content)``; the array's length is
    the mask's length.

    NOTE(review): the ``@property`` decorators on ``mask``/``valid_when``/
    ``length`` and ``@classmethod`` on ``simplified`` were missing in the
    extracted source but are clearly required by call sites in this very
    class (e.g. ``self.length / 8.0``, ``ByteMaskedArray.simplified(...)``),
    so they are restored here.
    """

    def __init__(self, mask, content, valid_when, *, parameters=None):
        # Validate constituents before storing anything.
        if not (isinstance(mask, Index) and (mask.dtype == np.dtype(np.int8))):
            raise TypeError("{} 'mask' must be an Index with dtype=int8, not {}".format(type(self).__name__, repr(mask)))
        if not isinstance(content, Content):
            raise TypeError("{} 'content' must be a Content subtype, not {}".format(type(self).__name__, repr(content)))
        if content.is_union or content.is_indexed or content.is_option:
            raise TypeError("{0} cannot contain a union-type, option-type, or indexed 'content' ({1}); try {0}.simplified instead".format(type(self).__name__, type(content).__name__))
        if not isinstance(valid_when, bool):
            raise TypeError("{} 'valid_when' must be boolean, not {}".format(type(self).__name__, repr(valid_when)))
        # The mask may be shorter than content but never longer (when lengths are known).
        if (content.backend.index_nplike.known_data and (mask.length is not unknown_length) and (content.length is not unknown_length) and (mask.length > content.length)):
            raise ValueError('{} len(mask) ({}) must be <= len(content) ({})'.format(type(self).__name__, mask.length, content.length))
        assert mask.nplike is content.backend.index_nplike
        self._mask = mask
        self._content = content
        self._valid_when = valid_when
        self._init(parameters, content.backend)

    @property
    def mask(self):
        """The int8 Index of per-element validity bytes."""
        return self._mask

    @property
    def valid_when(self):
        """The byte value that marks an element as valid."""
        return self._valid_when

    form_cls: Final = ByteMaskedForm

    def copy(self, mask=UNSET, content=UNSET, valid_when=UNSET, *, parameters=UNSET):
        """Return a copy, replacing any constituents that are explicitly given."""
        return ByteMaskedArray((self._mask if (mask is UNSET) else mask), (self._content if (content is UNSET) else content), (self._valid_when if (valid_when is UNSET) else valid_when), parameters=(self._parameters if (parameters is UNSET) else parameters))

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        # valid_when is an immutable bool, so it needs no deep copy.
        return self.copy(mask=copy.deepcopy(self._mask, memo), content=copy.deepcopy(self._content, memo), parameters=copy.deepcopy(self._parameters, memo))

    @classmethod
    def simplified(cls, mask, content, valid_when, *, parameters=None):
        """Construct a ByteMaskedArray, flattening union/indexed/option content
        into an IndexedOptionArray (or a union of option arrays) when needed."""
        if content.is_union or content.is_indexed or content.is_option:
            backend = content.backend
            index = ak.index.Index64.empty(mask.length, nplike=backend.index_nplike)
            backend.maybe_kernel_error(backend[('awkward_ByteMaskedArray_toIndexedOptionArray', index.dtype.type, mask.dtype.type)](index.data, mask.data, mask.length, valid_when))
            if content.is_union:
                return content._union_of_optionarrays(index, parameters)
            else:
                return ak.contents.IndexedOptionArray.simplified(index, content, parameters=parameters)
        else:
            return cls(mask, content, valid_when, parameters=parameters)

    def _form_with_key(self, getkey: Callable[[Content], str | None]) -> ByteMaskedForm:
        form_key = getkey(self)
        return self.form_cls(self._mask.form, self._content._form_with_key(getkey), self._valid_when, parameters=self._parameters, form_key=form_key)

    def _to_buffers(self, form: Form, getkey: Callable[[Content, Form, str], str], container: MutableMapping[str, ArrayLike], backend: Backend, byteorder: str):
        assert isinstance(form, self.form_cls)
        key = getkey(self, form, 'mask')
        container[key] = ak._util.native_to_byteorder(self._mask.raw(backend.index_nplike), byteorder)
        self._content._to_buffers(form.content, getkey, container, backend, byteorder)

    def _to_typetracer(self, forget_length: bool) -> Self:
        tt = TypeTracer.instance()
        mask = self._mask.to_nplike(tt)
        return ByteMaskedArray((mask.forget_length() if forget_length else mask), self._content._to_typetracer(forget_length), self._valid_when, parameters=self._parameters)

    def _touch_data(self, recursive: bool):
        self._mask._touch_data()
        if recursive:
            self._content._touch_data(recursive)

    def _touch_shape(self, recursive: bool):
        self._mask._touch_shape()
        if recursive:
            self._content._touch_shape(recursive)

    @property
    def length(self) -> ShapeItem:
        # The array's length is the mask's length (content may be longer).
        return self._mask.length

    def _forget_length(self):
        return ByteMaskedArray(self._mask.forget_length(), self._content, self._valid_when, parameters=self._parameters)

    def __repr__(self):
        return self._repr('', '', '')

    def _repr(self, indent, pre, post):
        out = [indent, pre, '<ByteMaskedArray valid_when=']
        out.append(repr(json.dumps(self._valid_when)))
        out.append(' len=')
        out.append(repr(str(self.length)))
        out.append('>')
        out.extend(self._repr_extra(indent + '    '))
        out.append('\n')
        out.append(self._mask._repr(indent + '    ', '<mask>', '</mask>\n'))
        out.append(self._content._repr(indent + '    ', '<content>', '</content>\n'))
        out.append(indent + '</ByteMaskedArray>')
        out.append(post)
        return ''.join(out)

    def to_IndexedOptionArray64(self) -> IndexedOptionArray:
        """Convert to the equivalent IndexedOptionArray (invalid -> negative index)."""
        index = ak.index.Index64.empty(self._mask.length, nplike=self._backend.index_nplike)
        assert ((index.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_toIndexedOptionArray', index.dtype.type, self._mask.dtype.type)](index.data, self._mask.data, self._mask.length, self._valid_when))
        return ak.contents.IndexedOptionArray(index, self._content, parameters=self._parameters)

    def to_ByteMaskedArray(self, valid_when):
        if valid_when == self._valid_when:
            return self
        else:
            # Flip the mask bytes so semantics are preserved under the new valid_when.
            return ByteMaskedArray(ak.index.Index8(self._backend.index_nplike.astype(self._backend.index_nplike.logical_not(self._backend.index_nplike.astype(self._mask.data, dtype=np.bool_)), dtype=np.int8)), self._content, valid_when, parameters=self._parameters)

    def to_BitMaskedArray(self, valid_when, lsb_order):
        if not self._backend.nplike.known_data:
            self._touch_data(recursive=False)
            if self._backend.nplike.known_data:
                excess_length = int(math.ceil(self.length / 8.0))
            else:
                excess_length = unknown_length
            return ak.contents.BitMaskedArray(ak.index.IndexU8(self._backend.nplike.empty(excess_length, dtype=np.uint8)), self._content, valid_when, self.length, lsb_order, parameters=self._parameters)
        else:
            bit_order = ('little' if lsb_order else 'big')
            bytemask = self.mask_as_bool(valid_when).view(np.uint8)
            bitmask = self.backend.index_nplike.packbits(bytemask, bitorder=bit_order)
            return ak.contents.BitMaskedArray(ak.index.IndexU8(bitmask), self._content, valid_when, self.length, lsb_order, parameters=self._parameters)

    def mask_as_bool(self, valid_when=None):
        """Boolean validity mask under the requested ``valid_when`` convention."""
        if valid_when is None:
            valid_when = self._valid_when
        if valid_when == self._valid_when:
            return (self._mask.raw(self._backend.index_nplike) != 0)
        else:
            return (self._mask.raw(self._backend.index_nplike) != 1)

    def _getitem_nothing(self):
        return self._content._getitem_range(0, 0)

    def _getitem_at(self, where: IndexType):
        if not self._backend.nplike.known_data:
            self._touch_data(recursive=False)
            return MaybeNone(self._content._getitem_at(where))
        if where < 0:
            where += self.length
        if self._backend.nplike.known_data and (not (0 <= where < self.length)):
            raise ak._errors.index_error(self, where)
        if self._mask[where] == self._valid_when:
            return self._content._getitem_at(where)
        else:
            return None

    def _getitem_range(self, start: IndexType, stop: IndexType) -> Content:
        if not self._backend.nplike.known_data:
            self._touch_shape(recursive=False)
            return self
        return ByteMaskedArray(self._mask[start:stop], self._content._getitem_range(start, stop), self._valid_when, parameters=self._parameters)

    def _getitem_field(self, where: str | SupportsIndex, only_fields: tuple[str, ...] = ()) -> Content:
        return ByteMaskedArray.simplified(self._mask, self._content._getitem_field(where, only_fields), self._valid_when, parameters=None)

    def _getitem_fields(self, where: list[str | SupportsIndex], only_fields: tuple[str, ...] = ()) -> Content:
        return ByteMaskedArray.simplified(self._mask, self._content._getitem_fields(where, only_fields), self._valid_when, parameters=None)

    def _carry(self, carry: Index, allow_lazy: bool) -> Content:
        assert isinstance(carry, ak.index.Index)
        try:
            nextmask = self._mask[carry.data]
        except IndexError as err:
            raise ak._errors.index_error(self, carry.data, str(err)) from err
        return ByteMaskedArray.simplified(nextmask, self._content._carry(carry, allow_lazy), self._valid_when, parameters=self._parameters)

    def _nextcarry_outindex(self) -> tuple[int, ak.index.Index64, ak.index.Index64]:
        """Count nulls and build (numnull, carry-of-valid, out index with -1 for nulls)."""
        _numnull = ak.index.Index64.empty(1, nplike=self._backend.index_nplike)
        assert ((_numnull.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_numnull', _numnull.dtype.type, self._mask.dtype.type)](_numnull.data, self._mask.data, self._mask.length, self._valid_when))
        numnull = self._backend.index_nplike.index_as_shape_item(_numnull[0])
        nextcarry = ak.index.Index64.empty(self.length - numnull, nplike=self._backend.index_nplike)
        outindex = ak.index.Index64.empty(self.length, nplike=self._backend.index_nplike)
        assert ((nextcarry.nplike is self._backend.index_nplike) and (outindex.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_getitem_nextcarry_outindex', nextcarry.dtype.type, outindex.dtype.type, self._mask.dtype.type)](nextcarry.data, outindex.data, self._mask.raw(self._backend.nplike), self._mask.length, self._valid_when))
        return (numnull, nextcarry, outindex)

    def _getitem_next_jagged_generic(self, slicestarts, slicestops, slicecontent, tail):
        if (slicestarts.nplike.known_data and self._backend.nplike.known_data and (slicestarts.length != self.length)):
            raise ak._errors.index_error(self, ak.contents.ListArray(slicestarts, slicestops, slicecontent, parameters=None), 'cannot fit jagged slice with length {} into {} of size {}'.format(slicestarts.length, type(self).__name__, self.length))
        (numnull, nextcarry, outindex) = self._nextcarry_outindex()
        reducedstarts = ak.index.Index64.empty(self.length - numnull, nplike=self._backend.index_nplike)
        reducedstops = ak.index.Index64.empty(self.length - numnull, nplike=self._backend.index_nplike)
        assert ((outindex.nplike is self._backend.index_nplike) and (slicestarts.nplike is self._backend.index_nplike) and (slicestops.nplike is self._backend.index_nplike) and (reducedstarts.nplike is self._backend.nplike) and (reducedstops.nplike is self._backend.nplike))
        self._maybe_index_error(self._backend[('awkward_MaskedArray_getitem_next_jagged_project', outindex.dtype.type, slicestarts.dtype.type, slicestops.dtype.type, reducedstarts.dtype.type, reducedstops.dtype.type)](outindex.data, slicestarts.data, slicestops.data, reducedstarts.data, reducedstops.data, self.length), slicer=ak.contents.ListArray(slicestarts, slicestops, slicecontent))
        next = self._content._carry(nextcarry, True)
        out = next._getitem_next_jagged(reducedstarts, reducedstops, slicecontent, tail)
        return ak.contents.IndexedOptionArray.simplified(outindex, out, parameters=self._parameters)

    def _getitem_next_jagged(self, slicestarts: Index, slicestops: Index, slicecontent: Content, tail) -> Content:
        return self._getitem_next_jagged_generic(slicestarts, slicestops, slicecontent, tail)

    def _getitem_next(self, head: SliceItem | tuple, tail: tuple[SliceItem, ...], advanced: Index | None) -> Content:
        if head is NO_HEAD:
            return self
        elif is_integer_like(head) or isinstance(head, (slice, ak.index.Index64, ak.contents.ListOffsetArray)):
            # Project out nulls, slice the valid entries, then re-insert nulls.
            (_, nextcarry, outindex) = self._nextcarry_outindex()
            next = self._content._carry(nextcarry, True)
            out = next._getitem_next(head, tail, advanced)
            return ak.contents.IndexedOptionArray.simplified(outindex, out, parameters=self._parameters)
        elif isinstance(head, str):
            return self._getitem_next_field(head, tail, advanced)
        elif isinstance(head, list):
            return self._getitem_next_fields(head, tail, advanced)
        elif head is np.newaxis:
            return self._getitem_next_newaxis(tail, advanced)
        elif head is Ellipsis:
            return self._getitem_next_ellipsis(tail, advanced)
        elif isinstance(head, ak.contents.IndexedOptionArray):
            return self._getitem_next_missing(head, tail, advanced)
        else:
            raise AssertionError(repr(head))

    def project(self, mask=None):
        """Drop missing entries; with ``mask`` given, overlay it (valid_when=False) first."""
        mask_length = self._mask.length
        _numnull = ak.index.Index64.zeros(1, nplike=self._backend.index_nplike)
        if mask is not None:
            if self._backend.nplike.known_data and (mask_length != mask.length):
                raise ValueError('mask length ({}) is not equal to {} length ({})'.format(mask.length, type(self).__name__, mask_length))
            nextmask = ak.index.Index8.empty(mask_length, nplike=self._backend.index_nplike)
            assert ((nextmask.nplike is self._backend.index_nplike) and (mask.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_overlay_mask', nextmask.dtype.type, mask.dtype.type, self._mask.dtype.type)](nextmask.data, mask.data, self._mask.data, mask_length, self._valid_when))
            valid_when = False
            next = ByteMaskedArray(nextmask, self._content, valid_when, parameters=self._parameters)
            return next.project()
        else:
            assert ((_numnull.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_numnull', _numnull.dtype.type, self._mask.dtype.type)](_numnull.data, self._mask.data, mask_length, self._valid_when))
            numnull = self._backend.index_nplike.index_as_shape_item(_numnull[0])
            nextcarry = ak.index.Index64.empty(mask_length - numnull, nplike=self._backend.index_nplike)
            assert ((nextcarry.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_getitem_nextcarry', nextcarry.dtype.type, self._mask.dtype.type)](nextcarry.data, self._mask.data, mask_length, self._valid_when))
            return self._content._carry(nextcarry, False)

    def _offsets_and_flattened(self, axis: int, depth: int) -> tuple[Index, Content]:
        posaxis = maybe_posaxis(self, axis, depth)
        if (posaxis is not None) and ((posaxis + 1) == depth):
            raise AxisError('axis=0 not allowed for flatten')
        else:
            (numnull, nextcarry, outindex) = self._nextcarry_outindex()
            next = self._content._carry(nextcarry, False)
            (offsets, flattened) = next._offsets_and_flattened(axis, depth)
            if offsets.length == 0:
                return (offsets, ak.contents.IndexedOptionArray(outindex, flattened, parameters=self._parameters))
            else:
                # Re-insert empty lists where entries were missing.
                outoffsets = ak.index.Index64.empty(offsets.length + numnull, nplike=self._backend.index_nplike, dtype=np.int64)
                assert ((outoffsets.nplike is self._backend.index_nplike) and (outindex.nplike is self._backend.index_nplike) and (offsets.nplike is self._backend.index_nplike))
                self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_flatten_none2empty', outoffsets.dtype.type, outindex.dtype.type, offsets.dtype.type)](outoffsets.data, outindex.data, outindex.length, offsets.data, offsets.length))
                return (outoffsets, flattened)

    def _mergeable_next(self, other: Content, mergebool: bool) -> bool:
        if other.is_identity_like or other.is_union:
            return True
        elif other.is_option or other.is_indexed:
            return self._content._mergeable_next(other.content, mergebool)
        else:
            return self._content._mergeable_next(other, mergebool)

    def _reverse_merge(self, other):
        return self.to_IndexedOptionArray64()._reverse_merge(other)

    def _mergemany(self, others: Sequence[Content]) -> Content:
        if len(others) == 0:
            return self
        # Fast path: all inputs share this layout and valid_when convention.
        if all(((isinstance(x, ByteMaskedArray) and (x._valid_when == self._valid_when)) for x in others)):
            parameters = self._parameters
            self_length_scalar = self._backend.index_nplike.shape_item_as_index(self.length)
            masks = [self._mask.data[:self_length_scalar]]
            tail_contents = []
            length = 0
            for x in others:
                length_scalar = self._backend.index_nplike.shape_item_as_index(x.length)
                parameters = parameters_intersect(parameters, x._parameters)
                masks.append(x._mask.data[:length_scalar])
                tail_contents.append(x._content[:length_scalar])
                length += x.length
            return ByteMaskedArray(ak.index.Index8(self._backend.nplike.concat(masks)), self._content[:self_length_scalar]._mergemany(tail_contents), self._valid_when, parameters=parameters)
        else:
            return self.to_IndexedOptionArray64()._mergemany(others)

    def _fill_none(self, value: Content) -> Content:
        return self.to_IndexedOptionArray64()._fill_none(value)

    def _local_index(self, axis, depth):
        posaxis = maybe_posaxis(self, axis, depth)
        if (posaxis is not None) and ((posaxis + 1) == depth):
            return self._local_index_axis0()
        else:
            (_, nextcarry, outindex) = self._nextcarry_outindex()
            next = self._content._carry(nextcarry, False)
            out = next._local_index(axis, depth)
            return ak.contents.IndexedOptionArray.simplified(outindex, out, parameters=self._parameters)

    def _numbers_to_type(self, name, including_unknown):
        return ak.contents.ByteMaskedArray(self._mask, self._content._numbers_to_type(name, including_unknown), self._valid_when, parameters=self._parameters)

    def _is_unique(self, negaxis, starts, parents, outlength):
        if self._mask.length == 0:
            return True
        return self.to_IndexedOptionArray64()._is_unique(negaxis, starts, parents, outlength)

    def _unique(self, negaxis, starts, parents, outlength):
        if self._mask.length == 0:
            return self
        return self.to_IndexedOptionArray64()._unique(negaxis, starts, parents, outlength)

    def _argsort_next(self, negaxis, starts, shifts, parents, outlength, ascending, stable):
        return self.to_IndexedOptionArray64()._argsort_next(negaxis, starts, shifts, parents, outlength, ascending, stable)

    def _sort_next(self, negaxis, starts, parents, outlength, ascending, stable):
        return self.to_IndexedOptionArray64()._sort_next(negaxis, starts, parents, outlength, ascending, stable)

    def _combinations(self, n, replacement, recordlookup, parameters, axis, depth):
        if n < 1:
            raise ValueError("in combinations, 'n' must be at least 1")
        posaxis = maybe_posaxis(self, axis, depth)
        if (posaxis is not None) and ((posaxis + 1) == depth):
            return self._combinations_axis0(n, replacement, recordlookup, parameters)
        else:
            (_, nextcarry, outindex) = self._nextcarry_outindex()
            next = self._content._carry(nextcarry, True)
            out = next._combinations(n, replacement, recordlookup, parameters, axis, depth)
            return ak.contents.IndexedOptionArray.simplified(outindex, out, parameters=parameters)

    def _reduce_next(self, reducer, negaxis, starts, shifts, parents, outlength, mask, keepdims, behavior):
        mask_length = self._mask.length
        _numnull = ak.index.Index64.empty(1, nplike=self._backend.index_nplike)
        assert ((_numnull.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_numnull', _numnull.dtype.type, self._mask.dtype.type)](_numnull.data, self._mask.data, mask_length, self._valid_when))
        numnull = self._backend.index_nplike.index_as_shape_item(_numnull[0])
        next_length = mask_length - numnull
        nextcarry = ak.index.Index64.empty(next_length, nplike=self._backend.index_nplike)
        nextparents = ak.index.Index64.empty(next_length, nplike=self._backend.index_nplike)
        outindex = ak.index.Index64.empty(mask_length, nplike=self._backend.index_nplike)
        assert ((nextcarry.nplike is self._backend.index_nplike) and (nextparents.nplike is self._backend.index_nplike) and (outindex.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike) and (parents.nplike is self._backend.index_nplike))
        self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_reduce_next_64', nextcarry.dtype.type, nextparents.dtype.type, outindex.dtype.type, self._mask.dtype.type, parents.dtype.type)](nextcarry.data, nextparents.data, outindex.data, self._mask.data, parents.data, mask_length, self._valid_when))
        (branch, depth) = self.branch_depth
        if reducer.needs_position and ((not branch) and (negaxis == depth)):
            # Positional reducers (e.g. argmin/argmax) need shifts accounting for nulls.
            nextshifts = ak.index.Index64.empty(next_length, nplike=self._backend.index_nplike)
            if shifts is None:
                assert ((nextshifts.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
                self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_reduce_next_nonlocal_nextshifts_64', nextshifts.dtype.type, self._mask.dtype.type)](nextshifts.data, self._mask.data, mask_length, self._valid_when))
            else:
                assert ((nextshifts.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
                self._backend.maybe_kernel_error(self._backend[('awkward_ByteMaskedArray_reduce_next_nonlocal_nextshifts_fromshifts_64', nextshifts.dtype.type, self._mask.dtype.type, shifts.dtype.type)](nextshifts.data, self._mask.data, mask_length, self._valid_when, shifts.data))
        else:
            nextshifts = None
        next = self._content._carry(nextcarry, False)
        out = next._reduce_next(reducer, negaxis, starts, nextshifts, nextparents, outlength, mask, keepdims, behavior)
        if (not branch) and (negaxis == depth):
            return out
        else:
            if isinstance(out, ak.contents.RegularArray):
                out_content = out.content
            elif isinstance(out, ak.contents.ListOffsetArray):
                out_content = out.content[out.offsets[0]:]
            else:
                raise ValueError(('reduce_next with unbranching depth > negaxis is only expected to return RegularArray or ListOffsetArray64; instead, it returned ' + out))
            outoffsets = ak.index.Index64.empty(starts.length + 1, nplike=self._backend.index_nplike)
            assert outoffsets.nplike is self._backend.nplike
            self._backend.maybe_kernel_error(self._backend[('awkward_IndexedArray_reduce_next_fix_offsets_64', outoffsets.dtype.type, starts.dtype.type)](outoffsets.data, starts.data, starts.length, outindex.length))
            tmp = ak.contents.IndexedOptionArray.simplified(outindex, out_content, parameters=None)
            return ak.contents.ListOffsetArray(outoffsets, tmp, parameters=None)

    def _validity_error(self, path):
        if self._backend.nplike.known_data and (self._content.length < self.mask.length):
            return f'at {path} ({type(self)!r}): len(content) < len(mask)'
        else:
            return self._content._validity_error(path + '.content')

    def _nbytes_part(self):
        return (self.mask._nbytes_part() + self.content._nbytes_part())

    def _pad_none(self, target, axis, depth, clip):
        posaxis = maybe_posaxis(self, axis, depth)
        if (posaxis is not None) and ((posaxis + 1) == depth):
            return self._pad_none_axis0(target, clip)
        elif (posaxis is not None) and ((posaxis + 1) == (depth + 1)):
            mask = ak.index.Index8(self.mask_as_bool(valid_when=False))
            index = ak.index.Index64.empty(mask.length, nplike=self._backend.index_nplike)
            assert ((index.nplike is self._backend.index_nplike) and (self._mask.nplike is self._backend.index_nplike))
            self._backend.maybe_kernel_error(self._backend[('awkward_IndexedOptionArray_rpad_and_clip_mask_axis1', index.dtype.type, self._mask.dtype.type)](index.data, self._mask.data, self._mask.length))
            next = self.project()._pad_none(target, axis, depth, clip)
            return ak.contents.IndexedOptionArray.simplified(index, next, parameters=self._parameters)
        else:
            return ak.contents.ByteMaskedArray(self._mask, self._content._pad_none(target, axis, depth, clip), self._valid_when, parameters=self._parameters)

    def _to_arrow(self, pyarrow: Any, mask_node: Content | None, validbytes: Content | None, length: int, options: ToArrowOptions):
        this_validbytes = self.mask_as_bool(valid_when=True)
        return self._content._to_arrow(pyarrow, self, ak._connect.pyarrow.and_validbytes(validbytes, this_validbytes), length, options)

    def _to_backend_array(self, allow_missing, backend):
        return self.to_IndexedOptionArray64()._to_backend_array(allow_missing, backend)

    def _remove_structure(self, backend: Backend, options: RemoveStructureOptions) -> list[Content]:
        (branch, depth) = self.branch_depth
        if branch or options['drop_nones'] or (depth > 1):
            return self.project()._remove_structure(backend, options)
        else:
            return [self]

    def _drop_none(self) -> Content:
        return self.project()

    def _recursively_apply(self, action: ImplementsApplyAction, depth: int, depth_context: Mapping[str, Any] | None, lateral_context: Mapping[str, Any] | None, options: ApplyActionOptions) -> Content | None:
        if self._backend.nplike.known_data:
            content = self._content[0:self._mask.length]
        else:
            content = self._content
        if options['return_array']:
            if options['return_simplified']:
                make = ByteMaskedArray.simplified
            else:
                make = ByteMaskedArray

            def continuation():
                return make(self._mask, content._recursively_apply(action, depth, copy.copy(depth_context), lateral_context, options), self._valid_when, parameters=(self._parameters if options['keep_parameters'] else None))
        else:
            def continuation():
                content._recursively_apply(action, depth, copy.copy(depth_context), lateral_context, options)
        result = action(self, depth=depth, depth_context=depth_context, lateral_context=lateral_context, continuation=continuation, backend=self._backend, options=options)
        if isinstance(result, Content):
            return result
        elif result is None:
            return continuation()
        else:
            raise AssertionError(result)

    def to_packed(self) -> Self:
        if self._content.is_record:
            next = self.to_IndexedOptionArray64()
            content = next._content.to_packed()
            if content.length > self._mask.length:
                content = content[:self._mask.length]
            return ak.contents.IndexedOptionArray(next._index, content, parameters=next._parameters)
        else:
            content = self._content.to_packed()
            if content.length > self._mask.length:
                content = content[:self._mask.length]
            return ByteMaskedArray(self._mask, content, self._valid_when, parameters=self._parameters)

    def _to_list(self, behavior, json_conversions):
        if not self._backend.nplike.known_data:
            raise TypeError('cannot convert typetracer arrays to Python lists')
        out = self._to_list_custom(behavior, json_conversions)
        if out is not None:
            return out
        mask = self.mask_as_bool(valid_when=True)
        out = self._content._getitem_range(0, mask.size)._to_list(behavior, json_conversions)
        for (i, isvalid) in enumerate(mask):
            if not isvalid:
                out[i] = None
        return out

    def _to_backend(self, backend: Backend) -> Self:
        content = self._content.to_backend(backend)
        mask = self._mask.to_nplike(backend.index_nplike)
        return ByteMaskedArray(mask, content, valid_when=self._valid_when, parameters=self._parameters)

    def _is_equal_to(self, other: Self, index_dtype: bool, numpyarray: bool, all_parameters: bool) -> bool:
        return (self._is_equal_to_generic(other, all_parameters) and (self._valid_when == other.valid_when) and self._mask.is_equal_to(other.mask, index_dtype, numpyarray) and self._content._is_equal_to(other.content, index_dtype, numpyarray, all_parameters))
def _refine_block(S, strong=False):
    """Return all (strong) refinements of the block ``S``.

    Each refinement is a tuple of frozensets obtained by labelling the sorted
    elements of ``S`` with a word over ``{0, ..., len(S)-1}`` whose set of
    letters is an initial segment; with ``strong=True`` only weakly
    increasing words are used.
    """
    if not S:
        raise ValueError('S (=%s) must be nonempty' % S)
    # Sort numerically when every element is an integer, otherwise by string.
    if all(s in ZZ for s in S):
        elements = sorted(S)
    else:
        elements = sorted(S, key=str)
    size = len(elements)
    options = dict(min_part=0, max_part=size - 1, length=size)
    if strong:
        # Weakly increasing words only.
        options['min_slope'] = 0
    refinements = []
    for word in IntegerListsLex(**options):
        # Only words whose letters form an initial segment 0..k are valid labellings.
        if not _is_initial_segment(sorted(set(word))):
            continue
        blocks = [frozenset() for _ in range(max(word) + 1)]
        for position, letter in enumerate(word):
            blocks[letter] = blocks[letter].union({elements[position]})
        refinements.append(tuple(blocks))
    return refinements
def _global_config_as_py_module_proxy_setup():
    """Register the global-config proxy module in ``sys.modules`` (idempotent)."""
    # Only install the proxy once; later calls are no-ops.
    if _PyModuleName not in sys.modules:
        sys.modules[_PyModuleName] = _GlobalConfigAsPyModuleProxy(_PyModuleName)
# NOTE(review): this bare call looks like a mangled decorator
# (presumably ``@_utils.test()`` applied to the test below) — as written its
# result is discarded; confirm against the original source.
_utils.test()
def test_ad_nested_for():
    """Smoke-test autodiff taping over a nested for loop."""
    N = 5
    loss = ti.field(float, shape=(), needs_grad=True)
    # NOTE(review): called under ti.ad.Tape but carries no @ti.kernel
    # decorator here — possibly stripped during extraction; confirm.
    def nested_for():
        for i in range(N):
            for j in range(N):
                pass
    with ti.ad.Tape(loss=loss):
        nested_for()
class Significance(object):
METHODS = {'permute': count_permutation_trials}
def __init__(self, systems, gold, trials=N_TRIALS, method='permute', n_jobs=1, metrics=['precision', 'recall', 'fscore'], fmt='none', measures=DEFAULT_MEASURE, type_weights=None):
if (len(systems) < 2):
raise ValueError('Require at least two systems to compare')
if (method not in self.METHODS):
raise ValueError('Unsupported method: {}'.format(method))
if (Parallel is None):
raise ImportError('Package: "joblib" not available, please install to run significance tests.')
self.systems = systems
self.gold = gold
self.method = method
self.trials = trials
self.n_jobs = n_jobs
self.measures = parse_measures((measures or DEFAULT_MEASURE), incl_clustering=False)
self.metrics = metrics
self.fmt = (self.FMTS[fmt] if (not callable(fmt)) else fmt)
self.weighting = load_weighting(type_weights=type_weights)
def __call__(self):
all_counts = defaultdict(dict)
gold = list(Reader(utf8_open(self.gold)))
for path in self.systems:
system = list(Reader(utf8_open(path)))
doc_pairs = list(Evaluate.iter_pairs(system, gold))
for (measure, per_doc, overall) in Evaluate.count_all(doc_pairs, self.measures, weighting=self.weighting):
all_counts[measure][path] = (per_doc, overall)
results = [{'sys1': sys1, 'sys2': sys2, 'measure': measure, 'stats': self.significance(measure_counts[sys1], measure_counts[sys2])} for (sys1, sys2) in itertools.combinations(self.systems, 2) for (measure, measure_counts) in sorted(all_counts.items(), key=(lambda tup: self.measures.index(tup[0])))]
return self.fmt(self, results)
def significance(self, pair1, pair2):
(per_doc1, overall1) = pair1
(per_doc2, overall2) = pair2
base_diff = _result_diff(overall1, overall2)
randomized_diffs = functools.partial(self.METHODS[self.method], per_doc1, per_doc2, base_diff)
results = Parallel(n_jobs=self.n_jobs)((delayed(randomized_diffs)(share) for share in _job_shares(self.n_jobs, self.trials)))
all_counts = []
for result in results:
(metrics, counts) = zip(*result.items())
all_counts.append(counts)
return {metric: {'diff': base_diff[metric], 'p': ((sum(counts) + 1) / (self.trials + 1))} for (metric, counts) in zip(metrics, zip(*all_counts))}
def add_arguments(cls, p):
    """Attach this command's CLI options to argparse parser ``p`` and return it.

    NOTE(review): takes ``cls`` but carries no ``@classmethod`` decorator in
    this chunk — presumably invoked on the class itself; confirm before
    calling on an instance.
    """
    p.add_argument('systems', nargs='+', metavar='FILE')
    p.add_argument('-g', '--gold', required=True)
    p.add_argument('-n', '--trials', default=N_TRIALS, type=int)
    # --permute / --bootstrap both write to `method`; permute is the default.
    p.add_argument('--permute', dest='method', action='store_const', const='permute', default='permute', help='Use the approximate randomization method')
    p.add_argument('--bootstrap', dest='method', action='store_const', const='bootstrap', help='Use bootstrap resampling')
    p.add_argument('-j', '--n_jobs', default=1, type=int, help='Number of parallel processes, use -1 for all CPUs')
    p.add_argument('-f', '--fmt', default='tab', choices=cls.FMTS.keys())
    p.add_argument('-m', '--measure', dest='measures', action='append', metavar='NAME', help=MEASURE_HELP)
    p.add_argument('--type-weights', metavar='FILE', default=None, help='File mapping gold and sys types to a weight, such as produced by weights-for-hierarchy')
    # Default is pre-split into a list; a user-supplied value is comma-split.
    p.add_argument('--metrics', default='precision recall fscore'.split(), type=(lambda x: x.split(',')), help='Test significance for which metrics (default: precision,recall,fscore)')
    p.set_defaults(cls=cls)
    return p
def tab_format(self, data):
    """Render significance results as an aligned, tab-separated table."""
    metrics = self.metrics
    rows = []
    for row in data:
        stats = row['stats']
        # Row layout: sys1, sys2, measure, then (diff, p) flattened per metric.
        rows.append(([row['sys1'], row['sys2'], row['measure']] + sum(([stats[metric]['diff'], stats[metric]['p']] for metric in metrics), [])))
    # Header mirrors the row layout; metric names truncated to 6 characters.
    header = (['sys1', 'sys2', 'measure'] + sum(([(u'-' + metric[:6]), ('p-' + metric[:6])] for metric in metrics), []))
    # Size the text columns to their content, with sensible minimum widths.
    sys_width = max((len(col) for row in rows for col in row[:2]))
    sys_width = max(sys_width, 4)
    measure_width = max((len(row[2]) for row in rows))
    measure_width = max(measure_width, 5)
    fmt = (u'{:%ds}\t{:%ds}\t{:%ds}' % (sys_width, sys_width, measure_width))
    # Header uses plain '{}' cells; data rows append numeric formats
    # (+8.3f for the signed diff, 8.3f for the p-value) per metric.
    ret = (fmt + ((u'\t{}' * len(metrics)) * 2)).format(*header)
    fmt += u''.join((u'\t{:+8.3f}\t{:8.3f}' for metric in metrics))
    ret += u''.join(((u'\n' + fmt.format(*row)) for row in rows))
    return ret
# Registry of output formatters selectable via the `fmt` argument / --fmt flag.
# json_format and no_format are defined elsewhere in this module.
FMTS = {'tab': tab_format, 'json': json_format, 'none': no_format}
class LinearLayer(nn.Module):
    """Fully connected layer with an activation and optional batch norm.

    The computation is ``bn(act(lin(x)))`` when ``use_bn`` is set, otherwise
    ``act(lin(x))``. Passing ``act='relu'`` installs ``nn.ReLU()``; any other
    value is used directly as the activation module/callable.
    """

    def __init__(self, input_dim, output_dim, act='relu', use_bn=False):
        super().__init__()
        self.use_bn = use_bn
        self.lin = nn.Linear(input_dim, output_dim)
        if act == 'relu':
            self.act = nn.ReLU()
        else:
            self.act = act
        if use_bn:
            self.bn = nn.BatchNorm1d(output_dim)

    def forward(self, x):
        """Apply linear -> activation (-> batch norm) to ``x``."""
        out = self.act(self.lin(x))
        if self.use_bn:
            out = self.bn(out)
        return out
class BaseModel():
    """Abstract base for pix2pix/CycleGAN-style models.

    Subclasses populate ``model_names`` / ``loss_names`` / ``visual_names``
    and implement ``forward`` / ``optimize_parameters``; this class supplies
    device selection, checkpoint save/load, and bookkeeping helpers. Each
    registered name ``X`` corresponds to an attribute ``netX``.
    """

    def modify_commandline_options(parser, is_train):
        # Hook for subclasses to add model-specific CLI flags.
        # NOTE(review): no ``self`` and no ``@staticmethod`` — call on the
        # class, not an instance.
        return parser

    def name(self):
        return 'BaseModel'

    def initialize(self, opt):
        """Store options, pick the compute device, prepare bookkeeping lists."""
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        # First listed GPU is the primary device; otherwise fall back to CPU.
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu'))
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.expr_name)
        # cudnn autotuner: faster when input sizes are fixed across steps.
        torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.image_paths = []

    def set_input(self, input):
        # Subclasses typically unpack the dataloader output here.
        self.input = input

    def forward(self):
        pass

    def setup(self, opt, parser=None):
        """Create LR schedulers (train) and/or restore weights (test/resume)."""
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if (not self.isTrain):
            # An explicit model path takes precedence over an epoch tag.
            if (opt.model_path != ''):
                self.load_model(opt.model_path)
            elif (opt.which_epoch != ''):
                self.load_networks(opt.which_epoch)
            else:
                pass
        elif opt.continue_train:
            self.load_networks(opt.which_epoch)

    def eval(self):
        """Switch every registered net<name> module to eval mode."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net' + name))
                net.eval()

    def test(self):
        # Inference-only forward pass (no autograd graph is built).
        with torch.no_grad():
            self.forward()

    def get_image_paths(self):
        return self.image_paths

    def optimize_parameters(self):
        pass

    def update_learning_rate(self):
        """Step all LR schedulers; return the first optimizer's new LR."""
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]['lr']
        print(('learning rate = %.7f' % lr))
        return lr

    def get_current_visuals(self):
        """Return an ordered name -> tensor mapping for visualization."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return an ordered name -> float mapping of current loss values."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, ('loss_' + name)))
        return errors_ret

    def save_networks(self, which_epoch):
        """Save every net<name> as <epoch>_net_<name>.pth under save_dir."""
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = ('%s_net_%s.pth' % (which_epoch, name))
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, ('net' + name))
                if ((len(self.gpu_ids) > 0) and torch.cuda.is_available()):
                    # Save CPU tensors from the DataParallel-wrapped module,
                    # then move the live network back onto its GPU.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Drop stale InstanceNorm buffers saved by old (<0.4) PyTorch checkpoints."""
        key = keys[i]
        if ((i + 1) == len(keys)):
            # Leaf of the dotted key path: prune entries the current module
            # no longer tracks.
            if (module.__class__.__name__.startswith('InstanceNorm') and ((key == 'running_mean') or (key == 'running_var'))):
                if (getattr(module, key) is None):
                    state_dict.pop('.'.join(keys))
            if (module.__class__.__name__.startswith('InstanceNorm') and (key == 'num_batches_tracked')):
                state_dict.pop('.'.join(keys))
        else:
            # Recurse into the submodule named by this path component.
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, (i + 1))

    def load_networks(self, which_epoch, net_suff=None):
        """Load <epoch>_net_<name>.pth into each registered net<name>[_<suffix>]."""
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = ('%s_net_%s.pth' % (which_epoch, name))
                load_path = os.path.join(self.save_dir, load_filename)
                if (net_suff is not None):
                    net = getattr(self, ((('net' + name) + '_') + net_suff))
                else:
                    net = getattr(self, ('net' + name))
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print(('loading the model from %s' % load_path))
                state_dict = torch.load(load_path, map_location=str(self.device))
                # NOTE(review): assumes the checkpoint carries `_metadata`
                # (torch-saved state dicts do); AttributeError otherwise.
                del state_dict._metadata
                for key in list(state_dict.keys()):
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    def load_model(self, model_path):
        """Load a raw state-dict checkpoint directly into ``self.netDC``."""
        state_dict = torch.load(model_path, map_location=str(self.device))
        del state_dict._metadata
        net = self.netDC
        if isinstance(net, torch.nn.DataParallel):
            net = net.module
        net.load_state_dict(state_dict)

    def print_networks(self, verbose=False):
        """Optionally print each registered network's architecture."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, ('net' + name))
                if verbose:
                    print(net)
# NOTE(review): these two bare names look like decorators whose '@' was lost
# in extraction (likely `@_safe_enum` / `@_enum` applied to TilingType). As
# written they are no-op expression statements that raise NameError unless
# defined elsewhere in this file — confirm against the original source.
_safe_enum
_enum
class TilingType(aenum.AutoNumberEnum):
    """Tiling strategy selector; members auto-number in declaration order."""
    Normal = ()
    CeilRange = ()
    NumberOfTiles = ()
class UnbiasedPAUCLoss(nn.Module):
    """Partial-AUC surrogate loss with learnable auxiliary scalars.

    Maintains auxiliary variables (a, b, g, s_p, s_n, lam_a, lam_b) of a
    min-max formulation of partial-AUC optimization; all carry gradients so
    the training loop can update them alongside the model parameters.
    NOTE(review): the objective appears to follow a published pAUC min-max
    formulation — confirm the exact formulas against the source paper before
    modifying them.
    """

    def __init__(self, alpha, beta, device):
        super(UnbiasedPAUCLoss, self).__init__()
        # alpha / beta: partial-AUC range parameters; `na` keeps alpha as a
        # plain number for the branch test in forward().
        self.alpha = torch.tensor(alpha)
        self.na = alpha
        self.beta = torch.tensor(beta)
        # kappa: temperature of the softplus-style smooth max used below.
        self.kappa = torch.tensor(2)
        self.a = torch.tensor(0.5).to(device)
        self.b = torch.tensor(0.5).to(device)
        self.g = torch.tensor(1.0).to(device)
        self.s_p = torch.tensor(0.5).to(device)
        self.s_n = torch.tensor(0.5).to(device)
        self.lam_b = torch.tensor(0.0).to(device)
        self.lam_a = torch.tensor(0.0).to(device)
        # Auxiliary variables are optimized jointly with the network.
        self.a.requires_grad = True
        self.b.requires_grad = True
        self.g.requires_grad = True
        self.s_p.requires_grad = True
        self.s_n.requires_grad = True
        self.lam_b.requires_grad = True
        self.lam_a.requires_grad = True

    def forward(self, pred, target):
        """Compute the loss from raw scores ``pred`` and binary labels ``target``."""
        # Split scores into positives (label == 1) and negatives (everything else).
        pred_p = pred[target.eq(1)]
        pred_n = pred[target.ne(1)]
        if (self.na == 1):
            # alpha == 1: one-way pAUC — only the negative tail is restricted.
            # log(1 + exp(kappa * x)) / kappa smoothly approximates max(x, 0).
            max_val_n = (torch.log((1 + torch.exp((self.kappa * ((torch.square((pred_n - self.b)) + ((2 * (1 + self.g)) * pred_n)) - self.s_n))))) / self.kappa)
            res = (((torch.mean((torch.square((pred_p - self.a)) - ((2 * (1 + self.g)) * pred_p))) + (((self.beta * self.s_n) + torch.mean(max_val_n)) / self.beta)) - (1 * (self.g ** 2))) - (self.lam_b * ((self.b - 1) - self.g)))
        else:
            # General case: both positive and negative tails restricted.
            max_val_p = (torch.log((1 + torch.exp((self.kappa * ((torch.square((pred_p - self.a)) - ((2 * (1 + self.g)) * pred_p)) - self.s_p))))) / self.kappa)
            max_val_n = (torch.log((1 + torch.exp((self.kappa * ((torch.square((pred_n - self.b)) + ((2 * (1 + self.g)) * pred_n)) - self.s_n))))) / self.kappa)
            res = ((((((self.s_p + (torch.mean(max_val_p) / self.alpha)) + self.s_n) + (torch.mean(max_val_n) / self.beta)) - (1 * (self.g ** 2))) - (self.lam_b * ((self.b - 1) - self.g))) + (self.lam_a * (self.a + self.g)))
        return res
def get_hw_timming(in_dir):
    """Yield a BlockTimelineRecord for each consecutive iterN.profile file.

    Scans ``in_dir`` for ``iter0.profile``, ``iter1.profile``, ... and stops
    at the first missing index, so records are yielded in iteration order.
    (Name kept as-is, typo included, for interface compatibility.)
    """
    index = 0
    while True:
        profile_path = os.path.join(in_dir, f'iter{index}.profile')
        if not os.path.isfile(profile_path):
            break
        yield BlockTimelineRecord(profile_path)
        index += 1
class Functional():
    """Boundary-flux mismatch functional for a PDE-constrained problem.

    Stores a target boundary observation and a boundary normal; each call to
    ``solver_step`` assembles J = ∫ (coefficient * n·grad(u) - target)^2 ds
    for the supplied numerical solution u.
    """

    def __init__(self, target, normal):
        self.target = target
        self.n = normal
        # Assembled objective value; populated by solver_step().
        self.J = None

    def solver_step(self, numerical_solution, coefficient):
        """Assemble the squared boundary residual for the given solution."""
        flux = coefficient * inner(self.n, grad(numerical_solution))
        residual = flux - self.target
        self.J = assemble((residual ** 2) * ds)
class DistanceRepresentation():
    """Euclidean distance between corresponding points of two masked arrays.

    Distances are computed along the last axis; masked results are filled
    with 0 so the return value is a plain (unmasked-filled) array.
    """

    def distance(self, p1s: ma.MaskedArray, p2s: ma.MaskedArray) -> ma.MaskedArray:
        """Return per-point Euclidean distances, with masked entries as 0."""
        delta = p1s - p2s
        dist = ma.sqrt(ma.power(delta, 2).sum(axis=-1))
        return dist.filled(0)

    def __call__(self, p1s: ma.MaskedArray, p2s: ma.MaskedArray) -> ma.MaskedArray:
        return self.distance(p1s, p2s)
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue, cluster_shape, threadblock_shape, stages, opclass, persistent=False):
    """Generate one int8 GEMM test case and attach it to test class ``cls``.

    Builds a closure exercising a single (layout, alignment, tile, opclass)
    configuration on SM90 and registers it under a parameter-derived name so
    the unittest runner discovers one test per configuration. Returns the
    generated test function.
    """
    def run(self):
        element_A = cutlass.int8
        element_B = cutlass.int8
        # SIMT needs an explicit instruction shape / warp count; tensor-op
        # configurations leave both to the library defaults (None).
        inst_shape = ([1, 1, 1] if (opclass == cutlass.OpClass.Simt) else None)
        warp_count = ([2, 2, 1] if (opclass == cutlass.OpClass.Simt) else None)
        math_inst = MathInstruction(instruction_shape=inst_shape, element_a=element_A, element_b=element_B, element_accumulator=element_accumulator, opcode_class=opclass, math_operation=MathOperation.multiply_add)
        tile_description = TileDescription(threadblock_shape=threadblock_shape, cluster_shape=cluster_shape, stages=stages, warp_count=warp_count, math_instruction=math_inst, persistent=persistent)
        A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
        B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
        C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
        # Integer SIMT epilogues clamp the accumulator into the output range.
        if (opclass == cutlass.OpClass.Simt):
            epilogue_functor_cls = LinearCombinationClamp
        else:
            epilogue_functor_cls = LinearCombination
        epilogue_functor = epilogue_functor_cls(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=90, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))
    # Distinguish persistent-kernel variants in the generated test name.
    if persistent:
        suffix = '_persistent'
    else:
        suffix = ''
    name = name_fn(layouts, alignments, element_output, element_accumulator, element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
    setattr(cls, name, run)
    return run
# NOTE(review): `_dispatch` looks like a decorator whose '@' was lost in
# extraction (scipy.fft-style uarray dispatch); as written it is a bare
# expression statement. Confirm against the original source.
_dispatch
def irfft2(x, s=None, axes=((- 2), (- 1)), norm=None, overwrite_x=False, workers=None, *, plan=None):
    """2-D inverse real FFT dispatch stub.

    Returns the uarray ``Dispatchable`` tuple identifying which argument
    backend selection is performed on; a registered backend provides the
    actual transform implementation.
    """
    return (Dispatchable(x, np.ndarray),)
def load_test_data(query_andwer_file, collections_file):
    """Load QA test queries and the passage collection from TSV files.

    Args:
        query_andwer_file: TSV of ``question<TAB>answer_literal`` rows, where
            the answer column holds a Python literal (e.g. a list of ids).
            (Parameter-name typo kept for interface compatibility.)
        collections_file: TSV of ``id<TAB>title<TAB>text`` rows.

    Returns:
        ``(questions, answers, collections)`` where ``collections`` maps
        ``int id -> (title, text)``.
    """
    questions = []
    answers = []
    # `with` guarantees the handles are closed (they previously leaked).
    with open(query_andwer_file, encoding='utf-8') as qa_file:
        for line in qa_file:
            fields = line.strip().split('\t')
            questions.append(fields[0])
            # SECURITY: eval() executes arbitrary Python from the data file;
            # only use with trusted files (ast.literal_eval would be safer).
            answers.append(eval(fields[1]))
    collections = {}
    with open(collections_file, encoding='utf-8') as col_file:
        for line in col_file:
            fields = line.strip().split('\t')
            collections[int(fields[0])] = (fields[1], fields[2])
    return (questions, answers, collections)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with optional output width.

    Matches torchvision's Bottleneck, except a non-zero ``outdim`` overrides
    the usual ``planes * expansion`` channel count of the final convolution.
    """
    expansion: int = 4

    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None, outdim: int=0) -> None:
        super().__init__()
        norm = nn.BatchNorm2d if norm_layer is None else norm_layer
        # Width of the 3x3 stage scales with base_width and group count.
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm(width)
        # outdim == 0 keeps the standard expansion; otherwise override it.
        out_channels = planes * self.expansion if outdim == 0 else outdim
        self.conv3 = conv1x1(width, out_channels)
        self.bn3 = norm(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """Residual forward: main branch plus (optionally projected) identity."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
def qmu_tilde(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=False):
    """Compute the qmu_tilde profile-likelihood test statistic.

    Validates that the model defines a POI and that its lower bound is zero
    (warning otherwise), then delegates the actual computation to _qmu_like.

    Raises:
        UnspecifiedPOI: if the model configuration has no POI defined.
    """
    poi_index = pdf.config.poi_index
    if poi_index is None:
        raise UnspecifiedPOI('No POI is defined. A POI is required for profile likelihood based test statistics.')
    # qmu_tilde assumes the POI is bounded below at zero; warn when it is not.
    if par_bounds[poi_index][0] != 0:
        log.warning(
            'qmu_tilde test statistic used for fit configuration with POI not bounded at zero.\n'
            'Use the qmu test statistic (pyhf.infer.test_statistics.qmu) instead.\n'
            'If you called this from pyhf.infer.mle or pyhf.infer.hypotest, set test_stat="q".'
        )
    return _qmu_like(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=return_fitted_pars)
def generate_layer(global_info, writer, out_file, tiu_instance_map, gdma_instance_map, chip_arch):
    """Collect layer records from all subnets, annotate KPIs, and write them.

    Flattens the layer lists of every non-None subnet, attaches TIU/GDMA KPI
    fields, writes the layer sheet for ``chip_arch`` via ``writer``, and
    returns the popped layer-info mapping. (``out_file`` is accepted but not
    used here — kept for interface compatibility.)
    """
    layers = []
    for subnet in global_info.subnet_list:
        if subnet is not None:
            layers.extend(subnet.layer_list)
    infos = TotalLayerInfo(writer, layers)
    infos.add_kpi_field(tiu_instance_map, gdma_instance_map)
    info_map = infos.pop_data()
    infos.write(chip_arch)
    return info_map
class SqlTemplate():
    """Anonymized token template for a SQL query.

    Lower-cases and pads the raw SQL, replaces literals with CODE_*
    placeholders and (optionally) renames column/table identifiers to
    col<N>/tab<N>, so structurally identical queries map to identical token
    sequences. Relies on module-level ptype constants (KEYWORD, COLUMN,
    TABLE, STRING, ...) and the sqlparse package.
    """

    @staticmethod
    def sanitizeSql(sql):
        """Normalize raw SQL text prior to parsing."""
        s = sql.strip().lower()
        # Ensure a terminating semicolon.
        if (not (s[(- 1)] == ';')):
            s += ';'
        # Pad parentheses so they tokenize separately.
        s = re.sub('\\(', ' ( ', s)
        s = re.sub('\\)', ' ) ', s)
        # Suffix identifier-like reserved words (e.g. `user` -> `user1`) so
        # sqlparse does not classify them as keywords.
        words = ['index', 'table', 'day', 'year', 'user', 'text']
        for word in words:
            s = re.sub((('([^\\w])' + word) + '$'), (('\\1' + word) + '1'), s)
            s = re.sub((('([^\\w])' + word) + '([^\\w])'), ((('\\1' + word) + '1') + '\\2'), s)
        s = s.replace('#', '')
        return s

    def parseStrings(self, tok):
        """Replace string literals with CODE_STRING (or regex tokens)."""
        if isinstance(tok, sqlparse.sql.TokenList):
            for c in tok.tokens:
                self.parseStrings(c)
        elif (tok.ptype == STRING):
            if self.regex:
                tok.value = ' '.join(tokenizeRegex(tok.value))
            else:
                tok.value = 'CODE_STRING'

    def renameIdentifiers(self, tok):
        """Rename columns/tables to col<N>/tab<N>; numeric literals to CODE_*."""
        if isinstance(tok, sqlparse.sql.TokenList):
            for c in tok.tokens:
                self.renameIdentifiers(c)
        elif (tok.ptype == COLUMN):
            if (str(tok) not in self.idMap['COLUMN']):
                colname = ('col' + str(self.idCount['COLUMN']))
                self.idMap['COLUMN'][str(tok)] = colname
                self.idMapInv[colname] = str(tok)
                self.idCount['COLUMN'] += 1
            tok.value = self.idMap['COLUMN'][str(tok)]
        elif (tok.ptype == TABLE):
            if (str(tok) not in self.idMap['TABLE']):
                tabname = ('tab' + str(self.idCount['TABLE']))
                self.idMap['TABLE'][str(tok)] = tabname
                self.idMapInv[tabname] = str(tok)
                self.idCount['TABLE'] += 1
            tok.value = self.idMap['TABLE'][str(tok)]
        elif (tok.ptype == FLOAT):
            tok.value = 'CODE_FLOAT'
        elif (tok.ptype == INTEGER):
            tok.value = 'CODE_INTEGER'
        elif (tok.ptype == HEX):
            tok.value = 'CODE_HEX'

    def __hash__(self):
        # NOTE(review): hashes `tokensWithBlanks`, which is never assigned in
        # this class as shown — presumably set by external code; confirm
        # before relying on hashing instances.
        return hash(tuple([str(x) for x in self.tokensWithBlanks]))

    def __init__(self, sql, regex=False, rename=True):
        """Parse ``sql`` and build the anonymized token sequence.

        Args:
            sql: raw SQL text.
            regex: tokenize string literals with tokenizeRegex instead of
                collapsing them to CODE_STRING.
            rename: anonymize column/table identifiers to col<N>/tab<N>.
        """
        self.sql = SqlTemplate.sanitizeSql(sql)
        self.idMap = {'COLUMN': {}, 'TABLE': {}}
        self.idMapInv = {}
        self.idCount = {'COLUMN': 0, 'TABLE': 0}
        self.regex = regex
        self.parseTreeSentinel = False
        self.tableStack = []
        # Only the first parsed statement is kept.
        self.parse = sqlparse.parse(self.sql)
        self.parse = [self.parse[0]]
        self.removeWhitespaces(self.parse[0])
        self.identifyLiterals(self.parse[0])
        self.parse[0].ptype = SUBQUERY
        self.identifySubQueries(self.parse[0])
        self.identifyFunctions(self.parse[0])
        self.identifyTables(self.parse[0])
        self.parseStrings(self.parse[0])
        if rename:
            self.renameIdentifiers(self.parse[0])
        self.tokens = SqlTemplate.getTokens(self.parse)

    @staticmethod
    def getTokens(parse):
        """Flatten parsed statements into a list of token strings."""
        flatParse = []
        for expr in parse:
            for token in expr.flatten():
                if (token.ptype == STRING):
                    # Regex-tokenized strings may contain several words.
                    flatParse.extend(str(token).split(' '))
                else:
                    flatParse.append(str(token))
        return flatParse

    def removeWhitespaces(self, tok):
        """Recursively drop whitespace tokens from the parse tree."""
        if isinstance(tok, sqlparse.sql.TokenList):
            tmpChildren = []
            for c in tok.tokens:
                # NOTE(review): `is_whitespace` is a method in older sqlparse
                # releases but a property in newer ones — pin the version.
                if (not c.is_whitespace()):
                    tmpChildren.append(c)
            tok.tokens = tmpChildren
            for c in tok.tokens:
                self.removeWhitespaces(c)

    def identifySubQueries(self, tokenList):
        """Tag parenthesized SELECTs as SUBQUERY; return whether one was seen."""
        isSubQuery = False
        for tok in tokenList.tokens:
            if isinstance(tok, sqlparse.sql.TokenList):
                subQuery = self.identifySubQueries(tok)
                if (subQuery and isinstance(tok, sqlparse.sql.Parenthesis)):
                    tok.ptype = SUBQUERY
            elif (str(tok) == 'select'):
                isSubQuery = True
        return isSubQuery

    def identifyLiterals(self, tokenList):
        """Assign a ptype to every token based on its sqlparse ttype."""
        blankTokens = [sqlparse.tokens.Name, sqlparse.tokens.Name.Placeholder]
        blankTokenTypes = [sqlparse.sql.Identifier]
        for tok in tokenList.tokens:
            if isinstance(tok, sqlparse.sql.TokenList):
                tok.ptype = INTERNAL
                self.identifyLiterals(tok)
            elif ((tok.ttype == sqlparse.tokens.Keyword) or (str(tok) == 'select')):
                tok.ptype = KEYWORD
            elif ((tok.ttype == sqlparse.tokens.Number.Integer) or (tok.ttype == sqlparse.tokens.Literal.Number.Integer)):
                tok.ptype = INTEGER
            elif ((tok.ttype == sqlparse.tokens.Number.Hexadecimal) or (tok.ttype == sqlparse.tokens.Literal.Number.Hexadecimal)):
                tok.ptype = HEX
            elif ((tok.ttype == sqlparse.tokens.Number.Float) or (tok.ttype == sqlparse.tokens.Literal.Number.Float)):
                tok.ptype = FLOAT
            elif ((tok.ttype == sqlparse.tokens.String.Symbol) or (tok.ttype == sqlparse.tokens.String.Single) or (tok.ttype == sqlparse.tokens.Literal.String.Single) or (tok.ttype == sqlparse.tokens.Literal.String.Symbol)):
                tok.ptype = STRING
            elif (tok.ttype == sqlparse.tokens.Wildcard):
                tok.ptype = WILDCARD
            elif ((tok.ttype in blankTokens) or isinstance(tok, blankTokenTypes[0])):
                tok.ptype = COLUMN

    def identifyFunctions(self, tokenList):
        """Tag tokens that belong to a function call as FUNCTION."""
        for tok in tokenList.tokens:
            # Sentinel flips on at a Function node and off at its Parenthesis.
            if isinstance(tok, sqlparse.sql.Function):
                self.parseTreeSentinel = True
            elif isinstance(tok, sqlparse.sql.Parenthesis):
                self.parseTreeSentinel = False
            if self.parseTreeSentinel:
                tok.ptype = FUNCTION
            if isinstance(tok, sqlparse.sql.TokenList):
                self.identifyFunctions(tok)

    def identifyTables(self, tokenList):
        """Re-tag COLUMN tokens as TABLE inside FROM clauses / dotted prefixes."""
        if (tokenList.ptype == SUBQUERY):
            self.tableStack.append(False)
        # BUGFIX: was `xrange(...)`, which does not exist in Python 3 (this
        # file uses f-strings elsewhere, so it targets Python 3).
        for i in range(len(tokenList.tokens)):
            # NOTE(review): at i == 0 this wraps to the LAST token; apparently
            # harmless for the '.'-prefix check, but confirm intent.
            prevtok = tokenList.tokens[(i - 1)]
            tok = tokenList.tokens[i]
            if ((str(tok) == '.') and (tok.ttype == sqlparse.tokens.Punctuation) and (prevtok.ptype == COLUMN)):
                # `tab.col`: the name before the dot is a table reference.
                prevtok.ptype = TABLE
            elif ((str(tok) == 'from') and (tok.ttype == sqlparse.tokens.Keyword)):
                self.tableStack[(- 1)] = True
            elif (((str(tok) == 'where') or (str(tok) == 'on') or (str(tok) == 'group') or (str(tok) == 'order') or (str(tok) == 'union')) and (tok.ttype == sqlparse.tokens.Keyword)):
                self.tableStack[(- 1)] = False
            if isinstance(tok, sqlparse.sql.TokenList):
                self.identifyTables(tok)
            elif (tok.ptype == COLUMN):
                if self.tableStack[(- 1)]:
                    tok.ptype = TABLE
        if (tokenList.ptype == SUBQUERY):
            self.tableStack.pop()

    def __str__(self):
        return ' '.join([str(tok) for tok in self.tokens])

    def parseSql(self):
        """Return the anonymized token strings as a list."""
        return [str(tok) for tok in self.tokens]
def make_data_loader(opt, *args):
    """Instantiate the generation data loader matching ``opt.dataset``.

    Supports 'atomic' and 'conceptnet'; any other dataset name yields None
    (mirroring the original fall-through behavior).
    """
    dataset = opt.dataset
    if dataset == 'atomic':
        return atomic_data.GenerationDataLoader(opt, *args)
    if dataset == 'conceptnet':
        return conceptnet_data.GenerationDataLoader(opt, *args)
    return None
def test_methods_and_attributes():
    """pybind11 binding test: method call styles, overloads, and attributes.

    Exercises the ExampleMandA bindings: the add1..add10 mutators (by-value,
    by-reference, by-pointer variants), self-returning methods, overload
    resolution (including const overloads and float-only sets), direct
    attribute access, and constructor/destructor accounting.
    """
    instance1 = m.ExampleMandA()
    instance2 = m.ExampleMandA(32)
    # Each addN variant adds instance2's value (32) to instance1.
    instance1.add1(instance2)
    instance1.add2(instance2)
    instance1.add3(instance2)
    instance1.add4(instance2)
    instance1.add5(instance2)
    # These variants take a plain int instead of an object.
    instance1.add6(32)
    instance1.add7(32)
    instance1.add8(32)
    instance1.add9(32)
    instance1.add10(32)
    # Ten additions of 32 -> 320; instance2 must be unchanged.
    assert (str(instance1) == 'ExampleMandA[value=320]')
    assert (str(instance2) == 'ExampleMandA[value=32]')
    # selfN() return self in different C++ signatures; value is preserved.
    assert (str(instance1.self1()) == 'ExampleMandA[value=320]')
    assert (str(instance1.self2()) == 'ExampleMandA[value=320]')
    assert (str(instance1.self3()) == 'ExampleMandA[value=320]')
    assert (str(instance1.self4()) == 'ExampleMandA[value=320]')
    assert (str(instance1.self5()) == 'ExampleMandA[value=320]')
    assert (instance1.internal1() == 320)
    assert (instance1.internal2() == 320)
    assert (instance1.internal3() == 320)
    assert (instance1.internal4() == 320)
    assert (instance1.internal5() == 320)
    # Overload resolution picks the binding matching the argument types.
    assert (instance1.overloaded() == '()')
    assert (instance1.overloaded(0) == '(int)')
    assert (instance1.overloaded(1, 1.0) == '(int, float)')
    assert (instance1.overloaded(2.0, 2) == '(float, int)')
    assert (instance1.overloaded(3, 3) == '(int, int)')
    assert (instance1.overloaded(4.0, 4.0) == '(float, float)')
    assert (instance1.overloaded_const((- 3)) == '(int) const')
    assert (instance1.overloaded_const(5, 5.0) == '(int, float) const')
    assert (instance1.overloaded_const(6.0, 6) == '(float, int) const')
    assert (instance1.overloaded_const(7, 7) == '(int, int) const')
    assert (instance1.overloaded_const(8.0, 8.0) == '(float, float) const')
    # Only a float overload exists, so ints are implicitly converted.
    assert (instance1.overloaded_float(1, 1) == '(float, float)')
    assert (instance1.overloaded_float(1, 1.0) == '(float, float)')
    assert (instance1.overloaded_float(1.0, 1) == '(float, float)')
    assert (instance1.overloaded_float(1.0, 1.0) == '(float, float)')
    # Direct attribute read/write through the bound property.
    assert (instance1.value == 320)
    instance1.value = 100
    assert (str(instance1) == 'ExampleMandA[value=100]')
    # Constructor statistics: both instances alive, then collected after del.
    cstats = ConstructorStats.get(m.ExampleMandA)
    assert (cstats.alive() == 2)
    del instance1, instance2
    assert (cstats.alive() == 0)
    assert (cstats.values() == ['32'])
    assert (cstats.default_constructions == 1)
    assert (cstats.copy_constructions == 3)
    assert (cstats.move_constructions >= 1)
    assert (cstats.copy_assignments == 0)
    assert (cstats.move_assignments == 0)
def test_load_svmlight_files():
    """Loading the same svmlight file repeatedly yields identical matrices
    with the requested dtype applied to every returned array."""
    data_path = _svmlight_local_test_file_path(datafile)
    # Two copies of one file must produce equal "train" and "test" splits.
    (X_train, y_train, X_test, y_test) = load_svmlight_files(([str(data_path)] * 2), dtype=np.float32)
    assert_array_equal(X_train.toarray(), X_test.toarray())
    assert_array_almost_equal(y_train, y_test)
    assert (X_train.dtype == np.float32)
    assert (X_test.dtype == np.float32)
    # The dtype argument must be honored consistently across three files.
    (X1, y1, X2, y2, X3, y3) = load_svmlight_files(([str(data_path)] * 3), dtype=np.float64)
    assert (X1.dtype == X2.dtype)
    assert (X2.dtype == X3.dtype)
    assert (X3.dtype == np.float64)
def load_toxcast(featurizer='Weave', cross_validation=False, test=False, split='random', reload=True, K=5, mode='regression', predict_cold=False, cold_drug=False, cold_target=False, cold_drug_cluster=False, split_warm=False, filter_threshold=0, prot_seq_dict=None, currdir='./', oversampled=False, input_protein=True, remove_val_set_entries=False):
    """Load (or build and cache) the ToxCast drug/protein dataset.

    Returns ``(tasks, all_dataset, transformers)`` where ``all_dataset`` is
    either a (train, valid, test) triple or, under ``cross_validation``, a
    list of K folds. When ``reload`` is set, a previously featurized split is
    loaded from a cache directory whose name encodes all split options.
    NOTE(review): ``mode`` values other than regression/reg-threshold/
    classification leave ``file_name`` unbound — presumably unreachable by
    callers; confirm.
    """
    if cross_validation:
        # CV and a held-out test split are mutually exclusive here.
        assert (not test)
    data_dir = (currdir + 'full_toxcast/')
    # Pick the CSV and task columns depending on whether protein inputs are used.
    if input_protein:
        if ((mode == 'regression') or (mode == 'reg-threshold')):
            mode = 'regression'
            file_name = 'restructured.csv'
        elif (mode == 'classification'):
            file_name = 'restructured_bin.csv'
        dataset_file = os.path.join(data_dir, file_name)
        df = pd.read_csv(dataset_file, header=0, index_col=False)
        headers = list(df)
        # Last 3 columns are smiles/proteinName/protein_dataset, not tasks.
        tasks = headers[:(- 3)]
    else:
        if ((mode == 'regression') or (mode == 'reg-threshold')):
            mode = 'regression'
            file_name = 'restructured_no_prot.csv'
        elif (mode == 'classification'):
            file_name = 'restructured_bin_no_prot.csv'
        dataset_file = os.path.join(data_dir, file_name)
        df = pd.read_csv(dataset_file, header=0, index_col=False)
        headers = list(df)
        # Only the trailing smiles column is not a task.
        tasks = headers[:(- 1)]
    if reload:
        # Build a cache-directory suffix that encodes every split option.
        delim = '_5/'
        if remove_val_set_entries:
            delim = ('_rmval' + delim)
        if (not input_protein):
            delim = ('_no_prot' + delim)
        if (filter_threshold > 0):
            delim = ('_filtered' + delim)
        # Cold-split variants are mutually exclusive (first match wins).
        if predict_cold:
            delim = ('_cold' + delim)
        elif split_warm:
            delim = ('_warm' + delim)
        elif cold_drug:
            delim = ('_cold_drug' + delim)
        elif cold_target:
            delim = ('_cold_target' + delim)
        elif cold_drug_cluster:
            delim = ('_cold_drug_cluster' + delim)
        if oversampled:
            delim = ('_oversp' + delim)
        if cross_validation:
            delim = ('_CV' + delim)
            save_dir = os.path.join(data_dir, ((((featurizer + delim) + mode) + '/') + split))
            (loaded, all_dataset, transformers) = dcCustom.utils.save.load_cv_dataset_from_disk(save_dir, K)
        else:
            save_dir = os.path.join(data_dir, ((((featurizer + delim) + mode) + '/') + split))
            (loaded, all_dataset, transformers) = dcCustom.utils.save.load_dataset_from_disk(save_dir)
        # Cache hit: return the previously featurized dataset immediately.
        if loaded:
            return (tasks, all_dataset, transformers)
    # Directory of a reference validation set whose entries should be removed.
    if remove_val_set_entries:
        if input_protein:
            save_dir_val_set = os.path.join(data_dir, ((((featurizer + '/') + mode) + '/') + split))
        else:
            save_dir_val_set = os.path.join(data_dir, ((((featurizer + '_cold_drug_no_prot/') + mode) + '/') + split))
    else:
        save_dir_val_set = None
    # Map the featurizer name to its implementation.
    if (featurizer == 'Weave'):
        featurizer = dcCustom.feat.WeaveFeaturizer()
    elif (featurizer == 'ECFP'):
        featurizer = dcCustom.feat.CircularFingerprint(size=1024)
    elif (featurizer == 'GraphConv'):
        featurizer = dcCustom.feat.ConvMolFeaturizer()
    loader = dcCustom.data.CSVLoader(tasks=tasks, smiles_field='smiles', protein_field='proteinName', source_field='protein_dataset', featurizer=featurizer, prot_seq_dict=prot_seq_dict, input_protein=input_protein)
    dataset = loader.featurize(dataset_file, shard_size=8192)
    # Normalize targets (regression) or rebalance weights (classification).
    if (mode == 'regression'):
        transformers = [dcCustom.trans.NormalizationTransformer(transform_y=True, dataset=dataset)]
    elif (mode == 'classification'):
        transformers = [dcCustom.trans.BalancingTransformer(transform_w=True, dataset=dataset)]
    print('About to transform data')
    for transformer in transformers:
        dataset = transformer.transform(dataset)
    splitters = {'index': deepchem.splits.IndexSplitter(), 'random': dcCustom.splits.RandomSplitter(split_cold=predict_cold, cold_drug=cold_drug, cold_target=cold_target, cold_drug_cluster=cold_drug_cluster, split_warm=split_warm, prot_seq_dict=prot_seq_dict, threshold=filter_threshold, oversampled=oversampled, input_protein=input_protein, remove_val_set_entries=remove_val_set_entries, save_dir_val_set=save_dir_val_set), 'scaffold': deepchem.splits.ScaffoldSplitter(), 'butina': deepchem.splits.ButinaSplitter(), 'task': deepchem.splits.TaskSplitter()}
    splitter = splitters[split]
    if test:
        (train, valid, test) = splitter.train_valid_test_split(dataset)
        all_dataset = (train, valid, test)
        if reload:
            dcCustom.utils.save.save_dataset_to_disk(save_dir, train, valid, test, transformers)
    elif cross_validation:
        fold_datasets = splitter.k_fold_split(dataset, K)
        all_dataset = fold_datasets
        if reload:
            dcCustom.utils.save.save_cv_dataset_to_disk(save_dir, all_dataset, K, transformers)
    else:
        # No held-out test set: 90/10 train/valid split only.
        (train, valid, test) = splitter.train_valid_test_split(dataset, frac_train=0.9, frac_valid=0.1, frac_test=0)
        all_dataset = (train, valid, test)
        if reload:
            dcCustom.utils.save.save_dataset_to_disk(save_dir, train, valid, test, transformers)
    return (tasks, all_dataset, transformers)
class Codegen():
def __init__(self, inputs: Values, outputs: Values, config: codegen_config.CodegenConfig, name: T.Optional[str]=None, return_key: T.Optional[str]=None, sparse_matrices: T.Sequence[str]=None, docstring: str=None) -> None:
if (sf.epsilon() == 0):
warning_message = '\n Generating code with epsilon set to 0 - This is dangerous! You may get NaNs, Infs,\n or numerically unstable results from calling generated functions near singularities.\n\n In order to safely generate code, you should set epsilon to either a symbol\n (recommended) or a small numerical value like `sf.numeric_epsilon`. You should do\n this before importing any other code from symforce, e.g. with\n\n import symforce\n symforce.set_epsilon_to_symbol()\n\n or\n\n import symforce\n symforce.set_epsilon_to_number()\n\n For more information on use of epsilon to prevent singularities, take a look at the\n Epsilon Tutorial: '
warning_message = textwrap.indent(textwrap.dedent(warning_message), ' ')
if (config.zero_epsilon_behavior == codegen_config.ZeroEpsilonBehavior.FAIL):
raise ValueError(warning_message)
elif (config.zero_epsilon_behavior == codegen_config.ZeroEpsilonBehavior.WARN):
logger.warning(warning_message)
elif (config.zero_epsilon_behavior == codegen_config.ZeroEpsilonBehavior.ALLOW):
pass
else:
raise ValueError(f'Invalid config.zero_epsilon_behavior: {config.zero_epsilon_behavior}')
self.name = name
assert isinstance(inputs, Values)
assert isinstance(outputs, Values)
inputs = inputs.dataclasses_to_values()
outputs = outputs.dataclasses_to_values()
self.inputs = inputs
self.outputs = outputs
input_symbols_list = codegen_util.flat_symbols_from_values(inputs)
input_symbols = set(input_symbols_list)
if (not self.output_symbols.issubset(input_symbols)):
missing_outputs = (self.output_symbols - input_symbols)
error_msg = textwrap.dedent(f'''
A symbol in the output expression is missing from inputs
Inputs:
{input_symbols}
Missing symbols:
{(self.output_symbols - input_symbols)}
''')
if (sf.epsilon() in missing_outputs):
error_msg += textwrap.dedent(f'''
One of the missing symbols is `{sf.epsilon()}`, which is the default epsilon -
this typically means you called a function that requires an epsilon without
passing a value. You need to either pass 0 for epsilon if you'd like to use 0,
pass through the symbol you're using for epsilon if it's not `{sf.epsilon()}`,
or add `{sf.epsilon()}` as an input to your generated function. You would do
this either by adding an argument `{sf.epsilon()}: sf.Scalar` if using a
symbolic function, or setting `inputs["{sf.epsilon()}"] = sf.Symbol("{sf.epsilon()}")`
if using `inputs` and `outputs` `Values`.
If you aren't sure where you may have forgotten to pass an epsilon, setting
epsilon to invalid may be helpful. You should do this before importing any other
code from symforce, e.g. with
import symforce
symforce.set_epsilon_to_invalid()
''')
raise ValueError(error_msg)
assert all((k.isidentifier() for k in inputs.keys()))
assert all((k.isidentifier() for k in outputs.keys()))
assert (len(input_symbols) == len(input_symbols_list)), 'Symbols in inputs must be unique. Duplicate symbols = {}'.format([symbol for symbol in input_symbols_list if (input_symbols_list.count(symbol) > 1)])
assert all(((key not in list(outputs.keys())) for key in inputs.keys()))
self.config = config
if (return_key is not None):
assert (return_key in outputs)
self.return_key = return_key
self.sparse_mat_data: T.Dict[(str, codegen_util.CSCFormat)] = {}
if (sparse_matrices is not None):
assert all(((key in outputs) for key in sparse_matrices))
assert all((isinstance(outputs[key], sf.Matrix) for key in sparse_matrices))
for key in sparse_matrices:
self.sparse_mat_data[key] = codegen_util.CSCFormat.from_matrix(outputs[key])
self.docstring = (docstring or Codegen.default_docstring(inputs=inputs, outputs=outputs)).rstrip()
self.types_included: T.Optional[T.Set[str]] = None
self.typenames_dict: T.Optional[T.Dict[(str, str)]] = None
self.namespaces_dict: T.Optional[T.Dict[(str, str)]] = None
self.unique_namespaces: T.Optional[T.Set[str]] = None
self.namespace: T.Optional[str] = None
_property
def output_symbols(self) -> T.Set[sf.Symbol]:
return sf.S(sf.Matrix(codegen_util.flat_symbols_from_values(self.outputs)).mat).free_symbols
def function(cls, func: T.Callable, config: codegen_config.CodegenConfig, name: T.Optional[str]=None, input_types: T.Sequence[T.ElementOrType]=None, output_names: T.Sequence[str]=None, return_key: str=None, sparse_matrices: T.Sequence[str]=None, docstring: str=None) -> Codegen:
if (name is None):
inner_func = python_util.get_func_from_maybe_bound_function(func)
assert (inner_func.__name__ != '<lambda>'), "Can't deduce name automatically for a lambda"
name = inner_func.__name__
inputs = symbolic_inputs(func, input_types)
res = func(*inputs.values())
inputs = inputs.dataclasses_to_values()
if isinstance(res, tuple):
output_terms = res
if (output_names is None):
output_names = [f'res{i}' for i in range(len(res))]
if (return_key is not None):
assert (return_key in output_names), 'Return key not found in named outputs'
else:
output_terms = (res,)
if (output_names is None):
output_names = ['res']
return_key = output_names[0]
assert (len(output_terms) == len(output_names))
outputs = Values()
for (output_name, output) in zip(output_names, output_terms):
if isinstance(output, (list, tuple)):
output = sf.Matrix(output)
outputs[output_name] = output
if (docstring is None):
inner_func = python_util.get_func_from_maybe_bound_function(func)
if inner_func.__doc__:
docstring = inner_func.__doc__
else:
docstring = Codegen.default_docstring(inputs=inputs, outputs=outputs, original_function=inner_func)
return cls(name=name, inputs=inputs, outputs=outputs, config=config, return_key=return_key, sparse_matrices=sparse_matrices, docstring=textwrap.dedent(docstring))
@staticmethod
def common_data() -> T.Dict[str, T.Any]:
    """Build the dict of helpers and constants shared by all codegen templates.

    NOTE(review): this takes no ``self`` and is called as ``self.common_data()``
    (see generate_function), so it must be a staticmethod; the original
    decorator was lost in transcription.
    """
    data: T.Dict[str, T.Any] = {}
    data['ops'] = ops
    data['Symbol'] = sf.Symbol
    data['Matrix'] = sf.Matrix
    data['DataBuffer'] = sf.DataBuffer
    data['Values'] = Values
    data['pathlib'] = pathlib
    data['path_to_codegen'] = str(CURRENT_DIR)
    data['scalar_types'] = ('double', 'float')
    data['camelcase_to_snakecase'] = python_util.camelcase_to_snakecase
    data['python_util'] = python_util
    data['typing_util'] = typing_util
    data['lcm_type_t_include_dir'] = '<lcmtypes/sym/type_t.hpp>'
    data['sf'] = sf

    def is_symbolic(T: T.Any) -> bool:
        # True for expression or symbol instances.
        return isinstance(T, (sf.Expr, sf.Symbol))

    data['is_symbolic'] = is_symbolic
    data['issubclass'] = issubclass
    data['is_sequence'] = (lambda arg: isinstance(arg, (list, tuple)))

    def should_set_zero(mat: sf.Matrix, zero_initialization_sparsity_threshold: float) -> bool:
        # Zero-initialize a matrix only when its nonzero fraction is below
        # the configured sparsity threshold.
        nnz = 0
        for i in range(mat.shape[0]):
            for j in range(mat.shape[1]):
                if (mat[(i, j)] != 0):
                    nnz += 1
        return ((nnz / (mat.shape[0] * mat.shape[1])) < zero_initialization_sparsity_threshold)

    data['should_set_zero'] = should_set_zero

    def raise_helper(msg: str) -> None:
        # Templates cannot use the `raise` statement directly; expose a callable.
        raise CodeGenerationException(msg)

    data['raise'] = raise_helper
    return data
@property
def print_code_results(self) -> codegen_util.PrintCodeResult:
    """Printed form of the inputs/outputs, computed on each access.

    NOTE(review): restored the ``@property`` decorator, which was garbled to
    a bare ``_property`` statement; total_ops accesses this without calling it.

    Raises:
        CodeGenerationException: If printing fails; the original exception is
            chained as ``__cause__``.
    """
    try:
        return codegen_util.print_code(inputs=self.inputs, outputs=self.outputs, sparse_mat_data=self.sparse_mat_data, config=self.config)
    except (TypeError, LookupError, AttributeError) as ex:
        # Re-raise with a clearer top-level message, keeping the cause chained.
        raise CodeGenerationException('Exception printing code results, see above') from ex
@property
def unused_arguments(self) -> T.List[str]:
    """Names of input arguments whose symbols never appear in any output.

    NOTE(review): restored the ``@property`` decorator (garbled to a bare
    ``_property`` statement in the original).
    """
    results = []
    for (input_name, input_value) in self.inputs.items():
        if isinstance(input_value, sf.DataBuffer):
            # A DataBuffer is itself a symbol rather than storage-backed.
            input_symbols = {input_value}
        else:
            input_symbols = set(ops.StorageOps.to_storage(input_value))
        # An argument is unused when none of its symbols reach the outputs.
        if (not input_symbols.intersection(self.output_symbols)):
            results.append(input_name)
    return results
def total_ops(self) -> int:
    """Return the total op count reported by the printed code results."""
    # Delegates to the PrintCodeResult produced by print_code_results.
    return self.print_code_results.total_ops
def generate_function(self, output_dir: T.Openable=None, lcm_bindings_output_dir: T.Openable=None, shared_types: T.Mapping[(str, str)]=None, namespace: str='sym', generated_file_name: str=None, skip_directory_nesting: bool=False) -> GeneratedPaths:
    """Render this Codegen object to source files on disk.

    Generates the target-language function plus any types needed for
    Values-typed inputs/outputs (via LCM), and returns the written paths.

    Args:
        output_dir: Directory to write into; a temp dir is created if None.
        lcm_bindings_output_dir: Where LCM bindings go; defaults to output_dir.
        shared_types: Mapping from Values keys to existing type names to reuse.
        namespace: Namespace for the generated function (single identifier).
        generated_file_name: Base file name; defaults to self.name.
        skip_directory_nesting: If True, write directly into output_dir instead
            of output_dir/<backend>/symforce/<namespace>.

    Raises:
        InvalidNameError: If self.name is not a valid identifier.
        InvalidNamespaceError: If namespace is not a valid identifier.
    """
    assert (self.name is not None), 'Name should be set either at construction or by with_jacobians'
    if (not self.name.isidentifier()):
        raise InvalidNameError(f'Invalid function name "{self.name}". `name` must be a valid identifier.')
    if (not namespace.isidentifier()):
        raise InvalidNamespaceError(f'Invalid namespace "{namespace}". `namespace` must be a valid identifier (nested namespaces are not supported)')
    if (output_dir is None):
        # No directory given: generate into a fresh temp dir.
        output_dir = Path(tempfile.mkdtemp(prefix=f'sf_codegen_{self.name}_', dir='/tmp'))
        logger.debug(f'Creating temp directory: {output_dir}')
    elif isinstance(output_dir, str):
        output_dir = Path(output_dir)
    assert isinstance(output_dir, Path)
    if (lcm_bindings_output_dir is None):
        lcm_bindings_output_dir = output_dir
    elif isinstance(lcm_bindings_output_dir, str):
        lcm_bindings_output_dir = Path(lcm_bindings_output_dir)
    assert isinstance(lcm_bindings_output_dir, Path)
    if (generated_file_name is None):
        generated_file_name = self.name
    templates = template_util.TemplateList()
    # Values-typed args need generated types; everything else is recorded by
    # type name as already available.
    types_to_generate = []
    self.types_included = set()
    for d in (self.inputs, self.outputs):
        for (key, value) in d.items():
            base_value = codegen_util.get_base_instance(value)
            if isinstance(base_value, Values):
                types_to_generate.append((key, base_value))
            else:
                self.types_included.add(type(base_value).__name__)
    values_indices = {name: gen_type.index() for (name, gen_type) in types_to_generate}
    types_codegen_data = types_package_codegen.generate_types(package_name=namespace, file_name=generated_file_name, values_indices=values_indices, use_eigen_types=self.config.use_eigen_types, shared_types=shared_types, output_dir=os.fspath(output_dir), lcm_bindings_output_dir=os.fspath(lcm_bindings_output_dir), templates=templates)
    # Stash type/namespace metadata for the templates to consume via `spec`.
    self.typenames_dict = types_codegen_data.typenames_dict
    self.namespaces_dict = types_codegen_data.namespaces_dict
    assert (self.namespaces_dict is not None)
    self.unique_namespaces = set(self.namespaces_dict.values())
    self.namespace = namespace
    template_data = dict(self.common_data(), spec=self)
    self.config.update_template_data(data=template_data)
    template_dir = self.config.template_dir()
    backend_name = self.config.backend_name()
    if skip_directory_nesting:
        out_function_dir = output_dir
    else:
        out_function_dir = (((output_dir / backend_name) / 'symforce') / namespace)
    logger.debug(f'Creating {backend_name} function from "{self.name}" at "{out_function_dir}"')
    # Queue every backend template, then render them all in one pass.
    for (source, dest) in self.config.templates_to_render(generated_file_name):
        templates.add(template_path=source, data=template_data, config=self.config.render_template_config, template_dir=template_dir, output_path=(out_function_dir / dest))
    templates.render()
    lcm_data = codegen_util.generate_lcm_types(lcm_type_dir=types_codegen_data.lcm_type_dir, lcm_files=types_codegen_data.lcm_files, lcm_output_dir=types_codegen_data.lcm_bindings_output_dir)
    return GeneratedPaths(output_dir=output_dir, lcm_type_dir=types_codegen_data.lcm_type_dir, function_dir=out_function_dir, python_types_dir=lcm_data.python_types_dir, cpp_types_dir=lcm_data.cpp_types_dir, generated_files=[Path(v.output_path) for v in templates.items])
@staticmethod
def default_docstring(inputs: Values, outputs: Values, original_function: T.Callable=None) -> str:
    """Build a default docstring listing the args and outputs of a generated function.

    Args:
        inputs: Values of the symbolic inputs ('self' entries are skipped).
        outputs: Values of the named outputs.
        original_function: If given, its name is mentioned in the docstring.

    NOTE(review): called as ``Codegen.default_docstring(...)`` with keyword
    args only (no instance), so this must be a staticmethod; the original
    decorator was lost in transcription.
    """
    input_names = [name for (name, arg) in inputs.items() if (name != 'self')]

    def nice_typename(arg: T.Any) -> str:
        # Scalar-like values are reported uniformly as 'Scalar'.
        if typing_util.scalar_like(arg):
            return 'Scalar'
        else:
            return typing_util.get_type(arg).__name__

    input_types = [nice_typename(arg) for (name, arg) in inputs.items() if (name != 'self')]
    output_types = [nice_typename(arg) for arg in outputs.values()]
    if (original_function is not None):
        docstring = f'''
This function was autogenerated from a symbolic function. Do not modify by hand.
Symbolic function: {original_function.__name__}
Args:
'''
    else:
        docstring = '\n This function was autogenerated. Do not modify by hand.\n\n Args:\n '
    arg_descriptions = ''.join([f''' {name}: {input_type}
''' for (name, input_type) in zip(input_names, input_types)])
    output_descriptions = ''.join([f''' {name}: {output_type}
''' for (name, output_type) in zip(outputs.keys(), output_types)])
    return (((textwrap.dedent(docstring) + arg_descriptions) + '\nOutputs:\n') + output_descriptions)
def wrap_docstring_arg_description(preamble: str, description: str, config: codegen_config.CodegenConfig) -> T.List[str]:
return textwrap.wrap(description, width=(config.line_length - len(config.doc_comment_line_prefix)), initial_indent=preamble, subsequent_indent=(' ' * len(preamble)))
def _pick_name_for_function_with_derivatives(self, which_args: T.Sequence[str], include_results: bool, linearization_mode: T.Optional[LinearizationMode]) -> str:
    """Derive a generated-function name from self.name plus derivative info.

    Full linearizations are named ``<base>_factor`` (any ``_residual`` suffix
    is dropped first). Otherwise a ``_jacobian``/``_jacobians`` suffix is
    appended, qualified with the input indices when only a subset of the
    inputs is differentiated.
    """
    assert (self.name is not None), 'Codegen name must have been provided already to automatically generate a name with derivatives'
    base = self.name
    if (linearization_mode == LinearizationMode.FULL_LINEARIZATION):
        # foo_residual -> foo_factor, foo -> foo_factor
        if base.endswith('_residual'):
            base = base[:-len('_residual')]
        return base if base.endswith('_factor') else (base + '_factor')
    if include_results:
        base += '_with'
    suffix = python_util.plural('_jacobian', len(which_args))
    if (len(which_args) != len(self.inputs)):
        # Partial derivative set: tag the suffix with the argument indices.
        input_keys = list(self.inputs.keys())
        suffix += ''.join(str(input_keys.index(arg)) for arg in which_args)
    return base + suffix
def with_linearization(self, which_args: T.Sequence[str]=None, include_result: bool=True, name: str=None, linearization_mode: LinearizationMode=LinearizationMode.FULL_LINEARIZATION, sparse_linearization: bool=False, custom_jacobian: sf.Matrix=None) -> Codegen:
    """Return a new Codegen that also computes derivatives of the single output.

    Adds a jacobian of the output wrt ``which_args`` (default: all inputs);
    in FULL_LINEARIZATION mode additionally emits the Gauss-Newton hessian
    approximation (J^T J, lower triangle) and rhs (J^T b).

    Args:
        which_args: Input names to differentiate with respect to.
        include_result: Whether to keep the original output in the new outputs.
        name: Name for the new function; auto-derived from self.name if None.
        linearization_mode: Jacobian-only or full linearization.
        sparse_linearization: Emit jacobian/hessian as sparse matrices.
        custom_jacobian: Use this jacobian instead of computing one.

    Raises:
        ValueError: In FULL_LINEARIZATION mode when the output is not an
            Nx1 column-vector residual.
    """
    if (which_args is None):
        which_args = list(self.inputs.keys())
    assert which_args, 'Cannot compute a linearization with respect to 0 arguments'
    # This API requires exactly one existing output.
    assert (len(list(self.outputs.keys())) == 1)
    (result_name, result) = list(self.outputs.items())[0]
    docstring_lines = self.docstring.rstrip().split('\n')
    outputs = Values()
    if include_result:
        outputs[result_name] = result
    else:
        # Drop the trailing docstring line describing the removed output.
        docstring_lines = docstring_lines[:(- 1)]
    input_args = [self.inputs[arg] for arg in which_args]
    if (custom_jacobian is not None):
        jacobian = custom_jacobian
    else:
        jacobian = sf.Matrix.block_matrix([jacobian_helpers.tangent_jacobians(result, input_args)])
    docstring_args = [f'{arg_name} ({ops.LieGroupOps.tangent_dim(arg)})' for (arg_name, arg) in zip(which_args, input_args)]
    formatted_arg_list = '{} {}'.format(python_util.plural('arg', len(docstring_args)), ', '.join(docstring_args))
    docstring_lines.extend(self.wrap_docstring_arg_description(' jacobian: ', f'({jacobian.shape[0]}x{jacobian.shape[1]}) jacobian of {result_name} wrt {formatted_arg_list}', self.config))
    outputs['jacobian'] = jacobian
    if (linearization_mode == LinearizationMode.FULL_LINEARIZATION):
        # Factors require a column-vector residual; give targeted errors.
        result_is_vector = (isinstance(result, sf.Matrix) and (result.cols == 1))
        if (not result_is_vector):
            common_msg = f'The output of a factor must be a column vector representing the residual (of shape Nx1). For factor "{self.name}", '
            if typing_util.scalar_like(result):
                raise ValueError((common_msg + 'got a scalar expression instead. Did you mean to wrap it in `sf.V1(expr)`?'))
            if isinstance(result, sf.Matrix):
                raise ValueError((common_msg + f'got a matrix of shape {result.shape} instead'))
            raise ValueError((common_msg + f'got an object of type {type(result)} instead'))
        # Gauss-Newton terms: hessian = J^T J (lower triangle), rhs = J^T b.
        hessian = jacobian.compute_AtA(lower_only=True)
        outputs['hessian'] = hessian
        docstring_lines.extend(self.wrap_docstring_arg_description(' hessian: ', f'({hessian.shape[0]}x{hessian.shape[1]}) Gauss-Newton hessian for {formatted_arg_list}', self.config))
        rhs = (jacobian.T * result)
        outputs['rhs'] = rhs
        docstring_lines.extend(self.wrap_docstring_arg_description(' rhs: ', f'({rhs.shape[0]}x{rhs.shape[1]}) Gauss-Newton rhs for {formatted_arg_list}', self.config))
    # Return the single output by value, otherwise everything by output args.
    return_key = (list(outputs.keys())[0] if (len(list(outputs.keys())) == 1) else None)
    if (not name):
        name = self._pick_name_for_function_with_derivatives(which_args, include_result, linearization_mode)
    sparse_matrices = ([key for key in ('jacobian', 'hessian') if (key in outputs)] if sparse_linearization else None)
    return Codegen(name=name, inputs=self.inputs, outputs=outputs, config=self.config, return_key=return_key, sparse_matrices=sparse_matrices, docstring='\n'.join(docstring_lines))
def with_jacobians(self, which_args: T.Sequence[str]=None, which_results: T.Sequence[int]=(0,), include_results: bool=True, name: str=None, sparse_jacobians: bool=False) -> Codegen:
    """Return a new Codegen that additionally outputs jacobians.

    For each selected result index in ``which_results`` and each input in
    ``which_args``, appends an output named ``<result>_D_<arg>`` holding the
    tangent-space jacobian of that result wrt that argument.

    Args:
        which_args: Input names to differentiate wrt (default: all inputs).
        which_results: Sorted indices of outputs to differentiate.
        include_results: Whether to keep the differentiated outputs themselves.
        name: Name for the new function; auto-derived if None.
        sparse_jacobians: Emit the jacobian outputs as sparse matrices.
    """
    if (which_args is None):
        which_args = list(self.inputs.keys())
    assert which_args, 'Cannot compute a linearization with respect to 0 arguments'
    assert (list(sorted(which_results)) == list(which_results)), 'which_results must be sorted'
    docstring_lines = self.docstring.rstrip().split('\n')
    if include_results:
        outputs = copy.deepcopy(self.outputs)
    else:
        # Keep only the outputs that are NOT differentiated, and strip the
        # docstring lines of the dropped outputs (counted from the back).
        outputs = Values()
        self_outputs_keys = list(self.outputs.keys())
        for i in range(len(self.outputs)):
            if (i not in which_results):
                outputs[self_outputs_keys[i]] = self.outputs[self_outputs_keys[i]]
        for i in which_results:
            index_from_back = ((- len(self.outputs)) + i)
            del docstring_lines[index_from_back]
    input_args = [self.inputs[arg] for arg in which_args]
    all_outputs = list(self.outputs.items())
    all_jacobian_names = []
    for i in which_results:
        (result_name, result) = all_outputs[i]
        arg_jacobians = jacobian_helpers.tangent_jacobians(result, input_args)
        for (arg_name, arg, arg_jacobian) in zip(which_args, input_args, arg_jacobians):
            jacobian_name = f'{result_name}_D_{arg_name}'
            outputs[jacobian_name] = arg_jacobian
            all_jacobian_names.append(jacobian_name)
            result_dim = ops.LieGroupOps.tangent_dim(result)
            arg_dim = ops.LieGroupOps.tangent_dim(arg)
            docstring_lines.append((f' {jacobian_name}: ({result_dim}x{arg_dim}) jacobian of ' + f'{result_name} ({result_dim}) wrt arg {arg_name} ({arg_dim})'))
    # Pick a return key: the single output, the surviving original return
    # key, or nothing (all outputs returned via output arguments).
    if (len(outputs) == 1):
        return_key: T.Optional[str] = list(outputs.keys())[0]
    elif ((self.return_key is not None) and (self.return_key in outputs)):
        return_key = self.return_key
    else:
        return_key = None
    if (not name):
        name = self._pick_name_for_function_with_derivatives(which_args, include_results, linearization_mode=None)
    sparse_matrices = (all_jacobian_names if sparse_jacobians else None)
    return Codegen(name=name, inputs=self.inputs, outputs=outputs, config=self.config, return_key=return_key, sparse_matrices=sparse_matrices, docstring='\n'.join(docstring_lines))
def lambdify(self) -> T.Callable:
    """Generate this function as Python code and load it back as a callable.

    Only supported for PythonConfig backends. A placeholder name is used
    temporarily when self.name is unset and restored to None afterwards.

    Raises:
        TypeError: If self.config is not a PythonConfig.
    """
    if (not isinstance(self.config, PythonConfig)):
        raise TypeError('Lambdify is only supported for Python codegen objects.')
    name_was_none = False
    if (self.name is None):
        # generate_function requires a name; use a placeholder temporarily.
        self.name = 'lambda'
        name_was_none = True
    data = self.generate_function(namespace='lambda')
    # evict flag: presumably drops the loaded module from the import cache
    # unless numba needs it resident — see codegen_util.load_generated_function.
    generated_function = codegen_util.load_generated_function(self.name, data.function_dir, evict=(not self.config.use_numba))
    if name_was_none:
        self.name = None
    return generated_function
def load_url(url, model_dir=None, map_location=None, progress=True):
    """Download a checkpoint from ``url`` into a local cache and torch.load it.

    Args:
        url: Remote checkpoint URL; its basename is the cache filename.
        model_dir: Cache directory; defaults to $TORCH_MODEL_ZOO or
            $TORCH_HOME/models (falling back to ~/.torch/models).
        map_location: Forwarded to torch.load.
        progress: Whether to display a download progress bar.

    Returns:
        The object deserialized by torch.load from the cached file.
    """
    if (model_dir is None):
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(model_dir, exist_ok=True)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    cached_file = os.path.join(model_dir, filename)
    if (not os.path.exists(cached_file)):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        # Filenames without an embedded hash previously crashed with
        # AttributeError on .group(1); skip hash verification instead.
        match = HASH_REGEX.search(filename)
        hash_prefix = match.group(1) if match else None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    return torch.load(cached_file, map_location=map_location)
class CPPTestItem(pytest.Item):
    """Pytest item that runs one C++ (gtest) test binary, optionally preceded
    by a Python AOT-generation script sharing the same temp folder."""

    def __init__(self, *, binary, test, script=None, args=None, **kwargs):
        super().__init__(**kwargs)
        self.binary = binary  # path to the gtest executable
        self.test = test  # gtest filter expression selecting the test
        self.script = script  # optional python script run before the binary
        self.args = args  # CLI argument string for the script

    def runtest(self):
        """Run the optional script, then the gtest binary, in a temp AOT dir."""
        import taichi as ti
        ti_lib_dir = ((Path(ti.__path__[0]) / '_lib') / 'runtime')
        with tempfile.TemporaryDirectory(prefix='ti-cpp-tests-') as tmpdir:
            try:
                env = os.environ.copy()
                # Both halves communicate through TAICHI_AOT_FOLDER_PATH.
                env.update({'TI_DEVICE_MEMORY_GB': '0.5', 'TI_LIB_DIR': str(ti_lib_dir), 'TAICHI_AOT_FOLDER_PATH': tmpdir})
                if self.script:
                    retcode = subprocess.call(f'{sys.executable} {self.script} {self.args}', shell=True, cwd=str(BASE), env=env)
                    # Non-zero exit fails the test before the C++ half runs.
                    (retcode and pytest.fail(f'{self.script} {self.args} reported failure, exit code {retcode}'))
                retcode = subprocess.call(f'{self.binary} --gtest_filter={self.test}', shell=True, cwd=str(BASE), env=env)
                (retcode and pytest.fail(f'C++ part reported failure, exit code {retcode}'))
            except Exception:
                # Capture exc_info so repr_failure can re-render it later.
                excinfo = sys.exc_info()
                raise CPPTestException(excinfo)

    def repr_failure(self, excinfo):
        """Unwrap CPPTestException so pytest shows the original traceback."""
        if isinstance(excinfo.value, CPPTestException):
            return super().repr_failure(ExceptionInfo(excinfo.value.excinfo))
        return super().repr_failure(excinfo)

    def reportinfo(self):
        # (path, line, name) triple used in pytest's test report header.
        return (self.path, 0, self.name)
class SawyerPegUnplugSideV2Policy(Policy):
    """Scripted policy for the peg-unplug-side task: hover above the peg,
    descend to grasp height, close the gripper, then pull along +x.

    NOTE(review): in the original, the helper defs had no ``@staticmethod``
    decorators even though they take no ``self`` and are invoked via
    ``self.<helper>(o_d)`` — that call would raise TypeError; restored here.
    A decorator on ``_parse_obs`` was also garbled into a bare
    ``_fully_parsed`` statement (metaworld policies normally use
    ``@assert_fully_parsed``); that check could not be safely restored.
    """

    @staticmethod
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        return {'hand_pos': obs[:3], 'peg_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        """Map an observation to a 4-dof action (xyz delta + gripper effort)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Target hand position for the current phase of the motion."""
        pos_curr = o_d['hand_pos']
        # Grasp point is offset slightly from the raw peg position.
        pos_peg = (o_d['peg_pos'] + np.array([(- 0.02), 0.0, 0.035]))
        if (np.linalg.norm((pos_curr[:2] - pos_peg[:2])) > 0.04):
            # Not yet aligned in xy: hover 0.2 above the grasp point.
            return (pos_peg + np.array([0.0, 0.0, 0.2]))
        elif (abs((pos_curr[2] - 0.15)) > 0.02):
            # Aligned: descend to grasp height z=0.15.
            return np.array([*pos_peg[:2], 0.15])
        else:
            # Grasped: nudge +x to unplug the peg.
            return (pos_curr + np.array([0.01, 0.0, 0.0]))

    @staticmethod
    def _grab_effort(o_d):
        """Keep the gripper open (-1) until the hand reaches the peg, then
        close gently (0.1)."""
        pos_curr = o_d['hand_pos']
        pos_peg = (o_d['peg_pos'] + np.array([(- 0.02), 0.0, 0.035]))
        if ((np.linalg.norm((pos_curr[:2] - pos_peg[:2])) > 0.04) or (abs((pos_curr[2] - pos_peg[2])) > 0.15)):
            return (- 1.0)
        else:
            return 0.1
def main():
    """Entry point for FixMatch semi-supervised training.

    Parses CLI args, seeds RNGs, builds the WideResNet student/EMA models,
    labeled/unlabeled loaders, SGD optimizer, and warmup-cosine schedule,
    then trains for --n-epoches while logging metrics to tensorboard.
    """
    parser = argparse.ArgumentParser(description='FixMatch Training')
    parser.add_argument('--root', default='./data', type=str, help='dataset directory')
    parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
    parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
    parser.add_argument('--dataset', type=str, default='CIFAR10', help='number of classes in dataset')
    parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
    parser.add_argument('--n-labeled', type=int, default=40, help='number of labeled samples for training')
    parser.add_argument('--n-epoches', type=int, default=1024, help='number of training epoches')
    parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
    parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
    parser.add_argument('--eval-ema', default=True, help='whether to use ema model for evaluation')
    parser.add_argument('--ema-m', type=float, default=0.999)
    parser.add_argument('--n-imgs-per-epoch', type=int, default=(64 * 1024), help='number of training images for each epoch')
    parser.add_argument('--lam-u', type=float, default=1.0, help='coefficient of unlabeled loss')
    parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
    parser.add_argument('--weight-decay', type=float, default=0.0005, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
    parser.add_argument('--seed', type=int, default=1, help='seed for random behaviors, no seed if negtive')
    parser.add_argument('--DA', default=True, help='use distribution alignment')
    parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
    parser.add_argument('--exp-dir', default='FixMatch', type=str, help='experiment directory')
    parser.add_argument('--checkpoint', default='', type=str, help='use pretrained model')
    args = parser.parse_args()
    (logger, output_dir) = setup_default_logging(args)
    logger.info(dict(args._get_kwargs()))
    tb_logger = tensorboard_logger.Logger(logdir=output_dir, flush_secs=2)
    if (args.seed > 0):
        # Seed every RNG source for reproducibility (negative seed = no seeding).
        torch.manual_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)
    # An "epoch" is defined by image count, not by dataset passes.
    n_iters_per_epoch = (args.n_imgs_per_epoch // args.batchsize)
    n_iters_all = (n_iters_per_epoch * args.n_epoches)
    logger.info('***** Running training *****')
    logger.info(f' Task = {args.dataset}{args.n_labeled}')
    (model, criteria_x, criteria_u, ema_model) = set_model(args)
    logger.info('Total params: {:.2f}M'.format((sum((p.numel() for p in model.parameters())) / 1000000.0)))
    (dltrain_x, dltrain_u) = get_train_loader(args.dataset, args.batchsize, args.mu, n_iters_per_epoch, L=args.n_labeled, root=args.root, method='fixmatch')
    dlval = get_val_loader(dataset=args.dataset, batch_size=64, num_workers=2)
    # Exclude batch-norm parameters from weight decay.
    (wd_params, non_wd_params) = ([], [])
    for (name, param) in model.named_parameters():
        if ('bn' in name):
            non_wd_params.append(param)
        else:
            wd_params.append(param)
    param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
    optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
    lr_schdlr = WarmupCosineLrScheduler(optim, n_iters_all, warmup_iter=0)
    # Running distribution of pseudo-label probabilities (distribution alignment).
    prob_list = []
    train_args = dict(model=model, ema_model=ema_model, criteria_x=criteria_x, criteria_u=criteria_u, optim=optim, lr_schdlr=lr_schdlr, dltrain_x=dltrain_x, dltrain_u=dltrain_u, args=args, n_iters=n_iters_per_epoch, logger=logger, prob_list=prob_list)
    best_acc = (- 1)
    best_epoch = 0
    logger.info('start training')
    for epoch in range(args.n_epoches):
        (loss_x, loss_u, mask_mean, guess_label_acc, prob_list) = train_one_epoch(epoch, **train_args)
        (top1, ema_top1) = evaluate(model, ema_model, dlval, criteria_x)
        tb_logger.log_value('loss_x', loss_x, epoch)
        tb_logger.log_value('loss_u', loss_u, epoch)
        tb_logger.log_value('guess_label_acc', guess_label_acc, epoch)
        tb_logger.log_value('test_acc', top1, epoch)
        tb_logger.log_value('test_ema_acc', ema_top1, epoch)
        tb_logger.log_value('mask', mask_mean, epoch)
        if (best_acc < top1):
            best_acc = top1
            best_epoch = epoch
        logger.info('Epoch {}. Acc: {:.4f}. Ema-Acc: {:.4f}. best_acc: {:.4f} in epoch{}'.format(epoch, top1, ema_top1, best_acc, best_epoch))
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for (optionally distributed) triplet training.

    Builds the audio and text encoders, datasets and loaders, then runs the
    train/validate loop with best-checkpoint saving and early stopping.

    Args:
        gpu: GPU index assigned to this worker process.
        ngpus_per_node: GPUs per node, used to derive the global rank.
        args: Parsed CLI namespace holding all hyperparameters.
    """
    args.gpu = gpu
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        # Silence printing on every process except the first one per node.
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Audio pipeline: mel frontend -> conv frontend -> transformer encoder.
    audio_preprocessr = TFRep(sample_rate=args.sr, f_min=0, f_max=int((args.sr / 2)), n_fft=args.n_fft, win_length=args.win_length, hop_length=int((0.01 * args.sr)), n_mels=args.mel_dim)
    frontend = ResFrontEnd(input_size=(args.mel_dim, (int((100 * args.duration)) + 1)), conv_ndim=128, attention_ndim=args.attention_ndim, mix_type=args.mix_type)
    audio_encoder = MusicTransformer(audio_representation=audio_preprocessr, frontend=frontend, audio_rep=args.audio_rep, attention_nlayers=args.attention_nlayers, attention_ndim=args.attention_ndim)
    # Text side: a BERT encoder, or precomputed GloVe tag embeddings.
    if (args.text_type == 'bert'):
        text_encoder = AutoModel.from_pretrained(args.text_backbone)
        tokenizer = AutoTokenizer.from_pretrained(args.text_backbone)
        args.text_dim = 768
    elif (args.text_type == 'glove'):
        text_encoder = nn.Identity()
        tokenizer = torch.load(os.path.join(args.data_dir, 'ecals_annotation', 'glove_tag_embs.pt'))
        args.text_dim = 300
    args.audio_dim = args.attention_ndim
    model = TripletModel(audio_encoder=audio_encoder, text_encoder=text_encoder, text_type=args.text_type, audio_dim=args.audio_dim, text_dim=args.text_dim, mlp_dim=args.mlp_dim, margin=args.margin)
    train_dataset = ECALS_Dataset(data_path=args.data_dir, split='TRAIN', sr=args.sr, duration=args.duration, num_chunks=args.num_chunks, text_preprocessor=tokenizer, text_type=args.text_type, text_rep=args.text_rep)
    val_dataset = ECALS_Dataset(data_path=args.data_dir, split='VALID', sr=args.sr, duration=args.duration, num_chunks=args.num_chunks, text_preprocessor=tokenizer, text_type=args.text_type, text_rep=args.text_rep)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
    else:
        train_sampler = None
        val_sampler = None
    # shuffle must be off when a DistributedSampler supplies the ordering.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), collate_fn=train_dataset.batch_processor, num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=(val_sampler is None), collate_fn=train_dataset.batch_processor, num_workers=args.workers, pin_memory=True, sampler=val_sampler, drop_last=True)
    if args.distributed:
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
    else:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    earlystopping_callback = EarlyStopping()
    cudnn.benchmark = True
    save_dir = f'exp/{args.arch}_{args.frontend}_{args.mix_type}_{args.audio_rep}/{args.text_type}_{args.text_rep}/'
    logger = Logger(save_dir)
    save_hparams(args, save_dir)
    best_val_loss = np.inf
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the samplers so each epoch shuffles differently.
            train_sampler.set_epoch(epoch)
            val_sampler.set_epoch(epoch)
        train(train_loader, model, optimizer, epoch, logger, args)
        val_loss = validate(val_loader, model, epoch, args)
        logger.log_val_loss(val_loss, epoch)
        if (val_loss < best_val_loss):
            # Keep only the best-so-far checkpoint.
            torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, f'{save_dir}/best.pth')
            best_val_loss = val_loss
        earlystopping_callback(val_loss, best_val_loss)
        if earlystopping_callback.early_stop:
            print('We are at epoch:', epoch)
            break
class Inception3(nn.Module):
    """Inception-v3 classification network (stem, mixed blocks, optional
    auxiliary classifier head used during training)."""

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits  # enable the auxiliary training head
        self.transform_input = transform_input  # re-normalize inputs in forward()
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)
        # Truncated-normal init for conv/linear weights; BN set to identity.
        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)):
                import scipy.stats as stats
                stddev = (m.stddev if hasattr(m, 'stddev') else 0.1)
                X = stats.truncnorm((- 2), 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        if self.transform_input:
            # Rescale per-channel stats toward ImageNet normalization.
            # NOTE(review): this indexes x[0]/x[1]/x[2], i.e. the FIRST
            # dimension. For a batched NCHW tensor that scales the first three
            # samples, not the channels (torchvision's reference uses
            # x[:, 0] etc.) — confirm the expected input rank.
            x = x.clone()
            x[0] = ((x[0] * (0.229 / 0.5)) + ((0.485 - 0.5) / 0.5))
            x[1] = ((x[1] * (0.224 / 0.5)) + ((0.456 - 0.5) / 0.5))
            x[2] = ((x[2] * (0.225 / 0.5)) + ((0.406 - 0.5) / 0.5))
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        # Auxiliary head branches off the 17x17 feature map (training only).
        if (self.training and self.aux_logits):
            aux = self.AuxLogits(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        x = F.avg_pool2d(x, kernel_size=8)
        x = F.dropout(x, training=self.training)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        if (self.training and self.aux_logits):
            return (x, aux)
        return x
class CartanType_affine(CartanType, cartan_type.CartanType_affine):
    """Dual of an affine Cartan type (twisted affine types in Kac notation)."""

    def classical(self):
        """Return the classical type: the dual of the dual's classical type."""
        return self.dual().classical().dual()

    def basic_untwisted(self):
        """Return the untwisted type this twisted affine type folds from.

        NOTE(review): falls through (returns None) for dual types other than
        B/BC/C/F/G — presumably unreachable for valid inputs; confirm.
        """
        from . import cartan_type
        if (self.dual().type() == 'B'):
            return cartan_type.CartanType(['A', ((self.classical().rank() * 2) - 1)])
        elif (self.dual().type() == 'BC'):
            return cartan_type.CartanType(['A', (self.classical().rank() * 2)])
        elif (self.dual().type() == 'C'):
            return cartan_type.CartanType(['D', (self.classical().rank() + 1)])
        elif (self.dual().type() == 'F'):
            return cartan_type.CartanType(['E', 6])
        elif (self.dual().type() == 'G'):
            return cartan_type.CartanType(['D', 4])

    def special_node(self):
        """The special (affine) node, inherited unchanged from the dual."""
        return self.dual().special_node()

    def _repr_(self, compact=False):
        """String form; uses twisted Kac notation when the option is set."""
        if (self.options.notation == 'Kac'):
            if (self._type.type() == 'B'):
                if compact:
                    return ('A%s^2' % ((self.classical().rank() * 2) - 1))
                return ("['A', %s, 2]" % ((self.classical().rank() * 2) - 1))
            elif (self._type.type() == 'BC'):
                # BC keeps the default (dual) notation below.
                pass
            elif (self._type.type() == 'C'):
                if compact:
                    return ('D%s^2' % self.rank())
                return ("['D', %s, 2]" % self.rank())
            elif (self._type.type() == 'F'):
                if compact:
                    return 'E6^2'
                return "['E', 6, 2]"
        return CartanType._repr_(self, compact)

    def _latex_(self):
        """LaTeX form; Kac notation when enabled, else dual-decorated base."""
        if (self.options('notation') == 'Kac'):
            if (self._type.type() == 'B'):
                return ('A_{%s}^{(2)}' % ((self.classical().rank() * 2) - 1))
            elif (self._type.type() == 'BC'):
                return ('A_{%s}^{(2)\\dagger}' % (2 * self.classical().rank()))
            elif (self._type.type() == 'C'):
                return ('D_{%s}^{(2)}' % self.rank())
            elif (self._type.type() == 'F'):
                return 'E_6^{(2)}'
        result = self._type._latex_()
        import re
        # If the base already carries a ^{(k)} superscript, splice the dual
        # marker inside it; otherwise wrap and add the marker as an exponent.
        if re.match('.*\\^{\\(\\d\\)}$', result):
            return ('%s%s}' % (result[:(- 1)], self.options('dual_latex')))
        else:
            return ('{%s}^%s' % (result, self.options('dual_latex')))

    def _default_folded_cartan_type(self):
        """Return this type realized as a folding of an untwisted affine type,
        with the orbit of folded nodes for each node of this type."""
        from sage.combinat.root_system.type_folded import CartanTypeFolded
        letter = self._type.type()
        if (letter == 'BC'):
            n = self._type.classical().rank()
            return CartanTypeFolded(self, ['A', ((2 * n) - 1), 1], (([[0]] + [[i, ((2 * n) - i)] for i in range(1, n)]) + [[n]]))
        if (letter == 'B'):
            n = self._type.classical().rank()
            return CartanTypeFolded(self, ['D', (n + 1), 1], ([[i] for i in range(n)] + [[n, (n + 1)]]))
        if (letter == 'C'):
            n = self._type.classical().rank()
            return CartanTypeFolded(self, ['A', ((2 * n) - 1), 1], (([[0]] + [[i, ((2 * n) - i)] for i in range(1, n)]) + [[n]]))
        if (letter == 'F'):
            return CartanTypeFolded(self, ['E', 6, 1], [[0], [2], [4], [3, 5], [1, 6]])
        if (letter == 'G'):
            return CartanTypeFolded(self, ['D', 4, 1], [[0], [1, 3, 4], [2]])
        return super()._default_folded_cartan_type()
def dtype_to_cudadatatype(dtype: dtypes.typeclass) -> str:
    """Map a dtype to the matching ``cudaDataType_t`` enumerator name.

    Raises:
        KeyError: If ``dtype`` has no cudaDataType_t counterpart.
    """
    cuda_type_names = {
        dtypes.float16: 'CUDA_R_16F',
        dtypes.float32: 'CUDA_R_32F',
        dtypes.complex64: 'CUDA_C_32F',
        dtypes.float64: 'CUDA_R_64F',
        dtypes.complex128: 'CUDA_C_64F',
        dtypes.int8: 'CUDA_R_8I',
        dtypes.uint8: 'CUDA_R_8U',
        dtypes.int32: 'CUDA_R_32I',
    }
    return cuda_type_names[dtype]
# NOTE(review): this bare call looks like a garbled registration decorator
# (HELM run specs are normally registered via a decorator such as
# `@run_spec_function("truthful_qa")`) — confirm against the original file.
_spec_function('truthful_qa')
def get_truthful_qa_spec(task: str, method: str=ADAPT_MULTIPLE_CHOICE_JOINT) -> RunSpec:
    """Build the RunSpec for the TruthfulQA scenario.

    Args:
        task: TruthfulQA sub-task name, forwarded to the scenario.
        method: Multiple-choice adaptation method (default: joint).
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.truthful_qa_scenario.TruthfulQAScenario', args={'task': task})
    adapter_spec = get_multiple_choice_adapter_spec(method=method, instructions='', input_noun='Question', output_noun='Answer')
    return RunSpec(name=f'truthful_qa:task={task},method={method}', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=get_exact_match_metric_specs(), groups=['truthful_qa'])
# NOTE(review): this bare call looks like a garbled registration decorator
# for the processor registry — confirm against the original file.
_processor('frcnn_preprocess')
class FRCNNPreprocess(BaseProcessor):
    """Image preprocessor for a Faster R-CNN visual backbone: resize the
    shortest edge, flip channel order, normalize, and pad into a batch."""

    class FRCNNPreprocessConfig():
        # NOTE(review): annotated fields with omegaconf.MISSING defaults
        # suggest this was originally a structured config (e.g. a dataclass);
        # the decorator appears lost in transcription — confirm.
        model: omegaconf.DictConfig = omegaconf.MISSING
        input: omegaconf.DictConfig = omegaconf.MISSING
        size_divisibility: int = 0
        pad_value: float = 0

    def __init__(self, config: FRCNNPreprocessConfig, *args, **kwargs):
        # NOTE(review): no super().__init__() call — confirm BaseProcessor
        # does not require initialization.
        config_input = config.get('input', None)
        assert (config_input is not None)
        min_size_test = config_input.get('min_size_test', 800)
        max_size_test = config_input.get('max_size_test', 1333)
        self.aug = ResizeShortestEdge([min_size_test, min_size_test], max_size_test)
        self.input_format = config_input.get('format', 'BGR')
        self.size_divisibility = config.get('size_divisibility', 0)
        self.pad_value = config.get('pad_value', 0)
        self.max_image_size = max_size_test
        config_model = config.get('model', None)
        assert (config_model is not None)
        self.device = config_model.get('device', 'cpu')
        config_pixel_std = config_model.get('pixel_std', [1.0, 1.0, 1.0])
        self.pixel_std = torch.tensor(config_pixel_std).to(self.device).view(len(config_pixel_std), 1, 1)
        config_pixel_mean = config_model.get('pixel_mean', [102.9801, 115.9465, 122.7717])
        # NOTE(review): reuses len(config_pixel_std) for the mean's view;
        # works only while both lists have the same length (3) — confirm.
        self.pixel_mean = torch.tensor(config_pixel_mean).to(self.device).view(len(config_pixel_std), 1, 1)
        self.normalizer = (lambda x: ((x - self.pixel_mean) / self.pixel_std))

    def pad(self, images: List[torch.Tensor]):
        """Pad all images to the per-dimension max size and stack them.

        Returns:
            (stacked padded images, tensor of original HxW sizes).
        """
        max_size = tuple((max(s) for s in zip(*[img.shape for img in images])))
        image_sizes = [im.shape[(- 2):] for im in images]
        images = [F.pad(im, [0, (max_size[(- 1)] - size[1]), 0, (max_size[(- 2)] - size[0])], value=self.pad_value) for (size, im) in zip(image_sizes, images)]
        return (torch.stack(images), torch.tensor(image_sizes))

    def __call__(self, images: torch.Tensor, single_image: bool=False):
        """Preprocess one image or a list of images.

        Returns:
            (padded image batch, sizes, y/x rescale factors); the single
            leading element of each when ``single_image`` is True.
        """
        with torch.no_grad():
            if (not isinstance(images, list)):
                images = [images]
            if single_image:
                assert (len(images) == 1)
            for i in range(len(images)):
                # In-place element replacement: move tensors to the device,
                # load/convert anything else via img_tensorize.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif (not isinstance(images[i], torch.Tensor)):
                    # NOTE(review): this elif condition is always True when
                    # reached (the if already handled tensors) — redundant.
                    images.insert(i, torch.as_tensor(img_tensorize(images.pop(i))).to(self.device).float())
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            for idx in range(len(images)):
                # Flip the channel axis (e.g. RGB <-> BGR).
                images[idx] = torch.flip(images[idx], [0])
            images = [self.normalizer(x) for x in images]
            (images, sizes) = self.pad(images)
            if (self.size_divisibility > 0):
                raise NotImplementedError()
            # Per-image scale factors from original to padded size.
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return (images[0], sizes[0], scales_yx[0])
            else:
                return (images, sizes, scales_yx)
class StanfordModel():
    """Tokenizes GAP-style coreference examples with a Stanford CoreNLP
    client and converts character offsets into token offsets/spans."""
    def __init__(self, model):
        # `model` is expected to expose `api_call(text, properties=...)`
        # (a CoreNLP client wrapper) -- confirm against the caller.
        self.model = model
    def tokenize(self, text, a, b, pronoun_offset, a_offset, b_offset, **kwargs):
        """Tokenize `text` and map the pronoun/A/B character offsets to tokens.

        Returns (doc, tokens, pronoun_offset, a_offset, b_offset, a_span,
        b_span, pronoun_token, a_tokens, b_tokens), where offsets are token
        indices and spans are inclusive [start, end] token pairs.
        """
        res = self.model.api_call(text, properties={'annotators': 'tokenize,ssplit'})
        res = AttrDict(res)
        # Cumulative token counts per sentence, used to flatten sentence-local
        # token indices into document-level indices.
        sent_lens = ([0] + [len(sent.tokens) for sent in res.sentences])
        sent_lens = np.cumsum(sent_lens)
        doc = []
        for (i, sent) in enumerate(res.sentences):
            assert (i == sent.index)
            for (j, token) in enumerate(sent.tokens):
                # CoreNLP token indices are 1-based within each sentence.
                assert ((j + 1) == token.index)
                doc.append(AttrDict({'i': ((token.index + sent_lens[i]) - 1), 'idx': token.characterOffsetBegin, 'text': token.originalText, 'word': token.word}))
        # Inclusive character index of the last character of each mention.
        a_end_idx = ((a_offset + len(a)) - 1)
        b_end_idx = ((b_offset + len(b)) - 1)
        pronoun_offset = map_chars_to_tokens(doc, pronoun_offset)
        pronoun_token = doc[pronoun_offset]
        a_offset = map_chars_to_tokens(doc, a_offset)
        token_end = map_chars_to_tokens(doc, a_end_idx)
        a_span = [a_offset, token_end]
        a_tokens = doc[a_offset:(token_end + 1)]
        b_offset = map_chars_to_tokens(doc, b_offset)
        token_end = map_chars_to_tokens(doc, b_end_idx)
        b_span = [b_offset, token_end]
        b_tokens = doc[b_offset:(token_end + 1)]
        tokens = [tok.text for tok in doc]
        return (doc, tokens, pronoun_offset, a_offset, b_offset, a_span, b_span, pronoun_token, a_tokens, b_tokens)
class SliceCombinerTest(unittest.TestCase):
    """Unit tests for SliceCombinerModule's attention-style reweighting of
    per-slice representations."""

    @classmethod
    def setUpClass(cls):
        # Fix: unittest invokes this as `cls.setUpClass()`; without
        # @classmethod that call raised TypeError (missing argument).
        random.seed(123)
        np.random.seed(123)
        torch.manual_seed(123)

    def test_forward_shape(self):
        """Output shape is (batch, h_dim); >2-class pred heads are rejected."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        outputs = {'task_slice:base_ind_head': torch.FloatTensor(batch_size, 2).uniform_(0, 1), 'task_slice:base_pred_transform': torch.FloatTensor(batch_size, h_dim).uniform_(0, 1), 'task_slice:base_pred_head': torch.FloatTensor(batch_size, num_classes).uniform_(0, 1)}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertEqual(tuple(combined_rep.shape), (batch_size, h_dim))
        # Degenerate batch/hidden sizes must still work.
        batch_size = 1
        h_dim = 1
        num_classes = 2
        outputs = {'task_slice:base_ind_head': torch.FloatTensor(batch_size, 2).uniform_(0, 1), 'task_slice:base_pred_transform': torch.FloatTensor(batch_size, h_dim).uniform_(0, 1), 'task_slice:base_pred_head': torch.FloatTensor(batch_size, num_classes).uniform_(0, 1)}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertEqual(tuple(combined_rep.shape), (batch_size, h_dim))
        # A single-logit pred head is unsupported.
        batch_size = 1
        h_dim = 1
        num_classes = 1
        outputs = {'task_slice:base_ind_head': torch.FloatTensor(batch_size, 2).uniform_(0, 1), 'task_slice:base_pred_transform': torch.FloatTensor(batch_size, h_dim).uniform_(0, 1), 'task_slice:base_pred_head': torch.FloatTensor(batch_size, num_classes).uniform_(0, 1)}
        combiner_module = SliceCombinerModule()
        with self.assertRaisesRegex(NotImplementedError, 'requires output shape'):
            combined_rep = combiner_module(outputs)

    def test_average_reweighting(self):
        """Equal-and-opposite ind/pred scores average the two transforms."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        outputs = {'task_slice:a_ind_head': (torch.ones(batch_size, 2) * 10.0), 'task_slice:a_pred_transform': (torch.ones(batch_size, h_dim) * 4), 'task_slice:a_pred_head': (torch.ones(batch_size, num_classes) * 10.0), 'task_slice:b_ind_head': (torch.ones(batch_size, 2) * (- 10.0)), 'task_slice:b_pred_transform': (torch.ones(batch_size, h_dim) * 2), 'task_slice:b_pred_head': (torch.ones(batch_size, num_classes) * (- 10.0))}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.allclose(combined_rep, (torch.ones(batch_size, h_dim) * 3)))

    def test_average_reweighting_by_ind(self):
        """With uninformative pred heads, opposite ind scores still average."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        outputs = {'task_slice:a_ind_head': (torch.ones(batch_size, 2) * 10.0), 'task_slice:a_pred_transform': (torch.ones(batch_size, h_dim) * 4), 'task_slice:a_pred_head': torch.zeros(batch_size, num_classes), 'task_slice:b_ind_head': (torch.ones(batch_size, 2) * (- 10.0)), 'task_slice:b_pred_transform': (torch.ones(batch_size, h_dim) * 2), 'task_slice:b_pred_head': torch.zeros(batch_size, num_classes)}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.allclose(combined_rep, (torch.ones(batch_size, h_dim) * 3)))

    def test_average_reweighting_by_pred_confidence(self):
        """With uninformative ind heads, pred confidence drives the weights."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        outputs = {'task_slice:a_ind_head': torch.zeros(batch_size, 2), 'task_slice:a_pred_transform': (torch.ones(batch_size, h_dim) * 4), 'task_slice:a_pred_head': (torch.ones(batch_size, num_classes) * 5), 'task_slice:b_ind_head': torch.zeros(batch_size, 2), 'task_slice:b_pred_transform': (torch.ones(batch_size, h_dim) * 2), 'task_slice:b_pred_head': (torch.ones(batch_size, num_classes) * 5)}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.all((combined_rep == (torch.ones(batch_size, h_dim) * 3))))
        outputs = {'task_slice:a_ind_head': torch.zeros(batch_size, 2), 'task_slice:a_pred_transform': (torch.ones(batch_size, h_dim) * 4), 'task_slice:a_pred_head': (torch.ones(batch_size, num_classes) * (- 5)), 'task_slice:b_ind_head': torch.zeros(batch_size, 2), 'task_slice:b_pred_transform': (torch.ones(batch_size, h_dim) * 2), 'task_slice:b_pred_head': (torch.ones(batch_size, num_classes) * 5)}
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.allclose(combined_rep, (torch.ones(batch_size, h_dim) * 3)))

    def test_many_slices(self):
        """Averaging behavior holds with many alternating slices."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        outputs = {}
        for i in range(100):
            if ((i % 2) == 0):
                outputs[f'task_slice:{i}_ind_head'] = (torch.ones(batch_size, 2) * 20.0)
                outputs[f'task_slice:{i}_pred_transform'] = (torch.ones(batch_size, h_dim) * 4)
                outputs[f'task_slice:{i}_pred_head'] = (torch.ones(batch_size, num_classes) * 20.0)
            else:
                outputs[f'task_slice:{i}_ind_head'] = (torch.ones(batch_size, 2) * (- 20.0))
                outputs[f'task_slice:{i}_pred_transform'] = (torch.ones(batch_size, h_dim) * 2)
                outputs[f'task_slice:{i}_pred_head'] = (torch.ones(batch_size, num_classes) * (- 20.0))
        combiner_module = SliceCombinerModule()
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.allclose(combined_rep, (torch.ones(batch_size, h_dim) * 3)))

    def test_combiner_multiclass(self):
        """Multiclass (>2) pred heads raise NotImplementedError."""
        batch_size = 4
        h_dim = 20
        num_classes = 10
        # Fix: randint's upper bound is inclusive, so the original
        # randint(0, num_classes) could produce index == num_classes, which is
        # out of range for scatter_ along a dimension of size num_classes.
        max_score_indexes_a = [random.randint(0, num_classes - 1) for _ in range(batch_size)]
        pred_outputs_a = torch.FloatTensor(batch_size, num_classes).uniform_((- 5), 5)
        pred_outputs_a = pred_outputs_a.scatter_(1, torch.tensor(max_score_indexes_a).unsqueeze(1), 10.0)
        max_score_indexes_b = [random.randint(0, num_classes - 1) for _ in range(batch_size)]
        pred_outputs_b = torch.FloatTensor(batch_size, num_classes).uniform_((- 5), 5)
        pred_outputs_b = pred_outputs_b.scatter_(1, torch.tensor(max_score_indexes_b).unsqueeze(1), 10.0)
        outputs = {'task_slice:a_ind_head': (torch.ones(batch_size, 2) * (- 10.0)), 'task_slice:a_pred_transform': (torch.ones(batch_size, h_dim) * 4), 'task_slice:a_pred_head': pred_outputs_a, 'task_slice:b_ind_head': (torch.ones(batch_size, 2) * 10.0), 'task_slice:b_pred_transform': (torch.ones(batch_size, h_dim) * 2), 'task_slice:b_pred_head': pred_outputs_b}
        combiner_module = SliceCombinerModule()
        with self.assertRaisesRegex(NotImplementedError, 'more than 2 classes'):
            combiner_module(outputs)

    def test_temperature(self):
        """High temperature averages transforms; tiny temperature collapses
        onto one of the two transform values per element."""
        batch_size = 4
        h_dim = 20
        num_classes = 2
        epsilon = 1e-05
        outputs = {'task_slice:a_ind_head': ((torch.ones(batch_size, 2) * 10.0) + torch.FloatTensor(batch_size, 2).normal_(0.0, epsilon)), 'task_slice:a_pred_transform': ((torch.ones(batch_size, h_dim) * 4) + torch.FloatTensor(batch_size, h_dim).normal_(0.0, epsilon)), 'task_slice:a_pred_head': ((torch.ones(batch_size, num_classes) * 10.0) + torch.FloatTensor(batch_size, num_classes).normal_(0.0, epsilon)), 'task_slice:b_ind_head': ((torch.ones(batch_size, 2) * (- 10.0)) + torch.FloatTensor(batch_size, 2).normal_(0.0, epsilon)), 'task_slice:b_pred_transform': ((torch.ones(batch_size, h_dim) * 2) + torch.FloatTensor(batch_size, h_dim).normal_(0.0, epsilon)), 'task_slice:b_pred_head': ((torch.ones(batch_size, num_classes) * (- 10.0)) + torch.FloatTensor(batch_size, num_classes).normal_(0.0, epsilon))}
        combiner_module = SliceCombinerModule(temperature=100000.0)
        combined_rep = combiner_module(outputs)
        self.assertTrue(torch.allclose(combined_rep, (torch.ones(batch_size, h_dim) * 3)))
        combiner_module = SliceCombinerModule(temperature=1e-15)
        combined_rep = combiner_module(outputs)
        # Fix: the variable names were swapped relative to the values compared
        # (isclose_four checked against *2 and vice versa); the sum is the
        # same, but the names were misleading.
        isclose_two = torch.isclose(combined_rep, (torch.ones(batch_size, h_dim) * 2), atol=0.0001)
        isclose_four = torch.isclose(combined_rep, (torch.ones(batch_size, h_dim) * 4), atol=0.0001)
        num_matching_original = (torch.sum(isclose_two) + torch.sum(isclose_four))
        self.assertEqual(num_matching_original, (batch_size * h_dim))
# NOTE(review): the two lines below look like stripped decorators --
# presumably `@pytest.mark.parametrize(...)` and `@test_utils.test(...)`;
# confirm against the original file.
.parametrize('a00', [float(i) for i in range(10)])
_utils.test(require=ti.extension.data64, default_fp=ti.f64, fast_math=False)
def test_solve_3x3_f64(a00):
    # Delegate to the shared 3x3 linear-solve check using 64-bit floats.
    _test_solve_3x3(ti.f64, a00)
def main():
    """CLI entry point: generate toy data, fit the selected model variant
    (ERM / oversample / undersample), and save the error logs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--n', type=int, required=True)
    parser.add_argument('--toy_example_name', choices=['random_projections', 'no_projections'], required=True)
    # Data-generation parameters for the toy distributions.
    parser.add_argument('--p_correlation', type=float)
    parser.add_argument('--mean_causal', type=float)
    parser.add_argument('--mean_spurious', type=float)
    parser.add_argument('--var_causal', type=float)
    parser.add_argument('--d_causal', type=int)
    parser.add_argument('--d_spurious', type=int)
    parser.add_argument('--var_spurious', type=float)
    parser.add_argument('--d_noise', type=int)
    parser.add_argument('--mean_noise', type=float)
    parser.add_argument('--var_noise', type=float)
    # May be given multiple times (action='append').
    parser.add_argument('-N', '--n_random_features', type=int, action='append')
    parser.add_argument('-m', '--model_type', choices=['ridge', 'logistic'], required=True)
    parser.add_argument('-L', '--Lambda', type=float, default=None)
    parser.add_argument('-e', '--error_type', choices=['zero_one', 'squared'], required=True)
    parser.add_argument('-o', '--outfile', required=True)
    parser.add_argument('--model_file')
    parser.add_argument('-q', '--quiet', action='store_true')
    parser.add_argument('-s', '--seed', type=int, default=0)
    args = parser.parse_args()
    np.random.seed(args.seed)
    # NOTE(review): process_args presumably derives args.random_features,
    # args.data_generation_fn, args.fit_model_fn, args.error_fn, etc. from the
    # raw arguments -- confirm against its definition.
    process_args(args)
    if args.random_features:
        (full_data, n_groups) = generate_toy_data(args.data_generation_fn, args.data_args)
        (erm_error, over_error, under_error) = run_random_features_model(full_data=full_data, n_groups=n_groups, N=args.n_random_features, fit_model_fn=args.fit_model_fn, error_fn=args.error_fn, model_kwargs=args.model_kwargs, verbose=(not args.quiet))
    else:
        (erm_error, over_error, under_error) = run_no_projection_model(data_generation_fn=args.data_generation_fn, data_args=args.data_args, N=args.n_random_features, fit_model_fn=args.fit_model_fn, error_fn=args.error_fn, model_kwargs=args.model_kwargs, verbose=(not args.quiet), model_file=args.model_file)
    save_error_logs(args.outfile, [erm_error, over_error, under_error], ['ERM', 'oversample', 'undersample'])
# NOTE(review): `_task(...)` looks like a stripped registration decorator
# (presumably `@register_task("multilingual_translation_latent_depth")`);
# confirm against the original file.
_task('multilingual_translation_latent_depth')
class MultilingualTranslationTaskLatentDepth(MultilingualTranslationTask):
    """Multilingual translation with latent layer selection (latent depth):
    each language pair learns which encoder/decoder layers to use, with KL
    and sparsity regularizers on the layer-selection distributions."""

    @staticmethod
    def add_args(parser):
        """Add latent-depth options on top of the base task's arguments.

        Fix: restored @staticmethod -- add_args takes no self/cls and is
        invoked through the class by the argument-parsing framework.
        """
        MultilingualTranslationTask.add_args(parser)
        parser.add_argument('--encoder-latent-layer', action='store_true', help='latent layer selection in encoder')
        parser.add_argument('--decoder-latent-layer', action='store_true', help='latent layer selection in decoder')
        parser.add_argument('--target-layers', default=(- 1), type=int, help='number of effective layers to learn; -1 means no constraint')
        parser.add_argument('--sparsity-weight', default=0.0, type=float, help='weight for sparsity loss')
        parser.add_argument('--share-weight', default=0.0, type=float, help='weight for sharing loss')
        parser.add_argument('--soft-update', default=1, type=int, help='number of updates with soft sampling')
        parser.add_argument('--anneal-updates', default=1, type=int, help='number of updates to anneal the KL loss weight')
        parser.add_argument('--prior', default='uniform', type=str, help='prior used for computing KL loss')

    def __init__(self, args, dicts, training):
        super().__init__(args, dicts, training)
        (self.src_langs, self.tgt_langs) = zip(*[(lang.split('-')[0], lang.split('-')[1]) for lang in args.lang_pairs])
        # Latent layer selection requires shared encoders/decoders so the
        # selection distribution is shared across language pairs.
        if (self.training and self.encoder_latent_layer):
            assert self.args.share_encoders
        if (self.training and self.decoder_latent_layer):
            assert self.args.share_decoders
        if (training or self.encoder_latent_layer or self.decoder_latent_layer):
            self.lang_pairs = args.lang_pairs
        else:
            self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        self.eval_lang_pairs = self.lang_pairs
        self.model_lang_pairs = self.lang_pairs
        if (self.training and (self.encoder_latent_layer or self.decoder_latent_layer)):
            self.kl_loss = LatentLayersKLLoss(self.args)
            self.sparsity_loss = LatentLayersSparsityLoss(self.args)

    def _per_lang_pair_train_loss(self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad):
        """Compute one language pair's training loss plus KL regularization
        on the layer-selection samples, and run backward."""
        (src, tgt) = lang_pair.split('-')
        if self.encoder_latent_layer:
            src_lang_idx = self.src_lang_idx_dict[src]
            model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
            # Switch from soft to hard layer sampling after soft_update steps.
            model.models[lang_pair].encoder.layer_select.hard_select = (update_num > self.args.soft_update)
        if self.decoder_latent_layer:
            tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
            model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
            model.models[lang_pair].decoder.layer_select.hard_select = (update_num > self.args.soft_update)
        (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
        if self.encoder_latent_layer:
            none_samples = sum(((1 if (x is None) else 0) for x in model.models[lang_pair].encoder.layer_select.layer_samples))
            if ((none_samples == 0) or (self.args.prior != 'agged_posterior')):
                loss += self.kl_loss(model.models[lang_pair].encoder.layer_select.layer_samples, src_lang_idx, update_num, sample_size)
        if self.decoder_latent_layer:
            none_samples = sum(((1 if (x is None) else 0) for x in model.models[lang_pair].decoder.layer_select.layer_samples))
            if ((none_samples == 0) or (self.args.prior != 'agged_posterior')):
                loss += self.kl_loss(model.models[lang_pair].decoder.layer_select.layer_samples, tgt_lang_idx, update_num, sample_size)
        if ignore_grad:
            loss *= 0
        if (hasattr(self, 'sparsity_loss') and self.sparsity_loss.is_valid(update_num)):
            # Keep the graph alive: the sparsity loss is backpropagated
            # separately in train_step.
            loss.backward(retain_graph=True)
        else:
            optimizer.backward(loss)
        return (loss, sample_size, logging_output)

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """Aggregate per-pair losses, then backprop the sparsity loss once."""
        (agg_loss, agg_sample_size, agg_logging_output) = super().train_step(sample, model, criterion, optimizer, update_num, ignore_grad)
        if (hasattr(self, 'sparsity_loss') and self.sparsity_loss.is_valid(update_num)):
            sparsity_loss = 0
            if self.encoder_latent_layer:
                sparsity_loss += self.sparsity_loss(next(iter(model.models.values())).encoder.layer_select.layer_samples, update_num, agg_sample_size)
            if self.decoder_latent_layer:
                sparsity_loss += self.sparsity_loss(next(iter(model.models.values())).decoder.layer_select.layer_samples, update_num, agg_sample_size)
            if (sparsity_loss > 0):
                optimizer.backward(sparsity_loss)
        return (agg_loss, agg_sample_size, agg_logging_output)

    def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample):
        """Validation loss for one language pair with the right lang indices."""
        (src, tgt) = lang_pair.split('-')
        if self.encoder_latent_layer:
            src_lang_idx = self.src_lang_idx_dict[src]
            model.models[lang_pair].encoder.set_lang_idx(src_lang_idx)
        if self.decoder_latent_layer:
            tgt_lang_idx = self.tgt_lang_idx_dict[tgt]
            model.models[lang_pair].decoder.set_lang_idx(tgt_lang_idx)
        (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair])
        return (loss, sample_size, logging_output)

    def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
        """Set the source/target language indices before generation."""
        if (self.encoder_latent_layer or self.decoder_latent_layer):
            for model in models:
                if self.encoder_latent_layer:
                    assert (model.encoder.layer_select is not None)
                    src_lang_idx = self.src_lang_idx_dict[self.args.source_lang]
                    model.encoder.set_lang_idx(src_lang_idx)
                if self.decoder_latent_layer:
                    assert (model.decoder.layer_select is not None)
                    tgt_lang_idx = self.tgt_lang_idx_dict[self.args.target_lang]
                    model.decoder.set_lang_idx(tgt_lang_idx)
        return super().inference_step(generator, models, sample, prefix_tokens, constraints)

    # Fix: these four accessors are read as attributes throughout this class
    # (e.g. `self.encoder_latent_layer` truthiness in __init__); as plain
    # methods they would be always-truthy bound methods, so the @property
    # decorators are restored.
    @property
    def encoder_latent_layer(self):
        return (safe_hasattr(self.args, 'encoder_latent_layer') and self.args.encoder_latent_layer)

    @property
    def decoder_latent_layer(self):
        return (safe_hasattr(self.args, 'decoder_latent_layer') and self.args.decoder_latent_layer)

    @property
    def src_lang_idx_dict(self):
        return {lang: lang_idx for (lang_idx, lang) in enumerate(self.src_langs)}

    @property
    def tgt_lang_idx_dict(self):
        return {lang: lang_idx for (lang_idx, lang) in enumerate(self.tgt_langs)}
def find_optimizer_using_name(optimizer_name):
    """Import `optimizers.<name>_optimizer` and return the BaseOptimizer
    subclass whose class name matches `<name>optimizer` (case-insensitive,
    underscores removed).

    Exits the process with a non-zero status if no matching class exists in
    the module; an ImportError propagates if the module itself is missing.
    """
    optimizer_filename = (('optimizers.' + optimizer_name) + '_optimizer')
    optimizerlib = importlib.import_module(optimizer_filename)
    optimizer = None
    target_optimizer_name = (optimizer_name.replace('_', '') + 'optimizer')
    for (name, cls) in optimizerlib.__dict__.items():
        # Guard with isinstance(cls, type): module attributes include
        # functions/modules, and issubclass() raises TypeError on non-classes.
        if ((name.lower() == target_optimizer_name.lower()) and isinstance(cls, type) and issubclass(cls, BaseOptimizer)):
            optimizer = cls
    if (optimizer is None):
        print(('In %s.py, there should be a subclass of BaseOptimizer with class name that matches %s in lowercase.' % (optimizer_filename, target_optimizer_name)))
        # Fix: exit with a failure status -- the original exit(0) reported
        # success on this error path.
        exit(1)
    return optimizer
def test_straight_waveguide_power_poynting():
    """Check that the Poynting-flux power through a plane after a straight
    waveguide matches the power injected by the mode source (forward), and
    is its negation for the reversed plane orientation."""
    # 2D-ish simulation space (40 nm mesh, one cell thick in z) with a Si
    # waveguide defined by a GDS layer on an air background.
    space = Simspace(TESTDATA, optplan.SimulationSpace(pml_thickness=[10, 10, 10, 10, 0, 0], mesh=optplan.UniformMesh(dx=40), sim_region=optplan.Box3d(center=[0, 0, 0], extents=[5000, 5000, 40]), eps_bg=optplan.GdsEps(gds='straight_waveguide.gds', mat_stack=optplan.GdsMaterialStack(background=optplan.Material(mat_name='air'), stack=[optplan.GdsMaterialStackLayer(gds_layer=[100, 0], extents=[(- 80), 80], foreground=optplan.Material(mat_name='Si'), background=optplan.Material(mat_name='air'))]))))
    # Fundamental-mode source launching +x with unit power.
    source = creator_em.WaveguideModeSource(optplan.WaveguideModeSource(power=1.0, extents=[40, 1500, 600], normal=[1.0, 0.0, 0.0], center=[(- 1770), 0, 0], mode_num=0))
    wlen = 1550
    eps_grid = space(wlen).eps_bg.grids
    source_grid = source(space, wlen)
    eps = problem.Constant(fdfd_tools.vec(eps_grid))
    sim = creator_em.FdfdSimulation(eps=eps, solver=local_matrix_solvers.DirectSolver(), wlen=wlen, source=fdfd_tools.vec(source_grid), simspace=space)
    # Power flux through a plane downstream of the source (+x normal) and
    # the same plane with a flipped normal (-x).
    power_fun = poynting.PowerTransmissionFunction(field=sim, simspace=space, wlen=wlen, plane_slice=grid_utils.create_region_slices(space.edge_coords, [1770, 0, 0], [40, 1500, 600]), axis=gridlock.axisvec2axis([1, 0, 0]), polarity=gridlock.axisvec2polarity([1, 0, 0]))
    power_fun_back = poynting.PowerTransmissionFunction(field=sim, simspace=space, wlen=wlen, plane_slice=grid_utils.create_region_slices(space.edge_coords, [1770, 0, 0], [40, 1500, 600]), axis=gridlock.axisvec2axis([(- 1), 0, 0]), polarity=gridlock.axisvec2polarity([(- 1), 0, 0]))
    efield_grid = fdfd_tools.unvec(graph_executor.eval_fun(sim, None), eps_grid[0].shape)
    # Reference: injected power P = -0.5 * Re(E . J*) integrated over the
    # grid (dx^3 volume factor for the 40 nm mesh).
    edotj = (np.real((fdfd_tools.vec(efield_grid) * np.conj(fdfd_tools.vec(source_grid)))) * (40 ** 3))
    power = ((- 0.5) * np.sum(edotj))
    np.testing.assert_almost_equal(graph_executor.eval_fun(power_fun, None), power, decimal=4)
    # Reversed orientation measures the same flux with opposite sign.
    np.testing.assert_almost_equal(graph_executor.eval_fun(power_fun_back, None), (- power), decimal=4)
class WnliProcessor(DataProcessor):
    """Processor for the WNLI (GLUE) dataset: reads the TSV splits and
    yields InputExamples with binary string labels."""

    def get_train_examples(self, data_dir):
        """Build examples from `train.tsv` in `data_dir`."""
        path = os.path.join(data_dir, 'train.tsv')
        return self._create_examples(self._read_tsv(path), 'train')

    def get_dev_examples(self, data_dir):
        """Build examples from `dev.tsv` in `data_dir`."""
        path = os.path.join(data_dir, 'dev.tsv')
        return self._create_examples(self._read_tsv(path), 'dev')

    def get_test_examples(self, data_dir):
        """Build examples from `test.tsv` in `data_dir`."""
        path = os.path.join(data_dir, 'test.tsv')
        return self._create_examples(self._read_tsv(path), 'test')

    def get_labels(self):
        """Return the binary label set as strings."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows into InputExamples, skipping the header row."""
        examples = []
        for row_index, row in enumerate(lines):
            if row_index == 0:
                # First row is the column header.
                continue
            examples.append(InputExample(
                guid=('%s-%s' % (set_type, row[0])),
                text_a=row[1],
                text_b=row[2],
                label=row[(- 1)]))
        return examples
def test_vector_equivariance():
    """Check rotational equivariance of the VectorOutput head: rotating the
    input coordinates must rotate the predicted vectors the same way,
    i.e. model(z, pos @ R) == model(z, pos) @ R."""
    torch.manual_seed(1234)
    # A fixed 3x3 rotation matrix.
    rotate = torch.tensor([[0.9886788, (- 0.110237), 0.1017945], [0.136363, 0.9431761, (- 0.3030248)], [(- 0.0626055), 0.3134752, 0.9475304]])
    model = create_model(load_example_args('equivariant-transformer', prior_model=None, output_model='VectorOutput'))
    z = torch.ones(100, dtype=torch.long)
    pos = torch.randn(100, 3)
    batch = torch.arange(50, dtype=torch.long).repeat_interleave(2)
    y = model(z, pos, batch)[0]
    # Fix: the '@' matrix-multiply operators were lost in this copy
    # (`pos rotate` / `y rotate` are syntax errors).
    y_rot = model(z, (pos @ rotate), batch)[0]
    torch.testing.assert_allclose((y @ rotate), y_rot)
class ManinMap():
    """A map from the set of right cosets of Gamma0(N) in SL_2(Z) to a
    coefficient module `codomain` with a Sigma0(N) action, represented by
    its values on a set of Manin generators.

    `defining_data` may be a list/tuple (one value per generator), a dict
    keyed by generators, a single codomain element (constant map), or -- with
    check=False -- a pre-built dict used as-is.
    """
    def __init__(self, codomain, manin_relations, defining_data, check=True):
        self._codomain = codomain
        self._manin = manin_relations
        if check:
            # The codomain must carry a Sigma0(N) action for _right_action /
            # _eval_sl2 to make sense.
            if (coercion_model.get_action(codomain, Sigma0(manin_relations._N)) is None):
                raise ValueError('Codomain must have an action of Sigma0(N)')
            self._dict = {}
            if isinstance(defining_data, (list, tuple)):
                if (len(defining_data) != manin_relations.ngens()):
                    raise ValueError('length of defining data must be the same as number of Manin generators')
                for i in range(len(defining_data)):
                    self._dict[manin_relations.gen(i)] = codomain(defining_data[i])
            elif isinstance(defining_data, dict):
                for g in manin_relations.gens():
                    self._dict[g] = codomain(defining_data[g])
            else:
                # Fall back to interpreting defining_data as a single constant
                # value for every generator.
                try:
                    c = codomain(defining_data)
                except TypeError:
                    raise TypeError(('unrecognized type %s for defining_data' % type(defining_data)))
                g = manin_relations.gens()
                self._dict = dict(zip(g, ([c] * len(g))))
        else:
            # Trusted path: defining_data is used directly as the value dict.
            self._dict = defining_data
    def extend_codomain(self, new_codomain, check=True):
        """Return the same map with values coerced into `new_codomain`."""
        new_dict = {}
        for g in self._manin.gens():
            new_dict[g] = new_codomain(self._dict[g])
        return ManinMap(new_codomain, self._manin, new_dict, check)
    def _compute_image_from_gens(self, B):
        """Compute the value at coset rep `B` from the Manin relations
        expressing B in terms of the generators."""
        L = self._manin.relations(B)
        t = self._codomain(0)
        for (c, A, g) in L:
            g1 = (self._dict[self._manin.reps(g)] * A)
            t += (g1 * c)
        return t.normalize()
    def __getitem__(self, B):
        # EAFP: cached value if present, otherwise derive it from generators.
        try:
            return self._dict[B]
        except KeyError:
            return self._compute_image_from_gens(B)
    def compute_full_data(self):
        """Fill the value cache for every coset representative."""
        for B in self._manin.reps():
            if (B not in self._dict):
                self._dict[B] = self._compute_image_from_gens(B)
    def __add__(self, right):
        """Pointwise sum (on keys present in both maps)."""
        D = {}
        sd = self._dict
        rd = right._dict
        for (ky, val) in sd.items():
            if (ky in rd):
                D[ky] = (val + rd[ky])
        return self.__class__(self._codomain, self._manin, D, check=False)
    def __sub__(self, right):
        """Pointwise difference (on keys present in both maps)."""
        D = {}
        sd = self._dict
        rd = right._dict
        for (ky, val) in sd.items():
            if (ky in rd):
                D[ky] = (val - rd[ky])
        return self.__class__(self._codomain, self._manin, D, check=False)
    def __mul__(self, right):
        """Right action by a Sigma0 element, or pointwise scaling otherwise."""
        # Build a sample Sigma0 element purely to detect its type.
        tp = Sigma0(self._manin.level())(MatrixSpace(ZZ, 2, 2)([1, 0, 0, 1]))
        if isinstance(right, type(tp)):
            return self._right_action(right)
        D = {}
        for (ky, val) in self._dict.items():
            D[ky] = (val * right)
        return self.__class__(self._codomain, self._manin, D, check=False)
    def __repr__(self):
        return ('Map from the set of right cosets of Gamma0(%s) in SL_2(Z) to %s' % (self._manin.level(), self._codomain))
    def _eval_sl2(self, A):
        """Evaluate the map at an SL_2(Z) matrix `A` via its Gamma0(N)-
        equivalent coset representative."""
        SN = Sigma0(self._manin._N)
        A = M2Z(A)
        B = self._manin.equivalent_rep(A)
        gaminv = SN((B * M2Z(A).adjugate()))
        return (self[B] * gaminv).normalize()
    def __call__(self, A):
        """Evaluate at an arbitrary 2x2 matrix by writing the path between
        its column cusps as a sum of unimodular paths to infinity."""
        a = A[t00]
        b = A[t01]
        c = A[t10]
        d = A[t11]
        v1 = unimod_matrices_to_infty(b, d)
        v2 = unimod_matrices_to_infty(a, c)
        ans = self._codomain(0)
        for B in v1:
            ans = (ans + self._eval_sl2(B))
        for B in v2:
            ans = (ans - self._eval_sl2(B))
        return ans.normalize()
    def apply(self, f, codomain=None, to_moments=False):
        """Apply `f` to each value (or, with to_moments, to each moment of
        each value) and return the resulting map into `codomain`."""
        D = {}
        sd = self._dict
        if (codomain is None):
            codomain = self._codomain
        for (ky, val) in sd.items():
            if to_moments:
                D[ky] = codomain([f(val.moment(a)) for a in range(val.precision_absolute())])
            else:
                D[ky] = f(val)
        return self.__class__(codomain, self._manin, D, check=False)
    def __iter__(self):
        # Iterate over the values at the Manin generators.
        for A in self._manin.gens():
            (yield self._dict[A])
    def _right_action(self, gamma):
        """Return the map B |-> self(gamma*B) * gamma (right Sigma0 action)."""
        D = {}
        for ky in self._dict:
            D[ky] = (self((gamma * ky)) * gamma)
        return self.__class__(self._codomain, self._manin, D, check=False)
    def normalize(self):
        """Normalize each stored value in place; returns self."""
        sd = self._dict
        for val in sd.values():
            val.normalize()
        return self
    def reduce_precision(self, M):
        """Return the map with every value reduced to precision M."""
        D = {}
        for (ky, val) in self._dict.items():
            D[ky] = val.reduce_precision(M)
        return self.__class__(self._codomain, self._manin, D, check=False)
    def specialize(self, *args):
        """Specialize every value (and the codomain) at the given arguments."""
        D = {}
        for (ky, val) in self._dict.items():
            D[ky] = val.specialize(*args)
        return self.__class__(self._codomain.specialize(*args), self._manin, D, check=False)
    def hecke(self, ell, algorithm='prep'):
        """Apply the Hecke operator T_ell.

        'prep' uses precomputed Hecke data on the generators; 'naive' sums
        the standard coset representative actions directly.
        """
        self.compute_full_data()
        self.normalize()
        M = self._manin
        if (algorithm == 'prep'):
            psi = {}
            for g in M.gens():
                psi_g = sum(((self[h] * A) for (h, A) in M.prep_hecke_on_gen_list(ell, g)), self._codomain(0))
                psi_g.normalize()
                psi[g] = psi_g
            return self.__class__(self._codomain, self._manin, psi, check=False).normalize()
        elif (algorithm == 'naive'):
            S0N = Sigma0(self._manin.level())
            psi = self._right_action(S0N([1, 0, 0, ell]))
            for a in range(1, ell):
                psi += self._right_action(S0N([1, a, 0, ell]))
            # The extra [ell,0,0,1] term only appears when ell does not
            # divide the level.
            if ((self._manin.level() % ell) != 0):
                psi += self._right_action(S0N([ell, 0, 0, 1]))
            return psi.normalize()
        else:
            raise ValueError('Algorithm must be either "naive" or "prep"')
    def p_stabilize(self, p, alpha, V):
        """Return the p-stabilization of self with U_p eigenvalue alpha,
        valued in the codomain extended by 1/alpha, on the source of V."""
        manin = V.source()
        S0 = Sigma0(self._codomain._act._Np)
        pmat = S0([p, 0, 0, 1])
        D = {}
        scalar = (1 / alpha)
        W = self._codomain.change_ring(scalar.parent())
        for g in map(M2Z, manin.gens()):
            # phi_stab(g) = phi(g) - (1/alpha) * phi(p*g) | pmat
            D[g] = W((self._eval_sl2(g) - (self((pmat * g)) * pmat).scale(scalar)))
        ans = self.__class__(W, manin, D, check=False)
        return ans
class TFCvtSelfAttention(tf.keras.layers.Layer):
    """CvT self-attention layer (TensorFlow): convolutional projections for
    query/key/value followed by standard multi-head attention, optionally
    carrying a class token through unchanged."""
    def __init__(self, config: CvtConfig, num_heads: int, embed_dim: int, kernel_size: int, stride_q: int, stride_kv: int, padding_q: int, padding_kv: int, qkv_projection_method: str, qkv_bias: bool, attention_drop_rate: float, with_cls_token: bool=True, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): scale uses embed_dim ** -0.5 rather than the per-head
        # dimension -- matches the reference implementation; confirm upstream.
        self.scale = (embed_dim ** (- 0.5))
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        # The query projection replaces 'avg' with a plain linear projection.
        self.convolution_projection_query = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_q, padding_q, projection_method=('linear' if (qkv_projection_method == 'avg') else qkv_projection_method), name='convolution_projection_query')
        self.convolution_projection_key = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_kv, padding_kv, projection_method=qkv_projection_method, name='convolution_projection_key')
        self.convolution_projection_value = TFCvtSelfAttentionProjection(config, embed_dim, kernel_size, stride_kv, padding_kv, projection_method=qkv_projection_method, name='convolution_projection_value')
        self.projection_query = tf.keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_query')
        self.projection_key = tf.keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_key')
        self.projection_value = tf.keras.layers.Dense(units=embed_dim, kernel_initializer=get_initializer(config.initializer_range), use_bias=qkv_bias, bias_initializer='zeros', name='projection_value')
        self.dropout = tf.keras.layers.Dropout(attention_drop_rate)
    def rearrange_for_multi_head_attention(self, hidden_state: tf.Tensor) -> tf.Tensor:
        """Reshape (batch, seq, embed) -> (batch, heads, seq, head_dim)."""
        (batch_size, hidden_size, _) = shape_list(hidden_state)
        head_dim = (self.embed_dim // self.num_heads)
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, hidden_size, self.num_heads, head_dim))
        hidden_state = tf.transpose(hidden_state, perm=(0, 2, 1, 3))
        return hidden_state
    def call(self, hidden_state: tf.Tensor, height: int, width: int, training: bool=False) -> tf.Tensor:
        """Run convolutional-projection self-attention over a (batch,
        [cls +] height*width, channels) sequence and return the attended
        sequence with the same layout."""
        if self.with_cls_token:
            # Split off the class token; the conv projections operate on the
            # spatial tokens only.
            (cls_token, hidden_state) = tf.split(hidden_state, [1, (height * width)], 1)
        (batch_size, hidden_size, num_channels) = shape_list(hidden_state)
        hidden_state = tf.reshape(hidden_state, shape=(batch_size, height, width, num_channels))
        key = self.convolution_projection_key(hidden_state, training=training)
        query = self.convolution_projection_query(hidden_state, training=training)
        value = self.convolution_projection_value(hidden_state, training=training)
        if self.with_cls_token:
            # Re-attach the class token ahead of the projected tokens.
            query = tf.concat((cls_token, query), axis=1)
            key = tf.concat((cls_token, key), axis=1)
            value = tf.concat((cls_token, value), axis=1)
        head_dim = (self.embed_dim // self.num_heads)
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))
        # Scaled dot-product attention.
        attention_score = (tf.matmul(query, key, transpose_b=True) * self.scale)
        attention_probs = stable_softmax(logits=attention_score, axis=(- 1))
        attention_probs = self.dropout(attention_probs, training=training)
        context = tf.matmul(attention_probs, value)
        # Merge heads back: (batch, heads, seq, head_dim) -> (batch, seq, embed).
        (_, _, hidden_size, _) = shape_list(context)
        context = tf.transpose(context, perm=(0, 2, 1, 3))
        context = tf.reshape(context, (batch_size, hidden_size, (self.num_heads * head_dim)))
        return context
def sgx_stats_pid(pid: int) -> dict:
    """Query the SGX driver for EPC usage statistics of `pid`.

    Packs the request structure described by `_sgx_enclave_usage`, issues the
    usage ioctl against /dev/isgx, and returns a {field name: value-as-str}
    dict. Exits with status 1 if the device cannot be opened.
    """
    layout = ''.join((field['type'] for field in _sgx_enclave_usage))
    # Every field keeps its default except sgx_pid, which carries the query.
    request = struct.pack(layout, *((pid if (field['name'] == 'sgx_pid') else field['default_value']) for field in _sgx_enclave_usage))
    try:
        with open('/dev/isgx', 'r+b', buffering=0) as device:
            raw = ioctl(device, _SGX_IOC_EPC_USAGE, request)
    except EnvironmentError:
        print('Unable to open /dev/isgx. Check driver and permissions.', file=sys.stderr)
        sys.exit(1)
    unpacked = struct.unpack(layout, raw)
    return {field['name']: str(unpacked[position]) for (position, field) in enumerate(_sgx_enclave_usage)}
class TStopwatch(object):
    """SWIG-generated Python proxy for the C++ TStopwatch singleton timer.

    NOTE(review): auto-generated binding code -- regenerate via SWIG rather
    than hand-editing. Instances are only obtained via GetInstance().
    """
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disabled; use TStopwatch.GetInstance().
        raise AttributeError('No constructor defined')
    __repr__ = _swig_repr
    # Experiment/phase identifiers mirrored from the wrapped C++ enum.
    LoadTables = _snap.TStopwatch_LoadTables
    Preprocess = _snap.TStopwatch_Preprocess
    ConstructGraph = _snap.TStopwatch_ConstructGraph
    Compute = _snap.TStopwatch_Compute
    Postprocess = _snap.TStopwatch_Postprocess
    StoreOutputs = _snap.TStopwatch_StoreOutputs
    AllocateColumnCopies = _snap.TStopwatch_AllocateColumnCopies
    CopyColumns = _snap.TStopwatch_CopyColumns
    Sort = _snap.TStopwatch_Sort
    Group = _snap.TStopwatch_Group
    MergeNeighborhoods = _snap.TStopwatch_MergeNeighborhoods
    AddNeighborhoods = _snap.TStopwatch_AddNeighborhoods
    AddEdges = _snap.TStopwatch_AddEdges
    Sort2 = _snap.TStopwatch_Sort2
    ComputeOffset = _snap.TStopwatch_ComputeOffset
    ComputeETypes = _snap.TStopwatch_ComputeETypes
    EstimateSizes = _snap.TStopwatch_EstimateSizes
    InitGraph = _snap.TStopwatch_InitGraph
    ExtractNbrETypes = _snap.TStopwatch_ExtractNbrETypes
    CopyNodes = _snap.TStopwatch_CopyNodes
    PopulateGraph = _snap.TStopwatch_PopulateGraph
    ExtractEdges = _snap.TStopwatch_ExtractEdges
    BuildSubgraph = _snap.TStopwatch_BuildSubgraph
    NEXPS = _snap.TStopwatch_NEXPS
    def GetInstance():
        # Returns the process-wide singleton stopwatch.
        return _snap.TStopwatch_GetInstance()
    GetInstance = staticmethod(GetInstance)
    def Start(self, Exp):
        # Start timing the given experiment phase.
        return _snap.TStopwatch_Start(self, Exp)
    def Stop(self, Exp):
        # Stop timing the given experiment phase.
        return _snap.TStopwatch_Stop(self, Exp)
    def Cnt(self, Exp):
        # Number of recorded start/stop intervals for the phase.
        return _snap.TStopwatch_Cnt(self, Exp)
    def Sum(self, Exp):
        # Total accumulated time for the phase.
        return _snap.TStopwatch_Sum(self, Exp)
    def Avg(self, Exp):
        # Average interval duration for the phase.
        return _snap.TStopwatch_Avg(self, Exp)
    def Max(self, Exp):
        # Longest interval for the phase.
        return _snap.TStopwatch_Max(self, Exp)
    def Min(self, Exp):
        # Shortest interval for the phase.
        return _snap.TStopwatch_Min(self, Exp)
    __swig_destroy__ = _snap.delete_TStopwatch
class TaskOutputList(object):
    """Flat view over an ordered list of task outputs.

    Each element of ``outputs`` is expected to expose a ``names`` list and a
    ``set(values, _fetch_func)`` method; this wrapper concatenates the names
    and dispatches contiguous value slices back to the owning outputs.
    """

    def __init__(self, outputs=None):
        # Fresh list per instance when nothing (or an empty value) is given.
        self.outputs = outputs if outputs else []

    def names(self):
        """Return the concatenated name lists of all outputs, in order."""
        collected = []
        for output in self.outputs:
            collected.extend(output.names)
        return collected

    def set_values(self, values, _fetch_func=None):
        """Distribute ``values`` across the outputs, one slice per output."""
        cursor = 0
        for output in self.outputs:
            count = len(output.names)
            output.set(values[cursor:cursor + count], _fetch_func)
            cursor += count
        assert cursor == len(values), 'Wrong number of output values.'

    def __repr__(self):
        return 'TaskOutputList(outputs={})'.format(self.outputs)
def create(args):
    """Instantiate the generator registered for ``args.dataset``.

    ``args.dataset`` may carry a variant suffix (``"name-variant"``); only
    the part before the first ``-`` selects the generator from the
    module-level ``__generator`` registry.

    Raises:
        ValueError: if no generator is registered for the dataset (the
            original raised a bare ``ValueError()`` with no message).
    """
    dataset = args.dataset.split('-')[0]
    dataset_args = args.dataset_args[dataset]
    if dataset not in __generator:
        logging.error('Error: dataset {} does not exist!'.format(args.dataset))
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    return __generator[dataset](args, dataset_args)
class TestUtilityOps(serial.SerializedTestCase):
    """Hypothesis-driven tests for caffe2 utility ops (Slice, Transpose, Sum, ...).

    NOTE(review): the ``@given(...)``/``@settings(...)`` decorators below were
    reconstructed from stripped decorator residue (bare keyword tuples, which
    are syntax errors).  They follow the standard caffe2
    ``hypothesis_test_util`` pattern; ``from hypothesis import given, settings``
    must be present at the top of the file (the ``st.``/``hu.`` usages in the
    residue indicate the hypothesis imports already exist).
    """

    @given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_slice(self, X, args, gc, dc):
        # Slice a random dim with either attribute-style or input-style bounds.
        X = X.astype(dtype=np.float32)
        dim = random.randint(0, (X.ndim - 1))
        slice_start = random.randint(0, (X.shape[dim] - 1))
        slice_end = random.randint(slice_start, (X.shape[dim] - 1))
        starts = np.array(([0] * X.ndim)).astype(np.int32)
        ends = np.array(([(- 1)] * X.ndim)).astype(np.int32)
        starts[dim] = slice_start
        ends[dim] = slice_end
        if args:
            op = core.CreateOperator('Slice', ['X'], ['Y'], starts=starts, ends=ends, device_option=gc)
            def slice_ref(X):
                slc = ([slice(None)] * X.ndim)
                slc[dim] = slice(slice_start, slice_end)
                return [X[slc]]
            inputs = [X]
        else:
            op = core.CreateOperator('Slice', ['X', 'starts', 'ends'], ['Y'], device_option=gc)
            def slice_ref(x, starts, ends):
                slc = ([slice(None)] * x.ndim)
                slc[dim] = slice(slice_start, slice_end)
                return [x[slc]]
            inputs = [X, starts, ends]
        self.assertReferenceChecks(gc, op, inputs, slice_ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
        self.assertGradientChecks(device_option=gc, op=op, inputs=inputs, outputs_to_check=0, outputs_with_grads=[0])

    @given(ndims=st.integers(min_value=1, max_value=10), **hu.gcs)
    @settings(deadline=10000)
    def test_resize_like(self, ndims, gc, dc):
        X = np.zeros(((ndims * 2),))
        Y = np.zeros((ndims, 2))
        op = core.CreateOperator('ResizeLike', ['X', 'Y'], ['Z'])
        def resize_like(X, Y):
            return [X.reshape(Y.shape)]
        self.assertDeviceChecks(dc, op, [X, Y], [0])
        self.assertReferenceChecks(gc, op, [X, Y], resize_like, ensure_outputs_are_inferred=True)

    @given(dtype=st.sampled_from([np.float32, np.int32]), ndims=st.integers(min_value=1, max_value=5), seed=st.integers(min_value=0, max_value=65536), null_axes=st.booleans(), engine=st.sampled_from(['CUDNN', None]), **hu.gcs)
    @settings(deadline=10000)
    def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
        # cuDNN < 6 cannot transpose int32 tensors.
        if ((gc.device_type == caffe2_pb2.CUDA) and (engine == 'CUDNN')):
            assume(((workspace.GetCuDNNVersion() >= 6000) or (dtype != np.int32)))
        dims = ((np.random.rand(ndims) * 16) + 1).astype(np.int32)
        X = (np.random.rand(*dims) * 16).astype(dtype)
        if null_axes:
            axes = None
            op = core.CreateOperator('Transpose', ['input'], ['output'], engine=engine)
        else:
            np.random.seed(int(seed))
            axes = [int(v) for v in list(np.random.permutation(X.ndim))]
            op = core.CreateOperator('Transpose', ['input'], ['output'], axes=axes, engine=engine)
        def transpose_ref(x, axes):
            return (np.transpose(x, axes),)
        self.assertReferenceChecks(gc, op, [X, axes], transpose_ref)

    @given(m=st.integers(5, 10), n=st.integers(5, 10), o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_nan_check(self, m, n, o, nans, gc, dc):
        other = np.array([1, 2, 3]).astype(np.float32)
        X = np.random.rand(m, n, o).astype(np.float32)
        if nans:
            x_nan = np.random.randint(0, m)
            y_nan = np.random.randint(0, n)
            z_nan = np.random.randint(0, o)
            X[(x_nan, y_nan, z_nan)] = float('NaN')
        def nan_reference(X, Y):
            if (not np.isnan(X).any()):
                return [X]
            else:
                return [np.array([])]
        op = core.CreateOperator('NanCheck', ['X', 'other'], ['Y'])
        # NanCheck must raise iff the input actually contains NaNs.
        try:
            self.assertReferenceChecks(device_option=gc, op=op, inputs=[X, other], reference=nan_reference)
            if nans:
                self.assertTrue(False, 'Did not fail when presented with NaN!')
        except RuntimeError:
            self.assertTrue(nans, 'No NaNs but failed')
        try:
            self.assertGradientChecks(device_option=gc, op=op, inputs=[X], outputs_to_check=0, outputs_with_grads=[0])
            if nans:
                self.assertTrue(False, 'Did not fail when gradient had NaN!')
        except RuntimeError:
            pass

    @given(n=st.integers(4, 5), m=st.integers(6, 7), d=st.integers(2, 3), **hu.gcs)
    def test_elementwise_max(self, n, m, d, gc, dc):
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        inputs = [X, Y, Z]
        def max_op(X, Y, Z):
            return [np.maximum(np.maximum(X, Y), Z)]
        op = core.CreateOperator('Max', ['X', 'Y', 'Z'], ['mx'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=max_op)
        self.assertDeviceChecks(dc, op, inputs, [0])

    @given(n=st.integers(4, 5), m=st.integers(6, 7), d=st.integers(2, 3), **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_max_grad(self, n, m, d, gc, dc):
        go = np.random.rand(n, m, d).astype(np.float32)
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        mx = np.maximum(np.maximum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]
        def max_grad_op(mx, go, X, Y, Z):
            # Gradient flows only to inputs equal to the elementwise max.
            def mx_grad(a):
                return (go * (mx == a))
            return [mx_grad(a) for a in [X, Y, Z]]
        op = core.CreateOperator('MaxGradient', ['mx', 'go', 'X', 'Y', 'Z'], ['gX', 'gY', 'gZ'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=max_grad_op)
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])

    @given(n=st.integers(4, 5), m=st.integers(6, 7), d=st.integers(2, 3), **hu.gcs)
    def test_elementwise_min(self, n, m, d, gc, dc):
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        inputs = [X, Y, Z]
        def min_op(X, Y, Z):
            return [np.minimum(np.minimum(X, Y), Z)]
        op = core.CreateOperator('Min', ['X', 'Y', 'Z'], ['mx'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=min_op)
        self.assertDeviceChecks(dc, op, inputs, [0])

    @given(n=st.integers(4, 5), m=st.integers(6, 7), d=st.integers(2, 3), **hu.gcs)
    @settings(deadline=10000)
    def test_elementwise_min_grad(self, n, m, d, gc, dc):
        go = np.random.rand(n, m, d).astype(np.float32)
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        mx = np.minimum(np.minimum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]
        def min_grad_op(mx, go, X, Y, Z):
            def mx_grad(a):
                return (go * (mx == a))
            return [mx_grad(a) for a in [X, Y, Z]]
        op = core.CreateOperator('MinGradient', ['mx', 'go', 'X', 'Y', 'Z'], ['gX', 'gY', 'gZ'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=min_grad_op)
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])

    @given(n=st.integers(1, 8), m=st.integers(1, 10), d=st.integers(1, 4), in_place=st.booleans(), engine=st.sampled_from(['', 'CUDNN']), seed=st.integers(min_value=0, max_value=65535), dtype=st.sampled_from([np.int32, np.int64, np.float32]), **hu.gcs)
    @settings(deadline=10000)
    def test_sum(self, n, m, d, in_place, engine, seed, dtype, gc, dc):
        input_names = []
        input_vars = []
        np.random.seed(seed)
        for i in range(m):
            X_name = ('X' + str(i))
            input_names.extend([X_name])
            var = np.random.rand(n, d).astype(dtype)
            vars()[X_name] = var
            input_vars.append(var)
        def sum_op_ref(*args):
            res = np.zeros((n, d))
            for i in range(m):
                res = (res + args[i])
            return (res,)
        op = core.CreateOperator('Sum', input_names, ([input_names[0]] if in_place else ['Y']), engine=engine)
        self.assertReferenceChecks(device_option=gc, op=op, inputs=input_vars, reference=sum_op_ref)
        self.assertDeviceChecks(dc, op, input_vars, [0])

    @given(inputs=hu.lengths_tensor().flatmap((lambda pair: st.tuples(st.just(pair[0]), st.just(pair[1]), hu.dims(max_value=len(pair[1]))))).flatmap((lambda tup: st.tuples(st.just(tup[0]), st.just(tup[1]), hu.arrays(tup[2], dtype=np.int32, elements=st.integers(min_value=0, max_value=(len(tup[1]) - 1)))))), **hu.gcs_cpu_only)
    @settings(deadline=1000)
    def test_lengths_gather(self, inputs, gc, dc):
        items = inputs[0]
        lengths = inputs[1]
        indices = inputs[2]
        def lengths_gather_op(items, lengths, indices):
            ends = np.cumsum(lengths)
            return [np.concatenate(list((items[(ends[i] - lengths[i]):ends[i]] for i in indices)))]
        op = core.CreateOperator('LengthsGather', ['items', 'lengths', 'indices'], ['output'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[items, lengths, indices], reference=lengths_gather_op)

    @given(inputs=hu.lengths_tensor(), **hu.gcs_cpu_only)
    @settings(deadline=1000)
    def test_lengths_to_ranges(self, inputs, gc, dc):
        (_, lengths) = inputs
        def lengths_to_ranges_op(lengths):
            return [[[x, y] for (x, y) in zip(np.cumsum(np.append([0], lengths)), lengths)]]
        op = core.CreateOperator('LengthsToRanges', ['lengths'], ['output'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[lengths], reference=lengths_to_ranges_op)
        # Also verify shape/type inference agrees with the actual output.
        net = core.Net('test_shape_inference')
        workspace.FeedBlob('lengths', lengths)
        output = net.LengthsToRanges(['lengths'], ['output'])
        (shapes, types) = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        self.assertEqual(shapes[output], list(workspace.blobs[output].shape))
        self.assertEqual(shapes[output], (list(lengths.shape) + [2]))
        self.assertEqual(types[output], core.DataType.INT32)

    @given(**hu.gcs)
    @settings(deadline=None, max_examples=50)
    def test_size_op(self, gc, dc):
        X = np.array([[1, 2], [3, 4]]).astype(np.float32)
        def size_op(tensor):
            return [np.prod(tensor.shape)]
        op = core.CreateOperator('Size', ['X'], ['output'])
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[X], reference=size_op)

    def test_alias_op(self):
        # Alias must expose the same data, including for empty tensors.
        for size in [0, 5]:
            X = np.arange(size).astype(np.float32)
            workspace.FeedBlob('X', X)
            op = core.CreateOperator('Alias', ['X'], ['Y'])
            workspace.RunOperatorOnce(op)
            Y = workspace.FetchBlob('Y')
            np.testing.assert_array_equal(X, Y)

    @given(**hu.gcs)
    @settings(deadline=10000)
    def test_range(self, gc, dc):
        names = [('stop_',), ('start_', 'stop_'), ('start_', 'stop_', 'step_')]
        for inputs in ((10,), (np.float32(10.0),), (0,), (0, 0), (10.0, 5.0, (- 1.0)), (2, 10000), (2, 10000, 20000), (2, 10000, (- 1))):
            inputs = [np.array(v) for v in inputs]
            op = core.CreateOperator('Range', names[(len(inputs) - 1)], ['Y'])
            self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=(lambda *x: [np.arange(*x)]))
            self.assertDeviceChecks(dc, op, inputs, [0])
        # A zero step must be rejected with a clear error.
        inputs = (np.array(0), np.array(10), np.array(0))
        op = core.CreateOperator('Range', names[(len(inputs) - 1)], ['Y'])
        with six.assertRaisesRegex(self, RuntimeError, 'Step size cannot be 0'):
            self.assertReferenceChecks(device_option=gc, op=op, inputs=inputs, reference=(lambda *x: [np.arange(*x)]))
class TFEsmForSequenceClassification(metaclass=DummyObject):
    # Placeholder emitted when TensorFlow is not installed: any attempt to
    # instantiate it raises an informative error via requires_backends.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def show_all():
    """Render a 2x2 grid of bar charts, one panel per dataset.

    Relies on the module-level ``RESULTS`` (per-method result rows),
    ``DATASETS`` (panel titles), ``X`` (method labels) and ``color``.
    Each panel plots every method's score relative to the first entry
    (the baseline), for datasets 0..3.  The original repeated the panel
    code four times verbatim; this loops instead.
    """
    fig, ax = plt.subplots(2, 2, gridspec_kw={'width_ratios': [1, 1], 'height_ratios': [1, 1]})
    for dataset in range(4):
        panel = ax[dataset // 2][dataset % 2]
        result = [RESULT[dataset] for RESULT in RESULTS]
        # Difference to the baseline (first row); the baseline itself is dropped.
        Y = [(num - result[0]) for num in result][1:]
        panel.bar(X, Y, color=color)
        panel.set_xticklabels(X, fontsize=18)
        panel.set_title(DATASETS[dataset], {'fontsize': 30})
        panel.set_ylim((- 5), 20)
        panel.label_outer()
        panel.tick_params(axis='y', labelsize=18)
    fig.supylabel('Accuracy compared with CLIP', fontsize=28)
    plt.show()
class CsBbox(CsObject):
    """Axis-aligned 2D bounding-box annotation (full and visible extents)."""

    def __init__(self):
        CsObject.__init__(self, CsObjectType.BBOX)
        # Both boxes are stored as [x1, y1, w, h].
        self.bbox = []
        self.bboxVis = []
        # -1 marks an annotation without an assigned instance id.
        self.instanceId = -1

    def __str__(self):
        box_fmt = '[(x1: {}, y1: {}), (w: {}, h: {})]'
        bboxText = box_fmt.format(self.bbox[0], self.bbox[1], self.bbox[2], self.bbox[3])
        bboxVisText = box_fmt.format(self.bboxVis[0], self.bboxVis[1], self.bboxVis[2], self.bboxVis[3])
        return 'Object: {} - bbox {} - visible {}'.format(self.label, bboxText, bboxVisText)

    def fromJsonText(self, jsonText, objId=-1):
        """Populate the annotation from a decoded JSON object."""
        self.bbox = jsonText['bbox']
        self.bboxVis = jsonText['bboxVis']
        self.label = str(jsonText['label'])
        self.instanceId = jsonText['instanceId']

    def toJsonText(self):
        """Serialize the annotation back to a JSON-compatible dict."""
        return {
            'label': self.label,
            'instanceId': self.instanceId,
            'bbox': self.bbox,
            'bboxVis': self.bboxVis,
        }
class DirectedGraphSAGELinkGenerator(BatchedLinkGenerator):
    """Link-batch generator for directed GraphSAGE.

    Samples separate in-edge and out-edge neighbourhood trees for both
    endpoints of each link and gathers the node features for every sampling
    slot of those trees.
    """
    def __init__(self, G, batch_size, in_samples, out_samples, seed=None, name=None, weighted=False):
        """
        Args:
            G: the graph (must carry node features).
            batch_size: links per batch.
            in_samples / out_samples: per-hop sample counts for in- and
                out-neighbourhoods (their length fixes the number of hops).
            seed: seed for the per-batch samplers.
            name: optional generator name.
            weighted: whether sampling respects edge weights.
        """
        super().__init__(G, batch_size)
        self.in_samples = in_samples
        self.out_samples = out_samples
        self._name = name
        self.weighted = weighted
        if (len(self.schema.node_types) > 1):
            warnings.warn('running homogeneous GraphSAGE on a graph with multiple node types', RuntimeWarning, stacklevel=2)
        # A link has two endpoints, hence the node type is duplicated.
        self.head_node_types = (self.schema.node_types * 2)
        self._graph = G
        # One independently seeded sampler per batch for reproducibility.
        self._samplers = SeededPerBatch((lambda s: DirectedBreadthFirstNeighbours(self._graph, graph_schema=self.schema, seed=s)), seed=seed)
    def sample_features(self, head_links, batch_num):
        """Sample neighbourhood features for a batch of links.

        Returns a list of feature arrays, one per (endpoint, slot) pair,
        each shaped (num_links, num_sampled_at_slot, feature_dim).
        """
        batch_feats = []
        for hns in zip(*head_links):
            node_samples = self._samplers[batch_num].run(nodes=hns, n=1, in_size=self.in_samples, out_size=self.out_samples, weighted=self.weighted)
            node_type = self.head_node_types[0]
            max_hops = len(self.in_samples)
            # The in/out sampling tree is binary: 2^(hops+1) - 1 slots total.
            max_slots = ((2 ** (max_hops + 1)) - 1)
            features = ([None] * max_slots)
            for slot in range(max_slots):
                nodes_in_slot = [element for sample in node_samples for element in sample[slot]]
                features_for_slot = self.graph.node_features(nodes_in_slot, node_type, use_ilocs=True)
                features[slot] = np.reshape(features_for_slot, (len(hns), (- 1), features_for_slot.shape[1]))
            batch_feats.append(features)
        # Interleave the two endpoints' slot features: slot-by-slot pairs.
        batch_feats = [feats for ab in zip(*batch_feats) for feats in ab]
        return batch_feats
def smis_to_actions(char_dict, smis):
    """Encode SMILES strings into fixed-width integer action matrices.

    Args:
        char_dict: vocabulary object exposing ``max_smi_len``, ``END``,
            ``encode(smi)`` and the ``char_idx`` symbol->index mapping.
        smis: sequence of SMILES strings.

    Returns:
        ``(actions, seq_lengths)``: an int32 array of shape
        ``(len(smis), max_smi_len + 1)`` holding symbol indices (zero
        padded), and an int64 vector of encoded lengths (END included).

    Raises:
        KeyError: if an encoded symbol is missing from ``char_idx``
            (diagnostics are printed first).
    """
    max_seq_length = (char_dict.max_smi_len + 1)
    enc_smis = [(char_dict.encode(smi) + char_dict.END) for smi in smis]
    actions = np.zeros((len(smis), max_seq_length), dtype=np.int32)
    # np.long was removed in NumPy 1.24; np.int64 matches its old meaning.
    seq_lengths = np.zeros((len(smis),), dtype=np.int64)
    for (i, enc_smi) in enumerate(enc_smis):
        for (c, symbol) in enumerate(enc_smi):
            try:
                actions[(i, c)] = char_dict.char_idx[symbol]
            except KeyError:
                # Dump context for the unknown symbol, then fail loudly
                # (the original used a bare except + `assert False`, which
                # is stripped under -O and hides the real exception).
                print(char_dict.char_idx)
                print(enc_smi)
                print(symbol)
                raise
        seq_lengths[i] = len(enc_smi)
    return (actions, seq_lengths)
def test():
    """Concatenating arrays whose parameters disagree must drop them all."""
    two = ak.with_parameter([100, 200, 300], 'two', 'two')
    three = ak.with_parameter([{'x': 1}, {'x': 2}, 5, 6, 7], 'two', 'two')
    one = ak.with_parameter([1, 2, [], [3, 4]], 'one', 'one')
    combined = ak.concatenate((two, one, three))
    assert ak.parameters(combined) == {}
def set_seed(seed=None):
    """Seed the Python, PyTorch and NumPy RNGs and return the seed used.

    When *seed* is None a random seed in [1, 10000] is drawn first.
    """
    if seed is None:
        seed = random.randint(1, 10000)
    for seeder in (random.seed, torch.manual_seed, np.random.seed):
        seeder(seed)
    return seed
class BatchIncrementalClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
    """Ensemble of batch learners trained on tumbling windows of a stream.

    Incoming samples are buffered; every ``window_size`` samples a fresh
    deep copy of ``base_estimator`` is fitted on the buffer and appended to
    the ensemble (oldest member evicted once ``n_estimators`` is reached).
    Prediction averages the members' predictions and thresholds at 0.5, so
    this targets binary classification.
    """

    def __init__(self, base_estimator=DecisionTreeClassifier(), window_size=100, n_estimators=100):
        # NOTE: the default estimator instance is shared across
        # BatchIncrementalClassifier instances (mutable default), but each
        # window fits a deepcopy, so the shared template is never mutated.
        self.window_size = window_size
        self.n_estimators = n_estimators
        self.base_estimator = base_estimator
        self.ensemble = []
        # i == -1 means the window buffers have not been allocated yet.
        self.i = (- 1)
        self.X_batch = None
        self.y_batch = None
        self.sample_weight = None

    def partial_fit(self, X, y=None, classes=None, sample_weight=None):
        """Buffer (X, y[, sample_weight]) and fit one member per full window.

        Returns self.
        """
        (N, D) = X.shape
        if (self.i < 0):
            self.X_batch = np.zeros((self.window_size, D))
            self.y_batch = np.zeros(self.window_size)
            self.sample_weight = np.zeros(self.window_size)
            self.i = 0
        for n in range(N):
            self.X_batch[self.i] = X[n]
            self.y_batch[self.i] = y[n]
            # `if sample_weight:` is ambiguous for numpy arrays; test for None.
            self.sample_weight[self.i] = (sample_weight[n] if sample_weight is not None else 1.0)
            self.i = (self.i + 1)
            if (self.i == self.window_size):
                if (len(self.ensemble) >= self.n_estimators):
                    self.ensemble.pop(0)
                h = cp.deepcopy(self.base_estimator)
                if ('sample_weight' in signature(h.fit).parameters):
                    # Fit on the buffered window weights, not the raw argument
                    # (which only covers the current mini-batch).
                    h.fit(X=self.X_batch, y=self.y_batch.astype(int), sample_weight=self.sample_weight)
                else:
                    h.fit(X=self.X_batch, y=self.y_batch.astype(int))
                self.ensemble.append(h)
                self.i = 0
        return self

    def predict_proba(self, X):
        """Average the members' predictions (a score vector, not a proba matrix)."""
        (N, D) = X.shape
        votes = np.zeros(N)
        if (len(self.ensemble) <= 0):
            return votes
        for h_i in self.ensemble:
            votes = (votes + ((1.0 / len(self.ensemble)) * h_i.predict(X)))
        return votes

    def predict(self, X):
        """Binary prediction: averaged vote thresholded at 0.5."""
        votes = self.predict_proba(X)
        return ((votes >= 0.5) * 1.0)

    def reset(self):
        """Drop the ensemble and the window buffers; returns self."""
        self.ensemble = []
        self.i = (- 1)
        self.X_batch = None
        self.y_batch = None
        return self
class MCTCTForCTC(metaclass=DummyObject):
    # Placeholder emitted when PyTorch is not installed: any attempt to
    # instantiate it raises an informative error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version):
    """Register SM70 WMMA tensor-op GEMM kernels (16x16x16 instruction shape).

    Enumerates the supported layout combinations and tile descriptions for
    fp16 inputs with fp32 and fp16 accumulation, and adds the corresponding
    GEMM operations to *manifest*.  When the accumulator type differs from
    the input type, a mixed-precision (fp16-output) variant is added as well.
    """
    # (A, B, C) layout combinations; C is always column-major here.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # fp16 x fp16 with fp32 accumulation, and pure fp16.
    math_instructions = [MathInstruction([16, 16, 16], DataType.f16, DataType.f16, DataType.f32, OpcodeClass.WmmaTensorOp, MathOperation.multiply_add), MathInstruction([16, 16, 16], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.WmmaTensorOp, MathOperation.multiply_add)]
    # Volta (compute capability 7.0) and newer.
    min_cc = 70
    max_cc = 1024
    alignment_constraints = [8]
    for math_inst in math_instructions:
        # Threadblock tile shapes with stage count and warp arrangement.
        tile_descriptions = [TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc)]
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        if (math_inst.element_a != math_inst.element_accumulator):
            # Mixed variant: accumulate wide, store in the input precision.
            data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
            CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
def get_mongo_config(config_path):
    """Read the Mongo host/port pair from a YAML config file.

    Args:
        config_path: path to a YAML file whose top-level mapping has
            ``db_host`` and ``db_port`` keys.

    Returns:
        ``(db_host, db_port)`` tuple.
    """
    with open(config_path, 'r') as conf:
        # safe_load avoids arbitrary Python object construction from the
        # config file; bare yaml.load without a Loader is deprecated/unsafe.
        config = yaml.safe_load(conf)
    return (config['db_host'], config['db_port'])
def group_parameters_for_optimizer(model, optimizer_cfg, bias_weight_decay=False, normalization_weight_decay=False):
    """Split model parameters into optimizer groups by weight-decay policy.

    Parameters of whitelisted modules (Linear weights) receive weight decay;
    biases (unless ``bias_weight_decay``), normalization/embedding parameters
    (unless ``normalization_weight_decay``), anything the model opts out via
    ``no_weight_decay``/``no_weight_decay_keywords`` or a ``_no_weight_decay``
    flag get zero decay.  Parameters carrying a ``_optim`` dict of per-param
    hyperparameters each go to their own dedicated group.

    Returns either ``model.parameters()`` (trivial case) or a list of
    param-group dicts for the optimizer constructor.
    """
    # Resolve the effective weight decay: explicit config wins, otherwise the
    # optimizer class's own default (via its signature), otherwise 0.
    if ('weight_decay' in optimizer_cfg):
        weight_decay = optimizer_cfg.weight_decay
    else:
        signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
        if ('weight_decay' in signature.parameters):
            weight_decay = signature.parameters['weight_decay'].default
            if (weight_decay is inspect.Parameter.empty):
                weight_decay = 0.0
        else:
            weight_decay = 0.0
    # No decay and no per-parameter overrides: nothing to group.
    if ((weight_decay == 0.0) and (not any((hasattr(p, '_optim') for p in model.parameters())))):
        return model.parameters()
    # Model-specified opt-outs (exact names and substring keywords).
    skip = (model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set())
    skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords') else set())
    decay = set()
    no_decay = set()
    special = set()
    whitelist_weight_modules = (nn.Linear,)
    blacklist_weight_modules = (nn.Embedding, PositionalEncoding)
    if (not normalization_weight_decay):
        blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d, nn.GroupNorm, nn.SyncBatchNorm, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, nn.LayerNorm, nn.LocalResponseNorm)
        if (FastLayerNorm is not None):
            blacklist_weight_modules += (FastLayerNorm,)
    # Only trainable parameters participate.
    param_dict = {pn: p for (pn, p) in model.named_parameters() if p.requires_grad}
    for (mn, m) in model.named_modules():
        for (pn, p) in m.named_parameters():
            # Full dotted parameter name relative to the model root.
            fpn = (('%s.%s' % (mn, pn)) if mn else pn)
            if ((not p.requires_grad) or (fpn not in param_dict)):
                continue
            if hasattr(p, '_optim'):
                special.add(fpn)
            elif ((fpn in skip) or any(((skip_keyword in fpn) for skip_keyword in skip_keywords))):
                no_decay.add(fpn)
            elif getattr(p, '_no_weight_decay', False):
                no_decay.add(fpn)
            elif ((not bias_weight_decay) and pn.endswith('bias')):
                no_decay.add(fpn)
            elif (pn.endswith('weight') and isinstance(m, whitelist_weight_modules)):
                decay.add(fpn)
            elif isinstance(m, blacklist_weight_modules):
                no_decay.add(fpn)
    # Everything not explicitly excluded defaults to the decay group.
    decay |= ((param_dict.keys() - no_decay) - special)
    inter_params = (decay & no_decay)
    union_params = (decay | no_decay)
    assert (len(inter_params) == 0), f'Parameters {str(inter_params)} made it into both decay/no_decay sets!'
    assert (len(((param_dict.keys() - special) - union_params)) == 0), f'parameters {str((param_dict.keys() - union_params))} were not separated into either decay/no_decay set!'
    if ((weight_decay == 0.0) or (not no_decay)):
        # Single group: either decay is globally zero or nothing opts out.
        param_groups = [{'params': [param_dict[pn] for pn in sorted(list((no_decay | decay)))], 'weight_decay': weight_decay}]
    else:
        param_groups = [{'params': [param_dict[pn] for pn in sorted(list(decay))], 'weight_decay': weight_decay}, {'params': [param_dict[pn] for pn in sorted(list(no_decay))], 'weight_decay': 0.0}]
    # One extra group per distinct _optim hyperparameter combination.
    hps = [dict(s) for s in set((frozenset(param_dict[pn]._optim.items()) for pn in special))]
    for hp in hps:
        params = [param_dict[pn] for pn in sorted(list(special)) if (param_dict[pn]._optim == hp)]
        param_groups.append({'params': params, **hp})
    return param_groups
def __get_format_len(f):
    """Return the element width in bytes for a GDMA or BD data format."""
    if isinstance(f, GDMAFormat):
        if f == GDMAFormat.FLOAT32:
            return 4
        if f in (GDMAFormat.INT16, GDMAFormat.FLOAT16):
            return 2
        # All remaining GDMA formats are byte-wide.
        return 1
    if f in (BDFormat.FP32, BDFormat.INT32):
        return 4
    if f in (BDFormat.FP16, BDFormat.INT16):
        return 2
    # All remaining BD formats are byte-wide.
    return 1
def apportion(v, default_ancestor, distance):
    """Resolve subtree overlap for node ``v`` against its left siblings.

    NOTE(review): this reads like the apportion step of the Buchheim/Walker
    tidy-tree layout algorithm — confirm against the rest of the layout code.
    It walks the inner contours of v's subtree and the left sibling's subtree
    level by level, shifting v's subtree right whenever they would collide,
    and finally installs threads so later traversals stay linear.
    """
    w = v.lbrother()
    if (w is not None):
        # vir/vor: inner/outer pointers on the right (v's) contour;
        # vil/vol: inner/outer pointers on the left (sibling's) contour.
        vir = vor = v
        vil = w
        vol = v.lmost_sibling
        # Accumulated mod sums along each of the four contours.
        sir = sor = v.mod
        sil = vil.mod
        sol = vol.mod
        while (vil.right() and vir.left()):
            vil = vil.right()
            vir = vir.left()
            vol = vol.left()
            vor = vor.right()
            vor.ancestor = v
            # Required shift so the contours stay `distance` apart.
            shift = (((vil.x + sil) - (vir.x + sir)) + distance)
            if (shift > 0):
                move_subtree(ancestor(vil, v, default_ancestor), v, shift)
                sir = (sir + shift)
                sor = (sor + shift)
            sil += vil.mod
            sir += vir.mod
            sol += vol.mod
            sor += vor.mod
        if (vil.right() and (not vor.right())):
            # Left subtree is deeper: thread the right outer contour into it.
            vor.thread = vil.right()
            vor.mod += (sil - sor)
        else:
            if (vir.left() and (not vol.left())):
                # Right subtree is deeper: thread the left outer contour.
                vol.thread = vir.left()
                vol.mod += (sir - sol)
            default_ancestor = v
    return default_ancestor
def update_config(cfg_old, cfg_new):
    """Overwrite attributes of ``cfg_old`` with matching entries of ``cfg_new``.

    Only keys that already exist on ``cfg_old`` are applied; unknown keys in
    ``cfg_new`` are silently ignored.  Returns the mutated ``cfg_old``.
    """
    known = cfg_old.__dict__
    for key, value in cfg_new.items():
        if key in known:
            setattr(cfg_old, key, value)
    return cfg_old
class MultiScaleCornerCrop(object):
    """Crop a PIL image at a center/corner position at a random scale, then
    resize it to a ``size`` x ``size`` square.

    ``randomize_parameters`` must be called before ``__call__`` to select
    ``self.scale`` and ``self.crop_position`` for the next invocation.
    """

    def __init__(self, scales, size, interpolation=Image.BILINEAR):
        self.scales = scales
        self.size = size
        self.interpolation = interpolation
        self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']

    def __call__(self, img, inv, flow):
        # `inv` and `flow` are accepted for pipeline compatibility but unused.
        min_length = min(img.size[0], img.size[1])
        crop_size = int(min_length * self.scale)
        image_width = img.size[0]
        image_height = img.size[1]
        if self.crop_position == 'c':
            center_x = image_width // 2
            center_y = image_height // 2
            box_half = crop_size // 2
            x1 = center_x - box_half
            y1 = center_y - box_half
            x2 = center_x + box_half
            y2 = center_y + box_half
        elif self.crop_position == 'tl':
            x1 = 0
            y1 = 0
            x2 = crop_size
            y2 = crop_size
        elif self.crop_position == 'tr':
            x1 = image_width - crop_size
            y1 = 0  # fixed off-by-one: the top-right crop starts at row 0, not 1
            x2 = image_width
            y2 = crop_size
        elif self.crop_position == 'bl':
            x1 = 0  # fixed off-by-one: the bottom-left crop starts at col 0, not 1
            y1 = image_height - crop_size
            x2 = crop_size
            y2 = image_height
        elif self.crop_position == 'br':
            x1 = image_width - crop_size
            y1 = image_height - crop_size
            x2 = image_width
            y2 = image_height
        img = img.crop((x1, y1, x2, y2))
        return img.resize((self.size, self.size), self.interpolation)

    def randomize_parameters(self):
        """Sample a new scale and crop position for subsequent calls."""
        self.scale = self.scales[random.randint(0, len(self.scales) - 1)]
        self.crop_position = self.crop_positions[random.randint(0, len(self.crop_positions) - 1)]
def max(x, axis=None, keepdims=False):
    """Backend wrapper: reduce ``x`` to its maximum over ``axis``.

    NOTE(review): ``keep_dims`` is the TensorFlow 1.x argument name (renamed
    ``keepdims`` in TF2); this wrapper targets the TF1 API.
    """
    axis = _normalize_axis(axis, get_ndim(x))
    return tf.reduce_max(x, axis=axis, keep_dims=keepdims)
def jit_type_of(arg):
    """Return the JIT type string for a declaration arg, memoized on the arg.

    A previously computed value stored under ``'jit_type'`` is returned
    directly; otherwise the type is derived from ``simple_type`` and the
    size/optional/annotation modifiers, then cached back onto ``arg``.
    """
    cached = arg.get('jit_type')
    if cached:
        return cached
    jit_type = TYPE_MAP[arg['simple_type']]
    if is_sized_intlist_arg(arg):
        jit_type = 'int[{}]'.format(arg['size'])
    jit_type = optional_type_of(arg, jit_type)
    jit_type = annotated_type_of(arg, jit_type)
    arg['jit_type'] = jit_type
    return jit_type
class PerceiverForSequenceClassification(metaclass=DummyObject):
    # Placeholder emitted when PyTorch is not installed: any attempt to
    # instantiate it raises an informative error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class sDMA_masked_select__reg(atomic_reg):
    """Bit-field layout of the sDMA masked-select command register (512 bits).

    NOTE(review): ``_fields_`` repeats the name ``reserved`` for every padding
    span, and the plain annotations below likewise repeat — later annotations
    simply shadow earlier ones; only ``_fields_`` defines the actual layout.
    """
    OP_NAME = 'sDMA_masked_select '
    # (name, type, bit-width) triples; total width is `length` = 512 bits.
    _fields_ = [('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('decompress_enable', ctypes.c_uint64, 1), ('cmd_id_en', ctypes.c_uint64, 4), ('cmd_id', ctypes.c_uint64, 20), ('Reserved', ctypes.c_uint64, 3), ('cmd_type', ctypes.c_uint64, 4), ('cmd_special_function(mask_mem)', ctypes.c_uint64, 3), ('fill_constant_en', ctypes.c_uint64, 1), ('src_data_format', ctypes.c_uint64, 3), ('mask_data_format', ctypes.c_uint64, 3), ('reserved', ctypes.c_uint64, 18), ('cmd_id_dep', ctypes.c_uint64, 20), ('reserved', ctypes.c_uint64, 12), ('src_nsize', ctypes.c_uint64, 16), ('src_csize', ctypes.c_uint64, 16), ('src_hsize', ctypes.c_uint64, 16), ('src_wsize', ctypes.c_uint64, 16), ('dst_nsize', ctypes.c_uint64, 16), ('dst_csize', ctypes.c_uint64, 16), ('dst_hsize', ctypes.c_uint64, 16), ('dst_wsize', ctypes.c_uint64, 16), ('src_start_addr_l32', ctypes.c_uint64, 32), ('src_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24), ('dst_start_addr_l32', ctypes.c_uint64, 32), ('dst_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24), ('mask_start_addr_l32', ctypes.c_uint64, 32), ('mask_start_addr_h8', ctypes.c_uint64, 32), ('reserved', ctypes.c_uint64, 32), ('reserved', ctypes.c_uint64, 32), ('reserved', ctypes.c_uint64, 32)]
    intr_en: int
    stride_enable: int
    nchw_copy: int
    cmd_short: int
    decompress_enable: int
    cmd_id_en: int
    cmd_id: int
    Reserved: int
    cmd_type: int
    cmd_special_function_mask_mem_: int
    fill_constant_en: int
    src_data_format: int
    mask_data_format: int
    reserved: int
    cmd_id_dep: int
    reserved: int
    src_nsize: int
    src_csize: int
    src_hsize: int
    src_wsize: int
    dst_nsize: int
    dst_csize: int
    dst_hsize: int
    dst_wsize: int
    src_start_addr_l32: int
    src_start_addr_h8: int
    reserved: int
    dst_start_addr_l32: int
    dst_start_addr_h8: int
    reserved: int
    mask_start_addr_l32: int
    mask_start_addr_h8: int
    reserved: int
    reserved: int
    reserved: int
    # Total register width in bits.
    length: int = 512
    def cmd_special_function_mask_mem_(self) -> int:
        # Accessor for the field whose raw name contains parentheses and so
        # cannot be a Python identifier.
        return self['cmd_special_function(mask_mem)']
class FacebookManagerCreatePost(VirtualFunctionTool):
    """Declarative spec for a virtual tool that creates a Facebook timeline post.

    Only metadata is defined here; execution is handled by the
    VirtualFunctionTool machinery.
    """
    name = 'FacebookManagerCreatePost'
    summary = "Create a new post on the user's timeline."
    # Input schema: required content plus optional media and privacy setting.
    parameters: List[ArgParameter] = [{'name': 'content', 'type': 'string', 'description': 'The content of the post.', 'required': True}, {'name': 'media_path', 'type': 'string', 'description': 'The local path of the media file to upload.', 'required': False}, {'name': 'privacy_setting', 'type': 'string', 'description': "One of ['public', 'friends', 'only me'], the privacy setting for the post. Default value is 'friends'.", 'required': False}]
    returns: List[ArgReturn] = [{'name': 'post_id', 'type': 'string', 'description': 'The unique identifier of the created post.'}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'content' parameter is empty or the 'privacy_setting' parameter is not one of ['public', 'friends', 'only me']."}]
def test_ListOffset_append():
    """A content builder captured before end_list() must keep appending into
    the most recently opened list, even across empty lists."""
    def f15(builder):
        # Capture the inner Numpy builder from the first list ...
        content = builder.begin_list()
        content.append(1.1)
        content.append(2.2)
        content.append(3.3)
        builder.end_list()
        builder.begin_list()
        builder.end_list()
        builder.begin_list()
        # ... and reuse it for the third list's contents.
        content.append(4.4)
        content.append(5.5)
        builder.end_list()
    builder = lb.ListOffset(np.int64, lb.Numpy(np.float64))
    assert (len(builder) == 0)
    f15(builder)
    # Three lists were opened: [1.1, 2.2, 3.3], [] and [4.4, 5.5].
    assert (len(builder) == 3)
    assert (ak.to_list(builder.snapshot()) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]])
def load_student_model(checkpoint_path, model):
    """Load a distilled-student checkpoint into ``model`` (strict).

    Strips the ``student_model.`` prefix from state-dict keys so the weights
    line up with a bare module, and returns the loaded model.
    """
    def _clean_state_dict(state):
        # Operate on the argument: the original closed over the outer `ckpt`
        # while ignoring its parameter.  Using pop-then-assign also fixes the
        # original silently DELETING keys that contain 'model' but lack the
        # 'student_model.' prefix (self-assign followed by `del`).
        for key in list(state.keys()):
            if 'model' in key:
                state[key.replace('student_model.', '')] = state.pop(key)
        return state

    ckpt = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']
    model.load_state_dict(_clean_state_dict(ckpt), strict=True)
    return model
class MsraNERPipe(_CNNERPipe):
    """Pipe for the MSRA Chinese NER dataset: load, then apply the shared
    Chinese-NER processing from the base class."""
    def process_from_file(self, paths=None) -> DataBundle:
        # Delegate loading to the dataset-specific loader; `paths` of None
        # lets the loader fall back to its default location.
        data_bundle = MsraNERLoader().load(paths)
        return self.process(data_bundle)
class _WorkspaceCtx(object):
    """Context manager that switches to a named caffe2 workspace and restores
    the previously active one on exit.  A stack supports re-entrant use."""

    def __init__(self, workspace_id):
        self.workspace_id = workspace_id
        # LIFO of previously-active workspace names, one per nested entry.
        self.workspace_stack = []

    def __enter__(self):
        self.workspace_stack.append(workspace.CurrentWorkspace())
        workspace.SwitchWorkspace(self.workspace_id, create_if_missing=True)

    def __exit__(self, exc_type, exc_value, traceback):
        previous = self.workspace_stack.pop()
        workspace.SwitchWorkspace(previous, create_if_missing=True)
def test_read_sentences():
    """read_sentences must split the FBK sample into 20 round-trippable sentences."""
    with tempfile.TemporaryDirectory() as tempdir:
        raw_filename = os.path.join(tempdir, 'raw.tsv')
        with open(raw_filename, 'w') as fout:
            fout.write(FBK_SAMPLE)
        sentences = split_wikiner.read_sentences(raw_filename, 'utf-8')
        assert (len(sentences) == 20)
        # Re-serialize: tab-joined tokens, newline-joined lines, blank line
        # between sentences — must reproduce the original sample exactly.
        reassembled = '\n\n'.join(
            '\n'.join('\t'.join(word) for word in sent) for sent in sentences
        )
        assert (FBK_SAMPLE.strip() == reassembled)
class TrackingRenderer():
    """Renders one tracking frame (GT vs. predictions) to a PNG per timestamp."""
    def __init__(self, save_path):
        # Directory where the per-timestamp PNGs are written.
        self.save_path = save_path
        # Stable per-track colors, assigned lazily from the tracking id hash.
        self.id2color = {}
    def render(self, events: DataFrame, timestamp: int, frame_gt: List[TrackingBox], frame_pred: List[TrackingBox]) -> None:
        """Draw GT boxes (black), predictions (per-id color, red edges on ID
        switches) and the ego position, then save the figure."""
        print('Rendering {}'.format(timestamp))
        # Prediction ids involved in an identity switch at this frame.
        switches = events[(events.Type == 'SWITCH')]
        switch_ids = switches.HId.values
        (fig, ax) = plt.subplots()
        for b in frame_gt:
            color = 'k'
            box = Box(b.ego_translation, b.size, Quaternion(b.rotation), name=b.tracking_name, token=b.tracking_id)
            box.render(ax, view=np.eye(4), colors=(color, color, color), linewidth=1)
        for b in frame_pred:
            box = Box(b.ego_translation, b.size, Quaternion(b.rotation), name=b.tracking_name, token=b.tracking_id)
            if (b.tracking_id not in self.id2color.keys()):
                # Deterministic pseudo-random RGB derived from the track id.
                self.id2color[b.tracking_id] = ((float((hash((b.tracking_id + 'r')) % 256)) / 255), (float((hash((b.tracking_id + 'g')) % 256)) / 255), (float((hash((b.tracking_id + 'b')) % 256)) / 255))
            if (b.tracking_id in switch_ids):
                # Highlight ID switches with red front/back edges.
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=('r', 'r', color))
            else:
                color = self.id2color[b.tracking_id]
                box.render(ax, view=np.eye(4), colors=(color, color, color))
        # Ego vehicle marker at the origin; fixed 100m x 100m viewport.
        plt.scatter(0, 0, s=96, facecolors='none', edgecolors='k', marker='o')
        plt.xlim((- 50), 50)
        plt.ylim((- 50), 50)
        fig.savefig(os.path.join(self.save_path, '{}.png'.format(timestamp)))
        plt.close(fig)
# NOTE(review): the line below is the residue of a stripped Dash callback
# decorator (originally `@<app>.callback(Output(...), Input(...),
# prevent_initial_call=True)`); as written it is a syntax error — restore the
# decorator form before running.
(Output('the-toronto-star-graph', 'figure'), Input('stored-df-data', 'data'), prevent_initial_call=True)
def update_fig_7(jsonified_cleaned_data):
    # Rebuild The Toronto Star figure from the JSON-serialized dataframe
    # held in the dcc.Store component.
    df = pd.read_json(jsonified_cleaned_data, orient='split')
    return plot_lines(df, 'The Star')
def parse_dbpedia_entities(path='./predicates.txt'):
    """Extract lowercase labels from a file of DBpedia entity URIs.

    Each line is split on ';' and the last field dropped; the label is the
    final path segment of the remaining URI with any trailing '>' removed,
    lowercased. Labels are written one per line to 'predicates_labels.txt'
    in the current working directory.
    """
    with open(path, 'r') as infile, open('predicates_labels.txt', 'w') as out:
        for line in infile:
            fields = line.split(';')
            uri = ';'.join(fields[:-1])
            label = uri.strip('/').split('/')[-1].strip('>').lower()
            out.write('%s\n' % label)
class SubsetImageIDs():
    """Collects image ids from the VisPro train/val/test jsonlines files and
    writes them, one per line, to a text file under ``save_data_dir``.

    The helper methods below take no ``self``/``cls`` but were called as
    ``self.<helper>(...)``; without ``@staticmethod`` the instance was bound
    to their first positional parameter, raising a TypeError. The missing
    decorators are restored here.
    """

    def __init__(self, config):
        super().__init__()
        self.data_dir = config.data_dir            # where the VisPro jsonlines live
        self.save_data_dir = config.save_data_dir  # where the id list is written
        self.all_image_ids = []                    # accumulated across splits

    def extract_image_ids(self):
        """Read every VisPro split, collect its image ids, and write them out."""
        for data_type in ['train', 'val', 'test']:
            data_list = self._get_vispro_data(self.data_dir, data_type=data_type)
            for indx in range(len(data_list)):
                self.all_image_ids.append(self.image_id_from_path(json.loads(data_list[indx])['image_file']))
        vispro_image_id_path = self._vispro_img_ids_path(data_dir=self.save_data_dir)
        self.write_list_to_file(vispro_image_id_path, self.all_image_ids)

    @staticmethod
    def write_list_to_file(filepath, write_list):
        """Write each item of `write_list` on its own line to `filepath`."""
        with open(filepath, 'w') as file_handler:
            for item in write_list:
                file_handler.write('{}\n'.format(item))

    @staticmethod
    def inspect_dir(data_dir):
        """Print and return the entries directly under `data_dir`."""
        folder_list = glob.glob(os.path.join(data_dir, '*'))
        print(folder_list)
        print('Total files: ', len(folder_list))
        return folder_list

    @staticmethod
    def image_id_from_path(image_path):
        """COCO-style id: the 12 digits preceding the extension in the file name."""
        return int(image_path.split('/')[-1][-16:-4])

    @staticmethod
    def _get_vispro_data(data_dir: str, data_type: str) -> List:
        """Return the raw jsonlines of one VisPro split as a list of strings."""
        _path = f'{data_dir}/{data_type}.vispro.1.1.jsonlines'
        with open(_path, 'r') as file_handle:
            data_list = file_handle.readlines()
        return data_list

    @staticmethod
    def _get_json_path(data_dir: str, data_type: str, split: str = '1.0') -> str:
        """Path of a VisDial json file for `data_type` and `split`."""
        return f'{data_dir}/visdial_{split}_{data_type}.json'

    @staticmethod
    def _vispro_img_ids_path(data_dir: str, ext: str = 'txt') -> str:
        """Path of the output image-id list file."""
        return f'{data_dir}/vispro_image_ids.{ext}'
class OracleTeacher():
    """Curriculum teacher that slides a sampling window through task space.

    Every `step_rate` episodes the mean reward is compared against
    `reward_thr`; if it beats the threshold, the window advances by
    `window_step_vector` (clamped to [mins, maxs]).
    """

    def __init__(self, mins, maxs, window_step_vector, seed=None, reward_thr=230, step_rate=50):
        self.seed = seed
        if not seed:
            # No (or zero) seed supplied: draw one so runs stay reproducible.
            self.seed = np.random.randint(42, 424242)
        np.random.seed(self.seed)
        self.mins = np.array(mins, dtype=np.float32)
        self.maxs = np.array(maxs, dtype=np.float32)
        self.window_step_vector = window_step_vector
        self.reward_thr = reward_thr
        self.step_rate = step_rate
        # Window spans one sixth of each dimension's range.
        self.window_range = (self.maxs - self.mins) / 6
        # Start at the low end for dimensions stepping up, high end otherwise.
        self.window_pos = np.zeros(len(self.mins), dtype=np.float32)
        for dim, step in enumerate(self.window_step_vector):
            if step > 0:
                self.window_pos[dim] = self.mins[dim]
            else:
                self.window_pos[dim] = self.maxs[dim] - self.window_range[dim]
        self.train_reward_weights = []
        print('window range:{} \n position:{}\n step:{}\n'.format(self.window_range, self.window_pos, self.window_step_vector))

    def update(self, task, reward):
        """Record `reward`; every `step_rate` calls, advance the window if the
        running mean exceeded the threshold."""
        self.train_reward_weights.append(reward)
        if len(self.train_reward_weights) != self.step_rate:
            return
        mean_reward = np.mean(self.train_reward_weights)
        self.train_reward_weights = []
        if mean_reward > self.reward_thr:
            for dim, step in enumerate(self.window_step_vector):
                if step > 0:
                    # Step up, clamped so the window stays inside the range.
                    self.window_pos[dim] = min(self.window_pos[dim] + step, self.maxs[dim] - self.window_range[dim])
                else:
                    # Step down (or zero), clamped at the minimum.
                    self.window_pos[dim] = max(self.window_pos[dim] + step, self.mins[dim])
            print('mut stump: mean_ret:{} window_pos:({})'.format(mean_reward, self.window_pos))

    def sample_task(self):
        """Draw a task uniformly from the current window."""
        return np.random.uniform(self.window_pos, self.window_pos + self.window_range).astype(np.float32)

    def dump(self, dump_dict):
        """No teacher state to persist; return the dict unchanged."""
        return dump_dict
class PGGenerator(nn.Module):
    """Progressive-GAN generator.

    Grows from a 4x4 base up to ``resolution`` through ``net_level_max_``
    stages. ``net_status_`` selects between a fully 'stable' forward pass and
    a 'fadein' pass that blends the previous and current resolution outputs
    with weight ``net_alpha_``.

    The ``net_config`` property/setter pair had its decorators garbled in the
    source (``@property`` dropped, ``@net_config.setter`` mangled to
    ``_config.setter``, a runtime NameError); both are restored here.
    """

    def __init__(self, resolution, latent_size, final_channel=3, fmap_base=(2 ** 13), fmap_decay=1.0, fmap_max=(2 ** 9), is_tanh=False):
        super(PGGenerator, self).__init__()
        self.latent_size_ = latent_size
        self.is_tanh_ = is_tanh
        self.final_channel_ = final_channel
        self.fmap_base_ = fmap_base
        self.fmap_decay_ = fmap_decay
        self.fmap_max_ = fmap_max
        image_pyramid_ = int(np.log2(resolution))      # pyramid height
        self.resolution_ = (2 ** image_pyramid_)       # snap to power of two
        self.net_level_max_ = (image_pyramid_ - 1)     # number of growth stages
        self.lod_layers_ = nn.ModuleList()             # feature blocks per stage
        self.rgb_layers_ = nn.ModuleList()             # to-RGB heads per stage
        for level in range(self.net_level_max_):
            self._construct_by_level(level)
        self.net_level_ = self.net_level_max_
        self.net_status_ = 'stable'
        self.net_alpha_ = 1.0

    @property
    def net_config(self):
        """Current (level, status, alpha) triple."""
        return (self.net_level_, self.net_status_, self.net_alpha_)

    @net_config.setter
    def net_config(self, config_list):
        (self.net_level_, self.net_status_, self.net_alpha_) = config_list

    def forward(self, x):
        """Run the generator at the configured level/status.

        Raises:
            AttributeError: if ``net_status_`` is neither 'stable' nor 'fadein'.
        """
        if (self.net_status_ == 'stable'):
            cur_output_level = self.net_level_
            for cursor in range((self.net_level_ + 1)):
                x = self.lod_layers_[cursor](x)
            x = self.rgb_layers_[cur_output_level](x)
        elif (self.net_status_ == 'fadein'):
            # Blend the upsampled previous-resolution RGB with the current one.
            pre_output_level = (self.net_level_ - 1)
            cur_output_level = self.net_level_
            (pre_weight, cur_weight) = (self.net_alpha_, (1.0 - self.net_alpha_))
            output_cache = []
            for cursor in range((self.net_level_ + 1)):
                x = self.lod_layers_[cursor](x)
                if (cursor == pre_output_level):
                    output_cache.append(self.rgb_layers_[cursor](x))
                if (cursor == cur_output_level):
                    output_cache.append(self.rgb_layers_[cursor](x))
            x = ((HelpFunc.process_transition(output_cache[0], output_cache[1]) * pre_weight) + (output_cache[1] * cur_weight))
        else:
            raise AttributeError("Please set the net_status: ['stable', 'fadein']")
        return x

    def _construct_by_level(self, cursor):
        """Build the feature block and to-RGB head for one growth stage."""
        in_level = cursor
        out_level = (cursor + 1)
        (in_channels, out_channels) = map(self._get_channel_by_stage, (in_level, out_level))
        block_type = ('First' if (cursor == 0) else 'UpSample')
        self._create_block(in_channels, out_channels, block_type)
        self._create_block(out_channels, 3, 'ToRGB')

    def _create_block(self, in_channels, out_channels, block_type):
        """Append a block of `block_type` to the matching ModuleList.

        Raises:
            TypeError: for an unknown `block_type`.
        """
        block_cache = []
        if (block_type in ['First', 'UpSample']):
            if (block_type == 'First'):
                block_cache.append(PixelWiseNormLayer())
                # 4x4 base: a 4-kernel conv with padding 3 expands 1x1 -> 4x4.
                block_cache.append(nn.Conv2d(in_channels, out_channels, kernel_size=4, stride=1, padding=3, bias=False))
            if (block_type == 'UpSample'):
                block_cache.append(nn.Upsample(scale_factor=2, mode='nearest'))
                block_cache.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False))
            block_cache.append(EqualizedLearningRateLayer(block_cache[(- 1)]))
            block_cache.append(nn.LeakyReLU(negative_slope=0.2))
            block_cache.append(PixelWiseNormLayer())
            block_cache.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False))
            block_cache.append(EqualizedLearningRateLayer(block_cache[(- 1)]))
            block_cache.append(nn.LeakyReLU(negative_slope=0.2))
            block_cache.append(PixelWiseNormLayer())
            self.lod_layers_.append(nn.Sequential(*block_cache))
        elif (block_type == 'ToRGB'):
            block_cache.append(nn.Conv2d(in_channels, out_channels=3, kernel_size=1, stride=1, padding=0, bias=False))
            block_cache.append(EqualizedLearningRateLayer(block_cache[(- 1)]))
            if (self.is_tanh_ is True):
                block_cache.append(nn.Tanh())
            self.rgb_layers_.append(nn.Sequential(*block_cache))
        else:
            raise TypeError("'block_type' must in ['First', 'UpSample', 'ToRGB']")

    def _get_channel_by_stage(self, level):
        """Feature-map count for a stage: fmap_base decayed by level, capped."""
        return min(int((self.fmap_base_ / (2.0 ** (level * self.fmap_decay_)))), self.fmap_max_)
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Fixed learning-rate schedule with optional linear warmup and
    post-`force_anneal` geometric shrinking.

    NOTE(review): the registration decorator was garbled in the source
    (`_lr_scheduler('fixed')`); restored as the fairseq convention
    ``@register_lr_scheduler`` — confirm the imported name in this module.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # Normalize a missing/None attribute to 0 warmup updates.
        args.warmup_updates = (getattr(args, 'warmup_updates', 0) or 0)
        self.lr = args.lr[0]
        if (args.warmup_updates > 0):
            # Scale LR linearly from lr/warmup_updates up to lr.
            self.warmup_factor = (1.0 / args.warmup_updates)
        else:
            self.warmup_factor = 1

    @staticmethod
    def add_args(parser):
        """Add scheduler-specific CLI arguments.

        Restored the missing ``@staticmethod``: without it, calls through the
        class bound `parser` to the first slot correctly, but instance calls
        would have passed the instance as `parser`.
        """
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')

    def get_next_lr(self, epoch):
        """LR for `epoch`: indexed from args.lr before `force_anneal`, then
        shrunk geometrically by `lr_shrink` per epoch past it."""
        lrs = self.args.lr
        if ((self.args.force_anneal is None) or (epoch < self.args.force_anneal)):
            next_lr = lrs[min(epoch, (len(lrs) - 1))]
        else:
            next_lr = (lrs[(- 1)] * (self.args.lr_shrink ** ((epoch + 1) - self.args.force_anneal)))
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of an epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr((self.warmup_factor * self.lr))
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each optimizer step (warmup)."""
        if ((self.args.warmup_updates > 0) and (num_updates < self.args.warmup_updates)):
            self.warmup_factor = ((num_updates + 1) / float(self.args.warmup_updates))
            self.optimizer.set_lr((self.warmup_factor * self.lr))
        return self.optimizer.get_lr()
def get_non_error_tasks(sessions):
    """Return the unique (question, answer) pairs from error-free sessions.

    Order of the returned list follows set iteration order (unspecified).
    """
    unique_pairs = {
        (sess['question'], sess['answer'])
        for sess in sessions
        if not sess['error']
    }
    return list(unique_pairs)
def silhouette(data, labels, precomp_dict, metric='sqeuclidean'):
    """Silhouette score for `labels`, reusing a precomputed distance matrix
    from `precomp_dict` (key 'dists_<metric>') when one is available."""
    cache_key = f'dists_{metric}'
    if cache_key in precomp_dict:
        return silhouette_score(precomp_dict[cache_key], labels, metric='precomputed')
    return silhouette_score(data, labels, metric=metric)
class Registry():
    """Class-level registry mapping task names to TAPETaskSpec objects and
    metric names to callables.

    Every method operates on class state through its first parameter `cls`
    but was missing its ``@classmethod`` decorator, so calls through the
    class or an instance were broken; the decorators are restored here.
    """

    task_name_mapping: Dict[(str, TAPETaskSpec)] = {}
    metric_name_mapping: Dict[(str, Callable)] = {}

    @classmethod
    def register_task(cls, task_name: str, num_labels: int = (- 1), dataset: Optional[Type[Dataset]] = None, models: Optional[Dict[(str, Type[ProteinModel])]] = None):
        """Register a task; usable directly or as a dataset-class decorator."""
        if (dataset is not None):
            if (models is None):
                models = {}
            task_spec = TAPETaskSpec(task_name, dataset, num_labels, models)
            return cls.register_task_spec(task_name, task_spec).dataset
        # Decorator form: the dataset class is supplied later.
        return (lambda dataset: cls.register_task(task_name, num_labels, dataset, models))

    @classmethod
    def register_task_spec(cls, task_name: str, task_spec: Optional[TAPETaskSpec] = None):
        """Register a TAPETaskSpec (directly or as a decorator).

        Raises:
            KeyError: if `task_name` is already registered.
        """
        if (task_spec is not None):
            if (task_name in cls.task_name_mapping):
                raise KeyError(f"A task with name '{task_name}' is already registered")
            cls.task_name_mapping[task_name] = task_spec
            return task_spec
        return (lambda task_spec: cls.register_task_spec(task_name, task_spec))

    @classmethod
    def register_task_model(cls, task_name: str, model_name: str, model_cls: Optional[Type[ProteinModel]] = None):
        """Register a model class under an already-registered task.

        Raises:
            KeyError: if the task itself is not registered yet.
        """
        if (task_name not in cls.task_name_mapping):
            raise KeyError(f'Tried to register a task model for an unregistered task: {task_name}. Make sure to register the task {task_name} first.')
        return cls.task_name_mapping[task_name].register_model(model_name, model_cls)

    @classmethod
    def register_metric(cls, name: str) -> Callable[[Callable], Callable]:
        """Decorator registering a metric callable under `name`."""
        def wrap(fn: Callable) -> Callable:
            assert callable(fn), 'All metrics must be callable'
            cls.metric_name_mapping[name] = fn
            return fn
        return wrap

    @classmethod
    def get_task_spec(cls, name: str) -> TAPETaskSpec:
        """Look up a registered task spec by name."""
        return cls.task_name_mapping[name]

    @classmethod
    def get_metric(cls, name: str) -> Callable:
        """Look up a registered metric by name."""
        return cls.metric_name_mapping[name]

    @classmethod
    def get_task_model(cls, model_name: str, task_name: str, config_file: Optional[PathType] = None, load_dir: Optional[PathType] = None) -> ProteinModel:
        """Instantiate (or load from `load_dir`) the model registered for a task."""
        # Was `registry.get_task_spec(...)` (module-level alias); `cls` is
        # equivalent and keeps the class self-contained.
        task_spec = cls.get_task_spec(task_name)
        model_cls = task_spec.get_model(model_name)
        if (load_dir is not None):
            model = model_cls.from_pretrained(load_dir, num_labels=task_spec.num_labels)
        else:
            config_class = model_cls.config_class
            if (config_file is not None):
                config = config_class.from_json_file(config_file)
            else:
                config = config_class()
            config.num_labels = task_spec.num_labels
            model = model_cls(config)
        return model
import functools


@functools.lru_cache(maxsize=100000)
def rgamma_cached(x, dps):
    """Reciprocal gamma 1/Gamma(x), evaluated by mpmath at `dps` decimal digits.

    Results are memoized on (x, dps). NOTE(review): the decorator was garbled
    in the source (`_cache(maxsize=100000)`); restored as an LRU cache via
    functools — confirm against the module's original import of `lru_cache`.
    """
    with mp.workdps(dps):
        return mp.rgamma(x)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    """TensorFlow generation integration tests.

    NOTE(review): several decorators were garbled in the source (`_tf`,
    `_tensorflow_text`, and bare `(input_signature=...)` tuples, which are
    syntax errors); restored as the transformers conventions ``@require_tf``,
    ``@require_tensorflow_text`` and ``@tf.function`` — confirm import names.
    """

    if is_tf_available():
        framework_dependent_parameters = {'AutoModelForCausalLM': TFAutoModelForCausalLM, 'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeq2Seq, 'AutoModelForSeq2SeqLM': TFAutoModelForSeq2SeqLM, 'AutoModelForVision2Seq': TFAutoModelForVision2Seq, 'LogitsProcessorList': TFLogitsProcessorList, 'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor, 'create_tensor_fn': tf.convert_to_tensor, 'floats_tensor': floats_tensor, 'return_tensors': 'tf'}

    def test_generate_tf_function_export_fixed_input_length(self):
        """generate() exported through tf.function with a fixed input length
        must match eager generate() for every batch size."""
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(input_signature=(tf.TensorSpec((None, input_length), tf.int32, name='input_ids'), tf.TensorSpec((None, input_length), tf.int32, name='attention_mask')), jit_compile=True)
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True)
                return {'sequences': outputs['sequences']}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={'serving_default': dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures['serving_default']
            for batch_size in range(1, (len(dummy_input_ids) + 1)):
                inputs = {'input_ids': tf.constant(dummy_input_ids[:batch_size]), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size])}
                tf_func_outputs = serving_func(**inputs)['sequences']
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    def test_generate_tf_function_export_fixed_batch_size(self):
        """generate() exported through tf.function with a fixed batch size
        must match eager generate() for every input length."""
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(input_signature=(tf.TensorSpec((batch_size, None), tf.int32, name='input_ids'), tf.TensorSpec((batch_size, None), tf.int32, name='attention_mask')), jit_compile=True)
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True)
                return {'sequences': outputs['sequences']}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={'serving_default': dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures['serving_default']
            for input_row in range(len(dummy_input_ids)):
                inputs = {'input_ids': tf.constant([dummy_input_ids[input_row]]), 'attention_mask': tf.constant([dummy_attention_masks[input_row]])}
                tf_func_outputs = serving_func(**inputs)['sequences']
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        """A model plus in-graph SentencePiece tokenizer must be savable as a
        Keras model."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            hf_hub_download(repo_id='google/flan-t5-small', filename='spiece.model', local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(model=tf.io.gfile.GFile(os.path.join(tmp_dir, 'spiece.model'), 'rb').read())
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    (input_ids, attention_mask) = text.pad_model_inputs(tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id)
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='inputs')
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        """An int eos_token_id and a list containing it must stop sampling at
        the same (seeded) length."""
        generation_kwargs = {'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 10, 'temperature': 0.7}
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        text = 'Hello, my dog is cute and'
        tokens = tokenizer(text, return_tensors='tf')
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        eos_token_id = 638
        # Forced to CPU to keep the seeded sampling deterministic.
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue((expectation == len(generated_tokens[0])))
        eos_token_id = [638, 198]
        with tf.device(':/CPU:0'):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue((expectation == len(generated_tokens[0])))

    def test_model_kwarg_encoder_signature_filtering(self):
        """Extra model kwargs are accepted when the model's call signature
        takes them, and rejected when the (replaced) encoder's does not."""
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart')
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article, return_tensors='tf').input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart')
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart')
        fake_output = bart_model.generate(input_ids, foo='bar').numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output is random-ish, no check).
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # The encoder does not accept `foo`, so it must be rejected.
            bart_model.generate(input_ids, foo='bar')
def process_in_chunks(function, *args, batch_size, out=None, **kwargs):
    """Apply `function` to `args` in slices of `batch_size` along dim 0.

    The output buffer is allocated from the first chunk's result (dtype,
    device, layout) unless `out` is supplied; extra keyword arguments are
    forwarded to torch.zeros during that allocation. Returns `out`.
    """
    total = args[0].shape[0]
    # First chunk doubles as a probe for the output shape/dtype.
    head = function(*(a[0:batch_size] for a in args))
    if out is None:
        out = torch.zeros(total, *head.shape[1:], dtype=head.dtype, device=head.device, layout=head.layout, **kwargs)
    out[0:batch_size] = head
    start = batch_size
    while start < total:
        chunk = slice(start, min(start + batch_size, total))
        out[chunk] = function(*(a[chunk] for a in args))
        start += batch_size
    return out
class UsageStatsStatus(Enum):
    """Tri-state for how the usage-stats setting was determined: explicitly
    enabled, explicitly disabled, or enabled because no explicit choice was
    made and the default applies."""
    ENABLED_EXPLICITLY = auto()
    DISABLED_EXPLICITLY = auto()
    ENABLED_BY_DEFAULT = auto()
def add_preprocess_args(parser):
    """Add the 'Preprocessing' argument group to `parser` and return the parser.

    Covers language pair selection, train/valid/test file prefixes,
    dictionary reuse/thresholds, and worker/output options.
    """
    group = parser.add_argument_group('Preprocessing')
    group.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    group.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    group.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix (also used to build dictionaries)')
    group.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes (words missing from train set are replaced with <unk>)')
    group.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes (words missing from train set are replaced with <unk>)')
    group.add_argument('--align-suffix', metavar='FP', default=None, help='alignment file suffix')
    group.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
    group.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    group.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
    group.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
    group.add_argument('--nwordstgt', metavar='N', default=(- 1), type=int, help='number of target words to retain')
    group.add_argument('--nwordssrc', metavar='N', default=(- 1), type=int, help='number of source words to retain')
    group.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
    # Consistency fix: was `parser.add_argument`, which placed this single
    # option outside the 'Preprocessing' help group all its siblings use.
    group.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation')
    group.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
    group.add_argument('--only-source', action='store_true', help='Only process the source language')
    group.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N')
    group.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
    group.add_argument('--dict-only', action='store_true', help='if true, only builds a dictionary and then exits')
    return parser
class CharDecoder(Decoder):
    """Decoder over character ids: maps ids through the target word map and
    turns '_' separators back into spaces."""

    def decode(self, trg_sentence):
        """Convert a sequence of character ids into a readable string."""
        chars = (trg_wmap.get(c, '<UNK>') for c in trg_sentence)
        return ''.join(chars).replace('_', ' ')
def homogeneous_symmetric_function(j, x):
    """Complete homogeneous symmetric function h_j evaluated at `x`:
    the sum of all degree-j monomials in the entries of `x`."""
    from sage.combinat.integer_vector import IntegerVectors
    from sage.misc.misc_c import prod
    total = 0
    # One monomial per way of distributing total degree j over len(x) slots.
    for exponents in IntegerVectors(j, length=len(x)):
        total += prod(base ** power for (base, power) in zip(x, exponents))
    return total
def datetimes_to_dataset(times, dst_file):
    """Group datetimes by calendar day, convert each to hours since midnight,
    and save the result to `dst_file` as an .npz archive.

    Consecutive duplicate timestamps within a day are dropped. NOTE(review):
    days of unequal length form a ragged structure; np.savez on such input
    may require dtype=object wrapping with recent NumPy — confirm callers.
    """
    # Partition into per-day lists, skipping repeats of the previous entry.
    days = [[times[0]]]
    current_day = days[0]
    for t in times[1:]:
        if t.date() != current_day[0].date():
            current_day = []
            days.append(current_day)
        if current_day and t == current_day[-1]:
            continue
        current_day.append(t)
    # Replace each day's datetimes with sorted hours since that midnight.
    for i, day in enumerate(days):
        midnight = datetime.combine(day[0].date(), datetime.min.time())
        days[i] = np.sort(np.array([(t - midnight).total_seconds() / 3600 for t in day]))
    mean_number_items = np.mean([len(day) for day in days])
    max_time = 24
    np.savez(dst_file, arrival_times=days, nll=np.zeros(len(days)), t_max=max_time, mean_number_items=mean_number_items)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.