code stringlengths 101 5.91M |
|---|
class AutoModelForSequenceClassification(object):
    """Factory that maps a pretrained-model identifier to the matching
    *ForSequenceClassification class by substring of the model name.

    Not instantiable: use
    ``AutoModelForSequenceClassification.from_pretrained(name)``.
    """

    def __init__(self):
        # BUGFIX: the original message named AutoModelWithLMHead (copy-paste
        # from a sibling factory); it now names this class.
        raise EnvironmentError('AutoModelForSequenceClassification is designed to be instantiated using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate the concrete model class inferred from the identifier.

        BUGFIX: restored the ``@classmethod`` decorator — without it, calling
        on the class bound the path string to ``cls`` and raised a TypeError.

        Note the check order matters: 'distilbert' and 'roberta' must be
        tested before the substring 'bert'.
        """
        if ('distilbert' in pretrained_model_name_or_path):
            return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('roberta' in pretrained_model_name_or_path):
            return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('bert' in pretrained_model_name_or_path):
            return BertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlnet' in pretrained_model_name_or_path):
            return XLNetForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlm' in pretrained_model_name_or_path):
            return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path))
def execute_shifts(v):
    """Apply accumulated subtree shifts to v's children (Buchheim layout).

    Walks the children right-to-left, moving each child's x/mod by the
    running shift while aggregating every node's change/shift contribution.
    Mutates the children in place; returns None.
    """
    running_shift = 0
    running_change = 0
    for child in reversed(v.children):
        child.x += running_shift
        child.mod += running_shift
        running_change += child.change
        running_shift += child.shift + running_change
class hyperparams(object):
    """Training configuration and loss bookkeeping for the
    'Correct_Roll2MidiNet' experiment."""

    def __init__(self):
        # Training schedule.
        self.train_epoch = 300
        self.test_freq = 1
        self.exp_name = 'Correct_Roll2MidiNet'
        # Input dimensions: channels x h x w.
        self.channels = 1
        self.h = 51
        self.w = 100
        # Generator/discriminator loss traces, per iteration and per epoch,
        # for both train and test phases; each starts as an empty list.
        for trace in ('iter_train_g_loss', 'iter_train_d_loss',
                      'iter_test_g_loss', 'iter_test_d_loss',
                      'g_loss_history', 'd_loss_history',
                      'test_g_loss_history', 'test_d_loss_history'):
            setattr(self, trace, [])
        # Best-checkpoint tracking.
        self.best_loss = 0.0
        self.best_epoch = 0
def GetTriadParticip(tspec, *args):
    """Dispatch GetTriadParticip to the wrapper for tspec's exact graph type.

    Auto-generated (SWIG/SNAP-style) dispatcher: checks use exact type
    equality (not isinstance) and are evaluated in declaration order, so
    later type names need not exist unless reached.
    """
    if (type(tspec) == PUNGraph):
        return GetTriadParticip_PUNGraph(tspec, *args)
    if (type(tspec) == PUndirNet):
        return GetTriadParticip_PUndirNet(tspec, *args)
    if (type(tspec) == PDirNet):
        return GetTriadParticip_PDirNet(tspec, *args)
    if (type(tspec) == PNGraph):
        return GetTriadParticip_PNGraph(tspec, *args)
    if (type(tspec) == PNEANet):
        return GetTriadParticip_PNEANet(tspec, *args)
    if (type(tspec) == PNGraphMP):
        return GetTriadParticip_PNGraphMP(tspec, *args)
    if (type(tspec) == PNEANetMP):
        return GetTriadParticip_PNEANetMP(tspec, *args)
    # No supported graph type matched.
    raise TypeError('First argument has invalid type')
def memory_usage_hooks() -> HookedMemoryUsage:
    """Yield a HookedMemoryUsage whose forward/backward counters accumulate
    the byte size of every tensor saved (pack) and restored (unpack) by
    autograd while the context is active.

    Intended for use as a context manager (generator with a single yield).
    """
    usage = HookedMemoryUsage()

    def _accumulate(current, ten):
        # Byte footprint of the tensor's elements; `current` may start as a
        # falsy sentinel, which counts as zero.
        return (current or 0) + ten.numel() * ten.element_size()

    def pack(ten: T.Tensor) -> Any:
        usage.forward = _accumulate(usage.forward, ten)
        return ten

    def unpack(ten: T.Tensor) -> T.Tensor:
        usage.backward = _accumulate(usage.backward, ten)
        return ten

    with T.autograd.graph.saved_tensors_hooks(pack, unpack):
        yield usage
def test_na_writable_attributes_deletion():
    """Deleting 'payload' or 'dtype' from an np.NA value must raise
    AttributeError (the attributes are not deletable)."""
    na_value = np.NA(2)
    for attr_name in ('payload', 'dtype'):
        assert_raises(AttributeError, delattr, na_value, attr_name)
def env_loader(env_name: str, dataset_dir: str, data_percentage: int=100, batch_size: int=8, trajectory_length: int=1, **_: Any) -> Tuple[(dm_env.Environment, tf.data.Dataset)]:
    """Build a bsuite environment and a replay-buffer iterator over its
    offline dataset.

    The dataset is loaded in one shot (up to _LOAD_SIZE transitions) into a
    UniformBuffer; the returned tuple is (environment, buffer iterator).

    NOTE(review): despite the annotation, the second element returned is a
    UniformBuffer, not a tf.data.Dataset — confirm the intended contract.
    """
    data_name = env_name
    if (env_name not in _ENV_FACTORY):
        # Names like 'foo_0' carry a setting suffix; strip the last segment
        # to recover the factory key.
        _env_setting = env_name.split('_')
        if (len(_env_setting) > 1):
            env_name = '_'.join(_env_setting[:(- 1)])
        assert (env_name in _ENV_FACTORY), f'env {env_name} not supported'
    # The dataset path keeps the full (unstripped) name.
    dataset_name = os.path.join(dataset_dir, f'{data_name}')
    print(dataset_name)
    dataset = create_bsuite_ds_loader(env_name, dataset_name, data_percentage)
    # Pull the entire dataset as a single numpy batch.
    dataloader = dataset.batch(int(_LOAD_SIZE)).as_numpy_iterator()
    data = next(dataloader)
    data_buffer = {}
    data_buffer['observation'] = data['observation']
    data_buffer['reward'] = data['reward']
    data_buffer['is_first'] = data['is_first']
    data_buffer['is_last'] = data['is_last']
    data_buffer['action'] = data['action']
    timesteps = ActorOutput(**data_buffer)
    data_size = len(timesteps.reward)
    # If this fires, the single batch above truncated the dataset.
    assert (data_size < _LOAD_SIZE)
    iterator = UniformBuffer(0, data_size, trajectory_length, batch_size)
    logging.info(f'[Data] {data_size} transitions totally.')
    iterator.init_storage(timesteps)
    return (_ENV_FACTORY[env_name][0](), iterator)
class OzaBaggingClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
    """Online (Oza) bagging ensemble for streaming classification.

    Each incoming sample is presented to every ensemble member k times,
    where k ~ Poisson(1) — the online analogue of bootstrap resampling.
    """

    def __init__(self, base_estimator=KNNADWINClassifier(), n_estimators=10, random_state=None):
        super().__init__()
        # Internal state; populated by __configure().
        self.ensemble = None
        self.actual_n_estimators = None
        self.classes = None
        self._random_state = None
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.random_state = random_state
        self.__configure()

    def __configure(self):
        """(Re)build the ensemble as deep copies of a reset base estimator."""
        if hasattr(self.base_estimator, 'reset'):
            self.base_estimator.reset()
        self.actual_n_estimators = self.n_estimators
        self.ensemble = [cp.deepcopy(self.base_estimator) for _ in range(self.actual_n_estimators)]
        self._random_state = check_random_state(self.random_state)

    def reset(self):
        """Reset the ensemble to its initial, untrained configuration."""
        self.__configure()
        return self

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incrementally fit on a batch; `classes` is required on first call
        and must not change afterwards. Returns self."""
        if (self.classes is None):
            if (classes is None):
                raise ValueError('The first partial_fit call should pass all the classes.')
            else:
                self.classes = classes
        if ((self.classes is not None) and (classes is not None)):
            if (set(self.classes) == set(classes)):
                pass
            else:
                # NOTE(review): message is missing a space ('functiondiffer')
                # — looks like a broken implicit string concatenation upstream.
                raise ValueError('The classes passed to the partial_fit functiondiffer from those passed earlier.')
        self.__adjust_ensemble_size()
        (r, _) = get_dimensions(X)
        for j in range(r):
            for i in range(self.actual_n_estimators):
                # Poisson(1) draw = number of times member i sees sample j.
                k = self._random_state.poisson()
                if (k > 0):
                    for b in range(k):
                        self.ensemble[i].partial_fit([X[j]], [y[j]], classes, sample_weight)
        return self

    def __adjust_ensemble_size(self):
        # Grow the ensemble so it has at least one member per known class.
        if (len(self.classes) != len(self.ensemble)):
            if (len(self.classes) > len(self.ensemble)):
                for i in range(len(self.ensemble), len(self.classes)):
                    self.ensemble.append(cp.deepcopy(self.base_estimator))
                    self.actual_n_estimators += 1

    def predict(self, X):
        """Return the arg-max class per sample from predict_proba, or None
        if probabilities are unavailable."""
        (r, c) = get_dimensions(X)
        proba = self.predict_proba(X)
        predictions = []
        if (proba is None):
            return None
        for i in range(r):
            predictions.append(np.argmax(proba[i]))
        return np.asarray(predictions)

    def predict_proba(self, X):
        """Return normalized per-class probabilities averaged over members.

        Falls back to a zero matrix of shape (r, 1) when any member cannot
        produce compatible probabilities (ValueError/TypeError).
        """
        proba = []
        (r, c) = get_dimensions(X)
        try:
            for i in range(self.actual_n_estimators):
                partial_proba = self.ensemble[i].predict_proba(X)
                if (len(partial_proba[0]) > (max(self.classes) + 1)):
                    raise ValueError('The number of classes in the base learner is larger than in the ensemble.')
                # Lazily size the accumulator on the first member.
                if (len(proba) < 1):
                    for n in range(r):
                        proba.append([0.0 for _ in partial_proba[n]])
                for n in range(r):
                    for k in range(len(partial_proba[n])):
                        try:
                            proba[n][k] += partial_proba[n][k]
                        except IndexError:
                            # Member produced more classes than seen so far.
                            proba[n].append(partial_proba[n][k])
        except ValueError:
            return np.zeros((r, 1))
        except TypeError:
            return np.zeros((r, 1))
        # Normalize each row to sum to 1 (rows summing to 0 are left as-is).
        sum_proba = []
        for k in range(r):
            sum_proba.append(np.sum(proba[k]))
        aux = []
        for i in range(len(proba)):
            if (sum_proba[i] > 0.0):
                aux.append([(x / sum_proba[i]) for x in proba[i]])
            else:
                aux.append(proba[i])
        return np.asarray(aux)
def register_Ns3MmWaveNetDevice_methods(root_module, cls):
    """Register constructors and methods of ns3::MmWaveNetDevice on the
    pybindgen wrapper `cls`.

    Auto-generated (pybindgen) binding code — each add_method call mirrors a
    C++ method signature of the NetDevice interface; do not hand-edit
    signatures without regenerating.
    """
    cls.add_constructor([param('ns3::MmWaveNetDevice const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('DoSend', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    cls.add_method('GetPacketDestination', 'ns3::Ipv4Address', [param('ns3::Ptr< ns3::Packet >', 'packet')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    return
class SawyerShelfPlaceEnvV2(SawyerXYZEnv):
    """Sawyer task: pick an object from the table and place it on a shelf.

    NOTE(review): two decorators appeared as bare name statements in the
    original (which would raise NameError at class creation): `@property`
    on `model_name` and `@_assert_task_is_set` on `step`. Both are restored
    here — confirm against the upstream metaworld source.
    """

    def __init__(self):
        liftThresh = 0.04
        # Sampling bounds (x, y, z) for goal, hand, and object positions.
        goal_low = ((- 0.1), 0.8, 0.299)
        goal_high = (0.1, 0.9, 0.301)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.5, 0.019)
        obj_high = (0.1, 0.6, 0.021)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.6, 0.02]), 'obj_init_angle': 0.3, 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.goal = np.array([0.0, 0.85, 0.301], dtype=np.float32)
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.liftThresh = liftThresh
        self.max_path_length = 200
        self.num_resets = 0
        # Random-reset space samples (obj_pos, goal_pos) concatenated.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        """Path to the MuJoCo XML for this task (restored as a property so
        __init__ passes a path string, not a bound method)."""
        return full_v2_path_for('sawyer_xyz/sawyer_shelf_placing.xml')

    @_assert_task_is_set
    def step(self, action):
        """Advance one step and attach reward diagnostics to `info`."""
        ob = super().step(action)
        (reward, _, reachDist, pickRew, _, placingDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'pickRew': pickRew, 'epRew': reward, 'goalDist': placingDist, 'success': float((placingDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        return self.get_body_com('obj')

    def adjust_initObjPos(self, orig_init_pos):
        """Align the configured init position with the object geom's actual
        offset, keeping the body's current height.

        BUGFIX: the original computed
        ``get_body_com('obj')[:2] - get_body_com('obj')[:2]`` (always zero);
        upstream subtracts the geom position — TODO confirm geom name.
        """
        diff = (self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2])
        adjustedPos = (orig_init_pos[:2] + diff)
        return [adjustedPos[0], adjustedPos[1], self.get_body_com('obj')[(- 1)]]

    def reset_model(self):
        """Reset hand, object, shelf and goal; returns the first observation."""
        self._reset_hand()
        # Shelf sits 0.3 below the goal site.
        self.sim.model.body_pos[self.model.body_name2id('shelf')] = (self.goal.copy() - np.array([0, 0, 0.3]))
        self._target_pos = (self.sim.model.site_pos[self.model.site_name2id('goal')] + self.sim.model.body_pos[self.model.body_name2id('shelf')])
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.get_body_com('obj')[2]
        self.heightTarget = (self.objHeight + self.liftThresh)
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            # Re-sample until object and goal are at least 0.1 apart in xy.
            while (np.linalg.norm((goal_pos[:2] - goal_pos[(- 3):(- 1)])) < 0.1):
                goal_pos = self._get_state_rand_vec()
            base_shelf_pos = (goal_pos - np.array([0, 0, 0, 0, 0, 0.3]))
            self.obj_init_pos = np.concatenate((base_shelf_pos[:2], [self.obj_init_pos[(- 1)]]))
            self.sim.model.body_pos[self.model.body_name2id('shelf')] = base_shelf_pos[(- 3):]
            self._target_pos = (self.sim.model.site_pos[self.model.site_name2id('goal')] + self.sim.model.body_pos[self.model.body_name2id('shelf')])
        self._set_obj_xyz(self.obj_init_pos)
        # Worst-case placing distance used to scale the placing reward.
        self.maxPlacingDist = (np.linalg.norm((np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos))) + self.heightTarget)
        self.target_reward = ((1000 * self.maxPlacingDist) + (1000 * 2))
        self.num_resets += 1
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand()
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Return (reward, reachRew, reachDist, pickRew, placeRew, placingDist).

        Reward = reach shaping + pick bonus + placing shaping, following the
        standard metaworld v2 hand-crafted reward structure.
        """
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        heightTarget = self.heightTarget
        placingGoal = self._target_pos
        reachDist = np.linalg.norm((objPos - fingerCOM))
        placingDist = np.linalg.norm((objPos - placingGoal))

        def reachReward():
            # Approach in xy first; penalize vertical motion until aligned.
            reachRew = (- reachDist)
            reachDistxy = np.linalg.norm((objPos[:(- 1)] - fingerCOM[:(- 1)]))
            zRew = np.linalg.norm((fingerCOM[(- 1)] - self.init_fingerCOM[(- 1)]))
            if (reachDistxy < 0.05):
                reachRew = (- reachDist)
            else:
                reachRew = ((- reachDistxy) - (2 * zRew))
            if (reachDist < 0.05):
                # Close enough: encourage closing the gripper.
                reachRew = ((- reachDist) + (max(actions[(- 1)], 0) / 50))
            return (reachRew, reachDist)

        def pickCompletionCriteria():
            tolerance = 0.01
            return (objPos[2] >= (heightTarget - tolerance))
        self.pickCompleted = pickCompletionCriteria()

        def objDropped():
            return ((objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02))

        def orig_pickReward():
            hScale = 100
            if (self.pickCompleted and (not objDropped())):
                return (hScale * heightTarget)
            elif ((reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005))):
                return (hScale * min(heightTarget, objPos[2]))
            else:
                return 0

        def placeReward():
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            cond = (self.pickCompleted and (reachDist < 0.1) and (not objDropped()))
            if cond:
                placeRew = ((1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp(((- (placingDist ** 2)) / c2)) + np.exp(((- (placingDist ** 2)) / c3)))))
                placeRew = max(placeRew, 0)
                return [placeRew, placingDist]
            else:
                return [0, placingDist]
        (reachRew, reachDist) = reachReward()
        pickRew = orig_pickReward()
        (placeRew, placingDist) = placeReward()
        assert ((placeRew >= 0) and (pickRew >= 0))
        reward = ((reachRew + pickRew) + placeRew)
        return [reward, reachRew, reachDist, pickRew, placeRew, placingDist]
def norm(input, p='fro', dim=None, keepdim=False, out=None, dtype=None):
    """Vector/matrix norm dispatcher (mirrors torch.functional.norm).

    Routes to _VF.frobenius_norm, _VF.nuclear_norm, or _VF.norm depending on
    `p` ('fro', 'nuc', or a numeric order) and on which of dim/out/dtype are
    supplied. Branch order is significant and intentionally mirrors the
    upstream implementation.
    """
    if (not torch.jit.is_scripting()):
        # Respect __torch_function__ overrides on tensor-like subclasses.
        if ((type(input) is not Tensor) and has_torch_function((input,))):
            return handle_torch_function(norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype)
    ndim = input.dim()
    # Fast paths: no dim/out/dtype given.
    if ((dim is None) and (out is None) and (dtype is None) and (p is not None)):
        if isinstance(p, str):
            if (p == 'fro'):
                return _VF.frobenius_norm(input, dim=(), keepdim=keepdim)
        if (not isinstance(p, str)):
            # Numeric p over all dimensions.
            _dim = [i for i in range(ndim)]
            return _VF.norm(input, p, dim=_dim, keepdim=keepdim)
    # Normalize `dim` to a list (or None) for the slow paths below.
    if (dim is not None):
        if isinstance(dim, int):
            _dim = [dim]
        else:
            _dim = dim
    else:
        _dim = None
    if isinstance(p, str):
        if (p == 'fro'):
            if (dtype is not None):
                raise ValueError('dtype argument is not supported in frobenius norm')
            if (_dim is None):
                _dim = [i for i in range(ndim)]
            if (out is None):
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim)
            else:
                return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out)
        elif (p == 'nuc'):
            if (dtype is not None):
                raise ValueError('dtype argument is not supported in nuclear norm')
            if (_dim is None):
                if (out is None):
                    return _VF.nuclear_norm(input, keepdim=keepdim)
                else:
                    return _VF.nuclear_norm(input, keepdim=keepdim, out=out)
            elif (out is None):
                return _VF.nuclear_norm(input, _dim, keepdim=keepdim)
            else:
                return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out)
        raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}")
    else:
        # Numeric p: dispatch on presence of out/dtype.
        if (_dim is None):
            _dim = [i for i in range(ndim)]
        if (out is None):
            if (dtype is None):
                return _VF.norm(input, p, _dim, keepdim=keepdim)
            else:
                return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype)
        elif (dtype is None):
            return _VF.norm(input, p, _dim, keepdim=keepdim, out=out)
        else:
            return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out)
def partition_list(vertices, workers):
    """Split `vertices` into at most `workers` contiguous chunks.

    Each chunk is a list of (index, vertex) pairs; every chunk has
    ceil(len(vertices) / workers) entries except possibly the last.
    Returns [] for empty input.
    """
    if not vertices:
        return []
    chunk_size = ((len(vertices) - 1) // workers) + 1  # ceil division
    indexed = list(enumerate(vertices))
    return [indexed[start:start + chunk_size]
            for start in range(0, len(indexed), chunk_size)]
def validate_control_flow_region(sdfg: 'dace.sdfg.SDFG', region: 'dace.sdfg.state.ControlFlowRegion', initialized_transients: Set[str], symbols: dict, references: Set[int]=None, **context: bool):
    """Validate one control-flow region of an SDFG.

    Walks the region depth-first from its start block, validating each state
    (or nested region, recursively) once, checking every inter-state edge for
    duplicate object references, undefined/invalid symbols, and reads of data
    containers that are inaccessible from host code. `symbols` is updated in
    place with symbols assigned on edges; `references` tracks object ids seen
    so far to detect aliased edges.
    """
    from dace.sdfg import SDFGState
    from dace.sdfg.scope import is_in_scope
    if ((len(region.source_nodes()) > 1) and (region.start_block is None)):
        raise InvalidSDFGError('Starting block undefined', sdfg, None)
    # Lazily computed: whether this SDFG sits in a default-schedule scope.
    in_default_scope = None
    start_block = region.start_block
    visited = set()
    visited_edges = set()
    # Pass 1: edges reachable from the start block, in DFS order.
    for edge in region.dfs_edges(start_block):
        visited_edges.add(edge)
        # Edge objects and their data must be unique across the SDFG.
        if (id(edge) in references):
            raise InvalidSDFGInterstateEdgeError(f'Duplicate inter-state edge object detected: "{edge}". Please copy objects rather than using multiple references to the same one', sdfg, region.edge_id(edge))
        references.add(id(edge))
        if (id(edge.data) in references):
            raise InvalidSDFGInterstateEdgeError(f'Duplicate inter-state edge object detected: "{edge}". Please copy objects rather than using multiple references to the same one', sdfg, region.edge_id(edge))
        references.add(id(edge.data))
        # Validate the source block the first time we touch it.
        if (edge.src not in visited):
            visited.add(edge.src)
            if isinstance(edge.src, SDFGState):
                validate_state(edge.src, region.node_id(edge.src), sdfg, symbols, initialized_transients, references, **context)
            else:
                validate_control_flow_region(sdfg, edge.src, initialized_transients, symbols, references, **context)
        # Every free symbol on the edge must already be defined.
        undef_syms = (set(edge.data.free_symbols) - set(symbols.keys()))
        if (len(undef_syms) > 0):
            eid = region.edge_id(edge)
            raise InvalidSDFGInterstateEdgeError(f'Undefined symbols in edge: {undef_syms}. Add those with `sdfg.add_symbol()` or define outside with `dace.symbol()`', sdfg, eid)
        issyms = edge.data.new_symbols(sdfg, symbols)
        if any(((not dtypes.validate_name(s)) for s in issyms)):
            invalid = next((s for s in issyms if (not dtypes.validate_name(s))))
            eid = region.edge_id(edge)
            raise InvalidSDFGInterstateEdgeError(('Invalid interstate symbol name %s' % invalid), sdfg, eid)
        # Reads on the edge run in host code: containers must be accessible.
        ise_memlets = edge.data.get_read_memlets(sdfg.arrays)
        for memlet in ise_memlets:
            container = memlet.data
            if (not _accessible(sdfg, container, context)):
                if (in_default_scope is None):
                    in_default_scope = False
                    if (sdfg.parent_nsdfg_node is not None):
                        if is_in_scope(sdfg.parent_sdfg, sdfg.parent, sdfg.parent_nsdfg_node, [dtypes.ScheduleType.Default]):
                            in_default_scope = True
                if (in_default_scope is False):
                    eid = region.edge_id(edge)
                    raise InvalidSDFGInterstateEdgeError(f'Trying to read an inaccessible data container "{container}" (Storage: {sdfg.arrays[container].storage}) in host code interstate edge', sdfg, eid)
        symbols.update(issyms)
        # Validate the destination block the first time we touch it.
        if (edge.dst not in visited):
            visited.add(edge.dst)
            if isinstance(edge.dst, SDFGState):
                validate_state(edge.dst, region.node_id(edge.dst), sdfg, symbols, initialized_transients, references, **context)
            else:
                validate_control_flow_region(sdfg, edge.dst, initialized_transients, symbols, references, **context)
    # The start block may have no edges at all; validate it explicitly.
    if (start_block not in visited):
        if isinstance(start_block, SDFGState):
            validate_state(start_block, region.node_id(start_block), sdfg, symbols, initialized_transients, references, **context)
        else:
            validate_control_flow_region(sdfg, start_block, initialized_transients, symbols, references, **context)
    # Pass 2: edges not reachable from the start block (dead branches).
    for (eid, edge) in enumerate(region.edges()):
        if (edge in visited_edges):
            continue
        if (id(edge) in references):
            raise InvalidSDFGInterstateEdgeError(f'Duplicate inter-state edge object detected: "{edge}". Please copy objects rather than using multiple references to the same one', sdfg, eid)
        references.add(id(edge))
        if (id(edge.data) in references):
            raise InvalidSDFGInterstateEdgeError(f'Duplicate inter-state edge object detected: "{edge}". Please copy objects rather than using multiple references to the same one', sdfg, eid)
        references.add(id(edge.data))
        issyms = edge.data.assignments.keys()
        if any(((not dtypes.validate_name(s)) for s in issyms)):
            invalid = next((s for s in issyms if (not dtypes.validate_name(s))))
            raise InvalidSDFGInterstateEdgeError(('Invalid interstate symbol name %s' % invalid), sdfg, eid)
        ise_memlets = edge.data.get_read_memlets(sdfg.arrays)
        for memlet in ise_memlets:
            container = memlet.data
            if (not _accessible(sdfg, container, context)):
                if (in_default_scope is None):
                    in_default_scope = False
                    if (sdfg.parent_nsdfg_node is not None):
                        if is_in_scope(sdfg.parent_sdfg, sdfg.parent, sdfg.parent_nsdfg_node, [dtypes.ScheduleType.Default]):
                            in_default_scope = True
                if (in_default_scope is False):
                    raise InvalidSDFGInterstateEdgeError(f'Trying to read an inaccessible data container "{container}" (Storage: {sdfg.arrays[container].storage}) in host code interstate edge', sdfg, eid)
class BertForMultipleChoice():
    """Placeholder emitted when PyTorch is unavailable: every entry point
    delegates to requires_pytorch, which reports the missing backend."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def main():
    """Parse CLI args, pick a torch device, then compute and print the FID
    between the two image paths."""
    args = parser.parse_args()
    if args.device is not None:
        # Honor an explicit device request.
        device = torch.device(args.device)
    else:
        # Fall back to CUDA when available, else CPU.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    fid_value = calculate_fid_given_paths(args.path, args.batch_size, device, args.dims)
    print('FID: ', fid_value)
def test_get_reuse_parameters(default_test_case):
    """_get_reuse_parameters should reuse an existing float statement for a
    required parameter and skip the one flagged optional."""
    # Two identical float statements available for reuse.
    float0 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
    float1 = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
    default_test_case.add_statement(float0)
    default_test_case.add_statement(float1)
    sign_mock = MagicMock(inspect.Signature)
    params = {'test0': default_test_case.test_cluster.type_system.convert_type_hint(float), 'test1': default_test_case.test_cluster.type_system.convert_type_hint(float)}
    inf_sig = InferredSignature(original_parameters=params, signature=sign_mock, type_system=default_test_case.test_cluster.type_system, original_return_type=default_test_case.test_cluster.type_system.convert_type_hint(None))
    # Force deterministic randomness so reuse always picks the first match.
    with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
        float_mock.return_value = 0.0
        with mock.patch('pynguin.testcase.testfactory.is_optional_parameter') as optional_mock:
            # test0 is required, test1 is optional (and therefore omitted).
            optional_mock.side_effect = [False, True]
            assert (tf.TestFactory._get_reuse_parameters(default_test_case, inf_sig, 1, {}) == {'test0': float0.ret_val})
class BatchLogs():
    """Accumulates per-batch metric values and collapses them to means."""

    def __init__(self):
        # Maps metric name -> list of recorded values (np arrays).
        self.metric_dict = {}

    def append(self, metrics, data):
        """Record one batch of plain numeric values, one per metric name.

        `data` is mutated in place: each entry is converted to np.array.
        """
        if not isinstance(metrics, list):
            sys.exit('Please specify a list of metrics to log')
        for idx, name in enumerate(metrics):
            data[idx] = np.array(data[idx])
            self.metric_dict.setdefault(name, []).append(data[idx])

    def append_tensor(self, metrics, data):
        """Record one batch of scalar tensors (detached to CPU scalars)."""
        if not isinstance(metrics, list):
            sys.exit('Please specify a list of metrics to log')
        for idx, name in enumerate(metrics):
            data[idx] = np.array(data[idx].detach().cpu().item())
            self.metric_dict.setdefault(name, []).append(data[idx])

    def flatten(self):
        """Collapse each metric's list of values into its mean (in place)."""
        for name in self.metric_dict:
            self.metric_dict[name] = np.mean(self.metric_dict[name])

    def fetch(self, metric):
        """Return the current value (list, or mean after flatten) of `metric`."""
        return self.metric_dict[metric]
def set_log_level(verbose, is_estimator):
    """Configure TensorFlow logging from a 0-3 verbosity level.

    TF2 (or non-estimator runs at verbose==1) use the TF2 logger, mapping
    verbose 0..3 to logging levels 40..10; otherwise verbose >= 2 enables
    the legacy tf.logging INFO level.
    """
    assert 0 <= verbose <= 3
    # Keep the original short-circuit order: the cheap flag check runs
    # before tf_is_version2().
    if ((not is_estimator) and verbose == 1) or tf_is_version2():
        tf.get_logger().setLevel(10 * (4 - verbose))
        return
    if verbose >= 2:
        tf.logging.set_verbosity(tf.logging.INFO)
def log_current_datetime():
    """Emit a separator line and the current wall-clock time at DEBUG level."""
    now = datetime.datetime.now()
    LOGGER.debug(SEP_STR)
    LOGGER.debug(f'Time of execution: {now}')
@_without_pywt
def test_calibrate_denoiser_extra_output():
    """calibrate_denoiser(extra_output=True) must rank parameters the same
    way a brute-force MSE sweep over the tested parameters does.

    BUGFIX: the bare `_without_pywt` statement before this function was a
    stripped decorator (it would raise NameError as written); restored as
    `@_without_pywt` — confirm against the upstream test module.
    """
    parameter_ranges = {'sigma': (np.linspace(0.1, 1, 5) / 2)}
    (_, (parameters_tested, losses)) = calibrate_denoiser(noisy_img, _denoise_wavelet, denoise_parameters=parameter_ranges, extra_output=True)
    # Brute-force: denoise with every tested parameter set and score vs GT.
    all_denoised = [denoise_invariant(noisy_img, _denoise_wavelet, denoiser_kwargs=denoiser_kwargs) for denoiser_kwargs in parameters_tested]
    ground_truth_losses = [mse(img, test_img) for img in all_denoised]
    assert_((np.argmin(losses) == np.argmin(ground_truth_losses)))
class TextBiLSTM(nn.Module):
def __init__(self, config):
super(TextBiLSTM, self).__init__()
self.num_classes = config['num_classes']
self.learning_rate = config['learning_rate']
self.dropout = config['dropout']
self.hidden_dims = config['hidden_dims']
self.rnn_layers = config['rnn_layers']
self.embedding_size = config['embedding_size']
self.bidirectional = config['bidirectional']
self.build_model()
self.init_weight()
def init_weight(net):
for (name, param) in net.named_parameters():
if ('ln' not in name):
if ('bias' in name):
nn.init.constant_(param, 0.0)
elif ('weight' in name):
nn.init.xavier_uniform_(param)
def build_model(self):
self.attention_layer = nn.Sequential(nn.Linear(self.hidden_dims, self.hidden_dims), nn.ReLU(inplace=True))
self.lstm_net = nn.LSTM(self.embedding_size, self.hidden_dims, num_layers=self.rnn_layers, dropout=self.dropout, bidirectional=self.bidirectional)
self.fc_out = nn.Sequential(nn.Linear(self.hidden_dims, self.hidden_dims), nn.ReLU(), nn.Dropout(self.dropout), nn.Linear(self.hidden_dims, self.num_classes), nn.Softmax(dim=1))
self.ln1 = nn.LayerNorm(self.embedding_size)
self.ln2 = nn.LayerNorm(self.hidden_dims)
def attention_net_with_w(self, lstm_out, lstm_hidden):
lstm_tmp_out = torch.chunk(lstm_out, 2, (- 1))
h = (lstm_tmp_out[0] + lstm_tmp_out[1])
lstm_hidden = torch.sum(lstm_hidden, dim=1)
lstm_hidden = lstm_hidden.unsqueeze(1)
atten_w = self.attention_layer(lstm_hidden)
m = nn.Tanh()(h)
atten_context = torch.bmm(atten_w, m.transpose(1, 2))
softmax_w = F.softmax(atten_context, dim=(- 1))
context = torch.bmm(softmax_w, h)
result = context.squeeze(1)
return result
def forward(self, x):
x = x.permute(1, 0, 2)
(output, (final_hidden_state, _)) = self.lstm_net(x)
output = output.permute(1, 0, 2)
final_hidden_state = final_hidden_state.permute(1, 0, 2)
atten_out = self.attention_net_with_w(output, final_hidden_state)
return self.fc_out(atten_out) |
def parse(exit_code, log, output):
    """Parse an analysis tool's exit code, log lines, and output archive into
    (findings, infos, errors, fails).

    Findings come from the CSV files inside the tar `output` when present,
    otherwise from indicator substrings in the log lines.
    """
    (findings, infos) = ([], set())
    (errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
    # Exit code 1 is expected for this tool; not an error by itself.
    errors.discard('EXIT_CODE_1')
    analysis_complete = set()
    for line in log:
        if (DEPRECATED in line):
            infos.add(DEPRECATED)
            continue
        if (CANNOT_OPEN_FACT_FILE in line):
            fails.add(CANNOT_OPEN_FACT_FILE)
            continue
        for indicator in ANALYSIS_COMPLETE:
            if (indicator in line):
                analysis_complete.add(indicator)
                break
    # All three completion indicators must be present for a full analysis.
    if (log and ((len(analysis_complete) < 3) or (CANNOT_OPEN_FACT_FILE in fails))):
        infos.add('analysis incomplete')
        if ((not fails) and (not errors)):
            fails.add('execution failed')
    # Prefer a more specific failure over the generic fact-file message.
    if ((CANNOT_OPEN_FACT_FILE in fails) and (len(fails) > 1)):
        fails.remove(CANNOT_OPEN_FACT_FILE)
    if output:
        try:
            with io.BytesIO(output) as o, tarfile.open(fileobj=o) as tar:
                for fn in tar.getnames():
                    if (not fn.endswith('.csv')):
                        continue
                    # CSV base name identifies the finding type; each line
                    # holds one hex address.
                    indicator = os.path.basename(fn)
                    try:
                        contents = tar.extractfile(fn).read()
                    except Exception as e:
                        fails.add(f'problem extracting {fn} from output archive: {e}')
                        continue
                    for line in contents.splitlines():
                        finding = {'name': MAP_FINDINGS[indicator], 'address': int(line.strip(), 16)}
                        findings.append(finding)
        except Exception as e:
            fails.add(f'error parsing results: {e}')
    else:
        # No archive: fall back to scanning log lines for finding indicators
        # (no addresses available in this mode).
        for line in log:
            for indicator in MAP_FINDINGS:
                if (indicator in line):
                    findings.append({'name': MAP_FINDINGS[indicator]})
                    break
    return (findings, infos, errors, fails)
def parse_line_ecir(line, query, user):
    """Parse one whitespace-separated ECIR result line.

    Returns (sub, rel, obj, val, rank, flag):
      * 5 tokens -> positive triple from the line itself, flag = 1
      * 3 tokens -> negative entry built from (query, user), flag = -1
      * anything else -> all-None sentinel with flag = 0
    The rank is the integer after '-' in the second token.
    """
    tokens = line.strip().split()
    if len(tokens) == 5:
        rank = int(tokens[1].split('-')[1])
        return (tokens[2], tokens[3], tokens[4], [1], rank, 1)
    if len(tokens) == 3:
        rank = int(tokens[1].split('-')[1])
        return (query, user, tokens[2], [(- 1)], rank, (- 1))
    return (None, None, None, None, None, 0)
def test_linfit():
    """Fit a 2-parameter linear model with mpfit and check params, errors,
    chi-square and degrees of freedom against known-good values."""
    x = N.array([(- 1.7237128), 1.8712276, (- 0.), (- 0.), 1.3416969, 1.3757038, (- 1.3703436), 0., (- 0.), 0.])
    y = N.array([0., 6.5807428, 1.4582725, 2.7270851, 5.5969253, 5.624928, 0.787615, 3.2599759, 2.9771762, 4.5936475])
    ey = (0.07 * N.ones(y.shape, dtype='float64'))
    p0 = N.array([1.0, 1.0], dtype='float64')
    pactual = N.array([3.2, 1.78])
    parbase = {'value': 0.0, 'fixed': 0, 'limited': [0, 0], 'limits': [0.0, 0.0]}
    # Build one parinfo entry per parameter, seeded with the starting value.
    parinfo = []
    for start_value in p0:
        entry = copy.deepcopy(parbase)
        entry['value'] = start_value
        parinfo.append(entry)
    fa = {'x': x, 'y': y, 'err': ey}
    m = mpfit(myfunctlin, p0, parinfo=parinfo, functkw=fa)
    if (m.status <= 0):
        print('error message = ', m.errmsg)
    assert N.allclose(m.params, N.array([3., (- 1.7709542)], dtype='float64'))
    assert N.allclose(m.perror, N.array([0., 0.], dtype='float64'))
    chisq = (myfunctlin(m.params, x=x, y=y, err=ey)[1] ** 2).sum()
    assert N.allclose(N.array([chisq], dtype='float64'), N.array([2.], dtype='float64'))
    assert (m.dof == 8)
    return
def WebDataset(urls, shardshuffle=True, cache_dir=default_cache_dir, cache_size=default_cache_size, cache_name=default_cache_name, cache_verbose=default_cache_verbose, splitter=split_by_worker, nodesplitter=True, handler=reraise_exception, length=None):
    """Build a WebDataset pipeline over tar shards:
    shard list -> url open -> (optional shard cache) -> tar expand -> group
    samples by key.

    NOTE(review): `nodesplitter=True` is a boolean default for a parameter
    that looks like it should be a splitting callable — confirm ShardList's
    contract before changing.
    """
    result = ShardList(urls, shuffle=shardshuffle, splitter=splitter, nodesplitter=nodesplitter, length=length)
    result = result.then(tariterators.url_opener, handler=handler)
    # Any non-empty cache_dir enables shard-level caching.
    if (cache_dir != ''):
        result = result.then(shardcache.cache_shards, cache_dir=cache_dir, cache_size=cache_size, cache_name=cache_name, verbose=cache_verbose)
    result = result.then(tariterators.tar_file_expander, length=None, handler=handler)
    result = result.then(tariterators.group_by_keys, length=length)
    return result
def ensure_2d_arguments(f, squeeze_ret=True):
    """Wrap `f` so 1-d Theano tensor args are promoted to 2-d (1 x n) rows.

    Only positional tensor arguments are promoted (kwargs pass through
    untouched). With squeeze_ret=True, tensor results (or each element of a
    list/tuple result) are squeezed back.

    BUGFIX: the bare `(f)` statement was a stripped `@wraps(f)` decorator
    (restored), and the RuntimeError message contained a `%i` placeholder
    with no formatting argument (now interpolates arg.ndim).
    """
    import functools

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        new_args = []
        for arg in args:
            if isinstance(arg, T.TensorVariable):
                if (arg.ndim == 1):
                    # Promote vector -> 1 x n row matrix.
                    arg = arg.dimshuffle('x', 0)
                elif (arg.ndim > 2):
                    raise RuntimeError("ensure_2d_arguments wrapped a function which received an %i-d argument. Don't know what to do." % arg.ndim)
            new_args.append(arg)
        ret = f(*new_args, **kwargs)
        if squeeze_ret:
            if isinstance(ret, (list, tuple)):
                ret = [ret_i.squeeze() for ret_i in ret]
            elif isinstance(ret, T.TensorVariable):
                ret = ret.squeeze()
        return ret
    return wrapped
def main():
    """Evaluate stored trajectories and stream per-trajectory results to JSONL."""
    trajs = DataLoader.from_args(args, return_mode='with_idx', item_name='trajectory')
    prefix = args.output_file_prefix or trajs.base_path
    output_path = f'{prefix}_eval{args.eval_results_out_suffix}_{args.eval_type}.jsonl'
    if args.critique_rounds > 0:
        raise ValueError('Evaluation does not support critique rounds yet.')
    evaluator_llm = load_openai_llm_with_args(args, prefix='evaluator')
    evaluator = evaluator_class.from_args(args, evaluator_llm)
    if evaluator._stop_at in ('preprocess', 'prompt'):
        # Debug mode: show the intermediate result for the first item and exit.
        preview = evaluator({'trajectory': trajs[0]['item']})
        print_intermediate_result_and_stop(preview, evaluator._stop_at)

    def evaluate_trajec(traj_with_idx):
        # Returns (failed_item_or_None, result); handing the item back on
        # failure lets the retry runner re-schedule it.
        traj_idx = traj_with_idx['idx']
        try:
            result = evaluator({'trajectory': traj_with_idx['item']})[0]
        except Exception as e:
            return (traj_with_idx, {'error': str(e)})
        result['eval_id'] = traj_idx
        return (None, result)

    runner = FuncExecutorWithRetry.from_args(args)
    runner.run(evaluate_trajec, output_path, trajs)
    print(f'You may want to use scripts to convert the result jsonl file {output_path} to json for easier reading.')
class ConvertLmConfig():
    """Settings for converting a trained LM checkpoint into an exportable model.

    NOTE(review): a class decorator (e.g. a dataclass decorator) is not visible
    in this chunk; the annotated-fields-with-defaults layout suggests one is
    applied at the definition site — confirm upstream.
    """
    checkpoint_path: str                       # source checkpoint to convert
    output_dir: str                            # where converted files are written
    upload_to_hf: Optional[RepoRef] = None     # optional HF repo to push results to
    model: LmConfig = Gpt2Config()             # architecture config (GPT-2 default)
    save_tokenizer: bool = True                # also emit tokenizer files
    tokenizer: str = 'gpt2'                    # tokenizer name/path to load
    override_vocab_size: Optional[int] = None  # force a vocab size if set
    config_overrides: Optional[dict] = None    # raw config field overrides

    # Fix: the original had a bare `_property` statement here (a NameError at
    # class-creation time) — a mangled `@property` decorator, restored below.
    @property
    def the_tokenizer(self):
        """Tokenizer instance loaded from `self.tokenizer`."""
        return load_tokenizer(self.tokenizer)
def getTrainMetricPerEpoch(train_metric, updates_per_epoch):
    """Average a per-update metric over each *complete* epoch.

    Trailing updates that do not fill a whole epoch are dropped, matching
    the running-sum behavior of the original implementation.
    """
    averages = []
    running = 0.0
    for step, value in enumerate(train_metric, start=1):
        running += value
        if step % updates_per_epoch == 0:
            averages.append(running / updates_per_epoch)
            running = 0.0
    return averages
def test_ufunc_add_outer_simple():
    """The outer-add kernel must agree with np.add.outer on random int32 vectors."""
    lhs = np.random.randint(1, 10, size=(3,), dtype=np.int32)
    rhs = np.random.randint(1, 10, size=(3,), dtype=np.int32)
    expected = np.add.outer(lhs, rhs)
    assert np.array_equal(expected, ufunc_add_outer_simple(lhs, rhs))
def normal_quantile(p, mean=0, std=1):
    """Inverse CDF of N(mean, std) at probability `p` via the inverse error function.

    Quirk preserved from the original: any failure (bad `p`, inversion error,
    etc.) returns the *string* 'None' rather than raising or returning None.
    """
    try:
        return mean + (std * math.sqrt(2)) * inv_erf(2 * p - 1)
    except Exception:
        return 'None'
class ErrorRateStats(MetricStats):
    """Accumulates WER-style error-rate statistics over batches of predictions.

    Optional preprocessing before scoring:
      * merge_tokens: join characters into words at `space_token` boundaries.
      * split_tokens: split words into characters, inserting `space_token`.
      * extract_concepts_values: reduce sequences to concept/value spans
        delimited by `tag_in`/`tag_out` (values kept iff `keep_values`).
    """
    def __init__(self, merge_tokens=False, split_tokens=False, space_token='_', keep_values=True, extract_concepts_values=False, tag_in='', tag_out=''):
        # clear() is inherited from MetricStats and resets accumulated state
        # (self.ids / self.scores used below).
        self.clear()
        self.merge_tokens = merge_tokens
        self.split_tokens = split_tokens
        self.space_token = space_token
        self.extract_concepts_values = extract_concepts_values
        self.keep_values = keep_values
        self.tag_in = tag_in
        self.tag_out = tag_out
    def append(self, ids, predict, target, predict_len=None, target_len=None, ind2lab=None):
        """Score one batch and extend self.scores with per-utterance WER details.

        ids: utterance identifiers. predict/target: batches of sequences.
        predict_len/target_len: relative lengths used to strip padding.
        ind2lab: optional callable mapping index sequences to label sequences.
        """
        self.ids.extend(ids)
        if (predict_len is not None):
            # Remove padded positions before any token transformation.
            predict = undo_padding(predict, predict_len)
        if (target_len is not None):
            target = undo_padding(target, target_len)
        if (ind2lab is not None):
            predict = ind2lab(predict)
            target = ind2lab(target)
        if self.merge_tokens:
            predict = merge_char(predict, space=self.space_token)
            target = merge_char(target, space=self.space_token)
        if self.split_tokens:
            predict = split_word(predict, space=self.space_token)
            target = split_word(target, space=self.space_token)
        if self.extract_concepts_values:
            predict = extract_concepts_values(predict, self.keep_values, self.tag_in, self.tag_out, space=self.space_token)
            target = extract_concepts_values(target, self.keep_values, self.tag_in, self.tag_out, space=self.space_token)
        # Final positional arg True: request full per-utterance alignment details.
        scores = wer_details_for_batch(ids, target, predict, True)
        self.scores.extend(scores)
    def summarize(self, field=None):
        """Compute the aggregate WER summary; return one field or the whole dict."""
        self.summary = wer_summary(self.scores)
        # Expose WER under the generic 'error_rate' key as well.
        self.summary['error_rate'] = self.summary['WER']
        if (field is not None):
            return self.summary[field]
        else:
            return self.summary
    def write_stats(self, filestream):
        """Write the WER summary and per-utterance alignments to `filestream`."""
        if (not self.summary):
            self.summarize()
        print_wer_summary(self.summary, filestream)
        print_alignments(self.scores, filestream)
def mock_library_log_means_and_vars(mock_contrastive_adata_manager, mock_n_batch):
    """Fixture helper: per-batch library-size log means/vars for the mock AnnData manager."""
    return _init_library_size(mock_contrastive_adata_manager, n_batch=mock_n_batch)
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register Python-binding constructors/methods for ns3::AttributeConstructionList.

    Auto-generated-style PyBindGen registration: two constructors (copy and
    default), plus Add/Begin/End/Find method bindings.
    """
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    cls.add_method('Begin', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    cls.add_method('End', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
class DepthConv(nn.Module):
    """Per-position depthwise convolution with externally supplied weights.

    forward(x, conv_weights) unfolds kw x kw patches of `x` and combines each
    patch with a per-location weight vector, channel by channel. Note the
    `padding` constructor argument is accepted but the unfold padding is
    fixed at 1 (behavior kept from the original).
    """

    def __init__(self, fmiddle, opt, kw=3, padding=1, stride=1):
        super().__init__()
        self.kw = kw
        self.stride = stride
        self.unfold = nn.Unfold(kernel_size=(kw, kw), dilation=1, padding=1, stride=stride)
        # Multi-process/distributed runs need synchronized batch-norm stats.
        norm_cls = nn.SyncBatchNorm if opt.mpdist else nn.BatchNorm2d
        self.norm_layer = norm_cls(fmiddle, affine=True)

    def forward(self, x, conv_weights):
        batch, channels, height, width = x.size()
        out_h = height // self.stride
        out_w = width // self.stride
        taps = self.kw * self.kw
        # One weight vector of length kw*kw per (sample, channel, position).
        weights = conv_weights.view(batch * channels, taps, out_h, out_w)
        patches = self.unfold(x).view(batch * channels, taps, out_h, out_w)
        combined = (weights * patches).sum(dim=1)
        return combined.view(batch, channels, out_h, out_w)
def compute_aspect_term(model, input, label, tokenizer, args):
    """Greedy-decode aspect terms after the '<|term|>' marker for each example.

    For every (input, label) pair: truncate the decoded input at '<|term|>',
    re-append the marker, then autoregressively append argmax tokens until an
    EOS token appears or args.block_size tokens are exceeded. Returns
    (decoded generations, decoded ground-truth labels).
    """
    break_tokens = tokenizer.encode(tokenizer._eos_token.content)
    MAX_LEN = args.block_size
    batch_pred = []
    batch_ground = []
    for (inp, ground) in zip(input, label):
        # Keep only the text before the aspect-term marker, then re-add it.
        inp_text = tokenizer.decode(inp).split('<|term|>')[0].strip()
        inp_dec = f'{inp_text} <|term|>'
        ground_dec = tokenizer.decode(ground)
        indexed_tokens = tokenizer.encode(inp_dec)
        tokens_tensor = tokenizer.encode(inp_dec, return_tensors='pt').to(args.device)
        # Seed the loop check with the last prompt token.
        predicted_index = indexed_tokens[(- 1)]
        while (predicted_index not in break_tokens):
            outputs = model(tokens_tensor)
            predictions = outputs[0]
            (probs, indexes) = torch.max(torch.softmax(predictions, (- 1)), (- 1))
            # Greedy choice: argmax token at the last position of batch row 0.
            predicted_index = indexes[(0, (- 1))].item()
            indexed_tokens += [predicted_index]
            # Rebuild the full input tensor each step (no KV cache used).
            tokens_tensor = torch.tensor([indexed_tokens]).to(args.device)
            if (len(indexed_tokens) > MAX_LEN):
                break
        gen_text = tokenizer.decode(indexed_tokens)
        batch_pred.append(gen_text)
        batch_ground.append(ground_dec)
    return (batch_pred, batch_ground)
def test_forbid_value_and_auth():
    """Supplying both a plain value and its *_regex twin must raise UsageError."""
    fset = filters.FilterSet()
    expected_message = filters.ERROR_EXPECTED_AND_REGEX
    with pytest.raises(UsageError, match=expected_message):
        fset.include(method='POST', method_regex='GET')
class BaseDataLoader():
    """Minimal data-loader base: subclasses override load_data()."""

    def __init__(self):
        # Nothing to set up until initialize() is called.
        pass

    def initialize(self, opt):
        # Remember the options object for use by subclasses.
        self.opt = opt

    def load_data(self):
        # Base implementation provides no data.
        return None
def test_list_numpy_1():
    """'var * float64' must round-trip through deduce_type as a ListType."""
    spec = 'var * float64'
    parsed = deduce_type(spec)
    assert isinstance(parsed, ak.types.ListType)
    assert str(parsed) == spec
def can_change_cost_type(args):
    """Return True if any argument string mentions a cost-type or cost-transform option."""
    for token in args:
        if ('S_COST_TYPE' in token) or ('H_COST_TRANSFORM' in token):
            return True
    return False
class Problem3D(Problem):
    """Three-dimensional Problem variant caching the task map's dimensions."""

    def __init__(self, cfg: Config):
        super().__init__(cfg)
        height, width, length = cfg.task.map_shape
        self._height = height
        self._width = width
        self._length = length
def load_candidate(path_to_candidate):
    """Read a candidate-ranking file and return {qid: ranked candidate documents}."""
    with open(path_to_candidate, 'r') as handle:
        return load_candidate_from_stream(handle)
def RunInitNet(model):
    """Run each data-parallel init net once, then create the model's main net."""
    for net in model._data_parallel_model_init_nets:
        workspace.RunNetOnce(net)
    CreateNet(model)
class BaseDataFrameField(BaseAnnDataField):
    """AnnData field backed by a dataframe attribute (`adata.obs` or `adata.var`).

    registry_key: key under which this field is registered.
    attr_key: column name within the dataframe; `None` marks the field as
        empty (only allowed when `required=False`).
    field_type: which dataframe attribute to target, 'obs' or 'var'.
    """
    def __init__(self, registry_key: str, attr_key: Optional[str], field_type: Literal[('obs', 'var')]=None, required: bool=True) -> None:
        super().__init__()
        if (required and (attr_key is None)):
            raise ValueError('`attr_key` cannot be `None` if `required=True`. Please provide an `attr_key`.')
        if (field_type == 'obs'):
            self._attr_name = _constants._ADATA_ATTRS.OBS
        elif (field_type == 'var'):
            self._attr_name = _constants._ADATA_ATTRS.VAR
        else:
            raise ValueError("`field_type` must be either 'obs' or 'var'.")
        self._registry_key = registry_key
        self._attr_key = attr_key
        # A field without a column key is treated as empty.
        self._is_empty = (attr_key is None)
    # NOTE(review): the four accessors below read like `@property` getters
    # (decorators may have been stripped from this listing) — confirm upstream.
    def registry_key(self) -> str:
        return self._registry_key
    def attr_name(self) -> str:
        return self._attr_name
    def attr_key(self) -> str:
        return self._attr_key
    def is_empty(self) -> bool:
        return self._is_empty
class DDPGradientStatsHook():
    """DDP communication hook that records squared gradient-bucket norms.

    Per bucket it stores the squared norm of the local ("small batch")
    gradient and of the all-reduced ("large batch") gradient; get_stats()
    aggregates both across ranks and resets the accumulators. Useful for
    gradient-noise-scale style diagnostics.
    """
    def __init__(self, ddp_module):
        try:
            # Register this object as state with _hook_fn as the comm hook.
            ddp_module.register_comm_hook(self, self._hook_fn)
        except AttributeError:
            # Plain (non-DDP) modules have no register_comm_hook.
            raise ValueError('DDPGradientStatsHook does not support non-DDP wrapped modules')
        self._clear_state()
    def _clear_state(self):
        # Per-bucket squared norms accumulated since the last get_stats().
        self.bucket_sq_norms_small_batch = []
        self.bucket_sq_norms_large_batch = []
    def _hook_fn(self, bucket):
        """Comm hook: record the local bucket norm, average-all-reduce the
        gradients asynchronously, then record the averaged bucket norm."""
        buf = bucket.buffer()
        self.bucket_sq_norms_small_batch.append(buf.pow(2).sum())
        fut = torch.distributed.all_reduce(buf, op=torch.distributed.ReduceOp.AVG, async_op=True).get_future()
        def callback(fut):
            buf = fut.value()[0]
            self.bucket_sq_norms_large_batch.append(buf.pow(2).sum())
            # DDP expects the returned future to resolve to the reduced buffer.
            return buf
        return fut.then(callback)
    def get_stats(self):
        """Return (small_batch_sq_norm, large_batch_sq_norm) averaged over
        all ranks, then reset the per-bucket accumulators."""
        sq_norm_small_batch = sum(self.bucket_sq_norms_small_batch)
        sq_norm_large_batch = sum(self.bucket_sq_norms_large_batch)
        self._clear_state()
        stats = torch.stack([sq_norm_small_batch, sq_norm_large_batch])
        torch.distributed.all_reduce(stats, op=torch.distributed.ReduceOp.AVG)
        return (stats[0].item(), stats[1].item())
class ProductionCollecotr(Visitor_Recursive):
    """Lark visitor that collects function productions (and their constraint
    expressions) into a ProductionSpec.

    (The class-name typo is preserved: it is the public identifier.)
    """
    _type_spec: TypeSpec
    _prod_spec: ProductionSpec

    def __init__(self, type_spec):
        self._type_spec = type_spec
        self._prod_spec = ProductionSpec()

    # Fix: the three helpers below were defined without `self` yet invoked as
    # bound methods (self._process_opt_arg(...), etc.), which passes `self`
    # into the first parameter and raises TypeError. They are stateless, so
    # restoring @staticmethod makes the existing call sites work unchanged.
    @staticmethod
    def _process_opt_arg(opt_arg):
        """Return the type name (first child) of an opt_arg node."""
        return str(opt_arg.children[0])

    @staticmethod
    def _create_index_map(opt_args):
        """Map variable names (second child, when present) to argument indices."""
        ret = dict()
        for (index, opt_arg) in enumerate(opt_args):
            if (len(opt_arg.children) > 1):
                var_name = str(opt_arg.children[1])
                ret[var_name] = index
        return ret

    @staticmethod
    def _create_type_map(types):
        """Map argument indices to their resolved types."""
        ret = dict()
        for (index, ty) in enumerate(types):
            ret[index] = ty
        return ret

    def _process_expr(self, index_map, type_map, tree):
        """Recursively convert a constraint parse tree into an expression AST."""
        expr_kind = str(tree.data)
        if (expr_kind == 'expr_false'):
            return ConstExpr(False)
        elif (expr_kind == 'expr_true'):
            return ConstExpr(True)
        elif (expr_kind == 'expr_intlit'):
            value = int(tree.children[0])
            return ConstExpr(value)
        elif (expr_kind == 'expr_var'):
            # Variables must have been bound by the production's argument list.
            name = str(tree.children[0])
            index = index_map.get(name, None)
            if (index is None):
                raise ValueError('Cannot find parameter binding for variable "{}"'.format(name))
            return ParamExpr(index)
        elif (expr_kind == 'property_expr'):
            # Property access: resolve the property on the parameter's type.
            name = str(tree.children[0])
            arg = cast(ParamExpr, self._process_expr(index_map, type_map, tree.children[1]))
            param_ty = cast(ValueType, type_map[arg.index])
            param_ety = param_ty.get_property(name)
            if (param_ety is None):
                raise ValueError('Cannot find property {} for type {}'.format(name, param_ty))
            return PropertyExpr(name, param_ety, arg)
        elif (expr_kind == 'unary_expr'):
            operand = self._process_expr(index_map, type_map, tree.children[1])
            operator = str(tree.children[0].data)
            if (operator == 'expr_neg'):
                operator = UnaryOperator.NEG
            elif (operator == 'expr_not'):
                operator = UnaryOperator.NOT
            else:
                raise ValueError('Unrecognized unary operator: {}'.format(operator))
            return UnaryExpr(operator, operand)
        elif (expr_kind == 'factor_expr'):
            # Multiplicative binary operators.
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            operator = str(tree.children[1].data)
            rhs = self._process_expr(index_map, type_map, tree.children[2])
            if (operator == 'expr_mul'):
                operator = BinaryOperator.MUL
            elif (operator == 'expr_div'):
                operator = BinaryOperator.DIV
            elif (operator == 'expr_mod'):
                operator = BinaryOperator.MOD
            else:
                raise ValueError('Unrecognized binary operator: {}'.format(operator))
            return BinaryExpr(operator, lhs, rhs)
        elif (expr_kind == 'term_expr'):
            # Additive binary operators.
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            operator = str(tree.children[1].data)
            rhs = self._process_expr(index_map, type_map, tree.children[2])
            if (operator == 'expr_add'):
                operator = BinaryOperator.ADD
            elif (operator == 'expr_sub'):
                operator = BinaryOperator.SUB
            else:
                raise ValueError('Unrecognized binary operator: {}'.format(operator))
            return BinaryExpr(operator, lhs, rhs)
        elif (expr_kind == 'cmp_expr'):
            # Comparison operators.
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            operator = str(tree.children[1].data)
            rhs = self._process_expr(index_map, type_map, tree.children[2])
            if (operator == 'expr_eq'):
                operator = BinaryOperator.EQ
            elif (operator == 'expr_ne'):
                operator = BinaryOperator.NE
            elif (operator == 'expr_lt'):
                operator = BinaryOperator.LT
            elif (operator == 'expr_le'):
                operator = BinaryOperator.LE
            elif (operator == 'expr_gt'):
                operator = BinaryOperator.GT
            elif (operator == 'expr_ge'):
                operator = BinaryOperator.GE
            else:
                raise ValueError('Unrecognized binary operator: {}'.format(operator))
            return BinaryExpr(operator, lhs, rhs)
        elif (expr_kind == 'and_expr'):
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            rhs = self._process_expr(index_map, type_map, tree.children[1])
            return BinaryExpr(BinaryOperator.AND, lhs, rhs)
        elif (expr_kind == 'or_expr'):
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            rhs = self._process_expr(index_map, type_map, tree.children[1])
            return BinaryExpr(BinaryOperator.OR, lhs, rhs)
        elif (expr_kind == 'imply_expr'):
            lhs = self._process_expr(index_map, type_map, tree.children[0])
            rhs = self._process_expr(index_map, type_map, tree.children[1])
            return BinaryExpr(BinaryOperator.IMPLY, lhs, rhs)
        elif (expr_kind == 'cond_expr'):
            cond = self._process_expr(index_map, type_map, tree.children[0])
            true_val = self._process_expr(index_map, type_map, tree.children[1])
            false_val = self._process_expr(index_map, type_map, tree.children[2])
            return CondExpr(cond, true_val, false_val)
        else:
            msg = 'Unrecognized expr kind: {}'.format(expr_kind)
            raise NotImplementedError(msg)

    def func_decl(self, tree):
        """Visitor callback: register one function production with its constraints."""
        name = str(tree.children[0])
        tree_body = tree.children[1]
        lhs_name = self._process_opt_arg(tree_body.children[0])
        rhs_names = [self._process_opt_arg(x) for x in tree_body.children[1].children]
        # Index 0 is the return value; argument indices start at 1.
        index_map = self._create_index_map(([tree_body.children[0]] + tree_body.children[1].children))
        lhs = self._type_spec.get_type_or_raise(lhs_name)
        rhs = [self._type_spec.get_type_or_raise(x) for x in rhs_names]
        type_map = self._create_type_map(([lhs] + rhs))
        constraints = [self._process_expr(index_map, type_map, x) for x in tree.children[2].children]
        self._prod_spec.add_func_production(name=name, lhs=lhs, rhs=rhs, constraints=constraints)

    def collect(self) -> ProductionSpec:
        """Return the accumulated ProductionSpec."""
        return self._prod_spec
class ListCommand(Command):
    """pip `list` command: show installed packages, optionally filtered by
    outdated/uptodate/editable/not-required status, in columns/freeze/json
    format."""
    name = 'list'
    usage = '\n %prog [options]'
    summary = 'List installed packages.'
    def __init__(self, *args, **kw):
        """Register all `pip list` command-line options."""
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option('-o', '--outdated', action='store_true', default=False, help='List outdated packages')
        cmd_opts.add_option('-u', '--uptodate', action='store_true', default=False, help='List uptodate packages')
        cmd_opts.add_option('-e', '--editable', action='store_true', default=False, help='List editable projects.')
        cmd_opts.add_option('-l', '--local', action='store_true', default=False, help='If in a virtualenv that has global access, do not list globally-installed packages.')
        self.cmd_opts.add_option('--user', dest='user', action='store_true', default=False, help='Only output packages installed in user-site.')
        cmd_opts.add_option('--pre', action='store_true', default=False, help='Include pre-release and development versions. By default, pip only finds stable versions.')
        cmd_opts.add_option('--format', action='store', dest='list_format', default='columns', choices=('columns', 'freeze', 'json'), help='Select the output format among: columns (default), freeze, or json')
        cmd_opts.add_option('--not-required', action='store_true', dest='not_required', help='List packages that are not dependencies of installed packages.')
        cmd_opts.add_option('--exclude-editable', action='store_false', dest='include_editable', help='Exclude editable package from output.')
        cmd_opts.add_option('--include-editable', action='store_true', dest='include_editable', help='Include editable package from output.', default=True)
        index_opts = make_option_group(index_group, self.parser)
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """Create a PackageFinder configured from the command options."""
        return PackageFinder(find_links=options.find_links, index_urls=index_urls, allow_all_prereleases=options.pre, trusted_hosts=options.trusted_hosts, process_dependency_links=options.process_dependency_links, session=session)
    def run(self, options, args):
        """Entry point: gather installed distributions, apply filters, print."""
        if (options.outdated and options.uptodate):
            raise CommandError('Options --outdated and --uptodate cannot be combined.')
        packages = get_installed_distributions(local_only=options.local, user_only=options.user, editables_only=options.editable, include_editables=options.include_editable)
        if options.outdated:
            packages = self.get_outdated(packages, options)
        elif options.uptodate:
            packages = self.get_uptodate(packages, options)
        if options.not_required:
            packages = self.get_not_required(packages, options)
        self.output_package_listing(packages, options)
    def get_outdated(self, packages, options):
        """Return only distributions with a newer version available."""
        return [dist for dist in self.iter_packages_latest_infos(packages, options) if (dist.latest_version > dist.parsed_version)]
    def get_uptodate(self, packages, options):
        """Return only distributions already at their latest version."""
        return [dist for dist in self.iter_packages_latest_infos(packages, options) if (dist.latest_version == dist.parsed_version)]
    def get_not_required(self, packages, options):
        """Return distributions no other installed distribution depends on."""
        dep_keys = set()
        for dist in packages:
            dep_keys.update((requirement.key for requirement in dist.requires()))
        return {pkg for pkg in packages if (pkg.key not in dep_keys)}
    def iter_packages_latest_infos(self, packages, options):
        """Yield each distribution annotated with latest_version/latest_filetype
        discovered on the configured indexes."""
        index_urls = ([options.index_url] + options.extra_index_urls)
        if options.no_index:
            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
            index_urls = []
        dependency_links = []
        for dist in packages:
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
        with self._build_session(options) as session:
            finder = self._build_package_finder(options, index_urls, session)
            finder.add_dependency_links(dependency_links)
            for dist in packages:
                typ = 'unknown'
                all_candidates = finder.find_all_candidates(dist.key)
                if (not options.pre):
                    # Without --pre, ignore pre-release candidates.
                    all_candidates = [candidate for candidate in all_candidates if (not candidate.version.is_prerelease)]
                if (not all_candidates):
                    continue
                best_candidate = max(all_candidates, key=finder._candidate_sort_key)
                remote_version = best_candidate.version
                if best_candidate.location.is_wheel:
                    typ = 'wheel'
                else:
                    typ = 'sdist'
                dist.latest_version = remote_version
                dist.latest_filetype = typ
                (yield dist)
    def output_package_listing(self, packages, options):
        """Print the package list in the selected format (columns/freeze/json)."""
        packages = sorted(packages, key=(lambda dist: dist.project_name.lower()))
        if ((options.list_format == 'columns') and packages):
            (data, header) = format_for_columns(packages, options)
            self.output_package_listing_columns(data, header)
        elif (options.list_format == 'freeze'):
            for dist in packages:
                if (options.verbose >= 1):
                    logger.info('%s==%s (%s)', dist.project_name, dist.version, dist.location)
                else:
                    logger.info('%s==%s', dist.project_name, dist.version)
        elif (options.list_format == 'json'):
            logger.info(format_for_json(packages, options))
    def output_package_listing_columns(self, data, header):
        """Print rows in aligned columns with a header separator line."""
        if (len(data) > 0):
            data.insert(0, header)
        (pkg_strings, sizes) = tabulate(data)
        if (len(data) > 0):
            # Separator of dashes sized to each column, inserted under the header.
            pkg_strings.insert(1, ' '.join(map((lambda x: ('-' * x)), sizes)))
        for val in pkg_strings:
            logger.info(val)
def get_logger(model_dir, filename='train.log'):
    """Create (and store in the module-global `logger`) a DEBUG-level logger
    writing to `model_dir/filename`.

    Note: each call adds another FileHandler to the logger with this name;
    callers are expected to call this once per run.
    """
    global logger
    logger = logging.getLogger(os.path.basename(model_dir))
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s')
    # Fix: exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` pattern.
    os.makedirs(model_dir, exist_ok=True)
    h = logging.FileHandler(os.path.join(model_dir, filename))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    logger.addHandler(h)
    return logger
def get_evaluation(name):
    """Import evaluations.<name> and return its evaluation class
    (module name converted to a class name by _module_to_class)."""
    module = __import__('evaluations.{}'.format(name), fromlist=[''])
    class_name = _module_to_class(name)
    return getattr(module, class_name)
def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
    """Reduce each tensor in `inputs` across devices via NCCL (SUM by default).

    When `outputs` is omitted the reduction is in place, overwriting
    `inputs`. `streams`/`comms` may be None to use defaults.
    """
    _check_sequence_type(inputs)
    if (outputs is None):
        # In-place: results are written back into the input tensors.
        outputs = inputs
    _check_sequence_type(outputs)
    # Delegates to the private C binding.
    torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
def main():
    """Parse CLI options and launch FEXIPRO benchmark runs, one worker per
    NUMA node, over the cartesian product of K / algorithm / sample ratio."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output-dir', required=True)
    parser.add_argument('--scaling-value', type=int, help='maximum value for scaling in FEXIPRO')
    parser.add_argument('--sigma', type=float, help='percentage of SIGMA for SVD incremental prune')
    parser.add_argument('--top-K', help='list of comma-separated integers, e.g., 1,5,10,50')
    parser.add_argument('--sample', dest='sample', action='store_true')
    parser.add_argument('--no-sample', dest='sample', action='store_false')
    parser.set_defaults(sample=False)
    parser.add_argument('--decision-rule', dest='decision_rule', action='store_true')
    parser.add_argument('--no-decision-rule', dest='decision_rule', action='store_false')
    parser.set_defaults(decision_rule=False)
    parser.add_argument('--test-only', dest='test_only', action='store_true')
    parser.add_argument('--no-test-only', dest='test_only', action='store_false')
    parser.add_argument('--user-sample-ratios', help='list of comma-separated integers, e.g., 0.001,0.005,0.01,0.05,0.1')
    parser.set_defaults(test_only=False)
    args = parser.parse_args()
    # Fix: compare against None (not truthiness) so explicit 0 / 0.0 are honored.
    scaling_value = (args.scaling_value if (args.scaling_value is not None) else 127)
    sigma = (args.sigma if (args.sigma is not None) else 0.8)
    TOP_K = ([int(val) for val in args.top_K.split(',')] if args.top_K else [1, 5, 10, 50])
    USER_SAMPLE_RATIOS = ([float(val) for val in args.user_sample_ratios.split(',')] if args.user_sample_ratios else [0.001, 0.005, 0.01, 0.05, 0.1])
    ALGS = ['SI', 'SIR']
    # Pick the runner binary matching the selected build flavor.
    runner_dir = 'fexipro-orig-build'
    if args.decision_rule:
        runner_dir += '-decision-rule'
    if args.test_only:
        runner_dir += '-test-only'
    runner = ('../%s/runFEXIPRO' % runner_dir)
    output_dir = args.output_dir
    if (output_dir[(- 1)] != '/'):
        output_dir += '/'
    # Fix: exist_ok avoids the check-then-create race.
    os.makedirs(output_dir, exist_ok=True)
    run_args = []
    numa_queue = get_numa_queue()
    for (model_dir, _, _) in TO_RUN:
        input_dir = os.path.join(MODEL_DIR_BASE, model_dir)
        base_name = model_dir.replace('/', '-')
        for (K, alg, user_sample_ratio) in product(TOP_K, ALGS, USER_SAMPLE_RATIOS):
            run_args.append((numa_queue, K, alg, scaling_value, sigma, args.sample, user_sample_ratio, input_dir, base_name, output_dir, runner))
    # Fix: the context manager terminates/joins the workers; the original
    # leaked the pool after map() returned.
    with multiprocessing.Pool(NUM_NUMA_NODES) as pool:
        pool.map(run, run_args)
# Fix: the original had a bare `_grad()` statement here (a NameError at import
# time) — a mangled `@torch.no_grad()` decorator, restored below so generation
# runs without autograd bookkeeping.
@torch.no_grad()
def calculate_metrics(nets, args, step, mode):
    """Generate translated images for every (source, target) domain pair and
    then compute FID over all generated tasks.

    mode: 'latent' or 'reference' (asserted); images are written under
    args.eval_dir/<src>2<trg>/ before scoring.
    """
    print('Calculating evaluation metrics...')
    assert (mode in ['latent', 'reference'])
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    domains = os.listdir(args.val_img_dir)
    domains.sort()
    num_domains = len(domains)
    print(('Number of domains: %d' % num_domains))
    lpips_dict = OrderedDict()
    for (trg_idx, trg_domain) in enumerate(domains):
        src_domains = [x for x in domains if (x != trg_domain)]
        for (src_idx, src_domain) in enumerate(src_domains):
            path_src = os.path.join(args.val_img_dir, src_domain)
            loader_src = get_eval_loader(root=path_src, img_size=args.img_size, batch_size=args.val_batch_size, imagenet_normalize=False, shuffle=False)
            images = os.listdir(path_src)
            images.sort()
            img_num = 0
            task = ('%s2%s' % (src_domain, trg_domain))
            path_fake = os.path.join(args.eval_dir, task)
            # Start from a clean output directory for this task.
            shutil.rmtree(path_fake, ignore_errors=True)
            os.makedirs(path_fake)
            print(('Generating images for %s...' % task))
            for (i, x_src) in enumerate(tqdm(loader_src, total=len(loader_src))):
                N = x_src.size(0)
                x_src = x_src.to(device)
                # src_domains excludes the target, so shift indices at/after
                # the target back to original domain labels.
                if (src_idx >= trg_idx):
                    y_src = torch.tensor(([(src_idx + 1)] * N)).to(device)
                else:
                    y_src = torch.tensor(([src_idx] * N)).to(device)
                y_trg = torch.tensor(([trg_idx] * N)).to(device)
                masks = (nets.fan.get_heatmap(x_src) if (args.w_hpf > 0) else None)
                for j in range(args.num_outs_per_domain):
                    (s_trg, s_trg1, s_trg2, s_trg3) = nets.style_encoder(x_src, y_src, y_trg)
                    # Interpolated style: s1 + s2 + degree * (s3 - s2).
                    x_fake = nets.generator(x_src, ((s_trg1 + s_trg2) + (args.degree * (s_trg3 - s_trg2))), masks=masks)
                    for k in range(N):
                        filename = os.path.join(path_fake, ('%s.png' % images[img_num].split('.')[0]))
                        utils.save_image(x_fake[k], ncol=1, filename=filename)
                        img_num += 1
            del loader_src
    calculate_fid_for_all_tasks(args, domains, step=step, mode=mode)
def filter_type_14_4_22(moves, rival_move):
    """Keep candidate moves whose four-of-a-kind rank beats the rival's.

    Quirk preserved from the original: if a candidate contains no quad, the
    rank found for the *previous* candidate is reused in the comparison.
    """
    rival_rank = 0
    for card, count in collections.Counter(rival_move).items():
        if count == 4:
            rival_rank = card
    kept = []
    my_rank = 0
    for candidate in moves:
        for card, count in collections.Counter(candidate).items():
            if count == 4:
                my_rank = card
        if my_rank > rival_rank:
            kept.append(candidate)
    return kept
def asd(result, reference, voxelspacing=None, connectivity=1):
    """Average surface distance from `result` to `reference` segmentation."""
    surface_dists = __surface_distances(result, reference, voxelspacing, connectivity)
    return surface_dists.mean()
def create_kb(path):
    """Build the wikidata knowledge-base graph from the CSQA JSON dumps under
    `path` and pickle it to data/BFS/wikidata.pkl.

    graph[i] holds, for entity id i: 'name' (label), 'sub' (predicate ->
    object-id set) and 'obj' (predicate -> subject-id set).
    """
    print('Loading from items_wikidata_n.json')
    # Fix: use context managers — the original json.load(open(...)) calls
    # leaked the file handles. The unused `cont` local was also removed.
    with open(os.path.join(path, 'items_wikidata_n.json'), 'r') as f:
        entity_items = json.load(f)
    max_id = 0
    for idx in entity_items:
        max_id = max(max_id, get_id(idx))
    graph = [{} for i in range((max_id + 1))]
    for idx in entity_items:
        graph[get_id(idx)]['name'] = entity_items[idx]
    print('Loading from wikidata_short_1.json')
    with open(os.path.join(path, 'wikidata_short_1.json'), 'r') as f:
        sub_predict_obj = json.load(f)
    for idx in tqdm(sub_predict_obj):
        for x in sub_predict_obj[idx]:
            # Sets give O(1) membership tests during BFS.
            sub_predict_obj[idx][x] = set(sub_predict_obj[idx][x])
        graph[get_id(idx)]['sub'] = sub_predict_obj[idx]
    print('Loading from wikidata_short_2.json')
    with open(os.path.join(path, 'wikidata_short_2.json'), 'r') as f:
        sub_predict_obj = json.load(f)
    for idx in tqdm(sub_predict_obj):
        for x in sub_predict_obj[idx]:
            sub_predict_obj[idx][x] = set(sub_predict_obj[idx][x])
        graph[get_id(idx)]['sub'] = sub_predict_obj[idx]
    print('Loading from comp_wikidata_rev.json')
    with open(os.path.join(path, 'comp_wikidata_rev.json'), 'r') as f:
        obj_predict_sub = json.load(f)
    for idx in tqdm(obj_predict_sub):
        for x in obj_predict_sub[idx]:
            obj_predict_sub[idx][x] = set(obj_predict_sub[idx][x])
        graph[get_id(idx)]['obj'] = obj_predict_sub[idx]
    with open('data/BFS/wikidata.pkl', 'wb') as f:
        pickle.dump(graph, f)
class HitBallWithQueue(Task):
    """RLBench task: grasp the cue ('queue') and pot the ball into the goal."""

    def init_task(self) -> None:
        cue = Shape('queue')
        ball = Shape('ball')
        goal_sensor = ProximitySensor('success')
        self.register_graspable_objects([cue])
        # Ordered conditions: the cue must be grasped before the ball reaches
        # the goal sensor.
        conditions = ConditionSet(
            [GraspedCondition(self.robot.gripper, cue),
             DetectedCondition(ball, goal_sensor)],
            order_matters=True)
        self.register_success_conditions([conditions])

    def init_episode(self, index: int) -> List[str]:
        # Natural-language goal descriptions (original wording preserved).
        return ['hit ball with queue in to the goal', 'pot the ball in the goal', 'pick up the que and use it to pot the ball into the goal']

    def variation_count(self) -> int:
        # Single-variation task.
        return 1
# Fix: the three lines above this test were mangled decorators
# (`.experimental`, `.parametrize(...)`, `.usefixtures(...)`) — syntax errors
# as written. Reconstructed as the standard pytest marks they correspond to.
@pytest.mark.experimental
@pytest.mark.parametrize('pad_columns', ['user_id'])
@pytest.mark.usefixtures('dataframe_pandas')
def test_invalid_column_dtype_pandas(pad_columns, dataframe_pandas):
    """Padder.transform must reject a pad column whose dtype cannot be padded."""
    with pytest.raises(ValueError):
        Padder(pad_columns=pad_columns).transform(dataframe_pandas)
def _sympysage_real_interval(self):
    """Convert this SymPy real root interval into a Sage RealIntervalField element.

    The interval endpoints live in the SymPy domain `self.dom`; both are
    mapped through the corresponding Sage fraction field and joined.
    """
    from sage.rings.real_mpfi import RealIntervalField
    field = RealIntervalField(1024)  # generous precision for the enclosure
    frac_field = self.dom._sage_().fraction_field()
    lower = field(frac_field(self.a))
    upper = field(frac_field(self.b))
    return lower.union(upper)
def register_Ns3ParfWifiManager_methods(root_module, cls):
    """Register Python-binding constructors/methods for ns3::ParfWifiManager.

    Auto-generated-style PyBindGen registration: copy/default constructors,
    public configuration setters, and the private virtual Do* rate-control
    callbacks overridden from WifiRemoteStationManager.
    """
    cls.add_constructor([param('ns3::ParfWifiManager const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetHtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetVhtSupported', 'void', [param('bool', 'enable')], is_virtual=True)
    cls.add_method('SetupPhy', 'void', [param('ns3::Ptr< ns3::WifiPhy > const', 'phy')], is_virtual=True)
    cls.add_method('DoCreateStation', 'ns3::WifiRemoteStation *', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoGetDataTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoGetRtsTxVector', 'ns3::WifiTxVector', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportDataOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ackSnr'), param('ns3::WifiMode', 'ackMode'), param('double', 'dataSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalDataFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportFinalRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsFailed', 'void', [param('ns3::WifiRemoteStation *', 'station')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRtsOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'ctsSnr'), param('ns3::WifiMode', 'ctsMode'), param('double', 'rtsSnr')], visibility='private', is_virtual=True)
    cls.add_method('DoReportRxOk', 'void', [param('ns3::WifiRemoteStation *', 'station'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode')], visibility='private', is_virtual=True)
    cls.add_method('IsLowLatency', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    return
.expansion
class ExpandBatchedMatMulCuBLAS(ExpandTransformation):
    """Expand a batched matrix-multiplication library node into a cuBLAS
    strided-batched GEMM call.

    Emits either ``cublas<T>gemmStridedBatched`` (plain path) or
    ``cublasGemmStridedBatchedEx`` (when a compute type, accumulator type,
    or algorithm is specified on the node).  If any operand does not live in
    GPU-accessible memory, the expansion wraps the tasklet in a nested SDFG
    that stages host arrays through GPU-global copies.

    NOTE(review): the bare ``.expansion`` line preceding this class looks
    like a truncated decorator (presumably ``@dace.library.expansion``).
    """
    environments = [environments.cublas.cuBLAS]
    def expansion(node, state, sdfg):
        # NOTE(review): no `self` — presumably a @staticmethod in the
        # original source (decorator stripped by extraction).
        node.validate(sdfg, state)
        (adesc, bdesc, cdesc) = (None, None, None)
        # Resolve the array descriptors attached to the '_a'/'_b' inputs and
        # the '_c' output by walking each memlet path to its access node.
        for e in state.in_edges(node):
            if (e.dst_conn == '_a'):
                anode = state.memlet_path(e)[0].src
                if isinstance(anode, dace.sdfg.nodes.AccessNode):
                    adesc: dt.Array = sdfg.arrays[anode.data]
            elif (e.dst_conn == '_b'):
                bnode = state.memlet_path(e)[0].src
                if isinstance(bnode, dace.sdfg.nodes.AccessNode):
                    bdesc: dt.Array = sdfg.arrays[bnode.data]
        for e in state.out_edges(node):
            if (e.src_conn == '_c'):
                cnode = state.memlet_path(e)[(- 1)].dst
                if isinstance(cnode, dace.sdfg.nodes.AccessNode):
                    cdesc: dt.Array = sdfg.arrays[cnode.data]
        if ((not adesc) or (not bdesc) or (not cdesc)):
            raise ValueError('Unsupported input/output arrays')
        # cuBLAS needs GPU-global or pinned memory; anything else must be
        # staged through explicit copies (see `needs_copy` path below).
        needs_copy = any(((desc.storage not in (dace.StorageType.GPU_Global, dace.StorageType.CPU_Pinned)) for desc in (adesc, bdesc, cdesc)))
        # Map the output's base dtype onto the cuBLAS C type name and the
        # constant-factory suffix used by the DaCe cuBLAS environment.
        dtype = cdesc.dtype.base_type
        func = ('%sgemm' % to_blastype(dtype.type))
        if (dtype == dace.float16):
            cdtype = '__half'
            factort = 'Half'
        elif (dtype == dace.float32):
            cdtype = 'float'
            factort = 'Float'
        elif (dtype == dace.float64):
            cdtype = 'double'
            factort = 'Double'
        elif (dtype == dace.complex64):
            cdtype = 'cuComplex'
            factort = 'Complex64'
        elif (dtype == dace.complex128):
            cdtype = 'cuDoubleComplex'
            factort = 'Complex128'
        else:
            raise ValueError(('Unsupported type: ' + str(dtype)))
        call_prefix = environments.cublas.cuBLAS.handle_setup_code(node)
        call_suffix = ''
        # alpha=1 / beta=0 use preallocated device-side constants; any other
        # alpha is passed from host memory, which requires temporarily
        # switching the cuBLAS pointer mode to HOST (restored afterwards).
        constants = {1.0: f'__state->cublas_handle.Constants(__dace_cuda_device).{factort}Pone()', 0.0: f'__state->cublas_handle.Constants(__dace_cuda_device).{factort}Zero()'}
        if (node.alpha not in constants):
            if isinstance(node.alpha, complex):
                alpha = f'{dtype.ctype}({node.alpha.real}, {node.alpha.imag})'
            else:
                alpha = f'{dtype.ctype}({node.alpha})'
            call_prefix += f'''cublasSetPointerMode(__dace_cublas_handle, CUBLAS_POINTER_MODE_HOST);
{dtype.ctype} alpha = {alpha};
{dtype.ctype} beta = 0;
'''
            call_suffix += '\n cublasSetPointerMode(__dace_cublas_handle, CUBLAS_POINTER_MODE_DEVICE);\n '
            beta = f'({cdtype} *)&beta'
            alpha = f'({cdtype} *)&alpha'
        else:
            alpha = constants[node.alpha]
            beta = ('__state->cublas_handle.Constants(__dace_cuda_device).%sZero()' % factort)
        # Gather GEMM geometry (M/N/K, leading dims, strides, batch count).
        opt = _get_codegen_gemm_opts(node, state, sdfg, adesc, bdesc, cdesc, alpha, beta, cdtype, func)
        opt['array_prefix'] = ('_' if needs_copy else '')
        # Plain strided-batched call when no explicit compute/accumulator
        # type or algorithm was requested; otherwise the Ex variant.
        if ((node.compute_type is None) and (node.accumulator_type is None) and (node.algorithm is None)):
            call = 'cublas{func}StridedBatched(__dace_cublas_handle,\n CUBLAS_OP_{ta}, CUBLAS_OP_{tb},\n {M}, {N}, {K},\n {alpha},\n ({dtype}*){array_prefix}{x}, {lda}, {stride_a},\n ({dtype}*){array_prefix}{y}, {ldb}, {stride_b},\n {beta},\n ({dtype}*){array_prefix}_c, {ldc}, {stride_c},\n {BATCH});'.format_map(opt)
        else:
            if (node.compute_type is not None):
                acctype = node.compute_type
            elif (node.accumulator_type is not None):
                acc_dtype: dtypes.typeclass = node.accumulator_type
                acctype = f'CUBLAS_COMPUTE_{to_cublas_computetype(acc_dtype)}'
            else:
                acctype = f'CUBLAS_COMPUTE_{to_cublas_computetype(dtype)}'
            algorithm = 'CUBLAS_GEMM_DEFAULT_TENSOR_OP'
            if (node.algorithm is not None):
                algorithm = node.algorithm
            call = f'''
cublasGemmStridedBatchedEx(__dace_cublas_handle,
CUBLAS_OP_{opt['ta']}, CUBLAS_OP_{opt['tb']},
{opt['M']}, {opt['N']}, {opt['K']},
{alpha},
{opt['array_prefix']}{opt['x']},
{dtype_to_cudadatatype(opt['xdtype'])},
{opt['lda']}, {opt['stride_a']},
{opt['array_prefix']}{opt['y']},
{dtype_to_cudadatatype(opt['ydtype'])},
{opt['ldb']}, {opt['stride_b']},
{beta},
{opt['array_prefix']}_c,
{dtype_to_cudadatatype(opt['cdtype'])},
{opt['ldc']}, {opt['stride_c']},
{opt['BATCH']},
{acctype}, {algorithm});
'''
        code = ((call_prefix + call) + call_suffix)
        tasklet = dace.sdfg.nodes.Tasklet(node.name, node.in_connectors, node.out_connectors, code, language=dace.dtypes.Language.CPP)
        if needs_copy:
            # Operands are not GPU-accessible: build a nested SDFG that
            # copies _a/_b to transient GPU arrays, runs the tasklet on the
            # GPU copies (via '_'-prefixed pointer connectors), and copies
            # _c back out.
            nsdfg = dace.SDFG('nested_batched_matmul')
            tasklet = dace.sdfg.nodes.Tasklet(node.name, {'__a': dtypes.pointer(adesc.dtype), '__b': dtypes.pointer(bdesc.dtype)}, {'__c': dtypes.pointer(cdesc.dtype)}, code, language=dace.dtypes.Language.CPP)
            for (name, desc) in [('_a', adesc), ('_b', bdesc), ('_c', cdesc)]:
                if isinstance(desc, dt.View):
                    dcopy = desc.as_array()
                else:
                    dcopy = dc(desc)
                # Host-side mirror of the operand plus a transient GPU copy.
                dcopy.transient = False
                dcopy.lifetime = dtypes.AllocationLifetime.Scope
                dcopy_gpu = dc(dcopy)
                nsdfg.add_datadesc(name, dcopy)
                dcopy_gpu.transient = True
                dcopy_gpu.storage = dace.StorageType.GPU_Global
                nsdfg.add_datadesc((name + '_gpu'), dcopy_gpu)
            nstate = nsdfg.add_state()
            a = nstate.add_read('_a')
            ga = nstate.add_access('_a_gpu')
            b = nstate.add_read('_b')
            gb = nstate.add_access('_b_gpu')
            c = nstate.add_write('_c')
            gc = nstate.add_access('_c_gpu')
            nstate.add_node(tasklet)
            nstate.add_nedge(a, ga, dace.Memlet.from_array('_a', adesc))
            nstate.add_nedge(b, gb, dace.Memlet.from_array('_b', bdesc))
            nstate.add_edge(ga, None, tasklet, '__a', dace.Memlet.from_array('_a_gpu', adesc))
            nstate.add_edge(gb, None, tasklet, '__b', dace.Memlet.from_array('_b_gpu', bdesc))
            nstate.add_edge(tasklet, '__c', gc, None, dace.Memlet.from_array('_c_gpu', cdesc))
            nstate.add_nedge(gc, c, dace.Memlet.from_array('_c', cdesc))
            return nsdfg
        return tasklet
def histogram(data, axis=0, r=None):
    """Count occurrences of the unique elements in ``data``.

    Plain inputs are wrapped in a ``DataArray`` (using ``axis``) before
    counting.  When ``r`` is given, lazily yields one histogram per
    ``r``-combination of ``data`` instead of a single count array.
    """
    if not isinstance(data, DataArray):
        data = DataArray(data, axis=axis)
    if r is None:
        # NOTE(review): counting runs along axis=1 regardless of the `axis`
        # argument — presumably DataArray normalizes orientation; confirm.
        _, counts = numpy.unique(data, return_counts=True, axis=1)
        return counts
    return (histogram(subset) for subset in combinations(data, r=r))
def load_state(model_dir, model, optimizer=None):
    """Restore ``model`` (and optionally ``optimizer``) from ``model_dir``.

    Reads the TF-style ``checkpoint`` index file in ``model_dir`` to find the
    latest checkpoint path, loads it onto the current CUDA device, and applies
    it with ``strict=False`` (missing parameters are reported but tolerated).

    Returns:
        ``(best_prec1, start_epoch)`` — ``(0, 0)`` when no checkpoint exists.
        When ``optimizer`` is None only the model is restored and ``None`` is
        returned (preserved legacy behavior).
    """
    checkpoint_index = os.path.join(model_dir, 'checkpoint')
    if not os.path.exists(checkpoint_index):
        print("=> no checkpoint found at '{}', train from scratch".format(model_dir))
        return (0, 0)
    # First line of the index looks like "model_checkpoint_path: <path>".
    # `with` fixes the previously-leaked file handle.
    with open(checkpoint_index) as ckpt:
        model_path = ckpt.readlines()[0].split(':')[1].strip('\n')
    checkpoint = torch.load(model_path, map_location='cuda:{}'.format(torch.cuda.current_device()))
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    # Report parameters present in the model but absent from the checkpoint.
    ckpt_keys = set(checkpoint['state_dict'].keys())
    own_keys = set(model.state_dict().keys())
    for k in (own_keys - ckpt_keys):
        print('missing keys from checkpoint {}: {}'.format(model_dir, k))
    print("=> loaded model from checkpoint '{}'".format(model_dir))
    if optimizer is not None:  # was `!= None`
        best_prec1 = checkpoint.get('best_prec1', 0)
        start_epoch = checkpoint['epoch']
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> also loaded optimizer from checkpoint '{}' (epoch {})".format(model_dir, start_epoch))
        return (best_prec1, start_epoch)
(start=cython.Py_ssize_t, end=cython.Py_ssize_t)
def line_iter(source):
    """Yield the lines of ``source``.

    A string is split on ``'\\n'`` (the final fragment — possibly empty — is
    yielded without a trailing newline); any other iterable is passed through
    line by line.  (Py2-era code: relies on ``basestring``.)
    """
    if not isinstance(source, basestring):
        for line in source:
            yield line
        return
    pos = 0
    while True:
        newline = source.find('\n', pos)
        if newline < 0:
            yield source[pos:]
            return
        yield source[pos:newline]
        pos = newline + 1
('/get_signers/<lastN>', methods=('GET',))
def get_signers(lastN):
    """Report the clique signer of each of the last ``lastN`` blocks.

    Queries geth via ``clique_getSigner`` for every block in the window and
    resolves each signer address to its node name/container via the app's
    account tables.  Returns {block_number: {address, container_name,
    container_id}}.
    """
    web3 = connect_to_geth(app.web3_url, app.consensus)
    latest_block = web3.eth.getBlock('latest').number
    # Clamp the window start to block 1 (genesis has no signer).
    first_block = max(1, (latest_block - int(lastN)) + 1)
    signers = {}
    for blk in range(first_block, latest_block + 1):
        block_hash = web3.eth.getBlock(blk).hash
        raw_signer = send_geth_rpc(app.web3_url, 'clique_getSigner', [block_hash.hex()])
        checksum_addr = Web3.toChecksumAddress(raw_signer)
        node_name = app.eth_accounts[str(checksum_addr)]['name']
        signers[blk] = {
            'address': str(checksum_addr),
            'container_name': node_name,
            'container_id': app.eth_nodes[node_name]['container_id'],
        }
    return signers
def knn_score(train_set, test_set, n_neighbours=2):
    """Anomaly-style score: for each test row, the sum of squared-L2
    distances to its ``n_neighbours`` nearest rows in ``train_set``
    (FAISS exact flat-index search)."""
    dim = train_set.shape[1]
    index = faiss.IndexFlatL2(dim)
    index.add(train_set)
    distances, _ = index.search(test_set, n_neighbours)
    return np.sum(distances, axis=1)
def get_losses():
    """Return the TensorFlow losses module, preferring the TF1 compat path
    (``tf.compat.v1.losses``) and falling back to ``tf.losses`` on versions
    without the compat shim."""
    try:
        losses = tf.compat.v1.losses
    except AttributeError:
        return tf.losses
    return losses
class GenSampledIndividuals(GenIndividuals):
    # Iterator variant of GenIndividuals that produces SampledIndividual
    # instances instead of the base class's default kind.
    def __next__(self):
        # A fresh SampledIndividual on every iteration; termination is
        # presumably handled by the base class / caller — confirm in
        # GenIndividuals.
        return SampledIndividual()
def RunEpoch(args, epoch, train_model, test_model, total_batch_size, num_shards, expname, explog):
    """Run one training epoch (and an optional test pass), logging metrics.

    Executes ``epoch_iters`` training iterations under a per-iteration
    timeout guard, then fetches loss/accuracy/learning-rate blobs from the
    first device, averages test accuracy over all devices (when
    ``test_model`` is given), and records everything through ``explog``.

    Returns:
        The next epoch index (``epoch + 1``).

    Raises:
        AssertionError: if the training loss exceeds 40 (exploded gradients).
    """
    log.info('Starting epoch {}/{}'.format(epoch, args.num_epochs))
    epoch_iters = int(((args.epoch_size / total_batch_size) / num_shards))
    test_epoch_iters = int(((args.test_epoch_size / total_batch_size) / num_shards))
    for i in range(epoch_iters):
        # The first iteration may lazily initialize nets, so it gets a
        # longer timeout.
        timeout = (args.first_iter_timeout if (i == 0) else args.timeout)
        with timeout_guard.CompleteInTimeOrDie(timeout):
            t1 = time.time()
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = (t2 - t1)
        fmt = 'Finished iteration {}/{} of epoch {} ({:.2f} images/sec)'
        log.info(fmt.format((i + 1), epoch_iters, epoch, (total_batch_size / dt)))
        # Report metrics from the first device only.
        prefix = '{}_{}'.format(train_model._device_prefix, train_model._devices[0])
        accuracy = workspace.FetchBlob((prefix + '/accuracy'))
        loss = workspace.FetchBlob((prefix + '/loss'))
        train_fmt = 'Training loss: {}, accuracy: {}'
        log.info(train_fmt.format(loss, accuracy))
    num_images = ((epoch * epoch_iters) * total_batch_size)
    prefix = '{}_{}'.format(train_model._device_prefix, train_model._devices[0])
    accuracy = workspace.FetchBlob((prefix + '/accuracy'))
    loss = workspace.FetchBlob((prefix + '/loss'))
    learning_rate = workspace.FetchBlob(data_parallel_model.GetLearningRateBlobNames(train_model)[0])
    test_accuracy = 0
    test_accuracy_top5 = 0
    if (test_model is not None):
        # Average top-1/top-5 accuracy over every device and test iteration.
        ntests = 0
        for _ in range(test_epoch_iters):
            workspace.RunNet(test_model.net.Proto().name)
            for g in test_model._devices:
                # np.asscalar() was removed in NumPy 1.23; ndarray.item() is
                # the documented drop-in replacement.
                test_accuracy += workspace.FetchBlob(('{}_{}'.format(test_model._device_prefix, g) + '/accuracy')).item()
                test_accuracy_top5 += workspace.FetchBlob(('{}_{}'.format(test_model._device_prefix, g) + '/accuracy_top5')).item()
                ntests += 1
        test_accuracy /= ntests
        test_accuracy_top5 /= ntests
    else:
        test_accuracy = (- 1)
        test_accuracy_top5 = (- 1)
    explog.log(input_count=num_images, batch_count=(i + (epoch * epoch_iters)), additional_values={'accuracy': accuracy, 'loss': loss, 'learning_rate': learning_rate, 'epoch': epoch, 'top1_test_accuracy': test_accuracy, 'top5_test_accuracy': test_accuracy_top5})
    assert (loss < 40), 'Exploded gradients :('
    return (epoch + 1)
def calc_boomerang_tip(location, orientation):
    """Return the boomerang tip position: the first of the 15 marker vectors
    computed for the given rigid-body location and orientation."""
    return bm.get_boomerang_r_vectors_15(location, orientation)[0]
def get_data(data_subdir):
    """Load the preprocessed ('pro_sg') train/validation split for a dataset.

    Returns a dict with the item count, the training matrix, and the
    validation fold-in / held-out matrices.
    """
    pro_dir = os.path.join('..', 'data', data_subdir, 'pro_sg')
    n_items = get_num_items(pro_dir)
    train_data = load_train_data(os.path.join(pro_dir, 'train.csv'), n_items)
    vad_tr, vad_te = load_tr_te_data(
        os.path.join(pro_dir, 'validation_tr.csv'),
        os.path.join(pro_dir, 'validation_te.csv'),
        n_items,
    )
    return {
        'n_items': n_items,
        'train_data': train_data,
        'vad_data_tr': vad_tr,
        'vad_data_te': vad_te,
    }
def test_generalized_iterators():
    """IntPairs iterators stop at the first zero-valued pair, and exhausted
    iterators keep raising StopIteration on repeated next() calls."""
    def pairs(data):
        return m.IntPairs(data)

    # .nonzero() yields (key, value) pairs up to (not including) the first
    # pair whose value is zero.
    assert list(pairs([(1, 2), (3, 4), (0, 5)]).nonzero()) == [(1, 2), (3, 4)]
    assert list(pairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero()) == [(1, 2)]
    assert list(pairs([(0, 3), (1, 2), (3, 4)]).nonzero()) == []
    # .nonzero_keys() yields only the keys of those pairs.
    assert list(pairs([(1, 2), (3, 4), (0, 5)]).nonzero_keys()) == [1, 3]
    assert list(pairs([(1, 2), (2, 0), (0, 3), (4, 5)]).nonzero_keys()) == [1]
    assert list(pairs([(0, 3), (1, 2), (3, 4)]).nonzero_keys()) == []
    # Exhausted iterators must raise StopIteration every time.
    for exhausted in (pairs([(0, 0)]).nonzero(), pairs([(0, 0)]).nonzero_keys()):
        for _ in range(3):
            with pytest.raises(StopIteration):
                next(exhausted)
class ScriptFile(object):
    """Wrapper around an installed-file record that fixes up script
    shebangs/launchers after saving.

    ``changed`` becomes True once :meth:`save` reports that the script at
    ``dest_path`` needed fixing.
    """

    def __init__(self, file):
        self._file = file
        # Mirror the wrapped record's paths for convenient access.
        self.src_record_path = file.src_record_path
        self.dest_path = file.dest_path
        self.changed = False

    def save(self):
        """Save the underlying file, then fix the written script in place."""
        self._file.save()
        self.changed = fix_script(self.dest_path)
class Mixed_4b(nn.Module):
    """Inception-style 3D block ('Mixed_4b'): four parallel branches over a
    480-channel input, concatenated on the channel axis
    (192 + 208 + 48 + 64 = 512 output channels)."""

    def __init__(self):
        super(Mixed_4b, self).__init__()
        # 1x1x1 bottleneck branch.
        self.branch0 = nn.Sequential(BasicConv3d(480, 192, kernel_size=1, stride=1))
        # 1x1x1 reduction followed by a separable 3x3x3 convolution.
        self.branch1 = nn.Sequential(
            BasicConv3d(480, 96, kernel_size=1, stride=1),
            SepConv3d(96, 208, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv3d(480, 16, kernel_size=1, stride=1),
            SepConv3d(16, 48, kernel_size=3, stride=1, padding=1),
        )
        # Max-pool branch with a 1x1x1 projection.
        self.branch3 = nn.Sequential(
            nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
            BasicConv3d(480, 64, kernel_size=1, stride=1),
        )

    def forward(self, x):
        """Run all four branches on ``x`` and concatenate channel-wise."""
        branch_outputs = (self.branch0(x), self.branch1(x), self.branch2(x), self.branch3(x))
        return torch.cat(branch_outputs, dim=1)
def silent_net():
    """Build a minimal net whose two dummy inputs are consumed by Silence
    layers (producing no outputs), and return its NetParameter protobuf."""
    spec = caffe.NetSpec()
    # Two dummy blobs of different shapes, both immediately silenced.
    spec.data, spec.data2 = L.DummyData(shape=[dict(dim=[3]), dict(dim=[4, 2])], ntop=2)
    spec.silence_data = L.Silence(spec.data, ntop=0)
    spec.silence_data2 = L.Silence(spec.data2, ntop=0)
    return spec.to_proto()
class MemoryChunkPythonArguments(MemoryChunk):
    # Memory-chunk specialization that forwards the raw Python argument
    # tuple to the generated Cython/C code instead of unpacking it.
    def declare_class_members(self):
        # Declare a per-chunk count field, e.g. "cdef int _n_<name>".
        return (' cdef int _n_%s\n' % self.name)
    def init_class_members(self):
        # Jinja template (indented 8 via `ri`) initializing the argument
        # count from the kwargs dict.
        # NOTE(review): this assigns `self._n_args`, while
        # declare_class_members() declares `_n_<name>` — confirm whether the
        # mismatch is intentional upstream.
        return je(ri(8, "\n count = args['{{ myself.name }}']\n self._n_args = count\n "), myself=self)
    def setup_args(self):
        # No per-call setup code is needed for this chunk type.
        return ''
    def pass_argument(self):
        # Pass the underlying C item array of the argument tuple directly.
        return '(<PyTupleObject*>args).ob_item'
def setup(opt):
if (opt.caption_model == 'show_tell'):
model = ShowTellModel(opt)
elif (opt.caption_model == 'show_attend_tell'):
model = ShowAttendTellModel(opt)
elif (opt.caption_model == 'all_img'):
model = AllImgModel(opt)
elif (opt.caption_model == 'fc'):
model = FCModel(opt)
elif (opt.caption_model == 'att2in'):
model = Att2inModel(opt)
elif (opt.caption_model == 'att2in2'):
model = Att2in2Model(opt)
elif (opt.caption_model == 'adaatt'):
model = AdaAttModel(opt)
elif (opt.caption_model == 'adaattmo'):
model = AdaAttMOModel(opt)
elif (opt.caption_model == 'topdown'):
model = TopDownModel(opt)
else:
raise Exception('Caption model not supported: {}'.format(opt.caption_model))
if (vars(opt).get('start_from', None) is not None):
assert os.path.isdir(opt.start_from), (' %s must be a a path' % opt.start_from)
assert os.path.isfile(os.path.join(opt.start_from, (('infos_' + opt.id) + '.pkl'))), ('infos.pkl file does not exist in path %s' % opt.start_from)
model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
return model |
class Dropout3d(_DropoutNd):
    """Randomly zeroes entire 3D channels of the input, using the dropout
    probability, training flag and in-place setting stored on the module."""

    def forward(self, input: Tensor) -> Tensor:
        """Apply channel-wise 3D dropout via the functional backend."""
        return F.dropout3d(
            input,
            p=self.p,
            training=self.training,
            inplace=self.inplace,
        )
def quit_with_gc(func_or_gen):
    """Decorator that forces a garbage collection after each call.

    Usable bare (``@quit_with_gc``, collects generation 2) or with an
    integer argument (``@quit_with_gc(0)``) selecting the GC generation.
    """
    generation = 2

    def _quit_with_gc(f):
        def decorated(*args, **kw):
            import gc
            result = f(*args, **kw)
            # Collect the configured generation once the call has finished.
            gc.collect(generation)
            return result
        return decorated

    # Called with an int: act as a decorator factory for that generation.
    if isinstance(func_or_gen, int):
        generation = func_or_gen
        return _quit_with_gc
    # Called bare on a function: decorate it directly.
    return _quit_with_gc(func_or_gen)
def make_registry(cls: Type):
    """Attach a minimal plugin registry to ``cls``.

    Adds ``register(subclass, **kwargs)``, ``unregister(subclass)`` and
    ``extensions()`` attributes backed by a fresh ``cls._registry_`` dict,
    then returns ``cls`` (so this works as a class decorator).
    """
    cls._registry_ = {}

    def _do_register(subclass, **kwargs):
        # Keyed by the subclass object itself; kwargs are its metadata.
        cls._registry_[subclass] = kwargs

    def _do_unregister(subclass):
        del cls._registry_[subclass]

    def _all_extensions():
        return cls._registry_

    cls.register = _do_register
    cls.unregister = _do_unregister
    cls.extensions = _all_extensions
    return cls
def build_backbone(args):
    """Assemble a (backbone + positional-embedding) Joiner from CLI args.

    Only ResNet backbones are supported; the backbone is trainable when
    ``args.lr_backbone_ratio`` is positive.  The resulting model exposes the
    backbone's channel count via ``num_channels``.
    """
    position_embedding = build_position_embedding(args)
    train_backbone = args.lr_backbone_ratio > 0
    if 'resnet' not in args.backbone:
        raise NotImplementedError
    backbone = ResNet(
        name=args.backbone,
        train_backbone=train_backbone,
        return_interm_layers=False,
        dilation=False,
        freeze_bn=args.freeze_bn,
    )
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model
_properties
class GPUGridStridedTiling(transformation.SingleStateTransformation):
    """Convert two perfectly-nested one-dimensional maps into a GPU
    grid-strided-loop schedule.

    The outer map is tiled over the CUDA grid (GPU_Device schedule, capped at
    ``max_grid_dim`` blocks) and the inner map over the thread block
    (GPU_ThreadBlock schedule, ``block_dim`` threads); the original maps are
    demoted to sequential loops striding by grid/block size inside the tiles.

    NOTE(review): the bare ``_properties`` line above this class looks like a
    truncated ``@properties.make_properties`` decorator.
    """
    # Pattern nodes: the two nested map entries this transformation matches.
    outer_map_entry = transformation.PatternNode(nodes.MapEntry)
    inner_map_entry = transformation.PatternNode(nodes.MapEntry)
    # Prefix used when synthesizing the new tile-dimension parameter names.
    new_dim_prefix = Property(dtype=str, default='tile', desc='Prefix for new dimension name')
    # Upper bound on the CUDA grid size used for the outer tile map.
    max_grid_dim = SymbolicProperty(default=65535, desc='Maximum grid dimension')
    # Threads per block for the inner tile map.
    block_dim = Property(default=128, desc='Block dimension')
    def expressions(cls):
        # NOTE(review): takes `cls` — presumably a @classmethod whose
        # decorator was stripped by extraction.
        return [sdutil.node_path_graph(cls.outer_map_entry, cls.inner_map_entry)]
    def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
        """Match only perfectly-nested pairs of one-dimensional maps."""
        outer_map_entry = self.outer_map_entry
        inner_map_entry = self.inner_map_entry
        # Entry side: every outer-map output must go straight to the inner
        # map, and every inner-map input must come from the outer map.
        for e in graph.out_edges(outer_map_entry):
            if (e.dst != inner_map_entry):
                return False
        for e in graph.in_edges(inner_map_entry):
            if (e.src != outer_map_entry):
                return False
        inner_map_exit = graph.exit_node(inner_map_entry)
        outer_map_exit = graph.exit_node(outer_map_entry)
        # Exit side: the same perfect-nesting requirement.
        for e in graph.out_edges(inner_map_exit):
            if (e.dst != outer_map_exit):
                return False
        for e in graph.in_edges(outer_map_exit):
            if (e.src != inner_map_exit):
                return False
        # Only single-parameter (1-D) maps are supported.
        if ((len(outer_map_entry.map.params) != 1) or (len(inner_map_entry.map.params) != 1)):
            return False
        return True
    def _find_new_dim(self, sdfg: SDFG, state: SDFGState, entry: nodes.MapEntry, prefix: str, target_dim: str):
        """Return a fresh '<prefix>_<dim>' symbol name that collides with no
        symbol defined at `entry` nor any SDFG-level symbol."""
        candidate = ('%s_%s' % (prefix, target_dim))
        index = 1
        defined_vars = set((str(s) for s in (state.symbols_defined_at(entry).keys() | sdfg.symbols.keys())))
        while (candidate in defined_vars):
            candidate = ('%s%d_%s' % (prefix, index, target_dim))
            index += 1
        return candidate
    def apply(self, graph: SDFGState, sdfg: SDFG):
        """Rewrite the matched map pair into the grid-strided schedule.

        Steps: (1) compute the new tile ranges and strided inner ranges,
        (2) insert new GPU_Device/GPU_ThreadBlock tile maps around the
        originals (now Sequential), (3) redistribute the memlets/connectors
        across the new entry/exit nodes, and (4) interchange the original
        outer map with the new thread-block map so the final nesting is
        grid -> block -> sequential -> sequential.
        """
        i_entry = self.inner_map_entry
        o_entry = self.outer_map_entry
        i_exit = graph.exit_node(i_entry)
        o_exit = graph.exit_node(o_entry)
        new_dim_prefix = self.new_dim_prefix
        max_grid_dim = self.max_grid_dim
        block_dim = self.block_dim
        max_grid_dim = symbolic.pystr_to_symbolic(max_grid_dim)
        block_dim = symbolic.pystr_to_symbolic(block_dim)
        (o_from, o_to, o_step) = o_entry.map.range[0]
        (i_from, i_to, i_step) = i_entry.map.range[0]
        # Fresh parameter names for the two new tile dimensions.
        tile_o_dim_new = self._find_new_dim(sdfg, graph, o_entry, new_dim_prefix, o_entry.map.params[0])
        tile_i_dim_new = self._find_new_dim(sdfg, graph, i_entry, new_dim_prefix, i_entry.map.params[0])
        # Grid size: at most max_grid_dim blocks, or fewer if the outer
        # iteration count is smaller.
        grid_dim = sympy.Min(max_grid_dim, (((o_to + 1) - o_from) // o_step))
        tile_o_range_new = (0, (grid_dim - 1), 1)
        tile_i_range_new = (0, (block_dim - 1), 1)
        # The original maps now start at their tile offset and stride by the
        # full grid/block extent (the grid-strided loop pattern).
        o_range_new = ((o_from + (symbolic.pystr_to_symbolic(tile_o_dim_new) * o_step)), o_to, (grid_dim * o_step))
        i_range_new = ((i_from + (symbolic.pystr_to_symbolic(tile_i_dim_new) * i_step)), i_to, (block_dim * i_step))
        tile_o_map = nodes.Map(o_entry.map.label, [tile_o_dim_new], subsets.Range([tile_o_range_new]), schedule=dtypes.ScheduleType.GPU_Device)
        tile_i_map = nodes.Map(i_entry.map.label, [tile_i_dim_new], subsets.Range([tile_i_range_new]), schedule=dtypes.ScheduleType.GPU_ThreadBlock)
        tile_o_entry = nodes.MapEntry(tile_o_map)
        tile_i_entry = nodes.MapEntry(tile_i_map)
        tile_o_exit = nodes.MapExit(tile_o_map)
        tile_i_exit = nodes.MapExit(tile_i_map)
        tile_i_entry.map.gpu_block_size = [self.block_dim, 1, 1]
        # Demote the original maps to sequential strided loops.
        o_entry.map.range = subsets.Range([o_range_new])
        o_entry.map.schedule = dtypes.ScheduleType.Sequential
        i_entry.map.range = subsets.Range([i_range_new])
        i_entry.map.schedule = dtypes.ScheduleType.Sequential
        tile_o_entry.in_connectors = dcpy(o_entry.in_connectors)
        tile_i_entry.in_connectors = dcpy(i_entry.in_connectors)
        tile_o_exit.out_connectors = dcpy(o_exit.out_connectors)
        tile_i_exit.out_connectors = dcpy(i_exit.out_connectors)
        # Reroute outside edges to the new tile entry/exit nodes.
        sdutil.change_edge_src(graph, o_exit, tile_o_exit)
        sdutil.change_edge_src(graph, i_exit, tile_i_exit)
        sdutil.change_edge_dest(graph, o_entry, tile_o_entry)
        sdutil.change_edge_dest(graph, i_entry, tile_i_entry)
        # For each (original, new-tile) pair, rebuild the edges between the
        # new tile node and the original map node, widening memlet subsets
        # to the image of the original map range and merging duplicates.
        for (map_entry, new_map_entry, map_exit, new_map_exit) in [(o_entry, tile_o_entry, o_exit, tile_o_exit), (i_entry, tile_i_entry, i_exit, tile_i_exit)]:
            new_in_edges = dict()
            entry_in_conn = {}
            entry_out_conn = {}
            for (_src, src_conn, _dst, _, memlet) in graph.out_edges(map_entry):
                if ((src_conn is not None) and (src_conn[:4] == 'OUT_') and (not isinstance(sdfg.arrays[memlet.data], dace.data.Scalar))):
                    # Array data: widen the subset over the map range and
                    # union it with any previously-seen edge for this array.
                    new_subset = calc_set_image(map_entry.map.params, map_entry.map.range, memlet.subset)
                    conn = src_conn[4:]
                    key = (memlet.data, ('IN_' + conn), ('OUT_' + conn))
                    if (key in new_in_edges.keys()):
                        old_subset = new_in_edges[key].subset
                        new_in_edges[key].subset = calc_set_union(old_subset, new_subset)
                    else:
                        entry_in_conn[('IN_' + conn)] = None
                        entry_out_conn[('OUT_' + conn)] = None
                        new_memlet = dcpy(memlet)
                        new_memlet.subset = new_subset
                        if memlet.dynamic:
                            new_memlet.num_accesses = memlet.num_accesses
                        else:
                            new_memlet.num_accesses = new_memlet.num_elements().simplify()
                        new_in_edges[key] = new_memlet
                else:
                    # Scalars / dynamic-range connectors pass through as-is.
                    if ((src_conn is not None) and (src_conn[:4] == 'OUT_')):
                        conn = src_conn[4:]
                        in_conn = ('IN_' + conn)
                        out_conn = ('OUT_' + conn)
                    else:
                        in_conn = src_conn
                        out_conn = src_conn
                    if in_conn:
                        entry_in_conn[in_conn] = None
                    if out_conn:
                        entry_out_conn[out_conn] = None
                    new_in_edges[(memlet.data, in_conn, out_conn)] = dcpy(memlet)
            new_map_entry.out_connectors = entry_out_conn
            map_entry.in_connectors = entry_in_conn
            for ((_, in_conn, out_conn), memlet) in new_in_edges.items():
                graph.add_edge(new_map_entry, out_conn, map_entry, in_conn, memlet)
            # Mirror of the above for the exit side.
            new_out_edges = dict()
            exit_in_conn = {}
            exit_out_conn = {}
            for (_src, _, _dst, dst_conn, memlet) in graph.in_edges(map_exit):
                if ((dst_conn is not None) and (dst_conn[:3] == 'IN_') and (not isinstance(sdfg.arrays[memlet.data], dace.data.Scalar))):
                    new_subset = calc_set_image(map_entry.map.params, map_entry.map.range, memlet.subset)
                    conn = dst_conn[3:]
                    key = (memlet.data, ('IN_' + conn), ('OUT_' + conn))
                    if (key in new_out_edges.keys()):
                        old_subset = new_out_edges[key].subset
                        new_out_edges[key].subset = calc_set_union(old_subset, new_subset)
                    else:
                        exit_in_conn[('IN_' + conn)] = None
                        exit_out_conn[('OUT_' + conn)] = None
                        new_memlet = dcpy(memlet)
                        new_memlet.subset = new_subset
                        if memlet.dynamic:
                            new_memlet.num_accesses = memlet.num_accesses
                        else:
                            new_memlet.num_accesses = new_memlet.num_elements().simplify()
                        new_out_edges[key] = new_memlet
                else:
                    if ((dst_conn is not None) and (dst_conn[:3] == 'IN_')):
                        conn = dst_conn[3:]
                        in_conn = ('IN_' + conn)
                        out_conn = ('OUT_' + conn)
                    else:
                        in_conn = dst_conn
                        out_conn = dst_conn
                    if in_conn:
                        exit_in_conn[in_conn] = None
                    if out_conn:
                        exit_out_conn[out_conn] = None
                    new_out_edges[(memlet.data, in_conn, out_conn)] = dcpy(memlet)
            new_map_exit.in_connectors = exit_in_conn
            map_exit.out_connectors = exit_out_conn
            for ((_, in_conn, out_conn), memlet) in new_out_edges.items():
                graph.add_edge(map_exit, out_conn, new_map_exit, in_conn, memlet)
        # Fix up dynamic-range (non-'IN_') connectors between the outer map
        # and the new thread-block entry: route them through both nodes,
        # adding a single pass-through edge per data container.
        data_dict = {}
        for e in graph.edges_between(o_entry, tile_i_entry):
            if ((e.dst_conn is not None) and (e.dst_conn[:3] != 'IN_') and (e.src_conn[:4] == 'OUT_')):
                graph.remove_edge(e)
                tile_i_entry.add_out_connector(e.src_conn)
                i_entry.add_in_connector(e.dst_conn)
                graph.add_edge(tile_i_entry, e.src_conn, i_entry, e.dst_conn, dcpy(e.data))
                if (e.data.data not in data_dict.keys()):
                    in_conn = ('IN_' + e.src_conn[4:])
                    o_entry.add_out_connector(e.src_conn)
                    tile_i_entry.add_in_connector(in_conn)
                    data_dict[e.data.data] = graph.add_edge(o_entry, e.src_conn, tile_i_entry, in_conn, dcpy(e.data))
                tile_i_entry.remove_in_connector(e.dst_conn)
        # Swap the (sequential) outer map with the thread-block tile map so
        # the block map sits directly inside the grid map.
        MapInterchange.apply_to(sdfg, outer_map_entry=o_entry, inner_map_entry=tile_i_entry)
    def annotates_memlets():
        # NOTE(review): no `self` — presumably a @staticmethod whose
        # decorator was stripped by extraction.
        return False
def test_count_binary_occurrences():
    """Char-level CountVectorizer: raw counts, binary occurrences, dtype."""
    corpus = ['aaabc', 'abbde']
    # Plain counts over the character vocabulary.
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    counts = vect.fit_transform(corpus).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names_out())
    assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], counts)
    # binary=True clips every nonzero count to 1.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    occurrences = vect.fit_transform(corpus).toarray()
    assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], occurrences)
    # The requested dtype is honored for the sparse output.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True, dtype=np.float32)
    assert vect.fit_transform(corpus).dtype == np.float32
class Test_Metropolis():
def setup_method(self):
self.T = 2.0
self.met = Metropolis(self.T)
self.res_new = OptimizeResult(success=True, fun=0.0)
self.res_old = OptimizeResult(success=True, fun=1.0)
def test_boolean_return(self):
ret = self.met(res_new=self.res_new, res_old=self.res_old)
assert isinstance(ret, bool)
def test_lower_f_accepted(self):
assert_(self.met(res_new=self.res_new, res_old=self.res_old))
def test_accept(self):
one_accept = False
one_reject = False
for i in range(1000):
if (one_accept and one_reject):
break
res_new = OptimizeResult(success=True, fun=1.0)
res_old = OptimizeResult(success=True, fun=0.5)
ret = self.met(res_new=res_new, res_old=res_old)
if ret:
one_accept = True
else:
one_reject = True
assert_(one_accept)
assert_(one_reject)
def test_GH7495(self):
met = Metropolis(2)
res_new = OptimizeResult(success=True, fun=0.0)
res_old = OptimizeResult(success=True, fun=2000)
with np.errstate(over='raise'):
met.accept_reject(res_new=res_new, res_old=res_old)
def test_gh7799(self):
def func(x):
return ((((x ** 2) - 8) ** 2) + ((x + 2) ** 2))
x0 = (- 4)
limit = 50
con = ({'type': 'ineq', 'fun': (lambda x: (func(x) - limit))},)
res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
assert res.success
assert_allclose(res.fun, limit, rtol=1e-06)
def test_accept_gh7799(self):
met = Metropolis(0)
res_new = OptimizeResult(success=True, fun=0.0)
res_old = OptimizeResult(success=True, fun=1.0)
assert met(res_new=res_new, res_old=res_old)
res_new.success = False
assert (not met(res_new=res_new, res_old=res_old))
res_old.success = False
assert met(res_new=res_new, res_old=res_old)
def test_reject_all_gh7799(self):
def fun(x):
return (x x)
def constraint(x):
return (x + 1)
kwargs = {'constraints': {'type': 'eq', 'fun': constraint}, 'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
assert (not res.success) |
def get_schema(query_column: str='query_id', item_column: str='item_id', timestamp_column: str='timestamp', rating_column: str='rating', has_timestamp: bool=True, has_rating: bool=True):
    """Build the Spark schema for an interaction log.

    Always contains integer query/item columns; a timestamp column and a
    double rating column are appended when the corresponding flags are set.
    """
    fields = [
        StructField(query_column, IntegerType()),
        StructField(item_column, IntegerType()),
    ]
    if has_timestamp:
        fields.append(StructField(timestamp_column, TimestampType()))
    if has_rating:
        fields.append(StructField(rating_column, DoubleType()))
    return StructType(fields)
def verification_performance(scores_plda):
    """Compute speaker-verification EER and minDCF from PLDA scores.

    Parses the trial list in the global ``veri_file_path`` — each line is
    "<label> <enrol-file> <test-file>" — looks each trial's score up in
    ``scores_plda`` (by enrolment model / test segment id), and splits the
    scores into positive (label 1) and negative trials.

    Returns:
        ``(eer, min_dcf)``.
    """
    positive_scores = []
    negative_scores = []
    # Context manager fixes the previously-leaked file handle; the unused
    # `ids`/`labels` accumulators from the original were dropped.
    with open(veri_file_path) as trials:
        for line in trials:
            fields = line.split(' ')
            lab = int(fields[0].rstrip().split('.')[0].strip())
            enrol_id = fields[1].rstrip().split('.')[0].strip()
            test_id = fields[2].rstrip().split('.')[0].strip()
            i = int(numpy.where((scores_plda.modelset == enrol_id))[0][0])
            j = int(numpy.where((scores_plda.segset == test_id))[0][0])
            score = float(scores_plda.scoremat[(i, j)])
            if (lab == 1):
                positive_scores.append(score)
            else:
                negative_scores.append(score)
    # Release the (potentially large) score structure before building the
    # metric tensors.
    del scores_plda
    (eer, _) = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
    (min_dcf, _) = minDCF(torch.tensor(positive_scores), torch.tensor(negative_scores))
    return (eer, min_dcf)
def main(_):
    """TF1 image-retraining entry point (transfer learning on bottlenecks).

    Downloads/loads the chosen architecture, builds bottleneck caches for the
    image directory, trains a new final layer with periodic validation, then
    evaluates on the test split and writes the frozen graph and label file.
    Returns -1 on configuration errors.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    prepare_file_system()
    model_info = create_model_info(FLAGS.architecture)
    if (not model_info):
        tf.logging.error('Did not recognize architecture flag')
        return (- 1)
    maybe_download_and_extract(model_info['data_url'])
    (graph, bottleneck_tensor, resized_image_tensor) = create_model_graph(model_info)
    # Split the image folders into training/testing/validation lists; each
    # top-level folder is one class.
    image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage)
    class_count = len(image_lists.keys())
    if (class_count == 0):
        tf.logging.error(('No valid folders of images found at ' + FLAGS.image_dir))
        return (- 1)
    if (class_count == 1):
        tf.logging.error((('Only one valid folder of images found at ' + FLAGS.image_dir) + ' - multiple classes are needed for classification.'))
        return (- 1)
    # Distortions (flips/crops/scales/brightness) force recomputing
    # bottlenecks every step, so caching is only done when they are off.
    do_distort_images = should_distort_images(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness)
    with tf.Session(graph=graph) as sess:
        (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(model_info['input_width'], model_info['input_height'], model_info['input_depth'], model_info['input_mean'], model_info['input_std'])
        if do_distort_images:
            (distorted_jpeg_data_tensor, distorted_image_tensor) = add_input_distortions(FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness, model_info['input_width'], model_info['input_height'], model_info['input_depth'], model_info['input_mean'], model_info['input_std'])
        else:
            # No distortions: precompute and cache every bottleneck once.
            cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)
        # New final softmax layer plus its training/evaluation ops.
        (train_step, cross_entropy, bottleneck_input, ground_truth_input, final_tensor) = add_final_training_ops(len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor, model_info['bottleneck_tensor_size'])
        (evaluation_step, prediction) = add_evaluation_step(final_tensor, ground_truth_input)
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter((FLAGS.summaries_dir + '/train'), sess.graph)
        validation_writer = tf.summary.FileWriter((FLAGS.summaries_dir + '/validation'))
        init = tf.global_variables_initializer()
        sess.run(init)
        for i in range(FLAGS.how_many_training_steps):
            # Fetch a training batch: freshly distorted images, or cached
            # bottlenecks when distortions are disabled.
            if do_distort_images:
                (train_bottlenecks, train_ground_truth) = get_random_distorted_bottlenecks(sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.image_dir, distorted_jpeg_data_tensor, distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
            else:
                (train_bottlenecks, train_ground_truth, _) = get_random_cached_bottlenecks(sess, image_lists, FLAGS.train_batch_size, 'training', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)
            (train_summary, _) = sess.run([merged, train_step], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth})
            train_writer.add_summary(train_summary, i)
            is_last_step = ((i + 1) == FLAGS.how_many_training_steps)
            # Periodic (and final-step) train/validation reporting.
            if (((i % FLAGS.eval_step_interval) == 0) or is_last_step):
                (train_accuracy, cross_entropy_value) = sess.run([evaluation_step, cross_entropy], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth})
                tf.logging.info(('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i, (train_accuracy * 100))))
                tf.logging.info(('%s: Step %d: Cross entropy = %f' % (datetime.now(), i, cross_entropy_value)))
                (validation_bottlenecks, validation_ground_truth, _) = get_random_cached_bottlenecks(sess, image_lists, FLAGS.validation_batch_size, 'validation', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)
                (validation_summary, validation_accuracy) = sess.run([merged, evaluation_step], feed_dict={bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth})
                validation_writer.add_summary(validation_summary, i)
                tf.logging.info(('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' % (datetime.now(), i, (validation_accuracy * 100), len(validation_bottlenecks))))
            # Optionally snapshot intermediate frozen graphs.
            intermediate_frequency = FLAGS.intermediate_store_frequency
            if ((intermediate_frequency > 0) and ((i % intermediate_frequency) == 0) and (i > 0)):
                intermediate_file_name = (((FLAGS.intermediate_output_graphs_dir + 'intermediate_') + str(i)) + '.pb')
                tf.logging.info(('Save intermediate result to : ' + intermediate_file_name))
                save_graph_to_file(sess, graph, intermediate_file_name)
        # Final held-out evaluation on the test split.
        (test_bottlenecks, test_ground_truth, test_filenames) = get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size, 'testing', FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, bottleneck_tensor, FLAGS.architecture)
        (test_accuracy, predictions) = sess.run([evaluation_step, prediction], feed_dict={bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth})
        tf.logging.info(('Final test accuracy = %.1f%% (N=%d)' % ((test_accuracy * 100), len(test_bottlenecks))))
        if FLAGS.print_misclassified_test_images:
            tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
            for (i, test_filename) in enumerate(test_filenames):
                if (predictions[i] != test_ground_truth[i].argmax()):
                    tf.logging.info(('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]])))
        # Persist the frozen graph and the label list.
        save_graph_to_file(sess, graph, FLAGS.output_graph)
        with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
            f.write(('\n'.join(image_lists.keys()) + '\n'))
class HearScene(Problem, Trainer):
# Decompiled decorator call: declares the default config schema for `setup`.
# '???' marks fields the caller must supply; each CLS entry names the class to
# instantiate, with its kwargs nested beside it.
_cfg(workspace=field('???', "\nWill put the following keys into this workspace:\n 'train_dataset', 'train_sampler', 'valid_dataset', 'valid_sampler', and 'task'", 'str or Path or Workspace'), corpus=dict(CLS=field('???', '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str)), train_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), train_sampler=dict(CLS=field(FixedBatchSizeBatchSampler, '\nThe batch sampler class. You can add the **kwargs right below this CLS key', str), batch_size='???'), valid_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), valid_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), test_datapipe=dict(CLS=field(HearScenePipe, '\nThe first datapipe class to be applied to the corpus. You can add the **kwargs right below this CLS key', str)), test_sampler=dict(CLS=FixedBatchSizeBatchSampler, batch_size=1), upstream=dict(CLS=field(S3PRLUpstreamDriver, '\nThe class of the upstream model following the specific interface. You can add the **kwargs right below this CLS key', str), name='hubert', feature_selection='hidden_states', freeze_upstream=field(True, "Set the entire upstream model's requires_grad to False, or else, leave it alone"), normalize=field(False, "Apply layer-norm to upstream model's each layer hidden_state"), weighted_sum=field(True, "If True, apply weighted-sum on the selected layers; If False, take the final layer.\nFor the selected layers, see the 'layer_selections' option"), layer_selections=field(None, 'If None, select all layers; Or, select the subset layers defined by this option'), legacy=True), downstream=dict(CLS=field(HearFullyConnectedPrediction, '\nThe downstream model class for each task. 
You can add the **kwargs right below this CLS key', str), hidden_layers=2, pooling='mean'), task=dict(CLS=field(ScenePredictionTask, '\nThe task class defining what to do for each train/valid/test step in the train/valid/test dataloader loop\nYou can add the **kwargs right below this CLS key', str), prediction_type='???', scores='???'))
def setup(cls, **cfg) -> Container:
    """Instantiate corpus, datapipes, samplers, model and task from `cfg`
    and store them (plus derived statistics) into the workspace so later
    stages (train/inference) can pick them up."""
    cfg = Container(cfg)
    workspace = Workspace(cfg.workspace)
    fix_random_seeds()
    # The upstream model is built first: its frame shift feeds the datapipes.
    upstream = cfg.upstream()
    stats = Container(feat_frame_shift=upstream.downsample_rate)
    logger.info('Preparing corpus')
    (train_data, valid_data, test_data, corpus_stats) = cfg.corpus().split(3)
    stats = corpus_stats.add(stats)
    logger.info('Preparing train data')
    # Datapipe factory is called with the stats, then applied to the data.
    train_dataset = cfg.train_datapipe(**stats)(train_data, **stats)
    train_sampler = cfg.train_sampler(train_dataset)
    # Tools derived from the train set are published through the workspace
    # environ so the valid/test pipelines below reuse the exact same tools.
    stats.override(train_dataset.all_tools())
    workspace.environ.update(stats)
    logger.info('Preparing valid data')
    valid_dataset = cfg.valid_datapipe(**dict(workspace.environ))(valid_data, **dict(workspace.environ))
    valid_sampler = cfg.valid_sampler(valid_dataset)
    logger.info('Preparing test data')
    test_dataset = cfg.test_datapipe(**dict(workspace.environ))(test_data, **dict(workspace.environ))
    test_sampler = cfg.test_sampler(test_dataset)
    logger.info('Preparing model and task')
    downstream = cfg.downstream(upstream.output_size, **dict(workspace.environ))
    model = UpstreamDownstreamModel(upstream, downstream)
    task = cfg.task(model, **dict(workspace.environ))
    # Persist every artifact needed by the later stages.
    workspace['train_data'] = train_data
    workspace['valid_data'] = valid_data
    workspace['test_data'] = test_data
    workspace['train_dataset'] = train_dataset
    workspace['train_sampler'] = train_sampler
    workspace['valid_dataset'] = valid_dataset
    workspace['valid_sampler'] = valid_sampler
    workspace['test_dataset'] = test_dataset
    workspace['test_sampler'] = test_sampler
    workspace['task'] = task
# Decompiled decorator call: default training config (Adam @ 1e-3, 150k steps).
# valid_metric / valid_higher_better are '???' and must be given by the caller.
_cfg(**Trainer.train.default_except(optimizer=dict(CLS='torch.optim.Adam', lr=0.001), trainer=dict(total_steps=150000, log_step=100, eval_step=1000, save_step=100, gradient_clipping=1.0, gradient_accumulate_steps=1, valid_metric='???', valid_higher_better='???')))
def train(cls, **cfg):
    """Delegate to the parent class's train loop with this problem's defaults."""
    super().train(**cfg)
# Decompiled decorator call: reuse the generic inference defaults unchanged.
_cfg(**Trainer.inference.default_cfg)
def inference(cls, **cfg):
    """Delegate to the parent class's inference stage."""
    super().inference(**cfg)
# Decompiled decorator call: the full pipeline runs setup -> train -> inference;
# per-stage configs drop 'workspace'/'resume', which `run` manages itself.
_cfg(**Problem.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
def run(cls, **cfg):
    """Delegate to the parent class's stage runner."""
    super().run(**cfg)
# Decompiled decorator call: adds `num_fold` on top of the `run` defaults; the
# per-fold test_fold is filled in automatically below.
_cfg(num_fold=field(5, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
def cross_validation(cls, **cfg):
    """Run the full pipeline once per fold, then average the per-fold test
    metrics and save them as 'avg_test_metrics' under the root workspace."""
    cfg = Container(cfg)
    fold_workspaces = [
        str(Workspace(cfg.workspace) / f'fold_{fold_id}')
        for fold_id in range(cfg.num_fold)
    ]
    # Run every fold with its own sub-workspace and held-out fold id.
    for fold_id, fold_workspace in enumerate(fold_workspaces):
        fold_cfg = cfg.clone().deselect('num_fold')
        fold_cfg.workspace = fold_workspace
        fold_cfg.setup.corpus.test_fold = fold_id
        cls.run(**fold_cfg)
    # Collect each fold's test metrics, keyed by metric name.
    metrics = defaultdict(list)
    for fold_workspace in fold_workspaces:
        fold_metrics = Workspace(fold_workspace)['test_metrics']
        for key, value in fold_metrics.items():
            metrics[key].append(value)
    avg_result = dict()
    for key, values in metrics.items():
        avg_score = sum(values) / len(values)
        avg_result[key] = avg_score
        logger.info(f'Average {key}: {avg_score}')
    Workspace(cfg.workspace).put(avg_result, 'avg_test_metrics', 'yaml')
class GPU():
    """Energy-consumption tracker for NVIDIA GPUs queried through NVML (pynvml).

    Every hardware query opens and closes its own NVML session, so the object
    holds no NVML state between calls.
    """

    def __init__(self, ignore_warnings=False):
        # Running total of energy accounted so far (see calculate_consumption).
        self._consumption = 0
        self._ignore_warnings = ignore_warnings
        self.is_gpu_available = is_gpu_available()
        if (not self.is_gpu_available) and (not self._ignore_warnings):
            warnings.warn(message='\n\nThere is no any available GPU devices or your GPU is not supported by Nvidia library!\nThe tracker will consider CPU usage only\n', category=NoGPUWarning)
        if self.is_gpu_available:
            # Timestamp of the previous consumption measurement.
            self._start = time.time()

    def _per_device(self, query):
        """Run `query(handle)` on every GPU within one NVML init/shutdown cycle.

        nvmlShutdown() is guaranteed even when a query raises (the previous
        code leaked the NVML session on errors).
        """
        pynvml.nvmlInit()
        try:
            return [query(pynvml.nvmlDeviceGetHandleByIndex(index))
                    for index in range(pynvml.nvmlDeviceGetCount())]
        finally:
            pynvml.nvmlShutdown()

    def calculate_consumption(self):
        """Return the energy used since the previous call and add it to the
        running total; 0 when no GPU is available.

        Integrates each device's instantaneous power reading (scaled by
        FROM_mWATTS_TO_kWATTH) over the elapsed interval.
        """
        if not self.is_gpu_available:
            return 0
        duration = time.time() - self._start
        self._start = time.time()
        consumption = 0
        for current_power in self.gpu_power():
            consumption += (current_power / FROM_mWATTS_TO_kWATTH) * duration
        # Guard against a negative total (e.g. clock anomalies).
        if consumption < 0:
            consumption = 0
        self._consumption += consumption
        return consumption

    def get_consumption(self):
        """Total energy accumulated so far; 0 when no GPU is available."""
        if not self.is_gpu_available:
            return 0
        return self._consumption

    def gpu_memory(self):
        """Per-GPU memory info structs, or None when no GPU is available."""
        if not self.is_gpu_available:
            return None
        return self._per_device(pynvml.nvmlDeviceGetMemoryInfo)

    def gpu_temperature(self):
        """Per-GPU core temperatures, or None when no GPU is available."""
        if not self.is_gpu_available:
            return None
        return self._per_device(
            lambda handle: pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU))

    def gpu_power(self):
        """Per-GPU power draw in milliwatts, or None when no GPU is available."""
        if not self.is_gpu_available:
            return None
        return self._per_device(pynvml.nvmlDeviceGetPowerUsage)

    def gpu_power_limit(self):
        """Per-GPU enforced power limits, or None when no GPU is available."""
        if not self.is_gpu_available:
            return None
        return self._per_device(pynvml.nvmlDeviceGetEnforcedPowerLimit)

    def name(self):
        """Name of the first GPU, or '' when the lookup fails for any reason."""
        def _probe(handle):
            # Power query acts as a capability probe: devices without power
            # readings raise here, sending the whole lookup to the fallback.
            pynvml.nvmlDeviceGetPowerUsage(handle)
            return pynvml.nvmlDeviceGetName(handle)
        try:
            names = self._per_device(_probe)
            return names[0].encode().decode('UTF-8')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            return ''

    def gpu_num(self):
        """Number of GPUs that answer a power query, or 0 on any failure."""
        try:
            return len(self._per_device(pynvml.nvmlDeviceGetPowerUsage))
        except Exception:
            # Narrowed from a bare `except:`.
            return 0
def test_ListArray_RecordArray_NumpyArray():
    """Round-trip and leak-check a ListArray over a single-field RecordArray."""
    starts = ak.index.Index(np.array([4, 100, 1], np.int64))
    stops = ak.index.Index(np.array([7, 100, 3, 200], np.int64))
    content = ak.contents.recordarray.RecordArray(
        [ak.contents.numpyarray.NumpyArray(
            np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8]))],
        ['nest'],
    )
    layout = ak.contents.listarray.ListArray(starts, stops, content)
    roundtrip(layout)
    array = ak.highlevel.Array(layout)
    # Exercise every consumer style against the same array.
    for consumer in (swallow, passthrough, passthrough2, digest, digest2):
        memoryleak(array, consumer)
def read_scp_info(filename, limit=numpy.inf):
    """Index a Kaldi script (.scp) file.

    Each line is '<uttid> <arkfile>:<offset>'. For every entry the referenced
    ark file is opened at the offset and the matrix header is read, producing
    tuples (uttid, arkfile, offset, feat_len, feat_dim). Reading stops once
    `limit` entries have been collected.
    """
    entries = []
    with open(filename, 'r') as scp:
        for raw_line in scp:
            uttid, pointer = raw_line.strip().split()
            # Split on the LAST ':' so Windows-style paths survive.
            sep = pointer.rfind(':')
            arkfile = pointer[:sep]
            offset = int(pointer[sep + 1:])
            with open(arkfile, 'rb') as ark:
                ark.seek(offset)
                feat_len, feat_dim = readMatrixShape(ark)
            entries.append((uttid, arkfile, offset, feat_len, feat_dim))
            if len(entries) == limit:
                break
    return entries
class Compose(transforms.Compose):
    """Compose that routes each keyword input through type-aware transforms.

    `additional_targets` maps an input key to a target type name; a transform
    may expose a `<type>_fn` method used for that type, and transforms listed
    in `ignore_fns` for a type are skipped entirely.
    """

    def __init__(self, fns, additional_targets=None):
        super().__init__(fns)
        self.additional_targets = additional_targets or {}
        # Normalization is intensity-specific, so never apply it to masks.
        self.ignore_fns = {'mask': ['Normalize']}

    def _call_fn_given_type(self, fn, k, v):
        """Apply `fn` to value `v` according to the target type of key `k`."""
        target_type = self.additional_targets.get(k)
        # Prefer a type-specific entry point when the transform provides one.
        if hasattr(fn, f'{target_type}_fn'):
            return getattr(fn, f'{target_type}_fn')(v)
        # Skip transforms blacklisted for this target type.
        if fn.__class__.__name__ in self.ignore_fns.get(target_type, []):
            return v
        return fn(v)

    def __call__(self, **kwargs):
        out = {}
        for key, value in kwargs.items():
            for fn in self.transforms:
                value = self._call_fn_given_type(fn, key, value)
            out[key] = value
        return out
class Bottleneck(nn.Module):
    """ResNeSt-style bottleneck: 1x1 reduce -> grouped 3x3 (Split-Attention
    when radix >= 1) -> 1x1 expand (x4), with optional average-pool
    downsampling (avd) and DropBlock regularization."""
    # Output channels are `planes * expansion`.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, bottleneck_width=64, avd=False, avd_first=False, dilation=1, is_first=False, rectified_conv=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        # Width of the grouped 3x3 conv, scaled by bottleneck_width/cardinality.
        group_width = (int((planes * (bottleneck_width / 64.0))) * cardinality)
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = get_norm(norm_layer, group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        # avd: downsample with an AvgPool instead of a strided conv, but only
        # when this block actually strides or is the first block of its stage.
        self.avd = (avd and ((stride > 1) or is_first))
        self.avd_first = avd_first
        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            # Stride is absorbed by the pooling layer; the conv runs unstrided.
            stride = 1
        if (dropblock_prob > 0.0):
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            # NOTE(review): dropblock2 is created only when radix == 1, yet
            # forward() uses it only on the radix == 0 path — a radix == 0
            # block with dropblock_prob > 0 would raise AttributeError.
            # Confirm against the upstream ResNeSt implementation.
            if (radix == 1):
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)
        if (radix >= 1):
            # Split-Attention conv branch; no bn2 is created here — presumably
            # SplAtConv2d normalizes internally (bn2 is only used when
            # radix == 0 in forward()). TODO confirm.
            self.conv2 = SplAtConv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False, radix=radix, rectify=rectified_conv, rectify_avg=rectify_avg, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
        elif rectified_conv:
            # Only reachable when radix < 1.
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False, average_mode=rectify_avg)
            self.bn2 = get_norm(norm_layer, group_width)
        else:
            self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)
            self.bn2 = get_norm(norm_layer, group_width)
        self.conv3 = nn.Conv2d(group_width, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = get_norm(norm_layer, (planes * 4))
        if last_gamma:
            # Zero-init the last norm's scale so the block starts as identity.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if (self.dropblock_prob > 0.0):
            out = self.dropblock1(out)
        out = self.relu(out)
        if (self.avd and self.avd_first):
            out = self.avd_layer(out)
        out = self.conv2(out)
        # bn2/dropblock2/relu only apply on the plain-conv path (radix == 0);
        # the Split-Attention conv covers them itself when radix >= 1.
        if (self.radix == 0):
            out = self.bn2(out)
            if (self.dropblock_prob > 0.0):
                out = self.dropblock2(out)
            out = self.relu(out)
        if (self.avd and (not self.avd_first)):
            out = self.avd_layer(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.dropblock_prob > 0.0):
            out = self.dropblock3(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
def r_cond1(t):
    """Wrap the first element of `t` (a condition callable taking (k, n)) so
    that it stops once `n` exceeds MAX_FUNC_CALL, and return it as a
    single-entry rule list [('cond', wrapped)]."""
    cond = t[0]

    def limited(k, n):
        # Budget exhausted: report failure without invoking the condition.
        if n > MAX_FUNC_CALL:
            return (k, n, False, False)
        return cond(k, n)

    return [('cond', limited)]
def create_evaluator(model):
    """Reset `model` and wrap it in a ForecastEvaluator using the standard
    hourly-cadence, 6-hour-horizon evaluation settings."""
    model.reset()
    config = ForecastEvaluatorConfig(
        cadence='1h',
        horizon='6h',
        retrain_freq='12h',
        train_window='14d',
    )
    return ForecastEvaluator(model=model, config=config)
def _format(val: Any, output_format: str='standard', errors: str='coarse') -> Any:
    """Normalize a single Bulgarian EGN value.

    Returns a one-element list: [np.nan] for nulls or unparseable values
    ([val] instead when errors == 'ignore'), the compacted EGN for
    'compact'/'standard', or the decoded birth date for 'birthdate'.

    Raises ValueError when the value is invalid and errors == 'raise', or
    when `output_format` is not one of the supported formats.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_bg_egn(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        # 'ignore' keeps the raw value; any other mode coerces to NaN.
        error_result = val if errors == 'ignore' else np.nan
        return [error_result]
    if output_format in {'compact', 'standard'}:
        return [egn.compact(val)]
    if output_format == 'birthdate':
        return [egn.get_birth_date(val)]
    # Bug fix: an unrecognized format previously fell through and raised
    # UnboundLocalError on `result`; fail with an explicit message instead.
    raise ValueError(f'Unsupported output_format {output_format}')
def test_pytest_parametrize_fixture(testdir):
    """End-to-end check that schema-generated tests compose with
    pytest_generate_tests parametrization: 2 endpoints x 2 params = 4 passes.

    NOTE(review): the embedded test source contains literal backslash-paren
    sequences where decorators (a fixture / parametrize decorator) appear to
    have been lost in transcription — confirm against the original template
    before relying on this string.
    """
    testdir.make_test('\nfrom hypothesis import settings, HealthCheck\n\n\ndef pytest_generate_tests(metafunc):\n metafunc.parametrize("inner", ("A", "B"))\n\()\ndef param(inner):\n return inner * 2\n\()\(suppress_health_check=[HealthCheck.function_scoped_fixture])\ndef test_(request, param, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.full_path == "/v1/users"\n assert case.method in ("GET", "POST")\n', paths={'/users': {'get': {'responses': {'200': {'description': 'OK'}}}, 'post': {'responses': {'200': {'description': 'OK'}}}}})
    result = testdir.runpytest('-v', '-s')
    # Each of the 2 endpoints runs under both parametrized values.
    result.assert_outcomes(passed=4)
    result.stdout.re_match_lines(['test_pytest_parametrize_fixture.py::test_\\[GET /v1/users\\]\\[A\\] PASSED', 'test_pytest_parametrize_fixture.py::test_\\[GET /v1/users\\]\\[B\\] PASSED', 'test_pytest_parametrize_fixture.py::test_\\[POST /v1/users\\]\\[A\\] PASSED', 'test_pytest_parametrize_fixture.py::test_\\[POST /v1/users\\]\\[B\\] PASSED', 'Hypothesis calls: 4'])
def GetMxDegNId(tspec, *args):
    """Dispatch GetMxDegNId to the implementation matching the exact graph
    type of `tspec`; raises TypeError for unsupported types."""
    # Exact-type dispatch (not isinstance), mirroring the SWIG wrapper style.
    dispatch = (
        (PUNGraph, GetMxDegNId_PUNGraph),
        (PUndirNet, GetMxDegNId_PUndirNet),
        (PDirNet, GetMxDegNId_PDirNet),
        (PNGraph, GetMxDegNId_PNGraph),
        (PNEANet, GetMxDegNId_PNEANet),
        (PNGraphMP, GetMxDegNId_PNGraphMP),
        (PNEANetMP, GetMxDegNId_PNEANetMP),
    )
    for graph_type, impl in dispatch:
        if type(tspec) == graph_type:
            return impl(tspec, *args)
    raise TypeError('First argument has invalid type')
def main(args):
    """Seed every RNG source for reproducibility, then run evaluation."""
    print(args)
    cudnn.benchmark = True
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.manual_seed(seed)
    Solver(args).evaluate()
def attention_pytorch(qkv, dropout_p=0.0, causal=True):
    """Reference (non-fused) multi-head self-attention.

    Args:
        qkv: packed query/key/value of shape
            (batch_size, seqlen, 3, nheads, head_dim).
        dropout_p: attention dropout probability.
        causal: apply an upper-triangular additive mask when True.

    Returns:
        (batch_size, seqlen, nheads, head_dim) tensor in qkv's dtype.
    """
    (batch_size, seqlen, _, nheads, d) = qkv.shape
    (q, k, v) = qkv.unbind(dim=2)
    # Fold heads into the batch dim; k is laid out transposed for the matmul.
    q = rearrange(q, 'b t h d -> (b h) t d')
    k = rearrange(k, 'b s h d -> (b h) d s')
    softmax_scale = (1.0 / math.sqrt(d))
    # Uninitialized output buffer is safe: beta=0 makes baddbmm ignore it.
    scores = torch.empty((batch_size * nheads), seqlen, seqlen, dtype=qkv.dtype, device=qkv.device)
    scores = rearrange(torch.baddbmm(scores, q, k, beta=0, alpha=softmax_scale), '(b h) t s -> b h t s', h=nheads)
    if causal:
        # Additive mask: -1e4 (not -inf) above the diagonal, a common
        # mixed-precision-friendly choice.
        causal_mask = torch.triu(torch.full((seqlen, seqlen), (- 10000.0), device=scores.device), 1)
        scores = (scores + causal_mask.to(dtype=scores.dtype))
    attention = torch.softmax(scores, dim=(- 1))
    # NOTE(review): F.dropout defaults to training=True, so dropout is always
    # active here regardless of any surrounding eval() mode — confirm intended.
    attention_drop = F.dropout(attention, dropout_p)
    output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
    return output.to(dtype=qkv.dtype)
class OneHot(object):
    """Callable transform turning integer class labels into one-hot vectors
    (as Theano tensors). A 1-D input yields a 2-D result; otherwise the first
    two dims are kept and a trailing class axis is appended (assumes the
    input is 2-D in that case — TODO confirm)."""

    def __init__(self, n_classes):
        self.n_classes = n_classes

    def __call__(self, x):
        # Imported lazily so the transform can be built without Theano.
        import theano.tensor.extra_ops as extra_ops
        one_hot = extra_ops.to_one_hot(x.flatten(), self.n_classes)
        if x.ndim == 1:
            return one_hot
        return one_hot.reshape((x.shape[0], x.shape[1], -1))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.