code stringlengths 101 5.91M |
|---|
def batch_accuracy(predicted, true):
    """Soft (VQA-style) per-example accuracy.

    Takes the argmax class of ``predicted`` along dim 1, looks up the
    agreement count for that class in ``true``, scales it by 0.3 and caps
    the result at 1, i.e. min(1, 0.3 * agreeing).
    """
    top_idx = predicted.argmax(dim=1, keepdim=True)
    votes = true.gather(dim=1, index=top_idx)
    return (votes * 0.3).clamp(max=1)
class ChooseSimpleDummyVecEnv(ShareVecEnv):
    """Serial (single-process) vectorized wrapper over a list of envs.

    Environments are built eagerly from their factories and stepped one
    after another in-process; results are stacked into numpy arrays.
    """

    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        ShareVecEnv.__init__(self, len(env_fns), first.observation_space, first.share_observation_space, first.action_space)
        self.actions = None

    def step_async(self, actions):
        # Serial implementation: just stash the actions for step_wait().
        self.actions = actions

    def step_wait(self):
        stepped = [env.step(act) for (env, act) in zip(self.envs, self.actions)]
        (obs, rews, dones, infos) = map(np.array, zip(*stepped))
        self.actions = None
        return (obs, rews, dones, infos)

    def reset(self, reset_choose):
        # Each env receives its own "choose" flag for the reset.
        return np.array([env.reset(flag) for (env, flag) in zip(self.envs, reset_choose)])

    def close(self):
        for env in self.envs:
            env.close()

    def get_max_step(self):
        return [env.max_steps for env in self.envs]

    def render(self, mode='human'):
        if (mode == 'rgb_array'):
            return np.array([env.render(mode=mode) for env in self.envs])
        if (mode == 'human'):
            for env in self.envs:
                env.render(mode=mode)
        else:
            raise NotImplementedError
class Test():
    """Randomized consistency tester for modular symbols spaces.

    Repeatedly builds random ModularSymbols spaces (for Gamma0, Gamma1 or a
    Dirichlet character) over the configured levels/weights and runs the
    ``test_*`` checks against them.
    """

    def __init__(self, levels=20, weights=4, onlyg0=False, onlyg1=False, onlychar=False):
        # An int for levels/weights is shorthand for a range: levels 1..n,
        # weights 2..n.
        if (not isinstance(levels, list)):
            levels = list(range(1, (int(levels) + 1)))
        if (not isinstance(weights, list)):
            weights = list(range(2, (int(weights) + 1)))
        self.levels = levels
        self.weights = weights
        if (not levels):
            raise RuntimeError('levels must have positive length')
        if (not weights):
            raise RuntimeError('weights must have positive length')
        self.current_space = None
        # only* flags restrict the random choice of space type below.
        self.onlyg0 = onlyg0
        self.onlyg1 = onlyg1
        self.onlychar = onlychar

    def __repr__(self):
        return 'Modular symbols testing class'

    def _modular_symbols_space(self):
        """Build a random space, honoring the only* restrictions."""
        if self.onlyg0:
            which = 0
        elif self.onlyg1:
            which = 1
        elif self.onlychar:
            which = 2
        else:
            which = random.randrange(0, 3)
        if (which == 0):
            print('gamma0')
            M = self._modular_symbols_space_gamma0()
        elif (which == 1):
            print('gamma1')
            M = self._modular_symbols_space_gamma1()
        else:
            print('character')
            M = self._modular_symbols_space_character()
        print('\t', M)
        return M

    def _level_weight_sign(self):
        """Draw a random (level, weight, sign) triple and log it."""
        level = random.choice(self.levels)
        weight = random.choice(self.weights)
        sign = random.choice([(- 1), 0, 1])
        print(('level = %s, weight = %s, sign = %s' % (level, weight, sign)))
        return (level, weight, sign)

    def _modular_symbols_space_gamma0(self):
        """Random ModularSymbols space for Gamma0(level)."""
        (level, weight, sign) = self._level_weight_sign()
        M = modsym.ModularSymbols(arithgroup.Gamma0(level), weight, sign)
        self.current_space = M
        return M

    def _modular_symbols_space_gamma1(self):
        """Random ModularSymbols space for Gamma1(level)."""
        (level, weight, sign) = self._level_weight_sign()
        M = modsym.ModularSymbols(arithgroup.Gamma1(level), weight, sign)
        self.current_space = M
        return M

    def _modular_symbols_space_character(self):
        """Random ModularSymbols space for a random Dirichlet character."""
        (level, weight, sign) = self._level_weight_sign()
        G = dirichlet.DirichletGroup(level)
        eps = G.random_element()
        M = modsym.ModularSymbols(eps, weight, sign)
        self.current_space = M
        return M

    def _do(self, name):
        """Run the single check ``test_<name>`` once."""
        print(('test_%s' % name))
        Test.__dict__[('test_%s' % name)](self)

    def random(self, seconds=0):
        """Run randomly chosen checks for about *seconds* (0 = run forever)."""
        self.test('random', seconds)

    def test(self, name, seconds=0):
        """Loop ``test_<name>`` until *seconds* of CPU time pass (0 = forever)."""
        seconds = float(seconds)
        total = cputime()
        n = 1
        while ((seconds == 0) or (cputime(total) < seconds)):
            # NOTE(review): ``s`` is assembled but never printed — looks like
            # a dropped print(s) (and the label hard-codes "test_dimension"
            # regardless of *name*); left unchanged here.
            s = ('** test_dimension: number %s' % n)
            if (seconds > 0):
                s += (' (will stop after about %s seconds)' % seconds)
            t = cputime()
            self._do(name)
            print(('\ttime=%s\telapsed=%s' % (cputime(t), cputime(total))))
            n += 1

    def test_cs_dimension(self):
        """Smoke test: the cuspidal submodule can be computed."""
        self._modular_symbols_space().cuspidal_submodule()

    def test_csnew_dimension(self):
        """Computed new-cuspidal dimension matches the dimension formula."""
        M = self._modular_symbols_space()
        V = M.cuspidal_submodule().new_submodule()
        d = V.dimension()
        d2 = M._cuspidal_new_submodule_dimension_formula()
        assert (d == d2), ('Test failed for M="%s", where computed dimension is %s but formula dimension is %s.' % (M, d, d2))

    def test_csns_nscs(self):
        """cuspidal∘new and new∘cuspidal agree, and match the formula."""
        M = self._modular_symbols_space()
        V1 = M.cuspidal_submodule().new_submodule()
        V2 = M.new_submodule().cuspidal_submodule()
        assert (V1 == V2), ('Test failed for M="%s", where the new cuspidal and cuspidal new spaces are computed differently.' % M)
        d = M._cuspidal_new_submodule_dimension_formula()
        assert (d == V1.dimension()), ('Test failed for M="%s", where computed dimension is %s but formula dimension is %s.' % (M, V1.dimension(), d))

    def test_decomposition(self):
        """Dimensions of the decomposition factors sum to dim(M)."""
        M = self._modular_symbols_space()
        D = M.decomposition()
        assert (M.dimension() == sum([A.dimension() for A in D]))

    def test_dimension(self):
        """Smoke test: the dimension can be computed."""
        self._modular_symbols_space().dimension()

    def test_random(self):
        """Pick and run one of the other test_* checks at random."""
        tests = [a for a in Test.__dict__ if ((a[:5] == 'test_') and (a != 'test_random'))]
        name = random.choice(tests)
        print(('Doing random test %s' % name))
        Test.__dict__[name](self)
def _remove_starting_and_ending_whitespace(text):
return '\n'.join([line.strip() for line in text.split('\n')]) |
def make_parser():
    """Build the ArgumentParser for the nightly tool.

    Two subcommands (checkout, pull) share the environment/channel options;
    checkout additionally accepts a branch name.
    """
    parser = ArgumentParser('nightly')
    subparsers = parser.add_subparsers(dest='subcmd', help='subcommand to execute')
    checkout = subparsers.add_parser('checkout', help='checkout a new branch')
    checkout.add_argument('-b', '--branch', help='Branch name to checkout', dest='branch', default=None, metavar='NAME')
    pull = subparsers.add_parser('pull', help='pulls the nightly commits into the current branch')
    # Options common to every subcommand.
    for sub in (checkout, pull):
        sub.add_argument('-n', '--name', help='Name of environment', dest='name', default=None, metavar='ENVIRONMENT')
        sub.add_argument('-p', '--prefix', help='Full path to environment location (i.e. prefix)', dest='prefix', default=None, metavar='PATH')
        sub.add_argument('-v', '--verbose', help='Provide debugging info', dest='verbose', default=False, action='store_true')
        sub.add_argument('--override-channels', help='Do not search default or .condarc channels.', dest='override_channels', default=False, action='store_true')
        sub.add_argument('-c', '--channel', help="Additional channel to search for packages. 'pytorch-nightly' will always be prepended to this list.", dest='channels', action='append', metavar='CHANNEL')
    return parser
def reset_the_weight_value(inputs, output_axis, threshold):
    """Re-initialize the weight array so half the output units sit just below
    a bound and the other half well above it (in place).

    Parameters
    ----------
    inputs : sequence
        (x, w, ...); only ``w`` (exposing ``.shape`` and a writable array
        ``.d``) is modified, in place.
    output_axis : int
        Axis of ``w`` indexing output units; must be 0, 1 or -1.
    threshold : float
        Bound used to derive the per-unit value
        sqrt(threshold / fan) with fan = items / shape[output_axis].

    Raises
    ------
    ValueError
        If ``output_axis`` is not 0, 1 or -1.  (The original built the slice
        strings via exec() and, for any other axis, silently assigned a bogus
        ``w.dNone`` attribute instead of failing.)
    """
    (x, w) = inputs[:2]
    shape = w.shape
    from functools import reduce
    items = reduce((lambda a, b: (a * b)), shape)
    upbound = ((threshold / (items / shape[output_axis])) ** 0.5)
    half = (shape[output_axis] // 2)
    # Direct slice assignments replace the original exec()-built statements.
    if (output_axis == 0):
        w.d[:half, ...] = (upbound * 0.9)
        w.d[half:, ...] = (upbound * 2)
    elif (output_axis == 1):
        w.d[:, :half, ...] = (upbound * 0.9)
        w.d[:, half:, ...] = (upbound * 2)
    elif (output_axis == (- 1)):
        w.d[..., :half] = (upbound * 0.9)
        w.d[..., half:] = (upbound * 2)
    else:
        raise ValueError('output_axis must be 0, 1 or -1, got {}'.format(output_axis))
def test_power_constant():
    """Raising a parameter to a constant exponent yields a Power node
    wrapping the base, with the exponent recorded on it."""
    base = optplan.Parameter()
    squared = (base ** optplan.make_constant(2))
    assert isinstance(squared, optplan.Power)
    assert (squared.function == base)
    assert (squared.exp == 2)
def print_object(obj: Any, *, print_all_tensors: bool=False, stats_only: bool=False, prefix: str='', ctx: Optional[PrintCtx]=None, ctx_name: Optional[str]=None):
    """Recursively pretty-print *obj*.

    Containers (dict/list/tuple) are walked entry by entry via
    _print_key_value with a dotted context name; numpy/torch tensors go
    through print_tensor; anything else is printed with its type.
    """
    if isinstance(obj, (dict, list, tuple)):
        entries = obj.items() if isinstance(obj, dict) else enumerate(obj)
        for (key, value) in entries:
            child_name = f'{ctx_name}.{key}' if ctx_name else f'{key}'
            _print_key_value(key, value, print_all_tensors=print_all_tensors, stats_only=stats_only, prefix=prefix, ctx=ctx, ctx_name=child_name)
    elif isinstance(obj, (numpy.ndarray, torch.Tensor)):
        print_tensor(obj, stats_only=stats_only, prefix=prefix, ctx=ctx, ctx_name=ctx_name)
    else:
        print(f'{prefix}({type(obj)}) {obj}')
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """Install a new global ``Logger.CURRENT`` writing to *dir*.

    The directory falls back to $OPENAI_LOGDIR, then to a timestamped
    directory under the system temp dir.  On MPI ranks > 0 the log files get
    a per-rank suffix and a reduced default format list.
    """
    if (dir is None):
        dir = os.getenv('OPENAI_LOGDIR')
    if (dir is None):
        dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('sony-%Y-%m-%d-%H-%M-%S-%f'))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    rank = get_rank_without_mpi_import()
    if (rank > 0):
        # Keep per-rank log files apart.
        log_suffix = (log_suffix + ('-rank%03i' % rank))
    if (format_strs is None):
        if (rank == 0):
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    # Drop empty entries (e.g. when the env var is set to '').
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log(('Logging to %s' % dir))
def main(model_name: str, backbone_name: str, image_size: list, num_classes: int, device: str):
    """Build a model by name, print its FLOP table, and a rough latency/FPS.

    NOTE(review): ``eval(model_name)`` executes arbitrary input — acceptable
    for a trusted CLI, unsafe on untrusted callers; a name->class dict would
    be safer.
    """
    device = torch.device(('cuda' if (torch.cuda.is_available() and (device == 'cuda')) else 'cpu'))
    inputs = torch.randn(1, 3, *image_size).to(device)
    model = eval(model_name)(backbone_name, num_classes)
    model = model.to(device)
    model.eval()
    print(flop_count_table(FlopCountAnalysis(model, inputs)))
    # Average wall-clock time of 10 forward passes.
    total_time = 0.0
    for _ in range(10):
        tic = time.perf_counter()
        model(inputs)
        toc = time.perf_counter()
        total_time += (toc - tic)
    total_time /= 10
    print(f'Inference time: {(total_time * 1000):.2f}ms')
    print(f'FPS: {(1 / total_time)}')
def _new_invariant_is_linearly_independent(F, invariants):
    """True iff appending *F* changes the rank of the invariants'
    coefficient matrix (i.e. F is linearly independent of them).

    An empty invariant list trivially accepts any F.
    """
    if (len(invariants) == 0):
        return True
    rank_without = PolynomialSequence(invariants).coefficient_matrix()[0].rank()
    rank_with = PolynomialSequence((list(invariants) + [F])).coefficient_matrix()[0].rank()
    return rank_without != rank_with
def perceptualLoss(fakeIm, realIm, vggnet):
    """Weighted VGG feature-matching (perceptual) loss.

    Runs both images through *vggnet* (expected to return a list of feature
    maps), detaches the real-image features so gradients only flow through
    the fake branch, and sums per-level MSE losses weighted by ``weights``.

    Returns a scalar tensor.
    """
    weights = [1, 0.2, 0.04]
    features_fake = vggnet(fakeIm)
    features_real = vggnet(realIm)
    # Detach the targets: no gradient into the real-image branch.
    features_real_no_grad = [f_real.detach() for f_real in features_real]
    # 'elementwise_mean' was removed from PyTorch; 'mean' is the equivalent.
    mse_loss = nn.MSELoss(reduction='mean')
    loss = 0
    for (weight, f_fake, f_real) in zip(weights, features_fake, features_real_no_grad):
        loss = (loss + (weight * mse_loss(f_fake, f_real)))
    return loss
class TestOnPolicyVectorizedSampler(TfGraphTestCase):
    """Checks that OnPolicyVectorizedSampler spins up the expected number of
    environments for a given CPU budget."""

    # NOTE(review): the decorator line was corrupted in the original source
    # (it read ".parametrize(...)", a syntax error); restored to pytest's
    # parametrize, which matches the (cpus, n_envs, expected_n_envs) configs.
    import pytest
    @pytest.mark.parametrize('cpus, n_envs, expected_n_envs', [*configs])
    def test_on_policy_vectorized_sampler_n_envs(self, cpus, n_envs, expected_n_envs):
        with LocalTFRunner(snapshot_config, sess=self.sess, max_cpus=cpus) as runner:
            env = GarageEnv(gym.make('CartPole-v0'))
            policy = CategoricalMLPPolicy(env_spec=env.spec, hidden_sizes=[32, 32])
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = REPS(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99)
            runner.setup(algo, env, sampler_args=dict(n_envs=n_envs))
            assert isinstance(runner._sampler, OnPolicyVectorizedSampler)
            assert (runner._sampler._n_envs == expected_n_envs)
            env.close()
class Scenario(BaseScenario):
    """Multi-agent particle scenario: agents must spread out evenly on a
    circle of radius ``target_radius`` around a single landmark."""

    def __init__(self, num_agents=4, dist_threshold=0.1, arena_size=1, identity_size=0):
        # NOTE(review): dist_threshold is accepted but never used; the
        # success threshold is the hard-coded self.dist_thres below.
        self.num_agents = num_agents
        self.target_radius = 0.5
        # Ideal angular gap between neighboring agents on the circle.
        self.ideal_theta_separation = ((2 * np.pi) / self.num_agents)
        self.arena_size = arena_size
        self.dist_thres = 0.05
        self.theta_thres = 0.1
        self.identity_size = identity_size

    def make_world(self):
        """Build the world: colliding, silent agents plus one static landmark."""
        world = World()
        world.dim_c = 2
        num_agents = self.num_agents
        num_landmarks = 1
        world.collaborative = False
        world.agents = [Agent(iden=i) for i in range(num_agents)]
        for (i, agent) in enumerate(world.agents):
            agent.name = ('agent %d' % i)
            agent.collide = True
            agent.silent = True
            agent.size = 0.05
            agent.adversary = False
        world.landmarks = [Landmark() for i in range(num_landmarks)]
        for (i, landmark) in enumerate(world.landmarks):
            landmark.name = ('landmark %d' % i)
            landmark.collide = False
            landmark.movable = False
            landmark.size = 0.03
        self.reset_world(world)
        world.dists = []
        return world

    def reset_world(self, world):
        """Recolor entities, randomize positions, zero velocities and comms."""
        for (i, agent) in enumerate(world.agents):
            agent.color = np.array([0.35, 0.35, 0.85])
        for (i, landmark) in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        for agent in world.agents:
            agent.state.p_pos = np.random.uniform((- self.arena_size), self.arena_size, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for (i, landmark) in enumerate(world.landmarks):
            # Landmark spawns inside the inner half of the arena.
            landmark.state.p_pos = np.random.uniform(((- 0.5) * self.arena_size), (0.5 * self.arena_size), world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)
        world.steps = 0
        world.dists = []

    def reward(self, agent, world):
        """Shared reward: negative mean (clipped) distance between agents and
        their optimally assigned ideal slots on the circle.

        Recomputed only when called for agent 0; other agents reuse the
        cached ``self.joint_reward``.
        """
        if (agent.iden == 0):
            landmark_pose = world.landmarks[0].state.p_pos
            relative_poses = [(agent.state.p_pos - landmark_pose) for agent in world.agents]
            thetas = get_thetas(relative_poses)
            # Anchor the ideal configuration at the smallest agent angle.
            theta_min = min(thetas)
            expected_poses = [(landmark_pose + (self.target_radius * np.array([np.cos((theta_min + (i * self.ideal_theta_separation))), np.sin((theta_min + (i * self.ideal_theta_separation)))]))) for i in range(self.num_agents)]
            dists = np.array([[np.linalg.norm((a.state.p_pos - pos)) for pos in expected_poses] for a in world.agents])
            # Optimal one-to-one agent<->slot matching.
            self.delta_dists = self._bipartite_min_dists(dists)
            world.dists = self.delta_dists
            total_penalty = np.mean(np.clip(self.delta_dists, 0, 2))
            self.joint_reward = (- total_penalty)
        return self.joint_reward

    def _bipartite_min_dists(self, dists):
        # Hungarian assignment over the agent x slot distance matrix.
        (ri, ci) = linear_sum_assignment(dists)
        min_dists = dists[(ri, ci)]
        return min_dists

    def observation(self, agent, world):
        """Own velocity and position plus landmark offsets; optionally
        prefixed with a one-hot identity of length ``identity_size``."""
        entity_pos = [(entity.state.p_pos - agent.state.p_pos) for entity in world.landmarks]
        default_obs = np.concatenate((([agent.state.p_vel] + [agent.state.p_pos]) + entity_pos))
        if (self.identity_size != 0):
            identified_obs = np.append(np.eye(self.identity_size)[agent.iden], default_obs)
            return identified_obs
        return default_obs

    def done(self, agent, world):
        # Episode ends on timeout or once every agent is within dist_thres
        # of its assigned slot.
        condition1 = (world.steps >= world.max_steps_episode)
        self.is_success = np.all((self.delta_dists < self.dist_thres))
        return (condition1 or self.is_success)

    def info(self, agent, world):
        return {'is_success': self.is_success, 'world_steps': world.steps, 'reward': self.joint_reward, 'dists': self.delta_dists.mean()}
def gaussian(birth, pers, mu=None, sigma=None):
    """Evaluate a bivariate normal CDF at (birth, pers).

    Defaults to zero mean and identity covariance.  When the off-diagonal
    entry sigma[0][1] is zero the separable variant (sbvn_cdf) is used,
    otherwise the full correlated form (bvn_cdf).
    """
    if (mu is None):
        mu = np.array([0.0, 0.0], dtype=np.float64)
    if (sigma is None):
        sigma = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
    # NOTE(review): only sigma[0][1] is checked — assumes a symmetric
    # covariance matrix; confirm with callers.
    if (sigma[0][1] == 0.0):
        return sbvn_cdf(birth, pers, mu_x=mu[0], mu_y=mu[1], sigma_x=sigma[0][0], sigma_y=sigma[1][1])
    return bvn_cdf(birth, pers, mu_x=mu[0], mu_y=mu[1], sigma_xx=sigma[0][0], sigma_yy=sigma[1][1], sigma_xy=sigma[0][1])
def _evaluate_predictions_on_coco(coco_gt, coco_results, min_threshold=0.5):
metrics = ['AP']
if (min_threshold <= 0.201):
metrics += ['AP20']
if (min_threshold <= 0.301):
metrics += ['AP30']
if (min_threshold <= 0.401):
metrics += ['AP40']
metrics.extend(['AP50', 'AP75', 'APm', 'APl'])
logger = logging.getLogger(__name__)
if (len(coco_results) == 0):
logger.warn('No predictions from the model! Set scores to -1')
results_gps = {metric: (- 1) for metric in metrics}
results_gpsm = {metric: (- 1) for metric in metrics}
return (results_gps, results_gpsm)
coco_dt = coco_gt.loadRes(coco_results)
results_segm = _evaluate_predictions_on_coco_segm(coco_gt, coco_dt, metrics, min_threshold)
logger.info(('Evaluation results for densepose segm: \n' + create_small_table(results_segm)))
results_gps = _evaluate_predictions_on_coco_gps(coco_gt, coco_dt, metrics, min_threshold)
logger.info(('Evaluation results for densepose, GPS metric: \n' + create_small_table(results_gps)))
results_gpsm = _evaluate_predictions_on_coco_gpsm(coco_gt, coco_dt, metrics, min_threshold)
logger.info(('Evaluation results for densepose, GPSm metric: \n' + create_small_table(results_gpsm)))
return (results_gps, results_gpsm, results_segm) |
class AdamWClonedWeightPrediction(WeightPredictor):
    """Weight predictor that advances cloned weights by simulating AdamW steps.

    Used to predict where the true weights will be after ``n_steps``
    optimizer steps, using the optimizer's current Adam moments (held fixed
    during the prediction).
    """

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Make sure Adam state (exp_avg, exp_avg_sq, step) exists for all params.
        adam_init(self.optimizer)

    def forward(self):
        """Apply ``n_steps`` simulated AdamW updates to the cloned weights."""
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        # Per-param-group learning rate for each future step: taken from the
        # scheduler when available, else the current lr repeated.
        if (self.scheduler is not None):
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                weight_decay = pg['weight_decay']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue
                        # Decoupled weight decay, as in AdamW.
                        p.data.mul_((1 - (lr * weight_decay)))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        # beta1 ** staleness scales the frozen first moment for
                        # each step further into the future — presumably
                        # modeling momentum decay with no new gradients.
                        p.data.addcdiv_((exp_avg * (beta1 ** staleness)), denom, value=(- step_size))

    def revert(self):
        """Restore the true weights if a prediction was applied."""
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
def _is_equal_tensor_proto(a, b):
name_a = a.name
name_b = b.name
a.name = ''
b.name = ''
res = (a == b)
a.name = name_a
b.name = name_b
return res |
class PrimitiveLocalComponent(LocalComponentBase):
    """A local component that is primitive, hence its own minimal twist."""

    def is_primitive(self):
        # Primitive by construction.
        return True

    def minimal_twist(self):
        # A primitive component is its own minimal twist.
        return self
def cross_entropy(*, estimated: Tensor, target: Tensor, axis: Dim, estimated_type: str) -> Tensor:
    """Cross entropy between *target* and *estimated* over *axis*.

    ``estimated_type`` selects the interpretation of *estimated*:
    'logits' uses the backend's fused softmax-cross-entropy (numerically
    preferable), 'probs' takes the log first, 'log-probs' is used as-is.
    Sparse targets are looked up via gather; dense targets are contracted
    via matmul over *axis*.

    :raises ValueError: for any other ``estimated_type``.
    """
    if (estimated_type == 'logits'):
        # Fused path: softmax + log + gather in one backend op.
        return estimated._raw_backend.softmax_cross_entropy_with_logits(logits=estimated, targets=target, axis=axis)
    if (estimated_type == 'probs'):
        log_prob = rf.log(estimated)
    elif (estimated_type == 'log-probs'):
        log_prob = estimated
    else:
        raise ValueError("estimated_type must be 'probs', 'log-probs' or 'logits'")
    if target.sparse_dim:
        # Sparse target: pick the log-prob of the target class.
        return (- rf.gather(log_prob, indices=target, axis=axis))
    return (- rf.matmul(target, log_prob, reduce=axis))
def main(args, dataspecs, **kw):
    """Launch EasyTorch vessel-segmentation training over *dataspecs*.

    Extra keyword args are forwarded to the EasyTorch runner.
    """
    runner = EasyTorch(dataspecs, args, load_sparse=True, **kw)
    runner.run(VesselSegTrainer, BinarySemSegImgPatchDatasetCustomTransform)
# NOTE(review): the decorator below was corrupted to a bare "_module()" call
# in the original source; restored to the registry decorator implied by the
# PIPELINES registry used inside the class.
@PIPELINES.register_module()
class Compose(object):
    """Compose several transforms into one callable pipeline.

    Each entry may be a config dict (instantiated via build_from_cfg against
    the PIPELINES registry) or any callable.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, Sequence)
        self.transforms = []
        for transform in transforms:
            if isinstance(transform, dict):
                self.transforms.append(build_from_cfg(transform, PIPELINES))
            elif callable(transform):
                self.transforms.append(transform)
            else:
                raise TypeError(f'transform must be callable or a dict, but got {type(transform)}')

    def __call__(self, data):
        """Apply the transforms in order; abort with None if any returns None."""
        for t in self.transforms:
            data = t(data)
            if (data is None):
                return None
        return data

    def __repr__(self):
        format_string = (self.__class__.__name__ + '(')
        for t in self.transforms:
            format_string += f'\n    {t}'
        format_string += '\n)'
        return format_string
def benchmark(ws, net, warmups=5, iters=100):
    """Time *net* on workspace *ws*.

    Runs *warmups* untimed iterations, then executes *iters* iterations
    inside a caffe2 Plan, prints the per-iteration time in milliseconds and
    returns the total wall-clock time in seconds.
    """
    for _ in range(warmups):
        ws.run(net)
    plan = core.Plan('plan')
    plan.AddStep(core.ExecutionStep('test-step', net, iters))
    start = time.time()
    ws.run(plan)
    elapsed = (time.time() - start)
    print('Timing network, time taken per-iteration: {:.6f}ms'.format(((elapsed / float(iters)) * 1000.0)))
    return elapsed
def loads(s, _dict=dict, decoder=None):
    """Parse a TOML document from the string *s* into nested tables.

    Works in two passes: a character scan that tracks string/comment/array
    state, validates key names and flattens multi-line constructs; then a
    line-oriented pass that builds tables ([group] / [[array-of-tables]])
    and loads key = value lines through *decoder*.

    :param s: the TOML source (str, or bytes decoded as utf8).
    :param _dict: mapping type used for the resulting tables.
    :param decoder: TomlDecoder instance (a default one is created if None).
    :raises TomlDecodeError: on any syntax error, with position info.
    """
    implicitgroups = []
    if (decoder is None):
        decoder = TomlDecoder(_dict)
    retval = decoder.get_empty_table()
    currentlevel = retval
    if (not isinstance(s, basestring)):
        raise TypeError('Expecting something like a string')
    if (not isinstance(s, unicode)):
        s = s.decode('utf8')
    original = s
    sl = list(s)
    # --- Pass 1 state: bracket depth, string/comment tracking, key scanner.
    openarr = 0
    openstring = False
    openstrchar = ''
    multilinestr = False
    arrayoftables = False
    beginline = True
    keygroup = False
    dottedkey = False
    keyname = 0
    key = ''
    prev_key = ''
    line_no = 1
    for (i, item) in enumerate(sl):
        # Normalize CRLF: blank out the CR.
        if ((item == '\r') and (sl[(i + 1)] == '\n')):
            sl[i] = ' '
            continue
        if keyname:
            key += item
            if (item == '\n'):
                raise TomlDecodeError('Key name found without value. Reached end of line.', original, i)
            if openstring:
                if (item == openstrchar):
                    # Close a quoted key only on an unescaped quote.
                    oddbackslash = False
                    k = 1
                    while ((i >= k) and (sl[(i - k)] == '\\')):
                        oddbackslash = (not oddbackslash)
                        k += 1
                    if (not oddbackslash):
                        keyname = 2
                        openstring = False
                        openstrchar = ''
                continue
            elif (keyname == 1):
                # Scanning the key proper.
                if item.isspace():
                    keyname = 2
                    continue
                elif (item == '.'):
                    dottedkey = True
                    continue
                elif (item.isalnum() or (item == '_') or (item == '-')):
                    continue
                elif (dottedkey and (sl[(i - 1)] == '.') and ((item == '"') or (item == "'"))):
                    openstring = True
                    openstrchar = item
                    continue
            elif (keyname == 2):
                # After the key, expecting '=' (or a further dotted part).
                if item.isspace():
                    if dottedkey:
                        nextitem = sl[(i + 1)]
                        if ((not nextitem.isspace()) and (nextitem != '.')):
                            keyname = 1
                    continue
                if (item == '.'):
                    dottedkey = True
                    nextitem = sl[(i + 1)]
                    if ((not nextitem.isspace()) and (nextitem != '.')):
                        keyname = 1
                    continue
            if (item == '='):
                keyname = 0
                prev_key = key[:(- 1)].rstrip()
                key = ''
                dottedkey = False
            else:
                raise TomlDecodeError((("Found invalid character in key name: '" + item) + "'. Try quoting the key name."), original, i)
        # Track single-quoted strings (and ''' multi-line) openings/closings.
        if ((item == "'") and (openstrchar != '"')):
            k = 1
            try:
                while (sl[(i - k)] == "'"):
                    k += 1
                    if (k == 3):
                        break
            except IndexError:
                pass
            if (k == 3):
                multilinestr = (not multilinestr)
                openstring = multilinestr
            else:
                openstring = (not openstring)
            if openstring:
                openstrchar = "'"
            else:
                openstrchar = ''
        # Track double-quoted strings, honoring backslash escapes.
        if ((item == '"') and (openstrchar != "'")):
            oddbackslash = False
            k = 1
            tripquote = False
            try:
                while (sl[(i - k)] == '"'):
                    k += 1
                    if (k == 3):
                        tripquote = True
                        break
                if ((k == 1) or ((k == 3) and tripquote)):
                    while (sl[(i - k)] == '\\'):
                        oddbackslash = (not oddbackslash)
                        k += 1
            except IndexError:
                pass
            if (not oddbackslash):
                if tripquote:
                    multilinestr = (not multilinestr)
                    openstring = multilinestr
                else:
                    openstring = (not openstring)
            if openstring:
                openstrchar = '"'
            else:
                openstrchar = ''
        # Strip comments (outside strings/groups), preserving them on the decoder.
        if ((item == '#') and ((not openstring) and (not keygroup) and (not arrayoftables))):
            j = i
            comment = ''
            try:
                while (sl[j] != '\n'):
                    comment += s[j]
                    sl[j] = ' '
                    j += 1
            except IndexError:
                break
            if (not openarr):
                decoder.preserve_comment(line_no, prev_key, comment, beginline)
        # '[' at line start opens a table / array-of-tables; elsewhere an array.
        if ((item == '[') and ((not openstring) and (not keygroup) and (not arrayoftables))):
            if beginline:
                if ((len(sl) > (i + 1)) and (sl[(i + 1)] == '[')):
                    arrayoftables = True
                else:
                    keygroup = True
            else:
                openarr += 1
        if ((item == ']') and (not openstring)):
            if keygroup:
                keygroup = False
            elif arrayoftables:
                if (sl[(i - 1)] == ']'):
                    arrayoftables = False
            else:
                openarr -= 1
        if (item == '\n'):
            if (openstring or multilinestr):
                if (not multilinestr):
                    raise TomlDecodeError('Unbalanced quotes', original, i)
                if (((sl[(i - 1)] == "'") or (sl[(i - 1)] == '"')) and (sl[(i - 2)] == sl[(i - 1)])):
                    sl[i] = sl[(i - 1)]
                    if (sl[(i - 3)] == sl[(i - 1)]):
                        sl[(i - 3)] = ' '
            elif openarr:
                # Arrays may span lines: flatten the newline.
                sl[i] = ' '
            else:
                beginline = True
                line_no += 1
        elif (beginline and (sl[i] != ' ') and (sl[i] != '\t')):
            beginline = False
            if ((not keygroup) and (not arrayoftables)):
                if (sl[i] == '='):
                    raise TomlDecodeError('Found empty keyname. ', original, i)
                keyname = 1
                key += item
    if keyname:
        raise TomlDecodeError('Key name found without value. Reached end of file.', original, len(s))
    if openstring:
        raise TomlDecodeError('Unterminated string found. Reached end of file.', original, len(s))
    # --- Pass 2: line-oriented parse of the normalized text.
    s = ''.join(sl)
    s = s.split('\n')
    multikey = None
    multilinestr = ''
    multibackslash = False
    pos = 0
    for (idx, line) in enumerate(s):
        if (idx > 0):
            pos += (len(s[(idx - 1)]) + 1)
        decoder.embed_comments(idx, currentlevel)
        if ((not multilinestr) or multibackslash or ('\n' not in multilinestr)):
            line = line.strip()
        if ((line == '') and ((not multikey) or multibackslash)):
            continue
        if multikey:
            # Continuing a multi-line string value.
            if multibackslash:
                multilinestr += line
            else:
                multilinestr += line
            multibackslash = False
            closed = False
            if (multilinestr[0] == '['):
                closed = (line[(- 1)] == ']')
            elif (len(line) > 2):
                closed = ((line[(- 1)] == multilinestr[0]) and (line[(- 2)] == multilinestr[0]) and (line[(- 3)] == multilinestr[0]))
            if closed:
                try:
                    (value, vtype) = decoder.load_value(multilinestr)
                except ValueError as err:
                    raise TomlDecodeError(str(err), original, pos)
                currentlevel[multikey] = value
                multikey = None
                multilinestr = ''
            else:
                # Trailing backslashes toggle line-continuation mode.
                k = (len(multilinestr) - 1)
                while ((k > (- 1)) and (multilinestr[k] == '\\')):
                    multibackslash = (not multibackslash)
                    k -= 1
                if multibackslash:
                    multilinestr = multilinestr[:(- 1)]
                else:
                    multilinestr += '\n'
            continue
        if (line[0] == '['):
            # [group] or [[array-of-tables]] header.
            arrayoftables = False
            if (len(line) == 1):
                raise TomlDecodeError('Opening key group bracket on line by itself.', original, pos)
            if (line[1] == '['):
                arrayoftables = True
                line = line[2:]
                splitstr = ']]'
            else:
                line = line[1:]
                splitstr = ']'
            i = 1
            quotesplits = decoder._get_split_on_quotes(line)
            quoted = False
            for quotesplit in quotesplits:
                if ((not quoted) and (splitstr in quotesplit)):
                    break
                i += quotesplit.count(splitstr)
                quoted = (not quoted)
            line = line.split(splitstr, i)
            if ((len(line) < (i + 1)) or (line[(- 1)].strip() != '')):
                raise TomlDecodeError('Key group not on a line by itself.', original, pos)
            groups = splitstr.join(line[:(- 1)]).split('.')
            i = 0
            while (i < len(groups)):
                groups[i] = groups[i].strip()
                if ((len(groups[i]) > 0) and ((groups[i][0] == '"') or (groups[i][0] == "'"))):
                    # Re-join quoted group names that contained dots.
                    groupstr = groups[i]
                    j = (i + 1)
                    while (not (groupstr[0] == groupstr[(- 1)])):
                        j += 1
                        if (j > (len(groups) + 2)):
                            raise TomlDecodeError(((("Invalid group name '" + groupstr) + "' Something ") + 'went wrong.'), original, pos)
                        groupstr = '.'.join(groups[i:j]).strip()
                    groups[i] = groupstr[1:(- 1)]
                    groups[(i + 1):j] = []
                elif (not _groupname_re.match(groups[i])):
                    raise TomlDecodeError((("Invalid group name '" + groups[i]) + "'. Try quoting it."), original, pos)
                i += 1
            # Walk/create the nested tables for this header.
            currentlevel = retval
            for i in _range(len(groups)):
                group = groups[i]
                if (group == ''):
                    raise TomlDecodeError("Can't have a keygroup with an empty name", original, pos)
                try:
                    currentlevel[group]
                    if (i == (len(groups) - 1)):
                        if (group in implicitgroups):
                            implicitgroups.remove(group)
                            if arrayoftables:
                                raise TomlDecodeError("An implicitly defined table can't be an array", original, pos)
                        elif arrayoftables:
                            currentlevel[group].append(decoder.get_empty_table())
                        else:
                            raise TomlDecodeError(((('What? ' + group) + ' already exists?') + str(currentlevel)), original, pos)
                except TypeError:
                    # currentlevel is a list (array of tables): descend into
                    # its last element.
                    currentlevel = currentlevel[(- 1)]
                    if (group not in currentlevel):
                        currentlevel[group] = decoder.get_empty_table()
                        if ((i == (len(groups) - 1)) and arrayoftables):
                            currentlevel[group] = [decoder.get_empty_table()]
                except KeyError:
                    if (i != (len(groups) - 1)):
                        implicitgroups.append(group)
                    currentlevel[group] = decoder.get_empty_table()
                    if ((i == (len(groups) - 1)) and arrayoftables):
                        currentlevel[group] = [decoder.get_empty_table()]
                currentlevel = currentlevel[group]
                if arrayoftables:
                    try:
                        currentlevel = currentlevel[(- 1)]
                    except KeyError:
                        pass
        elif (line[0] == '{'):
            # Inline table: must be closed on the same line.
            if (line[(- 1)] != '}'):
                raise TomlDecodeError('Line breaks are not allowed in inlineobjects', original, pos)
            try:
                decoder.load_inline_object(line, currentlevel, multikey, multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
        elif ('=' in line):
            # Plain key = value; load_line returns state if a multi-line
            # value starts here.
            try:
                ret = decoder.load_line(line, currentlevel, multikey, multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
            if (ret is not None):
                (multikey, multilinestr, multibackslash) = ret
    return retval
def read_table_csv(table_obj, csv_seperator=','):
    """Load a table's CSV into a DataFrame.

    Columns are renamed to '<table_name>.<attribute>' (positionally, from
    table_obj.attributes) and any attribute listed in
    table_obj.irrelevant_attributes is dropped before dtype inference.
    """
    frame = pd.read_csv(table_obj.csv_file_location, escapechar='\\', encoding='utf-8', quotechar='"', sep=csv_seperator)
    name_prefix = (table_obj.table_name + '.')
    frame.columns = [(name_prefix + attr) for attr in table_obj.attributes]
    for dropped_attr in table_obj.irrelevant_attributes:
        frame = frame.drop((name_prefix + dropped_attr), axis=1)
    return frame.infer_objects()
def parameter_count(model: PyTree):
    """Total number of elements across the unique array leaves of *model*.

    Leaves are deduplicated by object identity, so tied/shared arrays are
    counted only once.
    """
    unique_leaves = {}
    for leaf in jax.tree_util.tree_leaves(model):
        if is_jax_array_like(leaf):
            unique_leaves[id(leaf)] = leaf
    return sum((leaf.size for leaf in unique_leaves.values()))
class PcgrlCtrlEnv(PcgrlEnv):
    """PCGRL env variant exposing the problem's control bounds and targets."""

    def __init__(self, cfg: Config, prob='binary_ctrl', rep='narrow'):
        super(PcgrlCtrlEnv, self).__init__(cfg, prob, rep)
        # Surface the problem's conditional bounds / static targets on the env.
        self.cond_bounds = self._prob.cond_bounds
        self.static_trgs = self._prob.static_trgs

    def set_map(self, init_map):
        """Pin the representation to start every episode from *init_map*."""
        self._rep._random_start = False
        self._rep._old_map = init_map.copy()
def get_preprocessor(imsize):
    """Build a torchvision pipeline: resize, to-tensor, then convert RGB to
    BGR, scale to 0-255 and subtract the VGG mean."""
    def vgg_preprocess(tensor):
        (red, green, blue) = torch.chunk(tensor, 3, dim=0)
        bgr = torch.cat((blue, green, red), 0)
        return ((bgr * 255) - vgg_mean.type(tensor.type()).expand_as(bgr))
    return transforms.Compose([transforms.Resize(imsize), transforms.ToTensor(), transforms.Lambda(vgg_preprocess)])
def show_img(img_id):
    """Resolve *img_id* to a path via the image-folder map and display it.

    Relies on the module-level ``img_folder_list`` global.
    """
    img_map = get_img_map(img_folder_list)
    img_path = img_map[img_id]
    print('Reading image from: ', img_path)
    plt.imshow(plt.imread(img_path))
def canonicalize_sql_example(query, sql, ast):
    """Canonicalize one NL-to-SQL example.

    Strips angle-bracket markup from the query, tokenizes it with nltk, and
    parses the raw AST.  Returns (query_tokens, sql, parse_tree).
    """
    # Remove <...> markup tags before tokenizing.
    query = re.sub('<.*?>', '', query)
    query_tokens = nltk.word_tokenize(query)
    parse_tree = parse_raw(ast)
    return (query_tokens, sql, parse_tree)
def create_logger(app):
    """Return the logger for *app* (named after the app).

    Warns once if legacy 'flask.app'/'flask' loggers carry configuration
    that no longer applies; forces DEBUG in debug mode when the level is
    unset, and installs the default handler if none is effective.
    """
    logger = logging.getLogger(app.name)
    for old_name in ('flask.app', 'flask'):
        old_logger = logging.getLogger(old_name)
        if (_has_config(old_logger) and (not _has_config(logger))):
            warnings.warn("'app.logger' is named '{name}' for this application, but configuration was found for '{old_name}', which no longer has an effect. The logging configuration should be moved to '{name}'.".format(name=app.name, old_name=old_name))
            break
    if (app.debug and (not logger.level)):
        # Only force DEBUG when the level was left unset (NOTSET == 0).
        logger.setLevel(logging.DEBUG)
    if (not has_level_handler(logger)):
        logger.addHandler(default_handler)
    return logger
def l2_promote():
    """Raise a CUDA device limit through libcudart via ctypes.

    Calls cudaDeviceSetLimit(limit=5, value=128) and reads the limit back,
    asserting the new value stuck.  Limit id 5 is presumably
    cudaLimitMaxL2FetchGranularity given the function name — confirm
    against the installed CUDA headers.  Requires a CUDA runtime and device.
    """
    import ctypes
    _libcudart = ctypes.CDLL('libcudart.so')
    # One-int out-parameter buffer for cudaDeviceGetLimit.
    pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
    _libcudart.cudaDeviceSetLimit(ctypes.c_int(5), ctypes.c_int(128))
    _libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(5))
    # Fails if the driver rejected the new limit.
    assert (pValue.contents.value == 128)
class recursive(object):
    """Wrapper that passes itself as the wrapped function's first argument.

    This gives the function a handle for self-recursion:
    ``recursive(lambda self, n: ... self(n - 1) ...)``.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *a, **kw):
        # Forward the wrapper itself so func can recurse through it.
        return self.func(self, *a, **kw)
class ListModule(nn.Module):
    """List-like container of submodules (pre-dating nn.ModuleList).

    Modules are registered under their stringified position and can be
    indexed, iterated and counted with len().
    """

    def __init__(self, *args):
        super(ListModule, self).__init__()
        for position, module in enumerate(args):
            self.add_module(str(position), module)

    def __getitem__(self, idx):
        # Negative indices are rejected, matching the original behavior.
        if not (0 <= idx < len(self._modules)):
            raise IndexError('index {} is out of range'.format(idx))
        values = iter(self._modules.values())
        for _ in range(idx):
            next(values)
        return next(values)

    def __iter__(self):
        return iter(self._modules.values())

    def __len__(self):
        return len(self._modules)
class pAdicFieldFloatingPoint(pAdicFieldBaseGeneric, pAdicFloatingPointFieldGeneric):
    """p-adic field whose elements use floating point precision handling."""

    def __init__(self, p, prec, print_mode, names):
        pAdicFieldBaseGeneric.__init__(self, p, prec, print_mode, names, pAdicFloatingPointElement)

    def _coerce_map_from_(self, R):
        """Coercion from fixed-mod/floating-point rings and fields over the
        same prime, when precision and printing modes are compatible.

        Returns True when a coercion exists; falls through (None) otherwise.
        """
        if (isinstance(R, (pAdicRingFixedMod, pAdicRingFloatingPoint, pAdicFieldFloatingPoint)) and (R.prime() == self.prime())):
            if (R.precision_cap() > self.precision_cap()):
                return True
            elif ((R.precision_cap() == self.precision_cap()) and self._printer.richcmp_modes(R._printer, op_LE)):
                return True

    def _convert_map_from_(self, R):
        """Conversion from Z/NZ when N is a power of this field's prime."""
        from sage.rings.finite_rings.integer_mod_ring import IntegerModRing_generic
        if isinstance(R, IntegerModRing_generic):
            N = R.cardinality()
            p = self.prime()
            n = N.exact_log(p)
            if (N == (p ** n)):
                from sage.rings.padics.padic_generic import ResidueLiftingMap
                return ResidueLiftingMap._create_(R, self)
def _check_PSK(state: GameState):
    """Positional-superko check.

    True when the previous move was not a pass AND the current board
    (board_history[0]) exactly matches some earlier board in the history.
    """
    played = (state.consecutive_pass_count == 0)
    board_repeats = (jnp.abs((state.board_history[0] - state.board_history[1:])).sum(axis=1) == 0).any()
    return (played & board_repeats)
def load_train_data(csv_file, n_items):
    """Read (uid, sid) interaction pairs from *csv_file* and return a binary
    user-item CSR matrix of shape (max uid + 1, n_items), dtype float64."""
    interactions = pd.read_csv(csv_file)
    n_users = (interactions['uid'].max() + 1)
    user_idx = interactions['uid']
    item_idx = interactions['sid']
    ones = np.ones_like(user_idx)
    return sparse.csr_matrix((ones, (user_idx, item_idx)), dtype='float64', shape=(n_users, n_items))
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp |
class MT10(Benchmark):
    """Meta-World MT10 benchmark: the EASY_MODE task set for training, with
    no held-out test classes or tasks."""

    def __init__(self):
        super().__init__()
        self._train_classes = _env_dict.EASY_MODE_CLS_DICT
        # MT10 defines no test split.
        self._test_classes = OrderedDict()
        train_kwargs = _env_dict.EASY_MODE_ARGS_KWARGS
        self._train_tasks = _make_tasks(self._train_classes, train_kwargs, _MT_OVERRIDE)
        self._test_tasks = []
def harness(policy, throughputs, scale_factors, priority_weights, cluster_spec, num_sub_clusters=1, random_cluster_assignment=False):
    """Partition jobs across ``num_sub_clusters`` sub-clusters, run ``policy`` on each,
    and merge the per-sub-cluster allocations.

    Returns ``(full_allocation, runtime)`` where runtime is setup time plus the
    slowest single sub-cluster computation (sub-clusters could run in parallel).
    """
    start_time = time.time()
    sub_cluster_throughputs = []
    sub_cluster_scale_factors = []
    sub_cluster_priority_weights = []
    job_to_sub_cluster_assignment = {}
    job_ids = []
    # Only single (non-pair) jobs get a sub-cluster assignment; pairs follow their members.
    for job_id in throughputs:
        if (not job_id.is_pair()):
            job_ids.append(job_id)
    for (i, job_id) in enumerate(job_ids):
        if random_cluster_assignment:
            job_to_sub_cluster_assignment[job_id[0]] = random.randint(0, (num_sub_clusters - 1))
        else:
            # Deterministic assignment: job id modulo the number of sub-clusters.
            job_to_sub_cluster_assignment[job_id[0]] = (job_id[0] % num_sub_clusters)
    for i in range(num_sub_clusters):
        sub_cluster_throughputs.append({})
        sub_cluster_scale_factors.append({})
        sub_cluster_priority_weights.append({})
        for job_id in throughputs:
            # Keep a job (or job pair) only when every member lives in sub-cluster i.
            if ((job_to_sub_cluster_assignment[job_id[0]] == i) and ((not job_id.is_pair()) or (job_to_sub_cluster_assignment[job_id[1]] == i))):
                sub_cluster_throughputs[(- 1)][job_id] = copy.copy(throughputs[job_id])
                if (not job_id.is_pair()):
                    sub_cluster_scale_factors[(- 1)][job_id] = scale_factors[job_id]
                    sub_cluster_priority_weights[(- 1)][job_id] = priority_weights[job_id]
    # Each worker type's capacity is split evenly (integer division) across sub-clusters.
    sub_cluster_cluster_spec = {worker_type: (cluster_spec[worker_type] // num_sub_clusters) for worker_type in cluster_spec}
    setup_time = (time.time() - start_time)
    full_allocation = {}
    computation_times = []
    for i in range(num_sub_clusters):
        start_time = time.time()
        # MaxMinFairness-family policies additionally take per-job priority weights.
        if policy._name.startswith('MaxMinFairness'):
            sub_cluster_allocation = policy.get_allocation(sub_cluster_throughputs[i], sub_cluster_scale_factors[i], sub_cluster_priority_weights[i], sub_cluster_cluster_spec)
        else:
            sub_cluster_allocation = policy.get_allocation(sub_cluster_throughputs[i], sub_cluster_scale_factors[i], sub_cluster_cluster_spec)
        for job_id in sub_cluster_allocation:
            full_allocation[job_id] = sub_cluster_allocation[job_id]
        computation_times.append((time.time() - start_time))
    return (full_allocation, (setup_time + max(computation_times)))
class PassageDB():
    """Read-only LMDB-backed store of passages; values are JSON ``[title, text]`` pairs
    keyed by the passage id encoded as a UTF-8 string."""

    def __init__(self, input_file: str):
        self._input_file = input_file
        # subdir=False: input_file is the database file itself, not a directory.
        self._db = lmdb.open(input_file, subdir=False, readonly=True)

    def __reduce__(self):
        # Pickle support: reconstruct by reopening from the path (LMDB handles are not picklable).
        return (self.__class__, (self._input_file,))

    def __len__(self):
        return self._db.stat()['entries']

    def __getitem__(self, id_: int) -> Passage:
        """Look up a single passage by id; raises KeyError when absent."""
        with self._db.begin() as txn:
            json_text = txn.get(str(id_).encode('utf-8'))
            if (json_text is None):
                raise KeyError(('Invalid passage_id: ' + str(id_)))
            (title, text) = json.loads(json_text)
            return Passage(id_, title, text)

    def __iter__(self) -> Iterator[Passage]:
        """Yield every passage via an LMDB cursor scan."""
        with self._db.begin() as txn:
            cursor = txn.cursor()
            for (id_str, json_str) in iter(cursor):
                (title, text) = json.loads(json_str.decode('utf-8'))
                (yield Passage(int(id_str.decode('utf-8')), title, text))
def collate_batch(batch):
    """Flatten every patch in ``batch`` and right-pad them with zeros into one
    (batch, max_len) tensor, moved to the module-global ``device``."""
    flattened = [patch.reshape(-1) for patch in batch]
    padded = torch.nn.utils.rnn.pad_sequence(flattened, batch_first=True, padding_value=0)
    return padded.to(device)
# NOTE(review): this bare call looks like a stripped registration decorator
# (e.g. '@BACKBONES.register_module()') for the class below — confirm upstream.
_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt (grouped-convolution) backbone variant of DetectoRS."""

    # depth -> (block class, per-stage block counts)
    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, groups=1, base_width=4, **kwargs):
        # ResNeXt cardinality and per-group width; set before the base __init__
        # so layer construction can see them.
        self.groups = groups
        self.base_width = base_width
        super(DetectoRS_ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build a residual stage, forwarding the ResNeXt group settings to the base implementation."""
        return super().make_res_layer(groups=self.groups, base_width=self.base_width, base_channels=self.base_channels, **kwargs)
def standardize_constraints(constraints, x0, meth):
    """Normalize ``constraints`` into the representation solver ``meth`` expects.

    'trust-constr' receives new-style constraint objects; every other method
    receives old-style dict constraints. A single constraint (object or dict)
    is accepted as well as a sequence. Always returns a list.
    """
    all_constraint_types = (NonlinearConstraint, LinearConstraint, dict)
    new_constraint_types = all_constraint_types[:-1]
    # Promote a lone constraint to a one-element sequence, then copy to a list.
    if isinstance(constraints, all_constraint_types):
        constraints = [constraints]
    constraints = list(constraints)
    if meth == 'trust-constr':
        # Upgrade any old-style dicts to new-style constraint objects.
        constraints = [
            con if isinstance(con, new_constraint_types) else old_constraint_to_new(i, con)
            for i, con in enumerate(constraints)
        ]
    else:
        # Downgrade new-style objects; one may expand into several old-style dicts.
        for i, con in enumerate(list(constraints)):
            if isinstance(con, new_constraint_types):
                replacements = new_constraint_to_old(con, x0)
                constraints[i] = replacements[0]
                constraints.extend(replacements[1:])
    return constraints
def test_langlower():
    """lang_to_langcode should be case-insensitive; langlower2lcode should map
    alternate lowercase spellings to their language codes."""
    assert (lang_to_langcode('WOLOF') == 'wo')
    assert (lang_to_langcode('nOrWeGiAn') == 'nb')
    # Both alternate spellings resolve to the same code 'soj'.
    assert ('soj' == langlower2lcode['soi'])
    assert ('soj' == langlower2lcode['sohi'])
class PickleCache():
    """Context manager that caches a computed value in a pickle file.

    On enter, ``self.v`` is loaded from the cache file if it already exists;
    otherwise the caller is expected to assign ``self.v`` inside the ``with``
    block, and the value is pickled on exit (also when ``overwrite`` is set).
    Nothing is written if the block raised.
    """

    def __init__(self, cache_name, overwrite=False):
        self.cache_name = cache_name
        # Existence is sampled once, up front; it drives both load and save behavior.
        self.exists = os.path.exists(cache_name)
        self.overwrite = overwrite
        self.v = None

    def __enter__(self):
        if not self.exists:
            print(f'computing value for {self.cache_name}')
            return self
        print(f'loading from cache: {self.cache_name}')
        with open(self.cache_name, 'rb') as f:
            self.v = pickle.load(f)
        return self

    def __exit__(self, type, value, traceback):
        should_save = self.overwrite or not self.exists
        if not should_save:
            return
        print(f'saving to cache: {self.cache_name}')
        if type is not None:
            # The with-block raised: report, and do not persist anything.
            print('exception_happened')
            return
        assert (self.v is not None), 'You should enter a value'
        with open(self.cache_name, 'wb') as f:
            pickle.dump(self.v, f)
def get_fans_or_followers_ids(user_id, crawl_type):
    """Collect follower or followee user ids for ``user_id`` by paging through
    list pages (at most 5 pages, fewer if the site reports fewer)."""
    # NOTE(review): both URL-template literals below are truncated/unterminated in
    # this copy of the source (the format string with the user-id and page-number
    # slots is missing) — restore them before use.
    if (crawl_type == 1):
        fans_or_follows_url = '
    else:
        fans_or_follows_url = '
    cur_page = 1
    max_page = 6
    user_ids = list()
    while (cur_page < max_page):
        url = fans_or_follows_url.format(user_id, cur_page)
        page = get_page(url)
        # The first page reveals the real page count; shrink max_page accordingly.
        if (cur_page == 1):
            urls_length = public.get_max_crawl_pages(page)
            if (max_page > urls_length):
                max_page = (urls_length + 1)
        user_ids.extend(public.get_fans_or_follows(page, user_id, crawl_type))
        cur_page += 1
    return user_ids
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: a 1x1 conv, three dilated 3x3 convs, and an
    image-pooling branch, concatenated and fused by a 1x1 projection."""

    def __init__(self, in_channels, out_channels, dilations, *, norm, activation, pool_kernel_size=None, dropout: float=0.0, use_depthwise_separable_conv=False):
        super(ASPP, self).__init__()
        assert (len(dilations) == 3), 'ASPP expects 3 dilations, got {}'.format(len(dilations))
        self.pool_kernel_size = pool_kernel_size
        self.dropout = dropout
        # Convs carry their own bias only when no normalization layer follows.
        use_bias = (norm == '')
        self.convs = nn.ModuleList()
        # Branch 1: 1x1 conv.
        self.convs.append(Conv2d(in_channels, out_channels, kernel_size=1, bias=use_bias, norm=get_norm(norm, out_channels), activation=deepcopy(activation)))
        weight_init.c2_xavier_fill(self.convs[(- 1)])
        # Branches 2-4: dilated 3x3 convs (optionally depthwise-separable, which
        # handles its own initialization).
        for dilation in dilations:
            if use_depthwise_separable_conv:
                self.convs.append(DepthwiseSeparableConv2d(in_channels, out_channels, kernel_size=3, padding=dilation, dilation=dilation, norm1=norm, activation1=deepcopy(activation), norm2=norm, activation2=deepcopy(activation)))
            else:
                self.convs.append(Conv2d(in_channels, out_channels, kernel_size=3, padding=dilation, dilation=dilation, bias=use_bias, norm=get_norm(norm, out_channels), activation=deepcopy(activation)))
                weight_init.c2_xavier_fill(self.convs[(- 1)])
        # Branch 5: image pooling — global average pool when pool_kernel_size is
        # None, otherwise a fixed-size average pool with stride 1.
        if (pool_kernel_size is None):
            image_pooling = nn.Sequential(nn.AdaptiveAvgPool2d(1), Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)))
        else:
            image_pooling = nn.Sequential(nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1), Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)))
        weight_init.c2_xavier_fill(image_pooling[1])
        self.convs.append(image_pooling)
        # Fuse the five concatenated branch outputs back to out_channels.
        self.project = Conv2d((5 * out_channels), out_channels, kernel_size=1, bias=use_bias, norm=get_norm(norm, out_channels), activation=deepcopy(activation))
        weight_init.c2_xavier_fill(self.project)

    def forward(self, x):
        size = x.shape[(- 2):]
        if (self.pool_kernel_size is not None):
            if ((size[0] % self.pool_kernel_size[0]) or (size[1] % self.pool_kernel_size[1])):
                raise ValueError('`pool_kernel_size` must be divisible by the shape of inputs. Input size: {} `pool_kernel_size`: {}'.format(size, self.pool_kernel_size))
        res = []
        for conv in self.convs:
            res.append(conv(x))
        # The pooling branch's output is upsampled back to the input spatial size.
        res[(- 1)] = F.interpolate(res[(- 1)], size=size, mode='bilinear', align_corners=False)
        res = torch.cat(res, dim=1)
        res = self.project(res)
        res = (F.dropout(res, self.dropout, training=self.training) if (self.dropout > 0) else res)
        return res
def register_Ns3SimpleRefCount__Ns3RadvdInterface_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdInterface__gt___methods(root_module, cls):
    """Register constructors for SimpleRefCount<RadvdInterface, ...> on the
    generated binding class (pybindgen-generated code)."""
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same template instantiation.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter< ns3::RadvdInterface > > const &', 'o')])
    return
def _seg_11():
    """Return one segment of the IDNA codepoint status table as
    (codepoint, status[, mapping]) tuples ('V' valid, 'X' disallowed, 'M' mapped).

    NOTE(review): the u'' mapping payloads on the 'M' entries look emptied in this
    copy (such tables normally map to a non-empty replacement string) — verify
    against the generated uts46data source.
    """
    return [(2652, 'V'), (2653, 'X'), (2654, 'M', u''), (2655, 'X'), (2662, 'V'), (2679, 'X'), (2689, 'V'), (2692, 'X'), (2693, 'V'), (2702, 'X'), (2703, 'V'), (2706, 'X'), (2707, 'V'), (2729, 'X'), (2730, 'V'), (2737, 'X'), (2738, 'V'), (2740, 'X'), (2741, 'V'), (2746, 'X'), (2748, 'V'), (2758, 'X'), (2759, 'V'), (2762, 'X'), (2763, 'V'), (2766, 'X'), (2768, 'V'), (2769, 'X'), (2784, 'V'), (2788, 'X'), (2790, 'V'), (2802, 'X'), (2809, 'V'), (2816, 'X'), (2817, 'V'), (2820, 'X'), (2821, 'V'), (2829, 'X'), (2831, 'V'), (2833, 'X'), (2835, 'V'), (2857, 'X'), (2858, 'V'), (2865, 'X'), (2866, 'V'), (2868, 'X'), (2869, 'V'), (2874, 'X'), (2876, 'V'), (2885, 'X'), (2887, 'V'), (2889, 'X'), (2891, 'V'), (2894, 'X'), (2902, 'V'), (2904, 'X'), (2908, 'M', u''), (2909, 'M', u''), (2910, 'X'), (2911, 'V'), (2916, 'X'), (2918, 'V'), (2936, 'X'), (2946, 'V'), (2948, 'X'), (2949, 'V'), (2955, 'X'), (2958, 'V'), (2961, 'X'), (2962, 'V'), (2966, 'X'), (2969, 'V'), (2971, 'X'), (2972, 'V'), (2973, 'X'), (2974, 'V'), (2976, 'X'), (2979, 'V'), (2981, 'X'), (2984, 'V'), (2987, 'X'), (2990, 'V'), (3002, 'X'), (3006, 'V'), (3011, 'X'), (3014, 'V'), (3017, 'X'), (3018, 'V'), (3022, 'X'), (3024, 'V'), (3025, 'X'), (3031, 'V'), (3032, 'X'), (3046, 'V'), (3067, 'X'), (3072, 'V'), (3085, 'X'), (3086, 'V'), (3089, 'X'), (3090, 'V')]
def prepara_inference_dict(pos_batch, neg_batch):
    """Run ``prepare_inference_batch`` and repackage its 12 tensors into a keyword dict.

    The dict carries input ids, attention masks, and token type ids for the
    positive/negative premise ('pre') and hypothesis ('hyp') encodings.
    Fix: the original dict literal listed 'neg_hyp_attention_mask' twice
    (a silently-ignored duplicate key); the duplicate has been removed.
    """
    (pos_pre_input_ids, pos_pre_attention_mask, pos_pre_type_ids,
     pos_hyp_input_ids, pos_hyp_attention_mask, pos_hyp_type_ids,
     neg_pre_input_ids, neg_pre_attention_mask, neg_pre_type_ids,
     neg_hyp_input_ids, neg_hyp_attention_mask, neg_hyp_type_ids) = prepare_inference_batch(pos_batch, neg_batch)
    return {
        'pos_pre_input_ids': pos_pre_input_ids,
        'pos_pre_attention_mask': pos_pre_attention_mask,
        'pos_pre_type_ids': pos_pre_type_ids,
        'pos_hyp_input_ids': pos_hyp_input_ids,
        'pos_hyp_attention_mask': pos_hyp_attention_mask,
        'pos_hyp_type_ids': pos_hyp_type_ids,
        'neg_pre_input_ids': neg_pre_input_ids,
        'neg_pre_attention_mask': neg_pre_attention_mask,
        'neg_pre_type_ids': neg_pre_type_ids,
        'neg_hyp_input_ids': neg_hyp_input_ids,
        'neg_hyp_attention_mask': neg_hyp_attention_mask,
        'neg_hyp_type_ids': neg_hyp_type_ids,
    }
class ParameterList(Module):
    """Holds Parameters in an indexable list, registering each so Module machinery
    (e.g. .parameters(), state_dict) can see them."""

    _parameters: Dict[(str, 'Parameter')]

    def __init__(self, parameters: Optional[Iterable['Parameter']]=None) -> None:
        super(ParameterList, self).__init__()
        self._initialized = True
        if (parameters is not None):
            self += parameters

    def __setstate__(self, state):
        # Mark uninitialized during unpickling so __setattr__ skips its warning.
        state['_initialized'] = False
        super(ParameterList, self).__setstate__(state)
        self._initialized = True

    def _get_abs_string_index(self, idx):
        """Normalize a (possibly negative) integer index and return it as the
        string key used in ``_parameters``; raises IndexError when out of range."""
        idx = operator.index(idx)
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError('index {} is out of range'.format(idx))
        if (idx < 0):
            idx += len(self)
        return str(idx)

    # NOTE(review): the next two stub signatures look like typing overloads whose
    # @overload decorators were stripped; as written they are immediately
    # shadowed by the real __getitem__ below — confirm against upstream.
    def __getitem__(self, idx: int) -> 'Parameter':
        ...

    def __getitem__(self: T, idx: slice) -> T:
        ...

    def __getitem__(self, idx):
        # A slice returns a new ParameterList; an int returns the stored Parameter.
        if isinstance(idx, slice):
            return self.__class__(list(self._parameters.values())[idx])
        else:
            idx = self._get_abs_string_index(idx)
            return self._parameters[str(idx)]

    def __setitem__(self, idx: int, param: 'Parameter') -> None:
        idx = self._get_abs_string_index(idx)
        return self.register_parameter(str(idx), param)

    def __setattr__(self, key: Any, value: Any) -> None:
        # Warn on arbitrary attribute assignment (only Parameters belong here).
        if getattr(self, '_initialized', False):
            if ((not hasattr(self, key)) and (not isinstance(value, torch.nn.Parameter))):
                warnings.warn('Setting attributes on ParameterList is not supported.')
        super(ParameterList, self).__setattr__(key, value)

    def __len__(self) -> int:
        return len(self._parameters)

    def __iter__(self) -> Iterator['Parameter']:
        return iter(self._parameters.values())

    def __iadd__(self, parameters: Iterable['Parameter']) -> 'ParameterList':
        return self.extend(parameters)

    def __dir__(self):
        # Hide the numeric parameter keys from dir().
        keys = super(ParameterList, self).__dir__()
        keys = [key for key in keys if (not key.isdigit())]
        return keys

    def append(self, parameter: 'Parameter') -> 'ParameterList':
        """Append one parameter; returns self for chaining."""
        self.register_parameter(str(len(self)), parameter)
        return self

    def extend(self, parameters: Iterable['Parameter']) -> 'ParameterList':
        """Append all parameters from an iterable; returns self for chaining."""
        if (not isinstance(parameters, container_abcs.Iterable)):
            raise TypeError(('ParameterList.extend should be called with an iterable, but got ' + type(parameters).__name__))
        offset = len(self)
        for (i, param) in enumerate(parameters):
            self.register_parameter(str((offset + i)), param)
        return self

    def extra_repr(self) -> str:
        """One line per stored parameter: type, size, and device."""
        child_lines = []
        for (k, p) in self._parameters.items():
            size_str = 'x'.join((str(size) for size in p.size()))
            device_str = ('' if (not p.is_cuda) else ' (GPU {})'.format(p.get_device()))
            parastr = 'Parameter containing: [{} of size {}{}]'.format(torch.typename(p), size_str, device_str)
            child_lines.append((((' (' + str(k)) + '): ') + parastr))
        tmpstr = '\n'.join(child_lines)
        return tmpstr

    def __call__(self, input):
        raise RuntimeError('ParameterList should not be called.')

    def _replicate_for_data_parallel(self):
        warnings.warn('nn.ParameterList is being used with DataParallel but this is not supported. This list will appear empty for the models replicated on each GPU except the original one.')
        return super(ParameterList, self)._replicate_for_data_parallel()
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one training epoch with (possibly compressed) soft labels, logging
    timing/loss/accuracy meters and printing progress every ``args.print_freq`` steps."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5, 'LR {lr:.5f}'.format(lr=_get_learning_rate(optimizer))], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (images, target, soft_label)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        # Each loader element provides sequences of tensors; concatenate along the batch dim.
        images = torch.cat(images, dim=0)
        soft_label = torch.cat(soft_label, dim=0)
        target = torch.cat(target, dim=0)
        if (args.soft_label_type != 'ori'):
            # Expand compressed soft labels back to a full class distribution.
            soft_label = Recover_soft_label(soft_label, args.soft_label_type, args.num_classes)
        if (args.gpu is not None):
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)
            soft_label = soft_label.cuda(args.gpu, non_blocking=True)
        if args.mixup_cutmix:
            (images, soft_label) = mixup_cutmix(images, soft_label, args)
        output = model(images)
        # Temperature-scaled logits against the soft labels.
        loss = criterion((output / args.temp), soft_label)
        # Hard-label accuracy is tracked for reporting only.
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            t = time.localtime()
            current_time = time.strftime('%H:%M:%S', t)
            print(current_time)
            progress.display(i)
def extract_czeng17(extract_folder, debug=False):
    """Download (if needed) and extract the CzEng 1.6-to-1.7 conversion perl
    script, returning the path where the script is expected to live."""
    # NOTE(review): the URL literal below is truncated/unterminated in this copy
    # of the source — restore it before use. Also relies on a module-global
    # ``download_to`` directory.
    url = '
    filename = f'{download_to}/convert_czeng16_to_17.pl.zip'
    extract_to = f'{extract_folder}/{get_extract_name(filename)}'
    script_path = f'{extract_to}/convert_czeng16_to_17.pl'
    if (not os.path.exists(script_path)):
        wget.download(url, filename, bar=bar_custom)
        extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug)
    return script_path
def str_format_dynamic_dtype(op):
    """Render an OpInfo source snippet for ``op`` whose CPU and CUDA dtype fields
    use the dispatch-hint expressions computed by ``dtypes_dispatch_hint``."""
    fmt_str = '\n    OpInfo({name},\n           dtypes={dtypesIfCPU},\n           dtypesIfCUDA={dtypesIfCUDA},\n    )\n    '.format(name=op.name, dtypesIfCPU=dtypes_dispatch_hint(op.dtypesIfCPU).dispatch_fn_str, dtypesIfCUDA=dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str)
    return fmt_str
def spantemplate6(text_w_pairs):
    """Build a QA pair asking what the cause span led to; the answer is the effect span."""
    cause_span, effect_span = get_cause_effect_spans(text_w_pairs)
    generated_question = f'What resulted from "{cause_span}"?'
    return (generated_question, {'text': effect_span})
def add_text_generate_args(parser):
    """Register text-generation sampling and I/O options on ``parser`` and return it."""
    group = parser.add_argument_group('Text generation', 'configurations')
    # (flag, add_argument keyword options) — added in this exact order.
    flag_specs = [
        ('--temperature', dict(type=float, default=1.0)),
        ('--greedy', dict(action='store_true', default=False)),
        ('--top_p', dict(type=float, default=0.0)),
        ('--top_k', dict(type=int, default=0)),
        ('--out-seq-length', dict(type=int, default=1024)),
        ('--sample-input-file', dict(type=str, default='', help='get input from file instead of interactive mode, each line is an input')),
        ('--sample-output-file', dict(type=str, default='', help='output file got from --sample-input-file')),
        ('--num-samples', dict(type=int, default=0, help='number of samples to generate unconditionally, defaults to 0 and interactive conditional sampling')),
        ('--genfile', dict(type=str, help='output file when generating unconditionally')),
        ('--recompute', dict(action='store_true', help='during generation recompute all attention instead of using previously computed keys/values.')),
    ]
    for flag, options in flag_specs:
        group.add_argument(flag, **options)
    return parser
def _get_mangled_gpu_name():
    """Return the current CUDA device name lowercased, with every character
    outside [a-z0-9_-] replaced by '-'."""
    raw_name = torch.cuda.get_device_name().lower()
    allowed = set('abcdefghijklmnopqrstuvwxyz0123456789_-')
    return ''.join((ch if ch in allowed else '-') for ch in raw_name)
# NOTE(review): the leading '@pytest.mark' of this parametrize decorator appears
# to have been stripped; as written the line is a syntax error.
.parametrize('size, actions, expected_reward, random_state, expected_delays', [(2, 2, np.asarray([[1, 0.01], [0.5, 0.5]]), 12344, np.asarray([[2.0, 55.0], [3.0, 27.0]])), (2, 2, np.asarray([[0.1, 0.2], [0.3, 0.4]]), 12345, np.asarray([[242.0, 32.0], [15.0, 15.0]]))])
def test_exponential_delay_function_conditioned_on_expected_reward_results_in_expected_seeded_discrete_delays(size, actions, expected_reward, random_state, expected_delays):
    """A seeded ExponentialDelaySampler should reproduce these exact delay matrices."""
    delay_function = ExponentialDelaySampler(max_scale=100.0, min_scale=10.0, random_state=random_state).exponential_delay_function_expected_reward_weighted
    actual_delays = delay_function(expected_rewards=expected_reward)
    assert (actual_delays == expected_delays).all()
class SetPartitionsBkhalf_k(SetPartitionsAkhalf_k):
    """Subfamily of the A_{k+1/2} set partitions in which every block has size
    exactly 2 — presumably the half-integer Brauer-type diagrams; confirm against
    the surrounding combinat module."""

    def _repr_(self):
        return (SetPartitionsAkhalf_k._repr_(self) + ' and with block size 2')

    def __contains__(self, x):
        """Membership: ``x`` must lie in the parent family and consist only of 2-element blocks."""
        if (not SetPartitionsAkhalf_k.__contains__(self, x)):
            return False
        for part in x:
            if (len(part) != 2):
                return False
        return True

    def cardinality(self):
        # Counted by exhaustive enumeration of the iterator.
        return len(self.list())

    def __iter__(self):
        """Yield every perfect matching of {1..k} union {-1..-k}, each extended by
        the fixed block {k+1, -(k+1)}."""
        # NOTE: 'set' here shadows the builtin within this method.
        set = (list(range(1, (self.k + 1))) + [(- x) for x in range(1, (self.k + 1))])
        for sp in SetPartitions(set, ([2] * (len(set) // 2))):
            (yield self.element_class(self, (Set(list(sp)) + Set([Set([(self.k + 1), ((- self.k) - 1)])]))))
# NOTE(review): the leading '@pytest.mark' of the two parametrize decorators
# below appears to have been stripped; as written these lines are syntax errors.
.parametrize('through', [through_arrow, through_parquet])
.parametrize('extensionarray', [False, True])
def test_unmaskedarray_numpyarray(tmp_path, through, extensionarray):
    """Round-tripping an UnmaskedArray(NumpyArray) through Arrow/Parquet should
    yield a schema whose predicted Awkward form matches the actual form."""
    akarray = ak.contents.UnmaskedArray(ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3]), parameters={'which': 'inner'}))
    (schema_arrow, array_form) = through(akarray, extensionarray, tmp_path)
    predicted_form = ak._connect.pyarrow.form_handle_arrow(schema_arrow, pass_empty_field=True)
    assert (predicted_form == array_form)
class UnitNormLayer(tf.keras.layers.Layer):
    """Keras layer that rescales each row of its input to unit L2 norm."""

    def __init__(self):
        super(UnitNormLayer, self).__init__()

    def call(self, input_tensor):
        # Row-wise L2 norms, reshaped to a column so the division broadcasts.
        row_norms = tf.reshape(tf.norm(input_tensor, axis=1), [-1, 1])
        return input_tensor / row_norms
def inference(image, prompt, min_len=1, max_len=250, beam_size=5, len_penalty=(- 1), repetition_penalty=1, top_p=0.9, decoding_method='Beam Search', num_captions=1, temperature=1.0, video=False):
    """Generate text for ``image`` with the module-global ``model`` and ``vis_processors``.

    ``decoding_method`` == 'Nucleus sampling' enables nucleus sampling; anything
    else uses beam search. Only the first generated caption is returned.
    NOTE(review): ``len_penalty`` is accepted (and printed) but never forwarded
    to ``model.generate`` — confirm whether that is intended.
    """
    use_nucleus_sampling = (decoding_method == 'Nucleus sampling')
    print(image, prompt, min_len, max_len, beam_size, len_penalty, repetition_penalty, top_p, use_nucleus_sampling)
    if (not video):
        image = vis_processors['eval'](image).unsqueeze(0).to(device)
    else:
        # Video path: processor applied directly and the tensor cast to half precision.
        image = vis_processors(image).to(device).unsqueeze(0).half()
    samples = {'image': image, 'prompt': prompt}
    output = model.generate(samples, repetition_penalty=float(repetition_penalty), num_beams=beam_size, max_length=max_len, min_length=min_len, top_p=top_p, use_nucleus_sampling=use_nucleus_sampling, num_captions=num_captions, temperature=temperature)
    return output[0]
# NOTE(review): this bare call looks like a stripped registration decorator
# (e.g. '@INITIALIZERS.register_module(name="Normal")') for the class below — confirm.
_module(name='Normal')
class NormalInit(BaseInit):
    """Weight initializer applying ``normal_init(mean, std)`` to matching layers."""

    def __init__(self, mean: float=0, std: float=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module: nn.Module) -> None:
        def init(m):
            if self.wholemodule:
                # Initialize every submodule unconditionally.
                normal_init(m, self.mean, self.std, self.bias)
            else:
                # Initialize only layers whose class (or a base class) name is listed.
                layername = m.__class__.__name__
                basesname = _get_bases_name(m)
                if len((set(self.layer) & set(([layername] + basesname)))):
                    normal_init(m, self.mean, self.std, self.bias)
        module.apply(init)
        # Record what was done for modules that track their init provenance.
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self) -> str:
        """Human-readable summary of this initializer's settings."""
        info = f'{self.__class__.__name__}: mean={self.mean}, std={self.std}, bias={self.bias}'
        return info
def get_margin(transcript, agent=None, role=None):
    """Return an agent's normalized negotiation margin for a chat transcript.

    The margin is the final offer price's distance from the midpoint of the two
    agents' targets, normalized by |midpoint - agent_target| (1.0 means the agent
    got exactly its target). Sellers gain from higher prices, buyers from lower.
    When neither ``agent`` nor ``role`` is given, the winning agent is used;
    returns -1 when no winner can be determined.
    """
    if (role is not None):
        # Resolve the agent index from the role name in the scenario KBs.
        scenario = transcript['scenario']
        roles = {scenario['kbs'][0]['personal']['Role']: 0, scenario['kbs'][1]['personal']['Role']: 1}
        agent = roles[role]
    if (agent is None):
        winner = get_winner(transcript)
        if (winner is None):
            return (- 1)
        if (winner == (- 1)):
            # get_winner returned -1; fall back to agent 0 — TODO confirm that -1
            # means a tie / no clear winner here.
            winner = 0
        agent = winner
    scenario = transcript['scenario']
    final_price = transcript['outcome']['offer']['price']
    agent_role = scenario['kbs'][agent]['personal']['Role']
    agent_target = scenario['kbs'][agent]['personal']['Target']
    partner_target = scenario['kbs'][(1 - agent)]['personal']['Target']
    midpoint = ((agent_target + partner_target) / 2.0)
    norm_factor = np.abs((midpoint - agent_target))
    if (agent_role == SELLER):
        margin = ((final_price - midpoint) / norm_factor)
    else:
        margin = ((midpoint - final_price) / norm_factor)
    return margin
# NOTE(review): this bare call looks like a stripped decorator or marker for the
# test below (its original prefix is missing) — confirm upstream.
_test()
def test_gemv_fpga_tiles_by_column():
    """Run GEMV with column tiling: 256x512, transposed, vector width 4."""
    return run_gemv('tiles_by_column', 256, 512, transposed=True, vectorize=4)
def test_float():
    """io.imshow on a float image should use the gray colormap, a (0, 1) clim,
    exactly one subplot, and no colorbar."""
    plt.figure()
    # The regex alternation tolerates either the CObject warning or no warning at all.
    with expected_warnings((imshow_expected_warnings + ['CObject type is marked|\\A\\Z'])):
        ax_im = io.imshow(imf)
    assert (ax_im.cmap.name == 'gray')
    assert (ax_im.get_clim() == (0, 1))
    assert (n_subplots(ax_im) == 1)
    assert (ax_im.colorbar is None)
class Profile():
    """Loads and summarizes job profiling data: per-node worker profiles plus an
    optional master profile, parsed from a binary format written elsewhere."""

    def __init__(self, sc, job_id, load_threads=8, subsample=None):
        self._storage = sc._storage
        job = sc._load_descriptor(protobufs.BulkJobDescriptor, 'jobs/{}/descriptor.bin'.format(job_id))

        def get_prof(path, worker=True):
            # Returns ((start, end), profilers) for an existing profile file, else None.
            file_info = self._storage.get_file_info(path)
            if file_info.file_exists:
                # NOTE: 'time' locally shadows the time module inside this helper.
                (time, profs) = self._parse_profiler_file(path, worker)
                return (time, profs)
            else:
                return None
        nodes = list(range(job.num_nodes))
        if (subsample is not None):
            nodes = random.sample(nodes, subsample)
        # Load per-node worker profiles concurrently; nodes with missing files are dropped.
        # NOTE(review): keys are enumeration positions, not node ids — when
        # subsampling, these need not match the sampled node numbers; confirm.
        with ThreadPoolExecutor(max_workers=load_threads) as executor:
            profilers = executor.map(get_prof, ['{}/jobs/{}/profile_{}.bin'.format(sc._db_path, job_id, n) for n in nodes])
            self._worker_profilers = {n: prof for (n, prof) in enumerate(profilers) if (prof is not None)}
        path = '{}/jobs/{}/profile_master.bin'.format(sc._db_path, job_id)
        self._master_profiler = get_prof(path, worker=False)

    def write_trace(self, path: str):
        """Emit the loaded profiles as a Chrome trace-event JSON file at ``path``
        (extension .trace, or .tar.gz for a gzipped tarball)."""
        colors = {'Wait on Input Queue': 'grey', 'Wait on Output Queue': 'grey'}
        traces = []
        next_tid = 0

        def make_trace_from_interval(interval, cat, proc, tid):
            # interval is (name, start_ns, end_ns); timestamps converted to microseconds.
            trace = {'name': interval[0], 'cat': cat, 'ph': 'X', 'ts': (interval[1] / 1000), 'dur': ((interval[2] - interval[1]) / 1000), 'pid': proc, 'tid': tid, 'args': {}}
            if (interval[0] in colors):
                trace['cname'] = colors[interval[0]]
            return trace
        # The master's event loop is rendered as pseudo-process -1.
        if (self._master_profiler is not None):
            traces.append({'name': 'process_name', 'ph': 'M', 'pid': (- 1), 'args': {'name': 'Master'}})
            traces.append({'name': 'thread_name', 'ph': 'M', 'pid': (- 1), 'tid': 0, 'args': {'name': 'EventLoop'}})
            for interval in self._master_profiler[1]['intervals']:
                traces.append(make_trace_from_interval(interval, 'master', (- 1), 0))
        for (proc, (_, worker_profiler_groups)) in self._worker_profilers.items():
            traces.append({'name': 'process_name', 'ph': 'M', 'pid': proc, 'args': {'name': 'Worker {:d}'.format(proc)}})
            num_load_workers = len(worker_profiler_groups['load'])
            # NOTE(review): these two lines treat 'eval' entries as nested groups
            # (len of each element), while _parse_profiler_file appends eval profs
            # flat — looks inconsistent; confirm against the writer format.
            num_eval_workers = sum([len(profs) for profs in worker_profiler_groups['eval']])
            num_stages_per_pipeline = (len(worker_profiler_groups['eval'][0]) if (num_eval_workers > 0) else 0)
            for (worker_type, profs) in [('process_job', worker_profiler_groups['process_job']), ('load', worker_profiler_groups['load']), ('eval', worker_profiler_groups['eval']), ('save', worker_profiler_groups['save'])]:
                for (i, prof) in enumerate(profs):
                    tid = next_tid
                    next_tid += 1
                    pipeline_num = prof['worker_num']
                    tag = prof['worker_tag']
                    # Maps (worker_type, tag) to a display name template and a sort-index function.
                    display_info = {('process_job', ''): {'name': 'EventLoop', 'index': (lambda x: 0)}, ('eval', 'pre'): {'name': 'Pipeline[{:d}]:DecodeVideo', 'index': (lambda x: (((1 + num_load_workers) + (x * num_stages_per_pipeline)) + 0))}, ('eval', 'eval'): {'name': 'Pipeline[{:d}]:Ops[{:d}]', 'index': (lambda x: (((1 + num_load_workers) + (x * num_stages_per_pipeline)) + 1))}, ('eval', 'post'): {'name': 'Pipeline[{:d}]:EncodeVideo', 'index': (lambda x: (((1 + num_load_workers) + (x * num_stages_per_pipeline)) + 2))}, ('load', ''): {'name': 'Reader[{:d}]', 'index': (lambda x: (1 + x))}, ('save', ''): {'name': 'Writer[{:d}]', 'index': (lambda x: (((1 + num_load_workers) + num_eval_workers) + x))}}
                    info = display_info[(worker_type, tag)]
                    name = info['name']
                    if (tag == 'eval'):
                        name = name.format(pipeline_num, prof['kernel_group'])
                    elif (worker_type != 'process_job'):
                        name = name.format(pipeline_num)
                    traces.append({'name': 'thread_name', 'ph': 'M', 'pid': proc, 'tid': tid, 'args': {'name': name}})
                    sort_index = info['index'](tid)
                    traces.append({'name': 'thread_sort_index', 'ph': 'M', 'pid': proc, 'tid': tid, 'args': {'sort_index': sort_index}})
                    for interval in prof['intervals']:
                        traces.append(make_trace_from_interval(interval, worker_type, proc, tid))
        # Write either a bare .trace JSON file or a .tar.gz wrapping it.
        parts = path.split('.')
        base = parts[0]
        exts = parts[1:]
        with open((base + '.trace'), 'w') as f:
            f.write(json.dumps(traces))
        if (exts == ['trace']):
            return path
        elif (exts == ['tar', 'gz']):
            with tarfile.open((base + '.tar.gz'), 'w:gz') as tar:
                tar.add((base + '.trace'))
            os.remove((base + '.trace'))
            return path
        else:
            raise ScannerException("Invalid trace extension '{}'. Must be .trace or .tar.gz.".format(''.join([('.' + e) for e in exts])))

    def _convert_time(self, d):
        """Recursively format every float in nested dict ``d`` as a string."""
        def convert(t):
            if isinstance(t, float):
                # NOTE(review): dividing by `.0` (i.e. 0.0) raises ZeroDivisionError
                # for any float input; the divisor looks garbled — probably a
                # time-unit constant (e.g. 1e9) was lost. Confirm and restore.
                return '{:2f}'.format((t / .0))
            return t
        return {k: (self._convert_time(v) if isinstance(v, dict) else convert(v)) for (k, v) in d.items()}

    def total_time_interval(self):
        """Return the (start, end) interval of the first loaded worker profile."""
        (intv, _) = list(self._worker_profilers.values())[0]
        return intv

    def statistics(self):
        """Aggregate per-kind interval durations and counters across all workers."""
        totals = {}
        for ((total_start, total_end), profiler) in list(self._worker_profilers.values()):
            for kind in profiler:
                if (kind not in totals):
                    totals[kind] = {}
                for thread in profiler[kind]:
                    for (key, start, end) in thread['intervals']:
                        if (key not in totals[kind]):
                            totals[kind][key] = 0.0
                        totals[kind][key] += (end - start)
                    for (name, value) in thread['counters'].items():
                        if (name not in totals[kind]):
                            totals[kind][name] = 0
                        totals[kind][name] += value
        # Uses the interval of the last worker iterated above.
        totals['total_time'] = float((total_end - total_start))
        readable_totals = self._convert_time(totals)
        return readable_totals

    def _parse_profiler_output(self, bytes_buffer, offset):
        """Decode one worker's profile record from ``bytes_buffer`` starting at
        ``offset``; returns (record dict, new offset)."""
        # Header: node id, worker type/tag strings, worker number.
        (t, offset) = read_advance('q', bytes_buffer, offset)
        node = t[0]
        (worker_type, offset) = unpack_string(bytes_buffer, offset)
        (worker_tag, offset) = unpack_string(bytes_buffer, offset)
        (t, offset) = read_advance('q', bytes_buffer, offset)
        worker_num = t[0]
        # Interval-name dictionary: key byte -> name string.
        (t, offset) = read_advance('q', bytes_buffer, offset)
        num_keys = t[0]
        key_dictionary = {}
        for i in range(num_keys):
            (key_name, offset) = unpack_string(bytes_buffer, offset)
            (t, offset) = read_advance('B', bytes_buffer, offset)
            key_index = t[0]
            key_dictionary[key_index] = key_name
        # Timed intervals: (name, start, end).
        (t, offset) = read_advance('q', bytes_buffer, offset)
        num_intervals = t[0]
        intervals = []
        for i in range(num_intervals):
            (t, offset) = read_advance('B', bytes_buffer, offset)
            key_index = t[0]
            (t, offset) = read_advance('q', bytes_buffer, offset)
            start = t[0]
            (t, offset) = read_advance('q', bytes_buffer, offset)
            end = t[0]
            intervals.append((key_dictionary[key_index], start, end))
        # Named counters; an implausibly large count is treated as corrupt and skipped.
        (t, offset) = read_advance('q', bytes_buffer, offset)
        num_counters = t[0]
        if (num_counters > 1000000):
            num_counters = 0
        counters = {}
        for i in range(num_counters):
            (counter_name, offset) = unpack_string(bytes_buffer, offset)
            (t, offset) = read_advance('q', bytes_buffer, offset)
            counter_value = t[0]
            counters[counter_name] = counter_value
        return ({'node': node, 'worker_type': worker_type, 'worker_tag': worker_tag, 'worker_num': worker_num, 'intervals': intervals, 'counters': counters}, offset)

    def _parse_profiler_file(self, profiler_path, worker=True):
        """Parse a whole profile file: returns ((start_time, end_time), profilers)
        where profilers is a dict of worker_type -> [records] for worker files, or
        a single record for the master file."""
        bytes_buffer = self._storage.read(profiler_path)
        offset = 0
        (t, offset) = read_advance('q', bytes_buffer, offset)
        start_time = t[0]
        (t, offset) = read_advance('q', bytes_buffer, offset)
        end_time = t[0]
        if worker:
            profilers = defaultdict(list)
            # First record: the process_job event loop.
            (prof, offset) = self._parse_profiler_output(bytes_buffer, offset)
            profilers[prof['worker_type']].append(prof)
            # Load workers.
            (t, offset) = read_advance('B', bytes_buffer, offset)
            num_load_workers = t[0]
            for i in range(num_load_workers):
                (prof, offset) = self._parse_profiler_output(bytes_buffer, offset)
                profilers[prof['worker_type']].append(prof)
            # Eval workers: groups_per_chain records per pipeline; the interior
            # groups get a kernel_group index.
            (t, offset) = read_advance('B', bytes_buffer, offset)
            num_eval_workers = t[0]
            (t, offset) = read_advance('B', bytes_buffer, offset)
            groups_per_chain = t[0]
            for pu in range(num_eval_workers):
                for fg in range(groups_per_chain):
                    (prof, offset) = self._parse_profiler_output(bytes_buffer, offset)
                    if ((fg > 0) and (fg < (groups_per_chain - 1))):
                        prof['kernel_group'] = (fg - 1)
                    profilers[prof['worker_type']].append(prof)
            # Save workers.
            (t, offset) = read_advance('B', bytes_buffer, offset)
            num_save_workers = t[0]
            for i in range(num_save_workers):
                (prof, offset) = self._parse_profiler_output(bytes_buffer, offset)
                profilers[prof['worker_type']].append(prof)
            return ((start_time, end_time), profilers)
        else:
            return ((start_time, end_time), self._parse_profiler_output(bytes_buffer, offset)[0])
def filter_var_wo_type(df_vars: pd.DataFrame) -> pd.DataFrame:
    """Drop variables that have no 'var_type' and log before/after counts.

    Parameters
    ----------
    df_vars : pd.DataFrame
        Variable table; must contain a 'var_type' column.

    Returns
    -------
    pd.DataFrame
        The rows of ``df_vars`` whose 'var_type' is non-null.
    """
    n_before = len(df_vars)
    logger.info(f'Variables before dropping: {n_before:,}')
    df_vars = df_vars[df_vars['var_type'].notnull()]
    # Fix: the original log message contained a doubled word ("after dropping dropping").
    logger.info(f'Variables after dropping: {len(df_vars):,}')
    logger.info(f'Filtered out {(n_before - len(df_vars)):,} variables w/o a type.')
    return df_vars
class ResponseStreamMixin(object):
    """Mixin adding a ``stream`` accessor that wraps the response in a ResponseStream."""
    # NOTE(review): '_property' below looks like a stripped decorator (probably
    # '@cached_property' or '@property'); as written it is a bare name expression
    # and 'stream' stays a plain method — confirm against the upstream source.
    _property
    def stream(self):
        return ResponseStream(self)
def IsInt(a):
    """Return a Z3 Boolean expression testing whether real expression ``a`` is an integer."""
    if z3_debug():
        _z3_assert(a.is_real(), 'Z3 real expression expected.')
    ctx = a.ctx
    # Wrap the raw AST produced by the C API in a BoolRef for the same context.
    return BoolRef(Z3_mk_is_int(ctx.ref(), a.as_ast()), ctx)
class CFExplanation(ExplanationBase):
    """Counterfactual explanations for time series: each entry pairs a query
    series with its (optional) counterfactual, plus any extra metadata."""

    def __init__(self):
        super().__init__()
        self.explanations = []

    def __repr__(self):
        return repr(self.explanations)

    def add(self, query, cfs, **kwargs):
        """Append one explanation: the query series, its counterfactual, and any extra fields."""
        e = {'query': query, 'counterfactual': cfs}
        e.update(kwargs)
        self.explanations.append(e)

    def get_explanations(self, index=None):
        """Return all explanations, or a single one when ``index`` is given."""
        return (self.explanations if (index is None) else self.explanations[index])

    def plot(self, index=None, max_num_variables_to_plot=25, **kwargs):
        """Plot query vs. counterfactual per variable with matplotlib; returns a
        list of figures (one per explanation), or None when there are none."""
        import matplotlib.pyplot as plt
        if (len(self.explanations) == 0):
            return None
        figures = []
        explanations = ([self.explanations[index]] if (index is not None) else self.explanations)
        for exp in explanations:
            (ts, cf) = (exp['query'], exp['counterfactual'])
            num_variables = ts.shape[1]
            if (num_variables > max_num_variables_to_plot):
                warnings.warn('The number of variables in the time series exceeds the maximum number of variables to plot.')
            # Arrange up to n variables in a near-square subplot grid.
            n = min(num_variables, max_num_variables_to_plot)
            num_rows = int(np.round(np.sqrt(n)))
            num_cols = int(np.ceil((n / num_rows)))
            (fig, axes) = plt.subplots(num_rows, num_cols, squeeze=False)
            for i in range(n):
                (row, col) = divmod(i, num_cols)
                plt.sca(axes[(row, col)])
                left_ax = axes[(row, col)]
                ts_a = ts[[ts.columns[i]]]
                timestamps = [str(v) for v in ts_a.index.values]
                # Query series in black on the left axis.
                left_ax.plot(timestamps, ts_a.values.flatten(), color='k')
                left_ax.set_xticklabels(left_ax.get_xticks(), rotation=45)
                if (cf is not None):
                    # Counterfactual in red on a twin y-axis.
                    right_ax = axes[(row, col)].twinx()
                    ts_b = cf[[cf.columns[i]]]
                    right_ax.plot(timestamps, ts_b.values.flatten(), color='r', label='cf')
                plt.title(f'{ts.columns[i]}')
                plt.legend()
                plt.grid()
            figures.append(fig)
        return figures

    def _plotly_figure(self, index, **kwargs):
        """Build one plotly figure for explanation ``index``: query lines solid,
        counterfactual lines dashed, with a date range selector."""
        import plotly
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
        exp = self.explanations[index]
        traces = []
        color_list = plotly.colors.qualitative.Dark24
        (ts, cf) = (exp['query'], exp['counterfactual'])
        for i in range(ts.shape[1]):
            v = ts[[ts.columns[i]]]
            color = color_list[(i % len(color_list))]
            traces.append(go.Scatter(name=ts.columns[i], x=v.index, y=v.values.flatten(), mode='lines', line=dict(color=color)))
            if (cf is not None):
                v = cf[[ts.columns[i]]]
                color = color_list[(i % len(color_list))]
                traces.append(go.Scatter(name=f'{cf.columns[i]}_cf', x=v.index, y=v.values.flatten(), mode='lines', line=dict(color=color, dash='dash')))
        layout = dict(showlegend=True, xaxis=dict(title='Time', type='date', rangeselector=dict(buttons=list([dict(count=7, label='1w', step='day', stepmode='backward'), dict(count=1, label='1m', step='month', stepmode='backward'), dict(count=6, label='6m', step='month', stepmode='backward'), dict(count=1, label='1y', step='year', stepmode='backward'), dict(step='all')]))))
        fig = make_subplots(figure=go.Figure(layout=layout))
        fig.update_yaxes(title_text='Timeseries')
        for trace in traces:
            fig.add_trace(trace)
        return fig

    def plotly_plot(self, index=0, **kwargs):
        """Return a DashFigure for one explanation (``index`` must not be None)."""
        assert (index is not None), '`index` cannot be None for `plotly_plot`. Please specify the instance index.'
        return DashFigure(self._plotly_figure(index, **kwargs))

    def ipython_plot(self, index=0, **kwargs):
        """Render one explanation inline in a notebook via plotly offline mode."""
        import plotly
        assert (index is not None), '`index` cannot be None for `ipython_plot`. Please specify the instance index.'
        plotly.offline.iplot(self._plotly_figure(index, **kwargs))

    def to_json(self):
        raise RuntimeError('`CFExplanation` for timeseries cannot be converted into JSON format.')

    # NOTE(review): takes 'cls' but has no @classmethod decorator — the decorator
    # appears stripped in this copy; confirm against the upstream source.
    def from_dict(cls, d):
        raise RuntimeError('`CFExplanation` for timeseries does not support `from_dict`.')
def calc_mean_rank(src, pred):
    """Print the mean, over examples, of the mean rank of oracle entries.

    For each (source, prediction) pair, every oracle entry found in the
    prediction contributes its position within the oracle list; entries
    missing from the prediction contribute a penalty rank of 101.
    """
    per_example_means = []
    for sample, prediction in zip(src, pred):
        oracle_man = get_oracle(sample, sample['cmd_name'])
        predicted = prediction['pred']
        # NOTE(review): hits are ranked by their index in the *oracle* list,
        # not the prediction — preserved as-is from the original.
        ranks = [oracle_man.index(o) if o in predicted else 101 for o in oracle_man]
        if ranks:
            per_example_means.append(np.mean(ranks))
    print(np.mean(per_example_means))
def set_blob_potential(implementation):
    """Return the blob-potential implementation selected by name.

    Parameters
    ----------
    implementation : str
        One of 'None' (a stub returning 0), 'python', 'C++' or 'pycuda'.

    Returns
    -------
    callable or None
        The selected potential function. Unrecognized names fall through and
        return None (behavior preserved from the original).
    """
    if (implementation == 'None'):
        # Stub used to disable the blob potential entirely.
        def default_zero(*args, **kwargs):
            return 0
        # BUG FIX: the original defined `default_zero_r_vectors` but returned
        # the undefined name `default_zero`, raising NameError when selected.
        return default_zero
    elif (implementation == 'python'):
        return calc_blob_potential_python
    elif (implementation == 'C++'):
        return calc_blob_potential_boost
    elif (implementation == 'pycuda'):
        return many_body_potential_pycuda.calc_blob_potential_pycuda
def diff_str(first, second):
    """Return a unified diff between two strings.

    Single-line inputs that carry no trailing newline are normalized to end
    in a newline so that difflib emits well-formed diff lines for them.
    """
    lines_a = first.splitlines(keepends=True)
    lines_b = second.splitlines(keepends=True)
    is_bare_single_line = len(lines_a) == 1 and first.strip('\r\n') == first
    if is_bare_single_line:
        lines_a = [first + '\n']
        lines_b = [second + '\n']
    diff = difflib.unified_diff(lines_a, lines_b)
    return ''.join(diff)
class FiniteDiffGradient(ApproxGradientBase):
    """Approximate the gradient of a scalar function by finite differences."""

    def __init__(self, fun: callable, eps: float=0.01, formula: str='central') -> None:
        """Store the function, step size `eps`, and difference `formula`
        ('central', 'forward', 'backwards' or 'five-point'); raises
        ValueError for any other formula name."""
        self.fun = fun
        self.eps = eps
        self.formula = formula
        if formula not in ('central', 'forward', 'backwards', 'five-point'):
            raise ValueError(('Wrong value of formula: ' + formula))

    def gradient(self, x: np.ndarray) -> np.ndarray:
        """Return the finite-difference gradient of `self.fun` at the 1-D
        point `x`; raises ValueError when `x` is not 1-dimensional."""
        if (len(x.shape) != 1):
            raise ValueError('Unsupported shape of x!')
        dim = len(x)
        grad = np.zeros(dim)

        def step(i):
            # Perturbation of size eps along coordinate i.
            return np.eye(1, dim, k=i)[0] * self.eps

        if self.formula == 'forward':
            f0 = self.fun(x)
            for i in range(dim):
                grad[i] = (self.fun(x + step(i)) - f0) / self.eps
        elif self.formula == 'backwards':
            f0 = self.fun(x)
            for i in range(dim):
                grad[i] = (f0 - self.fun(x - step(i))) / self.eps
        elif self.formula == 'central':
            for i in range(dim):
                dx = step(i)
                grad[i] = (self.fun(x + dx) - self.fun(x - dx)) / (2.0 * self.eps)
        elif self.formula == 'five-point':
            for i in range(dim):
                dx = step(i)
                grad[i] = ((- 1.0) * self.fun(x + (2.0 * dx)) + 8.0 * self.fun(x + (1.0 * dx)) - 8.0 * self.fun(x - (1.0 * dx)) + 1.0 * self.fun(x - (2.0 * dx))) / (12.0 * self.eps)
        else:
            raise ValueError(('Wrong value of type: ' + self.formula))
        return grad
def main(args):
    """End-to-end training entry point driven by parsed CLI `args`.

    Configures plotting/notification/tensorboard, builds the image datasets
    and dataloaders, selects GPU(s), constructs an (optionally pre-trained)
    model plus optimizer and LR scheduler, trains via `train_model`, and
    saves the resulting weights.

    NOTE(review): relies on module-level helpers not visible in this chunk
    (get_model, train_model, del_file, SummaryWriter, transforms,
    torchvision, optim, lr_scheduler, nn, torch, os) — confirm imports at
    file top.
    """
    if args.paint:
        # Non-interactive backend so figures can be rendered without a display.
        import matplotlib
        matplotlib.use('Agg')
    # ---- unpack CLI arguments ----
    enable_notify = args.enable_notify
    enable_tensorboard = args.enable_tensorboard
    enable_attention_check = args.enable_attention_check
    enable_visualize_check = args.enable_visualize_check
    enable_sam = args.enable_sam
    Pre_Trained_model_path = args.Pre_Trained_model_path
    gpu_idx = args.gpu_idx
    model_idx = args.model_idx
    drop_rate = args.drop_rate
    attn_drop_rate = args.attn_drop_rate
    drop_path_rate = args.drop_path_rate
    # "off" flags invert into "use" booleans.
    use_cls_token = (False if args.cls_token_off else True)
    use_pos_embedding = (False if args.pos_embedding_off else True)
    use_att_module = (None if (args.att_module == 'None') else args.att_module)
    pretrained_backbone = (False if args.backbone_PT_off else True)
    num_classes = args.num_classes
    edge_size = args.edge_size
    batch_size = args.batch_size
    num_workers = args.num_workers
    num_epochs = args.num_epochs
    intake_epochs = args.intake_epochs
    # Default check interval: roughly every 400 samples' worth of minibatches.
    check_minibatch = (args.check_minibatch if (args.check_minibatch is not None) else (400 // batch_size))
    lr = args.lr
    lrf = args.lrf
    opt_name = args.opt_name
    draw_root = args.draw_root
    model_path = args.model_path
    dataroot = args.dataroot
    # ---- optional e-mail notification of the run configuration ----
    if enable_notify:
        import notifyemail as notify
        notify.Reboost(mail_host='smtp.163.com', mail_user='', mail_pass='xxxxxx', default_reciving_list=['.com'], log_root_path='log', max_log_cnt=5)
        if enable_tensorboard:
            notify.add_text('update to the tensorboard')
        else:
            notify.add_text('not update to the tensorboard')
        notify.add_text(' ')
        notify.add_text(('model idx ' + str(model_idx)))
        notify.add_text(' ')
        notify.add_text(('GPU idx: ' + str(gpu_idx)))
        notify.add_text(' ')
        notify.add_text(('cls number ' + str(num_classes)))
        notify.add_text(('edge size ' + str(edge_size)))
        notify.add_text(('batch_size ' + str(batch_size)))
        notify.add_text(('num_epochs ' + str(num_epochs)))
        notify.add_text(('lr ' + str(lr)))
        notify.add_text(('opt_name ' + str(opt_name)))
        notify.add_text(('enable_sam ' + str(enable_sam)))
        notify.send_log()
    print('{}'.format('setting'))
    print(args)
    # ---- output locations: figures under draw_path, weights under model_path ----
    draw_path = os.path.join(draw_root, ('PC_' + model_idx))
    save_model_path = os.path.join(model_path, (('PC_' + model_idx) + '.pth'))
    if (not os.path.exists(model_path)):
        os.makedirs(model_path)
    if os.path.exists(draw_path):
        # Clear stale figures from a previous run with the same model_idx.
        del_file(draw_path)
    else:
        os.makedirs(draw_path)
    if enable_tensorboard:
        writer = SummaryWriter(draw_path)
    else:
        writer = None
    # ---- augmentation/preprocessing pipelines and dataloaders ----
    data_transforms = {'train': transforms.Compose([transforms.RandomRotation((0, 180)), transforms.CenterCrop(700), transforms.Resize(edge_size), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.15, contrast=0.3, saturation=0.3, hue=0.06), transforms.ToTensor()]), 'val': transforms.Compose([transforms.CenterCrop(700), transforms.Resize(edge_size), transforms.ToTensor()])}
    datasets = {x: torchvision.datasets.ImageFolder(os.path.join(dataroot, x), data_transforms[x]) for x in ['train', 'val']}
    dataloaders = {'train': torch.utils.data.DataLoader(datasets['train'], batch_size=batch_size, shuffle=True, num_workers=num_workers), 'val': torch.utils.data.DataLoader(datasets['val'], batch_size=batch_size, shuffle=False, num_workers=((num_workers // 4) + 1))}
    class_names = ['negative', 'positive'][0:num_classes]
    dataset_sizes = {x: len(datasets[x]) for x in ['train', 'val']}
    # ---- GPU selection: gpu_idx == -1 requests all visible GPUs ----
    if (gpu_idx == (- 1)):
        if (torch.cuda.device_count() > 1):
            print('Use', torch.cuda.device_count(), 'GPUs!')
            gpu_use = gpu_idx
        else:
            print('we dont have more GPU idx here, try to use gpu_idx=0')
            try:
                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
                gpu_use = 0
            except:
                print('GPU distributing ERRO occur use CPU instead')
    else:
        try:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_idx)
            gpu_use = gpu_idx
        except:
            print('we dont have that GPU idx here, try to use gpu_idx=0')
            try:
                os.environ['CUDA_VISIBLE_DEVICES'] = '0'
                gpu_use = 0
            except:
                print('GPU distributing ERRO occur use CPU instead')
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    # ---- model construction (optionally warm-started from a checkpoint) ----
    if (Pre_Trained_model_path is not None):
        if os.path.exists(Pre_Trained_model_path):
            # Load a 1000-class backbone, then swap in a fresh task head.
            pretrain_model = get_model(1000, edge_size, model_idx, drop_rate, attn_drop_rate, drop_path_rate, False, use_cls_token, use_pos_embedding, use_att_module)
            pretrain_model.load_state_dict(torch.load(Pre_Trained_model_path), False)
            num_features = pretrain_model.num_features
            pretrain_model.head = nn.Linear(num_features, num_classes)
            model = pretrain_model
            print('pretrain model loaded')
        else:
            print(('Pre_Trained_model_path:' + Pre_Trained_model_path), ' is NOT avaliable!!!!\n')
            print('we ignore this with a new start up')
    else:
        model = get_model(num_classes, edge_size, model_idx, drop_rate, attn_drop_rate, drop_path_rate, pretrained_backbone, use_cls_token, use_pos_embedding, use_att_module)
    print('GPU:', gpu_use)
    if (gpu_use == (- 1)):
        # Multi-GPU training path.
        model = nn.DataParallel(model)
    model.to(device)
    print('model :', model_idx)
    # ---- loss, optimizer and LR scheduler ----
    criterion = nn.CrossEntropyLoss()
    if (opt_name == 'SGD'):
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8, weight_decay=0.005)
        scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    elif (opt_name == 'Adam'):
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.01)
        scheduler = None
    if enable_sam:
        # Sharpness-Aware Minimization wraps the base optimizer.
        from utils.sam import SAM
        if (opt_name == 'SGD'):
            base_optimizer = torch.optim.SGD
            optimizer = SAM(model.parameters(), base_optimizer, lr=lr, momentum=0.8)
            scheduler = None
        elif (opt_name == 'Adam'):
            base_optimizer = torch.optim.Adam
            optimizer = SAM(model.parameters(), base_optimizer, lr=lr, weight_decay=0.01)
    if (lrf > 0):
        # Cosine decay from lr to lr*lrf over num_epochs; overrides any scheduler above.
        import math
        lf = (lambda x: ((((1 + math.cos(((x * math.pi) / num_epochs))) / 2) * (1 - lrf)) + lrf))
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # ---- train and save ----
    model_ft = train_model(model, dataloaders, criterion, optimizer, class_names, dataset_sizes, edge_size=edge_size, model_idx=model_idx, num_epochs=num_epochs, intake_epochs=intake_epochs, check_minibatch=check_minibatch, scheduler=scheduler, device=device, draw_path=draw_path, enable_attention_check=enable_attention_check, enable_visualize_check=enable_visualize_check, enable_sam=enable_sam, writer=writer)
    if (gpu_use == (- 1)):
        # Unwrap DataParallel so the checkpoint loads on a single GPU.
        torch.save(model_ft.module.state_dict(), save_model_path)
        print('model trained by multi-GPUs has its single GPU copy saved at ', save_model_path)
    else:
        torch.save(model_ft.state_dict(), save_model_path)
        print((('model trained by GPU (idx:' + str(gpu_use)) + ') has been saved at '), save_model_path)
class ETSDetectorParams(Config):
    """Configuration holder for an ETS (error/trend/seasonal) detector.

    NOTE(review): defaults are declared at class level and presumably
    consumed by the `Config` base class; several annotations do not match
    their defaults (e.g. ``seasonal_periods: str = None``) — confirm the
    intended types against the detector implementation.
    """
    max_forecast_steps: int = None  # forecast horizon; None presumably means unbounded — TODO confirm
    target_seq_index: int = None  # index of the target sequence for multivariate input — TODO confirm
    error: str = 'add'  # ETS error component (additive)
    trend: str = 'add'  # ETS trend component (additive)
    damped_trend: bool = True  # whether the trend component is damped
    seasonal: str = 'add'  # ETS seasonal component (additive)
    seasonal_periods: str = None  # season length; annotated str but defaults to None
    refit: bool = True  # presumably: refit the model when new data arrives — TODO confirm
    kwargs: dict = {}  # NOTE(review): mutable class-level default — shared across instances unless Config copies it
def add_train_opts(parser):
    """Register the training-related command line options on `parser`."""
    # (flags, add_argument kwargs) — registered in order below.
    option_specs = [
        (('--manual_seed',), dict(default=0, type=int, help='manual seed')),
        (('-j', '--workers'), dict(default=16, type=int, help='number of workers')),
        (('--epochs',), dict(default=35, type=int, help='number epochs')),
        (('--batch_size',), dict(default=128, type=int, help='batch size')),
        (('--optimizer',), dict(default='adam', choices=['rms', 'adam', 'sgd', 'adamw'])),
        (('--lr', '--learning-rate'), dict(default=0.0001, type=float, metavar='LR', help='initial learning rate')),
        (('--momentum',), dict(default=0.9, type=float)),
        (('--scheduler',), dict(default='cosine', choices=['cosine', 'step', 'multistep'], help='learning rate scheduler')),
        (('--warmup_epochs',), dict(default=5, type=int, help='number of warmup epochs to run')),
        (('--lr_decay_step',), dict(nargs='+', default=10, type=int, help='Epochs after which to decay learning rate')),
        (('--lr_decay_gamma',), dict(default=0.5, type=float, help='Factor by which to decay the learning rate')),
        (('--weight_decay',), dict(default=0.0001, type=float)),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
def test_lora_scan_layers():
    """loraize() on a Stacked module should wrap only the sublayers named in
    `target_modules`, leaving the others untouched."""

    class Module(eqx.Module):
        first: hnn.Linear
        second: hnn.Linear

        def __call__(self, x):
            return self.second(self.first(x))

        def init(*, key):
            key_a, key_b = jax.random.split(key)
            layer_one = hnn.Linear.init(In, Mid, key=key_a)
            layer_two = hnn.Linear.init(Mid, In, key=key_b)
            return Module(layer_one, layer_two)

    Layers = hax.Axis('Layers', 3)
    base_key = jax.random.PRNGKey(0)
    stack: hnn.Stacked[Module] = hnn.Stacked.init(Layers, Module)(key=jax.random.split(base_key, 3))
    lora_stack = loraize(stack, LoraConfig(r=8, target_modules=['first']), key=base_key)
    # Only `first` is targeted, so it becomes LoraLinear; `second` stays plain.
    assert isinstance(lora_stack, hnn.Stacked)
    assert isinstance(lora_stack.stacked.first, LoraLinear)
    assert isinstance(lora_stack.stacked.second, hnn.Linear)
    # LoRA factors carry the stacking axis plus the rank-8 LORA_R axis.
    assert lora_stack.stacked.first.lora.lora_A.weight.axes == (Layers, hax.Axis('LORA_R', 8), In)
    assert lora_stack.stacked.first.lora.lora_B.weight.axes == (Layers, Mid, hax.Axis('LORA_R', 8))
    assert lora_stack.stacked.second.weight.axes == (Layers, Mid, In)
    sample = hax.random.normal(base_key, (In,))
    # Wrapping must change the forward output.
    assert not hax.all(hax.isclose(stack.fold(sample), lora_stack.fold(sample)))
def load_data(args, tasks):
    """Load train/valid/test queries and answers from `args.data_path`,
    pruning query structures that were not requested.

    A structure is removed when its task name is not in `tasks`, or when its
    union-evaluation mode differs from `args.evaluate_union`.

    Returns the 8-tuple (train_queries, train_answers, valid_queries,
    valid_hard_answers, valid_easy_answers, test_queries, test_hard_answers,
    test_easy_answers).
    """
    logging.info('loading data')

    def _load_pickle(filename):
        # BUG FIX: the original used `pickle.load(open(...))`, leaking every
        # file handle; the context manager closes each file deterministically.
        with open(os.path.join(args.data_path, filename), 'rb') as f:
            return pickle.load(f)

    train_queries = _load_pickle('train-queries.pkl')
    train_answers = _load_pickle('train-answers.pkl')
    valid_queries = _load_pickle('valid-queries.pkl')
    valid_hard_answers = _load_pickle('valid-hard-answers.pkl')
    valid_easy_answers = _load_pickle('valid-easy-answers.pkl')
    test_queries = _load_pickle('test-queries.pkl')
    test_hard_answers = _load_pickle('test-hard-answers.pkl')
    test_easy_answers = _load_pickle('test-easy-answers.pkl')
    # Drop query structures for tasks that were not requested.
    for name in all_tasks:
        if ('u' in name):
            # Union tasks are encoded as '<name>-<evaluate_union>'.
            (name, evaluate_union) = name.split('-')
        else:
            evaluate_union = args.evaluate_union
        if ((name not in tasks) or (evaluate_union != args.evaluate_union)):
            query_structure = name_query_dict[(name if ('u' not in name) else '-'.join([name, evaluate_union]))]
            for queries in (train_queries, valid_queries, test_queries):
                queries.pop(query_structure, None)
    return (train_queries, train_answers, valid_queries, valid_hard_answers, valid_easy_answers, test_queries, test_hard_answers, test_easy_answers)
class BiFpnLayer(nn.Module):
    """One BiFPN layer: a list of feature-fusion nodes applied in sequence.

    Each node combines several pyramid levels (`FpnCombine`) and then
    post-processes the fused map with an (optionally separable) conv block.
    The last `num_levels` outputs form this layer's feature pyramid.
    """
    def __init__(self, feature_info, feat_sizes, fpn_config, fpn_channels, num_levels=5, pad_type='', downsample=None, upsample=None, norm_layer=nn.BatchNorm2d, act_layer=_ACT_LAYER, apply_resample_bn=False, pre_act=True, separable_conv=True, redundant_bias=False):
        super(BiFpnLayer, self).__init__()
        self.num_levels = num_levels
        # Extend the incoming feature descriptors with one entry per fusion node.
        fpn_feature_info = (feature_info + [dict(num_chs=fpn_channels, size=feat_sizes[fc['feat_level']]) for fc in fpn_config.nodes])
        self.fnode = nn.ModuleList()
        for (i, fnode_cfg) in enumerate(fpn_config.nodes):
            logging.debug('fnode {} : {}'.format(i, fnode_cfg))
            # Fusion of the node's input offsets into a single feature map.
            combine = FpnCombine(fpn_feature_info, fpn_channels, tuple(fnode_cfg['inputs_offsets']), output_size=feat_sizes[fnode_cfg['feat_level']], pad_type=pad_type, downsample=downsample, upsample=upsample, norm_layer=norm_layer, apply_resample_bn=apply_resample_bn, redundant_bias=redundant_bias, weight_method=fnode_cfg['weight_method'])
            after_combine = nn.Sequential()
            conv_kwargs = dict(in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3, padding=pad_type, bias=False, norm_layer=norm_layer, act_layer=act_layer)
            if pre_act:
                # Pre-activation variant: add an activation module before the
                # conv, and strip the activation from the conv block itself.
                conv_kwargs['bias'] = redundant_bias
                conv_kwargs['act_layer'] = None
                after_combine.add_module('act', act_layer(inplace=True))
            after_combine.add_module('conv', (SeparableConv2d(**conv_kwargs) if separable_conv else ConvBnAct2d(**conv_kwargs)))
            self.fnode.append(Fnode(combine=combine, after_combine=after_combine))
        # Descriptors for the levels this layer actually outputs.
        self.feature_info = fpn_feature_info[(- num_levels):]
    def forward(self, x: List[torch.Tensor]):
        # NOTE: appends each node output to the caller's list in place; the
        # returned pyramid is the tail of that grown list.
        for fn in self.fnode:
            x.append(fn(x))
        return x[(- self.num_levels):]
def build_keras_ensemble(data: Dataset, ensemble_size: int=5, num_hidden_layers: int=2, units: int=25, activation: Union[(str, tf.keras.layers.Activation)]='relu', independent_normal: bool=False) -> KerasEnsemble:
    """Build a KerasEnsemble of `ensemble_size` GaussianNetwork members whose
    input/output specs are derived from `data`; each member gets
    `num_hidden_layers` hidden layers of `units` units with `activation`."""
    input_spec, output_spec = get_tensor_spec_from_data(data)
    # Fresh dict per layer, in case the network mutates its layer configs.
    layer_args = [{'units': units, 'activation': activation} for _ in range(num_hidden_layers)]
    members = [
        GaussianNetwork(input_spec, output_spec, layer_args, independent_normal)
        for _ in range(ensemble_size)
    ]
    return KerasEnsemble(members)
# BUG FIX: this dispatcher call had lost its `@` and was a bare no-op
# expression; restored as the intended decorator.
@_function_dispatch(_ix__dispatcher)
def ix_(*args):
    """Construct an open mesh from multiple 1-D sequences (numpy `ix_`).

    Each input is reshaped so it varies along its own axis only, enabling
    cross-indexing such as ``a[ix_(rows, cols)]``. Boolean inputs are
    converted to the indices of their True entries; empty non-ndarray inputs
    are cast to intp so they remain valid indices. Raises ValueError for any
    input that is not 1-dimensional.
    """
    out = []
    nd = len(args)
    for (k, new) in enumerate(args):
        if (not isinstance(new, _nx.ndarray)):
            new = asarray(new)
            if (new.size == 0):
                # Explicitly type empty arrays to avoid a float default dtype.
                new = new.astype(_nx.intp)
        if (new.ndim != 1):
            raise ValueError('Cross index must be 1 dimensional')
        if issubdtype(new.dtype, _nx.bool_):
            (new,) = new.nonzero()
        # Shape (1,)*k + (size,) + (1,)*(nd-k-1): varies along axis k only.
        new = new.reshape(((((1,) * k) + (new.size,)) + ((1,) * ((nd - k) - 1))))
        out.append(new)
    return tuple(out)
def _make_features(n_samples, n_features, seed):
rnd = np.random.RandomState(seed)
return rnd.randn(n_samples, n_features) |
def test_fastica_whiten_unit_variance():
    """fit_transform with whiten='unit-variance' should yield components with
    overall unit variance."""
    rng = np.random.RandomState(0)
    data = rng.random_sample((100, 10))
    ica = FastICA(n_components=data.shape[1], whiten='unit-variance', random_state=0)
    transformed = ica.fit_transform(data)
    assert np.var(transformed) == pytest.approx(1.0)
class SimpleStructuresWrapper(SpeciesWrapper):
    """Species wrapper exposing the 'simple structures' view of a species."""

    def __init__(self, species, labels, structure_class):
        # Delegate to SpeciesWrapper with the selector/series names that
        # identify the simple-structures view.
        super().__init__(species, labels, '_simple_structures_selector',
                         'generating_series', 'Simple structures', structure_class)
class DeiTConfig(PretrainedConfig):
    """Configuration for a DeiT (Data-efficient image Transformer) model.

    Stores architecture hyper-parameters as attributes; all remaining
    keyword arguments are forwarded to `PretrainedConfig`.
    """
    model_type = 'deit'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        """Create a DeiT configuration.

        Args mirror the stored attributes: transformer width/depth/heads and
        FFN size, activation name, dropout probabilities, weight-init range,
        layer-norm epsilon, input geometry (image_size, patch_size,
        num_channels), whether QKV projections use bias, and encoder_stride.
        """
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
# BUG FIX: this line appeared as a bare statement `_utils.test()` — its `@`
# was lost; restored as the intended decorator.
@_utils.test()
def test_atomic_max_expr_evaled():
    """ti.atomic_max over a loop should leave the max of all evaluated values.

    NOTE(review): `n` is presumably a module-level constant — confirm. The
    inner `func` may be intended as a @ti.kernel; left undecorated as found.
    """
    c = ti.field(ti.i32)
    step = 42
    ti.root.place(c)

    def func():
        for i in range(n):
            ti.atomic_max(c[None], (i * step))

    func()
    assert (c[None] == ((n - 1) * step))
# BUG FIX: this line appeared as a bare `.experimental` — a syntax error; its
# decorator prefix was lost. Reconstructed as a pytest mark.
# NOTE(review): confirm the intended decorator against the project's test
# suite — it may be a custom marker object rather than `pytest.mark`.
@pytest.mark.experimental
def test_drop_duplicates(spark, duplicate_recs):
    """drop_duplicates should keep exactly one row per (user, item) pair."""
    recs = drop_duplicates(duplicate_recs)
    gt = spark.createDataFrame(data=[[0, 0, 3.0], [0, 1, 2.0], [0, 2, 1.0], [1, 0, 3.0], [1, 1, 4.0], [1, 4, 1.0], [2, 0, 5.0], [2, 2, 1.0], [2, 3, 2.0]], schema=REC_SCHEMA)
    sparkDataFrameEqual(recs, gt)
def specht_module_spanning_set(D, SGA=None):
    """Return a spanning set of the Specht module of the diagram `D`.

    `D` is a list of cells ``(row, col)``. `SGA` is a symmetric group
    algebra on ``len(D)`` letters (defaults to the group algebra of S_n over
    QQ); its group's rank must be ``len(D) - 1``. The generator is
    ``col_stab * row_stab`` (signed column antisymmetrizer times row
    symmetrizer of `D`); the spanning set is ``b * gen`` over all basis
    elements ``b``.
    """
    n = len(D)
    if (SGA is None):
        from sage.combinat.symmetric_group_algebra import SymmetricGroupAlgebra
        SGA = SymmetricGroupAlgebra(QQ, n)
    elif (SGA.group().rank() != (n - 1)):
        raise ValueError('the rank does not match the size of the diagram')
    # Bounding box of the diagram.
    nr = (max((c[0] for c in D), default=0) + 1)
    nc = (max((c[1] for c in D), default=0) + 1)
    # Group the cell indices of D by row and by column.
    row_diagram = [set() for _ in range(nr)]
    col_diagram = [set() for _ in range(nc)]
    for (i, cell) in enumerate(D):
        (x, y) = cell
        row_diagram[x].add(i)
        col_diagram[y].add(i)
    row_stab = SGA.zero()
    col_stab = SGA.zero()
    B = SGA.basis()
    for w in B.keys():
        # Image of the row/column groupings under the permutation w
        # (cells are 0-indexed, permutations act on 1..n).
        row_perm = [set() for _ in range(nr)]
        col_perm = [set() for _ in range(nc)]
        for (i, cell) in enumerate(D):
            (x, y) = cell
            row_perm[x].add((w((i + 1)) - 1))
            col_perm[y].add((w((i + 1)) - 1))
        # w stabilizes the rows (resp. columns) iff each grouping is unchanged.
        if (row_diagram == row_perm):
            row_stab += B[w]
        if (col_diagram == col_perm):
            col_stab += (w.sign() * B[w])
    gen = (col_stab * row_stab)
    return tuple([(b * gen) for b in B])
def main():
    """Entry point: run a REINFORCE agent with baseline on CartPole."""
    cfg = bootstrap()
    cfg[MODEL][ENV] = CARTPOLE
    cfg[MODEL][AGENT] = REINFORCE
    cfg[MODEL][USE_BASELINE] = True
    run(config=cfg)
def evaluate(model, test_idxs, fold, train_idxs_tmp, train_idxs):
    """Evaluate `model` on the samples at `test_idxs` as one batch; print the
    metrics and checkpoint the model when the F1 score improves.

    Returns the accumulated loss for this evaluation.

    NOTE(review): depends on module-level state not visible in this chunk
    (config, audio_features, audio_targets, optimizer, criterion,
    model_performance, save, prefix, and the globals declared below);
    `train_acc` is also read from global scope — confirm it is set by the
    training loop before this is called.
    """
    model.eval()
    batch_idx = 1
    total_loss = 0
    global max_f1, max_acc, min_mae, X_test_lens, max_prec, max_rec
    pred = np.array([])
    with torch.no_grad():
        # Build the whole test split as a single tensor batch (GPU optional).
        if config['cuda']:
            (x, y) = (Variable(torch.from_numpy(audio_features[test_idxs]).type(torch.FloatTensor), requires_grad=True).cuda(), Variable(torch.from_numpy(audio_targets[test_idxs])).cuda())
        else:
            (x, y) = (Variable(torch.from_numpy(audio_features[test_idxs]).type(torch.FloatTensor), requires_grad=True), Variable(torch.from_numpy(audio_targets[test_idxs])).type(torch.LongTensor))
        # NOTE(review): zeroing the optimizer inside a no_grad evaluation has
        # no training effect — presumably a leftover; confirm before removing.
        optimizer.zero_grad()
        output = model(x)
        loss = criterion(output, y)
        total_loss += loss.item()
        (y_test_pred, conf_matrix) = model_performance(y, output.cpu())
        # Binary-classification metrics from the 2x2 confusion matrix.
        accuracy = (float((conf_matrix[0][0] + conf_matrix[1][1])) / np.sum(conf_matrix))
        precision = (float(conf_matrix[0][0]) / (conf_matrix[0][0] + conf_matrix[0][1]))
        recall = (float(conf_matrix[0][0]) / (conf_matrix[0][0] + conf_matrix[1][0]))
        f1_score = ((2 * (precision * recall)) / (precision + recall))
        print('Accuracy: {}'.format(accuracy))
        print('Precision: {}'.format(precision))
        print('Recall: {}'.format(recall))
        print('F1-Score: {}\n'.format(f1_score))
        print(('=' * 89))
        # Checkpoint when F1 improves, train accuracy is above 90%, and the
        # score clears a 0.5 floor; also persist the train split indices.
        if ((max_f1 <= f1_score) and (train_acc > (len(train_idxs) * 0.9)) and (f1_score > 0.5)):
            max_f1 = f1_score
            max_acc = accuracy
            max_rec = recall
            max_prec = precision
            mode = 'gru'
            save(model, os.path.join(prefix, 'Model/ClassificationWhole/Audio/BiLSTM_{}_vlad{}_{}_{:.2f}_{}'.format(mode, config['embedding_size'], config['hidden_dims'], max_f1, fold)))
            np.save(os.path.join(prefix, 'Features/TextWhole/train_idxs_{:.2f}_{}.npy'.format(f1_score, fold)), train_idxs_tmp)
            print(('*' * 64))
            print('model saved: f1: {}\tacc: {}'.format(max_f1, max_acc))
            print(('*' * 64))
    return total_loss
def _create_entry(question, answer):
answer.pop('image_id')
answer.pop('question_id')
entry = {'question_id': question['question_id'], 'image_id': question['image_id'], 'question': question['question'], 'answer': answer}
return entry |
def all_newer(src_files, dst_files):
    """Return True iff every destination file exists and is newer than every
    source file."""
    for dst in dst_files:
        if not os.path.exists(dst):
            return False
        for src in src_files:
            if not newer(dst, src):
                return False
    return True
class TestNegateGradient(serial.SerializedTestCase):
    """Tests for the NegateGradient operator: identity forward, negated grad."""

    # BUG FIX: the bare tuples that preceded each test in the original were
    # hypothesis decorators whose `@given(` / `@settings(` prefixes were
    # lost; reconstructed below.
    # NOTE(review): confirm decorator names against the project's test
    # conventions (hu = hypothesis_test_util, st = hypothesis.strategies).
    @given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
    @settings(deadline=10000)
    def test_forward(self, X, inplace, gc, dc):
        def neg_grad_ref(X):
            # NegateGradient is an identity in the forward pass.
            return (X,)
        op = core.CreateOperator('NegateGradient', ['X'], [('Y' if (not inplace) else 'X')])
        self.assertReferenceChecks(gc, op, [X], neg_grad_ref)
        self.assertDeviceChecks(dc, op, [X], [0])

    @given(size=st.lists(st.integers(min_value=1, max_value=20), min_size=1, max_size=5))
    def test_grad(self, size):
        X = np.random.random_sample(size)
        workspace.ResetWorkspace()
        workspace.FeedBlob('X', X.astype(np.float32))
        net = core.Net('negate_grad_test')
        Y = net.NegateGradient(['X'], ['Y'])
        grad_map = net.AddGradientOperators([Y])
        workspace.RunNetOnce(net)
        (x_val, y_val) = workspace.FetchBlobs(['X', 'Y'])
        (x_grad_val, y_grad_val) = workspace.FetchBlobs([grad_map['X'], grad_map['Y']])
        # Forward is identity; the gradient flips the sign.
        np.testing.assert_array_equal(x_val, y_val)
        np.testing.assert_array_equal(x_grad_val, (y_grad_val * (- 1)))
def launch():
    """Demo: schedule a delayed core callback and several Timer variants."""
    TIME_TO_WAKE = 2
    args = ['ciao', 'mare']
    # One-shot delayed call through the core scheduler.
    core.callDelayed(TIME_TO_WAKE, timeout_handler, args)
    t = Timer(TIME_TO_WAKE, timeout_handler, args='t1')
    t2 = Timer(TIME_TO_WAKE, timeout_handler, absoluteTime=True, args='t2')
    # Recurring timer that fires repeatedly.
    tr = Timer(TIME_TO_WAKE, timeout_handler, absoluteTime=False, recurring=True, args='tr')
    # Created stopped; started manually after the sleep below.
    tw = Timer(TIME_TO_WAKE, timeout_handler, absoluteTime=False, recurring=False, started=False, args='tw')
    time.sleep(TIME_TO_WAKE)
    tw.start()
    # NOTE(review): args='tw' here looks like a copy-paste leftover (probably
    # meant 'tk'), and this timer is never started — confirm intent.
    tk = Timer(TIME_TO_WAKE, timeout_handler_kill, absoluteTime=False, recurring=True, started=False, selfStoppable=False, args='tw')
class Infinite(object):
    """Base progress indicator for streams of unknown length.

    Tracks an index, elapsed time and a simple-moving-average throughput
    (`avg`, seconds per item over the last `sma_window` updates), and writes
    carriage-return-updated output to `file` when it is a TTY.
    """
    file = stderr  # stream progress output is written to
    sma_window = 10  # number of recent samples in the moving average
    check_tty = True  # only render when `file` is an actual terminal
    hide_cursor = True  # hide the terminal cursor while running

    def __init__(self, message='', **kwargs):
        self.index = 0
        self.start_ts = monotonic()
        self.avg = 0
        self._avg_update_ts = self.start_ts
        self._ts = self.start_ts
        self._xput = deque(maxlen=self.sma_window)
        # Allow keyword overrides of any class-level setting (file, sma_window, ...).
        for (key, val) in kwargs.items():
            setattr(self, key, val)
        self._width = 0
        self.message = message
        if (self.file and self.is_tty()):
            if self.hide_cursor:
                print(HIDE_CURSOR, end='', file=self.file)
            print(self.message, end='', file=self.file)
            self.file.flush()

    def __getitem__(self, key):
        # Template-style attribute access; private names are hidden.
        if key.startswith('_'):
            return None
        return getattr(self, key, None)

    # BUG FIX: `elapsed` and `elapsed_td` lost their @property decorators in
    # the original; `elapsed_td` uses `self.elapsed` as a number, so without
    # @property it passed a bound method to timedelta(seconds=...), raising
    # TypeError. Restored.
    @property
    def elapsed(self):
        """Whole seconds since this indicator was created."""
        return int((monotonic() - self.start_ts))

    @property
    def elapsed_td(self):
        """`elapsed` as a timedelta."""
        return timedelta(seconds=self.elapsed)

    def update_avg(self, n, dt):
        """Fold a step of `n` items taking `dt` seconds into the moving average."""
        if (n > 0):
            xput_len = len(self._xput)
            self._xput.append((dt / n))
            now = monotonic()
            # Recompute at most once per second once the window is warm.
            if ((xput_len < self.sma_window) or ((now - self._avg_update_ts) > 1)):
                self.avg = (sum(self._xput) / len(self._xput))
                self._avg_update_ts = now

    def update(self):
        # Hook for subclasses: redraw the indicator.
        pass

    def start(self):
        pass

    def clearln(self):
        """Clear the current terminal line."""
        if (self.file and self.is_tty()):
            print('\r\x1b[K', end='', file=self.file)

    def write(self, s):
        """Rewrite the current line as message + `s`, padded to the widest
        suffix written so far."""
        if (self.file and self.is_tty()):
            line = (self.message + s.ljust(self._width))
            print(('\r' + line), end='', file=self.file)
            self._width = max(self._width, len(s))
            self.file.flush()

    def writeln(self, line):
        """Replace the current terminal line with `line`."""
        if (self.file and self.is_tty()):
            self.clearln()
            print(line, end='', file=self.file)
            self.file.flush()

    def finish(self):
        """Terminate the progress line and restore the cursor."""
        if (self.file and self.is_tty()):
            print(file=self.file)
            if self.hide_cursor:
                print(SHOW_CURSOR, end='', file=self.file)

    def is_tty(self):
        return (self.file.isatty() if self.check_tty else True)

    def next(self, n=1):
        """Advance the index by `n`, updating throughput stats and redrawing."""
        now = monotonic()
        dt = (now - self._ts)
        self.update_avg(n, dt)
        self._ts = now
        self.index = (self.index + n)
        self.update()

    def iter(self, it):
        """Iterate `it`, advancing the indicator per element; finishes on exit."""
        with self:
            for x in it:
                (yield x)
                self.next()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.finish()
class SnowballStemmer():
    """Facade selecting a language-specific stemmer by language name.

    Resolves e.g. 'english' to an `EnglishStemmer` class defined elsewhere
    in this module and exposes its `stem` method directly on the instance.
    """
    languages = ('arabic', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian', 'italian', 'norwegian', 'polish', 'portuguese', 'romanian', 'russian', 'spanish', 'swedish')

    def __init__(self, language):
        if language not in self.languages:
            raise ValueError(("The language '%s' is not supported." % language))
        # '<language>' -> '<Language>Stemmer' looked up in module globals.
        implementation = globals()[language.capitalize() + 'Stemmer']
        self.stemmer = implementation()
        self.stem = self.stemmer.stem
def build_sparse_features(data):
    """Build sparse item-feature indicator matrices plus one-hot encoders.

    Returns ``(sp_i_f, user_encoder, item_encoder)`` where `sp_i_f` holds one
    CSR item-by-feature matrix per side-information source attached to
    `data`, and the encoders are fitted over the user and item id ranges.
    """
    side_info = data.side_information_data
    feature_matrices = []
    for _name, source in vars(side_info).items():
        # (item row, feature column) pairs for every item/feature association.
        pairs = [
            (data.public_items[item], data.public_features[feat])
            for item, feats in source.items()
            for feat in feats
        ]
        row_idx = [r for r, _ in pairs]
        col_idx = [c for _, c in pairs]
        indicator = sp.csr_matrix(
            (np.ones_like(row_idx), (row_idx, col_idx)),
            dtype='float32',
            shape=(data.num_items, len(data.features)),
        )
        feature_matrices.append(indicator)
    user_encoder = OneHotEncoder()
    user_encoder.fit(np.arange(data.num_users).reshape(data.num_users, 1))
    item_encoder = OneHotEncoder()
    item_encoder.fit(np.arange(data.num_items).reshape(data.num_items, 1))
    return (feature_matrices, user_encoder, item_encoder)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.