code stringlengths 101 5.91M |
|---|
def process_log(log_file, job_types):
    """Reconstruct per-job timing from a scheduler log file.

    Recognizes three event kinds per line: job dispatch, micro-task
    scheduling, and micro-task success, and folds them into `Job` objects.

    Args:
        log_file: Path to the log file to parse.
        job_types: Mapping from job id to the job's type.

    Returns:
        dict mapping job id -> Job, after each job passes `verify()`.
    """
    jobs = {}
    with open(log_file, 'r') as handle:
        for entry in handle:
            if '[Job dispatched]' in entry:
                # Timestamp precedes the first ']'; job id trails 'Job ID:'.
                dispatched_at = float(entry.split(']')[0])
                jid = int(entry.strip().split('Job ID:')[-1])
                jobs[jid] = Job(jid, job_types[jid], dispatched_at)
            elif '[Micro-task scheduled]' in entry:
                fields = entry.strip().split('\t')
                start_field, _, jid_field, wtype_field, wid_field, allocation = fields
                jid = int(jid_field.split('Job ID:')[-1])
                job = jobs[jid]
                job.add_start_time(float(start_field.split(']')[0]))
                job.add_worker_id(int(wid_field.split('Worker ID: ')[-1]))
                job.add_worker_type(wtype_field.split('Worker type: ')[-1].strip())
                job.add_allocation(allocation)
            elif '[Micro-task succeeded]' in entry:
                end_field, _, jid_field, _, _ = entry.strip().split('\t')
                job = jobs[int(jid_field.split('Job ID:')[-1])]
                job.add_end_time(float(end_field.split(']')[0]))
    # Sanity-check internal consistency of every reconstructed job.
    for jid in jobs:
        assert jobs[jid].verify()
    return jobs
def test_lw_tree(dataset: str, version: str, workload: str, params: Dict[(str, Any)], overwrite: bool) -> None:
    """Load a pickled lightweight-tree model and evaluate it on a workload.

    When `params['use_cache']` is set, the cached featurized test vectors
    are loaded and passed to `run_test`; otherwise features are computed
    on the fly by the estimator.
    """
    model_path = (MODEL_ROOT / dataset) / f"{params['model']}.pkl"
    L.info(f'Load model from {model_path} ...')
    with open(model_path, 'rb') as fp:
        state = pickle.load(fp)
    # Rebuild the estimator from the persisted training state.
    table = load_table(dataset, state['version'])
    train_args = state['args']
    pg_est = Postgres(table, train_args.bins, state['seed'])
    estimator = LWTree(state['model'], params['model'], pg_est, table)
    L.info(f'Load and built lw(tree) estimator: {estimator}')
    if not params['use_cache']:
        run_test(dataset, version, workload, estimator, overwrite)
        return
    test_table = load_table(dataset, version)
    lw_dataset = load_lw_dataset(test_table, workload, state['seed'], train_args.bins)
    X, _, gt = lw_dataset['test']
    run_test(dataset, version, workload, estimator, overwrite, lw_vec=(X, gt))
class MetaLinear(nn.Linear, MetaModule):
    __doc__ = nn.Linear.__doc__

    def forward(self, input, params=None):
        # When no external (meta) parameters are supplied, fall back to
        # this module's own registered parameters.
        if params is None:
            params = OrderedDict(self.named_parameters())
        # `bias` may be absent (bias=False linear layers).
        return F.linear(input, params['weight'], params.get('bias', None))
class GripperJointPosition(GripperActionMode):
    """Gripper action mode that sets the gripper joint position target.

    The action is a single scalar; the gripper has two coupled joints, so
    the scalar is mirrored to both. In absolute mode the action is the
    target joint position; otherwise it is a delta applied to the current
    joint positions.
    """

    def __init__(self, attach_grasped_objects: bool = True,
                 detach_before_open: bool = True,
                 absolute_mode: bool = True):
        self._attach_grasped_objects = attach_grasped_objects
        self._detach_before_open = detach_before_open
        self._absolute_mode = absolute_mode
        # Control loop is enabled lazily on the first action.
        self._control_mode_set = False

    def action(self, scene: Scene, action: np.ndarray):
        """Run a full control step: set target, step the sim, settle target."""
        self.action_pre_step(scene, action)
        self.action_step(scene, action)
        self.action_post_step(scene, action)

    def action_pre_step(self, scene: Scene, action: np.ndarray):
        if not self._control_mode_set:
            scene.robot.gripper.set_control_loop_enabled(True)
            self._control_mode_set = True
        assert_action_shape(action, self.action_shape(scene.robot))
        # Mirror the scalar action to both (coupled) gripper joints.
        action = action.repeat(2)
        # BUG FIX: in relative (delta) mode the action must be added to the
        # current joint positions; previously the action was discarded and
        # the current positions were re-set as the target (a no-op).
        a = (action if self._absolute_mode
             else np.array(scene.robot.gripper.get_joint_positions()) + action)
        scene.robot.gripper.set_joint_target_positions(a)

    def action_step(self, scene: Scene, action: np.ndarray):
        scene.step()

    def action_post_step(self, scene: Scene, action: np.ndarray):
        # Hold the reached position so the gripper does not keep moving.
        scene.robot.gripper.set_joint_target_positions(
            scene.robot.gripper.get_joint_positions())

    def action_shape(self, scene: Scene) -> tuple:
        """One scalar per action."""
        return (1,)

    def action_bounds(self):
        """Valid joint position range in meters (fully closed .. fully open)."""
        return (np.array([0]), np.array([0.04]))
def rle2bmask(rle):
    """Decode a COCO run-length encoding into a binary uint8 mask."""
    mask = cocomask.decode(rle)
    # Multi-part encodings decode to (H, W, N); collapse to a single mask.
    if mask.ndim == 3:
        mask = np.sum(mask, axis=2)
    return mask.astype(np.uint8)
class LibrispeechLm(datasets.GeneratorBasedBuilder):
    """Text-only dataset builder for Librispeech language-model training."""

    VERSION = datasets.Version('0.1.0')
    BUILDER_CONFIG_CLASS = LibrispeechLmConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({'text': datasets.Value('string')}),
            supervised_keys=('text', 'text'),
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        archives = {}
        for split_name, files in self.config.data_files.items():
            # The external LM corpus is prepended to the training split only.
            if split_name == 'train':
                to_fetch = [self.config.lm_corpus_path] + files
            else:
                to_fetch = files
            archives[split_name] = dl_manager.download_and_extract(to_fetch)
        return [
            datasets.SplitGenerator(name=name, gen_kwargs={'archive_path': path})
            for name, path in archives.items()
        ]

    def _generate_examples(self, archive_path):
        key = 0
        for file_path in archive_path:
            with open(file_path, 'r', encoding='utf-8') as fh:
                for raw_line in fh:
                    # Drop leading utterance ids of the form "84-121123-0000 ".
                    cleaned = re.sub('\\d+-\\d+-\\d+\\s', '', raw_line)
                    text = cleaned.strip()
                    # Skip empty lines and pathologically long ones.
                    if text and len(text) < 1000:
                        yield key, {'text': text}
                        key += 1
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise-separable convolution: a depthwise conv (groups == channels)
    followed by a 1x1 pointwise conv.

    The depthwise and pointwise stages can each override the module-level
    norm/activation configs; the string 'default' means "inherit".
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[(int, Tuple[(int, int)])], stride: Union[(int, Tuple[(int, int)])]=1, padding: Union[(int, Tuple[(int, int)])]=0, dilation: Union[(int, Tuple[(int, int)])]=1, norm_cfg: Optional[Dict]=None, act_cfg: Dict=dict(type='ReLU'), dw_norm_cfg: Union[(Dict, str)]='default', dw_act_cfg: Union[(Dict, str)]='default', pw_norm_cfg: Union[(Dict, str)]='default', pw_act_cfg: Union[(Dict, str)]='default', **kwargs):
        super().__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        def _resolve(cfg, fallback):
            # 'default' means: use the module-level config instead.
            return fallback if cfg == 'default' else cfg

        # Depthwise stage: one filter group per input channel.
        self.depthwise_conv = ConvModule(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            groups=in_channels,
            norm_cfg=_resolve(dw_norm_cfg, norm_cfg),
            act_cfg=_resolve(dw_act_cfg, act_cfg),
            **kwargs)
        # Pointwise stage: 1x1 conv mixing channels.
        self.pointwise_conv = ConvModule(
            in_channels, out_channels, 1,
            norm_cfg=_resolve(pw_norm_cfg, norm_cfg),
            act_cfg=_resolve(pw_act_cfg, act_cfg),
            **kwargs)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.pointwise_conv(self.depthwise_conv(x))
def _set_wrap_both(padded, axis, width_pair):
    """Fill the pad areas of one axis in "wrap" (circular) mode, in place.

    Copies at most one period of the array's own values into each pad area
    per call and returns the pad widths still left to fill, so the caller
    is expected to iterate until both returned widths are 0.

    Parameters
    ----------
    padded : ndarray
        Array with the pad areas already allocated along `axis`.
    axis : int
        Axis along which to wrap.
    width_pair : (int, int)
        Remaining (left, right) pad widths for this axis.

    Returns
    -------
    (int, int)
        (new_left_pad, new_right_pad) still left to fill after this pass.
    """
    (left_pad, right_pad) = width_pair
    # Length of the original (non-pad) data along this axis.
    period = ((padded.shape[axis] - right_pad) - left_pad)
    new_left_pad = 0
    new_right_pad = 0
    if (left_pad > 0):
        # Source: the rightmost chunk of the original area, at most one
        # full period long (right_pad == 0 needs `None` as the stop bound).
        right_slice = _slice_at_axis(slice(((- right_pad) - min(period, left_pad)), ((- right_pad) if (right_pad != 0) else None)), axis)
        right_chunk = padded[right_slice]
        if (left_pad > period):
            # More than one period still missing: fill only the innermost
            # chunk of the pad area and report the remainder.
            pad_area = _slice_at_axis(slice((left_pad - period), left_pad), axis)
            new_left_pad = (left_pad - period)
        else:
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk
    if (right_pad > 0):
        # Mirror image of the branch above for the right pad area: source
        # is the leftmost chunk of the original data.
        left_slice = _slice_at_axis(slice(left_pad, (left_pad + min(period, right_pad))), axis)
        left_chunk = padded[left_slice]
        if (right_pad > period):
            pad_area = _slice_at_axis(slice((- right_pad), ((- right_pad) + period)), axis)
            new_right_pad = (right_pad - period)
        else:
            pad_area = _slice_at_axis(slice((- right_pad), None), axis)
        padded[pad_area] = left_chunk
    return (new_left_pad, new_right_pad)
class PlusInfinity(_uniq, AnInfinity, InfinityElement):
    """The element +infinity of the infinity ring."""

    _sign = 1
    _sign_char = '+'

    def __init__(self):
        InfinityElement.__init__(self, InfinityRing)

    def __hash__(self):
        # Largest machine int serves as a stable sentinel hash.
        return maxsize

    def _richcmp_(self, other, op):
        # +infinity equals itself and compares greater than anything else.
        outcome = 0 if isinstance(other, PlusInfinity) else 1
        return rich_to_bool(op, outcome)

    def _neg_(self):
        # Negating +infinity yields the ring's other generator, -infinity.
        return self.parent().gen(1)

    def sqrt(self):
        # sqrt(+oo) is +oo.
        return self

    def _sympy_(self):
        import sympy
        return sympy.oo

    def _gap_init_(self):
        return 'infinity'
.parametrize('hidden_units,activation', [(hidden_units, activation) for hidden_units in [(), (10,)] for activation in ['sigmoid', Dice, PReLU]])
def test_LocalActivationUnit(hidden_units, activation):
    """Smoke-test LocalActivationUnit over hidden-unit/activation combos.

    Non-sigmoid activations are skipped on TF >= 1.13.0.
    """
    def _release_tuple(version):
        # Compare release numbers numerically, not lexicographically.
        parts = []
        for piece in version.split('.'):
            digits = ''.join(ch for ch in piece if ch.isdigit())
            if not digits:
                break
            parts.append(int(digits))
        return tuple(parts)

    # BUG FIX: the original compared version *strings*, so for example
    # '1.9.0' >= '1.13.0' evaluated to True and skipped the wrong cases.
    if _release_tuple(tf.__version__) >= (1, 13, 0) and activation != 'sigmoid':
        return
    with CustomObjectScope({'LocalActivationUnit': layers.LocalActivationUnit}):
        layer_test(layers.LocalActivationUnit,
                   kwargs={'hidden_units': hidden_units,
                           'activation': activation,
                           'dropout_rate': 0.5},
                   input_shape=[(BATCH_SIZE, 1, EMBEDDING_SIZE),
                                (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)])
class GCDataset():
    """Goal-conditioned wrapper around a transition dataset.

    For each sampled transition a goal index is drawn from a mixture of:
      * a uniformly random state                (prob. p_randomgoal)
      * a future state of the same trajectory   (prob. p_trajgoal)
      * the current state itself                (prob. p_currgoal)
    Rewards are `reward_scale * success + reward_shift` where success means
    the goal equals the current state.
    """
    dataset: Dataset
    p_randomgoal: float
    p_trajgoal: float
    p_currgoal: float
    geom_sample: int       # nonzero: geometric future-goal offsets
    discount: float
    terminal_key: str = 'dones_float'
    reward_scale: float = 1.0
    reward_shift: float = -1.0
    terminal: bool = True  # if True, mask out successful (terminal) goals

    @staticmethod
    def get_default_config():
        """Return the default hyper-parameter configuration.

        BUG FIX: declared as @staticmethod — the function takes no `self`,
        so calling it on an instance previously raised a TypeError.
        """
        return ml_collections.ConfigDict({'p_randomgoal': 0.3, 'p_trajgoal': 0.5, 'p_currgoal': 0.2, 'geom_sample': 0, 'reward_scale': 1.0, 'reward_shift': (- 1.0), 'terminal': True})

    def __post_init__(self):
        # Indices of trajectory-terminal states, used to bound future goals.
        (self.terminal_locs,) = np.nonzero(self.dataset[self.terminal_key] > 0)
        assert np.isclose(self.p_randomgoal + self.p_trajgoal + self.p_currgoal, 1.0)

    def sample_goals(self, indx, p_randomgoal=None, p_trajgoal=None, p_currgoal=None):
        """Sample one goal index per element of `indx` from the goal mixture."""
        if p_randomgoal is None:
            p_randomgoal = self.p_randomgoal
        if p_trajgoal is None:
            p_trajgoal = self.p_trajgoal
        if p_currgoal is None:
            p_currgoal = self.p_currgoal
        batch_size = len(indx)
        # Start from uniformly random goals; subsets are overwritten below.
        goal_indx = np.random.randint(self.dataset.size, size=batch_size)
        # Final state of the trajectory containing each index.
        final_state_indx = self.terminal_locs[np.searchsorted(self.terminal_locs, indx)]
        distance = np.random.rand(batch_size)
        if self.geom_sample:
            # Geometric offset with success probability (1 - discount),
            # truncated at the trajectory boundary.
            us = np.random.rand(batch_size)
            middle_goal_indx = np.minimum(
                indx + np.ceil(np.log(1 - us) / np.log(self.discount)).astype(int),
                final_state_indx)
        else:
            # Interpolate between the next state and the final state.
            middle_goal_indx = np.round(
                (np.minimum(indx + 1, final_state_indx) * distance)
                + (final_state_indx * (1 - distance))).astype(int)
        # Trajectory goals are chosen with conditional probability
        # p_trajgoal / (1 - p_currgoal) among the non-current goals.
        goal_indx = np.where(
            np.random.rand(batch_size) < (p_trajgoal / (1.0 - p_currgoal)),
            middle_goal_indx, goal_indx)
        # Finally, with probability p_currgoal the goal is the current state.
        goal_indx = np.where(np.random.rand(batch_size) < p_currgoal, indx, goal_indx)
        return goal_indx

    def sample(self, batch_size: int, indx=None):
        """Sample a goal-relabeled batch of transitions."""
        if indx is None:
            indx = np.random.randint(self.dataset.size - 1, size=batch_size)
        batch = self.dataset.sample(batch_size, indx)
        goal_indx = self.sample_goals(indx)
        success = (indx == goal_indx)
        batch['rewards'] = success.astype(float) * self.reward_scale + self.reward_shift
        if self.terminal:
            # Episodes end when the goal is reached.
            batch['masks'] = 1.0 - success.astype(float)
        else:
            batch['masks'] = np.ones(batch_size)
        batch['goals'] = jax.tree_map(lambda arr: arr[goal_indx], self.dataset['observations'])
        return batch
def plot_column_per_patient(df_demo: pd.DataFrame, path_to_output_dir: str, column: str, x_label: str, title: str, max_clamp: int=None):
    """Plot per-split histograms of a per-patient column and save to disk.

    Args:
        df_demo: Dataframe with one row per patient, including a 'split' column.
        path_to_output_dir: Directory where the PNG is written.
        column: Column of `df_demo` to histogram.
        x_label: X-axis label.
        title: Used in the figure's suptitle.
        max_clamp: If given, values are clamped to this maximum before plotting.
    """
    fig, axes = plt.subplots(1, 3, figsize=(20, 5))
    for idx, split in enumerate(['train', 'val', 'test']):
        df_ = df_demo[df_demo['split'] == split]
        counts = df_[column].tolist()
        if max_clamp:
            # Clamp outliers so the histogram stays readable.
            counts = [min(count, max_clamp) for count in counts]
        axes[idx].hist(counts, bins=100)
        axes[idx].set_xlabel(x_label)
        axes[idx].set_ylabel('# of Patients')
        # BUG FIX: removed axes[idx].legend() — no artists are labeled, so
        # it drew nothing and emitted a "no handles" warning per subplot.
        axes[idx].set_title(f'{split} (n={len(counts)})')
    fig.suptitle(f'Distribution of {title}/patient')
    plt.savefig(os.path.join(path_to_output_dir, f'{column}_per_patient.png'))
    plt.show()
def test_pipeline_with_dependencies():
    """A pass that depends on MyPass must see MyPass's result in the pipeline."""

    class PassA(MyPass):
        def depends_on(self):
            # Declare MyPass as a prerequisite of this pass.
            return {MyPass}

        def apply_pass(self, sdfg, pipeline_results):
            own_result = super().apply_pass(sdfg, pipeline_results)
            # Combine the dependency's result with our own.
            return pipeline_results['MyPass'] + own_result

    pass_a = PassA()
    pipeline = ppl.Pipeline([pass_a])
    sdfg = empty.to_sdfg()
    result = pipeline.apply_pass(sdfg, {})
    # The pass ran exactly once, and both results are reported.
    assert pass_a.applied == 1
    assert result == {'MyPass': 1, 'PassA': 2}
class _MyFormatter(logging.Formatter):
    """Log formatter with a colored timestamp and per-level colored tags."""

    def format(self, record):
        date = colored('[%(asctime)s %(filename)s:%(lineno)d]', 'green')
        msg = '%(message)s'
        if record.levelno == logging.WARNING:
            tag = colored('WRN', 'red', attrs=['blink'])
            fmt = date + ' ' + tag + ' ' + msg
        elif record.levelno in (logging.ERROR, logging.CRITICAL):
            tag = colored('ERR', 'red', attrs=['blink', 'underline'])
            fmt = date + ' ' + tag + ' ' + msg
        else:
            fmt = date + ' ' + msg
        # Py3 formatters keep the format on a style object; older ones on
        # the formatter itself — set both so either code path works.
        if hasattr(self, '_style'):
            self._style._fmt = fmt
        self._fmt = fmt
        return super(_MyFormatter, self).format(record)
class staggered_object_creation(object):
    """Context manager / decorator that staggers work across distributed ranks.

    Odd ranks run the guarded block first while even ranks wait at a barrier
    on entry; once odd ranks exit, even ranks proceed. This halves the number
    of processes doing the expensive work simultaneously. With world_size <= 1
    it is a no-op wrapper.
    """

    def __init__(self, local_rank: int, world_size: int):
        super().__init__()
        self.local_rank = local_rank
        self.world_size = world_size

    def __enter__(self, *args, **kwargs):
        del args, kwargs
        # Even ranks block here until odd ranks reach the barrier in __exit__.
        if self.world_size > 1 and self.local_rank % 2 == 0:
            dist.barrier()
        return self

    def __exit__(self, *args, **kwargs):
        del args, kwargs
        if self.world_size > 1:
            # Odd ranks release the even ranks' entry barrier, then everyone
            # synchronizes once more before continuing.
            if self.local_rank % 2 == 1:
                dist.barrier()
            dist.barrier()

    def __call__(self, func):
        from functools import wraps  # local import keeps the block self-contained

        # BUG FIX: use functools.wraps so the wrapped function keeps its
        # __name__, __doc__ and other metadata.
        @wraps(func)
        def decorator(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return decorator
def run_single_experiment(dataset: str, savedir: str, named_configs: List, config_updates: Dict[(str, Any)]):
    """Run one TAPE training experiment, storing observer output under savedir.

    Args:
        dataset: Task name, also used as the output subdirectory.
        savedir: Root directory for experiment results.
        named_configs: Sacred named configs forwarded to the experiment.
        config_updates: Sacred config overrides; training defaults are merged in.
    """
    from tape.__main__ import proteins
    config_updates.update({'training': {'learning_rate': 0.0001, 'use_memory_saving_gradients': True}, 'num_epochs': 1000, 'steps_per_epoch': 200, 'tasks': dataset})
    # BUG FIX: os.mkdir fails when parent directories are missing and races
    # with concurrent runs; makedirs(..., exist_ok=True) handles both.
    os.makedirs(savedir, exist_ok=True)
    # Replace the default observer directory with one rooted at savedir.
    shutil.rmtree(proteins.observers[0].basedir)
    proteins.observers[0] = FileStorageObserver.create(os.path.join(savedir, dataset))
    proteins.run(named_configs=named_configs, config_updates=config_updates)
def DoWhile(name, condition_blob_or_net, nets_or_steps):
    """Build a do-while execution step: run the body, then re-test the condition."""
    condition_not_net, stop_blob = NotNet(condition_blob_or_net)
    # If the condition is itself a net it must execute inside the loop body
    # before its negation; otherwise only the negation net is appended.
    if isinstance(condition_blob_or_net, core.Net):
        nets_or_steps = _AppendNets(nets_or_steps, condition_blob_or_net, condition_not_net)
    else:
        nets_or_steps = _AppendNets(nets_or_steps, condition_not_net)
    # Initialize the stop blob to False so the body runs at least once.
    bool_net = BoolNet((stop_blob, False))
    inner_step = core.scoped_execution_step(
        _get_next_step_name('DoWhile-inner', name),
        nets_or_steps,
        should_stop_blob=stop_blob)
    return Do(name + '/DoWhile', bool_net, inner_step)
_utils.test()
def test_floor_div_pythonic():
    """Floor division inside the kernel must match Python's // semantics,
    including for negative operands."""
    z = ti.field(ti.i32, shape=())

    def func(x: ti.i32, y: ti.i32):
        z[None] = x // y

    for numerator in range(-10, 11):
        for denominator in range(-10, 11):
            if denominator == 0:
                continue  # division by zero is undefined; skip
            func(numerator, denominator)
            assert z[None] == numerator // denominator
class MultiplicativeNCSymBases(Category_realization_of_parent):
    """Category of multiplicative bases of symmetric functions in
    non-commuting variables."""

    def super_categories(self):
        # Every multiplicative basis is in particular an NCSym basis.
        return [NCSymBases(self.base())]

    def _repr_(self):
        base_ring = self.base().base_ring()
        return 'Category of multiplicative bases of symmetric functions in non-commuting variables over the {}'.format(base_ring)

    class ParentMethods():
        def product_on_basis(self, A, B):
            # In a multiplicative basis, the product of basis elements is the
            # basis element indexed by the combined set partition A.pipe(B).
            return self.monomial(A.pipe(B))

    class ElementMethods():
        pass
('revnet-56')
class RevNet56Config(ResNet50Config):
    """Configuration for a RevNet-56 (reversible residual network) model,
    derived from the ResNet-50 defaults."""

    def __init__(self):
        super(RevNet56Config, self).__init__()
        self.model_class = 'revnet'
        # Reversible blocks recompute activations, so gradients are managed
        # manually rather than by autodiff bookkeeping.
        self.manual_gradients = True
        self.num_residual_units = [2, 2, 3, 2]
        self.filters = [128, 128, 256, 512, 832]
def _str2bool(value):
    """Parse a command-line boolean: accepts true/false, yes/no, t/f, y/n, 1/0."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')


def main():
    """Entry point of the DE-GNN framework: parse CLI args, then either run a
    simulation or train/evaluate a model on the requested dataset."""
    parser = argparse.ArgumentParser('Interface for DE-GNN framework')
    parser.add_argument('--dataset', type=str, default='celegans', help='dataset name')
    parser.add_argument('--test_ratio', type=float, default=0.1, help='ratio of the test against whole')
    parser.add_argument('--model', type=str, default='DE-GNN', help='model to use', choices=['DE-GNN', 'GIN', 'GCN', 'GraphSAGE', 'GAT'])
    parser.add_argument('--layers', type=int, default=2, help='largest number of layers')
    parser.add_argument('--hidden_features', type=int, default=100, help='hidden dimension')
    parser.add_argument('--metric', type=str, default='auc', help='metric for evaluating performance', choices=['acc', 'auc'])
    parser.add_argument('--seed', type=int, default=0, help='seed to initialize all the random modules')
    parser.add_argument('--gpu', type=int, default=0, help='gpu id')
    parser.add_argument('--data_usage', type=float, default=1.0, help='use partial dataset')
    # BUG FIX: type=bool treats any non-empty string (including "False") as
    # True; _str2bool parses the value properly. Defaults are unchanged.
    parser.add_argument('--directed', type=_str2bool, default=False, help='(Currently unavailable) whether to treat the graph as directed')
    parser.add_argument('--parallel', default=False, action='store_true', help='(Currently unavailable) whether to use multi cpu cores to prepare data')
    parser.add_argument('--prop_depth', type=int, default=1, help='propagation depth (number of hops) for one layer')
    parser.add_argument('--use_degree', type=_str2bool, default=True, help='whether to use node degree as the initial feature')
    parser.add_argument('--use_attributes', type=_str2bool, default=False, help='whether to use node attributes as the initial feature')
    parser.add_argument('--feature', type=str, default='sp', help='distance encoding category: shortest path or random walk (landing probabilities)')
    parser.add_argument('--rw_depth', type=int, default=3, help='random walk steps')
    parser.add_argument('--max_sp', type=int, default=3, help='maximum distance to be encoded for shortest path feature')
    parser.add_argument('--epoch', type=int, default=1000, help='number of epochs to train')
    parser.add_argument('--bs', type=int, default=64, help='minibatch size')
    parser.add_argument('--lr', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--optimizer', type=str, default='sgd', help='optimizer to use')
    parser.add_argument('--l2', type=float, default=0, help='l2 regularization weight')
    parser.add_argument('--dropout', type=float, default=0, help='dropout rate')
    parser.add_argument('--k', type=int, default=3, help='node degree (k) or synthetic k-regular graph')
    parser.add_argument('--n', nargs='*', help='a list of number of nodes in each connected k-regular subgraph')
    parser.add_argument('--N', type=int, default=1000, help='total number of nodes in simultation')
    parser.add_argument('--T', type=int, default=6, help='largest number of layers to be tested')
    parser.add_argument('--log_dir', type=str, default='./log/', help='log directory')
    parser.add_argument('--summary_file', type=str, default='result_summary.log', help='brief summary of training result')
    parser.add_argument('--debug', default=False, action='store_true', help='whether to use debug mode')
    sys_argv = sys.argv
    try:
        args = parser.parse_args()
    # BUG FIX: was a bare `except:`; argparse raises SystemExit on bad
    # arguments, which is what we want to intercept to print help.
    except SystemExit:
        parser.print_help()
        sys.exit(0)
    check(args)
    logger = set_up_log(args, sys_argv)
    set_random_seed(args)
    if args.dataset == 'simulation':
        # Simulation mode has its own pipeline and skips training.
        results = simulate(args, logger)
        save_simulation_result(results, logger)
        return
    (G, labels), task = read_file(args, logger)
    dataloaders, out_features = get_data(G, task=task, labels=labels, args=args, logger=logger)
    storage = estimate_storage(dataloaders, ['train_loader', 'val_loader', 'test_loader'], logger)
    model = get_model(layers=args.layers,
                      in_features=dataloaders[0].dataset[0].x.shape[-1],
                      out_features=out_features,
                      prop_depth=args.prop_depth, args=args, logger=logger)
    results = train_model(model, dataloaders, args, logger)
    save_performance_result(args, logger, results)
class ClientComm():
    """Client-side communication loop for a learning agent.

    Reads newline-delimited messages from a server socket, deserializes them
    into a SerializableStateObservation, and dispatches on the observation's
    phase (START / INIT / ACT / END / ABORT / FINISH), replying to the server
    after each handled phase.
    """

    def __init__(self, agentName):
        self.TOKEN_SEP = '#'  # separates the message id from the payload
        self.io = IOSocket(CompetitionParameters.SOCKET_PORT)
        self.sso = SerializableStateObservation()
        self.agentName = agentName  # module name of the agent to import
        self.lastMessageId = 0
        self.LOG = False
        self.player = None  # agent instance, created in startAgent()
        self.global_ect = None  # timer for the whole learning run, set in start()
        self.lastSsoType = LEARNING_SSO_TYPE.JSON

    def startComm(self):
        """Initialize the socket buffers and run the listen loop; any failure
        is logged and terminates the process."""
        self.io.initBuffers()
        try:
            self.listen()
        except Exception as e:
            logging.exception(e)
            print('Start listen [FAILED]')
            traceback.print_exc()
            sys.exit()
    '\n * Method that perpetually listens for messages from the server.\n * With the use of additional helper methods, this function interprets\n * messages and represents the core response-generation methodology of the agent.\n * IOException\n '
    def listen(self):
        """Main receive loop: read a line, parse it, then dispatch on phase.

        The phase may arrive either as a Phase enum value or as its string
        name; string values are normalized to the enum before handling.
        The loop ends when the FINISH phase is seen (line set to None).
        """
        line = ''
        while (line is not None):
            line = self.io.readLine()
            line = line.rstrip('\r\n')
            self.processLine(line)
            if (self.sso.phase == Phase.START):
                self.start()
            elif (self.sso.phase == 'INIT'):
                self.sso.phase = Phase.INIT
                self.init()
            elif (self.sso.phase == Phase.INIT):
                self.init()
            elif (self.sso.phase == 'END'):
                self.sso.phase = Phase.END
                self.result()
            elif (self.sso.phase == Phase.END):
                self.result()
            elif (self.sso.phase == 'ABORT'):
                self.sso.phase = Phase.ABORT
                self.result()
            elif (self.sso.phase == Phase.ABORT):
                self.result()
            elif (self.sso.phase == 'ACT'):
                self.sso.phase = Phase.ACT
                self.act()
            elif (self.sso.phase == Phase.ACT):
                self.act()
            elif (self.sso.phase == Phase.FINISH):
                line = None
            elif (self.sso.phase == 'FINISH'):
                line = None
            else:
                # Unknown phase: report an error back to the server.
                self.io.writeToServer(self.lastMessageId, 'ERROR', self.LOG)
    '\n Helper method that converts a given dictionary into\n a correct SSO type\n '
    def as_sso(self, d):
        self.sso.__dict__.update(d)
        return self.sso

    def parse_json(self, input):
        """Deserialize a JSON payload into self.sso, rebuilding the nested
        observation arrays (grid and per-category position lists) as
        Observation objects with the dimensions announced in the payload."""
        parsed_input = json.loads(input)
        self.sso.__dict__.update(parsed_input)
        if parsed_input.get('observationGrid'):
            self.sso.observationGrid = [[[None for j in range(self.sso.observationGridMaxCol)] for i in range(self.sso.observationGridMaxRow)] for k in range(self.sso.observationGridNum)]
            for i in range(self.sso.observationGridNum):
                for j in range(len(parsed_input['observationGrid'][i])):
                    for k in range(len(parsed_input['observationGrid'][i][j])):
                        self.sso.observationGrid[i][j][k] = Observation(parsed_input['observationGrid'][i][j][k])
        # The remaining blocks follow the same pattern for each category of
        # positions: allocate a None-filled 2-D list, then wrap each parsed
        # entry in an Observation.
        if parsed_input.get('NPCPositions'):
            self.sso.NPCPositions = [[None for j in range(self.sso.NPCPositionsMaxRow)] for i in range(self.sso.NPCPositionsNum)]
            for i in range(self.sso.NPCPositionsNum):
                for j in range(len(parsed_input['NPCPositions'][i])):
                    self.sso.NPCPositions[i][j] = Observation(parsed_input['NPCPositions'][i][j])
        if parsed_input.get('immovablePositions'):
            self.sso.immovablePositions = [[None for j in range(self.sso.immovablePositionsMaxRow)] for i in range(self.sso.immovablePositionsNum)]
            for i in range(self.sso.immovablePositionsNum):
                for j in range(len(parsed_input['immovablePositions'][i])):
                    self.sso.immovablePositions[i][j] = Observation(parsed_input['immovablePositions'][i][j])
        if parsed_input.get('movablePositions'):
            self.sso.movablePositions = [[None for j in range(self.sso.movablePositionsMaxRow)] for i in range(self.sso.movablePositionsNum)]
            for i in range(self.sso.movablePositionsNum):
                for j in range(len(parsed_input['movablePositions'][i])):
                    self.sso.movablePositions[i][j] = Observation(parsed_input['movablePositions'][i][j])
        if parsed_input.get('resourcesPositions'):
            self.sso.resourcesPositions = [[None for j in range(self.sso.resourcesPositionsMaxRow)] for i in range(self.sso.resourcesPositionsNum)]
            for i in range(self.sso.resourcesPositionsNum):
                for j in range(len(parsed_input['resourcesPositions'][i])):
                    self.sso.resourcesPositions[i][j] = Observation(parsed_input['resourcesPositions'][i][j])
        if parsed_input.get('portalsPositions'):
            self.sso.portalsPositions = [[None for j in range(self.sso.portalsPositionsMaxRow)] for i in range(self.sso.portalsPositionsNum)]
            for i in range(self.sso.portalsPositionsNum):
                for j in range(len(parsed_input['portalsPositions'][i])):
                    self.sso.portalsPositions[i][j] = Observation(parsed_input['portalsPositions'][i][j])
        if parsed_input.get('fromAvatarSpritesPositions'):
            self.sso.fromAvatarSpritesPositions = [[None for j in range(self.sso.fromAvatarSpritesPositionsMaxRow)] for i in range(self.sso.fromAvatarSpritesPositionsNum)]
            for i in range(self.sso.fromAvatarSpritesPositionsNum):
                for j in range(len(parsed_input['fromAvatarSpritesPositions'][i])):
                    self.sso.fromAvatarSpritesPositions[i][j] = Observation(parsed_input['fromAvatarSpritesPositions'][i][j])
    "\n * Method that interprets the received messages from the server's side.\n * A message can either be a string (in the case of initialization), or\n * a json object containing an encapsulated state observation.\n * This method deserializes the json object into a local state observation\n * instance.\n * msg Message received from server to be interpreted.\n * IOException\n "
    def processLine(self, msg):
        try:
            if (msg is None):
                print('Message is null')
                return
            # Messages are "<id>#<payload>"; payload is either a phase token
            # (START/FINISH) or a JSON-encoded state observation.
            message = msg.split(self.TOKEN_SEP)
            if (len(message) < 2):
                print('Message not complete')
                return
            self.lastMessageId = message[0]
            js = message[1]
            self.sso = SerializableStateObservation()
            if (js == 'START'):
                self.sso.phase = Phase.START
            elif (js == 'FINISH'):
                self.sso.phase = Phase.FINISH
            else:
                # NOTE(review): the result of replace() is discarded — this
                # line is a no-op; probably intended `js = js.replace(...)`.
                js.replace('"', '')
                self.parse_json(js)
            if (self.sso.phase == 'ACT'):
                # Decode the observation image only when the agent asked for
                # image (or image+json) observations.
                if ((self.lastSsoType == LEARNING_SSO_TYPE.IMAGE) or (self.lastSsoType == 'IMAGE') or (self.lastSsoType == LEARNING_SSO_TYPE.BOTH) or (self.lastSsoType == 'BOTH')):
                    if self.sso.imageArray:
                        self.sso.convertBytesToPng(self.sso.imageArray)
        except Exception as e:
            logging.exception(e)
            print('Line processing [FAILED]')
            traceback.print_exc()
            sys.exit()
    '\n * Manages the start of the communication. It starts the whole process, and sets up the timer for the whole run.\n '
    def start(self):
        # Budget for the entire learning run.
        self.global_ect = ElapsedCpuTimer()
        self.global_ect.setMaxTimeMillis(CompetitionParameters.TOTAL_LEARNING_TIME)
        # Separate, shorter budget for agent startup alone.
        ect = ElapsedCpuTimer()
        ect.setMaxTimeMillis(CompetitionParameters.START_TIME)
        self.startAgent()
        if ect.exceededMaxTime():
            self.io.writeToServer(self.lastMessageId, 'START_FAILED', self.LOG)
        else:
            self.io.writeToServer(self.lastMessageId, (('START_DONE' + '#') + self.lastSsoType), self.LOG)

    def startAgent(self):
        """Import the agent module named by self.agentName and instantiate its
        `Agent` class; any failure terminates the process."""
        try:
            try:
                module = importlib.import_module(self.agentName, __name__)
                try:
                    self.player = getattr(module, 'Agent')()
                    self.lastSsoType = self.player.lastSsoType
                except AttributeError:
                    logging.error('ERROR: Class does not exist')
                    traceback.print_exc()
                    sys.exit()
            except ImportError:
                logging.error('ERROR: Module does not exist')
                traceback.print_exc()
                sys.exit()
            print('Agent startup [OK]')
        except Exception as e:
            logging.exception(e)
            print('Agent startup [FAILED]')
            traceback.print_exc()
            sys.exit()
    '\n * Manages the init of a game played.\n '
    def init(self):
        ect = ElapsedCpuTimer()
        ect.setMaxTimeMillis(CompetitionParameters.INITIALIZATION_TIME)
        self.player.init(self.sso, ect.copy())
        # The agent may change its preferred observation type during init.
        self.lastSsoType = self.player.lastSsoType
        if ect.exceededMaxTime():
            self.io.writeToServer(self.lastMessageId, 'INIT_FAILED', self.LOG)
        else:
            self.io.writeToServer(self.lastMessageId, (('INIT_DONE' + '#') + self.lastSsoType), self.LOG)
    '\n * Manages the action request for an agent. The agent is requested for an action,\n * which is sent back to the server\n '
    def act(self):
        ect = ElapsedCpuTimer()
        ect.setMaxTimeMillis(CompetitionParameters.ACTION_TIME)
        action = str(self.player.act(self.sso, ect.copy()))
        if ((not action) or (action == '')):
            action = 'ACTION_NIL'
        self.lastSsoType = self.player.lastSsoType
        if ect.exceededMaxTime():
            # Over the hard disqualification budget ends the game; over the
            # soft budget sends a NIL action instead.
            if (ect.elapsedNanos() > (CompetitionParameters.ACTION_TIME_DISQ * 1000000)):
                self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
            else:
                self.io.writeToServer(self.lastMessageId, (('ACTION_NIL' + '#') + self.lastSsoType), self.LOG)
        else:
            self.io.writeToServer(self.lastMessageId, ((action + '#') + self.lastSsoType), self.LOG)
    "\n * Manages the aresult sent to the agent. The time limit for this call will be TOTAL_LEARNING_TIME\n * or EXTRA_LEARNING_TIME if current global time is beyond TOTAL_LEARNING_TIME.\n * The agent is assumed to return the next level to play. It will be ignored if\n * a) All training levels have not been played yet (in which case the starting sequence 0-1-2 continues).\n * b) It's outside the range [0,4] (in which case we play one at random)\n * c) or we are in the validation phase (in which case the starting sequence 3-4 continues).\n "
    def result(self):
        ect = ElapsedCpuTimer()
        # Use the remaining global budget, or the extra-time budget once the
        # global budget has been exhausted.
        if (not self.global_ect.exceededMaxTime()):
            ect = self.global_ect.copy()
        else:
            ect.setMaxTimeMillis(CompetitionParameters.EXTRA_LEARNING_TIME)
        nextLevel = self.player.result(self.sso, ect.copy())
        self.lastSsoType = self.player.lastSsoType
        if ect.exceededMaxTime():
            self.io.writeToServer(self.lastMessageId, 'END_OVERSPENT', self.LOG)
        elif self.global_ect.exceededMaxTime():
            end_message = ('END_VALIDATION' if self.sso.isValidation else 'END_TRAINING')
            self.io.writeToServer(self.lastMessageId, end_message, self.LOG)
        else:
            self.io.writeToServer(self.lastMessageId, ((str(nextLevel) + '#') + self.lastSsoType), self.LOG)
def certificate_matches(certificate, known_hash):
    """Return True iff the SHA-256 of the certificate's PEM body equals known_hash.

    The PEM armor lines, spaces and newlines are stripped first, so only the
    base64 payload is hashed. Returns False on any error.
    """
    try:
        cert_pem = certificate.exportKey()
        # exportKey() may return bytes; normalize to str so the textual
        # replacements below work. (The original mixed bytes and str, so it
        # always raised and returned False.)
        if isinstance(cert_pem, bytes):
            cert_pem = cert_pem.decode('ascii')
        cert_body = cert_pem.replace('-----BEGIN CERTIFICATE-----', '')
        cert_body = cert_body.replace('-----END CERTIFICATE-----', '')
        cert_body = cert_body.replace(' ', '')
        cert_body = cert_body.replace('\n', '')
        # BUG FIX: the original overwrote the `known_hash` parameter with the
        # computed digest and then compared it to itself — always True.
        cert_hash = hashlib.sha256(cert_body.encode('ascii')).hexdigest()
        return cert_hash == known_hash
    except Exception:
        return False
class TestToeplitz():
    """Tests for dense/batched Toeplitz construction and matrix products."""

    def setup_method(self, method):
        unlock = os.getenv('UNLOCK_SEED')
        # Seed deterministically unless the seed is explicitly unlocked.
        if unlock is None or unlock.lower() == 'false':
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)

    def test_sym_toeplitz_constructs_tensor_from_vector(self):
        first_column = torch.tensor([1, 6, 4, 5], dtype=torch.float)
        expected = torch.tensor(
            [[1, 6, 4, 5], [6, 1, 6, 4], [4, 6, 1, 6], [5, 4, 6, 1]],
            dtype=torch.float)
        assert torch.equal(sym_toeplitz(first_column), expected)

    def test_toeplitz_matmul(self):
        col = torch.tensor([1, 6, 4, 5], dtype=torch.float)
        row = torch.tensor([1, 2, 1, 1], dtype=torch.float)
        rhs = torch.randn(4, 2, dtype=torch.float)
        # Compare the implicit product against an explicit dense matmul.
        expected = torch.matmul(toeplitz(col, row), rhs)
        assert torch.allclose(toeplitz_matmul(col, row, rhs), expected)

    def test_toeplitz_matmul_batch(self):
        cols = torch.tensor([[1, 6, 4, 5], [2, 3, 1, 0], [1, 2, 3, 1]], dtype=torch.float)
        rows = torch.tensor([[1, 2, 1, 1], [2, 0, 0, 1], [1, 5, 1, 0]], dtype=torch.float)
        rhs_batch = torch.randn(3, 4, 2, dtype=torch.float)
        # Materialize each batch entry's dense Toeplitz matrix.
        dense = torch.zeros(3, 4, 4, dtype=torch.float)
        for batch_idx, (col, row) in enumerate(zip(cols, rows)):
            dense[batch_idx].copy_(toeplitz(col, row))
        expected = torch.matmul(dense, rhs_batch)
        assert torch.allclose(toeplitz_matmul(cols, rows, rhs_batch), expected)

    def test_toeplitz_matmul_batchmat(self):
        col = torch.tensor([1, 6, 4, 5], dtype=torch.float)
        row = torch.tensor([1, 2, 1, 1], dtype=torch.float)
        rhs_batch = torch.randn(3, 4, 2, dtype=torch.float)
        # A single Toeplitz matrix broadcast against a batched right-hand side.
        expected = torch.matmul(toeplitz(col, row).unsqueeze(0), rhs_batch)
        res = toeplitz_matmul(col.unsqueeze(0), row.unsqueeze(0), rhs_batch)
        assert torch.allclose(res, expected)
def block_inception_c(input):
    """Inception-C block (Inception-v4): four parallel branches concatenated
    along the channel axis."""
    # Channel axis depends on the backend's image data layout.
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    # Branch 1: 1x1 reduce, then a split 1x3 / 3x1 pair merged together.
    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_1 = merge(
        [conv2d_bn(branch_1, 256, 1, 3), conv2d_bn(branch_1, 256, 3, 1)],
        mode='concat', concat_axis=channel_axis)

    # Branch 2: deeper 1x1 -> 3x1 -> 1x3 stack, then the same split pair.
    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_2 = merge(
        [conv2d_bn(branch_2, 256, 1, 3), conv2d_bn(branch_2, 256, 3, 1)],
        mode='concat', concat_axis=channel_axis)

    # Branch 3: average pooling followed by a 1x1 projection.
    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    return merge([branch_0, branch_1, branch_2, branch_3],
                 mode='concat', concat_axis=channel_axis)
class ProtoGraphGenerator():
    """Captures an executed computation graph into the current proto network.

    Used both as a context manager (installs a renaming hook and, on exit,
    splits the recorded variables into buffers vs. parameters) and as a
    callable visitor applied to each executed function.
    """

    def __init__(self, names, params):
        # Invert `names` (name -> variable) so variable data maps back to
        # its user-facing name; `params` entries take precedence.
        if (names is not None):
            self.names = {v.data: k for (k, v) in names.items()}
        else:
            self.names = {}
        self.names.update(params)
        self.params = params
        self.variables = {}  # variable data -> ProtoVariable already recorded

    def __enter__(self):
        # Install our renaming hook on the current proto network.
        proto_network = current_network()
        proto_network.renaming = self.renaming
        return self

    def __exit__(self, type, value, traceback):
        # Partition recorded variables: 'Buffer'-typed ones stay variables
        # (with their instance detached), everything else becomes a parameter.
        proto_network = current_network()
        variables = OrderedDict()
        parameters = OrderedDict()
        for (pv_name, pv) in proto_network.variables.items():
            if (pv.type == 'Buffer'):
                variables[pv.name] = pv
                pv.variable_instance = None
            else:
                parameters[pv.name] = pv
        proto_network.parameters = parameters
        proto_network.variables = variables

    def renaming(self, i, v_name):
        # Prefer the user-facing name of the i-th output, if one is known.
        return self.names.get(self.outputs[i].data, v_name)

    def __call__(self, func):
        """Record one executed function and its input/output variables."""
        # Sink is a synthetic terminal node; nothing to record.
        if (str(func) == 'Sink'):
            return
        inputs = []
        for v in func.inputs:
            if (v.data in self.variables):
                # Variable already seen: reuse its proto counterpart.
                inputs.append(self.variables[v.data])
            else:
                # First encounter: wrap as Parameter or Buffer depending on
                # whether it was registered in `params`.
                if (v.data in self.params):
                    pv = ProtoVariable(v.d.shape, var_type='Parameter')
                else:
                    pv = ProtoVariable(v.d.shape, var_type='Buffer')
                pv.variable_instance = v
                pv.name = self.names.get(v.data, None)
                inputs.append(pv)
                self.variables[v.data] = pv
        # Remember outputs so renaming() can look up their names.
        self.outputs = func.outputs
        if (len(inputs) == 0):
            # Input-less functions are replayed through a ProtoFunction.
            outputs = ProtoFunction(func, func.name, func.arguments)(inputs=[], n_outputs=len(func.outputs))
        else:
            outputs = func(*inputs, n_outputs=len(func.outputs), auto_forward=False)
        if (not isinstance(outputs, tuple)):
            outputs = (outputs,)
        # Map each original output's data to its recorded proto variable.
        for (pv, v) in zip(outputs, func.outputs):
            self.variables[v.data] = pv
class ValidationScorerBase():
    """Template for validation scorers.

    Subclasses implement :meth:`evaluate`. Calling the scorer puts the
    model's feature extractor into eval mode, computes the score with
    gradient tracking disabled, then restores train mode.
    """

    def evaluate(self, model):
        """Compute and return the validation score for *model* (abstract)."""
        raise NotImplementedError()

    def __call__(self, model):
        # eval mode: freeze dropout/batch-norm statistics during scoring.
        model.feature_extractor.eval()
        with torch.no_grad():
            score = self.evaluate(model)
        # Restore training behaviour before handing control back.
        model.feature_extractor.train()
        return score
def forward(x, is_training=True, update_batch_stats=True, seed=1234):
    """Run the network on *x*, delegating to ``logit``.

    Stochastic behaviour (noise/dropout inside ``logit``) is enabled exactly
    when running in training mode; in inference mode it is switched off.
    """
    if is_training:
        return logit(x, is_training=True, update_batch_stats=update_batch_stats, stochastic=True, seed=seed)
    return logit(x, is_training=False, update_batch_stats=update_batch_stats, stochastic=False, seed=seed)
def eval(source, target):
    """Evaluate a Baer-Kradolfer picker trained on *source* against *target*.

    NOTE(review): shadows the builtin ``eval``; renaming would change the
    public API, so the name is kept.

    Reads P-phase targets from ``targets/<target>/task23.csv``, loads the
    picker from ``baer_logs/<source>.json``, predicts on the dev and test
    splits, and writes ``pred_baer/<source>_baer_<target>/<set>_task23.csv``.
    """
    targets = ((Path('targets') / target) / 'task23.csv')
    targets = pd.read_csv(targets)
    # Keep only P-phase picks.
    targets = targets[(targets['phase_label'] == 'P')]
    dataset = data.get_dataset_by_name(data_aliases[target])(sampling_rate=100, component_order='Z', dimension_order='NCW', cache=None)
    model_path = (Path('baer_logs') / f'{source}.json')
    model = BaerKradolfer.load_from_log(model_path)
    for eval_set in ['dev', 'test']:
        logging.warning(f'Starting set {eval_set}')
        split = dataset.get_split(eval_set)
        split.preload_waveforms(pbar=True)
        split_targets = targets[(targets['trace_split'] == eval_set)].copy()
        generator = sbg.SteeredGenerator(split, split_targets)
        generator.add_augmentations(model.get_augmentations())
        preds = []
        itr = tqdm(range(len(generator)), total=len(generator))
        for i in itr:
            pred_relative_to_p0 = model.predict(generator[i])
            preds.append(pred_relative_to_p0)
        # Predictions are relative to each trace's start; shift to absolute
        # sample indices before writing out.
        split_targets['p_sample_pred'] = (preds + split_targets['start_sample'])
        pred_path = ((Path('pred_baer') / f'{source}_baer_{target}') / f'{eval_set}_task23.csv')
        pred_path.parent.mkdir(exist_ok=True, parents=True)
        split_targets.to_csv(pred_path, index=False)
def test_fit_2():
    """ConstantClassifier.fit must reject this X/y pair with ValueError."""
    features = [1, 2, 3]
    labels = ['a', 'b', 'c']
    model = ConstantClassifier()
    with pytest.raises(ValueError):
        model.fit(features, labels)
def isomers_c9h10n2o2pf2cl(mean_function='geometric', n_samples=250) -> GoalDirectedBenchmark:
    """Goal-directed benchmark: generate isomers of C9H10N2O2PF2Cl.

    mean_function: how per-atom-count scores are aggregated ('geometric' by default).
    n_samples: number of molecules sampled uniformly for the contribution spec.
    """
    formula = 'C9H10N2O2PF2Cl'
    scorer = IsomerScoringFunction(formula, mean_function=mean_function)
    spec = uniform_specification(n_samples)
    return GoalDirectedBenchmark(name=formula, objective=scorer, contribution_specification=spec)
def populate_node_menu(viz, node, menu, statistics_collector):
    """Append a 'Show Interface Statistics' entry to the node context menu."""
    def _open_stats_window(_menu_item):
        # The window constructor installs itself; nothing to keep.
        ShowInterfaceStatistics(viz, node.node_index, statistics_collector)
    item = Gtk.MenuItem('Show Interface Statistics')
    item.connect('activate', _open_stats_window)
    item.show()
    menu.add(item)
def list_datasets():
    """Return the English dataset names, sorted case-insensitively."""
    return sorted(filter_english_datasets(), key=lambda name: name.lower())
def test_bytemasked_concatenate():
    """Concatenating ByteMaskedArrays along axis 0 merges masks; axis 1 raises."""
    one = ak.contents.ByteMaskedArray(ak.index.Index8([True, True, False, True, False, True]), ak.highlevel.Array([1, 2, 3, 4, 5, 6]).layout, valid_when=True)
    two = ak.contents.ByteMaskedArray(ak.index.Index8([True, False, False, True, True]), ak.highlevel.Array([7, 99, 999, 8, 9]).layout, valid_when=True)
    # Masked-out slots surface as None in the concatenated list view.
    assert (to_list(ak.operations.concatenate([one, two], 0)) == [1, 2, None, 4, None, 6, 7, None, None, 8, 9])
    # These 1-D arrays have no axis 1 to concatenate along.
    with pytest.raises(ValueError):
        to_list(ak.operations.concatenate([one, two], 1))
def euler_to_vec(yaw, pitch):
    """Convert yaw/pitch angles (radians) to a direction Vector.

    Convention visible from the math: the y component is sin(pitch) and yaw
    rotates in the x/z plane.
    """
    cos_pitch = cos(pitch)
    direction = Vector([0.0, 0.0, 0.0])
    direction[0] = sin(yaw) * cos_pitch
    direction[1] = sin(pitch)
    direction[2] = cos(yaw) * cos_pitch
    return direction
def attention_bias_ignore_padding(tokens_to_keep):
    """Additive attention bias that suppresses padding positions.

    tokens_to_keep: mask with 1 for real tokens, 0 for padding — presumably
    shaped (batch, seq); TODO confirm against callers. Padding slots receive
    constants.VERY_SMALL so softmax drives their weight to ~0. Two length-1
    axes are inserted for broadcasting over heads and query positions.
    """
    padding_bias = tf.cast(1 - tokens_to_keep, tf.float32) * constants.VERY_SMALL
    expanded = tf.expand_dims(padding_bias, axis=1)
    return tf.expand_dims(expanded, axis=1)
class StretchAudio(object):
    """Randomly time-stretch an audio signal.

    The stretch rate is drawn uniformly from [1 - max_scale, 1 + max_scale];
    the transform is applied only when ``should_apply_transform()`` says so.
    """

    def __init__(self, max_scale=0.2):
        self.max_scale = max_scale

    def __call__(self, data):
        # Transforms fire probabilistically; otherwise pass through untouched.
        if not should_apply_transform():
            return data
        delta = random.uniform(-self.max_scale, self.max_scale)
        return librosa.effects.time_stretch(data, 1 + delta)
def hecke_operator_on_basis(B, n, k, eps=None, already_echelonized=False):
    """Matrix of the Hecke operator T_n of weight *k* on the q-expansion basis *B*.

    B: list/tuple of power series (q-expansions) forming a basis.
    eps: Dirichlet character; defaults to the trivial character over the
    base ring of the first basis element.
    Raises TypeError for a non-list/tuple B or non-power-series entries.
    """
    if not isinstance(B, (list, tuple)):
        raise TypeError('B (=%s) must be a list or tuple' % B)
    if len(B) == 0:
        # Zero-dimensional space: return the empty matrix over a sensible ring.
        base = CyclotomicField(1) if eps is None else eps.base_ring()
        return MatrixSpace(base, 0)(0)
    f = B[0]
    R = f.base_ring()
    if eps is None:
        eps = DirichletGroup(1, R)[0]
    if not all(is_PowerSeries(g) for g in B):
        raise TypeError('each element of B must be a power series')
    n = Integer(n)
    k = Integer(k)
    # T_n only determines coefficients up to (prec - 1) // n.
    prec = (f.prec() - 1) // n
    ambient = R ** prec
    V = ambient.span_of_basis([g.padded_list(prec) for g in B], already_echelonized=already_echelonized)
    return _hecke_operator_on_basis(B, V, n, k, eps)
def load_successes_from_disk(succ_dir, succ_traj, prune_trials, target_count, cap_count=None, min_count=None):
    """Scan ``succ_dir`` for successful trials and fold them into ``succ_traj``.

    A success directory is named ``goal-pickup-movable-receptacle-scene``
    (exactly four hyphens) and contains per-trial subdirectories; a trial
    counts as successful when it holds a ``video.mp4``.

    Args:
        succ_dir: root directory to scan (only its immediate children are parsed).
        succ_traj: pandas DataFrame with goal/pickup/movable/receptacle/scene columns.
        prune_trials: when True, delete unfinished trial dirs (and parents with
            no finished trial at all).
        target_count: per-tuple count at/above which a tuple is considered full.
        cap_count: if given, record at most this many rows per tuple.
        min_count: if given, skip tuples seen fewer than this many times.

    Returns:
        (updated succ_traj DataFrame, set of tuples with count >= target_count)
    """
    import pandas as pd  # local import: only needed for the row concat below
    tuple_counts = {}
    for (root, dirs, files) in os.walk(succ_dir):
        for d in dirs:
            if (d.count('-') == 4):
                (goal, pickup, movable, receptacle, scene_num) = d.split('-')
                queue_for_delete = []
                deleted_all = True
                for (_, _dirs, _) in os.walk(os.path.join(succ_dir, d)):
                    for _d in _dirs:
                        for (_, _, _files) in os.walk(os.path.join(succ_dir, d, _d)):
                            if ('video.mp4' in _files):
                                k = (goal, pickup, movable, receptacle, scene_num)
                                if (k not in tuple_counts):
                                    tuple_counts[k] = 0
                                tuple_counts[k] += 1
                                deleted_all = False
                            else:
                                queue_for_delete.append(_d)
                            break  # only inspect the trial dir itself, not its children
                    break  # only inspect immediate children of the success dir
                if prune_trials:
                    if deleted_all:
                        print(("Removing trial-less parent dir '%s'" % os.path.join(succ_dir, d)))
                        shutil.rmtree(os.path.join(succ_dir, d))
                    else:
                        for _d in queue_for_delete:
                            print(("Removing unfinished trial '%s'" % os.path.join(succ_dir, d, _d)))
                            shutil.rmtree(os.path.join(succ_dir, d, _d))
        break  # only inspect immediate children of succ_dir
    # Collect the rows first, then append in one shot: DataFrame.append was
    # removed in pandas 2.0, and one concat is also far cheaper than growing
    # the frame row by row.
    new_rows = []
    for k in tuple_counts:
        if ((min_count is None) or (tuple_counts[k] >= min_count)):
            to_add = (tuple_counts[k] if (cap_count is None) else cap_count)
            for _ in range(to_add):
                new_rows.append({'goal': k[0], 'pickup': k[1], 'movable': k[2], 'receptacle': k[3], 'scene': k[4]})
    if new_rows:
        succ_traj = pd.concat([succ_traj, pd.DataFrame(new_rows)], ignore_index=True)
    tuples_at_target_count = set([t for t in tuple_counts if (tuple_counts[t] >= target_count)])
    return (succ_traj, tuples_at_target_count)
# NOTE(review): this bare call discards its result — it was almost certainly
# a decorator for the test below (e.g. `@..._numpy_output(check_dtype=True)`)
# whose '@' (and possibly a name prefix) was lost in extraction; confirm
# against the original test file before relying on this test.
_numpy_output(check_dtype=True)
def test_ufunc_less_ff(A: dace.float32[10], B: dace.float32[10]):
    # Elementwise A < B via the NumPy ufunc.
    return np.less(A, B)
class Test_LinkEmbedding(object):
    """Tests for the LinkEmbedding layer's binary operators.

    Uses a pair of orthonormal vectors, so the inner product ('ip') has the
    known values 0 (orthogonal pair) and 1 (a unit vector with itself);
    through a sigmoid those become 0.5 and sigmoid(1) ~= 0.7310586.
    """
    d = 100        # embedding dimension used by all tests
    d_out = 10
    def test_ip(self):
        """'ip' operator fed a [src, dst] pair of tensors, linear and sigmoid."""
        (x_src, x_dst) = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype='float64')
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype='float64')
        li = LinkEmbedding(method='ip', activation='linear')([x_src, x_dst])
        print("link inference with 'ip' operator on orthonormal vectors: {}".format(li.numpy()))
        assert (li.numpy() == pytest.approx(0, abs=1.5e-07))
        li = LinkEmbedding(method='ip', activation='linear')([x_src, x_src])
        print("link inference with 'ip' operator on unit vector: ", li.numpy())
        assert (li.numpy() == pytest.approx(1, abs=1.5e-07))
        # sigmoid(0) == 0.5
        li = LinkEmbedding(method='ip', activation='sigmoid')([x_src, x_dst])
        assert (li.numpy() == pytest.approx(0.5, abs=1.5e-07))
        # sigmoid(1) ~= 0.7310586
        li = LinkEmbedding(method='ip', activation='sigmoid')([x_src, x_src])
        assert (li.numpy() == pytest.approx(0.7310586, abs=1.5e-07))
    def test_ip_single_tensor(self):
        """'ip' operator fed a single stacked (1, 2, d) tensor instead of a pair."""
        (x_src, x_dst) = make_orthonormal_vectors(self.d)
        x_src = tf.constant(x_src, shape=(1, self.d), dtype='float64')
        x_dst = tf.constant(x_dst, shape=(1, self.d), dtype='float64')
        x_link_sd = tf.stack([x_src, x_dst], axis=1)
        x_link_ss = tf.stack([x_src, x_src], axis=1)
        li = LinkEmbedding(method='ip', activation='linear')(x_link_sd)
        print("link inference with 'ip' operator on orthonormal vectors: {}".format(li.numpy()))
        assert (li.numpy() == pytest.approx(0, abs=1.5e-07))
        li = LinkEmbedding(method='ip', activation='linear')(x_link_ss)
        print("link inference with 'ip' operator on unit vector: ", li.numpy())
        assert (li.numpy() == pytest.approx(1, abs=1.5e-07))
        li = LinkEmbedding(method='ip', activation='sigmoid')(x_link_sd)
        assert (li.numpy() == pytest.approx(0.5, abs=1.5e-07))
        li = LinkEmbedding(method='ip', activation='sigmoid')(x_link_ss)
        assert (li.numpy() == pytest.approx(0.7310586, abs=1.5e-07))
    def test_mul_l1_l2_avg(self):
        """Elementwise operators keep dim d; 'concat' doubles it (pair input)."""
        (x_src, x_dst) = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, 1, self.d)
        x_dst = x_dst.reshape(1, 1, self.d)
        inp_src = keras.Input(shape=(1, self.d))
        inp_dst = keras.Input(shape=(1, self.d))
        for op in ['mul', 'l1', 'l2', 'avg']:
            out = LinkEmbedding(method=op)([inp_src, inp_dst])
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert (res.shape == (1, 1, self.d))
            assert isinstance(res.flatten()[0], np.float32)
        for op in ['concat']:
            out = LinkEmbedding(method=op)([inp_src, inp_dst])
            li = keras.Model(inputs=[inp_src, inp_dst], outputs=out)
            res = li.predict(x=[x_src, x_dst])
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert (res.shape == (1, 1, (2 * self.d)))
            assert isinstance(res.flatten()[0], np.float32)
    def test_mul_l1_l2_avg_single_tensor(self):
        """Same operator checks with a single stacked (2, d) input tensor."""
        (x_src, x_dst) = make_orthonormal_vectors(self.d)
        x_src = x_src.reshape(1, self.d)
        x_dst = x_dst.reshape(1, self.d)
        x_link_np = np.stack([x_src, x_dst], axis=1)
        x_link = keras.Input(shape=(2, self.d))
        for op in ['mul', 'l1', 'l2', 'avg']:
            out = LinkEmbedding(method=op)(x_link)
            li = keras.Model(inputs=x_link, outputs=out)
            res = li.predict(x=x_link_np)
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert (res.shape == (1, self.d))
            assert isinstance(res.flatten()[0], np.float32)
        for op in ['concat']:
            out = LinkEmbedding(method=op)(x_link)
            li = keras.Model(inputs=x_link, outputs=out)
            res = li.predict(x=x_link_np)
            print("link inference with '{}' operator: {}".format(op, res.flatten()))
            assert (res.shape == (1, (2 * self.d)))
            assert isinstance(res.flatten()[0], np.float32)
def diff_prod(f_derivs, u, g, X, interval, end, uderivs, atc):
    """Solve for derivatives of ``u`` from known derivatives of the product ``u*g``.

    For each order ``l`` in *interval*, builds one equation per multi-index of
    order ``l`` over the variables *X*: the stored product derivative
    (``f_derivs``) equals the symbolic expansion of ``diff(u*g)`` evaluated at
    the point fixed by the substitution *atc*. Unknown ``diff(u, t)`` values
    are replaced by temporary symbols, the linear system is solved, and the
    results are accumulated into *uderivs* (which is updated in place and
    returned). ``end`` is a list of extra differentiation directions appended
    to every multi-index.
    """
    from sage.symbolic.relation import solve
    for l in interval:
        D = {}
        rhs = []
        lhs = []
        new_vars = []
        for t in combinations_with_replacement(X, l):
            t = list(t)
            s = (t + end)
            lhs.append(f_derivs[tuple(s)])
            rhs.append(diff((u * g), s).subs(atc).subs(uderivs))
            # Fresh symbol standing in for the unknown derivative of u.
            new_var = SR.temp_var()
            new_vars.append(new_var)
            D[diff(u, t).subs(atc)] = new_var
        eqns = [(lhs[i] == rhs[i].subs(uderivs).subs(D)) for i in range(len(lhs))]
        variables = D.values()
        sol = solve(eqns, *variables, solution_dict=True)
        # Record the solved derivatives (first solution) under their original keys.
        uderivs.update(subs_all(D, sol[ZZ.zero()]))
        # Release the temporary symbols.
        SR.cleanup_var(new_vars)
    return uderivs
class CCTest(ClassifierBaseTest):
    """Behavioural tests for ClassifierChain over sparse and dense base models."""

    @staticmethod
    def _svc_chain(**kwargs):
        # Probabilistic SVC can consume sparse inputs; labels must be dense.
        return ClassifierChain(classifier=SVC(probability=True), require_dense=[False, True], **kwargs)

    @staticmethod
    def _nb_chain(**kwargs):
        # GaussianNB requires dense inputs and dense labels.
        return ClassifierChain(classifier=GaussianNB(), require_dense=[True, True], **kwargs)

    def test_if_sparse_classification_works_on_non_dense_base_classifier(self):
        chain = self._svc_chain()
        self.assertClassifierWorksWithSparsity(chain, 'sparse')
        self.assertClassifierPredictsProbabilities(chain, 'sparse')

    def test_if_dense_classification_works_on_non_dense_base_classifier(self):
        chain = self._svc_chain()
        self.assertClassifierWorksWithSparsity(chain, 'dense')
        self.assertClassifierPredictsProbabilities(chain, 'dense')

    def test_if_sparse_classification_works_on_dense_base_classifier(self):
        chain = self._nb_chain()
        self.assertClassifierWorksWithSparsity(chain, 'sparse')
        self.assertClassifierPredictsProbabilities(chain, 'sparse')

    def test_if_dense_classification_works_on_dense_base_classifier(self):
        chain = self._nb_chain()
        self.assertClassifierWorksWithSparsity(chain, 'dense')
        self.assertClassifierPredictsProbabilities(chain, 'dense')

    def test_if_works_with_cross_validation(self):
        self.assertClassifierWorksWithCV(self._nb_chain())

    def test_if_order_is_set(self):
        chain = self._nb_chain(order=None)
        (X, y) = self.get_multilabel_data_for_tests(sparsity_indicator='sparse')[0]
        chain.fit(X, y)
        # order=None must default to the natural label order.
        self.assertEqual(chain._order(), list(range(y.shape[1])))

    def test_if_order_is_set_when_explicitly_given(self):
        (X, y) = self.get_multilabel_data_for_tests(sparsity_indicator='sparse')[0]
        reversed_chain = list(reversed(range(y.shape[1])))
        chain = self._nb_chain(order=reversed_chain)
        chain.fit(X, y)
        self.assertEqual(chain._order(), reversed_chain)
def main(N, family, bc):
    """Solve a 3D Poisson problem with one non-periodic direction.

    N: number of points per direction; family: basis family for the
    non-periodic axis; bc: key into the module-level ``bcs`` table.
    Relies on module-level ``ue``/``fe`` (exact solution / right-hand side),
    ``domain`` and ``comm``. Prints the L2 error on rank 0.
    """
    SD = FunctionSpace(N, family=family, bc=bcs[bc], domain=domain, alpha=1, beta=1)
    K1 = FunctionSpace(N, family='F', dtype='D')
    K2 = FunctionSpace(N, family='F', dtype='d')
    subcomms = Subcomm(comm, [0, 0, 1])
    T = TensorProductSpace(subcomms, (K1, SD, K2), axes=(1, 0, 2))
    B = T.get_testspace(kind='PG')
    u = TrialFunction(T)
    v = TestFunction(B)
    constraint = ()
    if (bc == 1):
        # Pure Neumann case: pin the mean value so the solution is unique.
        constraint = ((0, (dx(Array(T, buffer=ue), weighted=True) / dx(Array(T, val=1), weighted=True))),)
    fj = Array(B, buffer=fe)
    f_hat = Function(B)
    f_hat = inner(v, fj, output_array=f_hat)
    matrices = inner(v, div(grad(u)))
    Solver = la.SolverGeneric1ND
    H = Solver(matrices)
    u_hat = Function(T)
    u_hat = H(f_hat, u_hat, constraints=constraint)
    uq = u_hat.backward()
    uj = Array(T, buffer=ue)
    error = np.sqrt(inner(1, ((uj - uq) ** 2)))
    if (comm.Get_rank() == 0):
        print(f'poisson3D L2 error = {error:2.6e}')
    # Fixed membership test: the env-var key is 'pytest', not 'pytest ' —
    # the trailing space made this assertion unreachable.
    if ('pytest' in os.environ):
        assert (error < 1e-08)
def _new_process_group_helper(world_size, rank, group_ranks, in_group, group_name, timeout=_default_pg_timeout):
    """Create and register a new distributed process group.

    Picks the backend from the default process group: MPI groups are built
    from the rank list directly; GLOO/NCCL groups get a PrefixStore scoped by
    the (auto-generated if empty) group name. Registers the group in the
    module-level ``_pg_map``/``_pg_names`` and returns it.

    Raises RuntimeError for duplicate group names, a non-timedelta timeout,
    unavailable MPI/NCCL builds, or an unsupported backend.
    """
    global _pg_map
    global _group_count
    global _pg_names
    if (not group_name):
        group_name = str(_group_count)
        _group_count += 1
    if (group_name in _pg_names.values()):
        raise RuntimeError('The specified group name has already been created, please use a different group name')
    if (not isinstance(timeout, timedelta)):
        # Message previously read 'of typedatetime.timedelta' (missing space
        # from an implicit string concatenation upstream).
        raise RuntimeError('Expected timeout argument to be of type datetime.timedelta')
    (default_backend, default_store) = _pg_map[_default_pg]
    if (default_backend == Backend.MPI):
        if (not is_mpi_available()):
            raise RuntimeError("Distributed package doesn't have MPI built in")
        pg = ProcessGroupMPI(group_ranks)
        _pg_map[pg] = (Backend.MPI, in_group)
        _pg_names[pg] = group_name
    else:
        # Scope the store per group so key spaces don't collide.
        store = PrefixStore(group_name, default_store)
        if (default_backend == Backend.GLOO):
            pg = ProcessGroupGloo(store, rank, world_size, timeout=timeout)
            _pg_map[pg] = (Backend.GLOO, store)
            _pg_names[pg] = group_name
        elif (default_backend == Backend.NCCL):
            if (not is_nccl_available()):
                raise RuntimeError("Distributed package doesn't have NCCL built in")
            pg = ProcessGroupNCCL(store, rank, world_size, group_name)
            _pg_map[pg] = (Backend.NCCL, store)
            _pg_names[pg] = group_name
        else:
            raise RuntimeError('Unsupported distributed backend by group')
    return pg
def get_data(path_wikisql, args, online_setup=None):
    """Load WikiSQL train/dev data, tables and loaders.

    online_setup: optional dict whose 'train' entry lists the dataset indices
    to keep (online-learning setup); None keeps everything.

    Returns (train_data, train_table, dev_data, dev_table, train_loader, dev_loader).
    """
    (train_data, train_table, dev_data, dev_table, _, _) = load_wikisql(path_wikisql, args.toy_model, args.toy_size, no_w2i=True, no_hs_tok=True)
    if (online_setup is not None):
        # Build the membership set once: the original rebuilt
        # set(online_setup['train']) for every row (accidental O(n*m)).
        keep_indices = set(online_setup['train'])
        train_data = [item for (idx, item) in enumerate(train_data) if (idx in keep_indices)]
        print(('## Initial train data size: %d' % len(train_data)))
    (train_loader, dev_loader) = get_loader_wikisql(train_data, dev_data, args.bS, shuffle_train=True)
    return (train_data, train_table, dev_data, dev_table, train_loader, dev_loader)
def _assign_variables(formula_node, var_dict):
    """Replace every bound leaf of *formula_node* by its value from *var_dict*.

    A leaf is "bound" when its signature is a key of *var_dict*. Raises for
    anything that is not a FormulaNode.
    """
    if not isinstance(formula_node, FormulaNode):
        raise Exception(('%s: %r' % (formula_node.__class__.__name__, formula_node)))

    def _is_bound_leaf(node):
        return isinstance(node, FormulaNode) and node.is_leaf() and (node.signature in var_dict)

    def _lookup(node):
        return var_dict[node.signature]

    return formula_node.replace_node(_is_bound_leaf, _lookup)
def main(args, config):
    """ALBEF pre-training entry point.

    Sets up distributed state and seeding, builds the pretrain dataset and
    loader, constructs the ALBEF model plus optimizer/scheduler from *config*,
    optionally restores a checkpoint, then trains for the configured number of
    epochs, checkpointing and logging once per epoch on the main process.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # Offset the seed by rank so augmentation randomness differs per worker.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']
    print('Creating dataset')
    datasets = [create_dataset('pretrain', config)]
    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    else:
        samplers = [None]
    data_loader = create_loader(datasets, samplers, batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
    tokenizer = BertTokenizer.from_pretrained(args.text_encoder)
    print('Creating model')
    model = ALBEF(config=config, text_encoder=args.text_encoder, tokenizer=tokenizer, init_deit=True)
    model = model.to(device)
    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model)
    arg_sche = utils.AttrDict(config['schedular'])
    (lr_scheduler, _) = create_scheduler(arg_sche, optimizer)
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        if args.resume:
            # Full resume: restore optimizer/scheduler state and epoch counter.
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start_epoch = (checkpoint['epoch'] + 1)
        else:
            # Warm start only: adapt positional embeddings to the current
            # encoder resolution (both online and momentum visual encoders).
            pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'], model.visual_encoder)
            m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m)
            state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
            state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
        model.load_state_dict(state_dict)
        print(('load checkpoint from %s' % args.checkpoint))
    # Keep a handle to the unwrapped model for state_dict saving under DDP.
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    print('Start training')
    start_time = time.time()
    for epoch in range(start_epoch, max_epoch):
        if (epoch > 0):
            lr_scheduler.step((epoch + warmup_steps))
        train_stats = train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, lr_scheduler, config)
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
            save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(save_obj, os.path.join(args.output_dir, ('checkpoint_%02d.pth' % epoch)))
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
        # Keep ranks in lockstep at epoch boundaries.
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def _find_root_python_package(full_path):
p = len(full_path)
while True:
p = full_path.rfind('/', 0, p)
assert (p > 0)
d = full_path[:p]
assert os.path.isdir(d)
if (not os.path.exists((d + '/__init__.py'))):
return d |
def normalize_profile(profile, precision=None, truncation_type='auto', p=2, generic=None):
    """Put a (sub-)algebra profile function into canonical form.

    Accepts None/Infinity, a list/tuple, or a callable profile and returns
    ``(new_profile, truncation_type)`` with trailing entries equal to the
    truncation value stripped. In the non-generic case (p == 2) a profile is
    a single tuple; in the generic (odd-p) case it is a pair ``(e, k)``.

    precision: how many values to sample from a callable profile (default 100).
    truncation_type: 0, Infinity, the strings 'zero'/'infinity', or 'auto'
    (meaning 0 for explicit lists, Infinity for callables).
    Raises ValueError when the normalised profile is not valid.
    """
    from sage.rings.infinity import Infinity
    # Normalise the string spellings of the truncation type.
    if (truncation_type == 'zero'):
        truncation_type = 0
    if (truncation_type == 'infinity'):
        truncation_type = Infinity
    if (generic is None):
        generic = (p != 2)
    if (not generic):
        if ((profile is None) or (profile == Infinity)):
            # The whole algebra: empty profile with infinite truncation.
            new_profile = ()
            truncation_type = Infinity
        elif isinstance(profile, (list, tuple)):
            if (truncation_type == 'auto'):
                truncation_type = 0
            # Strip trailing entries equal to the truncation value.
            while (profile and (profile[(- 1)] == truncation_type)):
                profile = profile[:(- 1)]
            new_profile = tuple(profile)
        elif callable(profile):
            if (precision is None):
                precision = 100
            if (truncation_type == 'auto'):
                truncation_type = Infinity
            # Sample the function at 1..precision-1, clamping negatives to 0.
            new_profile = [max(0, profile(i)) for i in range(1, precision)]
            while (new_profile and (new_profile[(- 1)] == truncation_type)):
                del new_profile[(- 1)]
            new_profile = tuple(new_profile)
        if is_valid_profile(new_profile, truncation_type, p):
            return (new_profile, truncation_type)
        else:
            raise ValueError('Invalid profile')
    else:
        if ((profile is None) or (profile == Infinity)):
            new_profile = ((), ())
            truncation_type = Infinity
        else:
            assert (isinstance(profile, (list, tuple)) and (len(profile) == 2)), 'Invalid form for profile'
            # Generic case: e is the exponent part, k the "Q" part.
            e = profile[0]
            k = profile[1]
            if isinstance(e, (list, tuple)):
                if (truncation_type == 'auto'):
                    truncation_type = 0
                while (e and (e[(- 1)] == truncation_type)):
                    e = e[:(- 1)]
                e = tuple(e)
            elif callable(e):
                if (precision is None):
                    e_precision = 100
                else:
                    e_precision = precision
                if (truncation_type == 'auto'):
                    truncation_type = Infinity
                e = [max(0, e(i)) for i in range(1, e_precision)]
                while (e and (e[(- 1)] == truncation_type)):
                    del e[(- 1)]
                e = tuple(e)
            if isinstance(k, (list, tuple)):
                k = tuple(k)
            elif callable(k):
                if (precision is None):
                    k_precision = 100
                else:
                    k_precision = precision
                k = tuple([k(i) for i in range((k_precision - 1))])
            # Trailing value to strip from k depends on the truncation type:
            # 1 when truncating at zero, 2 otherwise.
            if (truncation_type == 0):
                while (k and (k[(- 1)] == 1)):
                    k = k[:(- 1)]
            else:
                while (k and (k[(- 1)] == 2)):
                    k = k[:(- 1)]
            new_profile = (e, k)
        if is_valid_profile(new_profile, truncation_type, p, generic=True):
            return (new_profile, truncation_type)
        else:
            raise ValueError('Invalid profile')
class BQQmat(SpectralMatrix):
    """Mass matrix between two Q bases; diagonal with the squared norms."""

    def assemble(self, method):
        # Both test and trial spaces must be Q bases for this matrix.
        test_base = self.testfunction[0]
        trial_base = self.trialfunction[0]
        assert isinstance(test_base, Q)
        assert isinstance(trial_base, Q)
        # Only the main diagonal is non-zero.
        return {0: get_norm_sq(test_base, trial_base, method)}
def ism_from_django_qs(qs, bounds_class=Bounds3D, bounds_schema={}, with_payload=None, progress=None):
    """Build an IntervalSetMapping from a Django queryset.

    bounds_schema entries override the default video_id/min_frame/max_frame
    mapping; with_payload, when given, computes each record's payload,
    otherwise the schema's optional 'payload' field is used.
    Raises NotImplementedError for unsupported bounds classes.
    """
    def django_accessor(row, field):
        # Dotted field paths traverse nested attributes.
        value = row
        for part in field.split('.'):
            value = attrgetter_accessor(value, part)
        return value
    final_schema = {'key': 'video_id', 't1': 'min_frame', 't2': 'max_frame'}
    final_schema.update(bounds_schema)
    # Total row count is only needed when a progress bar was requested.
    total = qs.count() if (progress is not None) else None
    def payload_parser(record):
        if with_payload is not None:
            return with_payload(record)
        if 'payload' in final_schema:
            return django_accessor(record, final_schema['payload'])
        return None
    if (bounds_class == Bounds3D):
        return ism_from_iterable_with_schema_bounds3D(qs, django_accessor, bounds_schema=final_schema, with_payload=payload_parser, progress=progress, total=total)
    if (bounds_class == Bounds1D):
        return ism_from_iterable_with_schema_bounds1D(qs, django_accessor, bounds_schema=final_schema, with_payload=payload_parser, progress=progress, total=total)
    raise NotImplementedError('{} not a supported bounds'.format(bounds_class.__name__))
# NOTE(review): this arrived as a bare '.script' line (a syntax error) — the
# decorator's prefix was lost in extraction. Restored as the TorchScript
# decorator; confirm the original module path against the source repo.
@torch.jit.script
def recurrent_scaleshift(x, scale, shift):
    """Apply y = scale * y + shift 64 times, compiled with TorchScript."""
    y = x
    for i in range(64):
        y = ((scale * y) + shift)
    return y
def lex_groebner_basis_points(points, variables):
    """Lex Groebner basis of the vanishing ideal of *points*.

    Each basis element is a leading term plus its normal form with respect
    to the point set.
    """
    basis = []
    for lead in variety_lex_leading_terms(points, variables):
        basis.append(nf_lex_points(lead, points) + lead)
    return basis
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type='auto'):
    """Run *main_func* under a file-change reloader.

    In the child process (WERKZEUG_RUN_MAIN == 'true') the function runs in a
    daemon thread while the reloader loop watches for changes; the parent
    process just restarts the child and exits with its status.

    extra_files: additional files to watch; interval: polling interval in
    seconds; reloader_type: key into ``reloader_loops``.
    """
    import signal
    reloader = reloader_loops[reloader_type](extra_files, interval)
    # Exit cleanly on SIGTERM so the parent can restart us.
    signal.signal(signal.SIGTERM, (lambda *args: sys.exit(0)))
    try:
        if (os.environ.get('WERKZEUG_RUN_MAIN') == 'true'):
            ensure_echo_on()
            # daemon=True (Thread.setDaemon is deprecated since Python 3.10)
            # so the worker dies together with the reloader process.
            t = threading.Thread(target=main_func, args=(), daemon=True)
            t.start()
            reloader.run()
        else:
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
def main(unused_argv):
    """Train a MixHop GCN (replicated towers over adjacency powers).

    Skips training when results already exist (unless --retrain). Trains with
    early stopping on validation accuracy and writes a JSON results file plus
    a pickle of the best-validation model parameters.
    """
    encoded_params = GetEncodedParams()
    output_results_file = os.path.join(FLAGS.results_dir, (encoded_params + '.json'))
    output_model_file = os.path.join(FLAGS.train_dir, (encoded_params + '.pkl'))
    if (os.path.exists(output_results_file) and (not FLAGS.retrain)):
        print(('Exiting early. Results are already computed: %s. Pass flag --retrain to override' % output_results_file))
        return 0
    dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name)
    x = dataset.sparse_allx_tensor()
    y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y')
    ph_indices = tf.placeholder(tf.int64, [None])
    is_training = tf.placeholder_with_default(True, [], name='is_training')
    num_x_entries = dataset.x_indices.shape[0]
    sparse_adj = dataset.sparse_adj_tensor()
    kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg)
    # One tower per (replica, adjacency power) combination.
    gc_towers = []
    layer_id = (- 1)
    for r in range(FLAGS.replication_factor):
        for p in FLAGS.adj_pows.split(','):
            p = int(p)
            model = mixhop_model.MixHopModel(sparse_adj, x, is_training, kernel_regularizer)
            model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout, num_x_entries, pass_is_training=True)
            model.add_layer('tf', 'sparse_tensor_to_dense')
            model.add_layer('tf.nn', 'l2_normalize', axis=1)
            layer_dims = [FLAGS.hidden_dim, dataset.ally.shape[1]]
            for (j, dim) in enumerate(layer_dims):
                layer_id += 1
                if (j != 0):
                    model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout, pass_training=True)
                model.add_layer('self', 'mixhop_layer', [p], [dim], layer_id=layer_id, replica=r, pass_kernel_regularizer=True)
                if (j != (len(layer_dims) - 1)):
                    # No batch-norm/nonlinearity after the output layer.
                    model.add_layer('tf.contrib.layers', 'batch_norm')
                    model.add_layer('tf.nn', FLAGS.nonlinearity)
            gc_towers.append(model)
    # Per-tower losses on the labelled slice; tower logits are then frozen
    # (stop_gradient) before being combined by the shared output layer.
    gcn_outputs = []
    for tower in gc_towers:
        tower_logits = tower.activations[(- 1)]
        sliced_output = tf.gather(tower_logits, ph_indices)
        tower_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=sliced_output))
        tf.losses.add_loss(tower_loss)
        tower_logits = tf.stop_gradient(tower_logits)
        if (FLAGS.output_layer == 'wsum'):
            gcn_outputs.append(tf.nn.softmax(tower_logits))
        elif (FLAGS.output_layer == 'fc'):
            gcn_outputs.append(tf.nn.relu(tower_logits))
    net = tf.concat(gcn_outputs, 1)
    if (FLAGS.output_layer == 'wsum'):
        net = mixhop_model.psum_output_layer(net, dataset.ally.shape[1])
    elif (FLAGS.output_layer == 'fc'):
        net = tf.layers.dense(net, dataset.ally.shape[1])
    sliced_output = tf.gather(net, ph_indices)
    learn_rate = tf.placeholder(tf.float32, [], 'learn_rate')
    label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=sliced_output))
    tf.losses.add_loss(label_loss)
    loss = tf.losses.get_total_loss()
    if (FLAGS.optimizer == 'MomentumOptimizer'):
        # Fixed NameError: `lr` is a local of step()/the loop below and is not
        # defined at graph-construction time; the learning rate here must be
        # the `learn_rate` placeholder (as in the generic branch).
        optimizer = tf.train.MomentumOptimizer(learn_rate, 0.7, use_nesterov=True)
    else:
        optimizer_class = getattr(tf.train, FLAGS.optimizer)
        optimizer = optimizer_class(learn_rate)
    train_op = slim.learning.create_train_op(loss, optimizer, gradient_multipliers=[])
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    num_train_nodes = None
    if (FLAGS.num_train_nodes > 0):
        num_train_nodes = FLAGS.num_train_nodes
    else:
        # Negative flag value means "this many labelled nodes per class".
        num_train_nodes = (((- 1) * FLAGS.num_train_nodes) * dataset.ally.shape[1])
    (train_indices, validate_indices, test_indices) = dataset.get_partition_indices(num_train_nodes, FLAGS.num_validate_nodes)
    train_indices = range(num_train_nodes)
    feed_dict = {y: dataset.ally[train_indices]}
    dataset.populate_feed_dict(feed_dict)
    LAST_STEP = collections.Counter()
    accuracy_monitor = AccuracyMonitor(sess, FLAGS.early_stop_steps)
    def step(lr=None, columns=None):
        # One optimisation step plus evaluation on train/val/test slices.
        # Returns False when the early-stopping monitor says to stop.
        if (lr is not None):
            feed_dict[learn_rate] = lr
        i = LAST_STEP['step']
        LAST_STEP['step'] += 1
        feed_dict[is_training] = True
        feed_dict[ph_indices] = train_indices
        (train_preds, loss_value, _) = sess.run((sliced_output, label_loss, train_op), feed_dict)
        if numpy.isnan(loss_value).any():
            print('NaN value reached. Debug please.')
            import IPython
            IPython.embed()
        train_accuracy = numpy.mean((train_preds.argmax(axis=1) == dataset.ally[train_indices].argmax(axis=1)))
        feed_dict[is_training] = False
        feed_dict[ph_indices] = test_indices
        test_preds = sess.run(sliced_output, feed_dict)
        test_accuracy = numpy.mean((test_preds.argmax(axis=1) == dataset.ally[test_indices].argmax(axis=1)))
        feed_dict[ph_indices] = validate_indices
        validate_preds = sess.run(sliced_output, feed_dict)
        validate_accuracy = numpy.mean((validate_preds.argmax(axis=1) == dataset.ally[validate_indices].argmax(axis=1)))
        keep_going = accuracy_monitor.mark_accuracy(validate_accuracy, test_accuracy, i)
        print(('%i. (loss=%g). Acc: train=%f val=%f test=%f ( best val test=%f)' % (i, loss_value, train_accuracy, validate_accuracy, test_accuracy, accuracy_monitor.best[1])))
        if keep_going:
            return True
        else:
            print('Early stopping')
            return False
    # Linearly decay the learning rate every lr_decrement_every steps.
    lr = FLAGS.learn_rate
    lr_decrement = (FLAGS.lr_decrement_ratio_of_initial * FLAGS.learn_rate)
    for i in range(FLAGS.num_train_steps):
        if (not step(lr=lr)):
            break
        if ((i > 0) and ((i % FLAGS.lr_decrement_every) == 0)):
            lr -= lr_decrement
            if (lr <= 0):
                break
    if (not os.path.exists(FLAGS.results_dir)):
        os.makedirs(FLAGS.results_dir)
    if (not os.path.exists(FLAGS.train_dir)):
        os.makedirs(FLAGS.train_dir)
    with open(output_results_file, 'w') as fout:
        results = {'at_best_validate': accuracy_monitor.best, 'current': accuracy_monitor.curr_accuracy}
        fout.write(json.dumps(results))
    with open(output_model_file, 'wb') as fout:
        pickle.dump(accuracy_monitor.params_at_best, fout)
    print(('Wrote model to ' + output_model_file))
    print(('Wrote results to ' + output_results_file))
class BiTemperedLogisticLoss(nn.Module):
    """Wrapper around ``bi_tempered_logistic_loss`` with smoothing/ignore/reduction.

    t1 and t2 are the two temperatures of the bi-tempered loss; smoothing is
    the label-smoothing amount; samples whose target equals ignore_index are
    zeroed out before reduction ('mean', 'sum' or anything else for none).
    """

    def __init__(self, t1: float, t2: float, smoothing=0.0, ignore_index=None, reduction: str='mean'):
        super().__init__()
        self.t1 = t1
        self.t2 = t2
        self.smoothing = smoothing
        self.reduction = reduction
        self.ignore_index = ignore_index

    def forward(self, predictions: Tensor, targets: Tensor) -> Tensor:
        # Per-sample losses; reduction is applied here, not in the base loss.
        per_sample = bi_tempered_logistic_loss(predictions, targets, t1=self.t1, t2=self.t2, label_smoothing=self.smoothing, reduction='none')
        if self.ignore_index is not None:
            keep = ~targets.eq(self.ignore_index)
            per_sample = per_sample * keep
        if self.reduction == 'mean':
            return per_sample.mean()
        if self.reduction == 'sum':
            return per_sample.sum()
        return per_sample
class SomicDataset(Dataset):
    """Segmentation dataset: stems listed in info.csv (filtered by the config
    queries for *data_type*), loaded as RGB image / mask pairs with the
    matching augmentation pipeline applied.
    """

    def __init__(self, cfg: T.DictConfig, augs_dict: T.Dict[(str, T.Compose)], data_type: str):
        self.base = Path(cfg.dataset.base)
        self.augs = augs_dict[data_type]
        self.stem_list = []
        df = pd.read_csv((self.base / 'info.csv'))
        # Each query selects a subset of rows; their stems are concatenated.
        for query in cfg.dataset[data_type].query:
            stem = df.query(query)['stem']
            self.stem_list += stem.to_list()

    def __getitem__(self, idx: int) -> dict:
        stem = self.stem_list[idx]
        # cv2.imread needs a string filename; passing a pathlib.Path fails.
        img = cv2.imread(str(self.base / f'images/{stem}.png'))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        mask = cv2.imread(str(self.base / f'masks/{stem}.png'))
        sample = self.augs(image=img, mask=mask)
        sample['stem'] = stem
        return sample

    def __len__(self) -> int:
        return len(self.stem_list)
class SimpleScrapingLocator(Locator):
decoders = {'deflate': zlib.decompress, 'gzip': (lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read()), 'none': (lambda b: b)}
    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """Locator that scrapes a simple index page for project downloads.

        url: root index URL (a trailing slash is ensured).
        timeout: per-request timeout in seconds, or None for no timeout.
        num_workers: size of the crawling thread pool.
        Remaining kwargs are forwarded to the base Locator.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}  # url -> fetched page
        self._seen = set()  # links already visited or queued
        self._to_fetch = queue.Queue()  # work queue drained by the workers
        self._bad_hosts = set()  # hosts that previously failed; skipped later
        self.skip_externals = False  # when True, ignore links off the index
        self.num_workers = num_workers
        # _lock guards result updates from workers; _gplock serialises
        # whole-project scrapes (see _get_project).
        self._lock = threading.RLock()
        self._gplock = threading.RLock()
def _prepare_threads(self):
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
for t in self._threads:
self._to_fetch.put(None)
for t in self._threads:
t.join()
self._threads = []
    def _get_project(self, name):
        """Collect version/download metadata for *name* by crawling its index page.

        Serialised by ``_gplock`` because the workers write into
        ``self.result``; a fresh worker pool is started per project and torn
        down afterwards. Returns the {'urls': ..., 'digests': ...} dict.
        """
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            # Workers read these two attributes while crawling.
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, ('%s/' % quote(name)))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                # Block until every queued URL has been processed.
                self._to_fetch.join()
            finally:
                self._wait_threads()
            # Drop the shared attribute; the local `result` keeps the data.
            del self.result
        return result
    # Matches filename fragments of platform-specific builds (Linux, Windows,
    # macOS); case-insensitive.
    platform_dependent = re.compile('\\b(linux-(i\\d86|x86_64|arm\\w+)|win(32|-amd64)|macosx-?\\d+)\\b', re.I)
    def _is_platform_dependent(self, url):
        # Truthy (a match object) when *url* names a platform-specific download.
        return self.platform_dependent.search(url)
    def _process_download(self, url):
        """Record *url* as a candidate download for the current project.

        Platform-specific archives are skipped. Returns the download info
        dict (truthy) when the URL was recognised as a download, else None —
        callers use the falsy result to decide to crawl the link instead.
        """
        if self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            # self.result is shared across worker threads.
            with self._lock:
                self._update_version_data(self.result, info)
        return info
def _should_queue(self, link, referrer, rel):
(scheme, netloc, path, _, _, _) = urlparse(link)
if path.endswith(((self.source_extensions + self.binary_extensions) + self.excluded_extensions)):
result = False
elif (self.skip_externals and (not link.startswith(self.base_url))):
result = False
elif (not referrer.startswith(self.base_url)):
result = False
elif (rel not in ('homepage', 'download')):
result = False
elif (scheme not in (' ' 'ftp')):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if (host.lower() == 'localhost'):
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result)
return result
def _fetch(self):
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if (page is None):
continue
for (link, rel) in page.links:
if (link not in self._seen):
try:
self._seen.add(link)
if ((not self._process_download(link)) and self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError:
pass
except Exception as e:
self.errors.put(text_type(e))
finally:
self._to_fetch.task_done()
if (not url):
break
def get_page(self, url):
(scheme, netloc, path, _, _, _) = urlparse(url)
if ((scheme == 'file') and os.path.isdir(url2pathname(path))):
url = urljoin(ensure_slash(url), 'index.html')
if (url in self._page_cache):
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if (host in self._bad_hosts):
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding]
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError:
data = data.decode('latin-1')
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if (e.code != 404):
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e:
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e:
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
result = set()
page = self.get_page(self.base_url)
if (not page):
raise DistlibException(('Unable to get %s' % self.base_url))
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result |
def perturb_single(img: torch.FloatTensor, eps: float, min_pixel=(- 1.0), max_pixel=1.0) -> torch.Tensor:
    """Add uniform noise scaled by *eps* to *img* and clamp to the pixel range.

    The noise is drawn uniformly from ``eps * [min_pixel, max_pixel)``.

    Fix: the noise is now created on ``img``'s device instead of an
    unconditional ``.cuda()`` call, so the function also works on CPU tensors
    and targets the correct GPU in multi-device setups.

    :param img: input image tensor (any shape).
    :param eps: perturbation magnitude; 0 leaves the image unchanged
        (apart from clamping to ``[min_pixel, max_pixel]``).
    :param min_pixel: lower bound of the valid pixel range.
    :param max_pixel: upper bound of the valid pixel range.
    :returns: perturbed image clamped to ``[min_pixel, max_pixel]``.
    """
    span = max_pixel - min_pixel
    base = (span * torch.rand(img.shape)) + min_pixel
    noise = (eps * base).to(img.device)
    return torch.clamp(img + noise, min_pixel, max_pixel)
.sm70
_utils.test(arch=[ti.cpu, ti.cuda])
def test_atomic_add_f16():
    # NOTE(review): the two lines above look like mangled decorators (likely
    # "@test_utils.test(...)" plus an sm70 capability marker) -- confirm
    # against the original file. ``foo`` presumably carried "@ti.kernel" too.
    # fp16 field with two slots: one accumulated atomically, one serially.
    f = ti.field(dtype=ti.f16, shape=2)
    def foo():
        # f[0]: atomic "+=" accumulation path.
        for i in range(1000):
            f[0] += 1.12
        # f[1]: explicit read-modify-write inside a serial outer loop.
        for _ in range(1):
            for i in range(1000):
                f[1] = (f[1] + 1.12)
    foo()
    # Both accumulation styles must agree within fp16 tolerance.
    assert (f[0] == test_utils.approx(f[1], rel=0.001))
def test_kwargs_validate():
    """A ModelC built from a raw attribute dict must pass validation."""
    instance = ModelC({'int_field': 3, 'string_field': 'hi'})
    instance.validate()
def _check_errors(ret, func, args):
if (ret <= 0):
raise RuntimeError(('FMFT returned error code %d for the given arguments' % ret))
return ret |
.parametrize('extensionarray', [False, True])
def test_numpyarray(tmp_path, extensionarray):
    # NOTE(review): the line above looks like a mangled
    # "@pytest.mark.parametrize" decorator -- confirm against the original.
    # Round-trip a plain 1-D NumpyArray through Arrow and Parquet.
    akarray = ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3]), parameters={'which': 'inner'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    parquet_round_trip(akarray, paarray, extensionarray, tmp_path)
    # Same content wrapped in a ByteMaskedArray (valid_when=False masks entries).
    akarray = ak.contents.ByteMaskedArray(ak.index.Index8(np.array([False, True, False])), ak.contents.NumpyArray(np.array([1.1, 2.2, 3.3]), parameters={'which': 'inner'}), valid_when=False, parameters={'which': 'outer'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    parquet_round_trip(akarray, paarray, extensionarray, tmp_path)
    # Multidimensional (2x3x5) NumpyArray round-trip.
    akarray = ak.contents.NumpyArray(np.arange(((2 * 3) * 5)).reshape(2, 3, 5), parameters={'which': 'inner'})
    paarray = akarray.to_arrow(extensionarray=extensionarray)
    arrow_round_trip(akarray, paarray, extensionarray)
    parquet_round_trip(akarray, paarray, extensionarray, tmp_path)
_grad()
def evaluate(model, data_loader, tokenizer, device, config, info='None'):
    """Run classification evaluation and return averaged metrics as strings.

    NOTE(review): the "_grad()" line above looks like a mangled
    "@torch.no_grad()" decorator -- confirm against the original file.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = f'{info} Evaluation:'
    print_freq = 50
    for (images, text, targets) in metric_logger.log_every(data_loader, print_freq, header):
        (images, targets) = (images.to(device, non_blocking=True), targets.to(device, non_blocking=True))
        text_inputs = tokenizer(text, padding='longest', return_tensors='pt').to(device)
        prediction = model(images, text_inputs, targets=targets, train=False)
        # Argmax over class logits -> batch accuracy.
        (_, pred_class) = prediction.max(1)
        accuracy = ((targets == pred_class).sum() / targets.size(0))
        metric_logger.meters['acc'].update(accuracy.item(), n=images.size(0))
    # Aggregate meters across distributed processes before reporting.
    metric_logger.synchronize_between_processes()
    print(f'{info} Averaged stats:', metric_logger.global_avg())
    return {k: '{:.4f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    """Complex-aware adaptive 2-D max pooling.

    Applies :func:`F.adaptive_max_pool2d` through the complex function caller,
    producing an output of the configured ``output_size``.
    """

    output_size: _size_2_t

    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the complex wrapper around the real-valued pooling op.
        return cF.complex_fcaller(
            F.adaptive_max_pool2d, input, self.output_size, self.return_indices
        )
class DummyDumpDB(DumpDB):
    """In-memory DumpDB stand-in backed by SAMPLE_PARAGRAPHS, for tests."""

    language = None

    def __init__(self):
        # No dump file to open for the dummy database.
        pass

    def get_paragraphs(self, page_title: str):
        """Return the canned paragraphs for *page_title*."""
        return SAMPLE_PARAGRAPHS[page_title]

    def is_disambiguation(self, title: str):
        # The sample corpus contains no disambiguation pages.
        return False

    def is_redirect(self, title: str):
        # The sample corpus contains no redirects.
        return False

    def resolve_redirect(self, title: str):
        # With no redirects, every title resolves to itself.
        return title

    def titles(self):
        """All page titles known to the dummy database."""
        return list(SAMPLE_PARAGRAPHS.keys())
_model_architecture('transformer_encoder_model', 'transformer_encoder_model_6l_16h_1024')
def transformer_encoder_model_6l_16h_1024(args):
    """Architecture preset: 6 encoder layers, 16 heads, 1024-dim embeddings.

    Each setting is only applied when the attribute is not already present on
    ``args`` (standard fairseq-style default filling), then the base
    architecture fills the rest.
    """
    defaults = {
        'encoder_layers': 6,
        'encoder_embed_dim': 1024,
        'encoder_ffn_embed_dim': 4096,
        'encoder_attention_heads': 16,
        'activation_fn': 'gelu',
    }
    for attr, value in defaults.items():
        setattr(args, attr, getattr(args, attr, value))
    base_encoder_model_architecture(args)
def v_packet_initialize_line_id(v_packet, opacity_state, numba_model):
    """Set ``v_packet.next_line_id`` from the packet's co-moving frequency.

    ``line_list_nu`` is assumed sorted in decreasing frequency -- TODO confirm;
    it is reversed so ``np.searchsorted`` (which needs ascending order) can be
    used, and the resulting index is mapped back to the original orientation.
    """
    inverse_line_list_nu = opacity_state.line_list_nu[::(- 1)]
    # Transform the lab-frame frequency into the co-moving frame.
    doppler_factor = get_doppler_factor(v_packet.r, v_packet.mu, numba_model.time_explosion)
    comov_nu = (v_packet.nu * doppler_factor)
    next_line_id = (len(opacity_state.line_list_nu) - np.searchsorted(inverse_line_list_nu, comov_nu))
    v_packet.next_line_id = next_line_id
.parametrize(['mu', 'r', 'time_explosion'], [(1, C_SPEED_OF_LIGHT, 1)])
def test_angle_ab_LF_to_CMF_diverge(mu, r, time_explosion):
    # NOTE(review): the line above looks like a mangled
    # "@pytest.mark.parametrize" decorator -- confirm against the original.
    # With r = c and time_explosion = 1 the aberration formula's denominator
    # vanishes, so the transformation must raise ZeroDivisionError.
    nu = 0.4
    energy = 0.9
    packet = r_packet.RPacket(r, mu, nu, energy)
    with pytest.raises(ZeroDivisionError):
        obtained = r_packet.angle_aberration_LF_to_CMF(packet, time_explosion, mu)
def test_no_join_tokenizer():
    """Smoke-test SQL tokenization with join conditions stripped, in execution order.

    Fixes: removed a leftover ``import pdb; pdb.set_trace()`` debugger hook
    (which would hang any automated run), a pointless ``if True:`` wrapper,
    and a dead first ``sql`` assignment that was immediately overwritten.
    """
    sql = 'SELECT T1.Name FROM Tourist_Attractions AS T1 JOIN VISITORS AS T2 JOIN VISITS AS T3 ON T1.Tourist_Attraction_ID = T3.Tourist_Attraction_ID AND T2.Tourist_ID = T3.Tourist_ID WHERE T2.Tourist_Details = "Vincent" INTERSECT SELECT T1.Name FROM Tourist_Attractions AS T1 JOIN VISITORS AS T2 JOIN VISITS AS T3 ON T1.Tourist_Attraction_ID = T3.Tourist_Attraction_ID AND T2.Tourist_ID = T3.Tourist_ID WHERE T2.Tourist_Details = "Marcelle"'
    print(sql)
    print(tokenize(sql, bu.tokenizer.tokenize, in_execution_order=True)[0])
    # Tokenize again with join conditions dropped and reassemble into SQL.
    tokens = tokenize(sql, bu.tokenizer.tokenize, no_join_condition=True, in_execution_order=True)[0]
    sql_njc = bu.tokenizer.convert_tokens_to_string(tokens)
    print(tokens)
    print(sql_njc)
    # The join-free SQL must still parse in execution order.
    ast_njc = eo_parse(sql_njc)
    print(json.dumps(ast_njc, indent=4))
    print()
class MikNeumann(CompositeBase):
    """Composite basis satisfying Neumann boundary conditions u'(+/-1) = bc."""

    def __init__(self, N, quad='GC', bc=(0, 0), domain=((- 1), 1), dtype=float,
                 padding_factor=1, dealias_direct=False, coordinates=None, **kw):
        # A plain pair is shorthand for the Neumann values at the two ends.
        if isinstance(bc, (tuple, list)):
            bc = BoundaryConditions({'left': {'N': bc[0]}, 'right': {'N': bc[1]}}, domain=domain)
        CompositeBase.__init__(self, N, quad=quad, domain=domain, dtype=dtype, bc=bc,
                               padding_factor=padding_factor, dealias_direct=dealias_direct,
                               coordinates=coordinates)

    @staticmethod
    def boundary_condition():
        # Fix: must be a @staticmethod -- without the decorator an instance
        # call (self.boundary_condition()) passed `self` to a zero-argument
        # function and raised TypeError.
        return 'Neumann'

    @staticmethod
    def short_name():
        # Fix: @staticmethod added for the same reason as boundary_condition.
        return 'MN'

    def stencil_matrix(self, N=None):
        """Return the sparse stencil matrix (diagonals -2, 0, +2) of size N x N."""
        N = (self.N if (N is None) else N)
        k = np.arange(N)
        k[0] = 1  # avoid division by zero for the constant mode
        d = ((2 / k) / (k + 1))
        d[(- 2):] = 0
        d[0] = 1
        d[1] = (3 / 2)
        d[2] = (1 / 3)
        dm2 = (((- 1) / k[:(- 2)]) / (k[2:] + 1))
        dm2[0] = 0
        dm2[(- 2):] = 0
        dp2 = (((- 1) / k[2:]) / (k[2:] - 1))
        dp2[0] = 0
        return SparseMatrix({(- 2): dm2, 0: d, 2: dp2}, (N, N))

    def sympy_stencil(self, i=sp.Symbol('i', integer=True), j=sp.Symbol('j', integer=True)):
        # Fix: raise the error instead of returning the (exception class,
        # message) tuple, which callers could silently ignore.
        raise RuntimeError('Not possible for current basis')
class MobileNetV1ForImageClassification(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error if torch is missing.

    The DummyObject metaclass makes any attribute access report the missing
    backend instead of failing with an ImportError at module import time.
    """
    # Backends that must be installed before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_get_caller_tls(insecure_director):
    """With TLS enabled, get_caller must read the id from the x509 common name."""
    insecure_director.tls = True
    expected_id = 'client_id'
    grpc_context = mock.Mock()
    grpc_context.auth_context = mock.Mock(
        return_value={'x509_common_name': [expected_id.encode('utf-8')]}
    )
    assert insecure_director.get_caller(grpc_context) == expected_id
def _get_valid_min_max(qparams):
(scale, zero_point, quantized_type) = qparams
adjustment = (1 + torch.finfo(torch.float).eps)
_long_type_info = torch.iinfo(torch.long)
(long_min, long_max) = ((_long_type_info.min / adjustment), (_long_type_info.max / adjustment))
min_value = max(((long_min - zero_point) * scale), ((long_min / scale) + zero_point))
max_value = min(((long_max - zero_point) * scale), ((long_max / scale) + zero_point))
return (np.float32(min_value), np.float32(max_value)) |
class SAGE(torch.nn.Module):
    """GraphSAGE stack: ReLU + dropout between layers, log-softmax output."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
        super(SAGE, self).__init__()
        self.convs = torch.nn.ModuleList()
        # First layer maps inputs to the hidden width, then (num_layers - 2)
        # hidden-to-hidden layers, then a final projection to the output width.
        self.convs.append(SAGEConv(in_channels, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, out_channels))
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize every convolution layer."""
        for conv in self.convs:
            conv.reset_parameters()

    def forward(self, x, edge_index, edge_weight=None):
        """Full-graph forward pass; returns per-node log-probabilities."""
        *hidden_convs, last_conv = self.convs
        for conv in hidden_convs:
            x = conv(x, edge_index, edge_weight)
            x = F.dropout(F.relu(x), p=self.dropout, training=self.training)
        x = last_conv(x, edge_index, edge_weight)
        return torch.log_softmax(x, dim=(- 1))

    def inference(self, x_all, subgraph_loader, device):
        """Layer-wise mini-batched inference (avoids neighbourhood explosion)."""
        pbar = tqdm(total=(x_all.size(0) * len(self.convs)))
        pbar.set_description('Evaluating')
        last_idx = len(self.convs) - 1
        for layer_idx, conv in enumerate(self.convs):
            chunks = []
            for batch_size, n_id, adj in subgraph_loader:
                edge_index, _, size = adj.to(device)
                h = x_all[n_id].to(device)
                h_target = h[:size[1]]  # target nodes come first in n_id
                h = conv((h, h_target), edge_index)
                if layer_idx != last_idx:
                    h = F.relu(h)
                chunks.append(h.cpu())
                pbar.update(batch_size)
            x_all = torch.cat(chunks, dim=0)
        pbar.close()
        return x_all
def test_set_params_passes_all_parameters():
    """Meta-estimators must forward all `estimator__*` params in one call."""

    class TestDecisionTree(DecisionTreeClassifier):
        def set_params(self, **kwargs):
            super().set_params(**kwargs)
            # The meta-estimator must hand over exactly the expected kwargs
            # (expected_kwargs is resolved from the enclosing function scope).
            assert (kwargs == expected_kwargs)
            return self

    expected_kwargs = {'max_depth': 5, 'min_samples_leaf': 2}
    for est in [Pipeline([('estimator', TestDecisionTree())]), GridSearchCV(TestDecisionTree(), {})]:
        est.set_params(estimator__max_depth=5, estimator__min_samples_leaf=2)
def test_gammaincc_neg_x_scalar():
    """gammaincc is undefined for negative x and must raise ValueError."""
    with pytest.raises(ValueError):
        gammaincc(0.5, (- 1.0))
def general_cases(channel_last):
    """Build ([Inspec(shape)], axis) cases over a pyramid of resolutions.

    Channel widths grow (x1, x1, x2, x2, x4, x4 of the base width) while the
    spatial resolution halves from 256 down to 8.

    :param channel_last: if True use NHWC shapes (axis 3), else NCHW (axis 1).
    """
    batch = 16
    base_ch = 192
    widths = [base_ch * factor for factor in [1, 1, 2, 2, 4, 4]]
    resolutions = [256, 128, 64, 32, 16, 8]
    axis = 3 if channel_last else 1
    cases = []
    for ch, res in zip(widths, resolutions):
        shape = (batch, res, res, ch) if channel_last else (batch, ch, res, res)
        cases.append(([Inspec(shape)], axis))
    return cases
def random_crop(image):
    """Pad/crop the image to 260x260, then take a random 224x224x3 crop."""
    padded = tf.image.resize_with_crop_or_pad(image, 260, 260)
    return tf.image.random_crop(padded, size=[224, 224, 3])
_module()
class TextLoggerHook(LoggerHook):
    """Logger hook that emits human-readable text logs and JSON log dumps.

    When ``out_dir`` is given, the local log files matching ``out_suffix`` are
    mirrored there through a ``FileClient`` after the run finishes.

    Fixes: the ValueError message previously read "notspecified", and the
    TypeError message lacked the f-prefix so the literal "{out_dir}" was
    printed instead of the offending value.
    """

    def __init__(self, by_epoch=True, interval=10, ignore_last=True, reset_flag=False,
                 interval_exp_name=1000, out_dir=None, out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True, file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0  # accumulated iteration time, used for the ETA
        self.interval_exp_name = interval_exp_name
        if (out_dir is None) and (file_client_args is not None):
            raise ValueError('file_client_args should be "None" when `out_dir` is not specified.')
        self.out_dir = out_dir
        if not ((out_dir is None) or isinstance(out_dir, str) or is_tuple_of(out_dir, str)):
            raise TypeError(f'out_dir should be "None" or string or tuple of string, but got {out_dir}')
        self.out_suffix = out_suffix
        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(file_client_args, self.out_dir)

    def before_run(self, runner):
        """Resolve the upload directory and open the per-run JSON log file."""
        super(TextLoggerHook, self).before_run(runner)
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir)
            # Mirror into a subdirectory named after the local work_dir.
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(f'Text logs will be saved to {self.out_dir} by {self.file_client.name} after the training process.')
        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir, f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        """Peak CUDA memory (MB) for this process, reduced to the max over ranks."""
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([(mem / (1024 * 1024))], dtype=torch.int, device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        """Format one log record as a single text line and emit it."""
        # Print the experiment name periodically (and at each epoch end).
        if (runner.meta is not None) and ('exp_name' in runner.meta):
            if self.every_n_iters(runner, self.interval_exp_name) or (self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f"Exp name: {runner.meta['exp_name']}"
                runner.logger.info(exp_info)
        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                # One lr per parameter group / optimizer.
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f"lr: {log_dict['lr']:.3e}"
            if self.by_epoch:
                log_str = f"Epoch [{log_dict['epoch']}][{log_dict['iter']}/{len(runner.data_loader)}] "
            else:
                log_str = f"Iter [{log_dict['iter']}/{runner.max_iters}] "
            log_str += f'{lr_str}, '
            if 'time' in log_dict.keys():
                # Estimate remaining time from the average seconds/iter so far.
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = (self.time_sec_tot / ((runner.iter - self.start_iter) + 1))
                eta_sec = (time_sec_avg * ((runner.max_iters - runner.iter) - 1))
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f"time: {log_dict['time']:.3f}, data_time: {log_dict['data_time']:.3f}, "
                if torch.cuda.is_available():
                    log_str += f"memory: {log_dict['memory']}, "
        elif self.by_epoch:
            log_str = f"Epoch({log_dict['mode']}) [{log_dict['epoch']}][{log_dict['iter']}] "
        else:
            log_str = f"Iter({log_dict['mode']}) [{log_dict['iter']}] "
        # Append every remaining metric that is not already in the prefix.
        log_items = []
        for name, val in log_dict.items():
            if name in ['mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', 'memory', 'epoch']:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)
        runner.logger.info(log_str)

    def _dump_log(self, log_dict, runner):
        """Append one JSON record per line to the run's .log.json (rank 0 only)."""
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[k] = self._round_float(v)
        if runner.rank == 0:
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')

    def _round_float(self, items):
        """Recursively round floats (lists included) to 5 decimal places."""
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items

    def log(self, runner):
        """Collect the current log state, emit text + JSON, return the record."""
        if 'eval_iter_num' in runner.log_buffer.output:
            # Evaluation hooks report their own iteration counter.
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)
        log_dict = OrderedDict(mode=self.get_mode(runner), epoch=self.get_epoch(runner), iter=cur_iter)
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['lr'] = cur_lr[0]
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'] = {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['lr'].update({k: lr_[0]})
        if 'time' in runner.log_buffer.output:
            # Only record memory when timing info shows training is running.
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)
        log_dict = dict(log_dict, **runner.log_buffer.output)
        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict

    def after_run(self, runner):
        """Upload local log files to out_dir; optionally delete local copies."""
        if self.out_dir is not None:
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                out_filepath = self.file_client.join_path(self.out_dir, filename)
                with open(local_filepath, 'r') as f:
                    self.file_client.put_text(f.read(), out_filepath)
                runner.logger.info(f'The file {local_filepath} has been uploaded to {out_filepath}.')
                if not self.keep_local:
                    os.remove(local_filepath)
                    runner.logger.info(f'{local_filepath} was removed due to the `self.keep_local=False`')
class PhysicsInformedGNConv(nn.Module):
    """Graph-network convolution unrolled over time with PDE residual terms.

    During training it also collects discrete time derivatives (whose form
    depends on ``pde``) and spatial derivatives (graph Laplacian applied to the
    node states) for a physics-informed loss.
    """

    def __init__(self, edge_block_model, node_block_model, global_block_model, use_edge_block=True, use_node_block=True, use_global_block=False):
        super(PhysicsInformedGNConv, self).__init__()
        # Random coefficients used only by the 'random' pde mode; drawn
        # uniformly from [-2.5, 2.5) once at construction.
        self.a = ((5 * random.random()) - 2.5)
        self.b = ((5 * random.random()) - 2.5)
        self.eb_module = edge_block_model
        self.nb_module = node_block_model
        self.gb_module = global_block_model
        self._gnc_module = GNConv(self.eb_module, self.nb_module, self.gb_module, use_edge_block=use_edge_block, use_node_block=use_node_block, use_global_block=use_global_block)

    def forward(self, input_graphs, laplacian, h_init, coeff=0.1, pde='diff', skip=False):
        """Unroll the GN over ``input_graphs``; return outputs and derivative lists.

        :param input_graphs: sequence of graphs, one per time step.
        :param laplacian: graph Laplacian matrix used for spatial derivatives.
        :param h_init: initial hidden graph state.
        :param coeff: diffusion coefficient multiplying the Laplacian term.
        :param pde: one of 'diff', 'wave', 'random', 'both' -- selects the
            discrete time-derivative stencil (wave/random/both need h_prev).
        :param skip: if True, add residual (skip) connections on node and
            edge attributes between consecutive steps.
        """
        num_processing_steps = len(input_graphs)
        output_tensors = []
        time_derivatives = []
        spatial_derivatives = []
        h_prev = None
        h_curr = h_init
        for input_graph in input_graphs:
            # Concatenate this step's input features onto the current state.
            h_curr_concat = graph_concat(input_graph, h_curr, node_cat=True, edge_cat=True, global_cat=False)
            h_next = self._gnc_module(h_curr_concat)
            if skip:
                # Residual connection on node/edge attrs; global attr passes through.
                _global_attr = h_next.global_attr
                h_next = Data(x=(h_next.x + h_curr.x), edge_index=input_graph.edge_index, edge_attr=(h_next.edge_attr + h_curr.edge_attr))
                h_next.global_attr = _global_attr
            if self.training:
                # Discrete time-derivative stencils; second-order forms need
                # h_prev and fall back to first-order on the first step.
                if (h_prev and (pde == 'wave')):
                    time_derivatives.append(((h_next.x - (2 * h_curr.x)) + h_prev.x))
                elif (pde == 'diff'):
                    time_derivatives.append((h_next.x - h_curr.x))
                elif (h_prev and (pde == 'random')):
                    time_derivatives.append(((h_next.x + (self.a * h_curr.x)) + (self.b * h_prev.x)))
                elif (h_prev and (pde == 'both')):
                    time_derivatives.append(((((h_next.x - (2 * h_curr.x)) + h_prev.x) + h_next.x) - h_curr.x))
                else:
                    time_derivatives.append((h_next.x - h_curr.x))
                spatial_derivatives.append(((- coeff) * laplacian.mm(h_curr.x)))
            h_prev = h_curr
            h_curr = h_next
            output_tensors.append(copy_geometric_data(h_curr))
        return (output_tensors, time_derivatives, spatial_derivatives)
def extract_archive(from_path: str, to_path: Optional[str]=None, remove_finished: bool=False) -> None:
    """Extract a tar / tar.gz / tgz / tar.xz / gzip / zip archive.

    :param from_path: path of the archive to extract.
    :param to_path: destination directory (defaults to the archive's directory).
    :param remove_finished: delete the archive after successful extraction.
    :raises ValueError: if the archive format is not recognized.
    """
    if to_path is None:
        to_path = os.path.dirname(from_path)

    # All tar variants differ only in the open mode.
    if _is_tar(from_path):
        tar_mode = 'r'
    elif _is_targz(from_path) or _is_tgz(from_path):
        tar_mode = 'r:gz'
    elif _is_tarxz(from_path):
        tar_mode = 'r:xz'
    else:
        tar_mode = None

    if tar_mode is not None:
        with tarfile.open(from_path, tar_mode) as tar:
            tar.extractall(path=to_path)
    elif _is_gzip(from_path):
        # Plain gzip: decompress to a file named after the archive sans suffix.
        target = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
        with open(target, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f:
            out_f.write(zip_f.read())
    elif _is_zip(from_path):
        with zipfile.ZipFile(from_path, 'r') as z:
            z.extractall(to_path)
    else:
        raise ValueError('Extraction of {} not supported'.format(from_path))

    if remove_finished:
        os.remove(from_path)
_pipeline_test
class TranslationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
    """Tests for the transformers TranslationPipeline output format.

    NOTE(review): the bare "_pipeline_test", "_torch" and "_tf" lines look
    like mangled decorators ("@is_pipeline_test", "@require_torch",
    "@require_tf") -- confirm against the original file.
    """
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, feature_extractor):
        """Build a translation pipeline (MBart needs explicit src/tgt langs)."""
        if isinstance(model.config, MBartConfig):
            (src_lang, tgt_lang) = list(tokenizer.lang_code_to_id.keys())[:2]
            translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            translator = TranslationPipeline(model=model, tokenizer=tokenizer)
        return (translator, ['Some string', 'Some other text'])

    def run_pipeline_test(self, translator, _):
        """Single string, single-item list and multi-item list all return dicts."""
        outputs = translator('Some string')
        self.assertEqual(outputs, [{'translation_text': ANY(str)}])
        outputs = translator(['Some string'])
        self.assertEqual(outputs, [{'translation_text': ANY(str)}])
        outputs = translator(['Some string', 'other string'])
        self.assertEqual(outputs, [{'translation_text': ANY(str)}, {'translation_text': ANY(str)}])
    _torch
    def test_small_model_pt(self):
        # Tiny random model: the exact (nonsensical) output is deterministic.
        translator = pipeline('translation_en_to_ro', model='patrickvonplaten/t5-tiny-random', framework='pt')
        outputs = translator('This is a test string', max_length=20)
        self.assertEqual(outputs, [{'translation_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide'}])
    _tf
    def test_small_model_tf(self):
        translator = pipeline('translation_en_to_ro', model='patrickvonplaten/t5-tiny-random', framework='tf')
        outputs = translator('This is a test string', max_length=20)
        self.assertEqual(outputs, [{'translation_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide'}])
    _torch
    def test_en_to_de_pt(self):
        translator = pipeline('translation_en_to_de', model='patrickvonplaten/t5-tiny-random', framework='pt')
        outputs = translator('This is a test string', max_length=20)
        self.assertEqual(outputs, [{'translation_text': 'monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine urine urine urine urine urine urine urine'}])
    _tf
    def test_en_to_de_tf(self):
        translator = pipeline('translation_en_to_de', model='patrickvonplaten/t5-tiny-random', framework='tf')
        outputs = translator('This is a test string', max_length=20)
        self.assertEqual(outputs, [{'translation_text': 'monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine urine urine urine urine urine urine urine'}])
def plot_digraph(dot_string, format='png'):
    """Render a DOT graph description.

    :param dot_string: graph description in DOT syntax.
    :param format: output format; 'html' returns the interactive plotter,
        anything else is forwarded to graphviz.
    :raises ImportError: when the graphviz package is not installed.
    """
    try:
        from graphviz import Source
    except ImportError as excep:
        raise ImportError('graphviz needs to be available to plot_digraph') from excep
    if format == 'html':
        return _GVPlotter(dot_string)
    return Source(dot_string, format=format)
class GreedyContinuousThompsonSampling(SingleModelGreedyAcquisitionBuilder[HasTrajectorySampler]):
    """Greedy Thompson sampling acquisition built from model trajectory samples.

    Each (greedy) point is obtained by maximizing a negated sampled trajectory
    of the model; the trajectory is updated or resampled between optimization
    steps.
    """

    def __init__(self, select_output: Callable[([TensorType], TensorType)]=select_nth_output):
        # select_output reduces a multi-output trajectory to the scalar that
        # is optimized (defaults to picking the n-th output).
        self._select_output = select_output

    def __repr__(self) -> str:
        return f'GreedyContinuousThompsonSampling({self._select_output!r})'

    def prepare_acquisition_function(self, model: HasTrajectorySampler, dataset: Optional[Dataset]=None, pending_points: Optional[TensorType]=None) -> TrajectoryFunction:
        """Sample a fresh trajectory from the model and negate it for minimization.

        :raises ValueError: if the model cannot produce trajectory samples.
        """
        if (not isinstance(model, HasTrajectorySampler)):
            raise ValueError(f'Thompson sampling from trajectory only supports models with a trajectory_sampler method; received {model.__repr__()}')
        self._trajectory_sampler = model.trajectory_sampler()
        function = self._trajectory_sampler.get_trajectory()
        return negate_trajectory_function(function, self._select_output)

    def update_acquisition_function(self, function: TrajectoryFunction, model: HasTrajectorySampler, dataset: Optional[Dataset]=None, pending_points: Optional[TensorType]=None, new_optimization_step: bool=True) -> TrajectoryFunction:
        """Refresh the trajectory: update it for a new step, else resample it."""
        if new_optimization_step:
            new_function = self._trajectory_sampler.update_trajectory(function)
        else:
            new_function = self._trajectory_sampler.resample_trajectory(function)
        # Only re-negate when the sampler returned a different function object.
        if (new_function is not function):
            function = negate_trajectory_function(new_function, self._select_output)
        return function
def _tuple_to_symexpr(val):
    """Convert *val* to a symbolic expression.

    A 2-tuple is treated as (main expression, approximation expression) and
    wrapped in SymExpr; anything else is parsed as a symbolic string/value.
    """
    if isinstance(val, tuple):
        return symbolic.SymExpr(val[0], val[1])
    return symbolic.pystr_to_symbolic(val)
def serialize_remote_homology_sequence(sequence: str, seq_id: str, class_label: int, fold_label: int, superfamily_label: int, family_label: int, pssm: List[List[int]], secondary_structure: List[int], solvent_accessibility: List[int], vocab: Dict[(str, int)]):
    """Serialize one remote-homology protein record to a tf.train.SequenceExample.

    The amino-acid string is mapped through *vocab* to integer ids; labels go
    into the example context and the per-residue annotations (secondary
    structure, solvent accessibility, PSSM) into the feature lists.

    Fix: removed a dead ``protein_context = {}`` assignment that was
    immediately overwritten by the ``to_features(...)`` call.

    :raises ValueError: if the sequence contains whitespace or a residue
        missing from *vocab*.
    :returns: the serialized SequenceExample bytes.
    """
    int_sequence = []
    for aa in sequence:
        if aa in string.whitespace:
            raise ValueError('whitespace found in string')
        aa_idx = vocab.get(aa)
        if aa_idx is None:
            raise ValueError(f'{aa} not in vocab')
        int_sequence.append(aa_idx)
    protein_context = to_features(id=seq_id.encode('UTF-8'), protein_length=len(int_sequence), class_label=class_label, fold_label=fold_label, superfamily_label=superfamily_label, family_label=family_label)
    protein_features = to_sequence_features(primary=int_sequence, secondary_structure=secondary_structure, solvent_accessibility=solvent_accessibility, evolutionary=pssm)
    example = tf.train.SequenceExample(context=protein_context, feature_lists=protein_features)
    return example.SerializeToString()
def onehot_from_logits(logits, dim=1):
    """Return a float one-hot (argmax) tensor along *dim*.

    Ties are not broken: every entry equal to the maximum is marked 1.0.
    """
    row_max = logits.max(dim, keepdim=True)[0]
    return logits.eq(row_max).float()
class OIM(autograd.Function):
    """Online Instance Matching loss function.

    Forward scores the input features against a labeled lookup table (``lut``)
    and an unlabeled circular queue (``cq``); backward momentum-updates the
    lookup table and pushes unlabeled features into the queue.

    Fix: ``forward``/``backward`` are now ``@staticmethod`` as required by
    modern PyTorch autograd -- the legacy instance-style definition raises at
    ``OIM.apply(...)`` time.
    """

    @staticmethod
    def forward(ctx, inputs, targets, lut, cq, header, momentum):
        """Return [N, len(lut) + len(cq)] similarity scores."""
        ctx.save_for_backward(inputs, targets, lut, cq, header, momentum)
        outputs_labeled = inputs.mm(lut.t())
        outputs_unlabeled = inputs.mm(cq.t())
        return torch.cat([outputs_labeled, outputs_unlabeled], dim=1)

    @staticmethod
    def backward(ctx, grad_outputs):
        (inputs, targets, lut, cq, header, momentum) = ctx.saved_tensors
        grad_inputs = None
        if ctx.needs_input_grad[0]:
            grad_inputs = grad_outputs.mm(torch.cat([lut, cq], dim=0))
            if grad_inputs.dtype == torch.float16:
                # Accumulate gradients in fp32 for numerical stability.
                grad_inputs = grad_inputs.to(torch.float32)
        # Side effect: update the memory banks with this batch's features.
        for (x, y) in zip(inputs, targets):
            if y < len(lut):
                # Labeled: exponential moving average, re-normalized.
                lut[y] = ((momentum * lut[y]) + ((1.0 - momentum) * x))
                lut[y] /= lut[y].norm()
            else:
                # Unlabeled: insert into the circular queue.
                cq[header] = x
                header = ((header + 1) % cq.size(0))
        return (grad_inputs, None, None, None, None, None)
_module()
class CGNet(nn.Module):
    """Context Guided Network (CGNet) backbone for semantic segmentation.

    NOTE(review): the "_module()" line above looks like a mangled registry
    decorator (e.g. "@BACKBONES.register_module()") -- confirm against the
    original file.

    Stages: a 3-conv stem (stride 2), then two stages of ContextGuidedBlocks
    at 1/4 and 1/8 resolution. Downsampled copies of the input image are
    injected (concatenated) before each norm+PReLU transition; the outputs of
    all three stages are returned.
    """

    def __init__(self, in_channels=3, num_channels=(32, 64, 128), num_blocks=(3, 21), dilations=(2, 4), reductions=(8, 16), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='PReLU'), norm_eval=False, with_cp=False):
        super(CGNet, self).__init__()
        self.in_channels = in_channels
        self.num_channels = num_channels
        assert (isinstance(self.num_channels, tuple) and (len(self.num_channels) == 3))
        self.num_blocks = num_blocks
        assert (isinstance(self.num_blocks, tuple) and (len(self.num_blocks) == 2))
        self.dilations = dilations
        assert (isinstance(self.dilations, tuple) and (len(self.dilations) == 2))
        self.reductions = reductions
        assert (isinstance(self.reductions, tuple) and (len(self.reductions) == 2))
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # PReLU needs one parameter per channel of the stem output.
        if (('type' in self.act_cfg) and (self.act_cfg['type'] == 'PReLU')):
            self.act_cfg['num_parameters'] = num_channels[0]
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        cur_channels = in_channels
        # Stem: three 3x3 convs, the first with stride 2.
        self.stem = nn.ModuleList()
        for i in range(3):
            self.stem.append(ConvModule(cur_channels, num_channels[0], 3, (2 if (i == 0) else 1), padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
            cur_channels = num_channels[0]
        # Input injections: average-pooled copies of the raw input image.
        self.inject_2x = InputInjection(1)
        self.inject_4x = InputInjection(2)
        cur_channels += in_channels
        self.norm_prelu_0 = nn.Sequential(build_norm_layer(norm_cfg, cur_channels)[1], nn.PReLU(cur_channels))
        # Stage 1: first block downsamples; its output is concatenated with
        # the stage input (hence 2 * num_channels[1]) plus the injected image.
        self.level1 = nn.ModuleList()
        for i in range(num_blocks[0]):
            self.level1.append(ContextGuidedBlock((cur_channels if (i == 0) else num_channels[1]), num_channels[1], dilations[0], reductions[0], downsample=(i == 0), conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_cp=with_cp))
        cur_channels = ((2 * num_channels[1]) + in_channels)
        self.norm_prelu_1 = nn.Sequential(build_norm_layer(norm_cfg, cur_channels)[1], nn.PReLU(cur_channels))
        # Stage 2: same pattern at the next resolution, without injection.
        self.level2 = nn.ModuleList()
        for i in range(num_blocks[1]):
            self.level2.append(ContextGuidedBlock((cur_channels if (i == 0) else num_channels[2]), num_channels[2], dilations[1], reductions[1], downsample=(i == 0), conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_cp=with_cp))
        cur_channels = (2 * num_channels[2])
        self.norm_prelu_2 = nn.Sequential(build_norm_layer(norm_cfg, cur_channels)[1], nn.PReLU(cur_channels))

    def forward(self, x):
        """Return the three stage outputs as a list (1/2, 1/4, 1/8 resolution)."""
        output = []
        inp_2x = self.inject_2x(x)
        inp_4x = self.inject_4x(x)
        for layer in self.stem:
            x = layer(x)
        x = self.norm_prelu_0(torch.cat([x, inp_2x], 1))
        output.append(x)
        for (i, layer) in enumerate(self.level1):
            x = layer(x)
            if (i == 0):
                down1 = x  # keep the downsampled feature for the stage concat
        x = self.norm_prelu_1(torch.cat([x, down1, inp_4x], 1))
        output.append(x)
        for (i, layer) in enumerate(self.level2):
            x = layer(x)
            if (i == 0):
                down2 = x
        x = self.norm_prelu_2(torch.cat([down2, x], 1))
        output.append(x)
        return output

    def init_weights(self, pretrained=None):
        """Load pretrained weights from a path, or kaiming/constant-init."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, (nn.Conv2d, nn.Linear)):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
                elif isinstance(m, nn.PReLU):
                    constant_init(m, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def train(self, mode=True):
        """Set train mode; optionally freeze BatchNorm statistics (norm_eval)."""
        super(CGNet, self).train(mode)
        if (mode and self.norm_eval):
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
class EMT(nn.Module):
    """Efficient Multimodal Transformer: cross-attention between per-modality
    streams and a shared set of GMC tokens, with attention pooling per layer.

    The three ``*_share`` flags control whether the cross/self transformer
    pairs are shared across the pair, across modalities, and across layers
    (``_get_clones(..., share=True)`` presumably aliases instead of copying --
    TODO confirm against its definition).
    """

    def __init__(self, dim, depth, heads, num_modality, learnable_pos_emb=False, emb_dropout=0.0, attn_dropout=0.0, ff_dropout=0.0, ff_expansion=4, max_seq_len=1024, mpu_share=False, modality_share=False, layer_share=False, attn_act_fn='tanh'):
        super().__init__()
        assert ((dim % heads) == 0), 'Error: hidden dim is not divisible by number of heads'
        dim_head = (dim // heads)
        self.num_modality = num_modality
        self.pos_embed = PositionalEmbedding(dim, max_seq_len=max_seq_len, dropout=emb_dropout, learnable=learnable_pos_emb)
        # One MPU = a pair of cross/self transformers (modality->tokens and
        # tokens->modality); cloned per direction, per modality, per layer.
        mpu_0 = CrossSelfTransformer(latent_dim=dim, input_dim=dim, depth=1, heads=heads, dim_head=dim_head, ff_expansion=ff_expansion, attn_dropout=attn_dropout, ff_dropout=ff_dropout)
        mpu_1 = _get_clones(mpu_0, 2, share=mpu_share)
        mpu_2 = _get_clones(mpu_1, num_modality, share=modality_share)
        self.mpus = _get_clones(mpu_2, depth, share=layer_share)
        # Attention pooling fuses the per-modality token updates at each layer.
        attn_pool_0 = NaiveAttention((num_modality * dim), activation_fn=attn_act_fn)
        self.attn_pools = _get_clones(attn_pool_0, depth, share=layer_share)

    def forward(self, gmc_tokens, modality_inputs, modality_masks):
        """Alternate modality<->token cross-attention per layer; return both.

        :param gmc_tokens: shared tokens of shape (batch, tokens, dim).
        :param modality_inputs: list of per-modality sequences.
        :param modality_masks: list of per-modality attention masks.
        """
        (batch_size, _, _) = gmc_tokens.shape
        modality_inputs = [self.pos_embed(modality_input) for modality_input in modality_inputs]
        for (l_idx, layer) in enumerate(self.mpus):
            gmc_tokens_list = []
            for (m_idx, x) in enumerate(modality_inputs):
                # [0]: modality stream attends to the tokens;
                # [1]: tokens attend to the modality stream.
                x_new = layer[m_idx][0](x, context=gmc_tokens, context_mask=None, mask=modality_masks[m_idx])
                gmc_tokens_new = layer[m_idx][1](gmc_tokens, context=x, context_mask=modality_masks[m_idx], mask=None)
                gmc_tokens_list.append(gmc_tokens_new)
                modality_inputs[m_idx] = x_new
            # Pool the per-modality token updates back into one token set.
            gmc_tokens = self.attn_pools[l_idx](torch.stack(gmc_tokens_list, dim=1).view(batch_size, self.num_modality, (- 1)))
            gmc_tokens = gmc_tokens.view(batch_size, self.num_modality, (- 1))
        return (gmc_tokens, modality_inputs)
def get_sample_images(dataset, n):
    """Draw up to ``n`` samples from ``dataset`` and convert each to an image.

    When the dataset holds fewer than ``n`` items, every item is used in
    order; otherwise ``n`` distinct indices are drawn at random.
    """
    total = len(dataset)
    if n < total:
        chosen = np.random.choice(total, n, replace=False)
    else:
        chosen = range(total)
    images = []
    for idx in chosen:
        sample, _ = dataset[idx]
        images.append(tensor_to_img(sample, normalize=True))
    return images
def load_mnist(path, kind='train'):
    """Read an MNIST-style (idx) dataset from gzipped files under ``path``.

    Args:
        path: Directory containing ``<kind>-labels-idx1-ubyte.gz`` and
            ``<kind>-images-idx3-ubyte.gz``.
        kind: File-name prefix, e.g. ``'train'`` or ``'t10k'``.

    Returns:
        Tuple ``(images, labels)``: ``images`` of shape (N, 784) and
        ``labels`` of shape (N,), both ``uint8``.
    """
    import os
    import gzip
    import numpy as np
    labels_file = os.path.join(path, ('%s-labels-idx1-ubyte.gz' % kind))
    images_file = os.path.join(path, ('%s-images-idx3-ubyte.gz' % kind))
    # idx files carry an 8-byte (labels) / 16-byte (images) header.
    with gzip.open(labels_file, 'rb') as fh:
        labels = np.frombuffer(fh.read(), dtype=np.uint8, offset=8)
    with gzip.open(images_file, 'rb') as fh:
        raw = np.frombuffer(fh.read(), dtype=np.uint8, offset=16)
    return (raw.reshape(len(labels), 784), labels)
def prepare_dataset():
    """Split SVHN into labeled/unlabeled/validation sets and write tfrecords.

    Writes six tfrecord files under ``FLAGS.data_dir/seed<FLAGS.dataset_seed>``:
    labeled/unlabeled train, test, and ``*_val`` variants where a slice of
    the remaining train data serves as a validation test set.
    """
    ((train_images, train_labels), (test_images, test_labels)) = load_svhn()
    dirpath = os.path.join(FLAGS.data_dir, ('seed' + str(FLAGS.dataset_seed)))
    if (not os.path.exists(dirpath)):
        os.makedirs(dirpath)
    # Deterministic shuffle driven by the dataset seed, so the same seed
    # always yields the same labeled/unlabeled split.
    rng = np.random.RandomState(FLAGS.dataset_seed)
    rand_ix = rng.permutation(NUM_EXAMPLES_TRAIN)
    print(rand_ix)  # NOTE(review): debug output — consider removing or logging.
    (_train_images, _train_labels) = (train_images[rand_ix], train_labels[rand_ix])
    # The first num_labeled_examples of the shuffled data form the labeled set;
    # they are removed from _train_* afterwards.
    labeled_ind = np.arange(FLAGS.num_labeled_examples)
    (labeled_train_images, labeled_train_labels) = (_train_images[labeled_ind], _train_labels[labeled_ind])
    _train_images = np.delete(_train_images, labeled_ind, 0)
    _train_labels = np.delete(_train_labels, labeled_ind, 0)
    convert_images_and_labels(labeled_train_images, labeled_train_labels, os.path.join(dirpath, 'labeled_train.tfrecords'))
    # NOTE(review): the "unlabeled" train file is written from the full,
    # unshuffled train set (labeled examples included) — presumably
    # intentional for semi-supervised training, but confirm.
    convert_images_and_labels(train_images, train_labels, os.path.join(dirpath, 'unlabeled_train.tfrecords'))
    convert_images_and_labels(test_images, test_labels, os.path.join(dirpath, 'test.tfrecords'))
    # Validation variant: labeled set unchanged; the first num_valid_examples
    # of the remaining (unlabeled) data act as the validation "test" set and
    # are excluded from the unlabeled pool below.
    (train_images_valid, train_labels_valid) = (labeled_train_images, labeled_train_labels)
    (test_images_valid, test_labels_valid) = (_train_images[:FLAGS.num_valid_examples], _train_labels[:FLAGS.num_valid_examples])
    unlabeled_train_images_valid = np.concatenate((train_images_valid, _train_images[FLAGS.num_valid_examples:]), axis=0)
    unlabeled_train_labels_valid = np.concatenate((train_labels_valid, _train_labels[FLAGS.num_valid_examples:]), axis=0)
    convert_images_and_labels(train_images_valid, train_labels_valid, os.path.join(dirpath, 'labeled_train_val.tfrecords'))
    convert_images_and_labels(unlabeled_train_images_valid, unlabeled_train_labels_valid, os.path.join(dirpath, 'unlabeled_train_val.tfrecords'))
    convert_images_and_labels(test_images_valid, test_labels_valid, os.path.join(dirpath, 'test_val.tfrecords'))
def assert_reproducible(func, num_iter=1):
    """Build the model ``num_iter + 1`` times and compare each rebuild to the first.

    NOTE(review): ``models_equals`` is assumed to assert/raise internally on
    mismatch — confirm, since its return value is ignored here.
    """
    reference = func()
    for _ in range(num_iter):
        models_equals(reference, func())
    tf.keras.backend.clear_session()
def remove_weight(bmodel):
    """Mark the model's kernel weight and first coefficient memory as absent.

    Mutates ``bmodel`` in place (sets the two ``has`` flags to False);
    returns None.
    """
    bmodel.kernel_module.has = False
    bmodel.net[0].parameter[0].coeff_mem.has = False
def resblock_up(x_init, channels, use_bias=True, is_training=True, sn=False, scope='resblock_up'):
    """Upsampling residual block: two BN-ReLU-deconv stages plus a strided skip.

    The first deconv (and the skip path) upsamples with stride 2; the
    second refines at stride 1.  Returns the elementwise sum of the
    residual branch and the upsampled skip connection.
    """
    with tf.variable_scope(scope):
        # Residual branch, stage 1: pre-activation then 2x-upsampling deconv.
        with tf.variable_scope('res1'):
            out = relu(batch_norm(x_init, is_training))
            out = deconv(out, channels, kernel=3, stride=2, use_bias=use_bias, sn=sn)
        # Residual branch, stage 2: pre-activation then stride-1 deconv.
        with tf.variable_scope('res2'):
            out = relu(batch_norm(out, is_training))
            out = deconv(out, channels, kernel=3, stride=1, use_bias=use_bias, sn=sn)
        # Skip path: match channel count and resolution with a strided deconv.
        with tf.variable_scope('skip'):
            shortcut = deconv(x_init, channels, kernel=3, stride=2, use_bias=use_bias, sn=sn)
        return out + shortcut
def reproduce(parent_a, parent_b, mutation_rate):
    """Create a child SMILES string from two parent SMILES via crossover + mutation.

    Returns None when crossover (or the subsequent mutation) fails to
    yield a molecule.
    """
    mol_a = Chem.MolFromSmiles(parent_a)
    mol_b = Chem.MolFromSmiles(parent_b)
    child = crossover(mol_a, mol_b)
    if child is not None:
        child = mutate(child, mutation_rate)
    if child is None:
        return None
    return Chem.MolToSmiles(child, isomericSmiles=True)
class GaloisGroup_ab(_GaloisMixin, AbelianGroup_class):
    """Galois group of an abelian extension, represented as an abelian group.

    Fix: the bare ``_attribute`` / ``_method`` lines in the original were
    mangled decorators (they would raise NameError when the class body
    executes); restored to SageMath's ``@lazy_attribute`` and
    ``@cached_method``.
    """
    def __init__(self, field, generator_orders, algorithm=None, gen_names='sigma'):
        """Store the base field and default algorithm, then build the abelian group.

        Args:
            field: The field this group is the Galois group of.
            generator_orders: Orders of the abelian group generators.
            algorithm: Default algorithm for cached computations (or None).
            gen_names: Prefix for generator names.
        """
        self._field = field
        self._default_algorithm = algorithm
        AbelianGroup_class.__init__(self, generator_orders, gen_names)

    def is_galois(self):
        """Return True: this group is always a Galois group by construction."""
        return True

    @lazy_attribute
    def _gcdata(self):
        """Return (Galois closure, embedding); here the field itself and its identity map."""
        k = self._field
        return (k, k.Hom(k).identity())

    @cached_method
    def permutation_group(self):
        """Return this group as a permutation group via GAP's regular action."""
        return PermutationGroup(gap_group=self._gap_().RegularActionHomomorphism().Image())

    @cached_method(key=_alg_key)
    def transitive_number(self, algorithm=None, recompute=False):
        """Return the transitive-group database number of the permutation representation."""
        return ZZ(self.permutation_group()._gap_().TransitiveIdentification())
# NOTE(review): the following lines were stray non-code text (dataset-viewer
# UI residue) that broke the file's syntax; preserved here as comments.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.