code stringlengths 101 5.91M |
|---|
('spacy')
class SpacyWordSplitter(WordSplitter):
    """Tokenizer backed by a spaCy pipeline, with optional POS/parse/NER components."""

    def __init__(self, language: str='en_core_web_sm', pos_tags: bool=False, parse: bool=False, ner: bool=False) -> None:
        # Load (and cache) the spaCy model with only the requested components enabled.
        self.spacy = get_spacy_model(language, pos_tags, parse, ner)

    def batch_split_words(self, sentences: List[str]) -> List[List[Token]]:
        # pipe() streams documents; n_threads=-1 lets spaCy choose the thread count.
        docs = self.spacy.pipe(sentences, n_threads=(- 1))
        tokenized = []
        for doc in docs:
            tokenized.append(_remove_spaces(doc))
        return tokenized

    def split_words(self, sentence: str) -> List[Token]:
        doc = self.spacy(sentence)
        return _remove_spaces(doc)
def DeepShuffleNetV3PlusD_OS8(args, num_classes, criterion, criterion_aux):
    """Build a DeepLabv3+ model with a ShuffleNetV2 backbone (variant D, skip m1)."""
    print('Model : DeepLabv3+, Backbone : shufflenetv2')
    model = DeepV3Plus(num_classes, trunk='shufflenetv2', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args)
    return model
def heat_diffusion_ind(graph, taus=TAUS, order=ORDER, proc=PROC):
    """Compute heat-diffusion kernels exp(-tau * L) of `graph` for each tau in `taus`.

    Returns (heat, taus) where heat is a dict {i: matrix} with one kernel per tau.
    proc == 'exact' uses an eigendecomposition; otherwise a Chebyshev polynomial
    approximation of the given `order` is used and small entries are thresholded
    to keep the result sparse.
    """
    a = nx.adjacency_matrix(graph)
    (n_nodes, _) = a.shape
    # Zero out entries below ~1e-4 / n_nodes so the approximate kernels stay sparse.
    thres = np.vectorize((lambda x: (x if (x > ((0.0001 * 1.0) / n_nodes)) else 0)))
    lap = laplacian(a)
    n_filters = len(taus)
    if (proc == 'exact'):
        # Exact kernel: U diag(exp(-tau * lambda)) U^T from the Laplacian spectrum.
        (lamb, U) = np.linalg.eigh(lap.todense())
        heat = {}
        for i in range(n_filters):
            heat[i] = U.dot(np.diagflat(np.exp(((- taus[i]) * lamb)).flatten())).dot(U.T)
    else:
        heat = {i: sc.sparse.csc_matrix((n_nodes, n_nodes)) for i in range(n_filters)}
        # Chebyshev basis on (L - I): T_0 = I, T_1 = L - I, T_k = 2(L - I)T_{k-1} - T_{k-2}.
        monome = {0: sc.sparse.eye(n_nodes), 1: (lap - sc.sparse.eye(n_nodes))}
        for k in range(2, (order + 1)):
            monome[k] = ((2 * (lap - sc.sparse.eye(n_nodes)).dot(monome[(k - 1)])) - monome[(k - 2)])
        for i in range(n_filters):
            coeffs = compute_cheb_coeff_basis(taus[i], order)
            # NOTE(review): `sc` is presumably scipy; scipy.sum is a deprecated
            # alias of numpy.sum in older versions — confirm the scipy pin.
            heat[i] = sc.sum([(coeffs[k] * monome[k]) for k in range(0, (order + 1))])
            temp = thres(heat[i].A)
            heat[i] = sc.sparse.csc_matrix(temp)
    return (heat, taus)
class FiniteWordPath_north_east_iter_with_caching(WordDatatype_iter_with_caching, FiniteWordPath_north_east, FiniteWord_class):
    # Concrete combination class: a finite word path over the north-east steps,
    # backed by the iterator-with-caching word datatype. All behavior comes
    # from the three bases; this class only fixes the MRO.
    pass
def recurrent_plotting(vl_stats, OUTD_VL, tr_stats, OUTD_TR, CRITERION, OUTD_TLB, args, PLOT_STATS, epoch, plot_freq, force):
    """Plot validation/training stats every `plot_freq` epochs (or when forced),
    and plot the criterion's t-tracker only on a forced, periodic call with a
    non-empty tracker."""
    periodic = PLOT_STATS and ((epoch % plot_freq) == 0)
    if periodic or force:
        plot_curves_from_dict(vl_stats, join(OUTD_VL.folder, 'validset-stats.png'), title='Validset stats. {}'.format(args.loss), dpi=100, plot_avg=True, avg_perd=10)
        plot_curves_from_dict(tr_stats, join(OUTD_TR.folder, 'trainset-stats.png'), title='Trainset stats. {}'.format(args.loss), dpi=100, plot_avg=True, avg_perd=10)
    # The t-evolution curve requires the periodic condition AND force AND data.
    if periodic and (CRITERION.t_tracker != []) and force:
        title = 't evolution. min: {}. max: {}.'.format(min(CRITERION.t_tracker), max(CRITERION.t_tracker))
        plot_curve(CRITERION.t_tracker, join(OUTD_TLB.folder, 'tlb-evolution.png'), title, 'epochs', 't', dpi=100)
def unispeech_sat_base_plus(refresh=False, *args, **kwargs):
    """Load the UniSpeech-SAT Base+ upstream model via its checkpoint URL.

    NOTE(review): the original line assigned `kwargs['ckpt']` an unterminated
    string literal — the checkpoint URL was lost in this copy of the source
    (a syntax error as written). The URL below follows the s3prl
    converted-checkpoint naming convention; TODO: confirm against the
    upstream s3prl repository before relying on it.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_base_plus.ckpt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)
def getNodeImage(node_type):
    """Return a data-URI (base64-encoded PNG) for the icon mapped to `node_type`.

    Raises KeyError if `node_type` has no entry in TYPE_IMAGES.
    Fix: the original opened the image file without ever closing it; a context
    manager now guarantees the handle is released.
    """
    image_filename = TYPE_IMAGES[node_type]
    with open(image_filename, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read())
    return 'data:image/png;base64,{}'.format(encoded_image.decode())
class ModelArchive(object):
    """Bundle of a pretrained LUKE model: weights, metadata, and entity vocabulary.

    NOTE(review): method decorators appear to have been stripped in this copy
    of the source — `config` reads `self.bert_model_name` as an attribute,
    `load` takes `cls`, and `_load` takes no `self`. The @property,
    @classmethod, and @staticmethod decorators are restored accordingly.
    """

    def __init__(self, state_dict: Dict[str, torch.Tensor], metadata: dict, entity_vocab: EntityVocab):
        self.state_dict = state_dict       # model weights (CPU tensors)
        self.metadata = metadata           # dict with 'model_config', 'max_*' keys
        self.entity_vocab = entity_vocab

    @property
    def bert_model_name(self):
        """Name of the underlying BERT/RoBERTa model stored in metadata."""
        return self.metadata['model_config']['bert_model_name']

    @property
    def config(self):
        """LukeConfig built from stored metadata; RoBERTa models use pad id 1."""
        config = LukeConfig(**self.metadata['model_config'])
        if self.bert_model_name.startswith('roberta'):
            config.pad_token_id = 1
        return config

    @property
    def tokenizer(self):
        """Tokenizer matching the underlying transformer (downloaded on demand)."""
        return AutoTokenizer.from_pretrained(self.bert_model_name)

    @property
    def max_seq_length(self):
        return self.metadata['max_seq_length']

    @property
    def max_mention_length(self):
        return self.metadata['max_mention_length']

    @property
    def max_entity_length(self):
        return self.metadata['max_entity_length']

    @classmethod
    def load(cls, archive_path: str):
        """Load an archive from a directory, a .bin weights file, or a tarball."""
        if os.path.isdir(archive_path):
            return cls._load(archive_path, MODEL_FILE)
        elif archive_path.endswith('.bin'):
            return cls._load(os.path.dirname(archive_path), os.path.basename(archive_path))
        # Otherwise treat the path as a tar archive: extract, then load.
        with tempfile.TemporaryDirectory() as temp_path:
            # Also fixed: the tarfile handle is now closed deterministically.
            with tarfile.open(archive_path) as f:
                f.extractall(temp_path)
            return cls._load(temp_path, MODEL_FILE)

    @staticmethod
    def _load(path: str, model_file: str):
        """Assemble a ModelArchive from files under `path`."""
        state_dict = torch.load(os.path.join(path, model_file), map_location='cpu')
        with open(os.path.join(path, METADATA_FILE)) as metadata_file:
            metadata = json.load(metadata_file)
        entity_vocab = EntityVocab(get_entity_vocab_file_path(path))
        return ModelArchive(state_dict, metadata, entity_vocab)
class AtomicRepresentation(Data):
    """Per-atom representation stored as a flat array plus per-system offsets/counts.

    NOTE(review): decorators appear stripped in this copy of the source —
    `from_linear`/`from_ragged`/`mock` use `cls`, and `ragged` reads `self.n`
    as an attribute. @classmethod and @property are restored accordingly.
    """
    kind = 'data_atomic_representation'

    @classmethod
    def from_linear(cls, representation, dataset, linear):
        """Build from a flat per-atom array aligned with the dataset's systems."""
        data = atomic_data_dict(dataset.info['atoms_by_system'], linear)
        return cls.result(data=data, inputs=dataset, component=representation)

    @classmethod
    def from_ragged(cls, representation, dataset, ragged):
        """Build from a list of per-system arrays by concatenating them flat."""
        linear = np.concatenate(ragged, axis=0)
        return cls.from_linear(representation, dataset, linear)

    @classmethod
    def mock(cls, counts, linear):
        """Construct directly from atom counts and a flat array (no provenance)."""
        data = atomic_data_dict(counts, linear)
        return cls.create(data=data)

    @property
    def n(self):
        """Number of systems; `offsets` carries one extra fencepost entry."""
        return (len(self.offsets) - 1)

    @property
    def linear(self):
        return self.data['linear']

    @property
    def offsets(self):
        return self.data['offsets']

    @property
    def counts(self):
        return self.data['counts']

    @property
    def ragged(self):
        """Per-system slices of the flat array, as an object-dtype ndarray."""
        return np.array([self.linear[self.offsets[i]:self.offsets[(i + 1)]] for i in range(self.n)], dtype=object)

    def range(self, _range):
        """Return systems [_range[0], _range[1]) as a new mock instance."""
        linear = self.linear[self.offsets[_range[0]]:self.offsets[_range[1]]]
        counts = self.counts[_range[0]:_range[1]]
        return AtomicRepresentation.mock(counts, linear)
def load_pfm(path):
    """Load a PFM (Portable Float Map) image.

    Parameters:
        path: filesystem path to a .pfm file.
    Returns:
        (data, scale): `data` is a float ndarray of shape (H, W, 3) for a
        color 'PF' file or (H, W) for a grayscale 'Pf' file, flipped to
        top-down row order; `scale` is the absolute scale factor.
    Raises:
        Exception: if the magic number or dimension header is malformed.

    Fixes: uses a context manager (the original leaked the file handle) and
    no longer shadows a parameter named `file`. The external call signature
    (one positional path argument) is unchanged.
    """
    with open(path, 'rb') as f:
        header = f.readline().rstrip()
        if header.decode('ascii') == 'PF':
            color = True
        elif header.decode('ascii') == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', f.readline().decode('ascii'))
        if not dim_match:
            raise Exception('Malformed PFM header.')
        (width, height) = map(int, dim_match.groups())
        scale = float(f.readline().decode('ascii').rstrip())
        # A negative scale marks little-endian sample data (PFM convention).
        if scale < 0:
            endian = '<'
            scale = -scale
        else:
            endian = '>'
        data = np.fromfile(f, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-up; flip to the conventional top-down order.
    return (np.flipud(data), scale)
def run_generator(filename):
    """Augment a tab-separated dialogue file with COMET/ATOMIC commonsense text.

    Reads `filename` (TSV: utterances mixed with single-digit emotion labels),
    generates xIntent/xReact/oReact inferences for every utterance, appends a
    natural-language rendering of them to the utterance, and writes the result
    (plus the original emotion labels) to `<filename stem>_ext.csv`.
    """
    saved_pretrained_model_file = 'datasets/comet_pretrained_models/atomic_pretrained_model.pickle'
    device = 'cpu'
    sampling_algorithm = 'topk-3'
    # Load the pretrained COMET model and the ATOMIC data/text encoder.
    (opt, state_dict) = utilfuncs.load_model_file(saved_pretrained_model_file)
    (data_loader, text_encoder) = utilfuncs.load_data('atomic', opt)
    n_ctx = (data_loader.max_event + data_loader.max_effect)
    n_vocab = (len(text_encoder.encoder) + n_ctx)
    model = utilfuncs.make_model(opt, n_vocab, n_ctx, state_dict)
    if (device != 'cpu'):
        cfg.device = int(device)
        cfg.do_gpu = True
        torch.cuda.set_device(cfg.device)
        model.cuda(cfg.device)
    else:
        cfg.device = 'cpu'
    # First pass over the file: keep only utterances (non-digit cells).
    data = csv.reader(open(filename, encoding='utf-8'), delimiter='\t', quoting=csv.QUOTE_NONE)
    dialogues = [[uttr for uttr in row if (uttr not in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'})] for row in data]
    # Second pass: keep only the single-digit emotion labels, aligned by row.
    data = csv.reader(open(filename, encoding='utf-8'), delimiter='\t', quoting=csv.QUOTE_NONE)
    emotions = [[label for label in row if (label in {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'})] for row in data]
    category = ['oReact', 'xIntent', 'xReact']
    sampler = utilfuncs.set_sampler(opt, sampling_algorithm, data_loader)
    new_dialogues = []
    for dialogue in dialogues:
        newuttrs = []
        for uttr in dialogue:
            concatenate_str = ''
            # ATOMIC inferences for this utterance: intents and reactions.
            results = utilfuncs.get_atomic_sequence(uttr, model, sampler, data_loader, text_encoder, category)
            # xIntent -> "PersonX wanted ...", skipping 'none' beams.
            xintentions = [anevent.replace('\t', ' ') for anevent in results['xIntent']['beams'] if (anevent != 'none')]
            if (len(xintentions) > 0):
                if (len(xintentions) == 1):
                    xintention = ('PersonX wanted %s.' % xintentions[0])
                else:
                    xintention = ('PersonX wanted %s.' % ' and '.join(xintentions))
                concatenate_str = xintention
            else:
                xintention = ''
            # xReact -> "PersonX will feel ...".
            xreactions = [anevent.replace('\t', ' ') for anevent in results['xReact']['beams'] if (anevent != 'none')]
            if (len(xreactions) > 0):
                if (len(xreactions) == 1):
                    xreaction = ('PersonX will feel %s.' % xreactions[0])
                else:
                    xreaction = ('PersonX will feel %s.' % ' and '.join(xreactions))
                concatenate_str += (' ' + xreaction)
            else:
                xreaction = ''
            # oReact -> "PersonY will feel ...".
            oreactions = [anevent.replace('\t', ' ') for anevent in results['oReact']['beams'] if (anevent != 'none')]
            if (len(oreactions) > 0):
                if (len(oreactions) == 1):
                    oreaction = ('PersonY will feel %s.' % oreactions[0])
                else:
                    oreaction = ('PersonY will feel %s.' % ' and '.join(oreactions))
                concatenate_str += (' ' + oreaction)
            else:
                oreaction = ''
            # Append the commonsense text to the utterance when anything was generated.
            if (concatenate_str == ''):
                newuttrs.append(uttr)
            else:
                newuttrs.append(((uttr + ' ') + concatenate_str))
        new_dialogues.append(newuttrs)
    # Emit "<stem>_ext.csv": augmented utterances followed by the row's labels.
    datawriter = csv.writer(open(((filename[:(- 4)] + '_ext') + '.csv'), 'wt', encoding='utf-8'), delimiter='\t', quoting=csv.QUOTE_NONE)
    for (idx, dialogue) in enumerate(new_dialogues):
        datawriter.writerow((dialogue + emotions[idx]))
def _inference(observed_target, target_cov, weight_fn, success_params=(1, 1), hypothesis=0, alpha=0.1):
    """Selective-inference pivot and confidence interval from a weighted family.

    Builds a 5001-point grid spanning +/- 20 SDs around the observed target,
    weights it by the selection probability (binomial survival transform when
    success_params != (1, 1)) times the Gaussian density, and returns a
    two-sided p-value plus an equal-tailed interval rescaled by the variance.
    """
    k, m = success_params
    variance = target_cov[(0, 0)]
    target_sd = np.sqrt(variance)
    grid = observed_target + np.linspace(-20 * target_sd, 20 * target_sd, 5001)
    if (k, m) == (1, 1):
        weights = weight_fn(grid)
    else:
        # Probability of >= k successes out of m trials at each selection prob.
        weights = np.array([binom(m, p).sf(k - 1) for p in weight_fn(grid)])
    weights = weights * ndist.pdf(grid / target_sd)
    family = discrete_family(grid, weights)
    pivot = family.cdf(hypothesis / variance, x=observed_target)
    pivot = 2 * min(pivot, 1 - pivot)
    interval = family.equal_tailed_interval(observed_target, alpha=alpha)
    rescaled = (interval[0] * variance, interval[1] * variance)
    return (pivot, rescaled)
def show_image(image):
    """Convert an image tensor to a PIL Image, reordering CHW layout to HWC."""
    # Assumes 3-channel images: a last dim != 3 is taken to mean channels-first.
    # TODO(review): confirm callers never pass H x W x (C != 3) tensors.
    channels_first = (image.shape[2] != 3)
    if channels_first:
        image = image.permute(1, 2, 0)
    return Image.fromarray(image.numpy())
def response_schema_conformance(response: GenericResponse, case: Case) -> (bool | None):
    """Validate `response` against the operation's OpenAPI schema.

    Returns the validation result, or True vacuously when the operation's
    schema is not an OpenAPI schema (nothing to check against).
    """
    from .schemas import BaseOpenAPISchema
    schema = case.operation.schema
    if isinstance(schema, BaseOpenAPISchema):
        return case.operation.validate_response(response)
    return True
def test_init():
    """A FiberStretcher with phase pi should realize the Pauli-Z unitary."""
    timeline = Timeline()
    stretcher = FiberStretcher('fs', timeline, np.pi)
    actual = stretcher._circuit.get_unitary_matrix()
    expected = np.array([[complex(1), complex(0)], [complex(0), complex(-1)]])
    assert np.array_equal(actual, expected)
class DropPath(nn.Module):
    """Stochastic depth: drops the residual path per sample while training.

    Thin module wrapper around the functional `drop_path`; `drop_prob` is the
    probability of zeroing a sample's path.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        # Kept as a plain attribute (not a buffer/parameter) — it is not learned.
        self.drop_prob = drop_prob

    def forward(self, x):
        # self.training toggles the drop behavior on/off inside drop_path.
        return drop_path(x, self.drop_prob, self.training)
# NOTE(review): the line below looks like a stripped decorator (likely
# `@test_utils.test(...)` from the Taichi test suite) — as written it is a
# bare call expression. Confirm against the original repository.
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_multi_print():
    # Presumably `func` was decorated with @ti.kernel (also stripped) so that
    # print executes on-device and ti.sync() flushes it — TODO confirm.
    def func(x: ti.i32, y: ti.f32):
        print(x, 1234.5, y)
    func(666, 233.3)
    ti.sync()
def register_Ns3Ipv6MulticastRoutingTableEntry_methods(root_module, cls):
    # Pybindgen registration: declare constructors and methods of
    # ns3::Ipv6MulticastRoutingTableEntry so Python bindings can be generated.
    cls.add_output_stream_operator()
    # Default, copy, and from-pointer constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6MulticastRoutingTableEntry const &', 'route')])
    cls.add_constructor([param('ns3::Ipv6MulticastRoutingTableEntry const *', 'route')])
    # Static factory for a multicast route entry.
    cls.add_method('CreateMulticastRoute', 'ns3::Ipv6MulticastRoutingTableEntry', [param('ns3::Ipv6Address', 'origin'), param('ns3::Ipv6Address', 'group'), param('uint32_t', 'inputInterface'), param('std::vector< unsigned int >', 'outputInterfaces')], is_static=True)
    # Const accessors mirroring the C++ class interface.
    cls.add_method('GetGroup', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('GetInputInterface', 'uint32_t', [], is_const=True)
    cls.add_method('GetNOutputInterfaces', 'uint32_t', [], is_const=True)
    cls.add_method('GetOrigin', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('GetOutputInterface', 'uint32_t', [param('uint32_t', 'n')], is_const=True)
    cls.add_method('GetOutputInterfaces', 'std::vector< unsigned int >', [], is_const=True)
    return
def make_dns_as(asn: int, zones: List[str], exchange: int):
    """Create an autonomous system hosting one DNS server per zone.

    The AS gets a router joined to its local net0 and to internet exchange
    `exchange`; each zone gets a host named 's_<zone>dns' (dots replaced by
    underscores) with the zone installed and a simulator binding registered.
    """
    dns_as = base.createAutonomousSystem(asn)
    router = dns_as.createRouter('router0')
    dns_as.createNetwork('net0')
    router.joinNetwork('net0')
    router.joinNetwork('ix{}'.format(exchange))
    for zone in zones:
        host_name = 's_{}dns'.format(zone.replace('.', '_'))
        host = dns_as.createHost(host_name)
        host.joinNetwork('net0')
        dns.install(host_name).addZone(zone)
        sim.addBinding(Binding(host_name, filter=Filter(asn=asn, nodeName=host_name)))
# NOTE(review): the bare `_metric` below looks like a stripped decorator
# (likely `@register_metric`) — as written it is a no-op name expression.
# Confirm against the original metrics registry module.
_metric
def fid50k_cond(opts):
    # FID over 50k generated samples against the full (un-flipped) real set,
    # conditional variant.
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid_cond(opts, max_real=None, num_gen=50000)
    return dict(fid50k_cond=fid)
def preprocess_image(image, size=input_resolution):
    """Resize and normalize an image for the model.

    size == 224: bicubic-resize to 256x256, then center-crop via crop_layer.
    size == 384: bicubic-resize directly to (384, 384).
    Returns the normalized batch as a numpy array of shape (1, size, size, C).
    """
    image = np.array(image)
    # Add the batch dimension up front so both branches work on (1, H, W, C).
    image_resized = tf.expand_dims(image, 0)
    if (size == 224):
        image_resized = tf.image.resize(image_resized, (256, 256), method='bicubic')
        image_resized = crop_layer(image_resized)
    elif (size == 384):
        # Bug fix: resize the batched tensor — the original resized the raw
        # `image`, silently dropping the batch dimension added above and
        # producing a rank-3 output inconsistent with the 224 branch.
        image_resized = tf.image.resize(image_resized, (size, size), method='bicubic')
    return norm_layer(image_resized).numpy()
def search_index_pytorch(index, x, k, D=None, I=None):
    """Run a faiss k-nearest-neighbor search directly on torch tensors.

    x: contiguous (n, d) query tensor; D/I: optional preallocated (n, k)
    distance / index output tensors. Returns (D, I).
    """
    assert x.is_contiguous()
    n, d = x.size()
    assert d == index.d
    # Allocate outputs on the query's device when the caller did not supply them.
    if D is None:
        D = torch.empty(n, k, dtype=torch.float32, device=x.device)
    else:
        assert D.size() == (n, k)
    if I is None:
        I = torch.empty(n, k, dtype=torch.int64, device=x.device)
    else:
        assert I.size() == (n, k)
    # Synchronize around the raw-pointer call so faiss sees finished tensors.
    torch.cuda.synchronize()
    xptr = swig_ptr_from_FloatTensor(x)
    dptr = swig_ptr_from_FloatTensor(D)
    iptr = swig_ptr_from_LongTensor(I)
    index.search_c(n, xptr, k, dptr, iptr)
    torch.cuda.synchronize()
    return (D, I)
class TemplateTransform(VisitorTransform):
    """Instantiate a Cython code template by substituting names and temps.

    Walks a template node tree, cloning every node, replacing placeholder
    NameNodes via `substitutions`, turning names listed in `temps` into fresh
    temporary handles, and optionally rewriting all positions to `pos`.
    """
    # Class-level counter so temp names are unique across instantiations.
    temp_name_counter = 0

    def __call__(self, node, substitutions, temps, pos):
        # substitutions: dict placeholder-name -> replacement node.
        self.substitutions = substitutions
        # pos: position to stamp on cloned nodes (None keeps original positions).
        self.pos = pos
        tempmap = {}
        temphandles = []
        for temp in temps:
            TemplateTransform.temp_name_counter += 1
            handle = UtilNodes.TempHandle(PyrexTypes.py_object_type)
            tempmap[temp] = handle
            temphandles.append(handle)
        self.tempmap = tempmap
        result = super(TemplateTransform, self).__call__(node)
        if temps:
            # Wrap the result so the temps are allocated/released around it.
            result = UtilNodes.TempsBlockNode(self.get_pos(node), temps=temphandles, body=result)
        return result

    def get_pos(self, node):
        # Prefer the forced position; fall back to the node's own.
        if self.pos:
            return self.pos
        else:
            return node.pos

    def visit_Node(self, node):
        # Generic case: deep-clone the node, re-stamp its position, recurse.
        if (node is None):
            return None
        else:
            c = node.clone_node()
            if (self.pos is not None):
                c.pos = self.pos
            self.visitchildren(c)
            return c

    def try_substitution(self, node, key):
        # Replace the node by its substitution (position-adjusted copy) if any;
        # otherwise fall through to the generic clone.
        sub = self.substitutions.get(key)
        if (sub is not None):
            pos = self.pos
            if (pos is None):
                pos = node.pos
            return ApplyPositionAndCopy(pos)(sub)
        else:
            return self.visit_Node(node)

    def visit_NameNode(self, node):
        # Names registered as temps become references to their temp handle.
        temphandle = self.tempmap.get(node.name)
        if temphandle:
            return temphandle.ref(self.get_pos(node))
        else:
            return self.try_substitution(node, node.name)

    def visit_ExprStatNode(self, node):
        # A bare name used as a statement may be a statement-level placeholder.
        if isinstance(node.expr, NameNode):
            return self.try_substitution(node, node.expr.name)
        else:
            return self.visit_Node(node)
def set_environment_variables_philly(single_node=False):
    """Configure NCCL/MASTER_* environment for distributed training on Philly.

    The master process picks a port, writes "ip:port" to a shared scratch
    file; all other processes poll that file until it is complete, then every
    process exports MASTER_ADDR / MASTER_PORT.
    """
    os.environ['PHILLY_USE_INFINIBAND'] = 'True'
    os.environ['NCCL_IB_DISABLE'] = '0'
    IP_INTERFACE_NAME = os.environ['PHILLY_CONTAINER_ETH_INTERFACES']
    print('>>> Rank: {}, IP: {}:{}'.format(get_rank(), os.environ['PHILLY_CONTAINER_ETH_INTERFACES'], get_ip_address(IP_INTERFACE_NAME)))
    philly_job_dir = os.environ['PHILLY_SCRATCH_DIRECTORY']
    nccl_filename = os.path.join(philly_job_dir, 'nccl.info')
    if (is_master_proc() and (not os.path.isfile(nccl_filename))):
        # Master: choose a port within the container's allowed range and publish it.
        master_ip = get_ip_address(IP_INTERFACE_NAME)
        master_port = random.randint(int(os.environ['PHILLY_CONTAINER_PORT_RANGE_START']), int(os.environ['PHILLY_CONTAINER_PORT_RANGE_END']))
        with open(nccl_filename, 'w') as fid:
            fid.write('{}:{}'.format(master_ip, master_port))
        print('>>> MASTER: wrote file to {}'.format(nccl_filename))
    else:
        # Worker: poll until the file exists and parses as "ip:port".
        # (Matches the original: the sleep/print happen once more after success.)
        while True:
            complete = False
            if os.path.isfile(nccl_filename):
                nccl = open(nccl_filename, 'r').readline().strip().split(':')
                complete = (len(nccl) == 2)
            time.sleep(2.0)
            print('>>> CLIENT: waiting for {}'.format(nccl_filename))
            if complete:
                break
        (master_ip, master_port) = (nccl[0], nccl[1])
    os.environ['MASTER_ADDR'] = str(master_ip)
    os.environ['MASTER_PORT'] = str(master_port)
    print('>>> [rank: {}] MASTER NODE: {}:{}'.format(get_rank(), master_ip, master_port))
def sep_params(model, loaded_roberta_keys):
    """Partition a model's parameters by whether they came from RoBERTa.

    Returns (loaded, not_loaded, to_freeze, small_lr, large_lr): parameters
    whose names appear in `loaded_roberta_keys` go to loaded/small_lr and the
    freeze list; everything else goes to not_loaded/large_lr.
    """
    loaded_params = {}
    not_loaded_params = {}
    params_to_freeze = []
    small_lr_params = {}
    large_lr_params = {}
    for name, param in model.named_parameters():
        if name in loaded_roberta_keys:
            loaded_params[name] = param
            params_to_freeze.append(param)
            small_lr_params[name] = param
        else:
            not_loaded_params[name] = param
            large_lr_params[name] = param
    return (loaded_params, not_loaded_params, params_to_freeze, small_lr_params, large_lr_params)
def generate_a_transition(batch: int):
    """Sample a random transition batch: obs (batch, 2), action and reward (batch,)."""
    observations = np.random.random((batch, 2))
    actions = np.random.random((batch,))
    rewards = np.random.random((batch,))
    return {Episode.CUR_OBS: observations, Episode.ACTION: actions, Episode.REWARD: rewards}
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Pybindgen registration for ns3::CallbackImplBase: constructors, the pure
    virtual interface, and one GetCppTypeid overload per template instantiation."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    # One protected static GetCppTypeid per template argument, in the original
    # registration order (trailing spaces in the strings are intentional).
    for template_arg in [u'ns3::ObjectBase*', u'void', u'unsigned int', u'ns3::Ptr<ns3::NetDevice> ', u'ns3::Ptr<ns3::Packet const> ', u'unsigned short', u'ns3::Address const&', u'ns3::NetDevice::PacketType', u'bool', u'ns3::Ptr<ns3::Packet> ', u'ns3::Ptr<ns3::MobilityModel const> ', u'ns3::Ptr<ns3::SpectrumPhy> ', u'double', u'ns3::Ptr<ns3::SpectrumValue const> ']:
        cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[template_arg])
    return
def get_running_cuda_version(run_lambda):
    """Extract the running CUDA toolkit version from `nvcc --version` output."""
    command = 'nvcc --version'
    version_pattern = 'V(.*)$'
    return run_and_parse_first_match(run_lambda, command, version_pattern)
class IdentityLayer3D(torch.nn.Module):
    """Module that owns a single learnable (m, n, k) tensor and returns it as-is."""

    def __init__(self, m, n, k):
        super(IdentityLayer3D, self).__init__()
        weight = Parameter(torch.Tensor(m, n, k))
        self.weight = weight
        # In-place Xavier (Glorot) normal initialization.
        torch.nn.init.xavier_normal_(self.weight)

    def forward(self):
        return self.weight
def variable_summaries(var):
    """Attach TensorBoard summaries (mean, stddev, max, min, histogram) to `var`."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
class CQLImpl(SACImpl):
    """Conservative Q-Learning (CQL) on top of SAC.

    Adds a conservative penalty to the critic loss: logsumexp over Q-values of
    policy-sampled and uniform-random actions minus Q-values of dataset
    actions, scaled by a (possibly learned) alpha.
    """
    _modules: CQLModules
    _alpha_threshold: float
    _conservative_weight: float
    _n_action_samples: int
    _soft_q_backup: bool

    def __init__(self, observation_shape: Shape, action_size: int, modules: CQLModules, q_func_forwarder: ContinuousEnsembleQFunctionForwarder, targ_q_func_forwarder: ContinuousEnsembleQFunctionForwarder, gamma: float, tau: float, alpha_threshold: float, conservative_weight: float, n_action_samples: int, soft_q_backup: bool, device: str):
        super().__init__(observation_shape=observation_shape, action_size=action_size, modules=modules, q_func_forwarder=q_func_forwarder, targ_q_func_forwarder=targ_q_func_forwarder, gamma=gamma, tau=tau, device=device)
        self._alpha_threshold = alpha_threshold
        self._conservative_weight = conservative_weight
        self._n_action_samples = n_action_samples
        self._soft_q_backup = soft_q_backup

    def compute_critic_loss(self, batch: TorchMiniBatch, q_tpn: torch.Tensor) -> CQLCriticLoss:
        """SAC critic loss plus the conservative penalty (alpha updated if learned)."""
        loss = super().compute_critic_loss(batch, q_tpn)
        conservative_loss = self._compute_conservative_loss(batch.observations, batch.actions, batch.next_observations)
        if self._modules.alpha_optim:
            self.update_alpha(conservative_loss)
        return CQLCriticLoss(critic_loss=(loss.critic_loss + conservative_loss), conservative_loss=conservative_loss, alpha=self._modules.log_alpha().exp())

    def update_alpha(self, conservative_loss: torch.Tensor) -> None:
        """Gradient step on log-alpha; maximizes the conservative loss
        (hence the negation). retain_graph: the same graph is reused by the
        critic update."""
        assert self._modules.alpha_optim
        self._modules.alpha_optim.zero_grad()
        loss = (- conservative_loss)
        loss.backward(retain_graph=True)
        self._modules.alpha_optim.step()

    def _compute_policy_is_values(self, policy_obs: TorchObservation, value_obs: TorchObservation) -> torch.Tensor:
        """Importance-sampled Q-values for actions drawn from the current policy.

        Samples n actions per observation from policy_obs, evaluates them on
        value_obs, and subtracts the log-probabilities (IS correction).
        Returns a tensor of shape (ensemble, batch, n_action_samples).
        """
        with torch.no_grad():
            dist = build_squashed_gaussian_distribution(self._modules.policy(policy_obs))
            (policy_actions, n_log_probs) = dist.sample_n_with_log_prob(self._n_action_samples)
        # Repeat each observation once per sampled action, then flatten for the Q ensemble.
        repeated_obs = expand_and_repeat_recursively(value_obs, self._n_action_samples)
        flat_obs = flatten_left_recursively(repeated_obs, dim=1)
        flat_policy_acts = policy_actions.reshape((- 1), self.action_size)
        policy_values = self._q_func_forwarder.compute_expected_q(flat_obs, flat_policy_acts, 'none')
        batch_size = (policy_obs.shape[0] if isinstance(policy_obs, torch.Tensor) else policy_obs[0].shape[0])
        policy_values = policy_values.view((- 1), batch_size, self._n_action_samples)
        log_probs = n_log_probs.view(1, (- 1), self._n_action_samples)
        return (policy_values - log_probs)

    def _compute_random_is_values(self, obs: TorchObservation) -> torch.Tensor:
        """Importance-sampled Q-values for uniform random actions in [-1, 1].

        The IS correction uses the constant log-density log(0.5^action_size)
        of the uniform distribution on [-1, 1]^action_size.
        """
        repeated_obs = expand_and_repeat_recursively(obs, self._n_action_samples)
        flat_obs = flatten_left_recursively(repeated_obs, dim=1)
        batch_size = (obs.shape[0] if isinstance(obs, torch.Tensor) else obs[0].shape[0])
        flat_shape = ((batch_size * self._n_action_samples), self._action_size)
        zero_tensor = torch.zeros(flat_shape, device=self._device)
        random_actions = zero_tensor.uniform_((- 1.0), 1.0)
        random_values = self._q_func_forwarder.compute_expected_q(flat_obs, random_actions, 'none')
        random_values = random_values.view((- 1), batch_size, self._n_action_samples)
        random_log_probs = math.log((0.5 ** self._action_size))
        return (random_values - random_log_probs)

    def _compute_conservative_loss(self, obs_t: TorchObservation, act_t: torch.Tensor, obs_tp1: TorchObservation) -> torch.Tensor:
        """CQL penalty: alpha * (w * (logsumexp Q(sampled) - mean Q(data)) - threshold)."""
        policy_values_t = self._compute_policy_is_values(obs_t, obs_t)
        policy_values_tp1 = self._compute_policy_is_values(obs_tp1, obs_t)
        random_values = self._compute_random_is_values(obs_t)
        # Concatenate all sampled-action values along the sample dimension.
        target_values = torch.cat([policy_values_t, policy_values_tp1, random_values], dim=2)
        logsumexp = torch.logsumexp(target_values, dim=2, keepdim=True)
        data_values = self._q_func_forwarder.compute_expected_q(obs_t, act_t, 'none')
        loss = (logsumexp.mean(dim=0).mean() - data_values.mean(dim=0).mean())
        scaled_loss = (self._conservative_weight * loss)
        # Alpha is clamped to keep the penalty scale bounded.
        clipped_alpha = self._modules.log_alpha().exp().clamp(0, 1000000.0)[0][0]
        return (clipped_alpha * (scaled_loss - self._alpha_threshold))

    def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
        """Soft (SAC) or deterministic Q backup depending on configuration."""
        if self._soft_q_backup:
            target_value = super().compute_target(batch)
        else:
            target_value = self._compute_deterministic_target(batch)
        return target_value

    def _compute_deterministic_target(self, batch: TorchMiniBatch) -> torch.Tensor:
        """Min-ensemble target Q at the policy's mean (squashed) next action."""
        with torch.no_grad():
            action = self._modules.policy(batch.next_observations).squashed_mu
            return self._targ_q_func_forwarder.compute_target(batch.next_observations, action, reduction='min')
class adaILN(nn.Module):
    """Adaptive Instance-Layer Normalization.

    Blends instance-normalized and layer-normalized activations with a
    learnable per-channel ratio `rho` (initialized to 0.9), then applies the
    externally supplied affine parameters gamma/beta of shape (N, C).
    """

    def __init__(self, num_features, eps=1e-05):
        super(adaILN, self).__init__()
        self.eps = eps
        self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.rho.data.fill_(0.9)

    def forward(self, input, gamma, beta):
        # Instance norm: statistics per (sample, channel) over spatial dims.
        in_mean = torch.mean(input, dim=[2, 3], keepdim=True)
        in_var = torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        # Layer norm: statistics per sample over channel and spatial dims.
        ln_mean = torch.mean(input, dim=[1, 2, 3], keepdim=True)
        ln_var = torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        rho = self.rho.expand(input.shape[0], (- 1), (- 1), (- 1))
        out = (rho * out_in) + ((1 - rho) * out_ln)
        # Broadcast gamma/beta from (N, C) to (N, C, 1, 1).
        return (out * gamma.unsqueeze(2).unsqueeze(3)) + beta.unsqueeze(2).unsqueeze(3)
def main():
    """Interactive stable-fluid demo: simulate and render with Taichi.

    Two modes: --baseline uses the classic per-kernel step (step_orig on
    TexPair buffers); otherwise two pre-compiled compute graphs (g1/g2) are
    alternated so the ping-pong buffer swap is baked into the graphs.
    Keys: ESC quit, r reset, s toggle curl (vorticity) strength, p pause.
    """
    global velocities_pair, pressures_pair, dyes_pair, curl_strength
    paused = False
    parser = argparse.ArgumentParser()
    parser.add_argument('--baseline', action='store_true')
    (args, _) = parser.parse_known_args()
    gui = ti.GUI('Stable Fluid', (res, res))
    md_gen = MouseDataGen()
    # Double-buffered fields: velocity, pressure, and dye (color) ping-pong pairs.
    _velocities = ti.Vector.ndarray(2, float, shape=(res, res))
    _new_velocities = ti.Vector.ndarray(2, float, shape=(res, res))
    _velocity_divs = ti.ndarray(float, shape=(res, res))
    _pressures = ti.ndarray(float, shape=(res, res))
    _new_pressures = ti.ndarray(float, shape=(res, res))
    _dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
    _new_dye_buffer = ti.Vector.ndarray(3, float, shape=(res, res))
    if args.baseline:
        velocities_pair = TexPair(_velocities, _new_velocities)
        pressures_pair = TexPair(_pressures, _new_pressures)
        dyes_pair = TexPair(_dye_buffer, _new_dye_buffer)
    else:
        print('running in graph mode')
        # Symbolic graph arguments; the concrete ndarrays are bound at run time.
        velocities_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'velocities_pair_cur', dtype=ti.math.vec2, ndim=2)
        velocities_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'velocities_pair_nxt', dtype=ti.math.vec2, ndim=2)
        dyes_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'dyes_pair_cur', dtype=ti.math.vec3, ndim=2)
        dyes_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'dyes_pair_nxt', dtype=ti.math.vec3, ndim=2)
        pressures_pair_cur = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pressures_pair_cur', dtype=ti.f32, ndim=2)
        pressures_pair_nxt = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'pressures_pair_nxt', dtype=ti.f32, ndim=2)
        velocity_divs = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'velocity_divs', dtype=ti.f32, ndim=2)
        mouse_data = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'mouse_data', dtype=ti.f32, ndim=1)
        # g1: one simulation step reading "cur" and writing "nxt" buffers.
        g1_builder = ti.graph.GraphBuilder()
        g1_builder.dispatch(advect, velocities_pair_cur, velocities_pair_cur, velocities_pair_nxt)
        g1_builder.dispatch(advect, velocities_pair_cur, dyes_pair_cur, dyes_pair_nxt)
        g1_builder.dispatch(apply_impulse, velocities_pair_nxt, dyes_pair_nxt, mouse_data)
        g1_builder.dispatch(divergence, velocities_pair_nxt, velocity_divs)
        # Jacobi pressure solve, two half-iterations per loop to ping-pong in place.
        for _ in range((p_jacobi_iters // 2)):
            g1_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs)
            g1_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs)
        g1_builder.dispatch(subtract_gradient, velocities_pair_nxt, pressures_pair_cur)
        g1 = g1_builder.compile()
        # g2: the mirror step reading "nxt" and writing "cur" (buffer swap baked in).
        g2_builder = ti.graph.GraphBuilder()
        g2_builder.dispatch(advect, velocities_pair_nxt, velocities_pair_nxt, velocities_pair_cur)
        g2_builder.dispatch(advect, velocities_pair_nxt, dyes_pair_nxt, dyes_pair_cur)
        g2_builder.dispatch(apply_impulse, velocities_pair_cur, dyes_pair_cur, mouse_data)
        g2_builder.dispatch(divergence, velocities_pair_cur, velocity_divs)
        for _ in range((p_jacobi_iters // 2)):
            g2_builder.dispatch(pressure_jacobi, pressures_pair_cur, pressures_pair_nxt, velocity_divs)
            g2_builder.dispatch(pressure_jacobi, pressures_pair_nxt, pressures_pair_cur, velocity_divs)
        g2_builder.dispatch(subtract_gradient, velocities_pair_cur, pressures_pair_cur)
        g2 = g2_builder.compile()
    swap = True
    while gui.running:
        # Keyboard handling: quit / reset / toggle vorticity / pause.
        if gui.get_event(ti.GUI.PRESS):
            e = gui.event
            if (e.key == ti.GUI.ESCAPE):
                break
            elif (e.key == 'r'):
                paused = False
                reset()
            elif (e.key == 's'):
                if curl_strength:
                    curl_strength = 0
                else:
                    curl_strength = 7
            elif (e.key == 'p'):
                paused = (not paused)
        if (not paused):
            _mouse_data = md_gen(gui)
            if args.baseline:
                step_orig(_mouse_data)
                gui.set_image(dyes_pair.cur.to_numpy())
            else:
                # Bind the concrete ndarrays to the graph's symbolic arguments.
                invoke_args = {'mouse_data': _mouse_data, 'velocities_pair_cur': _velocities, 'velocities_pair_nxt': _new_velocities, 'dyes_pair_cur': _dye_buffer, 'dyes_pair_nxt': _new_dye_buffer, 'pressures_pair_cur': _pressures, 'pressures_pair_nxt': _new_pressures, 'velocity_divs': _velocity_divs}
                # Alternate g1/g2 so "cur"/"nxt" roles flip each frame.
                if swap:
                    g1.run(invoke_args)
                    gui.set_image(_dye_buffer.to_numpy())
                    swap = False
                else:
                    g2.run(invoke_args)
                    gui.set_image(_new_dye_buffer.to_numpy())
                    swap = True
        gui.show()
# NOTE(review): the bare `_properties` below looks like a stripped decorator
# (likely `@make_properties` from the dace property system) — as written it is
# a no-op name expression. Confirm against the original module.
_properties
class MultiStateTransformation(PatternTransformation, abc.ABC):
    # Abstract base for transformations matching a multi-state SDFG pattern.
    # NOTE(review): `expressions` presumably carried @classmethod (it takes
    # `cls`) before decorators were stripped — TODO confirm.
    def expressions(cls) -> List[gr.SubgraphView]:
        pass

    # Subclasses decide whether the matched pattern may be applied; the
    # concrete semantics live entirely in the overrides.
    def can_be_applied(self, graph: SDFG, expr_index: int, sdfg: SDFG, permissive: bool=False) -> bool:
        pass
class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype', 'transform_fn'])):
    """Immutable descriptor of a dense feature column.

    Fields: name; dimension (default 1); dtype (default 'float32'); and an
    optional transform_fn applied to raw values.
    """
    __slots__ = ()

    def __new__(cls, name, dimension=1, dtype='float32', transform_fn=None):
        # Funnel through the namedtuple constructor so defaults are filled in.
        return super(DenseFeat, cls).__new__(cls, name, dimension, dtype, transform_fn)

    def __hash__(self):
        # Features hash by name alone — names are treated as unique identifiers.
        return hash(self.name)
class GenEfficientNet(nn.Module):
    """Generic EfficientNet-family model: stem conv -> builder blocks -> head
    conv -> global pool -> dropout -> linear classifier.
    """

    def __init__(self, block_args, num_classes=1000, in_chans=3, num_features=1280, stem_size=32, fix_stem=False, channel_multiplier=1.0, channel_divisor=8, channel_min=None, pad_type='', act_layer=nn.ReLU, drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'):
        super(GenEfficientNet, self).__init__()
        self.drop_rate = drop_rate
        # Unless the stem is fixed, scale its width with the channel multiplier.
        if (not fix_stem):
            stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)
        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        # NOTE(review): **norm_kwargs requires a dict — the default None would
        # raise here; presumably callers always pass a dict. TODO confirm.
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
        in_chs = stem_size
        # The builder expands block_args into the full stack of inverted-residual blocks.
        builder = EfficientNetBuilder(channel_multiplier, channel_divisor, channel_min, pad_type, act_layer, se_kwargs, norm_layer, norm_kwargs, drop_connect_rate)
        self.blocks = nn.Sequential(*builder(in_chs, block_args))
        in_chs = builder.in_chs
        # 1x1 head conv projects to the final feature width.
        self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type)
        self.bn2 = norm_layer(num_features, **norm_kwargs)
        self.act2 = act_layer(inplace=True)
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(num_features, num_classes)
        # Weight init: 'goog' mimics the original TF implementation.
        for (n, m) in self.named_modules():
            if (weight_init == 'goog'):
                initialize_weight_goog(m, n)
            else:
                initialize_weight_default(m, n)

    def features(self, x):
        """Run the convolutional trunk (no pooling/classifier)."""
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        x = self.conv_head(x)
        x = self.bn2(x)
        x = self.act2(x)
        return x

    def as_sequential(self):
        """Flatten the whole model into an equivalent nn.Sequential."""
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.features(x)
        x = self.global_pool(x)
        x = x.flatten(1)
        # Dropout only when configured (and implicitly only during training).
        if (self.drop_rate > 0.0):
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        return self.classifier(x)
class CorrelationFunction(Function):
    """autograd.Function wrapping the compiled correlation op in `ext_module`.

    Bug fix: the bare `_differentiable` line before `backward` was a mangled
    `@once_differentiable` decorator (a NameError as written), and the three
    methods had lost their `@staticmethod` decorators required by the modern
    torch.autograd.Function API. Reconstructed following mmcv's
    CorrelationFunction -- confirm against upstream.
    """

    # Local import keeps this fix self-contained; fold into the module imports.
    from torch.autograd.function import once_differentiable as _once_differentiable

    @staticmethod
    def forward(ctx, input1, input2, kernel_size=1, max_displacement=1, stride=1, padding=1, dilation=1, dilation_patch=1):
        """Compute the correlation volume of `input1` against `input2`.

        All geometry parameters are normalized to (H, W) pairs and stashed on
        `ctx` for the backward pass.
        """
        ctx.save_for_backward(input1, input2)
        (kH, kW) = ctx.kernel_size = _pair(kernel_size)
        # Displacement window: max_displacement in each direction plus center.
        patch_size = ((max_displacement * 2) + 1)
        ctx.patch_size = patch_size
        (dH, dW) = ctx.stride = _pair(stride)
        (padH, padW) = ctx.padding = _pair(padding)
        (dilationH, dilationW) = ctx.dilation = _pair(dilation)
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch = _pair(dilation_patch)
        output_size = CorrelationFunction._output_size(ctx, input1)
        output = input1.new_zeros(output_size)
        ext_module.correlation_forward(input1, input2, output, kH=kH, kW=kW, patchH=patch_size, patchW=patch_size, padH=padH, padW=padW, dilationH=dilationH, dilationW=dilationW, dilation_patchH=dilation_patchH, dilation_patchW=dilation_patchW, dH=dH, dW=dW)
        return output

    @staticmethod
    @_once_differentiable
    def backward(ctx, grad_output):
        """Backprop through the correlation op; only the two inputs get grads."""
        (input1, input2) = ctx.saved_tensors
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        (dilation_patchH, dilation_patchW) = ctx.dilation_patch
        (dH, dW) = ctx.stride
        grad_input1 = torch.zeros_like(input1)
        grad_input2 = torch.zeros_like(input2)
        ext_module.correlation_backward(grad_output, input1, input2, grad_input1, grad_input2, kH=kH, kW=kW, patchH=patch_size, patchW=patch_size, padH=padH, padW=padW, dilationH=dilationH, dilationW=dilationW, dilation_patchH=dilation_patchH, dilation_patchW=dilation_patchW, dH=dH, dW=dW)
        # None for each non-tensor forward argument (kernel_size .. dilation_patch).
        return (grad_input1, grad_input2, None, None, None, None, None, None)

    @staticmethod
    def _output_size(ctx, input1):
        """Output shape: (N, patch, patch, oH, oW) with conv-style oH/oW arithmetic."""
        (iH, iW) = (input1.size(2), input1.size(3))
        batch_size = input1.size(0)
        (kH, kW) = ctx.kernel_size
        patch_size = ctx.patch_size
        (dH, dW) = ctx.stride
        (padH, padW) = ctx.padding
        (dilationH, dilationW) = ctx.dilation
        dilatedKH = (((kH - 1) * dilationH) + 1)
        dilatedKW = (((kW - 1) * dilationW) + 1)
        oH = int(((((iH + (2 * padH)) - dilatedKH) / dH) + 1))
        oW = int(((((iW + (2 * padW)) - dilatedKW) / dW) + 1))
        output_size = (batch_size, patch_size, patch_size, oH, oW)
        return output_size

    del _once_differentiable
@DETECTORS.register_module()
class PSENet(TextDetectorMixin, SingleStageTextDetector):
    """PSENet text detector: a single-stage detector plus TextDetectorMixin utilities.

    NOTE(review): the original line above the class was the bare expression
    `_module()` -- a mangled registration decorator. Reconstructed as
    `@DETECTORS.register_module()` following the mmdet/mmocr convention;
    confirm the registry name against the file's imports.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, show_score=False, init_cfg=None):
        # Initialize both bases explicitly: the detector body first, then the
        # mixin's show_score bookkeeping.
        SingleStageTextDetector.__init__(self, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
        TextDetectorMixin.__init__(self, show_score)
def main(_):
    """Build a TensorRT engine from the exported ONNX model.

    For INT8 precision, a calibrator is first constructed from images found
    in FLAGS.calibration_images_dir; otherwise no calibrator is used.
    """
    verbosity = logging.DEBUG if FLAGS.debug else logging.INFO
    logging.set_verbosity(verbosity)
    params = Config(FLAGS.config_path).params
    export_dir = os.path.join(FLAGS.export_dir, params.experiment.name, 'onnx_tensorrt')
    calibrator = None
    if FLAGS.precision == 'int8':
        preprocessing = params.dataloader_params.preprocessing
        # Collect all calibration image paths across supported extensions.
        image_paths = []
        for pattern in ('*.png', '*.jpg', '*.jpeg'):
            image_paths += glob(os.path.join(FLAGS.calibration_images_dir, pattern))
        generator = ImageGenerator(image_paths=image_paths, max_images=FLAGS.num_calibration_images, batch_size=FLAGS.calibration_batch_size, target_shape=params.input.input_shape, channel_mean=preprocessing.mean, channel_stddev=preprocessing.stddev, pixel_scale=preprocessing.pixel_scale)
        calibrator = get_calibrator(method=FLAGS.calibration_method, image_generator=generator, cache_file_path=os.path.join(export_dir, 'trt.cache'))
    builder = TensorRTBuilder(onnx_path=os.path.join(export_dir, 'model.onnx'), engine_path=os.path.join(export_dir, 'model.trt'), workspace=4, precision=FLAGS.precision, calibrator=calibrator, dla_core=FLAGS.dla_core, debug=FLAGS.debug)
    builder.build_engine()
class Dstc8DataProcessorTest(absltest.TestCase):
    """Unit tests for data_utils.Dstc8DataProcessor tokenization and example creation."""

    def setUp(self):
        # Processor over the bundled test data, capped at small slot/intent limits.
        self._processor = data_utils.Dstc8DataProcessor(dstc8_data_dir=_TEST_DATA_DIR, dataset_config=config.DatasetConfig(file_ranges={'train': range(1), 'dev': None, 'test': None}, max_num_cat_slot=6, max_num_noncat_slot=6, max_num_value_per_cat_slot=4, max_num_intent=2), vocab_file=_VOCAB_FILE, do_lower_case=_DO_LOWER_CASE)
        super(Dstc8DataProcessorTest, self).setUp()

    def test_tokenizer(self):
        """_tokenize returns (tokens, char->token alignments, token->char spans)."""
        test_utt_1 = 'Watch, Hellboy?'
        (utt_1_tokens, utt_1_aligns, utt_1_inv_alignments) = self._processor._tokenize(test_utt_1)
        expected_utt_1_tokens = ['watch', ',', 'hell', '##boy', '?']
        expected_utt_1_aligns = {0: 0, 4: 0, 5: 1, 7: 2, 13: 3, 14: 4}
        expected_utt_1_inv_alignments = [(0, 4), (5, 5), (7, 13), (7, 13), (14, 14)]
        self.assertEqual(utt_1_tokens, expected_utt_1_tokens)
        self.assertEqual(utt_1_aligns, expected_utt_1_aligns)
        self.assertEqual(utt_1_inv_alignments, expected_utt_1_inv_alignments)
        # Multiple consecutive spaces must not produce empty tokens.
        test_utt_2 = 'Extra , spaces'
        (utt_2_tokens, utt_2_aligns, utt_2_inv_alignments) = self._processor._tokenize(test_utt_2)
        # Bug fix: removed a dead copy-paste reassignment of
        # `expected_utt_1_inv_alignments` that was never read afterwards.
        self.assertEqual(utt_2_tokens, ['extra', ',', 'spaces'])
        self.assertEqual(utt_2_aligns, {0: 0, 4: 0, 7: 1, 10: 2, 15: 2})
        self.assertEqual(utt_2_inv_alignments, [(0, 4), (7, 7), (10, 15)])
        # Literal '#' characters in the input must survive wordpiece splitting.
        test_utt_3 = 'Extra## ##abc'
        (utt_3_tokens, utt_3_aligns, utt_3_inv_alignments) = self._processor._tokenize(test_utt_3)
        self.assertEqual(utt_3_tokens, ['extra', '#', '#', '#', '#', 'a', '##b', '##c'])
        self.assertEqual(utt_3_aligns, {0: 0, 4: 0, 5: 1, 6: 2, 8: 3, 9: 4, 10: 5, 12: 7})
        self.assertEqual(utt_3_inv_alignments, [(0, 4), (5, 5), (6, 6), (8, 8), (9, 9), (10, 12), (10, 12), (10, 12)])

    def test_get_dialog_examples(self):
        """Each generated example's readable summary matches the golden dialogue turns."""
        examples = self._processor.get_dialog_examples(_DATASET)
        expected_summaries = [{'utt_tok_mask_pairs': [('[CLS]', 0), ('[SEP]', 0), ('i', 1), ("'", 1), ('m', 1), ('looking', 1), ('for', 1), ('apartments', 1), ('.', 1), ('[SEP]', 1)], 'utt_len': 10, 'num_categorical_slots': 4, 'num_categorical_slot_values': [2, 4, 4, 2, 0, 0], 'num_noncategorical_slots': 3, 'service_name': 'Homes_1', 'active_intent': 'FindApartment', 'slot_values_in_state': {}}, {'utt_tok_mask_pairs': [('[CLS]', 0), ('which', 0), ('area', 0), ('are', 0), ('you', 0), ('looking', 0), ('in', 0), ('?', 0), ('[SEP]', 0), ('i', 1), ('want', 1), ('an', 1), ('apartment', 1), ('in', 1), ('sa', 1), ('##n', 1), ('j', 1), ('##ose', 1), ('.', 1), ('[SEP]', 1)], 'utt_len': 20, 'num_categorical_slots': 4, 'num_categorical_slot_values': [2, 4, 4, 2, 0, 0], 'num_noncategorical_slots': 3, 'service_name': 'Homes_1', 'active_intent': 'FindApartment', 'slot_values_in_state': {'area': 'san jose'}}, {'utt_tok_mask_pairs': [('[CLS]', 0), ('how', 0), ('many', 0), ('bedrooms', 0), ('do', 0), ('you', 0), ('want', 0), ('?', 0), ('[SEP]', 0), ('2', 1), ('bedrooms', 1), (',', 1), ('please', 1), ('.', 1), ('[SEP]', 1)], 'utt_len': 15, 'num_categorical_slots': 4, 'num_categorical_slot_values': [2, 4, 4, 2, 0, 0], 'num_noncategorical_slots': 3, 'service_name': 'Homes_1', 'active_intent': 'FindApartment', 'slot_values_in_state': {'number_of_beds': '2'}}, {'utt_tok_mask_pairs': [('[CLS]', 0), ('there', 0), ("'", 0), ('s', 0), ('a', 0), ('nice', 0), ('property', 0), ('called', 0), ('a', 0), ('##ege', 0), ('##na', 0), ('at', 0), ('129', 0), ('##0', 0), ('sa', 0), ('##n', 0), ('to', 0), ('##mas', 0), ('a', 0), ('##quin', 0), ('##o', 0), ('road', 0), ('.', 0), ('it', 0), ('has', 0), ('2', 0), ('bedrooms', 0), (',', 0), ('1', 0), ('bath', 0), (',', 0), ('and', 0), ('rent', 0), ('##s', 0), ('for', 0), ('$', 0), ('2', 0), (',', 0), ('650', 0), ('a', 0), ('month', 0), ('.', 0), ('[SEP]', 0), ('can', 1), ('you', 1), ('find', 1), ('me', 1), ('a', 1), ('three', 1), ('bedroom', 1), ('apartment', 1), ('in', 1), ('liver', 1), ('##more', 1), ('?', 1), ('[SEP]', 1)], 'utt_len': 56, 'num_categorical_slots': 4, 'num_categorical_slot_values': [2, 4, 4, 2, 0, 0], 'num_noncategorical_slots': 3, 'service_name': 'Homes_1', 'active_intent': 'FindApartment', 'slot_values_in_state': {'number_of_beds': '3', 'area': 'livermore'}}, {'utt_tok_mask_pairs': [('[CLS]', 0), ('there', 0), ("'", 0), ('s', 0), ('a', 0), ('##cacia', 0), ('capital', 0), ('co', 0), ('##r', 0), ('-', 0), ('iron', 0), ('##wood', 0), ('a', 0), ('##p', 0), ('at', 0), ('56', 0), ('##43', 0), ('ch', 0), ('##ar', 0), ('##lot', 0), ('##te', 0), ('way', 0), ('.', 0), ('it', 0), ('has', 0), ('3', 0), ('bedrooms', 0), (',', 0), ('3', 0), ('baths', 0), (',', 0), ('and', 0), ('rent', 0), ('##s', 0), ('for', 0), ('$', 0), ('4', 0), (',', 0), ('05', 0), ('##0', 0), ('a', 0), ('month', 0), ('.', 0), ('[SEP]', 0), ('that', 1), ('one', 1), ('sounds', 1), ('good', 1), ('.', 1), ('thanks', 1), (',', 1), ('that', 1), ("'", 1), ('s', 1), ('all', 1), ('i', 1), ('need', 1), ('.', 1), ('[SEP]', 1)], 'utt_len': 59, 'num_categorical_slots': 4, 'num_categorical_slot_values': [2, 4, 4, 2, 0, 0], 'num_noncategorical_slots': 3, 'service_name': 'Homes_1', 'active_intent': 'FindApartment', 'slot_values_in_state': {'property_name': 'acacia capital cor - ironwood ap'}}]
        for (example, gold) in zip(examples, expected_summaries):
            self.assertEqual(example.readable_summary, gold)
def dot(fun1: Function, fun2: Function, **kwargs) -> DotProduct:
    """Return the DotProduct of *fun1* and *fun2*.

    Non-Function operands are promoted by wrapping them in Constant; extra
    keyword arguments are forwarded to DotProduct.
    """
    operands = [f if isinstance(f, Function) else Constant(f) for f in (fun1, fun2)]
    return DotProduct(operands[0], operands[1], **kwargs)
@cached_function
def fq(n, q=None):
    """Return prod_{i=0}^{n-1} (1 - q^(-i-1)), with q defaulting to the generator of ZZ['q'].

    NOTE(review): the bare `_function` line preceding this def was a mangled
    decorator; reconstructed as Sage's `@cached_function` -- confirm against
    the module's imports.
    """
    if q is None:
        q = ZZ['q'].gen()
    return prod((1 - (q ** ((- i) - 1))) for i in range(n))
class SO3Shortcut(Module):
    """Residual shortcut for SO(3) feature maps.

    Acts as the identity when input and output widths/bandwidths already
    match; otherwise applies a single-grid-point SO3Convolution to adapt
    channel count and bandwidth.
    """

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        # Bandwidth can only be reduced, never increased, by the shortcut.
        assert b_out <= b_in
        needs_projection = (nfeature_in != nfeature_out) or (b_in != b_out)
        if needs_projection:
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))
        else:
            self.conv = None

    def forward(self, x):
        """Pass `x` through the adapter conv, or return it untouched."""
        if self.conv is None:
            return x
        return self.conv(x)
class DAVIS2016(Dataset):
    """DAVIS 2016 video-segmentation dataset (image + binary mask pairs).

    Two modes:
    - ``seq_name is None``: load every sequence listed in train_seqs.txt /
      val_seqs.txt under ``db_root_dir``.
    - ``seq_name`` given: load only that sequence; in train mode the list is
      reduced to the first frame repeated twice (one-shot fine-tuning setup).
    """

    def __init__(self, train=True, inputRes=None, db_root_dir='./DAVIS', transform=None, meanval=(104.00699, 116.66877, 122.67892), seq_name=None):
        # inputRes: optional (H, W) to resize images/labels to.
        # meanval: per-channel BGR mean subtracted from images (cv2 loads BGR).
        self.train = train
        self.inputRes = inputRes
        self.db_root_dir = db_root_dir
        self.transform = transform
        self.meanval = meanval
        self.seq_name = seq_name
        if self.train:
            fname = 'train_seqs'
        else:
            fname = 'val_seqs'
        if (self.seq_name is None):
            # Multi-sequence mode: gather frames from every listed sequence.
            with open(os.path.join(db_root_dir, (fname + '.txt'))) as f:
                seqs = f.readlines()
            img_list = []
            labels = []
            for seq in seqs:
                # np.sort keeps frames in temporal order (zero-padded names).
                images = np.sort(os.listdir(os.path.join(db_root_dir, 'JPEGImages/480p/', seq.strip())))
                images_path = list(map((lambda x: os.path.join('JPEGImages/480p/', seq.strip(), x)), images))
                img_list.extend(images_path)
                lab = np.sort(os.listdir(os.path.join(db_root_dir, 'Annotations/480p/', seq.strip())))
                lab_path = list(map((lambda x: os.path.join('Annotations/480p/', seq.strip(), x)), lab))
                labels.extend(lab_path)
        else:
            # Single-sequence mode.
            names_img = np.sort(os.listdir(os.path.join(db_root_dir, 'JPEGImages/480p/', str(seq_name))))
            img_list = list(map((lambda x: os.path.join('JPEGImages/480p/', str(seq_name), x)), names_img))
            name_label = np.sort(os.listdir(os.path.join(db_root_dir, 'Annotations/480p/', str(seq_name))))
            labels = list(map((lambda x: os.path.join('Annotations/480p/', str(seq_name), x)), name_label))
            if self.train:
                # One-shot training: use only the first (annotated) frame, twice.
                img_list = [img_list[0], img_list[0]]
                labels = [labels[0], labels[0]]
        assert (len(labels) == len(img_list))
        self.img_list = img_list
        print(len(img_list))
        self.labels = labels
        print((('Done initializing ' + fname) + ' Dataset'))

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        """Return {'image', 'label'} (plus 'fname' in single-sequence mode), transformed."""
        (img, gt) = self.make_img_gt_pair(idx)
        sample = {'image': img, 'label': gt}
        if (self.seq_name is not None):
            fname = os.path.join(self.seq_name, ('%05d' % idx))
            sample['fname'] = fname
        if (self.transform is not None):
            sample = self.transform(sample)
        return sample

    def make_img_gt_pair(self, idx):
        """Load one frame and its mask as float32 arrays.

        Image is mean-subtracted; mask is normalized to [0, 1]. If the label
        entry is None, an all-zero mask of the image's spatial size is used.
        NOTE(review): when labels[idx] is None and inputRes is set, the zero
        mask is NOT resized -- confirm callers never hit that combination.
        """
        img = cv2.imread(os.path.join(self.db_root_dir, self.img_list[idx]))
        if (self.labels[idx] is not None):
            # Read mask as single-channel grayscale.
            label = cv2.imread(os.path.join(self.db_root_dir, self.labels[idx]), 0)
        else:
            gt = np.zeros(img.shape[:(- 1)], dtype=np.uint8)
        if (self.inputRes is not None):
            img = imresize(img, self.inputRes)
            if (self.labels[idx] is not None):
                # Nearest-neighbor keeps the mask binary after resizing.
                label = imresize(label, self.inputRes, interp='nearest')
        img = np.array(img, dtype=np.float32)
        img = np.subtract(img, np.array(self.meanval, dtype=np.float32))
        if (self.labels[idx] is not None):
            gt = np.array(label, dtype=np.float32)
            # Guard against an all-zero mask (max() would be 0).
            gt = (gt / np.max([gt.max(), 1e-08]))
        return (img, gt)

    def get_img_size(self):
        """Return [height, width] of the first frame on disk."""
        img = cv2.imread(os.path.join(self.db_root_dir, self.img_list[0]))
        return list(img.shape[:2])
class ResNet(nn.Module):
    """Configurable ResNet backbone/classifier.

    Supports BasicBlock / Bottleneck / AlignedBottleneck blocks, an optional
    3x(3x3) deep stem, average-pool downsampling shortcuts, per-stage conv and
    context (``ctx``) variants, and split-attention (``radix``).
    Kept byte-identical apart from documentation: module creation order here
    determines parameter/state_dict naming, so it must not be reshuffled.
    """

    def __init__(self, bottleneck=True, aligned=False, use_3x3x3stem=False, stride_3x3=False, avg_down=False, stem_width=64, base_width=64, layers=(3, 4, 6, 3), radix=1, stage_with_conv=('Conv2d', 'Conv2d', 'Conv2d', 'Conv2d'), norm='BN', stage_with_ctx=('', '', '', ''), num_classes=1000):
        super(ResNet, self).__init__()
        # Block type priority: aligned > bottleneck > basic.
        if aligned:
            block = AlignedBottleneck
        elif bottleneck:
            block = Bottleneck
        else:
            block = BasicBlock
        self.expansion = block.expansion
        self.use_3x3x3stem = use_3x3x3stem
        self.stride_3x3 = stride_3x3
        self.avg_down = avg_down
        self.base_width = base_width
        self.radix = radix
        self.norm = norm
        self.inplanes = stem_width
        if (not self.use_3x3x3stem):
            # Classic single 7x7/2 stem.
            self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False)
            self.bn1 = make_norm(self.inplanes, norm=norm.replace('Mix', ''))
        else:
            # Deep stem: three 3x3 convs (first strided), half width until the last.
            self.conv1 = nn.Conv2d(3, (self.inplanes // 2), 3, 2, 1, bias=False)
            self.bn1 = make_norm((self.inplanes // 2), norm=norm.replace('Mix', ''))
            self.conv2 = nn.Conv2d((self.inplanes // 2), (self.inplanes // 2), 3, 1, 1, bias=False)
            self.bn2 = make_norm((self.inplanes // 2), norm=norm.replace('Mix', ''))
            self.conv3 = nn.Conv2d((self.inplanes // 2), self.inplanes, 3, 1, 1, bias=False)
            self.bn3 = make_norm(self.inplanes, norm=norm.replace('Mix', ''))
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 downsample by stride 2.
        self.layer1 = self._make_layer(block, 64, layers[0], 1, conv=stage_with_conv[0], ctx=stage_with_ctx[0])
        self.layer2 = self._make_layer(block, 128, layers[1], 2, conv=stage_with_conv[1], ctx=stage_with_ctx[1])
        self.layer3 = self._make_layer(block, 256, layers[2], 2, conv=stage_with_conv[2], ctx=stage_with_ctx[2])
        self.layer4 = self._make_layer(block, 512, layers[3], 2, conv=stage_with_conv[3], ctx=stage_with_ctx[3])
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear((512 * self.expansion), num_classes)
        self._init_weights()

    def stage_out_dim(self):
        """Channel widths after the stem and each of the four stages."""
        return [64, (64 * self.expansion), (128 * self.expansion), (256 * self.expansion), (512 * self.expansion)]

    def stage_out_spatial(self):
        """Spatial scale (relative to the input) after the stem and each stage."""
        return [(1 / 2.0), (1 / 4.0), (1 / 8.0), (1 / 16.0), (1 / 32.0)]

    def _init_weights(self):
        """Kaiming conv init, unit norm layers, small-variance fc, then special cases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                # Mixture norms manage their own affine parameters.
                if (not isinstance(m, (ops.MixtureBatchNorm2d, ops.MixtureGroupNorm))):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.0001)
                nn.init.constant_(m.bias, 0)
        # Deformable convs start from zero offsets.
        for m in self.modules():
            if isinstance(m, ops.DeformConvPack):
                nn.init.constant_(m.conv_offset.weight, 0)
                nn.init.constant_(m.conv_offset.bias, 0)
            if isinstance(m, ops.ModulatedDeformConvPack):
                nn.init.constant_(m.conv_offset_mask.weight, 0)
                nn.init.constant_(m.conv_offset_mask.bias, 0)
        # Zero-init the last norm in each residual branch so blocks start as identity.
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
            elif isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, AlignedBottleneck):
                nn.init.constant_(m.bn.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, conv='Conv2d', ctx=''):
        """Build one residual stage of `blocks` blocks; first block may downsample."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if self.avg_down:
                # ResNet-D style shortcut: avg-pool then 1x1 conv (stride 1).
                downsample = nn.Sequential(nn.AvgPool2d(kernel_size=stride, stride=stride), nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=1, bias=False), make_norm((planes * block.expansion), norm=self.norm.replace('Mix', '')))
            else:
                downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), make_norm((planes * block.expansion), norm=self.norm.replace('Mix', '')))
        layers = []
        layers.append(block(self.inplanes, planes, self.base_width, 1, stride, dilation, radix=self.radix, downsample=downsample, stride_3x3=self.stride_3x3, conv=conv, norm=self.norm, ctx=ctx))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, self.base_width, 1, 1, dilation, radix=self.radix, downsample=None, stride_3x3=self.stride_3x3, conv=conv, norm=self.norm, ctx=ctx))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> 4 stages -> global pool -> classifier logits."""
        if (not self.use_3x3x3stem):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        else:
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.conv2(x)
            x = self.bn2(x)
            x = self.relu(x)
            x = self.conv3(x)
            x = self.bn3(x)
            x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def test_parens():
    """Escaped bracket tokens (-LRB-/-RRB-) must parse to literal '(' and ')' leaves."""
    trees = tree_reader.read_trees('(-LRB- -LRB-) (-RRB- -RRB-)')
    assert len(trees) == 2
    expected = [('-LRB-', '(', '(-LRB- -LRB-)'), ('-RRB-', ')', '(-RRB- -RRB-)')]
    for tree, (label, leaf, rendered) in zip(trees, expected):
        assert tree.label == label
        assert tree.children[0].label == leaf
        # Round-trip: formatting the tree reproduces the original bracketing.
        assert '{}'.format(tree) == rendered
def test_isa_head():
    """ISAHead must keep the spatial size and emit one channel per class."""
    feature_maps = [torch.randn(1, 8, 23, 23)]
    head = ISAHead(in_channels=8, channels=4, num_classes=19, isa_channels=4, down_factor=(8, 8))
    if torch.cuda.is_available():
        (head, feature_maps) = to_cuda(head, feature_maps)
    out = head(feature_maps)
    assert out.shape == (1, head.num_classes, 23, 23)
def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    """Three-phase quantization smoke test for a language model.

    1. Train the baseline LM for one epoch.
    2. Continue training with scalar quantization noise (--quant-noise-scalar).
    3. Resume from the last checkpoint and quantize via the iterative
       product-quantization config (transformer_quantization_config.yaml).
    Note the flag lists differ deliberately between phases (max-tokens 500 vs
    50, max-epoch vs max-update); `run_validation` is currently unused here.
    """
    # Phase 1: baseline training run.
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(train_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', '0'] + (extra_flags or [])))
    train.main(train_args)
    # Phase 2: scalar quantization noise training.
    scalar_quant_train_parser = options.get_training_parser()
    scalar_quant_train_args = options.parse_args_and_arch(scalar_quant_train_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-update', '3', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', '0', '--quant-noise-scalar', '0.5'] + (extra_flags or [])))
    train.main(scalar_quant_train_args)
    # Phase 3: product quantization from the last checkpoint.
    quantize_parser = options.get_training_parser()
    quantize_args = options.parse_args_and_arch(quantize_parser, (['--task', 'language_modeling', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr', '0.0001', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15', '--max-tokens', '50', '--tokens-per-sample', '50', '--max-update', '6', '--no-progress-bar', '--distributed-world-size', '1', '--ddp-backend', 'no_c10d', '--num-workers', '0', '--restore-file', os.path.join(data_dir, 'checkpoint_last.pt'), '--reset-optimizer', '--quantization-config-path', os.path.join(os.path.dirname(__file__), 'transformer_quantization_config.yaml')] + (extra_flags or [])))
    train.main(quantize_args)
class VoxCeleb1SV(Corpus):
    """VoxCeleb1 speaker-verification corpus.

    Builds train/valid/test wav lists from the official split file, maps
    speaker ids to integer labels, and loads the verification trial pairs.

    Bug fix: `path2uid`, `path2data` and `format_test_trials` are written
    against a `cls` first parameter and `format_path` takes no receiver at
    all, yet all four had lost their decorators -- called through `self`
    (as __init__ does) every argument shifted by one. Restored
    @classmethod / @staticmethod accordingly.
    """

    def __init__(self, dataset_root: str, download_dir: str, force_download: bool=True) -> None:
        self.dataset_root = Path(dataset_root).resolve()
        (train_path, valid_path, test_path, speakerid2label) = self.format_path(self.dataset_root, download_dir, force_download)
        self.categories = speakerid2label
        self.train_data = self.path2data(train_path, speakerid2label)
        self.valid_data = self.path2data(valid_path, speakerid2label)
        # Test speakers are held out: no labels; scoring happens via trials.
        self.test_data = {self.path2uid(path): {'wav_path': path, 'label': None} for path in test_path}
        self.test_trials = self.format_test_trials(download_dir, force_download)

    @classmethod
    def path2uid(cls, path):
        """'.../speaker/clip/0001.wav' -> 'speaker-clip-0001.wav' utterance id."""
        return '-'.join(Path(path).parts[(- 3):])

    @classmethod
    def path2data(cls, paths, speakerid2label):
        """Map each wav path to {'wav_path', 'label'} keyed by its utterance id."""
        data = {cls.path2uid(path): {'wav_path': path, 'label': speakerid2label[Path(path).parts[(- 3)]]} for path in paths}
        return data

    @staticmethod
    def format_path(dataset_root, download_dir, force_download: bool):
        """Resolve wav paths for the train/valid/test splits from the split file.

        Returns (train, valid, test, speakerid2label). Speaker ids 10270-10309
        form the verification test set per the official protocol.
        """
        split_filename = SPLIT_FILE_URL.split('/')[(- 1)]
        split_filepath = (Path(download_dir) / split_filename)
        _download(split_filepath, SPLIT_FILE_URL, refresh=force_download)
        # Close the file deterministically (the original leaked the handle).
        with open(split_filepath, 'r') as split_file:
            usage_list = split_file.readlines()
        (train, valid, test) = ([], [], [])
        test_list = [item for item in usage_list if (int(item.split(' ')[1].split('/')[0][2:]) in range(10270, 10310))]
        usage_list = list(set(usage_list).difference(set(test_list)))
        test_list = [item.split(' ')[1] for item in test_list]
        logging.info('search specified wav name for each split')
        speakerids = []
        for string in tqdm(usage_list, desc='Search train, dev wavs'):
            pair = string.split()
            # index 1/3 -> train, 2 -> valid (official split codes).
            index = pair[0]
            x = list(dataset_root.glob(('dev/wav/' + pair[1])))
            speakerStr = pair[1].split('/')[0]
            if (speakerStr not in speakerids):
                speakerids.append(speakerStr)
            if ((int(index) == 1) or (int(index) == 3)):
                train.append(str(x[0]))
            elif (int(index) == 2):
                valid.append(str(x[0]))
            else:
                raise ValueError
        # Sort before enumerating so labels are deterministic across runs.
        speakerids = sorted(speakerids)
        speakerid2label = {}
        for (idx, spk) in enumerate(speakerids):
            speakerid2label[spk] = idx
        for string in tqdm(test_list, desc='Search test wavs'):
            x = list(dataset_root.glob(('test/wav/' + string.strip())))
            test.append(str(x[0]))
        logging.info(f'finish searching wav: train {len(train)}; valid {len(valid)}; test {len(test)} files found')
        return (train, valid, test, speakerid2label)

    @classmethod
    def format_test_trials(cls, download_dir: str, force_download: bool):
        """Return verification trials as (label, uid_enroll, uid_test) tuples."""
        trial_filename = TRIAL_FILE_URL.split('/')[(- 1)]
        trial_filepath = (Path(download_dir) / trial_filename)
        _download(trial_filepath, TRIAL_FILE_URL, refresh=force_download)
        with open(trial_filepath, 'r') as trial_file:
            trial_list = trial_file.readlines()
        test_trials = []
        for string in tqdm(trial_list, desc='Prepare testing trials'):
            pair = string.split()
            test_trials.append((int(pair[0]), cls.path2uid(pair[1]), cls.path2uid(pair[2])))
        return test_trials

    @property
    def all_data(self):
        return (self.train_data, self.valid_data, self.test_data, self.test_trials)

    def data_split_ids(self):
        # Splits are fixed by the official protocol; nothing to report here.
        return None
def from_music21_part(part: Part, resolution: int=DEFAULT_RESOLUTION) -> Union[(Track, List[Track])]:
    """Convert a music21 Part into a Track, or a list of Tracks when the part
    can be partitioned by instrument."""
    instruments = partitionByInstrument(part)
    if instruments:
        return [parse_track(instrument, resolution) for instrument in instruments]
    # No per-instrument partition available: parse the part as a single track.
    return parse_track(part, resolution)
class ContinuousQFunctionForwarder(metaclass=ABCMeta):
    """Abstract interface for forwarding continuous-action Q-functions.

    Bug fix: the class uses ABCMeta and stub bodies, but the @abstractmethod
    decorators had been stripped, so the "interface" was silently
    instantiable and its stubs returned None. Restored the decorators.
    """

    # Local import keeps this fix self-contained; fold into the module's
    # `from abc import ABCMeta, abstractmethod` import.
    from abc import abstractmethod as _abstractmethod

    @_abstractmethod
    def compute_expected_q(self, x: TorchObservation, action: torch.Tensor) -> torch.Tensor:
        """Return the expected Q-value for (x, action) -- implemented by subclasses."""
        pass

    @_abstractmethod
    def compute_error(self, observations: TorchObservation, actions: torch.Tensor, rewards: torch.Tensor, target: torch.Tensor, terminals: torch.Tensor, gamma: Union[(float, torch.Tensor)]=0.99, reduction: str='mean') -> torch.Tensor:
        """Return the TD error against `target` -- implemented by subclasses."""
        pass

    @_abstractmethod
    def compute_target(self, x: TorchObservation, action: torch.Tensor) -> torch.Tensor:
        """Return the target Q-value for (x, action) -- implemented by subclasses."""
        pass

    del _abstractmethod
class DCGAN_D_nobn(nn.Module):
    """DCGAN discriminator without batch normalization.

    Strided 4x4 convs (LeakyReLU activations) halve the spatial size down to
    4x4, preceded by optional 3x3 "extra" layers, then a final 4x4 conv maps
    to a single scalar. The forward pass returns the batch-mean critic score
    as a 1-element tensor (WGAN-style).
    """

    def __init__(self, isize, nz, nc, ndf, ngpu, n_extra_layers=0):
        super(DCGAN_D_nobn, self).__init__()
        self.ngpu = ngpu
        assert isize % 16 == 0, 'isize has to be a multiple of 16'
        pipeline = nn.Sequential()
        # Entry layer: nc -> ndf channels, spatial size halved.
        pipeline.add_module('initial:conv:{0}-{1}'.format(nc, ndf), nn.Conv2d(nc, ndf, 4, 2, 1, bias=False))
        pipeline.add_module('initial:relu:{0}'.format(ndf), nn.LeakyReLU(0.2, inplace=True))
        spatial = isize / 2
        channels = ndf
        # Optional extra 3x3 layers that keep both size and width.
        for extra_idx in range(n_extra_layers):
            pipeline.add_module('extra-layers-{0}:{1}:conv'.format(extra_idx, channels), nn.Conv2d(channels, channels, 3, 1, 1, bias=False))
            pipeline.add_module('extra-layers-{0}:{1}:relu'.format(extra_idx, channels), nn.LeakyReLU(0.2, inplace=True))
        # Pyramid: halve spatial size and double channels until 4x4.
        while spatial > 4:
            pipeline.add_module('pyramid:{0}-{1}:conv'.format(channels, channels * 2), nn.Conv2d(channels, channels * 2, 4, 2, 1, bias=False))
            pipeline.add_module('pyramid:{0}:relu'.format(channels * 2), nn.LeakyReLU(0.2, inplace=True))
            channels = channels * 2
            spatial = spatial / 2
        # Final 4x4 conv collapses the 4x4 map to a single scalar per sample.
        pipeline.add_module('final:{0}-{1}:conv'.format(channels, 1), nn.Conv2d(channels, 1, 4, 1, 0, bias=False))
        self.main = pipeline

    def forward(self, input):
        if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        # Mean over the batch, returned as a 1-element tensor.
        return output.mean(0).view(1)
def basic_clean(text):
    """Repair mojibake with ftfy, undo double HTML-escaping, and strip whitespace."""
    fixed = ftfy.fix_text(text)
    # Two passes handle doubly-escaped entities like '&amp;amp;'.
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
class KB():
    """Builds a small knowledge base of search-criteria statistics from dialogue JSON files.

    Scans a directory of dialogue JSONs, counts how often each search-criteria
    key appears, and writes summary text files plus pickled counters into
    `out_dir`.
    """

    def __init__(self, data_dir, out_dir):
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(' Knowledge base')
        self.data_dir = data_dir
        self.out_dir = out_dir
        # Frequency of each search-criteria key across all dialogues.
        self.criteria_counter = Counter()
        # NOTE(review): never updated in this class -- populated elsewhere? confirm.
        self.criteria_val_counter = Counter()
        self.sc_file = os.path.join(self.out_dir, 'search_criteria.txt')
        self.cf_file = os.path.join(self.out_dir, 'compulsory_fields.txt')
        self.diff_file = os.path.join(self.out_dir, 'diff_fields.txt')
        self.name_file = os.path.join(self.out_dir, 'file_name.txt')
        self.sc_pkl = os.path.join(self.out_dir, 'search_criteria.pkl')
        self.sc_val_pkl = os.path.join(self.out_dir, 'search_criteria_val.pkl')

    def save_to_pickle(self, obj, filename):
        """Pickle `obj` to `filename`, logging whether an existing file is overwritten."""
        if os.path.isfile(filename):
            self.logger.info((' Overwriting %s.' % filename))
        else:
            self.logger.info((' Saving to %s.' % filename))
        with open(filename, 'wb') as f:
            pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)

    def create_kb(self):
        """Scan every JSON dialogue under data_dir, then persist the counters."""
        self.read_jsondir(self.data_dir)
        self.save_to_pickle(self.criteria_counter, self.sc_pkl)
        self.save_to_pickle(self.criteria_val_counter, self.sc_val_pkl)

    def read_jsondir(self, json_dir):
        """Process every *.json file directly inside `json_dir` (non-recursive)."""
        for file in os.listdir(json_dir):
            if file.endswith('.json'):
                self.read_jsonfile(os.path.join(json_dir, file))

    def join_and_append(self, local_list, global_list):
        """Append the comma-joined `local_list` to `global_list` as one entry."""
        line = ','.join(local_list)
        global_list.append(line)

    def write_list_to_file(self, out_file_path, out_list):
        """Append each item of `out_list` to `out_file_path`, one per line."""
        with open(out_file_path, 'a+') as out_file:
            for item in out_list:
                out_file.write('{}\n'.format(item))

    def read_jsonfile(self, json_file):
        """Collect search-criteria stats from one dialogue file and append summary lines.

        Fixes over the original: the bare `except:` is narrowed to the errors
        open/json.load actually raise, the file handle is closed via `with`,
        a no-op `filter(None, dialogue)` (lazy result was discarded) and dead
        locals (`file_list`, duplicate `cf_line`, `sc_keys_line`,
        `diff_line`) were removed.
        """
        try:
            with open(json_file) as f:
                dialogue = json.load(f)
        except (OSError, ValueError):
            # ValueError covers json.JSONDecodeError and bad encodings.
            print(json_file)
            return None
        sc_list = []
        cf_list = []
        diff_list = []
        for utterance in dialogue:
            if ('search_criteria' in utterance['utterance']):
                kb_line = utterance['utterance']
                cf = kb_line['compulsory_fields']
                sc = kb_line['search_criteria']
                sc_keys = sc.keys()
                self.criteria_counter.update(sc_keys)
                # Criteria present in the search but not compulsory.
                diff = list((set(sc_keys) - set(cf)))
                local_kb_list = []
                synset_list = []
                url_list = []
                for criteria in sc_keys:
                    for sub_criteria in sc[criteria].keys():
                        if (criteria == 'synsets'):
                            synset_list.append(sub_criteria)
                        value = str(sc[criteria][sub_criteria])
                        current_kb_str = '|'.join([criteria, sub_criteria, value])
                        # URL entries are diverted out of the printable KB summary.
                        if (criteria != 'url'):
                            local_kb_list.append(current_kb_str)
                        else:
                            url_list.append(current_kb_str)
                synset_line = ';'.join(synset_list)
                local_kb_str = ';'.join(local_kb_list)
                # NOTE(review): these prints look like leftover debugging; kept
                # to preserve observable behavior.
                print(synset_line)
                print(local_kb_str)
                self.join_and_append(sc_keys, sc_list)
                self.join_and_append(cf, cf_list)
                self.join_and_append(diff, diff_list)
        self.write_list_to_file(self.cf_file, cf_list)
        self.write_list_to_file(self.diff_file, diff_list)
        self.write_list_to_file(self.sc_file, sc_list)
def _calc_win_score(board: Array) -> int:
    """Score a finished game: 1 plain win, +1 for a gammon, +1 more when the
    gammon also leaves checkers in the inner board (presumably a backgammon --
    confirm against the helpers)."""
    gammon = _is_gammon(board)
    inner_bonus = gammon & _remains_at_inner(board)
    return 1 + gammon + inner_bonus
def load_train_ini(ini_file):
    """Parse a training .ini file into a list of per-section parameter dicts.

    Each section yields one dict with typed values (str/int/float) for the
    fixed set of training options below.
    """
    parser = configparser.ConfigParser()
    parser.read(ini_file)
    param_sections = []
    for section in parser.sections():
        param_sections.append({
            'phase': parser.get(section, 'phase'),
            'batch_size': parser.getint(section, 'batch_size'),
            'inputI_width_size': parser.getfloat(section, 'inputI_width_size'),
            'inputI_height_size': parser.getfloat(section, 'inputI_height_size'),
            'r': parser.getint(section, 'r'),
            'niters': parser.getint(section, 'niters'),
            'inputI_chn': parser.getint(section, 'inputI_chn'),
            'output_chn': parser.getint(section, 'output_chn'),
            'ImagePath': parser.get(section, 'ImagePath'),
            'DepthPath': parser.get(section, 'DepthPath'),
            'chkpoint_dir': parser.get(section, 'chkpoint_dir'),
            'result_dir': parser.get(section, 'result_dir'),
            'learning_rate': parser.getfloat(section, 'learning_rate'),
            'epoch': parser.getint(section, 'epoch'),
            'labeling_dir': parser.get(section, 'labeling_dir'),
        })
    return param_sections
def _to_tensor(tensor_or_scalar_like: Any) -> Tuple[(Optional[_TestingErrorMeta], Optional[Tensor])]:
    """Coerce the input to a supported Tensor.

    Returns (None, tensor) on success, or (error_meta, None) when the value
    cannot be converted or the resulting tensor is unsupported.
    """
    if isinstance(tensor_or_scalar_like, Tensor):
        candidate = tensor_or_scalar_like
    else:
        try:
            candidate = torch.as_tensor(tensor_or_scalar_like)
        except Exception:
            failure = _TestingErrorMeta(ValueError, f'No tensor can be constructed from type {type(tensor_or_scalar_like)}.')
            return (failure, None)
    support_error = _check_supported_tensor(candidate)
    if support_error:
        return (support_error, None)
    return (None, candidate)
class ConnectorStub(object):
    """gRPC client stub for the grpc.Connector service.

    Generated-style stub: each attribute is a callable bound to one RPC
    method on `channel`, with protobuf serialization wired in.
    """

    def __init__(self, channel):
        # channel: a grpc.Channel the callables are bound to.
        # Server-streaming status subscription; all others are unary-unary.
        self.AllianceStatusStream = channel.unary_stream('/grpc.Connector/AllianceStatusStream', request_serializer=fedn__pb2.ClientAvailableMessage.SerializeToString, response_deserializer=fedn__pb2.Status.FromString)
        self.SendStatus = channel.unary_unary('/grpc.Connector/SendStatus', request_serializer=fedn__pb2.Status.SerializeToString, response_deserializer=fedn__pb2.Response.FromString)
        self.ListActiveClients = channel.unary_unary('/grpc.Connector/ListActiveClients', request_serializer=fedn__pb2.ListClientsRequest.SerializeToString, response_deserializer=fedn__pb2.ClientList.FromString)
        self.AcceptingClients = channel.unary_unary('/grpc.Connector/AcceptingClients', request_serializer=fedn__pb2.ConnectionRequest.SerializeToString, response_deserializer=fedn__pb2.ConnectionResponse.FromString)
        self.SendHeartbeat = channel.unary_unary('/grpc.Connector/SendHeartbeat', request_serializer=fedn__pb2.Heartbeat.SerializeToString, response_deserializer=fedn__pb2.Response.FromString)
        self.ReassignClient = channel.unary_unary('/grpc.Connector/ReassignClient', request_serializer=fedn__pb2.ReassignRequest.SerializeToString, response_deserializer=fedn__pb2.Response.FromString)
        self.ReconnectClient = channel.unary_unary('/grpc.Connector/ReconnectClient', request_serializer=fedn__pb2.ReconnectRequest.SerializeToString, response_deserializer=fedn__pb2.Response.FromString)
class TFAutoModelForVision2Seq(_BaseAutoModelClass):
    """Auto class that instantiates a TensorFlow vision-to-sequence model
    selected from TF_MODEL_FOR_VISION_2_SEQ_MAPPING."""

    _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
def html_table(classes, table, rgb_color, normalize=False, shortener=True):
    """Render the confusion matrix `table` (dict-of-dicts keyed by class) as nested HTML tables.

    Class names longer than 6 characters are abbreviated when `shortener` is
    true. NOTE(review): assumes html_table_color returns an (r, g, b)
    sequence -- confirm.
    """
    title = '<h2>Confusion Matrix '
    if normalize:
        title += '(Normalized)'
    dim = str((len(classes) + 1) * 7) + 'em'
    # Outer table holds the "Actual" / "Predict" axis labels.
    pieces = [
        title + ': </h2>\n',
        '<table>\n',
        '<tr style="text-align:center;">\n',
        '<td>Actual</td>\n',
        '<td>Predict\n',
        '<table style="border:1px solid black;border-collapse: collapse;height:{0};width:{0};">\n'.format(dim),
        '<tr style="text-align:center;">\n<td></td>\n',
    ]
    plain_cell = '<td style="border:1px solid black;padding:10px;height:7em;width:7em;">'
    body_rows = []
    for actual in classes:
        label = str(actual)
        if shortener and len(label) > 6:
            label = label[:4] + '...'
        # Column header and the matching row label use the same text.
        pieces.append(plain_cell + label + '</td>\n')
        row = ['<tr style="text-align:center;">\n', plain_cell + label + '</td>\n']
        for predicted in classes:
            count = table[actual][predicted]
            background = html_table_color(table[actual], count, rgb_color)
            # Dark backgrounds get white text for contrast.
            text_color = 'white' if min(background) < 128 else 'black'
            row.append('<td style="background-color:rgb({0},{1},{2});color:{3};padding:10px;height:7em;width:7em;">'.format(str(background[0]), str(background[1]), str(background[2]), text_color) + str(count) + '</td>\n')
        row.append('</tr>\n')
        body_rows.append(''.join(row))
    pieces.append('</tr>\n')
    pieces.extend(body_rows)
    pieces.append('</table>\n</td>\n</tr>\n</table>\n')
    return ''.join(pieces)
@validate_params({'data_home': [str, PathLike, None], 'download_if_missing': ['boolean']}, prefer_skip_nested_validation=True)
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
    """Load (downloading and caching if necessary) the species-distribution dataset.

    Returns a Bunch with `coverages` (int16 environmental grids), `train`/`test`
    sample arrays, and grid-geometry metadata.

    NOTE(review): the line above this def was the bare expression
    `_params(...)` -- a mangled parameter-validation decorator. Reconstructed
    as scikit-learn's `@validate_params(...)`; confirm against the module
    imports.
    """
    data_home = get_data_home(data_home)
    if (not exists(data_home)):
        makedirs(data_home)
    # Grid geometry of the coverage rasters (lower-left corner, cell size).
    extra_params = dict(x_left_lower_corner=(- 94.8), Nx=1212, y_left_lower_corner=(- 56.05), Ny=1592, grid_size=0.05)
    dtype = np.int16
    archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
    if (not exists(archive_path)):
        if (not download_if_missing):
            raise OSError('Data not found and `download_if_missing` is False')
        logger.info(('Downloading species data from %s to %s' % (SAMPLES.url, data_home)))
        samples_path = _fetch_remote(SAMPLES, dirname=data_home)
        with np.load(samples_path) as X:
            for f in X.files:
                fhandle = BytesIO(X[f])
                if ('train' in f):
                    train = _load_csv(fhandle)
                if ('test' in f):
                    test = _load_csv(fhandle)
        # Raw download is no longer needed once parsed.
        remove(samples_path)
        logger.info(('Downloading coverage data from %s to %s' % (COVERAGES.url, data_home)))
        coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
        with np.load(coverages_path) as X:
            coverages = []
            for f in X.files:
                fhandle = BytesIO(X[f])
                logger.debug(' - converting {}'.format(f))
                coverages.append(_load_coverage(fhandle))
            coverages = np.asarray(coverages, dtype=dtype)
        remove(coverages_path)
        bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
        # Cache the parsed Bunch so subsequent calls skip the download entirely.
        joblib.dump(bunch, archive_path, compress=9)
    else:
        bunch = joblib.load(archive_path)
    return bunch
class BaseModel(nn.Module):
    """Abstract model interface: subclasses must implement both methods."""
    def forward(self, *inputs: Tensor) -> Tensor:
        # Compute the model output; must be overridden by subclasses.
        raise NotImplementedError
    def loss_function(self, batch: Tensor, *inputs: Any, **kwargs) -> Tensor:
        # Compute the training loss for `batch`; must be overridden.
        raise NotImplementedError
class CrossEntropyNew(nn.Module):
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyNew, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
inputs = torch.pow(inputs, 2)
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu:
targets = targets.cuda()
targets = (((1 - self.epsilon) * targets) + (self.epsilon / self.num_classes))
loss = ((- targets) * log_probs).mean(0).sum()
return loss |
class PCQM4MDataset(DatasetBase):
    """OGB-LSC PCQM4M wrapper: lazily loads SMILES records and converts each
    one to a graph dict of int16 arrays plus a float32 regression target."""
    def __init__(self, dataset_path, dataset_name='PCQM4M', **kwargs):
        super().__init__(dataset_name=dataset_name, **kwargs)
        self.dataset_path = dataset_path
    def dataset(self):
        # NOTE(review): `record_tokens` accesses `self.dataset.get_idx_split()`,
        # which only works if this is a @property — the decorator was likely
        # stripped; confirm upstream.
        try:
            # Lazy-init: reuse the cached dataset after the first access.
            return self._dataset
        except AttributeError:
            from ogb.lsc import PCQM4MDataset
            from ogb.utils import smiles2graph
            self._smiles2graph = smiles2graph
            self._dataset = PCQM4MDataset(root=self.dataset_path, only_smiles=True)
            return self._dataset
    def record_tokens(self):
        # Lazily resolve the index list for this object's split
        # (self.split presumably set by DatasetBase — confirm).
        try:
            return self._record_tokens
        except AttributeError:
            split = {'training': 'train', 'validation': 'valid', 'test': 'test'}[self.split]
            self._record_tokens = self.dataset.get_idx_split()[split]
            return self._record_tokens
    def read_record(self, token):
        # Convert one SMILES string to a graph dict with compact dtypes.
        (smiles, target) = self.dataset[token]
        graph = self._smiles2graph(smiles)
        graph['num_nodes'] = np.array(graph['num_nodes'], dtype=np.int16)
        # Edges are stored transposed as (num_edges, 2).
        graph['edges'] = graph.pop('edge_index').T.astype(np.int16)
        graph['edge_features'] = graph.pop('edge_feat').astype(np.int16)
        graph['node_features'] = graph.pop('node_feat').astype(np.int16)
        graph['target'] = np.array(target, np.float32)
        return graph
class MatrixFactorizationModel(keras.Model):
    """Plain matrix-factorization recommender: score(u, i) = <U[u], I[i]>.

    Embeddings are L2-regularized with `lambda_weights`; training minimizes
    MSE against explicit labels via Adam.
    """
    def __init__(self, num_users, num_items, embed_mf_size, lambda_weights, learning_rate=0.01, name='MF', **kwargs):
        super().__init__(name=name, **kwargs)
        # Fixed seed for reproducible embedding initialization.
        tf.random.set_seed(42)
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.lambda_weights = lambda_weights
        self.initializer = tf.initializers.GlorotUniform()
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_MF', embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), dtype=tf.float32)
        self.item_mf_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_regularizer=keras.regularizers.l2(self.lambda_weights), embeddings_initializer=self.initializer, name='I_MF', dtype=tf.float32)
        # Calling each layer once forces Keras to build its weights eagerly.
        self.user_mf_embedding(0)
        self.item_mf_embedding(0)
        self.loss = keras.losses.MeanSquaredError()
        self.optimizer = tf.optimizers.Adam(learning_rate)
    def call(self, inputs, training=None, mask=None):
        # inputs is a (user_ids, item_ids) pair; returns the dot-product score.
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e = self.item_mf_embedding(item)
        mf_output = tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1))
        return mf_output
    def train_step(self, batch):
        # One gradient step on an (user, item, label) batch; returns the loss.
        (user, pos, label) = batch
        with tf.GradientTape() as tape:
            output = self(inputs=(user, pos), training=True)
            loss = self.loss(label, output)
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss
    def predict(self, inputs, training=False, **kwargs):
        # Inference-mode forward pass.
        output = self.call(inputs=inputs, training=training)
        return output
    def get_recs(self, inputs, training=False, **kwargs):
        # Same dot-product scoring as call(), squeezed for recommendation lists.
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e = self.item_mf_embedding(item)
        mf_output = tf.reduce_sum((user_mf_e * item_mf_e), axis=(- 1))
        return tf.squeeze(mf_output)
    def get_top_k(self, preds, train_mask, k=100):
        # Mask out training interactions (-inf) and take the top-k scores.
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)
def write_log(output, log, info):
    """Dump a numeric log to `output` grouped as described by `info`.

    `info` maps group -> (xlabel, ylabel, yscale, names, plot_kwargs); `log`
    is indexed by a running series counter across groups, each entry being
    (xs, ys, vlines). Writes one "<index>: <x>: <y>" line per sample, a
    "-----" marker at vline positions, and a timestamped footer.
    """
    # Header gets the per-group metadata, column-wise.
    (xlabels, ylabels, yscales, names, plot_kwargs) = zip(*info.values())
    _write_header(output, xlabels, ylabels, yscales, names, plot_kwargs)
    # `offset` tracks the global series index across groups; note the loop
    # variables deliberately shadow the tuple names unpacked above.
    offset = 0
    for (ig, (xlabel, ylabel, yscale, names, plot_kwargs)) in ordered_iteritems(info):
        for (ip, name) in enumerate(names):
            (xs, ys, vlines) = log[(ip + offset)]
            for (ir, x) in enumerate(xs):
                output('{}: {}: {:.16e}'.format((ip + offset), x, ys[ir]))
                if (x in vlines):
                    output(('%d: -----' % (ip + offset)))
        offset += len(names)
    output(('# ended: %s' % time.asctime()))
def split_mnist_by_labels(args, train_loader, test_loader, choice=None):
    """Split MNIST loaders into two disjoint 5-class tasks.

    When `choice` is None, five labels out of 0-9 are drawn at random; the
    remaining labels form the second task. Returns
    ((train_a, test_a), (train_b, test_b), other) when `test_loader` is
    given, otherwise (train_a, train_b, other).
    """
    if choice is None:
        choice = sorted(np.random.choice(np.arange(0, 10), size=5, replace=False))
    all_labels = sorted(np.arange(0, 10))
    # Labels for the second task are everything not chosen for the first.
    other = np.setdiff1d(all_labels, choice)
    has_test = test_loader is not None
    print('First train and test loaders')
    train_loader_a = partition_by_labels(args, train_loader, choice, kind='train')
    if has_test:
        test_loader_a = partition_by_labels(args, test_loader, choice, kind='test')
    print('Second train and test loaders')
    train_loader_b = partition_by_labels(args, train_loader, other, kind='train')
    if has_test:
        test_loader_b = partition_by_labels(args, test_loader, other, kind='test')
    if has_test:
        return ((train_loader_a, test_loader_a), (train_loader_b, test_loader_b), other)
    return (train_loader_a, train_loader_b, other)
def parse_config(config_file: str) -> Dict:
    """Read a YAML configuration file and return its parsed contents."""
    with open(config_file, 'r') as handle:
        # FullLoader parses the full YAML language but avoids arbitrary
        # object construction.
        return yaml.load(handle, yaml.FullLoader)
def get_preprocessing(name, is_training=False):
    """Return a preprocessing closure for the named dataset.

    :param name: one of 'cifar10', 'cifar100', 'imgnet32'
    :param is_training: baked into the returned closure
    :raises ValueError: for unknown dataset names
    """
    registry = {'cifar10': cifar10_preprocessing, 'cifar100': cifar100_preprocessing, 'imgnet32': imgnet32_preprocessing}
    if name not in registry:
        raise ValueError(('Preprocessing name [%s] was not recognized' % name))
    module = registry[name]
    def preprocessing_fn(image, **kwargs):
        # Delegate to the selected module with the captured training flag.
        return module.preprocess_image(image, is_training=is_training, **kwargs)
    return preprocessing_fn
def test_tokenize():
    """End-to-end check of the StanfordNLP tokenizer against gold tokens."""
    # Only the tokenize processor is loaded; models come from TEST_MODELS_DIR.
    nlp = stanfordnlp.Pipeline(processors='tokenize', models_dir=TEST_MODELS_DIR, lang='en')
    doc = nlp(EN_DOC)
    # Sentences are joined with blank lines to match the gold fixture format.
    assert (EN_DOC_GOLD_TOKENS == '\n\n'.join([sent.tokens_string() for sent in doc.sentences]))
def get_splits(args, task, FIELD, **kwargs):
    """Return the torchtext dataset splits for `task`.

    Dispatches on substrings of the task name to the matching
    ``torchtext.datasets.generic`` loader; unknown task names that are also
    present under ``args.data`` are loaded as JSON datasets.

    :param args: namespace providing ``args.data`` (dataset root directory)
    :param task: task identifier, e.g. 'multi30k.en.de', 'squad', 'sst'
    :param FIELD: torchtext field(s) passed through to each loader
    :raises ValueError: for a task name no branch recognizes (previously this
        fell through and raised UnboundLocalError on ``split``)
    """
    if ('multi30k' in task):
        # Language pair is encoded in the task name, e.g. multi30k.en.de
        (src, trg) = [('.' + x) for x in task.split('.')[1:]]
        split = torchtext.datasets.generic.Multi30k.splits(exts=(src, trg), fields=FIELD, root=args.data, **kwargs)
    elif ('iwslt' in task):
        (src, trg) = [('.' + x) for x in task.split('.')[1:]]
        split = torchtext.datasets.generic.IWSLT.splits(exts=(src, trg), fields=FIELD, root=args.data, **kwargs)
    elif ('squad' in task):
        split = torchtext.datasets.generic.SQuAD.splits(fields=FIELD, root=args.data, description=task, **kwargs)
    elif ('wikisql' in task):
        split = torchtext.datasets.generic.WikiSQL.splits(fields=FIELD, root=args.data, query_as_question=('query_as_question' in task), **kwargs)
    elif ('ontonotes.ner' in task):
        # Task format: ontonotes.ner.<subtask>.<nones>.<counting>
        split_task = task.split('.')
        (_, _, subtask, nones, counting) = split_task
        split = torchtext.datasets.generic.OntoNotesNER.splits(subtask=subtask, nones=(True if (nones == 'nones') else False), fields=FIELD, root=args.data, **kwargs)
    elif ('woz' in task):
        split = torchtext.datasets.generic.WOZ.splits(description=task, fields=FIELD, root=args.data, **kwargs)
    elif ('multinli' in task):
        split = torchtext.datasets.generic.MultiNLI.splits(description=task, fields=FIELD, root=args.data, **kwargs)
    elif ('srl' in task):
        split = torchtext.datasets.generic.SRL.splits(fields=FIELD, root=args.data, **kwargs)
    elif ('snli' in task):
        split = torchtext.datasets.generic.SNLI.splits(fields=FIELD, root=args.data, **kwargs)
    elif ('schema' in task):
        split = torchtext.datasets.generic.WinogradSchema.splits(fields=FIELD, root=args.data, **kwargs)
    elif (task == 'cnn'):
        split = torchtext.datasets.generic.CNN.splits(fields=FIELD, root=args.data, **kwargs)
    elif (task == 'dailymail'):
        split = torchtext.datasets.generic.DailyMail.splits(fields=FIELD, root=args.data, **kwargs)
    elif (task == 'cnn_dailymail'):
        # Merge DailyMail examples into the CNN splits, split by split.
        split_cnn = torchtext.datasets.generic.CNN.splits(fields=FIELD, root=args.data, **kwargs)
        split_dm = torchtext.datasets.generic.DailyMail.splits(fields=FIELD, root=args.data, **kwargs)
        for (scnn, sdm) in zip(split_cnn, split_dm):
            scnn.examples.extend(sdm)
        split = split_cnn
    elif ('sst' in task):
        split = torchtext.datasets.generic.SST.splits(fields=FIELD, root=args.data, **kwargs)
    elif ('imdb' in task):
        # IMDb ships with no validation split.
        kwargs['validation'] = None
        split = torchtext.datasets.generic.IMDb.splits(fields=FIELD, root=args.data, **kwargs)
    elif ('zre' in task):
        split = torchtext.datasets.generic.ZeroShotRE.splits(fields=FIELD, root=args.data, **kwargs)
    elif os.path.exists(os.path.join(args.data, task)):
        split = torchtext.datasets.generic.JSON.splits(fields=FIELD, root=args.data, name=task, **kwargs)
    else:
        # Fail loudly instead of letting `split` be unbound.
        raise ValueError('Unrecognized task: %s' % task)
    return split
class SymbolicSubringRejectingVarsFunctor(GenericSymbolicSubringFunctor):
    """Construction functor for a symbolic subring that *rejects* a given
    variable set (all symbolic expressions not involving ``self.vars``)."""
    _functor_name = 'SymbolicSubringRejectingVarsFunctor'
    _repr_type_ = 'rejecting'
    def merge(self, other):
        """Merge with another functor, or return None when no merge applies.

        Two rejecting functors merge to one rejecting the intersection of
        their variable sets; merging with an accepting functor succeeds only
        when the variable sets are disjoint. The implicit None return is the
        coercion framework's "cannot merge" signal.
        """
        if (self == other):
            return self
        elif (type(self) is type(other)):
            return type(self)((self.vars & other.vars))
        elif isinstance(other, SymbolicSubringAcceptingVarsFunctor):
            if (not (self.vars & other.vars)):
                return self
    def _apply_functor(self, R):
        """Apply the functor to ring R; only the symbolic ring SR is valid."""
        if (R is not SR):
            raise NotImplementedError(('This functor can only be applied on the symbolic ring but %s given.' % (R,)))
        return SymbolicSubring(rejecting_variables=self.vars)
def eval(model, val_loader, a2v, args, test=False):
    """Evaluate a video-QA model, log accuracy, and dump per-question results.

    Supports two modes: open-ended (rank all answers against precomputed
    answer embeddings `a2v`) and multiple-choice (`args.mc`, score each
    candidate answer). Writes results to <save_dir>/val-<acc>.json and
    returns overall accuracy.
    """
    model.eval()
    count = 0
    (metrics, counts) = (collections.defaultdict(int), collections.defaultdict(int))
    results = {}
    with torch.no_grad():
        if (not args.mc):
            # Open-ended: cache answer embeddings once for the whole eval.
            model.module._compute_answer_embedding(a2v)
        for (i, batch) in enumerate(val_loader):
            (answer_id, answer, video, question, question_clip) = (batch['answer_id'], batch['answer'], (batch['video'][0].cuda(), batch['video'][1].cuda()), batch['question'].cuda(), batch['question_clip'].cuda())
            video_len = batch['video_len']
            # Padding positions (token id 0) are masked out.
            question_mask = (question > 0).float()
            video_mask = get_mask(video_len, video[1].size(1)).cuda()
            count += answer_id.size(0)
            if (not args.mc):
                predicts = model(video, question, text_mask=question_mask, question_clip=question_clip)
                topk = torch.topk(predicts, dim=1, k=10).indices.cpu()
                if (args.dataset != 'ivqa'):
                    answer_id_expanded = answer_id.view((- 1), 1).expand_as(topk)
                else:
                    # iVQA has soft multi-annotator labels; collapse to {0, 1}.
                    answer_id = (answer_id / 2).clamp(max=1)
                    answer_id_expanded = answer_id
                metrics = compute_aggreeings(topk, answer_id_expanded, [1, 10], ['acc', 'acc10'], metrics, ivqa=(args.dataset == 'ivqa'))
                for (bs, qid) in enumerate(batch['question_id']):
                    results[qid] = {'prediction': int(topk.numpy()[(bs, 0)]), 'answer': int(answer_id.numpy()[bs])}
            else:
                # Multiple-choice: score each candidate by projected dot-product.
                (fusion_proj, answer_proj) = model(video, question, text_mask=question_mask, answer=answer.cuda(), question_clip=question_clip)
                fusion_proj = fusion_proj.unsqueeze(2)
                predicts = torch.bmm(answer_proj, fusion_proj).squeeze()
                predicted = torch.max(predicts, dim=1).indices.cpu()
                metrics['acc'] += (predicted == answer_id).sum().item()
                for (bs, qid) in enumerate(batch['question_id']):
                    results[qid] = {'prediction': int(predicted.numpy()[bs]), 'answer': int(answer_id.numpy()[bs])}
    step = ('val' if (not test) else 'test')
    for k in metrics:
        v = (metrics[k] / count)
        logging.info(f'{step} {k}: {v:.2%}')
    acc = (metrics['acc'] / count)
    json.dump(results, open(os.path.join(args.save_dir, f'val-{acc:.5%}.json'), 'w'))
    return (metrics['acc'] / count)
def agg_runs(dir, metric_best='auto'):
    """Aggregate per-epoch train/val/test stats across seed subdirs of `dir`.

    For each seed, the best epoch is selected on the 'val' split by
    `metric_best` ('auto' prefers 'auc' when present, else 'accuracy'),
    using the aggregation named by ``cfg.metric_agg`` (e.g. 'argmax').
    Writes per-epoch aggregates to <dir>/agg/<split>/stats.json and
    best-epoch aggregates to <dir>/agg/<split>/best.json.
    """
    results = {'train': None, 'val': None, 'test': None}
    results_best = {'train': None, 'val': None, 'test': None}
    for seed in os.listdir(dir):
        if is_seed(seed):
            dir_seed = os.path.join(dir, seed)
            split = 'val'
            if (split in os.listdir(dir_seed)):
                dir_split = os.path.join(dir_seed, split)
                fname_stats = os.path.join(dir_split, 'stats.json')
                stats_list = json_to_dict_list(fname_stats)
                if (metric_best == 'auto'):
                    metric = ('auc' if ('auc' in stats_list[0]) else 'accuracy')
                else:
                    metric = metric_best
                performance_np = np.array([stats[metric] for stats in stats_list])
                # getattr instead of eval(): same dispatch on cfg.metric_agg
                # (e.g. 'argmax'/'argmin') without executing a string.
                best_epoch = stats_list[getattr(performance_np, cfg.metric_agg)()]['epoch']
                print(best_epoch)
            for split in os.listdir(dir_seed):
                if is_split(split):
                    dir_split = os.path.join(dir_seed, split)
                    fname_stats = os.path.join(dir_split, 'stats.json')
                    stats_list = json_to_dict_list(fname_stats)
                    # Keep both the full epoch series and the best-epoch row.
                    stats_best = [stats for stats in stats_list if (stats['epoch'] == best_epoch)][0]
                    print(stats_best)
                    stats_list = [[stats] for stats in stats_list]
                    if (results[split] is None):
                        results[split] = stats_list
                    else:
                        results[split] = join_list(results[split], stats_list)
                    if (results_best[split] is None):
                        results_best[split] = [stats_best]
                    else:
                        results_best[split] += [stats_best]
    # Drop splits that never appeared in any seed directory.
    results = {k: v for (k, v) in results.items() if (v is not None)}
    results_best = {k: v for (k, v) in results_best.items() if (v is not None)}
    for key in results:
        for i in range(len(results[key])):
            results[key][i] = agg_dict_list(results[key][i])
    for key in results_best:
        results_best[key] = agg_dict_list(results_best[key])
    for (key, value) in results.items():
        dir_out = os.path.join(dir, 'agg', key)
        makedirs_rm_exist(dir_out)
        fname = os.path.join(dir_out, 'stats.json')
        dict_list_to_json(value, fname)
        if cfg.tensorboard_agg:
            if (SummaryWriter is None):
                raise ImportError('Tensorboard support requires `tensorboardX`.')
            writer = SummaryWriter(dir_out)
            dict_list_to_tb(value, writer)
            writer.close()
    for (key, value) in results_best.items():
        dir_out = os.path.join(dir, 'agg', key)
        fname = os.path.join(dir_out, 'best.json')
        dict_to_json(value, fname)
    logging.info('Results aggregated across runs saved in {}'.format(os.path.join(dir, 'agg')))
def extract_current_lr(optimizer):
    """Return the optimizer's current learning rate as a number, or None.

    Handles both a LearningRateSchedule (evaluated at the current iteration
    count) and a plain tensor-like lr; anything else yields None.
    """
    if isinstance(optimizer.lr, LearningRateSchedule):
        # Schedules are callables of the step counter.
        current_lr = optimizer.lr(optimizer.iterations).numpy()
    elif hasattr(optimizer.lr, 'numpy'):
        current_lr = optimizer.lr.numpy()
    else:
        current_lr = None
    return current_lr
def test_IndexedOptionArray_RecordArray_NumpyArray():
    """Fancy-indexing an IndexedOptionArray over a RecordArray: -1 indices
    become None, and the typetracer form matches the concrete result."""
    v2a = ak.contents.indexedoptionarray.IndexedOptionArray(ak.index.Index(np.array([2, 2, (- 1), 1, (- 1), 5, 4], np.int64)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']))
    resultv2 = v2a[np.array([0, 1, 4], np.int64)]
    assert (to_list(resultv2) == [{'nest': 3.3}, {'nest': 3.3}, None])
    assert (v2a.to_typetracer()[np.array([0, 1, 4], np.int64)].form == resultv2.form)
class GILStatNode(NogilTryFinallyStatNode):
    """Cython AST node for a `with gil:` / `with nogil:` block.

    Implemented as a try/finally whose finally clause (GILExitNode) restores
    the previous GIL state.
    """
    # Temp holding the saved GIL/thread state; only needed in generators.
    state_temp = None
    def __init__(self, pos, state, body):
        self.state = state
        self.create_state_temp_if_needed(pos, state, body)
        TryFinallyStatNode.__init__(self, pos, body=body, finally_clause=GILExitNode(pos, state=state, state_temp=self.state_temp))
    def create_state_temp_if_needed(self, pos, state, body):
        """Allocate a temp for the GIL state when the body contains yields
        (generators can't keep the state in a C local across suspension)."""
        from .ParseTreeTransforms import YieldNodeCollector
        collector = YieldNodeCollector()
        collector.visitchildren(body)
        if (not collector.yields):
            return
        if (state == 'gil'):
            temp_type = PyrexTypes.c_gilstate_type
        else:
            temp_type = PyrexTypes.c_threadstate_ptr_type
        from . import ExprNodes
        self.state_temp = ExprNodes.TempNode(pos, temp_type)
    def analyse_declarations(self, env):
        # Record on the scope whether we're inside a `with gil` block.
        env._in_with_gil_block = (self.state == 'gil')
        if (self.state == 'gil'):
            env.has_with_gil_block = True
        return super(GILStatNode, self).analyse_declarations(env)
    def analyse_expressions(self, env):
        env.use_utility_code(UtilityCode.load_cached('ForceInitThreads', 'ModuleSetupCode.c'))
        # Analyse the body under the toggled GIL state, then restore it.
        was_nogil = env.nogil
        env.nogil = (self.state == 'nogil')
        node = TryFinallyStatNode.analyse_expressions(self, env)
        env.nogil = was_nogil
        return node
    def generate_execution_code(self, code):
        """Emit the C code that acquires/releases the GIL around the body."""
        code.mark_pos(self.pos)
        code.begin_block()
        if self.state_temp:
            self.state_temp.allocate(code)
            variable = self.state_temp.result()
        else:
            variable = None
        old_gil_config = code.funcstate.gil_owned
        if (self.state == 'gil'):
            code.put_ensure_gil(variable=variable)
            code.funcstate.gil_owned = True
        else:
            code.put_release_gil(variable=variable)
            code.funcstate.gil_owned = False
        TryFinallyStatNode.generate_execution_code(self, code)
        if self.state_temp:
            self.state_temp.release(code)
        # Restore the tracked ownership for code generated after this block.
        code.funcstate.gil_owned = old_gil_config
        code.end_block()
class LogisticUCB(BaseLogisticPolicy):
    """Logistic-regression UCB bandit policy.

    NOTE(review): the bare `epsilon: float = 0.0` field plus `__post_init__`
    suggests a stripped @dataclass decorator — confirm upstream.
    """
    # Exploration weight on the uncertainty bonus (0.0 = pure exploitation).
    epsilon: float = 0.0
    def __post_init__(self) -> None:
        check_scalar(self.epsilon, 'epsilon', float, min_val=0.0)
        self.policy_name = f'logistic_ucb_{self.epsilon}'
        super().__post_init__()
    def select_action(self, context: np.ndarray) -> np.ndarray:
        """Return the top `len_list` action indices by UCB score.

        Score = predicted reward + epsilon * per-arm uncertainty, where the
        uncertainty uses each model's inverse precision `_q`.
        """
        theta = np.array([model.predict_proba(context) for model in self.model_list]).flatten()
        std = np.array([np.sqrt(np.sum(((model._q ** (- 1)) * (context ** 2)))) for model in self.model_list]).flatten()
        ucb_score = (theta + (self.epsilon * std))
        # Highest scores first, truncated to the slate size.
        return ucb_score.argsort()[::(- 1)][:self.len_list]
def create_calculator(calctype, *args, **kwargs):
    """Instantiate the inference calculator registered under `calctype`.

    :param calctype: 'asymptotics' or 'toybased'
    :raises KeyError: for any other calculator type
    """
    registry = {'asymptotics': AsymptoticCalculator, 'toybased': ToyCalculator}
    calculator_cls = registry[calctype]
    return calculator_cls(*args, **kwargs)
class GraphNode():
    """Node of an undirected graph, identified by an integer id."""

    def __init__(self, node_id: int):
        self.id = node_id
        # Adjacency set: neighbours reachable from this node.
        self.links: Set[GraphNode] = set()
        # Traversal bookkeeping flag, initially unvisited.
        self.visited = False

    def link(self, another: 'GraphNode'):
        """Add an undirected edge between this node and `another`."""
        another.links.add(self)
        self.links.add(another)

    def __repr__(self) -> str:
        return '{}'.format(self.id)
# NOTE(review): these three bare tuples look like `@patch(...)` / mock
# decorators whose `@patch` prefix was lost — confirm upstream.
('torch.distributed._broadcast_coalesced', mock)
('torch.distributed.broadcast', mock)
('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_build_ddp():
    """build_ddp should wrap a plain model in the device-matching DDP class."""
    model = Model()
    assert (not is_module_wrapper(model))
    if torch.cuda.is_available():
        mmddp = build_ddp(model, 'cuda', device_ids=[0], process_group=MagicMock())
        assert isinstance(mmddp, MMDistributedDataParallel)
    # MLU support only exists in mmcv >= 1.5.0.
    if (digit_version(mmcv.__version__) >= digit_version('1.5.0')):
        from mmcv.device.mlu import MLUDistributedDataParallel
        from mmcv.utils import IS_MLU_AVAILABLE
        if IS_MLU_AVAILABLE:
            mluddp = build_ddp(model, 'mlu', device_ids=[0], process_group=MagicMock())
            assert isinstance(mluddp, MLUDistributedDataParallel)
def get_encodename(name):
    """URL-quote `name` (spaces become '+') and base64-encode the result.

    :param name: any value; stringified before quoting
    :return: ASCII base64 text of the percent-encoded name
    """
    quoted = quote_plus(str(name))
    encoded = base64.b64encode(quoted.encode('utf-8'))
    return encoded.decode('utf-8')
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        # Normalization is applied first, then the wrapped callable.
        self.norm = LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
class ResizedCapturedImage(CapturedImage):
    """CapturedImage that resizes the loaded image to a fixed target size.

    `tgt_size` is (height, width); it is reversed to PIL's (width, height)
    order when resizing.
    """
    def __init__(self, image_path, tgt_size, sampling=PIL.Image.BILINEAR):
        CapturedImage.__init__(self, image_path)
        self.tgt_size = tgt_size
        self.sampling = sampling
    def image(self):
        """Return the resized image, loading and caching it on first access.

        Bug fix: the original checked `self._image` but never assigned it, so
        every call re-read and re-resized the file; the result is now cached.
        (Assumes the CapturedImage base initializes `self._image` to None —
        the original check implies the same.)
        """
        if (self._image is not None):
            return self._image
        raw = self.read_image()
        # PIL.Image.resize expects (width, height), hence the reversal.
        self._image = np.array(Image.fromarray(raw).resize(self.tgt_size[::(- 1)], self.sampling))
        return self._image
def is_safetensors_available():
    """Report whether the `safetensors` package can be imported.

    When torch is installed, safetensors support additionally requires
    torch >= 1.10; otherwise availability is just importability.
    """
    # With an old torch, safetensors integration is unsupported regardless
    # of whether the package is installed.
    if is_torch_available() and (version.parse(_torch_version) < version.parse('1.10')):
        return False
    return (importlib.util.find_spec('safetensors') is not None)
# NOTE(review): `_utils.test()` looks like a decorator whose leading `@`
# was lost (taichi test harness) — confirm upstream.
_utils.test()
def test_offload_with_cross_block_locals():
    """Kernel-local accumulator must survive across offloaded blocks."""
    ret = ti.field(ti.f32)
    ti.root.place(ret)
    def ker():
        # `s` lives across the offloaded range-for and the final store.
        s = 0
        for i in range(10):
            s += i
        ret[None] = s
    ker()
    # sum(range(10)) == 45
    assert (ret[None] == 45)
def test_consistency_d_dw(problem):
    """Check that each 'd_*' term equals its 'dw_*' weak-form counterpart.

    For every (d-term, dw-term) pair in `test_terms`, evaluates the d-term
    directly and the dw-term as a residual dotted with the parameter vector,
    and requires the relative difference to be below 1e-12.
    """
    from sfepy.discrete import Variables
    ok = True
    for aux in test_terms:
        (term_template, (prefix, par_name, d_vars, dw_vars)) = aux
        tst.report(term_template, prefix, par_name, d_vars, dw_vars)
        term1 = (term_template % ((prefix,) + d_vars))
        variables = Variables.from_conf(problem.conf.variables, problem.fields)
        # Fill each participating variable with a deterministic ramp 0..n-1.
        for var_name in d_vars:
            var = variables[var_name]
            n_dof = (var.field.n_nod * var.field.shape[0])
            aux = nm.arange(n_dof, dtype=nm.float64)
            var.set_data(aux)
        if (prefix == 'd'):
            val1 = problem.evaluate(term1, var_dict=variables.as_dict())
        else:
            val1 = problem.evaluate(term1, call_mode='d_eval', var_dict=variables.as_dict())
        tst.report(('%s: %s' % (term1, val1)))
        # Weak form: residual vector dotted with the parameter part.
        term2 = (term_template % (('dw',) + dw_vars[:2]))
        (vec, vv) = problem.evaluate(term2, mode='weak', var_dict=variables.as_dict(), ret_variables=True)
        pvec = vv.get_vec_part(vec, dw_vars[2])
        val2 = nm.dot(variables[par_name](), pvec)
        tst.report(('%s: %s' % (term2, val2)))
        err = (nm.abs((val1 - val2)) / nm.abs(val1))
        _ok = (err < 1e-12)
        tst.report(('relative difference: %e -> %s' % (err, _ok)))
        ok = (ok and _ok)
    assert ok
def read_points3D_text(path):
    """Parse a COLMAP points3D.txt file into a {point3D_id: Point3D} dict.

    Each non-comment line holds: ID, X Y Z, R G B, reprojection error, then
    alternating (image_id, point2D_idx) pairs for the observing images.
    """
    points3D = {}
    with open(path, 'r') as fid:
        for raw_line in iter(fid.readline, ''):
            text = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if (not text) or (text[0] == '#'):
                continue
            fields = text.split()
            point3D_id = int(fields[0])
            xyz = np.array(tuple(map(float, fields[1:4])))
            rgb = np.array(tuple(map(int, fields[4:7])))
            error = float(fields[7])
            # Track entries alternate image_id / point2D_idx.
            image_ids = np.array(tuple(map(int, fields[8::2])))
            point2D_idxs = np.array(tuple(map(int, fields[9::2])))
            points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs)
    return points3D
class FTB(nn.Module):
    """Feature block: 1x1 projection, a 3x3-conv residual branch, then pooling.

    `forward(x, avg=True)` downsamples with stride-2 average pooling;
    `avg=False` uses a 3x3 stride-1 average pool instead.
    """
    def __init__(self, in_planes, out_planes=512, stride=1):
        super(FTB, self).__init__()
        # 1x1 channel projection (note padding=1 grows the spatial dims by 2;
        # presumably intentional since the residual uses the padded tensor —
        # confirm upstream).
        self.conv0 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=1, bias=False)
        self.conv1 = conv3x3(out_planes, out_planes, stride)
        self.bn1 = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(out_planes, out_planes)
        self.avgpool1 = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        self.avgpool2 = nn.AvgPool2d(kernel_size=(3, 3), stride=1)
    def forward(self, x, avg=True):
        x1 = self.conv0(x)
        # Residual is taken after the 1x1 projection.
        residual = x1
        out = self.conv1(x1)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out += residual
        if avg:
            out = self.avgpool1(out)
        else:
            out = self.avgpool2(out)
        return out
def yaml_load(filename):
    """Load a YAML file and return the parsed data.

    Passes an explicit Loader: `yaml.load` without one warns on PyYAML >= 5.1
    and is a TypeError on PyYAML >= 6. FullLoader matches the behaviour of
    `parse_config` elsewhere in this file.
    """
    with open(filename, 'r') as file:
        data = yaml.load(file, Loader=yaml.FullLoader)
    return data
def get_reg_ref(expr: Expression) -> Optional[Tuple[(Register, int)]]:
    """Resolve `expr` to a (register, offset) pair when it is a (possibly
    cast) dereference of a register-relative address; otherwise None."""
    # Peel casts until the underlying expression is reached.
    if isinstance(expr, ExprCast):
        return get_reg_ref(expr.expr)
    if isinstance(expr, ExprDeref):
        return get_reg_offset(expr.addr)
    return None
def gaussian_nll(x: Tensor, mean: Tensor, log_var: Tensor, min_noise: float=0.001) -> Tensor:
    """Element-wise negative log-likelihood of `x` under N(mean, exp(log_var)).

    `min_noise` is added to the squared error as a variance floor, and a
    tiny 1e-8 keeps the denominator nonzero; the 0.5*log(2*pi) constant is
    included so the value is a full NLL.
    """
    squared_error = (x - mean) ** 2
    data_term = (squared_error + min_noise) / ((2 * log_var.exp()) + 1e-08)
    return data_term + (0.5 * log_var) + (0.5 * np.log(2 * np.pi))
def upscale_nn(input_tensor, f, use_norm=True, w_l2=w_l2, norm=norm):
    """2x nearest-neighbour upsample -> 4x4 conv -> optional norm -> LeakyReLU.

    :param f: number of output filters
    :param use_norm: skip the normalization layer when False
    :param w_l2: L2 kernel regularization weight (default captures the
        module-level `w_l2` at definition time)
    :param norm: normalization type forwarded to `normalization` (same
        module-global capture applies)
    """
    x = input_tensor
    x = UpSampling2D()(x)
    x = Conv2D(f, kernel_size=4, kernel_regularizer=regularizers.l2(w_l2), kernel_initializer=conv_init, padding='same')(x)
    x = (normalization(x, norm, f) if use_norm else x)
    # Fixed 0.2 negative slope.
    x = LeakyReLU(0.2)(x)
    return x
def _gather_padding_ref(start_pad_width, end_pad_width, data, lengths):
start_padding = np.zeros(data.shape[1:], dtype=data.dtype)
end_padding = np.zeros(data.shape[1:], dtype=data.dtype)
pad_width = (start_pad_width + end_pad_width)
ptr = 0
for length in lengths:
for _ in range(start_pad_width):
start_padding += data[ptr]
ptr += 1
ptr += (length - pad_width)
for _ in range(end_pad_width):
end_padding += data[ptr]
ptr += 1
return (start_padding, end_padding) |
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, allow_missing_keys=False):
    """Load a PyTorch checkpoint and convert it into a Flax state dict.

    :param flax_model: target Flax model whose structure guides conversion
    :param pytorch_checkpoint_path: path to the .bin/.pt checkpoint file
    :param allow_missing_keys: accepted for API compatibility; not used here
    :raises ImportError: when torch is not installed
    """
    try:
        import torch
    except ImportError:
        # NOTE(review): this message reads "Please see and for installation
        # instructions" — the URLs appear to have been stripped; restore them
        # upstream.
        logger.error('Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see and for installation instructions.')
        raise
    pt_path = os.path.abspath(pytorch_checkpoint_path)
    logger.info(f'Loading PyTorch weights from {pt_path}')
    # Load on CPU so conversion never touches the GPU.
    pt_state_dict = torch.load(pt_path, map_location='cpu')
    logger.info(f'PyTorch checkpoint contains {sum((t.numel() for t in pt_state_dict.values())):,} parameters.')
    flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    return flax_state_dict
class Params(object):
    """Attribute-style parameter container with dict-like access.

    Values that are dicts / pyhocon trees / Params are recursively converted
    to nested Params on assignment; `_known_keys` tracks the public keys.
    """
    def clone(source, strict=True):
        """Deep-convert `source` into a new Params (or None when not strict).

        NOTE(review): called as `Params.clone(v, ...)` below, i.e. used as a
        static method — an @staticmethod decorator was likely stripped.
        """
        if isinstance(source, pyhocon.ConfigTree):
            return Params(**source.as_plain_ordered_dict())
        elif isinstance(source, Params):
            return Params(**source.as_dict())
        elif isinstance(source, dict):
            return Params(**source)
        elif strict:
            raise ValueError(('Cannot clone from type: ' + str(type(source))))
        else:
            return None
    def __getitem__(self, k):
        return getattr(self, k)
    def __contains__(self, k):
        return (k in self._known_keys)
    def __setitem__(self, k, v):
        """Set key `k`, converting mapping-like values to nested Params."""
        assert isinstance(k, str)
        # Refuse to shadow method names (e.g. 'get', 'keys') with data.
        if isinstance(self.get(k, None), types.FunctionType):
            raise ValueError(("Invalid parameter name (overrides reserved name '%s')." % k))
        converted_val = Params.clone(v, strict=False)
        if (converted_val is not None):
            setattr(self, k, converted_val)
        else:
            # Non-mapping values are stored as-is.
            setattr(self, k, v)
        self._known_keys.add(k)
    def __delitem__(self, k):
        if (k not in self):
            raise ValueError('Parameter %s not found.', k)
        delattr(self, k)
        self._known_keys.remove(k)
    def __init__(self, **kw):
        self._known_keys = set()
        for (k, v) in kw.items():
            self[k] = v
    def get(self, k, default=None):
        """Look up `k`; dotted keys ('a.b.c') traverse nested Params.

        Note the dotted path traversal raises KeyError on a missing segment
        rather than returning `default`.
        """
        if ('.' in k):
            keys = k.split('.')
            d = self.as_dict()
            for key in keys:
                d = d[key]
            return d
        else:
            return getattr(self, k, default)
    def keys(self):
        return sorted(self._known_keys)
    def as_dict(self):
        """Recursively convert to a plain dict of plain values."""
        def convert(v):
            return (v.as_dict() if isinstance(v, Params) else v)
        return {k: convert(self[k]) for k in self.keys()}
    def __repr__(self):
        return self.as_dict().__repr__()
    def __str__(self):
        return json.dumps(self.as_dict(), indent=2, sort_keys=True)
class MediumLevelActionManager(object):
    """Enumerates medium-level (motion-goal) actions for Overcooked agents.

    Wraps a JointMotionPlanner over the given mdp and exposes helpers that
    list feasible pickup/drop/deliver motion goals for each player.
    """
    def __init__(self, mdp, mlam_params):
        self.mdp = mdp
        self.params = mlam_params
        # Behaviour flags/locations read from the params dict.
        self.wait_allowed = mlam_params['wait_allowed']
        self.counter_drop = mlam_params['counter_drop']
        self.counter_pickup = mlam_params['counter_pickup']
        self.joint_motion_planner = JointMotionPlanner(mdp, mlam_params)
        self.motion_planner = self.joint_motion_planner.motion_planner
    def save_to_file(self, filename):
        """Pickle this manager to `filename`."""
        with open(filename, 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
    def from_file(filename):
        """Load a previously pickled manager from `filename`."""
        # NOTE(review): invoked as MediumLevelActionManager.from_file(...);
        # presumably a @staticmethod decorator was stripped — confirm.
        return load_saved_action_manager(filename)
    def from_pickle_or_compute(mdp, mlam_params, custom_filename=None, force_compute=False, info=False):
        """Load a cached manager for (mdp, params), recomputing when needed.

        Recomputes when forced, when the cache is missing/unreadable, or when
        the cached manager's params or mdp differ from the ones requested.
        """
        assert isinstance(mdp, OvercookedGridworld)
        filename = (custom_filename if (custom_filename is not None) else (mdp.layout_name + '_am.pkl'))
        if force_compute:
            return MediumLevelActionManager.compute_mlam(filename, mdp, mlam_params, info=info)
        try:
            mlam = MediumLevelActionManager.from_file(filename)
            # Stale cache: parameters or layout changed since it was saved.
            if ((mlam.params != mlam_params) or (mlam.mdp != mdp)):
                if info:
                    print('medium level action manager with different params or mdp found, computing from scratch')
                return MediumLevelActionManager.compute_mlam(filename, mdp, mlam_params, info=info)
        except (FileNotFoundError, ModuleNotFoundError, EOFError, AttributeError) as e:
            if info:
                print('Recomputing planner due to:', e)
            return MediumLevelActionManager.compute_mlam(filename, mdp, mlam_params, info=info)
        if info:
            print('Loaded MediumLevelActionManager from {}'.format(os.path.join(PLANNERS_DIR, filename)))
        return mlam
    def compute_mlam(filename, mdp, mlam_params, info=False):
        """Build a fresh manager, save it under PLANNERS_DIR, and return it."""
        final_filepath = os.path.join(PLANNERS_DIR, filename)
        if info:
            print('Computing MediumLevelActionManager to be saved in {}'.format(final_filepath))
        start_time = time.time()
        mlam = MediumLevelActionManager(mdp, mlam_params=mlam_params)
        if info:
            print('It took {} seconds to create mlam'.format((time.time() - start_time)))
        mlam.save_to_file(final_filepath)
        return mlam
    def joint_ml_actions(self, state):
        """Return all valid joint (player1, player2) medium-level actions.

        If the cross-product of per-player actions yields nothing valid,
        retries with waiting-substitute actions (go to closest feature).
        """
        (agent1_actions, agent2_actions) = tuple((self.get_medium_level_actions(state, player) for player in state.players))
        joint_ml_actions = list(itertools.product(agent1_actions, agent2_actions))
        valid_joint_ml_actions = list(filter((lambda a: self.is_valid_ml_action(state, a)), joint_ml_actions))
        if (len(valid_joint_ml_actions) == 0):
            # Fallback: substitute waiting with go-to-closest-feature goals.
            (agent1_actions, agent2_actions) = tuple((self.get_medium_level_actions(state, player, waiting_substitute=True) for player in state.players))
            joint_ml_actions = list(itertools.product(agent1_actions, agent2_actions))
            valid_joint_ml_actions = list(filter((lambda a: self.is_valid_ml_action(state, a)), joint_ml_actions))
            if (len(valid_joint_ml_actions) == 0):
                print('WARNING: Found state without valid actions even after adding waiting substitute actions. State: {}'.format(state))
        return valid_joint_ml_actions
    def is_valid_ml_action(self, state, ml_action):
        """True when the joint motion planner accepts this start/goal pair."""
        return self.joint_motion_planner.is_valid_jm_start_goal_pair(state.players_pos_and_or, ml_action)
    def get_medium_level_actions(self, state, player, waiting_substitute=False):
        """Enumerate medium-level motion goals available to `player`.

        Hands-free players can pick things up or start cooking; a player
        holding an object can drop/deliver/use it depending on its type.
        Goals unreachable from the player's current pose are filtered out.
        """
        player_actions = []
        counter_pickup_objects = self.mdp.get_counter_objects_dict(state, self.counter_pickup)
        if (not player.has_object()):
            # Empty hands: all pickup goals plus starting a cookable pot.
            onion_pickup = self.pickup_onion_actions(counter_pickup_objects)
            tomato_pickup = self.pickup_tomato_actions(counter_pickup_objects)
            dish_pickup = self.pickup_dish_actions(counter_pickup_objects)
            soup_pickup = self.pickup_counter_soup_actions(counter_pickup_objects)
            pot_states_dict = self.mdp.get_pot_states(state)
            start_cooking = self.start_cooking_actions(pot_states_dict)
            player_actions.extend(((((onion_pickup + tomato_pickup) + dish_pickup) + soup_pickup) + start_cooking))
        else:
            # Holding something: goals depend on the carried object type.
            player_object = player.get_object()
            pot_states_dict = self.mdp.get_pot_states(state)
            if (len(self.counter_drop) > 0):
                player_actions.extend(self.place_obj_on_counter_actions(state))
            if (player_object.name == 'soup'):
                player_actions.extend(self.deliver_soup_actions())
            elif (player_object.name == 'onion'):
                player_actions.extend(self.put_onion_in_pot_actions(pot_states_dict))
            elif (player_object.name == 'tomato'):
                player_actions.extend(self.put_tomato_in_pot_actions(pot_states_dict))
            elif (player_object.name == 'dish'):
                player_actions.extend(self.pickup_soup_with_dish_actions(pot_states_dict, only_nearly_ready=False))
            else:
                raise ValueError('Unrecognized object')
        if self.wait_allowed:
            player_actions.extend(self.wait_actions(player))
        if waiting_substitute:
            # Replace idling with "go to the closest feature" goals.
            player_actions.extend(self.go_to_closest_feature_actions(player))
        # Keep only goals the single-agent motion planner can reach.
        is_valid_goal_given_start = (lambda goal: self.motion_planner.is_valid_motion_start_goal_pair(player.pos_and_or, goal))
        player_actions = list(filter(is_valid_goal_given_start, player_actions))
        return player_actions
    def pickup_onion_actions(self, counter_objects, only_use_dispensers=False):
        """Motion goals for picking up an onion (dispensers, plus counters
        holding onions unless `only_use_dispensers`)."""
        onion_pickup_locations = self.mdp.get_onion_dispenser_locations()
        if (not only_use_dispensers):
            onion_pickup_locations += counter_objects['onion']
        return self._get_ml_actions_for_positions(onion_pickup_locations)
    def pickup_tomato_actions(self, counter_objects):
        """Motion goals for picking up a tomato from dispensers or counters."""
        tomato_dispenser_locations = self.mdp.get_tomato_dispenser_locations()
        tomato_pickup_locations = (tomato_dispenser_locations + counter_objects['tomato'])
        return self._get_ml_actions_for_positions(tomato_pickup_locations)
    def pickup_dish_actions(self, counter_objects, only_use_dispensers=False):
        """Motion goals for picking up a dish (dispensers, plus counters
        holding dishes unless `only_use_dispensers`)."""
        dish_pickup_locations = self.mdp.get_dish_dispenser_locations()
        if (not only_use_dispensers):
            dish_pickup_locations += counter_objects['dish']
        return self._get_ml_actions_for_positions(dish_pickup_locations)
def pickup_counter_soup_actions(self, counter_objects):
    """Motion goals for grabbing a soup that is sitting on a counter."""
    return self._get_ml_actions_for_positions(counter_objects['soup'])
def start_cooking_actions(self, pot_states_dict):
    """Motion goals for pots where cooking can be started: pots that hold
    ingredients but are not yet cooking (partially full, or full but idle)."""
    partially_full = self.mdp.get_partially_full_pots(pot_states_dict)
    full_idle = self.mdp.get_full_but_not_cooking_pots(pot_states_dict)
    return self._get_ml_actions_for_positions(partially_full + full_idle)
def place_obj_on_counter_actions(self, state):
    """Motion goals for dropping the held object on an allowed empty counter.

    Only counters listed in ``self.counter_drop`` are considered, filtered to
    those currently empty; the ``counter_drop`` ordering is preserved.
    """
    empty = set(self.mdp.get_empty_counter_locations(state))
    drop_spots = [pos for pos in self.counter_drop if pos in empty]
    return self._get_ml_actions_for_positions(drop_spots)
def deliver_soup_actions(self):
    """Motion goals for delivering a held soup at a serving location."""
    return self._get_ml_actions_for_positions(self.mdp.get_serving_locations())
def put_onion_in_pot_actions(self, pot_states_dict):
    """Motion goals for dropping a held onion into a pot that can accept
    another ingredient (partially full or empty)."""
    fillable = self.mdp.get_partially_full_pots(pot_states_dict) + pot_states_dict['empty']
    return self._get_ml_actions_for_positions(fillable)
def put_tomato_in_pot_actions(self, pot_states_dict):
    """Motion goals for dropping a held tomato into a pot that can accept
    another ingredient (partially full or empty).

    Fixes a copy-paste artifact: the local was previously named
    ``partially_full_onion_pots`` even though this method handles tomatoes
    and the pots are not ingredient-specific. Behavior is unchanged.
    """
    partially_full_pots = self.mdp.get_partially_full_pots(pot_states_dict)
    fillable_pots = partially_full_pots + pot_states_dict['empty']
    return self._get_ml_actions_for_positions(fillable_pots)
def pickup_soup_with_dish_actions(self, pot_states_dict, only_nearly_ready=False):
    """Motion goals for using a held dish to collect soup from a pot.

    Ready and cooking pots are always targets. When ``only_nearly_ready``
    is False, empty and partially-full pots are targeted as well.
    """
    targets = list(pot_states_dict['ready']) + pot_states_dict['cooking']
    if not only_nearly_ready:
        # NOTE(review): empty pots are included here too — presumably so the
        # agent can head toward a pot that will be filled; confirm intent.
        targets += pot_states_dict['empty']
        targets += self.mdp.get_partially_full_pots(pot_states_dict)
    return self._get_ml_actions_for_positions(targets)
def go_to_closest_feature_actions(self, player):
    """Motion goals toward the single closest feature (onion/tomato
    dispenser, pot, or dish dispenser) from the player's current pose."""
    features = (self.mdp.get_onion_dispenser_locations()
                + self.mdp.get_tomato_dispenser_locations()
                + self.mdp.get_pot_locations()
                + self.mdp.get_dish_dispenser_locations())
    # min_cost_to_feature(..., with_argmin=True): index [1] is the argmin position.
    closest = self.motion_planner.min_cost_to_feature(player.pos_and_or, features, with_argmin=True)[1]
    return self._get_ml_actions_for_positions([closest])
def go_to_closest_feature_or_counter_to_goal(self, goal_pos_and_or, goal_location):
    """Motion goals toward the closest feature or drop-counter that is NOT
    ``goal_location`` itself.

    Note: ``list.remove`` raises ValueError if ``goal_location`` is not
    among the candidate locations (same as the original behavior).
    """
    candidates = (self.mdp.get_onion_dispenser_locations()
                  + self.mdp.get_tomato_dispenser_locations()
                  + self.mdp.get_pot_locations()
                  + self.mdp.get_dish_dispenser_locations()
                  + self.counter_drop)
    # The concatenation above builds a fresh list, so removing here does not
    # mutate self.counter_drop or any mdp-owned list.
    candidates.remove(goal_location)
    best = self.motion_planner.min_cost_to_feature(goal_pos_and_or, candidates, with_argmin=True)[1]
    return self._get_ml_actions_for_positions([best])
def wait_actions(self, player):
    """A single 'stay put' motion goal: the player's current position and
    orientation, i.e. the pose the player already occupies."""
    return [(player.position, player.orientation)]
def _get_ml_actions_for_positions(self, positions_list):
possible_motion_goals = []
for pos in positions_list:
for motion_goal in self.joint_motion_planner.motion_planner.motion_goals_for_pos[pos]:
possible_motion_goals.append(motion_goal)
return possible_motion_goals |
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=8, data_loader_kwargs=None, max_items=None, **stats_kwargs):
    """Accumulate detector features over the real dataset into a FeatureStats.

    Results are cached on disk when ``opts.cache`` is set: the cache key is
    an MD5 of the dataset/detector/stats arguments, rank 0 checks for an
    existing cache file and broadcasts the verdict to all ranks, and only
    rank 0 writes the file back (atomically via temp file + os.replace).
    Dataset items are sharded across ranks with an interleaved index subset.

    Parameters:
        opts: metric options (provides dataset_kwargs, cache, rank,
            num_gpus, device, progress).
        detector_url: identifier/URL of the feature detector network.
        detector_kwargs: kwargs forwarded to the detector forward pass.
        rel_lo, rel_hi: relative progress window reported for this phase.
        batch_size: data-loader batch size.
        data_loader_kwargs: optional overrides for the torch DataLoader.
        max_items: optional cap on the number of dataset items processed.
        **stats_kwargs: forwarded to FeatureStats (also part of cache key).

    Returns:
        FeatureStats with features appended (or loaded from cache).
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if (data_loader_kwargs is None):
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
    cache_file = None
    if opts.cache:
        # Cache key covers everything that affects the computed stats.
        # sorted(...) makes the repr deterministic across runs.
        args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', (cache_tag + '.pkl'))
        # Only rank 0 touches the filesystem; the boolean is broadcast so
        # every rank takes the same branch (float tensor since bool
        # broadcast support varies across backends).
        flag = (os.path.isfile(cache_file) if (opts.rank == 0) else False)
        if (opts.num_gpus > 1):
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)
        if flag:
            return FeatureStats.load(cache_file)
    num_items = len(dataset)
    if (max_items is not None):
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Interleaved sharding: rank r takes items r, r+num_gpus, r+2*num_gpus, ...
    # The modulo wraps indices so each rank gets ceil(num_items/num_gpus)
    # entries (a few items may be processed twice near the end).
    item_subset = [(((i * opts.num_gpus) + opts.rank) % num_items) for i in range((((num_items - 1) // opts.num_gpus) + 1))]
    for (samples, _labels) in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        # Each sample is a dict describing a layout page: page dimensions,
        # bounding boxes with class labels, per-box image patches, a validity
        # mask, and the page background. Exact shapes/dtypes are set by the
        # dataset class — not visible here; see opts.dataset_kwargs.
        W_page = list(samples['W_page'].cpu().numpy())
        H_page = list(samples['H_page'].cpu().numpy())
        bbox_real = samples['bboxes'].to(opts.device).to(torch.float32)
        bbox_class = samples['labels'].to(opts.device).to(torch.int64)
        bbox_patch_orig = samples['patches_orig'].to(opts.device).to(torch.float32)
        mask = samples['mask'].to(opts.device).to(torch.bool)
        padding_mask = (~ mask)
        background_orig = samples['background_orig'].to(opts.device).to(torch.float32)
        # Render the real layout into full images in memory (out_path=None,
        # return_instead_of_save=True). NOTE(review): assumes the renderer
        # returns floats in [0, 1] — scaled to uint8 below; confirm.
        images = save_real_image_with_background(bbox_real, bbox_real, bbox_patch_orig, (~ padding_mask), background_orig, out_path=None, W_page=W_page, H_page=H_page, return_instead_of_save=True)
        images = (images * 255.0).to(torch.uint8)
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    # Atomic cache write: a unique temp file is renamed into place so
    # readers never observe a partially-written pickle.
    if ((cache_file is not None) and (opts.rank == 0)):
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        temp_file = ((cache_file + '.') + uuid.uuid4().hex)
        stats.save(temp_file)
        os.replace(temp_file, cache_file)
    return stats
def test_replace_ref_nodes_with_names():
    """_replace_ref_nodes_with_names swaps a referenced model object for its
    name, while leaving an already-string ref field untouched."""
    referenced = ModelB()
    referenced.name = 'modelbname'
    referrer = ModelA()
    referrer.int_field = 2
    referrer.ref_field = referenced
    referrer.ref_field2 = 'user_set_name'
    schema._replace_ref_nodes_with_names(referrer, [referenced, referrer])
    assert referrer.ref_field == 'modelbname'
    assert referrer.ref_field2 == 'user_set_name'
def local_path_from_s3_or_local_path(filename):
    """Resolve ``filename`` to an existing local file path.

    Tries ``filename`` as given first, then relative to ``LOCAL_LOG_DIR``;
    returns None when neither exists.
    """
    relative_filename = os.path.join(LOCAL_LOG_DIR, filename)
    for candidate in (filename, relative_filename):
        if os.path.isfile(candidate):
            return candidate
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.