code |
|---|
def load_states_from_checkpoint(model_file: str) -> CheckpointState:
    print('Reading saved model from %s' % model_file)
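    # map_location routes every tensor to CPU so the checkpoint loads even without the original GPU devices.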
state_dict = torch.load(model_file, map_location=(lambda s, l: default_restore_location(s, 'cpu')))
return CheckpointState(**state_dict) |
def mk_z3consts_java(api_files):
java = get_component(JAVA_COMPONENT)
full_path_api_files = []
for api_file in api_files:
api_file_c = java.find_file(api_file, java.name)
api_file = os.path.join(api_file_c.src_dir, api_file)
full_path_api_files.append(api_file)
generated_files = mk_genfile_common.mk_z3consts_java_internal(full_path_api_files, java.package_name, java.src_dir)
if VERBOSE:
for generated_file in generated_files:
print("Generated '{}'".format(generated_file)) |
class MultiProcessRamTensorStorage(MultiProcessTensorStorage):
def __init__(self, data_schema: Dict[(str, SizeData)], rank_to_buffer: Dict[(int, io.BytesIO)]):
rank_to_storage = {rank: SingleProcessRamTensorStorage(data_schema, buf) for (rank, buf) in rank_to_buffer.items()}
super().__init__(rank_to_storage) |
def verify(path: Path):
from onnxruntime import InferenceSession, SessionOptions
from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException
print(f'Checking ONNX model loading from: {path} ...')
try:
onnx_options = SessionOptions()
_ = InferenceSession(path.as_posix(), onnx_options, providers=['CPUExecutionProvider'])
print(f'Model {path} correctly loaded: ')
except RuntimeException as re:
print(f'Error while loading the model {re}: ') |
def build_optimizer(cfg, model):
name = cfg.SOLVER.TYPE
if hasattr(torch.optim, name):
def builder(cfg, model):
return getattr(torch.optim, name)(group_weight(model, cfg.SOLVER.WEIGHT_DECAY), lr=cfg.SOLVER.BASE_LR, **cfg.SOLVER[name])
elif (name in _OPTIMIZER_BUILDERS):
builder = _OPTIMIZER_BUILDERS[name]
else:
raise ValueError('Unsupported type of optimizer.')
return builder(cfg, model) |
class ConvVAE(GaussianLatentVAE):
def __init__(self, representation_size, architecture, encoder_class=CNN, decoder_class=DCNN, decoder_output_activation=identity, decoder_distribution='bernoulli', input_channels=1, imsize=48, init_w=0.001, min_variance=0.001, hidden_init=ptu.fanin_init):
super().__init__(representation_size)
if (min_variance is None):
self.log_min_variance = None
else:
self.log_min_variance = float(np.log(min_variance))
self.input_channels = input_channels
self.imsize = imsize
self.imlength = ((self.imsize * self.imsize) * self.input_channels)
(conv_args, conv_kwargs, deconv_args, deconv_kwargs) = (architecture['conv_args'], architecture['conv_kwargs'], architecture['deconv_args'], architecture['deconv_kwargs'])
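        # Size the encoder's flattened output to the deconv input volume (width * height * channels).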
conv_output_size = ((deconv_args['deconv_input_width'] * deconv_args['deconv_input_height']) * deconv_args['deconv_input_channels'])
self.encoder = encoder_class(**conv_args, paddings=np.zeros(len(conv_args['kernel_sizes']), dtype=np.int64), input_height=self.imsize, input_width=self.imsize, input_channels=self.input_channels, output_size=conv_output_size, init_w=init_w, hidden_init=hidden_init, **conv_kwargs)
self.fc1 = nn.Linear(self.encoder.output_size, representation_size)
self.fc2 = nn.Linear(self.encoder.output_size, representation_size)
self.fc1.weight.data.uniform_((- init_w), init_w)
self.fc1.bias.data.uniform_((- init_w), init_w)
self.fc2.weight.data.uniform_((- init_w), init_w)
self.fc2.bias.data.uniform_((- init_w), init_w)
self.decoder = decoder_class(**deconv_args, fc_input_size=representation_size, init_w=init_w, output_activation=decoder_output_activation, paddings=np.zeros(len(deconv_args['kernel_sizes']), dtype=np.int64), hidden_init=hidden_init, **deconv_kwargs)
self.epoch = 0
self.decoder_distribution = decoder_distribution
def encode(self, input):
h = self.encoder(input)
mu = self.fc1(h)
if (self.log_min_variance is None):
logvar = self.fc2(h)
else:
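            # Keep logvar at or above log(min_variance) by adding the absolute fc2 output to that floor.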
logvar = (self.log_min_variance + torch.abs(self.fc2(h)))
return (mu, logvar)
def decode(self, latents):
decoded = self.decoder(latents).view((- 1), ((self.imsize * self.imsize) * self.input_channels))
if (self.decoder_distribution == 'bernoulli'):
return (decoded, [decoded])
elif (self.decoder_distribution == 'gaussian_identity_variance'):
return (torch.clamp(decoded, 0, 1), [torch.clamp(decoded, 0, 1), torch.ones_like(decoded)])
else:
raise NotImplementedError('Distribution {} not supported'.format(self.decoder_distribution))
def logprob(self, inputs, obs_distribution_params):
if (self.decoder_distribution == 'bernoulli'):
inputs = inputs.narrow(start=0, length=self.imlength, dim=1).contiguous().view((- 1), self.imlength)
log_prob = ((- F.binary_cross_entropy(obs_distribution_params[0], inputs, reduction='elementwise_mean')) * self.imlength)
return log_prob
if (self.decoder_distribution == 'gaussian_identity_variance'):
inputs = inputs.narrow(start=0, length=self.imlength, dim=1).contiguous().view((- 1), self.imlength)
log_prob = ((- 1) * F.mse_loss(inputs, obs_distribution_params[0], reduction='elementwise_mean'))
return log_prob
else:
raise NotImplementedError('Distribution {} not supported'.format(self.decoder_distribution)) |
def init(workspace_template: str='default', log_level: str='INFO', log_file: str=None, agg_fqdn: str=None, col_names=None):
if (col_names is None):
col_names = ['one', 'two']
workspace.create(WORKSPACE_PREFIX, workspace_template)
os.chdir(WORKSPACE_PREFIX)
workspace.certify()
aggregator.generate_cert_request(agg_fqdn)
aggregator.certify(agg_fqdn, silent=True)
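    # Each collaborator gets a numeric data path (1, 2, ...) passed as a string when it is created and certified.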
data_path = 1
for col_name in col_names:
collaborator.create(col_name, str(data_path), silent=True)
collaborator.generate_cert_request(col_name, str(data_path), silent=True, skip_package=True)
collaborator.certify(col_name, silent=True)
data_path += 1
setup_logging(level=log_level, log_file=log_file) |
def recompress_dataset(dataset):
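    # Recompress every image via the mapped function, then batch in groups of 128.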
dataset = dataset.map(recompress_image)
dataset = dataset.batch(128)
return dataset |
@dataclass
class DecoderConfig(FairseqDataclass):
type: DECODER_CHOICES = field(default='viterbi', metadata={'help': 'The type of decoder to use'}) |
class Lbl2TransformerVec(Lbl2Vec):
def __init__(self, keywords_list: List[List[str]], documents: List[str], transformer_model: Union[(SentenceTransformer, AutoModel)]=SentenceTransformer('all-MiniLM-L6-v2'), label_names: List[str]=None, similarity_threshold: float=None, similarity_threshold_offset: float=0, min_num_docs: int=1, max_num_docs: int=None, clean_outliers: bool=False, workers: int=(- 1), device: torch.device=torch.device('cpu'), verbose: bool=True):
if (label_names is not None):
if ((not all((isinstance(i, str) for i in label_names))) or (not isinstance(label_names, list))):
raise ValueError('label_names has to be a list of str')
if (len(label_names) != len(keywords_list)):
raise ValueError('keywords_list and label_name have to be the same length')
else:
label_names = [('label_' + str(i)) for i in range(len(list(keywords_list)))]
if ((not isinstance(keywords_list, list)) or (not all((isinstance(i, list) for i in keywords_list))) or (not all((isinstance(i, str) for i in [item for sublist in keywords_list for item in sublist])))):
raise ValueError('keywords_list has to be an iterable list of lists with descriptive keywords of type str')
self.labels = pd.DataFrame(list(zip(label_names, keywords_list)), columns=['label_name', 'description_keywords'])
if isinstance(documents, str):
raise ValueError('Iterable over raw text documents expected, string object received.')
if (similarity_threshold is not None):
if ((not isinstance(similarity_threshold, float)) or (not ((- 1) <= similarity_threshold <= 1))):
raise ValueError('similarity_threshold value has to be a float value betweeen -1 and 1')
if (type(device) != type(torch.device('cpu'))):
raise ValueError("Device needs to be of type torch.device. To use CPU, set device to 'torch.device('cpu')'. To use GPU, you can e.g. specify 'torch.device('cuda:0')'.")
if (not hasattr(documents, '__iter__')):
raise ValueError('Iterable over raw text documents expected.')
if (not isinstance(min_num_docs, int)):
raise ValueError('min_num_docs must be of type int.')
if (min_num_docs < 1):
raise ValueError('min_num_docs must be > 0 and < max_num_docs')
if (max_num_docs is not None):
if (not isinstance(max_num_docs, int)):
raise ValueError('max_num_docs must be of type int.')
if (max_num_docs <= min_num_docs):
raise ValueError('max_num_docs must be > min_num_docs')
if ((workers < (- 1)) or (workers > psutil.cpu_count(logical=True)) or (workers == 0)):
raise ValueError(("'workers' parameter value cannot be 0 and must be between -1 and " + str(psutil.cpu_count(logical=True))))
self.device = device
if (self.device.type != 'cpu'):
self.workers = 1
else:
self.workers = workers
self.transformer_model = transformer_model
self.documents = pd.DataFrame(list(documents), columns=['doc'])
self.verbose = verbose
self.clean_outliers = clean_outliers
self.max_num_docs = max_num_docs
self.similarity_threshold = similarity_threshold
self.min_num_docs = min_num_docs
self.similarity_threshold_offset = similarity_threshold_offset
if (type(self.transformer_model) == type(SentenceTransformer())):
self.tokenizer = None
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.transformer_model.name_or_path)
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.logger = logging.getLogger('Lbl2TransformerVec')
self.logger.setLevel(logging.WARNING)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
self.logger.addHandler(sh)
transformers_logs.set_verbosity_error()
if self.verbose:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.WARNING)
def fit(self):
self.logger.info('Compute keyword embeddings')
self.labels['keyword_vectors'] = self.labels['description_keywords'].apply((lambda row: [transformer_embedding(model=self.transformer_model, document=keyword, device=self.device, tokenizer=self.tokenizer) for keyword in row]))
self.labels['mean_keyword_vector'] = self.labels['keyword_vectors'].apply((lambda row: centroid(vectors=row)))
self.logger.info('Compute document embeddings')
if (self.workers != 1):
try:
if (self.workers == (- 1)):
if (not ray.is_initialized()):
ray.init(num_cpus=psutil.cpu_count(logical=True), ignore_reinit_error=True, log_to_driver=False, logging_level=logging.ERROR, configure_logging=False)
assert ray.is_initialized()
elif (not ray.is_initialized()):
ray.init(num_cpus=self.workers, ignore_reinit_error=True, log_to_driver=False, logging_level=logging.ERROR, configure_logging=False)
assert ray.is_initialized()
transformer_model_id = ray.put(self.transformer_model)
distributed_transformer_embedding = ray.remote(transformer_embedding)
self.documents['doc_vec'] = ray.get([distributed_transformer_embedding.remote(model=transformer_model_id, document=doc, device=torch.device('cpu'), tokenizer=self.tokenizer) for doc in list(self.documents['doc'])])
finally:
ray.shutdown()
assert (not ray.is_initialized())
else:
self.documents['doc_vec'] = self.documents['doc'].apply((lambda row: transformer_embedding(model=self.transformer_model, document=row, device=self.device, tokenizer=self.tokenizer)))
self.logger.info('Train label embeddings')
self.labels['doc_vectors'] = self.labels['mean_keyword_vector'].apply((lambda row: self._get_similar_documents(keyphrase_vector=row, document_vectors=list(self.documents['doc_vec']), similarity_threshold=self.similarity_threshold, max_num_docs=self.max_num_docs, min_num_docs=self.min_num_docs)))
if self.clean_outliers:
self.labels['cleaned_doc_vectors'] = self.labels['doc_vectors'].apply((lambda row: centroid(vectors=row)))
self.labels['label_vector_from_docs'] = self.labels['cleaned_doc_vectors'].apply((lambda row: centroid(vectors=row)))
else:
self.labels['label_vector_from_docs'] = self.labels['doc_vectors'].apply((lambda row: centroid(vectors=row)))
def predict_model_docs(self, doc_idxs: List[int]=None) -> pd.DataFrame:
if (isinstance(doc_idxs, int) and (doc_idxs is not None)):
raise ValueError('Iterable over integer indices expected. Only single integer received')
if (isinstance(doc_idxs, str) and (doc_idxs is not None)):
raise ValueError('Iterable over integer indices expected. String received')
if ((not hasattr(doc_idxs, '__iter__')) and (doc_idxs is not None)):
raise ValueError('Iterable over integer indices expected.')
doc_key_column = 'doc_key'
most_similar_label_column = 'most_similar_label'
highest_similarity_score_column = 'highest_similarity_score'
self.logger.info('Get document embeddings from model')
if (doc_idxs is not None):
labeled_docs = self.documents.iloc[doc_idxs]
else:
labeled_docs = self.documents
self.logger.info('Calculate document<->label similarities')
labeled_docs = self._get_document_label_similarities(labeled_docs=labeled_docs, doc_key_column=doc_key_column, most_similar_label_column=most_similar_label_column, highest_similarity_score_column=highest_similarity_score_column)
return labeled_docs
def predict_new_docs(self, documents: List[str], workers: int=(- 1), device: torch.device=torch.device('cpu')) -> pd.DataFrame:
if isinstance(documents, str):
raise ValueError('Iterable over raw text documents expected, string object received.')
if (not hasattr(documents, '__iter__')):
raise ValueError('Iterable over raw text documents expected.')
if (type(device) != type(torch.device('cpu'))):
raise ValueError("Device needs to be of type torch.device. To use CPU, set device to 'torch.device('cpu')'. To use GPU, you can e.g. specify 'torch.device('cuda:0')'.")
if ((workers < (- 1)) or (workers > psutil.cpu_count(logical=True)) or (workers == 0)):
raise ValueError(("'workers' parameter value cannot be 0 and must be between -1 and " + str(psutil.cpu_count(logical=True))))
if (device.type != 'cpu'):
workers = 1
doc_key_column = 'doc_key'
most_similar_label_column = 'most_similar_label'
highest_similarity_score_column = 'highest_similarity_score'
prediction_confidence_column = 'prediction_confidence'
self.logger.info('Compute document embeddings')
labeled_docs = pd.DataFrame(list(documents), columns=['doc'])
if (workers != 1):
try:
if (workers == (- 1)):
ray.init(num_cpus=psutil.cpu_count(logical=True), ignore_reinit_error=True)
else:
ray.init(num_cpus=workers, ignore_reinit_error=True)
transformer_model_id = ray.put(self.transformer_model)
distributed_transformer_embedding = ray.remote(transformer_embedding)
labeled_docs['doc_vec'] = ray.get([distributed_transformer_embedding.remote(model=transformer_model_id, document=doc, device=torch.device('cpu'), tokenizer=self.tokenizer) for doc in list(labeled_docs['doc'])])
finally:
ray.shutdown()
else:
labeled_docs['doc_vec'] = labeled_docs['doc'].apply((lambda row: transformer_embedding(model=self.transformer_model, document=row, device=self.device, tokenizer=self.tokenizer)))
labeled_docs = self._get_document_label_similarities(labeled_docs=labeled_docs, doc_key_column=doc_key_column, most_similar_label_column=most_similar_label_column, highest_similarity_score_column=highest_similarity_score_column)
return labeled_docs
def _get_similar_documents(self, keyphrase_vector: np.array, document_vectors: List[np.array], similarity_threshold: float, max_num_docs: int, min_num_docs: int) -> List[np.array]:
if (max_num_docs is None):
max_num_docs = len(document_vectors)
document_vectors = document_vectors[:max_num_docs]
top_results = top_similar_vectors(key_vector=keyphrase_vector, candidate_vectors=document_vectors)
top_cos_scores = [element[0] for element in top_results]
top_indices = [element[1] for element in top_results]
top_results_df = pd.DataFrame([top_cos_scores, top_indices]).transpose().rename(columns={0: 'cos_scores', 1: 'doc_indices'})
top_results_df['doc_indices'] = top_results_df['doc_indices'].astype(int)
top_results_df = top_results_df.head(max_num_docs)
if (similarity_threshold is None):
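            # Default threshold: the best cosine score minus the configured offset.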
similarity_threshold = (top_results_df['cos_scores'].iloc[0] - self.similarity_threshold_offset)
if ((min_num_docs is not None) and (top_results_df[(top_results_df['cos_scores'] > similarity_threshold)].shape[0] < min_num_docs)):
top_results_df = top_results_df.head(min_num_docs)
else:
top_results_df = top_results_df[(top_results_df['cos_scores'] > similarity_threshold)]
similar_document_vectors = [document_vectors[i] for i in list(top_results_df['doc_indices'])]
return similar_document_vectors
def _get_document_label_similarities(self, labeled_docs: pd.DataFrame, doc_key_column: str, most_similar_label_column: str, highest_similarity_score_column: str) -> pd.DataFrame:
doc_keys = list(labeled_docs.index)
label_similarities = []
for label_vector in list(self.labels['label_vector_from_docs']):
similarities = top_similar_vectors(key_vector=label_vector, candidate_vectors=list(labeled_docs['doc_vec']))
similarities.sort(key=(lambda x: x[1]))
similarities = [elem[0] for elem in similarities]
label_similarities.append(similarities)
label_similarities_df = pd.DataFrame(label_similarities).transpose()
label_similarities_df.columns = list(self.labels['label_name'])
label_similarities_df[doc_key_column] = doc_keys
label_similarities_df[most_similar_label_column] = label_similarities_df.drop([doc_key_column], axis=1).idxmax(axis=1)
label_similarities_df[highest_similarity_score_column] = label_similarities_df.drop([doc_key_column, most_similar_label_column], axis=1).max(axis=1)
first_columns = [doc_key_column, most_similar_label_column, highest_similarity_score_column]
following_columns = [e for e in label_similarities_df.columns.tolist() if (e not in first_columns)]
column_order = (first_columns + following_columns)
label_similarities_df = label_similarities_df[column_order]
return label_similarities_df |
def ConvertNetForDevice(net, device=None):
mnet = copy.deepcopy(net)
if (device is None):
device = scope.CurrentDeviceScope()
if core.IsGPUDeviceType(device.device_type):
device_prefix = 'gpu'
elif (device.device_type == caffe2_pb2.IDEEP):
device_prefix = 'ideep'
else:
device_prefix = 'cpu'
namescope = '{}_{}/'.format(device_prefix, device.device_id)
for op in mnet.Proto().op:
if ('RecurrentNetwork' in op.type):
raise NotImplementedError('RecurrentNetwork conversion not yet supported')
for (i, inputb) in enumerate(op.input):
op.input[i] = (namescope + inputb)
for (i, outputb) in enumerate(op.output):
op.output[i] = (namescope + outputb)
for (i, blob) in enumerate(op.control_input):
op.control_input[i] = (namescope + blob)
op.device_option.CopyFrom(device)
for (i, einp) in enumerate(mnet.Proto().external_input):
mnet.Proto().external_input[i] = (namescope + einp)
for (i, eoutp) in enumerate(mnet.Proto().external_output):
mnet.Proto().external_output[i] = (namescope + eoutp)
return mnet |
class CPUCountRequirement(Requirement):
MIN_CPU_COUNT = 2
def __init__(self):
super().__init__('CPUs >= {}'.format(self.MIN_CPU_COUNT))
def check(self):
cpu_count = self._get_cpu_count()
if (cpu_count < self.MIN_CPU_COUNT):
raise ValueError('Only {} CPUs available.'.format(cpu_count))
def _get_cpu_count(self):
if _in_container():
return self._get_container_cpu_count()
else:
return self._get_normal_cpu_count()
def _get_container_cpu_count(self):
try:
cpu_quota = self._get_cpu_quota()
cpu_period = self._get_cpu_period()
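            # cgroup v1 reports cfs_quota_us as -1 when the container has no CPU limit.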
no_limit = (cpu_quota < 0)
if no_limit:
return self._get_normal_cpu_count()
else:
return (cpu_quota / cpu_period)
except Exception as e:
raise RuntimeError('Failed to check CPU count: {}'.format(e))
def _get_cpu_quota(self):
with open('/sys/fs/cgroup/cpu/cpu.cfs_quota_us') as file:
return int(file.readline())
def _get_cpu_period(self):
with open('/sys/fs/cgroup/cpu/cpu.cfs_period_us') as file:
return int(file.readline())
def _get_normal_cpu_count(self):
psutil = _try_import('psutil')
return psutil.cpu_count(logical=False) |
def register_Ns3MmWavePhySapProvider_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::MmWavePhySapProvider const &', 'arg0')])
cls.add_method('SendControlMessage', 'void', [param('ns3::Ptr< ns3::MmWaveControlMessage >', 'msg')], is_pure_virtual=True, is_virtual=True)
cls.add_method('SendMacPdu', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')], is_pure_virtual=True, is_virtual=True)
cls.add_method('SendRachPreamble', 'void', [param('uint8_t', 'PreambleId'), param('uint8_t', 'Rnti')], is_pure_virtual=True, is_virtual=True)
cls.add_method('SetDlSfAllocInfo', 'void', [param('ns3::SfAllocInfo', 'sfAllocInfo')], is_pure_virtual=True, is_virtual=True)
cls.add_method('SetUlSfAllocInfo', 'void', [param('ns3::SfAllocInfo', 'sfAllocInfo')], is_pure_virtual=True, is_virtual=True)
return |
class NLayerDiscriminator(Module):
def __init__(self, hp):
self.hp = hp
def call(self, x, y):
hp = self.hp
results = []
with nn.parameter_scope('layer_0'):
x = F.pad(x, (0, 0, 7, 7), 'reflect')
x = wn_conv(x, hp.ndf, (15,))
x = F.leaky_relu(x, 0.2, inplace=True)
results.append(x)
nf = hp.ndf
stride = hp.downsamp_factor
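        # Downsampling stack: each layer widens the channels (capped at 1024) and applies a grouped, strided convolution.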
for i in range(1, (hp.n_layers_D + 1)):
nf_prev = nf
nf = min((nf * stride), 1024)
with nn.parameter_scope(f'layer_{i}'):
x = wn_conv(x, nf, (((stride * 10) + 1),), stride=(stride,), pad=((stride * 5),), group=(nf_prev // 4))
x = F.leaky_relu(x, 0.2, inplace=True)
results.append(x)
with nn.parameter_scope(f'layer_{(hp.n_layers_D + 1)}'):
nf = min((nf * 2), 1024)
x = wn_conv(x, nf, kernel=(5,), pad=(2,))
x = F.leaky_relu(x, 0.2, inplace=True)
results.append(x)
with nn.parameter_scope(f'layer_{(hp.n_layers_D + 2)}'):
x = wn_conv(x, hp.n_speakers, kernel=(3,), pad=(1,))
if (y is not None):
idx = F.stack(F.arange(0, hp.batch_size), y.reshape((hp.batch_size,)))
x = F.gather_nd(x, idx)
results.append(x)
return results |
class DLDataType(ctypes.Structure):
_fields_ = [('type_code', DLDataTypeCode), ('bits', ctypes.c_uint8), ('lanes', ctypes.c_uint16)] |
class CombinerInterface():
def __init__(self, parent, name, address, fqdn, port, certificate=None, key=None, ip=None, config=None):
self.parent = parent
self.name = name
self.address = address
self.fqdn = fqdn
self.port = port
self.certificate = certificate
self.key = key
self.ip = ip
if (not config):
self.config = {'max_clients': 8}
else:
self.config = config
    @staticmethod
    def from_json(combiner_config):
return CombinerInterface(**combiner_config)
def to_dict(self):
data = {'parent': self.parent, 'name': self.name, 'address': self.address, 'fqdn': self.fqdn, 'port': self.port, 'ip': self.ip, 'certificate': None, 'key': None, 'config': self.config}
if self.certificate:
cert_b64 = base64.b64encode(self.certificate)
key_b64 = base64.b64encode(self.key)
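            # str(bytes) renders as "b'...'"; splitting on the quotes extracts the base64 payload as plain text.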
data['certificate'] = str(cert_b64).split("'")[1]
data['key'] = str(key_b64).split("'")[1]
return data
def to_json(self):
return json.dumps(self.to_dict())
def get_certificate(self):
if self.certificate:
cert_b64 = base64.b64encode(self.certificate)
return str(cert_b64).split("'")[1]
else:
return None
def get_key(self):
if self.key:
key_b64 = base64.b64encode(self.key)
return str(key_b64).split("'")[1]
else:
return None
def flush_model_update_queue(self):
channel = Channel(self.address, self.port, self.certificate).get_channel()
control = rpc.ControlStub(channel)
request = fedn.ControlRequest()
try:
control.FlushAggregationQueue(request)
except grpc.RpcError as e:
if (e.code() == grpc.StatusCode.UNAVAILABLE):
raise CombinerUnavailableError
else:
raise
def submit(self, config):
channel = Channel(self.address, self.port, self.certificate).get_channel()
control = rpc.ControlStub(channel)
request = fedn.ControlRequest()
request.command = fedn.Command.START
for (k, v) in config.items():
p = request.parameter.add()
p.key = str(k)
p.value = str(v)
try:
response = control.Start(request)
except grpc.RpcError as e:
if (e.code() == grpc.StatusCode.UNAVAILABLE):
raise CombinerUnavailableError
else:
raise
return response
def get_model(self, id):
channel = Channel(self.address, self.port, self.certificate).get_channel()
modelservice = rpc.ModelServiceStub(channel)
data = BytesIO()
data.seek(0, 0)
parts = modelservice.Download(fedn.ModelRequest(id=id))
for part in parts:
if (part.status == fedn.ModelStatus.IN_PROGRESS):
data.write(part.data)
if (part.status == fedn.ModelStatus.OK):
return data
if (part.status == fedn.ModelStatus.FAILED):
return None
def allowing_clients(self):
channel = Channel(self.address, self.port, self.certificate).get_channel()
connector = rpc.ConnectorStub(channel)
request = fedn.ConnectionRequest()
try:
response = connector.AcceptingClients(request)
except grpc.RpcError as e:
if (e.code() == grpc.StatusCode.UNAVAILABLE):
raise CombinerUnavailableError
else:
raise
if (response.status == fedn.ConnectionStatus.NOT_ACCEPTING):
return False
if (response.status == fedn.ConnectionStatus.ACCEPTING):
return True
if (response.status == fedn.ConnectionStatus.TRY_AGAIN_LATER):
return False
return False
def list_active_clients(self, queue=1):
channel = Channel(self.address, self.port, self.certificate).get_channel()
control = rpc.ConnectorStub(channel)
request = fedn.ListClientsRequest()
request.channel = queue
try:
response = control.ListActiveClients(request)
except grpc.RpcError as e:
if (e.code() == grpc.StatusCode.UNAVAILABLE):
raise CombinerUnavailableError
else:
raise
return response.client |
def test_accept(chromosome):
visitor = MagicMock()
chromosome.accept(visitor)
visitor.visit_test_suite_chromosome.assert_called_once_with(chromosome) |
class Ufunc(Func):
def __init__(self, name, signatures):
super(Ufunc, self).__init__(name, signatures)
self.doc = add_newdocs.get(name)
if (self.doc is None):
raise ValueError(('No docstring for ufunc %r' % name))
self.doc = textwrap.dedent(self.doc).strip()
def _get_signatures_and_loops(self, all_loops):
inarg_num = None
outarg_num = None
seen = set()
variants = []
def add_variant(func_name, inarg, outarg, ret, inp, outp):
if (inp in seen):
return
seen.add(inp)
sig = (func_name, inp, outp)
if ('v' in outp):
raise ValueError(('%s: void signature %r' % (self.name, sig)))
if ((len(inp) != inarg_num) or (len(outp) != outarg_num)):
raise ValueError(('%s: signature %r does not have %d/%d input/output args' % (self.name, sig, inarg_num, outarg_num)))
(loop_name, loop) = generate_loop(inarg, outarg, ret, inp, outp)
all_loops[loop_name] = loop
variants.append((func_name, loop_name, inp, outp))
for (func_name, inarg, outarg, ret, header) in self.signatures:
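            # The scalar return type (with any '*' suffix stripped) is prepended to the explicit output args.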
outp = (re.sub('\\*.*', '', ret) + outarg)
ret = ret.replace('*', '')
if (inarg_num is None):
inarg_num = len(inarg)
outarg_num = len(outp)
(inp, outp) = list(iter_variants(inarg, outp))[0]
add_variant(func_name, inarg, outarg, ret, inp, outp)
for (func_name, inarg, outarg, ret, header) in self.signatures:
outp = (re.sub('\\*.*', '', ret) + outarg)
ret = ret.replace('*', '')
for (inp, outp) in iter_variants(inarg, outp):
add_variant(func_name, inarg, outarg, ret, inp, outp)
variants.sort(key=(lambda v: cast_order(v[2])))
return (variants, inarg_num, outarg_num)
def generate(self, all_loops):
toplevel = ''
(variants, inarg_num, outarg_num) = self._get_signatures_and_loops(all_loops)
loops = []
funcs = []
types = []
for (func_name, loop_name, inputs, outputs) in variants:
for x in inputs:
types.append(TYPE_NAMES[x])
for x in outputs:
types.append(TYPE_NAMES[x])
loops.append(loop_name)
funcs.append(func_name)
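        # Emit the static Cython declarations for this ufunc: loop pointers, func/data pointers, type codes, and the docstring.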
toplevel += ('cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n' % (self.name, len(loops)))
toplevel += ('cdef void *ufunc_%s_ptr[%d]\n' % (self.name, (2 * len(funcs))))
toplevel += ('cdef void *ufunc_%s_data[%d]\n' % (self.name, len(funcs)))
toplevel += ('cdef char ufunc_%s_types[%d]\n' % (self.name, len(types)))
toplevel += ('cdef char *ufunc_%s_doc = (\n "%s")\n' % (self.name, self.doc.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n"\n "')))
for (j, function) in enumerate(loops):
toplevel += ('ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n' % (self.name, j, function))
for (j, type) in enumerate(types):
toplevel += ('ufunc_%s_types[%d] = <char>%s\n' % (self.name, j, type))
for (j, func) in enumerate(funcs):
toplevel += ('ufunc_%s_ptr[2*%d] = <void*>%s\n' % (self.name, j, self.cython_func_name(func, specialized=True)))
toplevel += ('ufunc_%s_ptr[2*%d+1] = <void*>(<char*>"%s")\n' % (self.name, j, self.name))
for (j, func) in enumerate(funcs):
toplevel += ('ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n' % (self.name, j, self.name, j))
        toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, "@", ufunc_@_doc, 0)\n' % ((len(types) / (inarg_num + outarg_num)), inarg_num, outarg_num)).replace('@', self.name)
return toplevel |
def make_fcs(fcs, inpt, activation=tf.nn.relu, initializer=None):
if (initializer is None):
initializer = tf.orthogonal_initializer(np.sqrt(2.0))
out = inpt
with tf.variable_scope('hiddens'):
for hidden in fcs:
out = layers.fully_connected(out, hidden, activation_fn=activation, weights_initializer=initializer)
return out |
class HashFunction():
def __init__(self):
pass
def compute(self, str1: str) -> int:
pass |
def test3():
time.sleep(3)
vj.open()
print('vj opening', flush=True)
time.sleep(2)
print('sending axes', flush=True)
joystickPosition = vj.generateJoystickPosition(wThrottle=32000, wAxisX=16000, wAxisY=16000)
vj.update(joystickPosition)
time.sleep(5)
joystickPosition = vj.generateJoystickPosition()
vj.update(joystickPosition)
print('vj closing', flush=True)
vj.close() |
class LevitFeatureExtractor(LevitImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class LevitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use LevitImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs) |
class createBlackBackground(bpy.types.Operator):
bl_idname = 'object.create_black_bg'
bl_label = 'Create Black BG (2D Default)'
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scene = context.scene
myaddon = scene.my_addon
bpy.ops.mesh.primitive_plane_add()
pln = bpy.context.active_object
pln.name = 'brenderDefaults.background'
pln.location = mathutils.Vector((0.0, 0.0, (- 0.025)))
pln.scale = mathutils.Vector((5.0, 5.0, 5.0))
if (bpy.data.materials.get('BlackMaterial') is None):
mat_name = 'BlackMaterial'
mat = bpy.data.materials.new(mat_name)
mat.use_nodes = True
nodes = mat.node_tree.nodes
diffnode = nodes['Diffuse BSDF']
diffnode.inputs[0].default_value = (0.0, 0.0, 0.0, 1)
diffnode.inputs[1].default_value = 0.0
pln = bpy.data.objects['brenderDefaults.background']
mat = bpy.data.materials.get('BlackMaterial')
pln.select = True
if pln.data.materials:
pln.data.materials[0] = mat
else:
pln.data.materials.append(mat)
pln.select = False
return {'FINISHED'} |
def train(flags):
plogger = FileWriter(xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir)
checkpointpath = os.path.expandvars(os.path.expanduser(('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar'))))
T = flags.unroll_length
B = flags.batch_size
models = []
pre_models = []
assert (flags.num_actor_devices <= len(flags.gpu_devices.split(','))), 'The number of actor devices can not exceed the number of available devices'
for device in range(flags.num_actor_devices):
model = Model(device=device)
pre_model = Pre_model(device=device)
model.share_memory()
pre_model.share_memory()
model.eval()
pre_model.eval()
models.append(model)
pre_models.append(pre_model)
buffers = create_buffers(flags)
actor_processes = []
ctx = mp.get_context('spawn')
free_queue = []
full_queue = []
for device in range(flags.num_actor_devices):
_free_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
_full_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()}
free_queue.append(_free_queue)
full_queue.append(_full_queue)
learner_model = Model(device=flags.training_device)
predict_model = Pre_model(device=flags.training_device)
optimizers = create_optimizers(flags, learner_model, predict_model)
stat_keys = ['mean_episode_return_landlord', 'loss_landlord', 'pre_loss_landlord', 'mean_episode_return_landlord_up', 'loss_landlord_up', 'pre_loss_landlord_up', 'mean_episode_return_landlord_down', 'loss_landlord_down', 'pre_loss_landlord_down']
(frames, stats) = (0, {k: 0 for k in stat_keys})
position_frames = {'landlord': 0, 'landlord_up': 0, 'landlord_down': 0}
for k in ['landlord', 'landlord_up', 'landlord_down']:
for device in range(flags.num_actor_devices):
models[device].get_model(k).load_state_dict(torch.load((('/root/doudizhu/DouZero/most_recent_model/' + k) + '0.ckpt')))
pre_models[device].get_model(k).load_state_dict(torch.load((('/root/doudizhu/DouZero/most_recent_model/pre_' + k) + '0.ckpt')))
if (flags.load_model and os.path.exists(checkpointpath)):
checkpoint_states = torch.load(checkpointpath, map_location=('cuda:' + str(flags.training_device)))
for k in ['landlord', 'landlord_up', 'landlord_down']:
learner_model.get_model(k).load_state_dict(checkpoint_states['model_state_dict'][k])
predict_model.get_model(k).load_state_dict(checkpoint_states['pre_model_state_dict'][k])
optimizers[k].load_state_dict(checkpoint_states['optimizer_state_dict'][k])
for device in range(flags.num_actor_devices):
models[device].get_model(k).load_state_dict(learner_model.get_model(k).state_dict())
pre_models[device].get_model(k).load_state_dict(predict_model.get_model(k).state_dict())
stats = checkpoint_states['stats']
frames = checkpoint_states['frames']
position_frames = checkpoint_states['position_frames']
log.logger.info(f'''Resuming preempted job, current stats:
{stats}''')
for device in range(flags.num_actor_devices):
num_actors = flags.num_actors
for i in range(flags.num_actors):
actor = ctx.Process(target=act, args=(i, device, free_queue[device], full_queue[device], pre_models[device], models[device], buffers[device], flags))
actor.start()
actor_processes.append(actor)
def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()):
nonlocal frames, position_frames, stats
while (frames < flags.total_frames):
batch = get_batch(free_queue[device][position], full_queue[device][position], buffers[device][position], flags, local_lock)
criterion = torch.nn.CrossEntropyLoss().to(device)
_stats = learn(position, pre_models, predict_model.get_model(position), models, learner_model.get_model(position), batch, optimizers[position], flags, position_lock, criterion)
with lock:
for k in _stats:
stats[k] = _stats[k]
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += (T * B)
position_frames[position] += (T * B)
for device in range(flags.num_actor_devices):
for m in range(flags.num_buffers):
free_queue[device]['landlord'].put(m)
free_queue[device]['landlord_up'].put(m)
free_queue[device]['landlord_down'].put(m)
threads = []
locks = [{'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()} for _ in range(flags.num_actor_devices)]
position_locks = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()}
for device in range(flags.num_actor_devices):
for i in range(flags.num_threads):
for position in ['landlord', 'landlord_up', 'landlord_down']:
thread = ExceptionThread(target=batch_and_learn, name=('batch-and-learn-%d' % i), args=(i, device, position, locks[device][position], position_locks[position]))
thread.start()
threads.append(thread)
def checkpoint(frames):
if flags.disable_checkpoint:
return
log.logger.info('Saving checkpoint to %s', checkpointpath)
_models = learner_model.get_models()
pre_models = predict_model.get_models()
torch.save({'model_state_dict': {k: _models[k].state_dict() for k in _models}, 'optimizer_state_dict': {k: optimizers[k].state_dict() for k in optimizers}, 'pre_model_state_dict': {k: pre_models[k].state_dict() for k in pre_models}, 'stats': stats, 'flags': vars(flags), 'frames': frames, 'position_frames': position_frames}, checkpointpath)
for position in ['landlord', 'landlord_up', 'landlord_down']:
model_weights_dir = os.path.expandvars(os.path.expanduser(('%s/%s/%s' % (flags.savedir, flags.xpid, (((position + '_weights_') + str(frames)) + '.ckpt')))))
pre_model_weights_dir = os.path.expandvars(os.path.expanduser(('%s/%s/%s' % (flags.savedir, flags.xpid, (((('pre_' + position) + '_weights_') + str(frames)) + '.ckpt')))))
torch.save(learner_model.get_model(position).state_dict(), model_weights_dir)
torch.save(predict_model.get_model(position).state_dict(), pre_model_weights_dir)
timer = timeit.default_timer
try:
last_checkpoint_time = (timer() - (flags.save_interval * 60))
initial_time = (timer() - (flags.save_interval * 60))
last_oppo_time = (timer() - (flags.oppo_interval * 60))
while (frames < flags.total_frames):
start_frames = frames
position_start_frames = {k: position_frames[k] for k in position_frames}
start_time = timer()
time.sleep(10)
if ((timer() - last_checkpoint_time) > (flags.save_interval * 60)):
checkpoint(frames)
test_time = (timer() - initial_time)
last_checkpoint_time = timer()
os.system('python3 generate_eval_data.py --num_games 10000')
time.sleep(10)
os.system((((('python3 /root/doudizhu/DouZero/ADP_test.py --time ' + str(test_time)) + ' --frames ') + str(frames)) + ' &'))
time.sleep(10)
os.system((((('python3 /root/doudizhu/DouZero/sl_test.py --time ' + str(test_time)) + ' --frames ') + str(frames)) + ' &'))
end_time = timer()
fps = ((frames - start_frames) / (end_time - start_time))
position_fps = {k: ((position_frames[k] - position_start_frames[k]) / (end_time - start_time)) for k in position_frames}
log.logger.info('After %i (L:%i U:%i D:%i) frames: %.1f fps (L:%.1f U:%.1f D:%.1f) Stats:\n%s', frames, position_frames['landlord'], position_frames['landlord_up'], position_frames['landlord_down'], fps, position_fps['landlord'], position_fps['landlord_up'], position_fps['landlord_down'], pprint.pformat(stats))
except KeyboardInterrupt:
return
else:
for thread in threads:
thread.join()
        log.logger.info('Learning finished after %d frames.', frames)
checkpoint(frames)
plogger.close() |
def build_sqa_zero_dataset(dataset_name, folder):
prompt_templates = get_sqa_prompt_templates()
os.makedirs(f'{folder}/{dataset_name}', exist_ok=True)
table_processor = get_default_processor(max_cell_length=10, max_input_length=MAX_LENGTH, model_name='google/flan-t5-xl')
for (idx, prompt_template) in enumerate(prompt_templates):
print('Current prompt template: ', prompt_template)
dataset = load_dataset('msr_sqa')
for split_name in ['validation', 'test']:
eval_dataset = dataset[split_name]
write_file = f'{folder}/{dataset_name}/{dataset_name}_{split_name}_zero_template_{idx}.json'
with open(write_file, 'w') as write_f:
for (_, (history, table_header, table_values, answer)) in enumerate(zip(eval_dataset['question_and_history'], eval_dataset['table_header'], eval_dataset['table_data'], eval_dataset['answer_text'])):
template_input = table_processor.process_input(table={'header': table_header, 'rows': table_values}, question=' '.join(history), template=prompt_template)
template_output = table_processor.process_output(answer=answer)
write_f.write((json.dumps({'input': template_input, 'output': template_output}) + '\n')) |
@pytest.mark.parametrize('data_types', [[1], 'True', None, ''])
@pytest.mark.xfail(raises=ValueError)
def test_list_datasets_wrong_data_types(data_types):
list_datasets(data_types=data_types) |
def force_fp32(apply_to=None, out_fp16=False):
def force_fp32_wrapper(old_func):
        @functools.wraps(old_func)
def new_func(*args, **kwargs):
if (not isinstance(args[0], torch.nn.Module)):
                raise TypeError('@force_fp32 can only be used to decorate the method of nn.Module')
if (not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled)):
return old_func(*args, **kwargs)
args_info = getfullargspec(old_func)
args_to_cast = (args_info.args if (apply_to is None) else apply_to)
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for (i, arg_name) in enumerate(arg_names):
if (arg_name in args_to_cast):
new_args.append(cast_tensor_type(args[i], torch.half, torch.float))
else:
new_args.append(args[i])
new_kwargs = dict()
if kwargs:
for (arg_name, arg_value) in kwargs.items():
if (arg_name in args_to_cast):
new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.half, torch.float)
else:
new_kwargs[arg_name] = arg_value
output = old_func(*new_args, **new_kwargs)
if out_fp16:
output = cast_tensor_type(output, torch.float, torch.half)
return output
return new_func
return force_fp32_wrapper |
@pytest.mark.parametrize('spcreator', formats_for_minmax)
class Test_MinMaxMixin1D():
def test_minmax(self, spcreator):
D = np.arange(5)
X = spcreator(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 4)
assert_equal((- X).min(), (- 4))
assert_equal((- X).max(), 0)
def test_minmax_axis(self, spcreator):
D = np.arange(50)
X = spcreator(D)
for axis in [0, (- 1)]:
assert_array_equal(toarray(X.max(axis=axis)), D.max(axis=axis, keepdims=True))
assert_array_equal(toarray(X.min(axis=axis)), D.min(axis=axis, keepdims=True))
for axis in [(- 2), 1]:
with pytest.raises(ValueError, match='axis out of range'):
X.min(axis=axis)
with pytest.raises(ValueError, match='axis out of range'):
X.max(axis=axis)
def test_numpy_minmax(self, spcreator):
dat = np.array([0, 1, 2])
datsp = spcreator(dat)
assert_array_equal(np.min(datsp), np.min(dat))
assert_array_equal(np.max(datsp), np.max(dat))
def test_argmax(self, spcreator):
D1 = np.array([(- 1), 5, 2, 3])
D2 = np.array([0, 0, (- 1), (- 2)])
D3 = np.array([(- 1), (- 2), (- 3), (- 4)])
D4 = np.array([1, 2, 3, 4])
D5 = np.array([1, 2, 0, 0])
for D in [D1, D2, D3, D4, D5]:
mat = spcreator(D)
assert_equal(mat.argmax(), np.argmax(D))
assert_equal(mat.argmin(), np.argmin(D))
assert_equal(mat.argmax(axis=0), np.argmax(D, axis=0))
assert_equal(mat.argmin(axis=0), np.argmin(D, axis=0))
D6 = np.empty((0,))
for axis in [None, 0]:
mat = spcreator(D6)
with pytest.raises(ValueError, match='to an empty matrix'):
mat.argmin(axis=axis)
with pytest.raises(ValueError, match='to an empty matrix'):
mat.argmax(axis=axis) |
def register_Ns3DataOutputCallback_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('int', 'val')], is_pure_virtual=True, is_virtual=True)
cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('uint32_t', 'val')], is_pure_virtual=True, is_virtual=True)
cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('double', 'val')], is_pure_virtual=True, is_virtual=True)
cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('std::string', 'val')], is_pure_virtual=True, is_virtual=True)
cls.add_method('OutputSingleton', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::Time', 'val')], is_pure_virtual=True, is_virtual=True)
cls.add_method('OutputStatistic', 'void', [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')], is_pure_virtual=True, is_virtual=True)
return |
def iterate_eternally(indices):
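    # Chain together an endless stream of freshly shuffled index permutations.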
def infinite_shuffles():
while True:
(yield np.random.permutation(indices))
return itertools.chain.from_iterable(infinite_shuffles()) |
class RandomCell(LTICell):
name = 'random'
def __init__(self, d_input, d_model, memory_size=1, memory_order=(- 1), **kwargs):
if (memory_order < 0):
memory_order = d_model
N = memory_order
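        # Scale the random state matrix by 1/sqrt(N) so its spectral norm stays roughly O(1).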
A = (np.random.normal(size=(N, N)) / (N ** 0.5))
B = np.random.normal(size=(N, 1))
super().__init__(d_input, d_model, memory_size, memory_order, A, B, **kwargs) |
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [314])
def test_add2_inplace(seed, ctx, func_name):
from nbla_test_utils import inplace_function_test_helper
x0 = nn.Variable([2, 3, 4], need_grad=True)
x1 = nn.Variable([2, 3, 4], need_grad=True)
inplace_function_test_helper([x0, x1], F.add2, ctx=ctx, func_name=func_name, rng=np.random.RandomState(seed)) |
def compute_l2_norm(h, subtract_mean=False):
h = dim_permute(h)
N = h.size(1)
if subtract_mean:
mn = h.mean(dim=1, keepdim=True)
h = (h - mn)
l2_norm = (h ** 2).sum()
return torch.sqrt(l2_norm) |
@dace.program
def matrix_product_transpose_test(A: dace.float32[(K, M)], B: dace.float32[(N, K)], C: dace.float32[(M, N)]):
    C[:] = (np.transpose(A) @ np.transpose(B)) |
class InterfaceInit(Converter):
def __init__(self, interface):
self.name_init = ('_%s_init_' % interface.name())
self.interface = interface
self.relation_symbols = interface._relation_symbols()
def symbol(self, ex):
if (self.interface.name() == 'maxima'):
return ('_SAGE_VAR_' + repr(SR(ex)))
if (self.interface.name() == 'giac'):
return ('sageVAR' + repr(SR(ex)))
return repr(SR(ex))
def pyobject(self, ex, obj):
if ((self.interface.name() in ['pari', 'gp']) and isinstance(obj, NumberFieldElement_base)):
from sage.rings.number_field.number_field_element_quadratic import NumberFieldElement_gaussian
if isinstance(obj, NumberFieldElement_gaussian):
return repr(obj)
try:
return getattr(obj, self.name_init)()
except AttributeError:
return repr(obj)
def relation(self, ex, operator):
return ('%s %s %s' % (self(ex.lhs()), self.relation_symbols[operator], self(ex.rhs())))
def tuple(self, ex):
x = map(self, ex.operands())
X = ','.join(x)
return ((str(self.interface._left_list_delim()) + X) + str(self.interface._right_list_delim()))
def derivative(self, ex, operator):
if (self.name_init != '_maxima_init_'):
raise NotImplementedError
args = ex.operands()
if ((not all(((isinstance(v, Expression) and v.is_symbol()) for v in args))) or (len(args) != len(set(args)))):
temp_args = [SR.symbol(('_symbol%s' % i)) for i in range(len(args))]
f = operator.function()(*temp_args)
params = operator.parameter_set()
params = [('%s, %s' % (temp_args[i]._maxima_init_(), params.count(i))) for i in set(params)]
subs = [('%s = %s' % (t._maxima_init_(), a._maxima_init_())) for (t, a) in zip(temp_args, args)]
outstr = ('at(diff(%s, %s), [%s])' % (f._maxima_init_(), ', '.join(params), ', '.join(subs)))
else:
f = operator.function()(*args)
params = operator.parameter_set()
params = [('%s, %s' % (args[i]._maxima_init_(), params.count(i))) for i in set(params)]
outstr = ('diff(%s, %s)' % (f._maxima_init_(), ', '.join(params)))
return outstr
def arithmetic(self, ex, operator):
args = [('(%s)' % self(op)) for op in ex.operands()]
return arithmetic_operators[operator].join(args)
def composition(self, ex, operator):
ops = ex.operands()
if hasattr(operator, (self.name_init + 'evaled_')):
return getattr(operator, (self.name_init + 'evaled_'))(*ops)
else:
ops = [self(_) for _ in ops]
try:
op = getattr(operator, self.name_init)()
except (TypeError, AttributeError):
op = repr(operator)
return self.interface._function_call_string(op, ops, []) |
def handle_stacktraces(test_results):
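    # Drop the first and last lines of the report; each remaining line holds one failure's stacktrace.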
total_stacktraces = test_results.split('\n')[1:(- 1)]
stacktraces = []
for stacktrace in total_stacktraces:
try:
line = stacktrace[:stacktrace.index(' ')].split(':')[(- 2)]
error_message = stacktrace[stacktrace.index(' '):]
stacktraces.append(f'(line {line}) {error_message}')
except Exception:
stacktraces.append('Cannot retrieve error message.')
return stacktraces |
class WeightPredictor(abc.ABC):
def __init__(self, optimizer, fix_fn=None, scheduler=None, nag_with_predictor=False, true_weights_storage=None):
self.optimizer = optimizer
self.fix_fn = fix_fn
self.scheduler = scheduler
self.nag_with_predictor = nag_with_predictor
if nag_with_predictor:
print('-I- Doing NAG with predictor')
self.true_weights_storage = true_weights_storage
def setup(self, n_steps):
if ((n_steps == 0) and self.nag_with_predictor):
n_steps = 1
self.n_steps = n_steps
def forward(self):
raise NotImplementedError()
def revert(self):
raise NotImplementedError() |
class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject):
_backends = ['flax']
def __init__(self, *args, **kwargs):
requires_backends(self, ['flax']) |
def __getattr__(name):
return _sub_module_deprecation(sub_package='io', module='netcdf', private_modules=['_netcdf'], all=__all__, attribute=name) |
def capital_M(n):
n = ZZ(n)
return QQ.prod(((d ** (d * moebius((n / d)))) for d in divisors(n))) |
class AzureCognitiveSearch():
def __init__(self, search_service_name: str, search_api_key: str, search_index_name: str, field_text: str, field_score: str):
self.search_service_name = search_service_name
self.search_api_key = search_api_key
self.search_index_name = search_index_name
        self.endpoint = f'https://{self.search_service_name}.search.windows.net'  # standard Azure Cognitive Search endpoint format
self.field_text = field_text
self.field_score = field_score
self.credential = AzureKeyCredential(self.search_api_key)
self.client = SearchClient(endpoint=self.endpoint, index_name=self.search_index_name, credential=self.credential)
def __call__(self, query: str, k: int=10) -> Union[(list[str], list[dotdict])]:
topk: list[dict[(str, Any)]] = azure_search_request(self.field_text, self.field_score, self.client, query, k)
topk = [{**d, 'long_text': d['text']} for d in topk]
return [dotdict(psg) for psg in topk] |
class DIDEMODataset(BaseDataset):
def __init__(self, *args, split='', **kwargs):
assert (split in ['train', 'val', 'test'])
self.split = split
self.metadata = None
if (split == 'train'):
names = ['didemo_train']
elif (split == 'val'):
names = ['didemo_val']
elif (split == 'test'):
names = ['didemo_val']
super().__init__(*args, **kwargs, names=names, text_column_name='caption')
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/didemo'
split_files = {'train': 'DiDeMo_train.tsv', 'val': 'DiDeMo_val.tsv', 'test': 'DiDeMo_test.tsv'}
target_split_fp = split_files[self.split]
metadata = pd.read_csv(os.path.join(metadata_dir, target_split_fp), sep='\t')
self.metadata = metadata
print('load split {}, {} samples'.format(self.split, len(metadata)))
def _get_video_path(self, sample):
rel_video_fp = sample[1]
full_video_fp = os.path.join(self.data_dir, '', rel_video_fp)
return (full_video_fp, rel_video_fp)
def _get_caption(self, sample):
return sample[0] |
def merge_beams(beam_1, beam_2, beam_size):
if ((len(beam_1) == 0) or (len(beam_2) == 0)):
return (beam_1, beam_2)
    annotated_beam_1 = [('beam_1', b) for b in beam_1]
    annotated_beam_2 = [('beam_2', b) for b in beam_2]
    merged_beams = (annotated_beam_1 + annotated_beam_2)
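    # Keep the globally best beam_size hypotheses across both beams, then route each survivor back to its source beam.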
merged_beams.sort(key=(lambda x: x[1].score), reverse=True)
ret_beam_1 = []
ret_beam_2 = []
for (label, beam) in merged_beams[:beam_size]:
if (label == 'beam_1'):
ret_beam_1.append(beam)
else:
assert (label == 'beam_2')
ret_beam_2.append(beam)
return (ret_beam_1, ret_beam_2) |
def parse_command_line(args):
from .Main import CompilationOptions, default_options
pending_arg = []
def pop_arg():
if ((not args) or pending_arg):
bad_usage()
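        # Handle '--opt=value': stash the value so the next pop_value() call returns it.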
if (('=' in args[0]) and args[0].startswith('--')):
(name, value) = args.pop(0).split('=', 1)
pending_arg.append(value)
return name
return args.pop(0)
def pop_value(default=None):
if pending_arg:
return pending_arg.pop()
elif (default is not None):
return default
elif (not args):
bad_usage()
return args.pop(0)
def get_param(option):
tail = option[2:]
if tail:
return tail
else:
return pop_arg()
options = CompilationOptions(default_options)
sources = []
while args:
if args[0].startswith('-'):
option = pop_arg()
if (option in ('-V', '--version')):
options.show_version = 1
elif (option in ('-l', '--create-listing')):
options.use_listing_file = 1
elif (option in ('-+', '--cplus')):
options.cplus = 1
elif (option == '--embed'):
Options.embed = pop_value('main')
elif option.startswith('-I'):
options.include_path.append(get_param(option))
elif (option == '--include-dir'):
options.include_path.append(pop_value())
elif (option in ('-w', '--working')):
options.working_path = pop_value()
elif (option in ('-o', '--output-file')):
options.output_file = pop_value()
elif (option in ('-t', '--timestamps')):
options.timestamps = 1
elif (option in ('-f', '--force')):
options.timestamps = 0
elif (option in ('-v', '--verbose')):
options.verbose += 1
elif (option in ('-p', '--embed-positions')):
Options.embed_pos_in_docstring = 1
elif (option in ('-z', '--pre-import')):
Options.pre_import = pop_value()
elif (option == '--cleanup'):
Options.generate_cleanup_code = int(pop_value())
elif (option in ('-D', '--no-docstrings')):
Options.docstrings = False
elif (option in ('-a', '--annotate')):
Options.annotate = True
elif (option == '--annotate-coverage'):
Options.annotate = True
Options.annotate_coverage_xml = pop_value()
elif (option == '--convert-range'):
Options.convert_range = True
elif (option == '--line-directives'):
options.emit_linenums = True
elif (option == '--no-c-in-traceback'):
options.c_line_in_traceback = False
elif (option == '--gdb'):
options.gdb_debug = True
options.output_dir = os.curdir
elif (option == '--gdb-outdir'):
options.gdb_debug = True
options.output_dir = pop_value()
elif (option == '--lenient'):
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
elif (option == '-2'):
options.language_level = 2
elif (option == '-3'):
options.language_level = 3
elif (option == '--3str'):
options.language_level = '3str'
elif (option == '--capi-reexport-cincludes'):
options.capi_reexport_cincludes = True
elif (option == '--fast-fail'):
Options.fast_fail = True
elif (option == '--cimport-from-pyx'):
Options.cimport_from_pyx = True
elif (option in ('-Werror', '--warning-errors')):
Options.warning_errors = True
elif (option in ('-Wextra', '--warning-extra')):
options.compiler_directives.update(Options.extra_warnings)
elif (option == '--old-style-globals'):
Options.old_style_globals = True
elif ((option == '--directive') or option.startswith('-X')):
if (option.startswith('-X') and option[2:].strip()):
x_args = option[2:]
else:
x_args = pop_value()
try:
options.compiler_directives = Options.parse_directive_list(x_args, relaxed_bool=True, current_settings=options.compiler_directives)
except ValueError as e:
sys.stderr.write(('Error in compiler directive: %s\n' % e.args[0]))
sys.exit(1)
elif ((option == '--compile-time-env') or option.startswith('-E')):
if (option.startswith('-E') and option[2:].strip()):
x_args = option[2:]
else:
x_args = pop_value()
try:
options.compile_time_env = Options.parse_compile_time_env(x_args, current_settings=options.compile_time_env)
except ValueError as e:
sys.stderr.write(('Error in compile-time-env: %s\n' % e.args[0]))
sys.exit(1)
elif (option == '--module-name'):
options.module_name = pop_value()
elif (option in ('-M', '--depfile')):
options.depfile = True
elif option.startswith('--debug'):
option = option[2:].replace('-', '_')
from . import DebugFlags
if (option in dir(DebugFlags)):
setattr(DebugFlags, option, True)
else:
sys.stderr.write(('Unknown debug flag: %s\n' % option))
bad_usage()
elif (option in ('-h', '--help')):
sys.stdout.write(usage)
sys.exit(0)
else:
sys.stderr.write(usage)
sys.stderr.write(('Unknown compiler flag: %s\n' % option))
sys.exit(1)
else:
sources.append(pop_arg())
if pending_arg:
bad_usage()
if (options.use_listing_file and (len(sources) > 1)):
sys.stderr.write('cython: Only one source file allowed when using -o\n')
sys.exit(1)
if ((len(sources) == 0) and (not options.show_version)):
bad_usage()
if (Options.embed and (len(sources) > 1)):
sys.stderr.write('cython: Only one source file allowed when using --embed\n')
sys.exit(1)
if options.module_name:
if options.timestamps:
sys.stderr.write('cython: Cannot use --module-name with --timestamps\n')
sys.exit(1)
if (len(sources) > 1):
sys.stderr.write('cython: Only one source file allowed when using --module-name\n')
sys.exit(1)
return (options, sources) |
def generate(model, cond, top_k, top_p):
while True:
gen_text = model.generate(cond=cond, top_k=top_k, top_p=top_p)
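        # Retry until the sample contains at least one alphabetic character.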
if (len(list(filter(str.isalpha, gen_text))) > 0):
return gen_text |
class SubSectionTitleOrder():
def __init__(self, src_dir):
self.src_dir = src_dir
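        # Captures a section title underlined with dashes (reST style).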
self.regex = re.compile('^([\\w ]+)\\n-', re.MULTILINE)
def __repr__(self):
return ('<%s>' % (self.__class__.__name__,))
def __call__(self, directory):
src_path = os.path.normpath(os.path.join(self.src_dir, directory))
if (os.path.basename(src_path) == 'release_highlights'):
return '0'
readme = os.path.join(src_path, 'README.txt')
try:
with open(readme, 'r') as f:
content = f.read()
except FileNotFoundError:
return directory
title_match = self.regex.search(content)
if (title_match is not None):
return title_match.group(1)
return directory |
def save_ckpt(state, path):
def save_arrays(arrays, fname):
with open(fname, 'wb') as f:
np.savez(f, *arrays)
with print_time(f'Saving model in {path}'):
save_arrays(jax.tree_flatten(state['model'])[0], f'{path}/model/{jax.process_index()}.npz')
with print_time(f'Saving opt in {path}'):
save_arrays(jax.tree_flatten(state['opt'])[0], f'{path}/opt/{jax.process_index()}.npz')
return (int(state['step']), int(jax.process_count())) |
def decoration_hop() -> GoalDirectedBenchmark:
smiles = 'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'
pharmacophor_sim = TanimotoScoringFunction(smiles, fp_type='PHCO', score_modifier=ClippedScoreModifier(upper_x=0.85))
deco1 = SMARTSScoringFunction('CS([#6])(=O)=O', inverse=True)
deco2 = SMARTSScoringFunction('[#7]-c1ccc2ncsc2c1', inverse=True)
scaffold = SMARTSScoringFunction('[#7]-c1n[c;h1]nc2[c;h1]c(-[#8])[c;h0][c;h1]c12', inverse=False)
deco_hop1_fn = ArithmeticMeanScoringFunction([pharmacophor_sim, deco1, deco2, scaffold])
specification = uniform_specification(1, 10, 100)
return GoalDirectedBenchmark(name='Deco Hop', objective=deco_hop1_fn, contribution_specification=specification) |
@test_utils.test(require=ti.extension.quant, debug=True)
def test_1D_quant_array_fixed():
qfxt = ti.types.quant.fixed(bits=8, max_value=2)
x = ti.field(dtype=qfxt)
N = 4
ti.root.quant_array(ti.i, N, max_num_bits=32).place(x)
def set_val():
for i in range(N):
x[i] = (i * 0.5)
def verify_val():
for i in range(N):
assert (x[i] == (i * 0.5))
set_val()
verify_val() |
class TestVoigtProfile():
@pytest.mark.parametrize('x, sigma, gamma', [(np.nan, 1, 1), (0, np.nan, 1), (0, 1, np.nan), (1, np.nan, 0), (np.nan, 1, 0), (1, 0, np.nan), (np.nan, 0, 1), (np.nan, 0, 0)])
def test_nan(self, x, sigma, gamma):
assert np.isnan(sc.voigt_profile(x, sigma, gamma))
@pytest.mark.parametrize('x, desired', [((- np.inf), 0), (np.inf, 0)])
def test_inf(self, x, desired):
assert (sc.voigt_profile(x, 1, 1) == desired)
def test_against_mathematica(self):
points = np.array([[(- 7.89), 45.06, 6.66, 0.], [(- 0.05), 7.98, 24.13, 0.], [(- 13.98), 16.83, 42.37, 0.], [(- 12.66), 0.21, 6.32, 0.], [11.34, 4.25, 21.96, 0.], [(- 11.56), 20.4, 30.53, 0.], [(- 9.17), 25.61, 8.32, 0.], [16.59, 18.05, 2.5, 0.], [9.11, 2.12, 39.33, 0.], [(- 43.33), 0.3, 45.68, 0.]])
FuncData(sc.voigt_profile, points, (0, 1, 2), 3, atol=0, rtol=1e-15).check()
def test_symmetry(self):
x = np.linspace(0, 10, 20)
assert_allclose(sc.voigt_profile(x, 1, 1), sc.voigt_profile((- x), 1, 1), rtol=1e-15, atol=0)
@pytest.mark.parametrize('x, sigma, gamma, desired', [(0, 0, 0, np.inf), (1, 0, 0, 0)])
def test_corner_cases(self, x, sigma, gamma, desired):
assert (sc.voigt_profile(x, sigma, gamma) == desired)
@pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [(0, 1, 1e-16, 1), (1, 0, 1, 1e-16), (0, 0, 1e-16, 1e-16)])
def test_continuity(self, sigma1, gamma1, sigma2, gamma2):
x = np.linspace(1, 10, 20)
assert_allclose(sc.voigt_profile(x, sigma1, gamma1), sc.voigt_profile(x, sigma2, gamma2), rtol=1e-16, atol=1e-16) |
def parse_args():
parser = argparse.ArgumentParser(description='Train a classification model')
parser.add_argument('--cfg', dest='cfg_file', help='Config file path', required=True, type=str)
parser.add_argument('--repeat', dest='repeat', help='Repeat how many random seeds', default=1, type=int)
parser.add_argument('--mark_done', dest='mark_done', action='store_true', help='mark yaml as yaml_done after a job has finished')
parser.add_argument('--override_remark', dest='override_remark', type=str, required=False, default=None, help='easily override the remark in the yaml file')
parser.add_argument('--override_data_dir', dest='override_data_dir', type=str, required=False, default=None, help='easily override the dataset.dir in the yaml file')
parser.add_argument('opts', help='See graphgym/config.py for all options', default=None, nargs=argparse.REMAINDER)
if (len(sys.argv) == 1):
parser.print_help()
sys.exit(1)
return parser.parse_args() |
class SchemeMorphism_polynomial_affine_space_field(SchemeMorphism_polynomial_affine_space):
@cached_method
def weil_restriction(self):
if any((isinstance(f, FractionFieldElement) for f in self)):
raise TypeError('coordinate functions must be polynomials')
DS = self.domain()
R = DS.coordinate_ring()
result = R.ideal(self._polys).weil_restriction().gens()
H = Hom(DS.weil_restriction(), self.codomain().weil_restriction())
return H(result)
def reduce_base_field(self):
g = self.homogenize(0).reduce_base_field().dehomogenize(0)
from sage.schemes.affine.affine_space import AffineSpace
new_domain = AffineSpace(g.domain().base_ring(), self.domain().dimension_relative(), self.domain().variable_names())
new_codomain = AffineSpace(g.codomain().base_ring(), self.codomain().dimension_relative(), self.codomain().variable_names())
R = new_domain.coordinate_ring()
H = Hom(new_domain, new_codomain)
if isinstance(g[0], FractionFieldElement):
return H([(R(G.numerator()) / R(G.denominator())) for G in g])
return H([R(G) for G in g])
def indeterminacy_locus(self):
A = self.domain()
X = A.subscheme(0)
return (self * X.hom(A.gens(), A)).indeterminacy_locus()
def indeterminacy_points(self, F=None):
if (F is None):
fcn = self
else:
if (not F.is_field()):
raise NotImplementedError('indeterminacy points only implemented for fields')
fcn = self.change_ring(F)
indScheme = fcn.indeterminacy_locus()
if (indScheme.dimension() > 0):
raise ValueError('indeterminacy scheme is not dimension 0')
return indScheme.rational_points()
def image(self):
X = self.domain().subscheme(0)
e = X.embedding_morphism()
return (self * e).image() |
def _act_backward(ctx, x, dx):
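# Backward pass for the fused in-place activation: dispatch on the activation recorded in ctx and let the native backend adjust dx in place (leaky ReLU needs the slope, ELU needs only x, identity needs no correction).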
if (ctx.activation == ACT_LEAKY_RELU):
_backend.leaky_relu_backward(x, dx, ctx.slope)
elif (ctx.activation == ACT_ELU):
_backend.elu_backward(x, dx)
elif (ctx.activation == ACT_NONE):
pass |
class PointRCNN(Detector3DTemplate):
def __init__(self, model_cfg, num_class, dataset):
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
(loss, tb_dict, disp_dict) = self.get_training_loss()
ret_dict = {'loss': loss}
return (ret_dict, tb_dict, disp_dict)
else:
(pred_dicts, recall_dicts) = self.post_processing(batch_dict)
return (pred_dicts, recall_dicts)
def get_training_loss(self):
disp_dict = {}
(loss_point, tb_dict) = self.point_head.get_loss()
(loss_rcnn, tb_dict) = self.roi_head.get_loss(tb_dict)
loss = (loss_point + loss_rcnn)
return (loss, tb_dict, disp_dict) |
def test_with_bert(pretrain_file, tmp_path):
trainer = run_training(pretrain_file, tmp_path, '--bert_model', 'hf-internal-testing/tiny-bert')
model_file = os.path.join(trainer.args['save_dir'], trainer.args['save_name'])
assert (not model_file_has_bert(model_file)) |
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
features = models.vgg19(pretrained=True).features
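# Slice the pretrained VGG-19 feature extractor into named sub-sequences, one per ReLU stage, so forward() can return every intermediate activation (the usual setup for perceptual or style losses); all parameters are frozen at the end of __init__.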
self.relu1_1 = torch.nn.Sequential()
self.relu1_2 = torch.nn.Sequential()
self.relu2_1 = torch.nn.Sequential()
self.relu2_2 = torch.nn.Sequential()
self.relu3_1 = torch.nn.Sequential()
self.relu3_2 = torch.nn.Sequential()
self.relu3_3 = torch.nn.Sequential()
self.relu3_4 = torch.nn.Sequential()
self.relu4_1 = torch.nn.Sequential()
self.relu4_2 = torch.nn.Sequential()
self.relu4_3 = torch.nn.Sequential()
self.relu4_4 = torch.nn.Sequential()
self.relu5_1 = torch.nn.Sequential()
self.relu5_2 = torch.nn.Sequential()
self.relu5_3 = torch.nn.Sequential()
self.relu5_4 = torch.nn.Sequential()
for x in range(2):
self.relu1_1.add_module(str(x), features[x])
for x in range(2, 4):
self.relu1_2.add_module(str(x), features[x])
for x in range(4, 7):
self.relu2_1.add_module(str(x), features[x])
for x in range(7, 9):
self.relu2_2.add_module(str(x), features[x])
for x in range(9, 12):
self.relu3_1.add_module(str(x), features[x])
for x in range(12, 14):
self.relu3_2.add_module(str(x), features[x])
for x in range(14, 16):
self.relu3_3.add_module(str(x), features[x])
for x in range(16, 18):
self.relu3_4.add_module(str(x), features[x])
for x in range(18, 21):
self.relu4_1.add_module(str(x), features[x])
for x in range(21, 23):
self.relu4_2.add_module(str(x), features[x])
for x in range(23, 25):
self.relu4_3.add_module(str(x), features[x])
for x in range(25, 27):
self.relu4_4.add_module(str(x), features[x])
for x in range(27, 30):
self.relu5_1.add_module(str(x), features[x])
for x in range(30, 32):
self.relu5_2.add_module(str(x), features[x])
for x in range(32, 34):
self.relu5_3.add_module(str(x), features[x])
for x in range(34, 36):
self.relu5_4.add_module(str(x), features[x])
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
relu1_1 = self.relu1_1(x)
relu1_2 = self.relu1_2(relu1_1)
relu2_1 = self.relu2_1(relu1_2)
relu2_2 = self.relu2_2(relu2_1)
relu3_1 = self.relu3_1(relu2_2)
relu3_2 = self.relu3_2(relu3_1)
relu3_3 = self.relu3_3(relu3_2)
relu3_4 = self.relu3_4(relu3_3)
relu4_1 = self.relu4_1(relu3_4)
relu4_2 = self.relu4_2(relu4_1)
relu4_3 = self.relu4_3(relu4_2)
relu4_4 = self.relu4_4(relu4_3)
relu5_1 = self.relu5_1(relu4_4)
relu5_2 = self.relu5_2(relu5_1)
relu5_3 = self.relu5_3(relu5_2)
relu5_4 = self.relu5_4(relu5_3)
out = {'relu1_1': relu1_1, 'relu1_2': relu1_2, 'relu2_1': relu2_1, 'relu2_2': relu2_2, 'relu3_1': relu3_1, 'relu3_2': relu3_2, 'relu3_3': relu3_3, 'relu3_4': relu3_4, 'relu4_1': relu4_1, 'relu4_2': relu4_2, 'relu4_3': relu4_3, 'relu4_4': relu4_4, 'relu5_1': relu5_1, 'relu5_2': relu5_2, 'relu5_3': relu5_3, 'relu5_4': relu5_4}
return out |
def func():
ob = Foo()
ob.attr1 = 1
ob.attr2 = (ob.attr2 + [ob.attr1])
result = ob.attr2
return result |
def plot_parameter(parameter_name: str, train_values: Any, val_values: Any, tags: Any, output_path: str) -> None:
plot_1d(train_values, ('train_' + parameter_name), output_path, ['epoch', parameter_name], tags, (10, 10), 'plot', len(train_values))
plot_1d(val_values, ('val_' + parameter_name), output_path, ['epoch', parameter_name], tags, (10, 10), 'plot', len(val_values))
(train_values_last, train_values_last_mean) = get_values_last(train_values, 10)
(val_values_last, val_values_last_mean) = get_values_last(val_values, 10)
(train_values_top, train_tags_top) = get_values_top(train_values_last, tags, 11)
(val_values_top, val_tags_top) = get_values_top(val_values_last, tags, 11)
print(f'Plotting {parameter_name}')
plot_1d(train_values_top, ('box_train_' + parameter_name), output_path, ['epoch', parameter_name], train_tags_top, (10, 10), 'boxplot', len(train_values_top))
plot_1d(val_values_top, ('box_val_' + parameter_name), output_path, ['epoch', parameter_name], val_tags_top, (10, 10), 'boxplot', len(val_values_top))
plot_1d(train_values_last_mean, ('bar_train_' + parameter_name), output_path, ['epoch', parameter_name], tags, (10, 10), 'bar', len(train_values_last_mean))
plot_1d(val_values_last_mean, ('bar_val_' + parameter_name), output_path, ['epoch', parameter_name], tags, (10, 10), 'bar', len(val_values_last_mean)) |
class Decoder_MDCBlock1(torch.nn.Module):
def __init__(self, num_filter, num_ft, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None, mode='iter1'):
super(Decoder_MDCBlock1, self).__init__()
self.mode = mode
self.num_ft = (num_ft - 1)
self.down_convs = nn.ModuleList()
self.up_convs = nn.ModuleList()
for i in range(self.num_ft):
self.down_convs.append(ConvBlock((num_filter * (2 ** i)), (num_filter * (2 ** (i + 1))), kernel_size, stride, padding, bias, activation, norm=None))
self.up_convs.append(DeconvBlock((num_filter * (2 ** (i + 1))), (num_filter * (2 ** i)), kernel_size, stride, padding, bias, activation, norm=None))
def forward(self, ft_h, ft_l_list):
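# Fuse the current-scale feature ft_h with the lower-scale features in ft_l_list. 'iter1'/'conv' downsample ft_h once per scale and then fold the differences back up through the deconvolutions; 'iter2'-'iter4' instead repeatedly downsample a running fusion (or ft_h for 'iter4') to each stored scale, subtract, upsample the residual back and accumulate, differing only in traversal order.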
if ((self.mode == 'iter1') or (self.mode == 'conv')):
ft_h_list = []
for i in range(len(ft_l_list)):
ft_h_list.append(ft_h)
ft_h = self.down_convs[((self.num_ft - len(ft_l_list)) + i)](ft_h)
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft_fusion = (self.up_convs[((self.num_ft - i) - 1)]((ft_fusion - ft_l_list[i])) + ft_h_list[((len(ft_l_list) - i) - 1)])
if (self.mode == 'iter2'):
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_fusion
for j in range((self.num_ft - i)):
ft = self.down_convs[j](ft)
ft = (ft - ft_l_list[i])
for j in range((self.num_ft - i)):
ft = self.up_convs[(((self.num_ft - i) - j) - 1)](ft)
ft_fusion = (ft_fusion + ft)
if (self.mode == 'iter3'):
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_fusion
for j in range((i + 1)):
ft = self.down_convs[j](ft)
ft = (ft - ft_l_list[((len(ft_l_list) - i) - 1)])
for j in range((i + 1)):
ft = self.up_convs[(((i + 1) - j) - 1)](ft)
ft_fusion = (ft_fusion + ft)
if (self.mode == 'iter4'):
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_h
for j in range((self.num_ft - i)):
ft = self.down_convs[j](ft)
ft = (ft - ft_l_list[i])
for j in range((self.num_ft - i)):
ft = self.up_convs[(((self.num_ft - i) - j) - 1)](ft)
ft_fusion = (ft_fusion + ft)
return ft_fusion |
class NonInteractiveSpinner(SpinnerInterface):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update('started')
def _update(self, status):
assert (not self._finished)
self._rate_limiter.reset()
logger.info('%s: %s', self._message, status)
def spin(self):
if self._finished:
return
if (not self._rate_limiter.ready()):
return
self._update('still running...')
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '{final_status}'".format(**locals()))
self._finished = True |
def create_diffuser(cfg: DictConfig, *args: List, **kwargs: Dict) -> nn.Module:
eps_model = MODEL.get(cfg.model.name)(cfg.model, *args, **kwargs)
has_obser = (cfg.task.has_observation if ('has_observation' in cfg.task) else False)
diffuser = DIFFUSER.get(cfg.diffuser.name)(eps_model, cfg.diffuser, has_obser, *args, **kwargs)
if ('optimizer' in cfg):
optimizer = create_optimizer(cfg.optimizer, *args, **kwargs)
diffuser.set_optimizer(optimizer)
if ('planner' in cfg):
planner = create_planner(cfg.planner, *args, **kwargs)
diffuser.set_planner(planner)
return diffuser |
def test_write_statistics_no_individual(search_statistics):
assert (not search_statistics.write_statistics()) |
def _shell_pop_print(old_call):
if (not pybuf_enabled):
return old_call
info('Graphical python shell detected, using wrapped sys.stdout')
@functools.wraps(old_call)
def new_call(*args, **kwargs):
ret = old_call(*args, **kwargs)
print(_ti_core.pop_python_print_buffer(), end='')
return ret
return new_call |
class CrossEntropyLoss(_WeightedLoss):
def __init__(self, weight=None, size_average=None, ignore_index=(- 100), reduce=None, reduction='elementwise_mean'):
super(CrossEntropyLoss, self).__init__(weight, size_average, reduce, reduction)
self.ignore_index = ignore_index
def forward(self, input, target):
return F.cross_entropy(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction) |
def get_activation(activation_string):
if (activation_string in ACT2FN):
return ACT2FN[activation_string]
else:
raise KeyError('function {} not found in ACT2FN mapping {} or torch.nn.functional'.format(activation_string, list(ACT2FN.keys()))) |
def align_pos(original_sentence, corrected_sentence):
(orig, cor) = align(original_sentence, corrected_sentence)
(orig_out, cor_out) = ([[]], [[]])
for tok in orig:
if (tok.pos == 'WS'):
orig_out.append([])
else:
orig_out[(- 1)].append((tok.token, tok.pos))
for tok in cor:
if (tok.pos == 'WS'):
cor_out.append([])
else:
cor_out[(- 1)].append((tok.token, tok.pos))
return (orig_out, cor_out) |
class SL2Z_class(Gamma0_class):
def __init__(self):
Gamma0_class.__init__(self, 1)
def __reduce__(self):
return (_SL2Z_ref, ())
def _element_constructor_(self, x, check=True):
return ArithmeticSubgroupElement(self, x, check=check)
def _contains_sl2(self, a, b, c, d):
return True
def _repr_(self):
return 'Modular Group SL(2,Z)'
def _latex_(self):
return ('\\mbox{\\rm SL}_2(%s)' % ZZ._latex_())
def is_subgroup(self, right):
return (right.level() == 1)
def reduce_cusp(self, c):
return Cusp(1, 0)
def random_element(self, bound=100, *args, **kwds):
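# Random SL(2,Z) element with entries bounded by 'bound': draw a coprime bottom row (c, d) by rejection sampling, lift it to an integer matrix of determinant 1, then shift the top row by a random multiple w of (c, d), with w constrained so that a and b also stay within the bound.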
if (bound <= 1):
raise ValueError('bound must be greater than 1')
c = ZZ.random_element((1 - bound), bound, *args, **kwds)
d = ZZ.random_element((1 - bound), bound, *args, **kwds)
if (gcd(c, d) != 1):
return self.random_element(bound, *args, **kwds)
else:
(a, b, c, d) = lift_to_sl2z(c, d, 0)
whi = bound
wlo = bound
if (c > 0):
whi = min(whi, ((bound - a) / ZZ(c)).ceil())
wlo = min(wlo, ((bound + a) / ZZ(c)).ceil())
elif (c < 0):
whi = min(whi, ((bound + a) / ZZ((- c))).ceil())
wlo = min(wlo, ((bound - a) / ZZ((- c))).ceil())
if (d > 0):
whi = min(whi, ((bound - b) / ZZ(d)).ceil())
wlo = min(wlo, ((bound + b) / ZZ(d)).ceil())
elif (d < 0):
whi = min(whi, ((bound + b) / ZZ((- d))).ceil())
wlo = min(wlo, ((bound - b) / ZZ((- d))).ceil())
w = ZZ.random_element((1 - wlo), whi, *args, **kwds)
a += (c * w)
b += (d * w)
return self([a, b, c, d]) |
class Cn2An(object):
def __init__(self):
self.conf = utils.get_default_conf()
self.ac = An2Cn()
def cn2an(self, inputs=None, mode='strict'):
if (inputs is not None):
if (mode not in ['strict', 'normal', 'smart']):
raise ValueError("mode must be one of 'strict', 'normal' or 'smart'!")
(negative, inputs, data_type) = self.check_input_data_is_valid(inputs, mode)
if (data_type == 'integer'):
output = self.integer_convert(inputs)
elif (data_type == 'decimal'):
(integer_data, decimal_data) = inputs.split('点')
output = (self.integer_convert(integer_data) + self.decimal_convert(decimal_data))
elif (data_type == 'all_num'):
output = self.direct_convert(inputs)
else:
raise ValueError(f'unsupported data type for input: {inputs}!')
else:
raise ValueError('input must not be None!')
return (negative * output)
def check_input_data_is_valid(self, check_data, mode):
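# Validate a Chinese-numeral string and classify it as 'integer', 'decimal' or 'all_num', returning (sign, cleaned data, type). 'strict' accepts only well-formed numerals, 'normal' also tolerates colloquial forms, and 'smart' first rewrites embedded Arabic digits into Chinese numerals and then applies the 'normal' rules.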
nag = 1
if (mode == 'strict'):
strict_check_key = (((self.conf['number_low'] + self.conf['number_up']) + (self.conf['unit_low'] + self.conf['unit_up'])) + ['点', '负'])
for data in check_data:
if (data not in strict_check_key):
raise ValueError(f'invalid character for mode {mode}: {data}!')
if (check_data[0] == '负'):
check_data = check_data[1:]
nag = (- 1)
elif (mode == 'normal'):
normal_check_key = (list(self.conf['number_unit'].keys()) + ['点', '负'])
for data in check_data:
if (data not in normal_check_key):
raise ValueError(f'invalid character for mode {mode}: {data}!')
if (check_data[0] == '负'):
check_data = check_data[1:]
nag = (- 1)
elif (mode == 'smart'):
smart_check_key = (list(self.conf['number_unit'].keys()) + ['点', '负', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-'])
for data in check_data:
if (data not in smart_check_key):
raise ValueError(f'invalid character for mode {mode}: {data}!')
if (check_data[0] in ['负', '-']):
check_data = check_data[1:]
nag = (- 1)
def an2cn_sub(matched):
return self.ac.an2cn(matched.group())
check_data = re.sub('\\d+', an2cn_sub, check_data)
mode = 'normal'
if ('点' in check_data):
split_data = check_data.split('点')
if (len(split_data) == 2):
(integer_data, decimal_data) = split_data
else:
raise ValueError('the input contains more than one decimal point!')
else:
integer_data = check_data
decimal_data = None
all_num = (''.join(set((self.conf['number_low'] + self.conf['number_up']))) + '两')
all_unit = ''.join(set((self.conf['unit_low'] + self.conf['unit_up'])))
ptn_normal = re.compile(f'(([{all_num}]+[{all_unit}]+)+?[{all_num}]|([{all_num}]+[{all_unit}]+)+|[十][{all_num}]|[{all_num}]|[十])$')
re_normal = ptn_normal.search(integer_data)
if re_normal:
if (re_normal.group() != integer_data):
if (mode == 'strict'):
raise ValueError(f'invalid input data: {integer_data}')
elif (mode == 'normal'):
ptn_all_num = re.compile(f'[{all_num}]+')
re_all_num = ptn_all_num.search(integer_data)
if re_all_num:
if (re_all_num.group() != integer_data):
raise ValueError(f'invalid input data: {integer_data}')
else:
return (nag, check_data, 'all_num')
else:
raise ValueError(f'invalid input data: {integer_data}')
elif decimal_data:
return (nag, check_data, 'decimal')
elif (check_data[(- 1)] == '点'):
if (mode == 'strict'):
raise ValueError(f'invalid input data: {check_data}')
elif (mode == 'normal'):
return (nag, check_data, 'decimal')
else:
return (nag, check_data, 'integer')
elif (mode == 'strict'):
raise ValueError(f'invalid input data: {integer_data}')
elif (mode == 'normal'):
if decimal_data:
return (nag, check_data, 'decimal')
else:
raise ValueError(f'invalid input data: {integer_data}')
else:
raise ValueError(f'invalid input data: {integer_data}')
def integer_convert(self, integer_data):
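# Convert the integer part: a three-character spoken form such as 五十三 (53) is handled directly, otherwise the string is scanned right to left, multiplying digits by the running unit and promoting the unit whenever a 十/百/千/万/亿-style character appears, e.g. 二百零五 -> 205 (assuming the standard cn2an number_unit table).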
all_num = (''.join(set((self.conf['number_low'] + self.conf['number_up']))) + '两')
ptn_speaking_mode = re.compile(f'^[{all_num}]十[{all_num}]$')
result = ptn_speaking_mode.search(integer_data)
if result:
high_num = (self.conf['number_unit'].get(integer_data[0]) * self.conf['number_unit'].get(integer_data[1]))
low_num = ((self.conf['number_unit'].get(integer_data[2]) * self.conf['number_unit'].get(integer_data[1])) / 10)
output_integer = (high_num + low_num)
else:
output_integer = 0
unit = 1
ten_thousand_unit = 1
max_ten_thousand_unit = 1
last_unit = 0
for (index, cn_num) in enumerate(reversed(integer_data)):
num = self.conf['number_unit'].get(cn_num)
if (num < 10):
output_integer += (num * unit)
else:
if ((num % 10000) == 0):
if (num > ten_thousand_unit):
ten_thousand_unit = num
max_ten_thousand_unit = num
else:
if (last_unit < num < max_ten_thousand_unit):
ten_thousand_unit = (num * max_ten_thousand_unit)
else:
ten_thousand_unit = (num * ten_thousand_unit)
last_unit = num
num = ten_thousand_unit
if (num > unit):
unit = num
else:
unit = (num * ten_thousand_unit)
if (index == (len(integer_data) - 1)):
output_integer += unit
return int(output_integer)
def decimal_convert(self, decimal_data):
len_decimal_data = len(decimal_data)
if (len_decimal_data > 15):
print('warning: the decimal part has {} digits; only the first 15 are kept!'.format(len_decimal_data))
decimal_data = decimal_data[:15]
len_decimal_data = 15
output_decimal = 0
for index in range((len(decimal_data) - 1), (- 1), (- 1)):
unit_key = self.conf['number_unit'].get(decimal_data[index])
output_decimal += (unit_key * (10 ** (- (index + 1))))
output_decimal = round(output_decimal, len_decimal_data)
return output_decimal
def direct_convert(self, data):
output_data = 0
if ('点' in data):
point_index = data.index('点')
for index_integer in range((point_index - 1), (- 1), (- 1)):
unit_key = self.conf['number_unit'].get(data[index_integer])
output_data += (unit_key * (10 ** ((point_index - index_integer) - 1)))
for index_decimal in range((len(data) - 1), point_index, (- 1)):
unit_key = self.conf['number_unit'].get(data[index_decimal])
output_data += (unit_key * (10 ** (- (index_decimal - point_index))))
output_data = round(output_data, (len(data) - point_index))
else:
for index in range((len(data) - 1), (- 1), (- 1)):
unit_key = self.conf['number_unit'].get(data[index])
output_data += (unit_key * (10 ** ((len(data) - index) - 1)))
return output_data |
@pytest.mark.parametrize('dataset_class', [Sinusoid, Harmonic, SinusoidAndLine])
def test_toy_task(dataset_class):
dataset = dataset_class(10, num_tasks=1000, noise_std=None)
task = dataset[0]
assert isinstance(task, Task)
assert (len(task) == 10) |
def gen_grid(nx=5, ny=5, nt=10, Lx=1.0, Ly=1.0, T=1.0):
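# Build collocation points for a 2-D, time-dependent problem on [0,Lx]x[0,Ly]x[0,T]: interior points, a denser t=0 slice for the initial condition, x- and y-boundary slabs, and a finer uniform test grid; the returned index arrays pick out the initial, boundary and interior subsets (a typical PINN-style sampling layout).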
(x_grid, y_grid, t_grid) = np.meshgrid(np.linspace(0, Lx, nx)[1:(- 1)], np.linspace(0, Ly, ny)[1:(- 1)], np.linspace(0, T, nt)[1:], indexing='ij')
(x_grid, y_grid, t_grid) = [x.reshape((- 1), 1) for x in [x_grid, y_grid, t_grid]]
(x_init, y_init, t_init) = np.meshgrid(np.linspace(0, Lx, ((nx - 2) * int(np.sqrt(nt)))), np.linspace(0, Ly, ((ny - 2) * int(np.sqrt(nt)))), [0], indexing='ij')
(x_init, y_init, t_init) = [x.reshape((- 1), 1) for x in [x_init, y_init, t_init]]
(x_Xbc, y_Xbc, t_Xbc) = np.meshgrid(np.linspace(0, Lx, (nx * int(((ny - 2) / 4)))), np.linspace(0, Ly, 2), np.linspace(0, T, nt)[1:], indexing='ij')
(x_Xbc, y_Xbc, t_Xbc) = [x.reshape((- 1), 1) for x in [x_Xbc, y_Xbc, t_Xbc]]
(x_Ybc, y_Ybc, t_Ybc) = np.meshgrid(np.linspace(0, Lx, 2), np.linspace(0, Ly, (ny * int(((nx - 2) / 4)))), np.linspace(0, T, nt)[1:], indexing='ij')
(x_Ybc, y_Ybc, t_Ybc) = [x.reshape((- 1), 1) for x in [x_Ybc, y_Ybc, t_Ybc]]
(x_bc, y_bc, t_bc) = [np.concatenate([x, y], axis=0) for (x, y) in zip([x_Xbc, y_Xbc, t_Xbc], [x_Ybc, y_Ybc, t_Ybc])]
x_grid = np.concatenate([x_grid, x_init, x_Xbc, x_Ybc], axis=0)
y_grid = np.concatenate([y_grid, y_init, y_Xbc, y_Ybc], axis=0)
t_grid = np.concatenate([t_grid, t_init, t_Xbc, t_Ybc], axis=0)
(x_test, y_test, t_test) = np.meshgrid(np.linspace(0, Lx, (3 * nx)), np.linspace(0, Ly, (3 * ny)), np.linspace(0, T, (3 * nt)), indexing='ij')
t0_ids = np.where((t_grid.flatten() == 0.0))[0]
bc_ids = np.where(np.logical_or(np.logical_or((x_grid.flatten() == 0.0), (x_grid.flatten() == Lx)), np.logical_or((y_grid.flatten() == 0.0), (y_grid.flatten() == Ly))))[0]
dom_ids = np.where(np.logical_and((t_grid.flatten() > 0.0), np.logical_and(np.logical_and((x_grid.flatten() > 0.0), (x_grid.flatten() < Lx)), np.logical_and((y_grid.flatten() > 0.0), (y_grid.flatten() < Ly)))))[0]
return {'x': x_grid, 'y': y_grid, 't': t_grid, 't0_ids': t0_ids, 'bc_ids': bc_ids, 'dom_ids': dom_ids, 'x_test': x_test, 'y_test': y_test, 't_test': t_test} |
def validate_pe_ruc(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(ruc.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if (column != ''):
return df[column].apply(ruc.is_valid)
else:
return df.applymap(ruc.is_valid)
return ruc.is_valid(df) |
@require_torch
class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def get_pretrained_model_and_inputs(self):
model = VisionTextDualEncoderModel.from_vision_text_pretrained('hf-internal-testing/tiny-random-deit', 'hf-internal-testing/tiny-random-roberta')
batch_size = 13
pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return (model, inputs)
def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
(vision_model, text_model) = self.get_vision_text_model(vision_config, text_config)
model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
model.to(torch_device)
model.eval()
output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
vision_attentions = output.vision_model_output.attentions
self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
image_size = to_2tuple(vision_model.config.image_size)
patch_size = to_2tuple(vision_model.config.patch_size)
num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
seq_len = (num_patches + 2)
self.assertEqual(vision_attentions[0].shape[(- 3):], (vision_config.num_attention_heads, seq_len, seq_len))
text_attentions = output.text_model_output.attentions
self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
self.assertEqual(text_attentions[0].shape[(- 3):], (text_config.num_attention_heads, input_ids.shape[(- 1)], input_ids.shape[(- 1)]))
def get_vision_text_model(self, vision_config, text_config):
vision_model = DeiTModel(vision_config).eval()
text_model = RobertaModel(text_config).eval()
return (vision_model, text_model)
def prepare_config_and_inputs(self):
vit_model_tester = DeiTModelTester(self)
bert_model_tester = RobertaModelTester(self)
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
(vision_config, pixel_values, _) = vision_config_and_inputs
(text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = text_config_and_inputs
return {'text_config': text_config, 'vision_config': vision_config, 'pixel_values': pixel_values, 'attention_mask': input_mask, 'input_ids': input_ids, 'text_token_type_ids': token_type_ids, 'text_sequence_labels': sequence_labels, 'text_token_labels': token_labels, 'text_choice_labels': choice_labels}
def test_pt_flax_equivalence(self):
pass |
def activation_name_to_func(activation_name):
if isinstance(activation_name, str):
if (activation_name == 'linear'):
act_fn = tf.identity
elif (activation_name == 'relu'):
act_fn = tf.nn.relu
elif (activation_name == 'elu'):
act_fn = tf.nn.elu
elif (activation_name == 'selu'):
act_fn = selu
elif (activation_name == 'sigmoid'):
act_fn = tf.nn.sigmoid
elif (activation_name == 'tanh'):
act_fn = tf.nn.tanh
elif (activation_name == 'exp'):
act_fn = tf.exp
elif (activation_name == 'log'):
act_fn = tf.log
elif (activation_name == 'gelu'):
act_fn = gelu
elif (activation_name == 'swish'):
act_fn = swish
elif (activation_name == 'lrelu'):
act_fn = tf.nn.leaky_relu
else:
raise AttributeError(('no activation function named as %s' % activation_name))
elif hasattr(activation_name, '__call__'):
act_fn = activation_name
else:
raise AttributeError
return act_fn |
class SquadProcessor(DataProcessor):
train_file = None
dev_file = None
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
if (not evaluate):
answer = tensor_dict['answers']['text'][0].numpy().decode('utf-8')
answer_start = tensor_dict['answers']['answer_start'][0].numpy()
answers = []
else:
answers = [{'answer_start': start.numpy(), 'text': text.numpy().decode('utf-8')} for (start, text) in zip(tensor_dict['answers']['answer_start'], tensor_dict['answers']['text'])]
answer = None
answer_start = None
return SquadExample(qas_id=tensor_dict['id'].numpy().decode('utf-8'), question_text=tensor_dict['question'].numpy().decode('utf-8'), context_text=tensor_dict['context'].numpy().decode('utf-8'), answer_text=answer, start_position_character=answer_start, title=tensor_dict['title'].numpy().decode('utf-8'), answers=answers)
def get_examples_from_dataset(self, dataset, evaluate=False):
if evaluate:
dataset = dataset['validation']
else:
dataset = dataset['train']
examples = []
for tensor_dict in tqdm(dataset):
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
return examples
def get_train_examples(self, data_dir, filename=None):
if (data_dir is None):
data_dir = ''
if (self.train_file is None):
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, (self.train_file if (filename is None) else filename)), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'train')
def get_dev_examples(self, data_dir, filename=None):
if (data_dir is None):
data_dir = ''
if (self.dev_file is None):
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, (self.dev_file if (filename is None) else filename)), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'dev')
def _create_examples(self, input_data, set_type):
is_training = (set_type == 'train')
examples = []
for entry in tqdm(input_data):
title = entry['title']
for paragraph in entry['paragraphs']:
context_text = paragraph['context']
for qa in paragraph['qas']:
qas_id = qa['id']
question_text = qa['question']
start_position_character = None
answer_text = None
answers = []
if ('is_impossible' in qa):
is_impossible = qa['is_impossible']
else:
is_impossible = False
if (not is_impossible):
if is_training:
answer = qa['answers'][0]
answer_text = answer['text']
start_position_character = answer['answer_start']
else:
answers = qa['answers']
example = SquadExample(qas_id=qas_id, question_text=question_text, context_text=context_text, answer_text=answer_text, start_position_character=start_position_character, title=title, is_impossible=is_impossible, answers=answers)
examples.append(example)
return examples |
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, filter_size=1):
super(InvertedResidual, self).__init__()
self.stride = stride
assert (stride in [1, 2])
hidden_dim = int(round((inp * expand_ratio)))
self.use_res_connect = ((self.stride == 1) and (inp == oup))
layers = []
if (expand_ratio != 1):
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
if (stride == 1):
layers.extend([ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup)])
else:
layers.extend([ConvBNReLU(hidden_dim, hidden_dim, stride=1, groups=hidden_dim), Downsample(filt_size=filter_size, stride=stride, channels=hidden_dim), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup)])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return (x + self.conv(x))
else:
return self.conv(x) |
class TestDiscretePolicies(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.env = GarageEnv(DummyDiscreteEnv())
def teardown_method(self):
self.env.close()
super().teardown_method()
def test_categorical_gru_policy(self):
categorical_gru_policy = CategoricalGRUPolicy(env_spec=self.env, hidden_dim=1, state_include_action=False)
categorical_gru_policy.reset()
obs = self.env.observation_space.high
assert categorical_gru_policy.get_action(obs)
def test_categorical_lstm_policy(self):
categorical_lstm_policy = CategoricalLSTMPolicy(env_spec=self.env, hidden_dim=1, state_include_action=False)
categorical_lstm_policy.reset()
obs = self.env.observation_space.high
assert categorical_lstm_policy.get_action(obs)
def test_categorical_mlp_policy(self):
categorical_mlp_policy = CategoricalMLPPolicy(env_spec=self.env, hidden_sizes=(1,))
obs = self.env.observation_space.high
assert categorical_mlp_policy.get_action(obs) |
def main(ranking_top_k_path, output_path, jsonl_corpus_path):
json_corpus = load_json_corpus(jsonl_corpus_path)
top_k = 500
with jsonlines.open(output_path, mode='w') as writer:
first_stage_ranking_dict = load_ranking(ranking_top_k_path, top_k=None)
for (query_id, retrieved_docs) in first_stage_ranking_dict.items():
for (did, rank_number) in retrieved_docs:
query_text = json_corpus[query_id]
guid = '{}_{}'.format(query_id, did)
para_text = json_corpus[did]
rank_number = int(rank_number)
if (rank_number <= top_k):
out_ = {'guid': guid, 'q_paras': query_text, 'c_paras': para_text, 'label': 1}
writer.write(out_)
elif (rank_number > top_k):
out_ = {'guid': guid, 'q_paras': query_text, 'c_paras': para_text, 'label': 0}
writer.write(out_) |
class ParserTfds(Parser):
def __init__(self, root, name, split='train', is_training=False, batch_size=None, download=False, repeats=0, seed=42, input_name='image', input_image='RGB', target_name='label', target_image='', prefetch_size=None, shuffle_size=None, max_threadpool_size=None):
super().__init__()
self.root = root
self.split = split
self.is_training = is_training
if self.is_training:
assert (batch_size is not None), 'Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper'
self.batch_size = batch_size
self.repeats = repeats
self.common_seed = seed
self.prefetch_size = (prefetch_size or PREFETCH_SIZE)
self.shuffle_size = (shuffle_size or SHUFFLE_SIZE)
self.max_threadpool_size = (max_threadpool_size or MAX_TP_SIZE)
self.input_name = input_name
self.input_image = input_image
self.target_name = target_name
self.target_image = target_image
self.builder = tfds.builder(name, data_dir=root)
if download:
self.builder.download_and_prepare()
self.class_to_idx = (get_class_labels(self.builder.info) if (self.target_name == 'label') else {})
self.split_info = self.builder.info.splits[split]
self.num_examples = self.split_info.num_examples
self.dist_rank = 0
self.dist_num_replicas = 1
if (dist.is_available() and dist.is_initialized() and (dist.get_world_size() > 1)):
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
self.global_num_workers = 1
self.worker_info = None
self.worker_seed = 0
self.subsplit = None
self.ds = None
def _lazy_init(self):
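# Deferred per-worker setup: compute the global worker id across DataLoader workers and distributed replicas, shard the TFDS split either via tf.distribute.InputContext or via explicit even sub-splits when there are fewer shards than workers (or when not training), then build the shuffled, repeated and prefetched tf.data pipeline.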
worker_info = torch.utils.data.get_worker_info()
num_workers = 1
global_worker_id = 0
if (worker_info is not None):
self.worker_info = worker_info
self.worker_seed = worker_info.seed
num_workers = worker_info.num_workers
self.global_num_workers = (self.dist_num_replicas * num_workers)
global_worker_id = ((self.dist_rank * num_workers) + worker_info.id)
should_subsplit = ((self.global_num_workers > 1) and ((self.split_info.num_shards < self.global_num_workers) or (not self.is_training)))
if should_subsplit:
if has_buggy_even_splits:
if (not isinstance(self.split_info, tfds.core.splits.SubSplitInfo)):
subsplits = even_split_indices(self.split, self.global_num_workers, self.num_examples)
self.subsplit = subsplits[global_worker_id]
else:
subsplits = tfds.even_splits(self.split, self.global_num_workers)
self.subsplit = subsplits[global_worker_id]
input_context = None
if ((self.global_num_workers > 1) and (self.subsplit is None)):
input_context = tf.distribute.InputContext(num_input_pipelines=self.global_num_workers, input_pipeline_id=global_worker_id, num_replicas_in_sync=self.dist_num_replicas)
read_config = tfds.ReadConfig(shuffle_seed=self.common_seed, shuffle_reshuffle_each_iteration=True, input_context=input_context)
ds = self.builder.as_dataset(split=(self.subsplit or self.split), shuffle_files=self.is_training, read_config=read_config)
options = tf.data.Options()
thread_member = ('threading' if hasattr(options, 'threading') else 'experimental_threading')
getattr(options, thread_member).private_threadpool_size = max(1, (self.max_threadpool_size // num_workers))
getattr(options, thread_member).max_intra_op_parallelism = 1
ds = ds.with_options(options)
if (self.is_training or (self.repeats > 1)):
ds = ds.repeat()
if self.is_training:
ds = ds.shuffle((min(self.num_examples, self.shuffle_size) // self.global_num_workers), seed=self.worker_seed)
ds = ds.prefetch(min((self.num_examples // self.global_num_workers), self.prefetch_size))
self.ds = tfds.as_numpy(ds)
def __iter__(self):
if (self.ds is None):
self._lazy_init()
target_example_count = math.ceil(((max(1, self.repeats) * self.num_examples) / self.global_num_workers))
if self.is_training:
target_example_count = (math.ceil((target_example_count / self.batch_size)) * self.batch_size)
example_count = 0
for example in self.ds:
input_data = example[self.input_name]
if self.input_image:
input_data = Image.fromarray(input_data, mode=self.input_image)
target_data = example[self.target_name]
if self.target_image:
target_data = Image.fromarray(target_data, mode=self.target_image)
(yield (input_data, target_data))
example_count += 1
if (self.is_training and (example_count >= target_example_count)):
break
if ((not self.is_training) and (self.dist_num_replicas > 1) and (self.subsplit is not None) and (0 < example_count < target_example_count)):
while (example_count < target_example_count):
(yield (input_data, target_data))
example_count += 1
def __len__(self):
return math.ceil(((max(1, self.repeats) * self.num_examples) / self.dist_num_replicas))
def _filename(self, index, basename=False, absolute=False):
assert False, 'Not supported'
def filenames(self, basename=False, absolute=False):
if (self.ds is None):
self._lazy_init()
names = []
for sample in self.ds:
if (len(names) > self.num_examples):
break
if ('file_name' in sample):
name = sample['file_name']
elif ('filename' in sample):
name = sample['filename']
elif ('id' in sample):
name = sample['id']
else:
assert False, 'No supported name field present'
names.append(name)
return names |
def parse_argv(parser):
parser.add_argument('--eval_results', nargs='+', required=True, help='path to eval json files') |
def test_graphql_wsgi_loader(graphql_path, graphql_app, run_wsgi_test):
schema = loaders.from_wsgi(graphql_path, graphql_app)
strategy = schema[graphql_path]['POST'].as_strategy()
run_wsgi_test(strategy) |
def main():
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
with tf.Session(config=gpu_config) as sess:
_inputs = {'query': tf.placeholder(dtype=tf.float32, shape=[None, flags.dim_text]), 'answer': tf.placeholder(dtype=tf.float32, shape=[None, 5, flags.dim_text]), 'story': tf.placeholder(dtype=tf.float32, shape=[None, flags.dim_mcb]), 'cor_idx': tf.placeholder(dtype=tf.int64, shape=[None]), 'rgb': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.dim_rgb]), 'sub': tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.dim_sub])}
if (FLAGS.video_features == True):
_inputs.pop('story', None)
else:
_inputs.pop('rgb', None)
_inputs.pop('sub', None)
if (FLAGS.video_features == True):
model = ltm_video_model(flags=flags, inputs=_inputs)
else:
model = ltm_model(flags=flags, inputs=_inputs)
model.build_model()
saved_op = {}
for var in tf.trainable_variables():
print(var.name)
saved_op[var.name] = var
saver = tf.train.Saver(saved_op)
saver.restore(sess, flags.checkpoint_file)
test_queue = LTM_Queue(story_filename=STORY_FILE, qa_filelist=QA_TEST, capacity=30, batch_size=1, num_threads=1)
test_queue.start_threads(sequential=True)
for var in tf.trainable_variables():
print(var.name)
num_test_examples = {'video_sub': 1258, 'video_sub_aug': 1258, 'VideoSubInception': 1258, 'VideoSubResnet': 1258, 'sub': 3138, 'script': 1598, 'plot': 3138, 'dvs': 570}
qid_dict = {}
while True:
ts = time()
queue_inputs = test_queue.get_inputs()
feed = {}
for (key, val) in _inputs.items():
if (key == 'cor_idx'):
continue
try:
feed[_inputs[key]] = queue_inputs[(key + '_rep')]
except KeyError:
feed[_inputs[key]] = queue_inputs[key]
batch_predictions = sess.run(model.answer_prediction, feed)
for i in range(len(queue_inputs['qid'])):
qid = queue_inputs['qid'][i]
pred = batch_predictions[i]
print(pred)
result = (((str(qid) + ' ') + str(pred)) + '\n')
assert (qid not in qid_dict.keys())
qid_dict[qid] = str(pred)
if (len(qid_dict) == num_test_examples[flags.data_source]):
break
f = open((('./' + flags.data_source) + '_result.txt'), 'w')
keys = list(qid_dict.keys())
def sort_fun(x):
return int(x.split(':')[(- 1)])
keys.sort(key=sort_fun)
for key in keys:
print(key, qid_dict[key])
result = (((str(key) + ' ') + str(qid_dict[key])) + '\n')
f.write(result)
f.close() |
class BopomofoConverter(object):
def to_bopomofo(self, pinyin, **kwargs):
pinyin = self._pre_convert(pinyin)
for (find_re, replace) in BOPOMOFO_REPLACE:
pinyin = find_re.sub(replace, pinyin)
pinyin = ''.join((BOPOMOFO_TABLE.get(x, x) for x in pinyin))
return pinyin
def to_bopomofo_first(self, pinyin, **kwargs):
pinyin = self.to_bopomofo(pinyin, **kwargs)
return pinyin[0]
def _pre_convert(self, pinyin):
pinyin = replace_symbol_to_number(pinyin)
return RE_TONE3.sub('\\1\\3\\2', pinyin) |
def crop_to_bounding_box(image, bbox):
(x, y, w, h) = bbox
w = (w + x)
h = (y + h)
bbox = (x, y, w, h)
cropped_image = image.crop(bbox)
return cropped_image |
def demo(seed=None):
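# Sample spike trains from a network Hawkes model, fit a standard discrete-time Hawkes model to them with BFGS, print the recovered bias, weights and log likelihood next to the true values, and plot true vs. fitted rates for the first three processes.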
if (seed is None):
seed = np.random.randint((2 ** 32))
print('Setting seed to ', seed)
np.random.seed(seed)
K = 5
T = 10000
dt = 1
dt_max = 50
B = 1
(S, true_model) = sample_from_network_hawkes(K, T, dt, dt_max, B)
test_basis = true_model.basis
test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=(dt_max + dt), beta=1.0, basis=test_basis, allow_self_connections=True)
test_model.add_data(S)
test_model.fit_with_bfgs()
print('lambda0 true: ', true_model.bias_model.lambda0)
print('lambda0 test ', test_model.bias)
print('')
print('W true: ', (true_model.weight_model.A * true_model.weight_model.W))
print('W test: ', test_model.W)
print('')
print('ll true: ', true_model.log_likelihood())
print('ll test: ', test_model.log_likelihood())
plt.figure()
for k in range(3):
plt.subplot(3, 1, (k + 1))
plt.plot((np.arange(T) * dt), true_model.compute_rate(proc=k), '-b')
plt.plot((np.arange(T) * dt), test_model.compute_rate(ks=k), '-r')
lim = plt.ylim()
plt.ylim(0, (1.25 * lim[1]))
plt.ioff()
plt.show() |
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(num_classes=num_classes, loss=loss, block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None, **kwargs)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model |
def rotate_image(image, angle):
image_center = ((image.shape[1] / 2), (image.shape[0] / 2))
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
angle_r = ((float(angle) / 180) * PI)
result = cv2.warpAffine(image, rot_mat, (image.shape[1], image.shape[0]), flags=cv2.INTER_NEAREST)
return result |
def check_wmt_test_bleu(raw_folder, wmt_lang_pairs):
not_matchings = []
for (wmt, src_tgts) in wmt_lang_pairs:
for src_tgt in src_tgts:
print(f'checking test bleus for: {src_tgt} at {wmt}')
(src, tgt) = src_tgt.split('-')
(ssrc, stgt) = (src[:2], tgt[:2])
if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'):
test_src = f'{raw_folder}/test.{tgt}-{src}.{src}'
else:
test_src = f'{raw_folder}/test.{src}-{tgt}.{src}'
cmd1 = f'cat {test_src} | sacrebleu -t "{wmt}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""'
test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}'
cmd2 = f'cat {test_tgt} | sacrebleu -t "{wmt}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""'
bleu1 = run_eval_bleu(cmd1)
if (bleu1 != 100.0):
not_matchings.append(f'{wmt}:{src_tgt} source side not matching: {test_src}')
bleu2 = run_eval_bleu(cmd2)
if (bleu2 != 100.0):
not_matchings.append(f'{wmt}:{src_tgt} target side not matching: {test_tgt}')
return not_matchings |
class ComputeStatisticsForBlobs(NetModifier):
def __init__(self, blobs, logging_frequency):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._field_name_suffix = '_summary'
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None, modify_output_record=False):
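# For each requested blob, append a Cast-to-float and a Summarize operator to the net and Print the statistics every _logging_frequency iterations; when modify_output_record is set, the summary is also exposed as an extra scalar field on the net's output record.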
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
stats_name = net.NextScopedBlob(prefix=(blob + self._field_name_suffix))
stats = net.Summarize(cast_blob, stats_name, to_file=0)
net.Print(stats, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = (str(blob) + self._field_name_suffix)
output_scalar = schema.Scalar((np.float, (1,)), stats)
if (net.output_record() is None):
net.set_output_record(schema.Struct((output_field_name, output_scalar)))
else:
net.AppendOutputRecordField(output_field_name, output_scalar)
def field_name_suffix(self):
return self._field_name_suffix |
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) |
def got() -> operations.GraphOfOperations:
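# Graph-of-operations for sorting: split the input into 8 sublists, sort and score each (keeping the best of several candidates), then merge pairwise (1+2, 3+4, 5+6, 7+8, then 12+34 and 56+78, then a final aggregate), inserting a score / keep-best / refine round after every merge, with utils.num_errors as the scoring function and utils.test_sorting as ground truth.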
operations_graph = operations.GraphOfOperations()
plans = operations.Generate(1, 1)
operations_graph.append_operation(plans)
sorted_sublists = []
for i in range(1, 9):
list_id = f'List {i}'
sub_list = operations.Selector((lambda thoughts, list_id=list_id: [thought for thought in thoughts if (thought.state['part'] == list_id)]))
sub_list.add_predecessor(plans)
operations_graph.add_operation(sub_list)
sort_sub_list = operations.Generate(1, 5)
sort_sub_list.add_predecessor(sub_list)
operations_graph.add_operation(sort_sub_list)
score_sub_list = operations.Score(1, False, utils.num_errors)
score_sub_list.add_predecessor(sort_sub_list)
operations_graph.add_operation(score_sub_list)
keep_best_sub_list = operations.KeepBestN(1, False)
keep_best_sub_list.add_predecessor(score_sub_list)
operations_graph.add_operation(keep_best_sub_list)
sorted_sublists.append(keep_best_sub_list)
aggregate_1 = operations.Aggregate(10)
aggregate_1.add_predecessor(sorted_sublists[0])
aggregate_1.add_predecessor(sorted_sublists[1])
operations_graph.add_operation(aggregate_1)
score_aggregate_1 = operations.Score(1, False, utils.num_errors)
score_aggregate_1.add_predecessor(aggregate_1)
operations_graph.add_operation(score_aggregate_1)
keep_best_aggregate_1 = operations.KeepBestN(1, False)
keep_best_aggregate_1.add_predecessor(score_aggregate_1)
operations_graph.add_operation(keep_best_aggregate_1)
improve_aggregate_1 = operations.Generate(1, 5)
improve_aggregate_1.add_predecessor(keep_best_aggregate_1)
operations_graph.add_operation(improve_aggregate_1)
improve_score_aggregate_1 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_1.add_predecessor(improve_aggregate_1)
improve_score_aggregate_1.add_predecessor(keep_best_aggregate_1)
operations_graph.add_operation(improve_score_aggregate_1)
improve_keep_best_aggregate_1 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_1.add_predecessor(improve_score_aggregate_1)
operations_graph.add_operation(improve_keep_best_aggregate_1)
aggregate_2 = operations.Aggregate(10)
aggregate_2.add_predecessor(sorted_sublists[2])
aggregate_2.add_predecessor(sorted_sublists[3])
operations_graph.add_operation(aggregate_2)
score_aggregate_2 = operations.Score(1, False, utils.num_errors)
score_aggregate_2.add_predecessor(aggregate_2)
operations_graph.add_operation(score_aggregate_2)
keep_best_aggregate_2 = operations.KeepBestN(1, False)
keep_best_aggregate_2.add_predecessor(score_aggregate_2)
operations_graph.add_operation(keep_best_aggregate_2)
improve_aggregate_2 = operations.Generate(1, 5)
improve_aggregate_2.add_predecessor(keep_best_aggregate_2)
operations_graph.add_operation(improve_aggregate_2)
improve_score_aggregate_2 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_2.add_predecessor(improve_aggregate_2)
improve_score_aggregate_2.add_predecessor(keep_best_aggregate_2)
operations_graph.add_operation(improve_score_aggregate_2)
improve_keep_best_aggregate_2 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_2.add_predecessor(improve_score_aggregate_2)
operations_graph.add_operation(improve_keep_best_aggregate_2)
aggregate_3 = operations.Aggregate(10)
aggregate_3.add_predecessor(sorted_sublists[4])
aggregate_3.add_predecessor(sorted_sublists[5])
operations_graph.add_operation(aggregate_3)
score_aggregate_3 = operations.Score(1, False, utils.num_errors)
score_aggregate_3.add_predecessor(aggregate_3)
operations_graph.add_operation(score_aggregate_3)
keep_best_aggregate_3 = operations.KeepBestN(1, False)
keep_best_aggregate_3.add_predecessor(score_aggregate_3)
operations_graph.add_operation(keep_best_aggregate_3)
improve_aggregate_3 = operations.Generate(1, 5)
improve_aggregate_3.add_predecessor(keep_best_aggregate_3)
operations_graph.add_operation(improve_aggregate_3)
improve_score_aggregate_3 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_3.add_predecessor(improve_aggregate_3)
improve_score_aggregate_3.add_predecessor(keep_best_aggregate_3)
operations_graph.add_operation(improve_score_aggregate_3)
improve_keep_best_aggregate_3 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_3.add_predecessor(improve_score_aggregate_3)
operations_graph.add_operation(improve_keep_best_aggregate_3)
aggregate_4 = operations.Aggregate(10)
aggregate_4.add_predecessor(sorted_sublists[6])
aggregate_4.add_predecessor(sorted_sublists[7])
operations_graph.add_operation(aggregate_4)
score_aggregate_4 = operations.Score(1, False, utils.num_errors)
score_aggregate_4.add_predecessor(aggregate_4)
operations_graph.add_operation(score_aggregate_4)
keep_best_aggregate_4 = operations.KeepBestN(1, False)
keep_best_aggregate_4.add_predecessor(score_aggregate_4)
operations_graph.add_operation(keep_best_aggregate_4)
improve_aggregate_4 = operations.Generate(1, 5)
improve_aggregate_4.add_predecessor(keep_best_aggregate_4)
operations_graph.add_operation(improve_aggregate_4)
improve_score_aggregate_4 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_4.add_predecessor(improve_aggregate_4)
improve_score_aggregate_4.add_predecessor(keep_best_aggregate_4)
operations_graph.add_operation(improve_score_aggregate_4)
improve_keep_best_aggregate_4 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_4.add_predecessor(improve_score_aggregate_4)
operations_graph.add_operation(improve_keep_best_aggregate_4)
aggregate_1_2 = operations.Aggregate(10)
aggregate_1_2.add_predecessor(improve_keep_best_aggregate_1)
aggregate_1_2.add_predecessor(improve_keep_best_aggregate_2)
operations_graph.add_operation(aggregate_1_2)
score_aggregate_1_2 = operations.Score(1, False, utils.num_errors)
score_aggregate_1_2.add_predecessor(aggregate_1_2)
operations_graph.add_operation(score_aggregate_1_2)
keep_best_aggregate_1_2 = operations.KeepBestN(1, False)
keep_best_aggregate_1_2.add_predecessor(score_aggregate_1_2)
operations_graph.add_operation(keep_best_aggregate_1_2)
improve_aggregate_1_2 = operations.Generate(1, 5)
improve_aggregate_1_2.add_predecessor(keep_best_aggregate_1_2)
operations_graph.add_operation(improve_aggregate_1_2)
improve_score_aggregate_1_2 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_1_2.add_predecessor(improve_aggregate_1_2)
improve_score_aggregate_1_2.add_predecessor(keep_best_aggregate_1_2)
operations_graph.add_operation(improve_score_aggregate_1_2)
improve_keep_best_aggregate_1_2 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_1_2.add_predecessor(improve_score_aggregate_1_2)
operations_graph.add_operation(improve_keep_best_aggregate_1_2)
aggregate_3_4 = operations.Aggregate(10)
aggregate_3_4.add_predecessor(improve_keep_best_aggregate_3)
aggregate_3_4.add_predecessor(improve_keep_best_aggregate_4)
operations_graph.add_operation(aggregate_3_4)
score_aggregate_3_4 = operations.Score(1, False, utils.num_errors)
score_aggregate_3_4.add_predecessor(aggregate_3_4)
operations_graph.add_operation(score_aggregate_3_4)
keep_best_aggregate_3_4 = operations.KeepBestN(1, False)
keep_best_aggregate_3_4.add_predecessor(score_aggregate_3_4)
operations_graph.add_operation(keep_best_aggregate_3_4)
improve_aggregate_3_4 = operations.Generate(1, 5)
improve_aggregate_3_4.add_predecessor(keep_best_aggregate_3_4)
operations_graph.add_operation(improve_aggregate_3_4)
improve_score_aggregate_3_4 = operations.Score(1, False, utils.num_errors)
improve_score_aggregate_3_4.add_predecessor(improve_aggregate_3_4)
improve_score_aggregate_3_4.add_predecessor(keep_best_aggregate_3_4)
operations_graph.add_operation(improve_score_aggregate_3_4)
improve_keep_best_aggregate_3_4 = operations.KeepBestN(1, False)
improve_keep_best_aggregate_3_4.add_predecessor(improve_score_aggregate_3_4)
operations_graph.add_operation(improve_keep_best_aggregate_3_4)
final_aggregate = operations.Aggregate(10)
operations_graph.append_operation(final_aggregate)
operations_graph.append_operation(operations.Score(1, False, utils.num_errors))
keep_best_aggregate_final = operations.KeepBestN(1, False)
operations_graph.append_operation(keep_best_aggregate_final)
operations_graph.append_operation(operations.Generate(1, 10))
score_aggr_3 = operations.Score(1, False, utils.num_errors)
score_aggr_3.add_predecessor(keep_best_aggregate_final)
operations_graph.append_operation(score_aggr_3)
keep_final_best = operations.KeepBestN(1, False)
keep_final_best.add_predecessor(keep_best_aggregate_final)
operations_graph.append_operation(keep_final_best)
operations_graph.append_operation(operations.GroundTruth(utils.test_sorting))
return operations_graph |
def OA_9_135():
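# Construction of an OA(9,135): start from the projective plane PG(2,16), i.e. the 2-(273,17,1) design obtained from a Singer difference set, single out the 7 points that are multiples of 39 together with the lines meeting them in 3 points, relabel so these special points come last, turn the remaining lines into an orthogonal array on 16 symbols, truncate 7 of its columns and finish with wilson_construction(..., 9, 16, 8, (1,)*7).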
from .bibd import BIBD_from_difference_family
from .difference_family import singer_difference_set
(G, B) = singer_difference_set(16, 2)
PG16 = BIBD_from_difference_family(G, B)
n = 273
assert all(((sum((((x % 39) == 0) for x in B)) in [0, 1, 3]) for B in PG16))
lines = [B for B in PG16 if (sum((((x % 39) == 0) for x in B)) == 3)]
p = set(range(237)).difference(*lines).pop()
for B in PG16:
B.sort(key=(lambda x: int(((x % 39) != 0))))
PG16.sort(key=(lambda B: sum((((x % 39) == 0) for x in B))))
r = {}
for B in PG16:
if (p in B):
for x in B:
if (x != p):
r[x] = len(r)
r[p] = (n - 1)
assert all(((r[(x * 39)] >= ((n - 1) - (16 * 7))) for x in range(7)))
assert all((((r[(x * 39)] % 16) == 0) for x in range(7)))
PG = [sorted([r[x] for x in B]) for B in PG16]
OA = [[(x % 16) for x in B] for B in PG if ((n - 1) not in B)]
truncated_OA = [(B[1:(- 7)] + [(x if (x == 0) else None) for x in B[(- 7):]]) for B in OA]
return wilson_construction(truncated_OA, 9, 16, 8, ((1,) * 7), check=False) |
def main(hp, args):
stft = TacotronSTFT(filter_length=hp.audio.filter_length, hop_length=hp.audio.hop_length, win_length=hp.audio.win_length, n_mel_channels=hp.audio.n_mel_channels, sampling_rate=hp.audio.sampling_rate, mel_fmin=hp.audio.mel_fmin, mel_fmax=hp.audio.mel_fmax)
wav_files = glob.glob(os.path.join(args.data_path, '**', '*.wav'), recursive=True)
for wavpath in tqdm.tqdm(wav_files, desc='preprocess wav to mel'):
(sr, wav) = read_wav_np(wavpath)
assert (sr == hp.audio.sampling_rate), ('sample rate mismatch. expected %d, got %d at %s' % (hp.audio.sampling_rate, sr, wavpath))
if (len(wav) < (hp.audio.segment_length + hp.audio.pad_short)):
wav = np.pad(wav, (0, ((hp.audio.segment_length + hp.audio.pad_short) - len(wav))), mode='constant', constant_values=0.0)
wav = torch.from_numpy(wav).unsqueeze(0)
mel = stft.mel_spectrogram(wav)
melpath = wavpath.replace('.wav', '.mel')
torch.save(mel, melpath) |
class DomainEmbedding(nn.Module):
def __init__(self, n_domains, domain_dim) -> None:
super().__init__()
self.embedding = nn.Embedding(n_domains, domain_dim)
self.output_dim = domain_dim
def forward(self, batch):
return {'domain-feature': self.embedding(batch['domains'])}
def get_output_dim(self):
return self.output_dim |
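A quick usage sketch for DomainEmbedding (shapes illustrative; assumes `import torch`): integer domain ids go in under 'domains', learned vectors come out under 'domain-feature'.
emb = DomainEmbedding(n_domains=4, domain_dim=16)
batch = {'domains': torch.tensor([0, 2, 3])}
features = emb(batch)['domain-feature']  # shape (3, 16)
assert features.shape == (3, 16) and emb.get_output_dim() == 16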
@ray.remote(num_gpus=1, resources={'machine': 1})
class DataWorker(object):
def __init__(self, index, model_type='custom', device='cpu', enable_fail=True):
self.device = device
self.model = ConvNet(model_type).to(device)
if ((index == 2) and enable_fail):
import threading
def kill():
for i in reversed(range(20)):
print(f'failing in {(i + 1)} second(s)...')
time.sleep(1)
import os
os._exit(1)
self.t = threading.Thread(target=kill)
self.t.start()
def poll(self):
pass
def compute_gradients(self, weights, batch_size=128):
self.model.set_weights(weights)
data = torch.randn(batch_size, 3, 224, 224, device=self.device)
self.model.zero_grad()
output = self.model(data)
loss = torch.mean(output)
loss.backward()
return self.model.get_gradients() |
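A hedged sketch of driving these workers from a Ray driver (ray.init, Actor.remote, and ray.get are standard Ray calls; ConvNet.get_weights() is assumed to exist alongside the set_weights()/get_gradients() used above):
ray.init()
workers = [DataWorker.remote(i, enable_fail=False) for i in range(3)]
weights = ConvNet('custom').get_weights()  # assumed counterpart of set_weights()
grads = ray.get([w.compute_gradients.remote(weights) for w in workers])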
def get_sents_from_tags(text, sent_start_tag, sent_end_tag):
sents = re.findall(('%s (.+?) %s' % (sent_start_tag, sent_end_tag)), text)
sents = [sent for sent in sents if (len(sent) > 0)]
return sents |
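For example, with explicit start/end tokens around each sentence (tag strings chosen for illustration; tags containing regex metacharacters would need re.escape before being interpolated into the pattern):
text = '<t> the first sentence . </t> <t> the second one . </t>'
get_sents_from_tags(text, '<t>', '</t>')
# -> ['the first sentence .', 'the second one .']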
class WeightSpaceElement(CombinatorialFreeModule.Element):
def scalar(self, lambdacheck):
if ((lambdacheck not in self.parent().coroot_lattice()) and (lambdacheck not in self.parent().coroot_space())):
raise ValueError('{} is not in the coroot space'.format(lambdacheck))
zero = self.parent().base_ring().zero()
if (len(self) < len(lambdacheck)):
return sum(((lambdacheck[i] * c) for (i, c) in self), zero)
else:
return sum(((self[i] * c) for (i, c) in lambdacheck), zero)
def is_dominant(self):
return all(((self.coefficient(i) >= 0) for i in self.parent().index_set()))
def to_ambient(self):
return self.parent().to_ambient_space_morphism()(self)
def to_weight_space(self):
return self |
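A hedged Sage-session sketch of scalar() and is_dominant(); pairing a fundamental weight with the matching simple coroot should give 1:
sage: P = RootSystem(['A', 2]).weight_lattice()
sage: Lambda = P.fundamental_weights(); alphacheck = P.simple_coroots()
sage: Lambda[1].scalar(alphacheck[1])
1
sage: Lambda[1].is_dominant()
True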
def build_network(nb_classes, input_shape, resnet_layers=101, classifier='psp', sigmoid=False, output_size=None, num_input_channels=4):
inp = Input((input_shape[0], input_shape[1], num_input_channels))
if (resnet_layers == 101):
res = ResNet101(inp)
else:
raise ValueError('Resnet {} does not exist'.format(resnet_layers))
if (classifier == 'psp'):
print(('Building network based on ResNet %i and PSP module expecting inputs of shape %s predicting %i classes' % (resnet_layers, input_shape, nb_classes)))
x = build_pyramid_pooling_module(res, input_shape, nb_classes, sigmoid=sigmoid, output_size=output_size)
else:
raise ValueError('Classifier not implemented.')
model = Model(inputs=inp, outputs=x)
return model |
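Minimal usage sketch (relies on the Keras Input/Model, ResNet101, and build_pyramid_pooling_module symbols this function already uses; the shape and class count are illustrative):
model = build_network(nb_classes=21, input_shape=(473, 473), resnet_layers=101, classifier='psp', num_input_channels=4)
model.summary()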
class fisk_gen(burr_gen):
def _shape_info(self):
return [_ShapeInfo('c', False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
return burr._pdf(x, c, 1.0)
def _cdf(self, x, c):
return burr._cdf(x, c, 1.0)
def _sf(self, x, c):
return burr._sf(x, c, 1.0)
def _logpdf(self, x, c):
return burr._logpdf(x, c, 1.0)
def _logcdf(self, x, c):
return burr._logcdf(x, c, 1.0)
def _logsf(self, x, c):
return burr._logsf(x, c, 1.0)
def _ppf(self, x, c):
return burr._ppf(x, c, 1.0)
def _isf(self, q, c):
return burr._isf(q, c, 1.0)
def _munp(self, n, c):
return burr._munp(n, c, 1.0)
def _stats(self, c):
return burr._stats(c, 1.0)
def _entropy(self, c):
return (2 - np.log(c)) |
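Since every method delegates to burr with the second shape parameter fixed at 1, the public scipy.stats.fisk instance (the log-logistic distribution) should agree with burr at d=1, for example:
from scipy import stats
assert np.isclose(stats.fisk.pdf(1.5, c=2.0), stats.burr.pdf(1.5, c=2.0, d=1.0))
samples = stats.fisk.rvs(c=3.0, size=5, random_state=0)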
@pytest.mark.parametrize('dtype', ([torch.float16, torch.float32] + ([torch.bfloat16] if is_sm8x else [])))
@pytest.mark.parametrize('inplace_backward', [False, True])
@pytest.mark.parametrize('smoothing', [0.0, 0.9])
@pytest.mark.parametrize('vocab_size', [50257])
def test_cross_entropy_loss_apex(vocab_size, smoothing, inplace_backward, dtype):
device = 'cuda'
(rtol, atol) = ((1e-05, 1e-06) if (dtype == torch.float32) else (0.001, 0.0001))
torch.random.manual_seed(0)
batch_size = 8
seqlen = 128
x_pt = torch.randn((batch_size * seqlen), vocab_size, device=device, dtype=dtype, requires_grad=True)
x = x_pt.detach().clone().requires_grad_()
y = torch.randint(0, vocab_size, ((batch_size * seqlen),), dtype=torch.long, device=device)
y[torch.randperm((batch_size * seqlen))[:10]] = (- 100)
model_pt = torch.nn.CrossEntropyLoss(label_smoothing=smoothing)
model = CrossEntropyLossApex(label_smoothing=smoothing, inplace_backward=inplace_backward)
out = model(x, y)
out_pt = model_pt(x_pt.float(), y)
assert torch.allclose(out, out_pt, rtol=rtol, atol=atol)
g = torch.randn_like(out)
out_pt.backward(g)
out.backward(g)
assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=atol) |
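Assuming this test lives in a file collected by pytest on a CUDA-capable machine, the parametrized cases can be selected with, e.g., pytest -k cross_entropy_loss_apex; the is_sm8x flag presumably gates the bfloat16 cases to GPUs that support that dtype.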
def test_option_unknown_1_parm():
text = 'option[unknown, parameters={"foo": "bar"}]'
parsedtype = ak.types.from_datashape(text, highlevel=False)
assert isinstance(parsedtype, ak.types.OptionType)
assert (str(parsedtype) == text) |