code stringlengths 281 23.7M |
|---|
def l1_ewta_waypoint_loss(prediction, target, k=6, waypoint_step=5, eps=1e-07):
    """Evolving winner-takes-all (EWTA) L1 loss over strided waypoints.

    Compares each of the ``num_mixtures`` predicted trajectories against the
    ground truth sampled every ``waypoint_step`` timesteps, then keeps only the
    ``k`` best (lowest-loss) mixtures per sample.

    Args:
        prediction: tensor of shape (batch, num_mixtures, timesteps, 1, feat);
            the singleton dim is squeezed before comparison.
        target: tensor of shape (batch, timesteps, feat).
        k: number of lowest-loss mixtures kept per sample (winner-takes-all).
        waypoint_step: stride between compared waypoints.
        eps: unused; kept for backward compatibility with existing callers.

    Returns:
        Scalar loss, normalized by batch size, number of valid waypoints, and k.
    """
    num_mixtures = prediction.shape[1]
    timesteps = target.shape[1]
    # Pad the time axis so every shifted waypoint index stays in range.
    target = nn.functional.pad(target, pad=(0, 0, 0, waypoint_step - 1))
    target = target.unsqueeze(1).expand(-1, num_mixtures, -1, -1)
    # Bug fix: the original `.to(target.get_device())` fails for CPU tensors
    # (get_device() returns -1); `target.device` works on both CPU and CUDA.
    indexes = torch.arange(timesteps, device=target.device) + waypoint_step - 1
    # Mask out indices that only exist because of the padding above.
    indexes_mask = (indexes < timesteps).float()
    curr_label = target.index_select(2, indexes)
    curr_loss_all = nn.functional.l1_loss(prediction.squeeze(-2), curr_label, reduction='none')
    curr_loss_all = curr_loss_all * indexes_mask.view(1, 1, -1, 1)
    curr_loss = curr_loss_all.sum(dim=[2, 3])
    # Winner-takes-all: keep the k smallest per-mixture losses (topk replaces
    # the original full sort + narrow; the ranks were never used).
    curr_mixture_loss_topk = torch.topk(curr_loss, k, dim=1, largest=False).values
    l1_loss = curr_mixture_loss_topk.sum()
    # Normalize by batch size, number of valid (unpadded) waypoints, and k.
    l1_loss = l1_loss / target.size(0)
    l1_loss = l1_loss / indexes_mask.sum()
    l1_loss = l1_loss / k
    return l1_loss
def get_time_string(status, options, format='%a %b %d %H:%M:%S +0000 %Y'):
    """Render a status's ``created_at`` (UTC) timestamp in local time.

    The pieces included depend on ``options['timestamp']`` and
    ``options['datestamp']``: both give ``'%Y-%m-%d %H:%M:%S '``, one gives
    the time or date alone, neither gives ``''``.  Every non-empty result
    ends with a trailing space.
    """
    parsed = time.strptime(status['created_at'], format)
    # Shift the parsed UTC wall clock into local time, honoring DST.
    utc_offset = time.altzone if time.daylight else time.timezone
    local_dt = datetime.datetime(*parsed[:-3]) - datetime.timedelta(seconds=utc_offset)
    local_tm = local_dt.timetuple()
    want_time = options['timestamp']
    want_date = options['datestamp']
    if want_time and want_date:
        fmt = '%Y-%m-%d %H:%M:%S '
    elif want_time:
        fmt = '%H:%M:%S '
    elif want_date:
        fmt = '%Y-%m-%d '
    else:
        return ''
    return time.strftime(fmt, local_tm)
class DevDataset(Dataset):
    """MultiWOZ dev-split dataset that linearizes dialogue-state tracking
    examples into (struct_in, text_in, seq_out) triples.

    Supports two seq2seq modes: 'sequential' (default) emits one example per
    turn with all slot-value pairs joined, while 'separate' emits one example
    per slot.  Results are cached on disk when ``args.dataset.use_cache`` is
    set.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'multiwoz_dev.cache')
        if os.path.exists(cache_path) and args.dataset.use_cache:
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            for raw_data in tqdm(self.raw_datasets):
                self._extend(args, raw_data)
        # The cache is (re)written even when it was just loaded, matching the
        # established behavior of this dataset.
        if args.dataset.use_cache:
            torch.save(self.extended_data, cache_path)

    def _extend(self, args, raw_data):
        """Append the example(s) derived from one raw dialogue turn."""
        sample = copy.deepcopy(raw_data)
        history = get_constructed_history_and_golden_response(sample['dialog']['usr'], sample['dialog']['sys'])
        # Render the ontology as "slot: v1, v2; slot2: none; ...".
        ontology_str = ''
        for slot_name, slot_values in zip(sample['ontology_slots'], sample['ontology_values']):
            rendered = ', '.join(slot_values) if slot_values else 'none'
            ontology_str += '{}: {}; '.format(slot_name, rendered)
        belief = sample['expanded_turn_belief']
        mode = args.seq2seq.mode
        if (not mode) or mode == 'sequential':
            # One example per turn: all slot-value pairs, hyphens spaced out.
            seq_out = ', '.join('{}-{}'.format(s, v).replace('-', ' ')
                                for s, v in zip(belief['slot'], belief['value']))
            sample.update({'struct_in': ontology_str.lower(),
                           'text_in': history.lower(),
                           'seq_out': seq_out.lower()})
            self.extended_data.append(sample)
        elif mode == 'separate':
            # One example per slot; belief/ontology keys are stripped from
            # each copy since the slot is carried explicitly.
            for s, v in zip(belief['slot'], belief['value']):
                per_slot = copy.deepcopy(sample)
                del per_slot['expanded_turn_belief']
                del per_slot['ontology_slots']
                del per_slot['ontology_values']
                per_slot.update({'struct_in': ontology_str.lower(),
                                 'text_in': '{}: {}'.format(s, history).lower(),
                                 'seq_out': v.lower(),
                                 'slot': s})
                self.extended_data.append(per_slot)
        else:
            raise ValueError('Other seq2seq method not support yet!')

    def __getitem__(self, index) -> T_co:
        """Return the pre-built example at *index*."""
        return self.extended_data[index]

    def __len__(self):
        """Number of linearized examples (not raw turns)."""
        return len(self.extended_data)
def _filter_by_module_availability(datapipes):
    """Drop datapipes whose optional third-party backend is not importable.

    Each entry of *datapipes* is a tuple whose first element is the datapipe
    class; entries backed by a missing module (the corresponding module-level
    name is None) are filtered out.
    """
    unavailable = set()
    if datasets is None:
        unavailable.add(iterdp.HuggingFaceHubReader)
    if fsspec is None:
        unavailable |= {iterdp.FSSpecFileLister, iterdp.FSSpecFileOpener, iterdp.FSSpecSaver}
    if iopath is None:
        unavailable |= {iterdp.IoPathFileLister, iterdp.IoPathFileOpener, iterdp.IoPathSaver}
    if rarfile is None:
        unavailable.add(iterdp.RarArchiveLoader)
    if torcharrow is None or not DILL_AVAILABLE:
        unavailable |= {iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader}
    return [entry for entry in datapipes if entry[0] not in unavailable]
def trainIters(args, lang, dataset, encoder, decoder, critic, performer, extractor, all_ans, n_iters, split_id, max_steps, print_every=1, save_every=100):
    """Train the questioner (encoder/decoder) and critic with actor-critic RL
    inside an interactive THOR environment.

    Each iteration samples a subgoal, lets the decoder emit question tokens,
    answers them from the precomputed answer banks in ``all_ans``
    (location/appearance/direction), feeds the QA dialog to the ``performer``
    policy, and rewards task success while penalizing invalid or excessive
    questions.  Checkpoints and rollout statistics are written under
    ``./logs/questioner_rl/``.

    NOTE(review): ``max_steps`` the parameter is shadowed by
    ``args.max_steps`` inside the loop — confirm which one is intended.
    """
    start = time.time()
    env = ThorEnv(x_display=0)
    obj_predictor = FeatureExtractor(archi='maskrcnn', device=device, checkpoint='./logs/pretrained/maskrcnn_model.pth', load_heads=True)
    (loc_ans, app_ans, dir_ans) = all_ans
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    critic_optimizer = optim.Adam(critic.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    num_subgoals = len(dataset.jsons_and_keys)
    # Cross-iteration bookkeeping (losses, rewards, questions, outcomes).
    actor_losses = []
    critic_losses = []
    all_rewards = []
    succ = []
    all_query = []
    all_instr = []
    object_found = []
    sg_pairs = []
    num_q = []
    all_pws = []
    all_decoded_words = []
    instr_id = 0
    # Only keep the first annotation variant of each trajectory.
    data_instruct_list = []
    for i in range(len(dataset.jsons_and_keys)):
        (traj_data, traj_key) = dataset.jsons_and_keys[i]
        if (traj_data['repeat_idx'] == 0):
            data_instruct_list.append(i)
    print('dataset length', len(data_instruct_list))
    for it in range(0, n_iters):
        # Per-episode state.
        log_probs = []
        log_prob = 0
        values = []
        rewards = []
        masks = []
        current_query = []
        current_object_found = []
        all_log = []
        current_num_q = 0
        entropy = 0
        t_agent = 0
        num_fails = 0
        episode_end = False
        current_d_words = []
        # Sample a random trajectory and one of its (non-final) subgoals.
        dataset_idx = np.random.choice(data_instruct_list)
        task_json = dataset.jsons_and_keys[dataset_idx]
        turk_annos = task_json[0]['turk_annotations']['anns']
        subgoal_idxs = [sg['high_idx'] for sg in task_json[0]['plan']['high_pddl']]
        subgoal_idxs = subgoal_idxs[:(- 1)]
        subgoal_idx = np.random.choice(subgoal_idxs)
        trial_uid = ((('pad:' + str(0)) + ':') + str(subgoal_idx))
        dataset_idx_qa = (0 + dataset_idx)
        init_states = evaluate_subgoals_start_qa(env, performer, dataset, extractor, trial_uid, dataset_idx_qa, args, obj_predictor)
        (_, _, _, init_failed, _) = init_states
        (task, trial) = task_json[0]['task'].split('/')
        pair = (None, None, task, trial, subgoal_idx)
        sg_pairs.append([task, trial, subgoal_idx])
        # Normalize the human instruction the same way the answers were built.
        orig_instr = normalizeString(turk_annos[0]['high_descs'][subgoal_idx]).lower().replace(',', '').replace('.', '')
        qa = ''
        reward = 0
        all_instr.append(orig_instr)
        interm_states = None
        pws = 0.0
        t_agent_old = 0
        dialog = ''
        # Interaction loop: ask a question, get an answer, let the performer act.
        while True:
            f_t = extractFeatureOnline(env, extractor)
            input_tensor = torch.unsqueeze(torch.squeeze(tensorFromSentence(lang, orig_instr)), 0).to(device)
            input_length = input_tensor.size(1)
            encoder.init_state(input_tensor)
            seq_lengths = torch.from_numpy(np.array([input_length]))
            (ctx, h_t, c_t) = encoder(input_tensor, seq_lengths)
            decoder_input = torch.tensor([[SOS_token]], device=device)
            decoded_words = []
            # Sample a question token-by-token from the decoder's policy.
            for di in range(MAX_LENGTH):
                (h_t, c_t, alpha, logit) = decoder(decoder_input, f_t, h_t, c_t, ctx)
                value = critic(h_t)
                dist = Categorical(F.softmax(logit, dim=(- 1)))
                selected_word = dist.sample()
                dword = lang.index2word[selected_word.item()]
                decoded_words.append(dword)
                decoder_input = selected_word.detach().to(device)
                # NOTE(review): log_prob is initialized once per episode and
                # keeps accumulating across while-loop steps — confirm the
                # cumulative (rather than per-step) log-prob is intended.
                log_prob += (dist.log_prob(selected_word).unsqueeze(0) / MAX_LENGTH)
                entropy += (dist.entropy().mean() / MAX_LENGTH)
                if (dword == 'EOS'):
                    break
            repeat_idx = (- 1)
            ans = ''
            current_d_words.append(decoded_words)
            # Resolve the question against the precomputed answer banks.
            # First token selects the question type, second the object.
            if (decoded_words[0] == 'appearance'):
                query = ('<<app>> ' + decoded_words[1])
                if ((task in app_ans) and (trial in app_ans[task]) and (subgoal_idx in app_ans[task][trial]) and (0 in app_ans[task][trial][subgoal_idx])):
                    ans_sg = app_ans[task][trial][subgoal_idx][0]
                    if ((decoded_words[1] in ans_sg) and (ans_sg[decoded_words[1]]['ans'] is not None)):
                        ans += ans_sg[decoded_words[1]]['ans']
                    else:
                        ans += 'invalid'
                else:
                    logging.info(('invalid answer for %s, %s, %s' % (task, trial, subgoal_idx)))
                    ans += 'invalid'
            elif (decoded_words[0] == 'location'):
                query = ('<<loc>> ' + decoded_words[1])
                if ((task in loc_ans) and (trial in loc_ans[task]) and (subgoal_idx in loc_ans[task][trial]) and (0 in loc_ans[task][trial][subgoal_idx])):
                    ans_sg = loc_ans[task][trial][subgoal_idx][0]
                    if (decoded_words[1] in ans_sg):
                        obj_id = ans_sg[decoded_words[1]]['obj_id']
                        # Location answers are computed live from the current
                        # simulator state rather than read from the bank.
                        event = env.last_event
                        metadata = event.metadata
                        odata = get_obj_data(metadata, obj_id)
                        if (odata is None):
                            ans += 'invalid'
                        else:
                            oname = decoded_words[1]
                            recs = odata['parentReceptacles']
                            rel_ang = get_obj_direction(metadata, odata)
                            ans += objLocAns(oname, rel_ang, recs)
                    else:
                        ans += 'invalid'
                else:
                    ans += 'invalid'
            elif (decoded_words[0] == 'direction'):
                query = '<<dir>> '
                if ((task in dir_ans) and (trial in dir_ans[task]) and (subgoal_idx in dir_ans[task][trial])):
                    target_pos = dir_ans[task][trial][subgoal_idx]['target_pos']
                    event = env.last_event
                    cur_metadata = event.metadata
                    targ_metadata = {'agent': {'position': target_pos}}
                    (rel_ang, rel_pos) = get_agent_direction(cur_metadata, targ_metadata)
                    ans += dirAns(rel_ang, rel_pos)
                else:
                    ans += 'invalid'
            elif (decoded_words[0] == 'none'):
                query = 'none'
            else:
                query = '<<invalid>>'
            # Shape the reward: penalize invalid questions, charge a small
            # cost per valid question, asking nothing is free.
            if (('invalid' in query) or ('invalid' in ans)):
                reward += REWARD_INVALID
                current_object_found.append(False)
                qa = ''
            elif (not (query == 'none')):
                reward += REWARD_QUESTION
                current_object_found.append(True)
                current_num_q += 1
                qa = ((query + ' ') + ans)
            else:
                current_object_found.append(True)
                qa = ''
            current_query.append(((query + ' ') + ans))
            qa = qa.lower().replace(',', '').replace('.', '')
            dialog += (' ' + qa)
            # Let the performer act on the (possibly empty) QA exchange.
            with torch.no_grad():
                (log_entry, interm_states) = evaluate_subgoals_middle_qa(env, performer, dataset, extractor, trial_uid, dataset_idx_qa, args, obj_predictor, init_states, interm_states, qa, num_rollout=5)
            if log_entry['success']:
                reward += REWARD_SUC
                done = 1.0
                pws = log_entry['success_spl']
            else:
                done = 0.0
            (t_agent, _, num_fails, _, mc_lists, episode_end, _) = interm_states
            # Time penalty proportional to the steps taken this round.
            reward += (REWARD_TIME * (t_agent - t_agent_old))
            t_agent_old = t_agent
            log_probs.append(log_prob)
            values.append(value)
            rewards.append(torch.tensor([reward], dtype=torch.float, device=device))
            masks.append(torch.tensor([(1 - done)], dtype=torch.float, device=device))
            if (done or (t_agent > args.max_steps) or (num_fails > args.max_fails) or episode_end or init_failed or (len(current_query) > 100)):
                break
        # Episode finished: record outcome and run one actor-critic update.
        succ.append(done)
        all_rewards.append(reward)
        all_pws.append(pws)
        all_log.append(log_entry)
        next_value = critic(h_t)
        returns = compute_returns(next_value, rewards, masks)
        log_probs = torch.cat(log_probs)
        returns = torch.cat(returns).detach()
        # NOTE(review): re-wrapping the critic outputs via torch.tensor(...)
        # appears to detach them from the autograd graph, so critic_loss may
        # not backpropagate into the critic — confirm this is intended.
        values = torch.tensor([values], dtype=torch.float, device=device, requires_grad=True)
        advantage = (returns - values)
        actor_loss = (- (log_probs * advantage.detach()).mean())
        critic_loss = advantage.pow(2).mean()
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()
        critic_optimizer.zero_grad()
        actor_loss.backward()
        critic_loss.backward()
        encoder_optimizer.step()
        decoder_optimizer.step()
        critic_optimizer.step()
        actor_losses.append(actor_loss.detach().cpu())
        critic_losses.append(critic_loss.detach().cpu())
        object_found.append(current_object_found)
        all_query.append(current_query)
        num_q.append(current_num_q)
        all_decoded_words.append(current_d_words)
        # Periodic logging and checkpointing.
        if ((it % print_every) == 0):
            logging.info(('task, trial, subgoals: %s' % sg_pairs[(- 1)]))
            logging.info(('instruction: %s' % all_instr[(- 1)]))
            logging.info(('questions: %s' % all_query[(- 1)]))
            logging.info(('number of questions: %s' % num_q[(- 1)]))
            logging.info(('%s (%d %d%%) actor loss %.4f, critic loss %.4f, reward %.4f, SR %.4f, pws %.4f' % (timeSince(start, ((it + 1) / n_iters)), (it + 1), (((it + 1) / n_iters) * 100), np.mean(actor_losses), np.mean(critic_losses), np.mean(all_rewards), np.mean(succ), np.mean(all_pws))))
        if ((it % save_every) == 0):
            torch.save({'encoder': encoder.state_dict(), 'decoder': decoder.state_dict(), 'critic': critic.state_dict()}, (('./logs/questioner_rl/questioner_anytime_' + split_id) + '.pt'))
            with open((('./logs/questioner_rl/questioner_anytime_' + split_id) + '.pkl'), 'wb') as pkl_f:
                pickle.dump([all_rewards, succ, all_query, all_instr, sg_pairs, num_q, all_pws], pkl_f)
    env.stop()
@pytest.mark.parametrize('cv2', [0, 1])
@pytest.mark.parametrize('cv1', [0, 1])
def test_truth_table_classical(cv1, cv2):
    """Exhaustively check the classical truth table for every control-value
    combination: the output must be 1 exactly when both inputs match the
    control values (cv1, cv2), and 0 otherwise.

    Bug fix: the two decorator lines had lost their '@pytest.mark' prefix
    (bare '.parametrize(...)' is a syntax error); restored.
    """
    for (cbloq, a, b) in _iter_and_truth_table(cv1, cv2):
        (res,) = cbloq.call_classically()
        if (a == cv1) and (b == cv2):
            assert res == 1
        else:
            assert res == 0
def split(s):
    """Split a URL-like string into (scheme, netloc, path, query, fragment).

    A lightweight positional splitter: the scheme is taken before '://' (and
    rejected unless every character is alphanumeric or in ``scheme_chars``),
    the netloc runs up to the first '/', '?' or '#', and the returned path,
    query and fragment EXCLUDE their leading delimiter.  Absent components
    are returned as None.
    """
    scheme = netloc = path = query = fragment = None
    end = len(s)
    pos = 0
    # --- scheme ---
    marker = s.find('://')
    if marker != -1:
        candidate = s[:marker]
        pos = marker + 3
        scheme = candidate
        for ch in candidate:
            # De Morgan of the original check: valid chars are letters or
            # members of scheme_chars; anything else invalidates the scheme.
            if ch in scheme_chars or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z':
                continue
            pos = 0
            scheme = None
            break
    # --- locate delimiters after the scheme ---
    end_of_netloc = end
    path_pos = s.find('/', pos)
    if path_pos < 0:
        path_pos = None
    else:
        end_of_netloc = path_pos
    query_pos = s.find('?', pos)
    if query_pos < 0:
        query_pos = None
    elif path_pos is None or query_pos < path_pos:
        # A '?' before any '/' means everything after it is query, not path.
        path_pos = None
        end_of_netloc = query_pos
    fragment_pos = s.find('#', pos)
    if fragment_pos < 0:
        fragment_pos = None
    else:
        # A '#' before '?' or '/' cancels those components.
        if query_pos is not None and fragment_pos < query_pos:
            query_pos = None
        if path_pos is not None and fragment_pos < path_pos:
            path_pos = None
            end_of_netloc = fragment_pos
        if query_pos is None and path_pos is None:
            end_of_netloc = fragment_pos
    # --- slice the components ---
    if end_of_netloc != pos:
        netloc = s[pos:end_of_netloc]
    if path_pos is not None:
        path = s[path_pos + 1:query_pos or fragment_pos or end]
    if query_pos is not None:
        query = s[query_pos + 1:fragment_pos or end]
    if fragment_pos is not None:
        fragment = s[fragment_pos + 1:end]
    return (scheme, netloc, path, query, fragment)
class esxiVm():
    """Wrapper around a pyVmomi VM object for payload testing on an ESXi host.

    Provides power control, snapshot management, guest file transfer (via the
    VMware Tools file manager + HTTPS), and guest command execution.  All
    progress and errors are reported through ``self.server.logMsg``.
    """

    def __init__(self, serverObject, vmObject):
        self.server = serverObject          # owning ESXi server wrapper (connection + logging)
        self.vmObject = vmObject            # pyVmomi VirtualMachine managed object
        self.procList = []                  # cached guest process list (see updateProcList)
        self.revertSnapshots = []
        self.snapshotList = []              # (snapshot tree node, path) tuples (see getSnapshots)
        self.testVm = False
        self.vmIdentifier = vmObject.summary.config.vmPathName
        self.vmIp = None
        self.vmName = vmObject.summary.config.name
        self.vmOS = vmObject.summary.config.guestFullName
        self.vmPassword = ''
        self.vmUsername = ''
        self.uploadDir = ''
        self.payloadList = []
        self.resultDict = {}
        # Derive the architecture from the guest OS description string.
        if (self.vmOS is not None):
            if ('64-bit' in self.vmOS):
                self.arch = 'x64'
            else:
                self.arch = 'x86'
        else:
            self.arch = 'unknown'

    def waitForVmToBoot(self):
        """Block until the VM is powered on, Tools are ready, and an IP is
        assigned; return True on success, False on any timeout/failure."""
        for i in range(10):
            if (self.isPoweredOn() == False):
                time.sleep(1)
                self.server.logMsg((self.vmName + ' WAS NOT POWERED ON AS EXPECTED; RETRYING'))
                self.powerOn(True)
                time.sleep(5)
                if (self.isPoweredOn() == False):
                    self.server.logMsg((self.vmName + ' IS POWERED OFF'))
                    return False
            else:
                # Poll VMware Tools for up to ~5 minutes (60 * 5s).
                attempts = 60
                tools_ready = False
                for i in range(attempts):
                    if (self.checkTools(True) != 'TOOLS_NOT_READY'):
                        tools_ready = True
                        break
                    time.sleep(5)
                if (not tools_ready):
                    self.server.logMsg((self.vmName + ' IS POWERED ON TOOLS WERE NOT READY IN TIME'))
                    return tools_ready
                # Then poll for an IP address a few times.
                for j in range(5):
                    ipAddress = self.getVmIp()
                    if (ipAddress != None):
                        break
                    else:
                        self.server.logMsg(('IP ADDRESS LOOKUP FAILED FOR ' + self.vmName))
                        time.sleep(1)
                if (ipAddress == None):
                    self.server.logMsg((self.vmName + ' FAILED TO INITIALIZE'))
                    return False
                else:
                    self.server.logMsg(((('IP ADDRESS FOR ' + self.vmName) + ' = ') + ipAddress))
                    return True

    def checkTools(self, waitForTools=True):
        """Map the guest's toolsStatus onto TOOLS_READY / TOOLS_NOT_READY /
        TOOLS_NOT_INSTALLED.

        NOTE(review): ``waitForTools`` is never read — confirm whether a wait
        loop was intended here.
        """
        tools_status = self.vmObject.guest.toolsStatus
        if (tools_status == 'toolsNotRunning'):
            retVal = 'TOOLS_NOT_READY'
        elif (tools_status == 'toolsOld'):
            self.server.logMsg(('YOU SHOULD UPGRADE THE VMWARE TOOLS ON ' + self.vmName))
            retVal = 'TOOLS_READY'
        elif (tools_status == 'toolsNotInstalled'):
            self.server.logMsg(('YOU SHOULD INSTALL VMWARE TOOLS ON ' + self.vmName))
            retVal = 'TOOLS_NOT_INSTALLED'
        elif (tools_status == 'toolsOk'):
            retVal = 'TOOLS_READY'
        else:
            self.server.logMsg(((('UNKNOWN STATE OF VMWARE TOOLS ON ' + self.vmName) + '::') + tools_status))
            retVal = 'TOOLS_NOT_READY'
        return retVal

    def deleteSnapshot(self, snapshotName):
        """Delete the first snapshot whose name matches exactly; returns the
        task result, or None implicitly when no snapshot matches."""
        self.getSnapshots()
        for i in self.snapshotList:
            if (i[0].name == snapshotName):
                self.server.logMsg(((('DELETING SNAPSHOT ' + snapshotName) + ' FROM ') + self.vmName))
                # RemoveSnapshot_Task(False): do not remove children.
                return self.waitForTask(i[0].snapshot.RemoveSnapshot_Task(False))

    def enumerateSnapshotsRecursively(self, snapshots, snapshot_location):
        """Depth-first walk of the snapshot tree, appending
        (snapshot, 'parent/child/...') path tuples to self.snapshotList."""
        if (not snapshots):
            return
        for snapshot in snapshots:
            if snapshot_location:
                current_snapshot_path = ((snapshot_location + '/') + snapshot.name)
            else:
                current_snapshot_path = snapshot.name
            self.snapshotList.append((snapshot, current_snapshot_path))
            self.enumerateSnapshotsRecursively(snapshot.childSnapshotList, current_snapshot_path)
        return

    def getArch(self):
        """Return 'x64', 'x86', or 'unknown' (derived in __init__)."""
        return self.arch

    def getDataCenter(self):
        """Return the datacenter object containing this VM, or None if not
        found (implicit)."""
        content = self.server.connection.RetrieveContent()
        for child in content.rootFolder.childEntity:
            if hasattr(child, 'vmFolder'):
                datacenter = child
                vmFolder = datacenter.vmFolder
                vmList = vmFolder.childEntity
                if (self.vmObject in vmList):
                    return child

    def getFileFromGuest(self, srcFile, dstFile):
        """Download srcFile from the guest to local dstFile via the Tools file
        manager; returns True on success.

        NOTE(review): retVal is reset on every attempt and the retry loop has
        no early exit on success — confirm the intended retry semantics.
        """
        for i in range(3):
            self.server.logMsg(('ATTEMPTING TO GET ' + srcFile))
            retVal = False
            if (self.checkTools() == 'TOOLS_READY'):
                creds = vim.vm.guest.NamePasswordAuthentication(username=self.vmUsername, password=self.vmPassword)
                content = self.server.connection.RetrieveContent()
                try:
                    file_attribute = vim.vm.guest.FileManager.FileAttributes()
                    vmFileManager = content.guestOperationsManager.fileManager
                    ftInfo = vmFileManager.InitiateFileTransferFromGuest(self.vmObject, creds, srcFile)
                    # The returned URL contains a '*' placeholder for the host.
                    splitUrl = ftInfo.url.split('*')
                    realUrl = ((splitUrl[0] + self.server.hostname) + splitUrl[1])
                    self.server.logMsg(((srcFile + ' URL = ') + realUrl))
                    self.server.logMsg(((srcFile + ' SIZE = ') + str(ftInfo.size)))
                    resp = requests.get(realUrl, verify=False)
                    if (not (resp.status_code == 200)):
                        self.server.logMsg(((((('ERROR GETTING FILE ' + srcFile) + ' FROM ') + self.vmName) + ' HTTP CODE ') + str(resp.status_code)))
                    else:
                        getFile = open(dstFile, 'wb')
                        getFile.write(resp.content)
                        getFile.close()
                        self.server.logMsg(((((('SAVED FILE FROM ' + self.vmName) + ' AS ') + dstFile) + ' HTTP RESPONSE WAS ') + str(resp.status_code)))
                        retVal = True
                except vim.fault.FileNotFound as e:
                    self.server.logMsg(('FAILED TO FIND FILE ON VM: ' + srcFile))
                    self.server.logMsg(('SYSTEM ERROR: ' + str(e)))
                    pass
                except Exception as e:
                    self.server.logMsg(('UNPREDICTED EXCEPTION:\n' + str(e)))
                    pass
            else:
                self.server.logMsg(('THERE IS A PROBLEM WITH THE VMWARE TOOLS ON ' + self.vmName))
        return retVal

    def getSnapshots(self):
        """Refresh self.snapshotList from the VM's snapshot tree."""
        self.server.logMsg(('FINDING SNAPSHOTS FOR ' + self.vmName))
        self.snapshotList = []
        # vmObject.snapshot is None when the VM has no snapshots.
        if hasattr(self.vmObject.snapshot, 'rootSnapshotList'):
            self.enumerateSnapshotsRecursively(self.vmObject.snapshot.rootSnapshotList, '')
        return

    def getVmIp(self):
        """Poll (up to 120 x 1s) for the guest-reported IP; caches and returns
        self.vmIp (possibly None)."""
        if (self.checkTools(True) != 'TOOLS_NOT_INSTALLED'):
            ipAttempts = 120
            for i in range(ipAttempts):
                self.vmIp = self.vmObject.summary.guest.ipAddress
                if (self.vmIp != None):
                    break
                else:
                    strAttempt = (((('(ATTEMPT ' + str(i)) + ' OF ') + str(ipAttempts)) + ')')
                    self.server.logMsg(((strAttempt + ' FAILED TO GET IP ADDRESS FROM ') + self.vmName))
                    time.sleep(1)
        return self.vmIp

    def getVmInterfaces(self):
        """Poll for the guest NIC list; returns a list of
        (macAddress, ['ip/prefix', ...], network) tuples."""
        ipAttempts = 120
        for i in range(ipAttempts):
            self.vmInterfaces = []
            for nic in self.vmObject.guest.net:
                addresses = nic.ipConfig.ipAddress
                IPs = []
                for addr in addresses:
                    IPs.append(('%s/%s' % (addr.ipAddress, addr.prefixLength)))
                self.vmInterfaces.append((nic.macAddress, IPs, nic.network))
            if (self.vmInterfaces != []):
                break
            else:
                strAttempt = (((('(ATTEMPT ' + str(i)) + ' OF ') + str(ipAttempts)) + ')')
                self.server.logMsg(((strAttempt + ' FAILED TO GET IP ADDRESS FROM ') + self.vmName))
                time.sleep(1)
        return self.vmInterfaces

    def getUsername(self):
        """Return the configured guest username."""
        return self.vmUsername

    def getPassword(self):
        """Return the configured guest password."""
        return self.vmPassword

    def isTestVm(self):
        """Return True if this VM has been flagged as a test VM."""
        return self.testVm

    def isPoweredOff(self):
        """Return True when the VM is not powered on."""
        return (not self.isPoweredOn())

    def isPoweredOn(self):
        """Return True when the VM's runtime power state is poweredOn."""
        if (self.vmObject.runtime.powerState == vim.VirtualMachinePowerState.poweredOn):
            return True
        else:
            return False

    def makeDirOnGuest(self, dirPath):
        """Create dirPath (non-recursively) inside the guest; returns True on
        success or when the directory already exists."""
        self.server.logMsg((((('CREATING ' + dirPath) + ' ON ') + self.vmName) + ' '))
        retVal = True
        if (self.checkTools() == 'TOOLS_READY'):
            creds = vim.vm.guest.NamePasswordAuthentication(username=self.vmUsername, password=self.vmPassword)
            content = self.server.connection.RetrieveContent()
            try:
                # Final False: do not create parent directories.
                content.guestOperationsManager.fileManager.MakeDirectoryInGuest(self.vmObject, creds, dirPath, False)
                retVal = True
            except IOError as e:
                self.server.logMsg('[ERROR]: FILE NOT FOUND')
                self.server.logMsg(('SYSTEM ERROR: ' + str(e)))
                retVal = False
            except vim.fault.FileAlreadyExists as f:
                # An existing directory is treated as success.
                self.server.logMsg(((('[WARNING]: DIRECTORY ' + dirPath) + ' ALREADY EXISTS ON ') + self.vmName))
                self.server.logMsg(('SYSTEM ERROR: ' + str(f)))
                retVal = True
            except vim.fault.InvalidGuestLogin as g:
                self.server.logMsg(('[ERROR]: INCORRECT USERNAME/PASSWORD PROVIDED FOR ' + self.vmName))
                self.server.logMsg(('SYSTEM ERROR: ' + str(g)))
                retVal = False
            except Exception as g:
                self.server.logMsg(((('[ERROR]: UNKNOWN EXCEPTION WHILE MAKING ' + dirPath) + ' ON ') + self.vmName))
                self.server.logMsg(('SYSTEM ERROR: ' + str(g)))
                retVal = False
        else:
            self.server.logMsg(('ERROR: VMWARE TOOLS NOT INSTALLED ON ' + self.vmName))
            retVal = False
        return retVal

    def powerOn(self, asyncFlag=False):
        """Power on the VM; returns None if already on, the task when async,
        else the waitForTask result."""
        if self.isPoweredOn():
            self.server.logMsg((self.vmName + ' IS ALREADY RUNNING, CANNOT POWER-ON HARDER'))
            return None
        else:
            self.server.logMsg(('POWERING ON ' + self.vmName))
            if asyncFlag:
                return self.vmObject.PowerOnVM_Task()
            else:
                return self.waitForTask(self.vmObject.PowerOnVM_Task())

    def powerOff(self, asyncFlag=False):
        """Power off the VM; mirror of powerOn."""
        if self.isPoweredOff():
            self.server.logMsg((self.vmName + ' IS ALREADY OFF, CANNOT POWER-OFF HARDER'))
            return None
        else:
            self.server.logMsg(('POWERING OFF ' + self.vmName))
            if asyncFlag:
                return self.vmObject.PowerOffVM_Task()
            else:
                return self.waitForTask(self.vmObject.PowerOffVM_Task())

    def prepVm(self):
        """Log VM details, refresh snapshots, and power the VM on (blocking)."""
        self.server.logMsg((('PREPARING ' + self.vmName) + ' FOR TESTING'))
        self.server.logMsg(((self.vmName + ' OPERATING SYSTEM: ') + self.vmOS))
        self.server.logMsg(((self.vmName + ' ARCHITECTURE: ') + self.getArch()))
        self.getSnapshots()
        self.powerOn(False)

    def revertToTestingBase(self):
        """Revert to the first snapshot whose name contains 'testing_base'
        (case-insensitive); returns None when no such snapshot exists."""
        self.server.logMsg(('RESETTING VM ' + self.vmName))
        self.getSnapshots()
        for i in self.snapshotList:
            if ('testing_base' in i[0].name.lower()):
                self.server.logMsg(('REVERTING VM TO ' + i[0].name))
                return self.revertToSnapshot(i[0].snapshot)
        return None

    def revertToSnapshot(self, snapshotObj):
        """Revert to the given snapshot object and wait for completion."""
        return self.waitForTask(snapshotObj.RevertToSnapshot_Task())

    def revertToSnapshotByName(self, snapshotName):
        """Revert to the snapshot whose (stripped) name matches exactly;
        returns None when no snapshot matches."""
        self.server.logMsg(('RESETTING VM ' + self.vmName))
        self.getSnapshots()
        for snapshotObject in self.snapshotList:
            if (snapshotName.strip() == snapshotObject[0].name.strip()):
                self.server.logMsg(('REVERTING VM TO ' + snapshotObject[0].name))
                return self.revertToSnapshot(snapshotObject[0].snapshot)
        return None

    def revertMsfVm(self):
        """Revert to and then delete every 'PAYLOAD_TESTING-' snapshot
        (the temporary snapshots created by takeTempSnapshot)."""
        self.getSnapshots()
        for i in self.snapshotList:
            if ('PAYLOAD_TESTING-' in i[0].name):
                self.server.logMsg(((('REVERTING ' + self.vmName) + ' TO ') + i[0].name))
                self.revertToSnapshot(i[0].snapshot)
                self.deleteSnapshot(i[0].name)

    def runCmdOnGuest(self, cmdAndArgList):
        """Start cmdAndArgList[0] with the remaining items as arguments inside
        the guest (fire-and-forget); returns True if the process launched."""
        self.server.logMsg(((("RUNNING '" + ' '.join(cmdAndArgList)) + "' ON ") + self.vmName))
        if (self.checkTools() == 'TOOLS_READY'):
            try:
                creds = vim.vm.guest.NamePasswordAuthentication(username=self.vmUsername, password=self.vmPassword)
                content = self.server.connection.RetrieveContent()
                cmdspec = vim.vm.guest.ProcessManager.ProgramSpec(programPath=cmdAndArgList[0], arguments=' '.join(cmdAndArgList[1:]))
                cmdpid = content.guestOperationsManager.processManager.StartProgramInGuest(vm=self.vmObject, auth=creds, spec=cmdspec)
                retVal = False
                self.server.logMsg(((("LAUNCHING '" + ' '.join(cmdAndArgList)) + "' ON ") + self.vmName))
                retVal = True
            except vim.fault.InvalidGuestLogin as e:
                self.server.logMsg(('INCORRECT USERTNAME/PASSWORD PROVIDED FOR ' + self.vmName))
                self.server.logMsg(('SYSTEM ERROR:\n' + str(e)))
                retVal = False
            except vim.fault.GuestPermissionDenied as f:
                self.server.logMsg(((('INSUFFICIENT PERMISSIONS TO RUN ' + ' '.join(cmdAndArgList)) + ' ON ') + self.vmName))
                self.server.logMsg(('SYSTEM ERROR:\n' + str(f)))
                retVal = False
            except vim.fault.FileNotFound as g:
                self.server.logMsg(((('UNABLE TO LOCATE THE FILE YOU ARE TRYING TO EXECUTE ' + ' '.join(cmdAndArgList)) + ' ON ') + self.vmName))
                self.server.logMsg(('SYSTEM ERROR:\n' + str(g)))
                retVal = False
        else:
            self.server.logMsg(((("FAILED TO RUN '" + ' '.join(cmdAndArgList)) + "' ON ") + self.vmName))
            retVal = False
        return retVal

    def scheduleCmdOnGuest(self, cmdAndArgList, secDelay):
        """Schedule the command via Windows schtasks.exe to run secDelay
        seconds from now (one-shot task with a random name)."""
        strTaskName = ('VM-' + ''.join((choice(ascii_lowercase) for i in range(12))))
        schedTime = (datetime.datetime.now() + datetime.timedelta(seconds=secDelay))
        currentTime = datetime.datetime.now()
        self.server.logMsg(((('CURRENT TIME = ' + str(currentTime.hour)) + ':') + str(currentTime.minute)))
        # Zero-pad hour/minute for schtasks' HH:MM format.
        strHours = str(schedTime.hour)
        if (len(strHours) < 2):
            strHours = ('0' + strHours)
        strMinutes = str(schedTime.minute)
        if (len(strMinutes) < 2):
            strMinutes = ('0' + strMinutes)
        schedTimeStr = ((strHours + ':') + strMinutes)
        self.server.logMsg(('SCHEDULE TIME FOR EXECUTION = ' + schedTimeStr))
        schedPrefixStr = (((('c:\\windows\\system32\\schtasks.exe /create /tn ' + strTaskName) + ' /ST ') + schedTimeStr) + ' /SC once /tr ')
        schedPrefixList = schedPrefixStr.split()
        schedPrefixList.append((('"' + ' '.join(cmdAndArgList)) + '"'))
        return self.runCmdOnGuest(schedPrefixList)

    def setPassword(self, vmPassword):
        """Set the guest password used for Tools authentication."""
        self.vmPassword = vmPassword

    def setTestVm(self):
        """Flag this VM as a test VM."""
        self.testVm = True

    def setUsername(self, vmUsername):
        """Set the guest username used for Tools authentication."""
        self.vmUsername = vmUsername

    def setVmIp(self, ipAddress):
        """Manually override the cached VM IP address."""
        self.vmIp = ipAddress
        return True

    def takeSnapshot(self, snapshotName, asyncFlag=False, snapshotDescription='', dumpMemory=False, setQuiescent=False):
        """Create a snapshot; returns waitForTask's result when blocking,
        None when async, False when the host forbids snapshots."""
        self.server.logMsg(((('TAKING SNAPSHOT ' + snapshotName) + ' ON ') + self.vmName))
        try:
            snapshotTask = self.vmObject.CreateSnapshot_Task(snapshotName, snapshotDescription, dumpMemory, setQuiescent)
        except vim.fault.RestrictedVersion:
            # e.g. free-license ESXi hosts reject snapshot creation.
            self.server.logMsg((('[WARNING]: SNAPSHOTS NOT SUPPORTED FOR ' + self.vmName) + ' ON TARGET'))
            return False
        if (not asyncFlag):
            return self.waitForTask(snapshotTask)
        else:
            return None

    def takeTempSnapshot(self, asyncFlag=False):
        """Create a 'PAYLOAD_TESTING-<epoch>' snapshot (cleaned up later by
        revertMsfVm)."""
        snapshotName = ('PAYLOAD_TESTING-' + str(time.time()).split('.')[0])
        return self.takeSnapshot(snapshotName, asyncFlag)

    def updateProcList(self):
        """Refresh self.procList with 'pid\\t\\tname\\t\\tcmdline\\t\\towner'
        strings from the guest; returns True on success.

        NOTE(review): the success path (try/else) does not break out of the
        retry loop, so the guest is queried up to 5 times even after a
        successful listing — confirm a break was not intended.
        """
        content = self.server.connection.RetrieveContent()
        creds = vim.vm.guest.NamePasswordAuthentication(username=self.vmUsername, password=self.vmPassword)
        for i in range(5):
            try:
                processList = content.guestOperationsManager.processManager.ListProcessesInGuest(vm=self.vmObject, auth=creds)
            except vim.fault.InvalidState as e:
                self.server.logMsg('[WARNING]: VM IN A STRANGE STATE; RETRYING PROCLIST UPDATE')
                self.server.logMsg(('SYSTEM ERROR:\n' + str(e)))
                retVal = False
                time.sleep(1)
                pass
            except Exception as f:
                self.server.logMsg('[ERROR]: UNKNOWN ERROR (SORRY!)')
                self.server.logMsg(('SYSTEM ERROR:\n' + str(f)))
                retVal = False
                break
            else:
                # Replace the cached list in place.
                self.procList[:] = []
                for runningProc in processList:
                    self.procList.append(((((((str(runningProc.pid) + '\t\t') + runningProc.name) + '\t\t') + runningProc.cmdLine) + '\t\t') + runningProc.owner))
                retVal = True
        return retVal

    def uploadAndRun(self, srcFile, dstFile, remoteInterpreter=None, useCmdShell=False):
        """Upload srcFile to dstFile on the guest, chmod it on non-Windows
        guests, and execute it (optionally through an interpreter and/or
        cmd.exe /k); returns True on success.

        NOTE(review): the chmod failure path logs ``self.devVm``, an attribute
        never set on this class — presumably meant to be self.vmName; as
        written it would raise AttributeError.
        """
        self.server.logMsg(((('SOURCE FILE = ' + srcFile) + '; DESTINATION FILE = ') + dstFile))
        remoteCmd = []
        if (useCmdShell == True):
            remoteCmd.extend(['cmd.exe', '/k'])
        if (remoteInterpreter != None):
            remoteCmd.append(remoteInterpreter)
        remoteCmd.append(dstFile)
        if (not self.uploadFileToGuest(srcFile, dstFile)):
            self.server.logMsg(((('[FATAL ERROR]: FAILED TO UPLOAD ' + srcFile) + ' TO ') + self.vmName))
            return False
        # Windows-ness is inferred from the VM name, not the guest OS field.
        if ('win' not in self.vmName.lower()):
            chmodCmdList = ('/bin/chmod 755'.split() + [dstFile])
            if (not self.runCmdOnGuest(chmodCmdList)):
                self.server.logMsg(((('[FATAL ERROR]: FAILED TO RUN ' + ' '.join(chmodCmdList)) + ' ON ') + self.devVm))
                return False
        if (not self.runCmdOnGuest(remoteCmd)):
            self.server.logMsg(((("[FATAL ERROR]: FAILED TO RUN '" + ' '.join(remoteCmd)) + "' ON ") + self.vmName))
            return False
        return True

    def uploadAndSchedule(self, srcFile, dstFile, secDelay, remoteInterpreter=None):
        """Upload srcFile to dstFile and schedule it to run after secDelay
        seconds (Windows schtasks); returns True on success."""
        self.server.logMsg(((('SOURCE FILE = ' + srcFile) + '; DESTINATION FILE = ') + dstFile))
        if (remoteInterpreter != None):
            remoteCmd = [remoteInterpreter, dstFile]
        else:
            remoteCmd = [dstFile]
        if (not self.uploadFileToGuest(srcFile, dstFile)):
            self.server.logMsg(((('[FATAL ERROR]: FAILED TO UPLOAD ' + srcFile) + ' TO ') + self.vmName))
            return False
        if (not self.scheduleCmdOnGuest(remoteCmd, secDelay)):
            self.server.logMsg(((("[FATAL ERROR]: FAILED TO RUN '" + ' '.join(remoteCmd)) + "' ON ") + self.vmName))
            return False
        return True

    def uploadFileToGuest(self, srcFile, dstFile):
        """Upload a local file into the guest via the Tools file manager and
        an HTTPS PUT; returns True on (apparent) success.

        NOTE(review): the non-200 branch logs an upload error but still sets
        retVal = True — looks like it should be False; confirm.
        NOTE(review): the source file is opened in text mode ('r'), which may
        corrupt binary payloads — confirm 'rb' was not intended.
        """
        self.server.logMsg(((((('ATTEMPTING TO UPLOAD ' + srcFile) + ' TO ') + dstFile) + ' ON ') + self.vmName))
        self.server.logMsg(((((('USING ' + self.vmUsername) + ' PW ') + self.vmPassword) + ' ON ') + self.vmName))
        retVal = False
        if (self.checkTools() == 'TOOLS_READY'):
            creds = vim.vm.guest.NamePasswordAuthentication(username=self.vmUsername, password=self.vmPassword)
            content = self.server.connection.RetrieveContent()
            self.server.logMsg('TOOLS CHECKS OUT')
            try:
                srcFileObj = open(srcFile, 'r')
                fileContent = srcFileObj.read()
                srcFileObj.close()
            except IOError:
                self.server.logMsg(('FAILED TO OPEN FILE ' + srcFile))
                return retVal
            try:
                file_attribute = vim.vm.guest.FileManager.FileAttributes()
                vmFileManager = content.guestOperationsManager.fileManager
                incompleteUrl = vmFileManager.InitiateFileTransferToGuest(self.vmObject, creds, dstFile, file_attribute, len(fileContent), True)
                self.server.logMsg(incompleteUrl)
                # The returned URL contains a '*' placeholder for the host.
                splitUrl = incompleteUrl.split('*')
                realUrl = ((splitUrl[0] + self.server.hostname) + splitUrl[1])
                self.server.logMsg(realUrl)
                resp = requests.put(realUrl, data=fileContent, verify=False)
                if (not (resp.status_code == 200)):
                    self.server.logMsg(((('ERROR UPLOADING FILE TO ' + self.vmName) + ' HTTP CODE ') + str(resp.status_code)))
                    retVal = True
                else:
                    self.server.logMsg(((('UPLOADED FILE TO ' + self.vmName) + ' HTTP CODE ') + str(resp.status_code)))
                    retVal = True
            except IOError as e:
                self.server.logMsg(('FILE NOT FOUND: ' + srcFile))
                self.server.logMsg(('SYSTEM ERROR: ' + str(e)))
            except vim.fault.InvalidGuestLogin as f:
                self.server.logMsg(('INCORRECT USERTNAME/PASSWORD PROVIDED FOR ' + self.vmName))
                self.server.logMsg(((('USERNAME: ' + self.vmUsername) + ' PASSWORD: ') + self.vmPassword))
                self.server.logMsg(('SYSTEM ERROR: ' + str(f)))
            except vmodl.fault.InvalidArgument as f:
                self.server.logMsg('INVALID ARGUMENT; OFTEN THIS IS BECAUSE THE SPECIFIED REMOTE PATH IS NOT VALID')
                self.server.logMsg(('SYSTEM ERROR: ' + str(f)))
        else:
            self.server.logMsg(('THERE IS A PROBLEM WITH THE VMWARE TOOLS ON ' + self.vmName))
        return retVal

    def waitForTask(self, task):
        """Poll the vSphere property collector (up to 20 x 5s) for the task to
        reach 'success'; returns True on success, False on timeout."""
        pc = self.server.connection.content.propertyCollector
        objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=task)
        propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, pathSet=[], all=True)
        filterSpec = vmodl.query.PropertyCollector.FilterSpec()
        filterSpec.objectSet = [objSpec]
        filterSpec.propSet = [propSpec]
        filter = pc.CreateFilter(filterSpec, True)
        for i in range(20):
            update = pc.WaitForUpdates(None)
            for filterSet in update.filterSet:
                for filterObject in filterSet.objectSet:
                    if (filterObject.obj == task):
                        for change in filterObject.changeSet:
                            taskStatus = 'UNKNOWN'
                            # The state may arrive as the whole 'info' object
                            # or as the bare 'info.state' property.
                            if (change.name == 'info'):
                                taskStatus = change.val.state
                            elif (change.name == 'info.state'):
                                taskStatus = change.val
                            else:
                                continue
                            if (taskStatus == 'success'):
                                self.server.logMsg('DONE')
                                return True
            time.sleep(5)
        self.server.logMsg('TASK NOT COMPLETED IN ALLOTTED TIME')
        return False
def test_sync_teams_to_groups(user_creation, invite_only_user_creation, blacklisted_emails, app):
    """End-to-end check of sync_teams_to_groups' staleness cutoff.

    Three passes over the 'buynlarge'/'synced' team:
      1. never-synced team, 1s cutoff -> synced (timestamp set, new
         transaction id);
      2. team updated 2s ago, 120s cutoff -> skipped (transaction id
         unchanged);
      3. team updated 20s ago, 10s cutoff -> re-synced (transaction id
         changes again).
    """
    # Register the fake auth engine as a login service so syncing can run.
    database.LoginService.create(name=_FAKE_AUTH)
    sync_team_info = model.team.get_team_sync_information('buynlarge', 'synced')
    assert (sync_team_info.last_updated is None)
    # Auth backend with no users -- sync should still stamp the team.
    fake_auth = FakeUsers([])
    sync_teams_to_groups(fake_auth, timedelta(seconds=1))
    updated_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
    assert (updated_sync_info.last_updated is not None)
    assert (updated_sync_info.transaction_id != sync_team_info.transaction_id)
    # Backdate by only 2s; with a 120s threshold the team is not stale,
    # so no re-sync should occur.
    current_info = model.team.get_team_sync_information('buynlarge', 'synced')
    current_info.last_updated = (datetime.now() - timedelta(seconds=2))
    current_info.save()
    sync_teams_to_groups(fake_auth, timedelta(seconds=120))
    third_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
    assert (third_sync_info.transaction_id == updated_sync_info.transaction_id)
    # Backdate by 20s with a 10s threshold: now stale, so it re-syncs.
    current_info = model.team.get_team_sync_information('buynlarge', 'synced')
    current_info.last_updated = (datetime.now() - timedelta(seconds=20))
    current_info.save()
    sync_teams_to_groups(fake_auth, timedelta(seconds=10))
    fourth_sync_info = model.team.get_team_sync_information('buynlarge', 'synced')
    assert (fourth_sync_info.transaction_id != updated_sync_info.transaction_id)
def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root):
    """Collect COCO-style instance annotations and write the semantic mask
    for one nuImages sample.

    Args:
        nuim: NuImages API object (provides surface_ann / object_ann tables).
        img_info: dict with 'token', 'id', 'width', 'height', 'file_name'.
        cat2id: mapping from remapped category name to COCO category id.
        out_dir: unused here -- kept for interface compatibility with callers.
        data_root: unused here -- kept for interface compatibility.
        seg_root: directory the per-image semantic PNG is written under.

    Returns:
        (annotations, max_label): COCO annotation dicts for the object
        instances, and the highest semantic label index painted.
    """
    sd_token = img_info['token']
    image_id = img_info['id']
    name_to_index = name_to_index_mapping(nuim.category)
    (width, height) = (img_info['width'], img_info['height'])
    # Semantic mask; 0 is background.
    semseg_mask = np.zeros((height, width)).astype('uint8')
    # Paint surfaces (e.g. drivable area) first so object instances painted
    # below overwrite them where they overlap.
    surface_anns = [o for o in nuim.surface_ann if (o['sample_data_token'] == sd_token)]
    for ann in surface_anns:
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if (ann['mask'] is None):
            continue
        mask = mask_decode(ann['mask'])
        semseg_mask[(mask == 1)] = name_to_index[category_name]
    object_anns = [o for o in nuim.object_ann if (o['sample_data_token'] == sd_token)]
    # Sort by token for a deterministic annotation order across runs.
    object_anns = sorted(object_anns, key=(lambda k: k['token']))
    annotations = []
    for (i, ann) in enumerate(object_anns, start=1):
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if (ann['mask'] is None):
            continue
        mask = mask_decode(ann['mask'])
        semseg_mask[(mask == 1)] = name_to_index[category_name]
        # Only categories present in NAME_MAPPING become detection classes.
        if (category_name in NAME_MAPPING):
            cat_name = NAME_MAPPING[category_name]
            cat_id = cat2id[cat_name]
            (x_min, y_min, x_max, y_max) = ann['bbox']
            mask_anno = dict()
            # nuImages stores RLE counts base64-encoded; COCO expects the
            # decoded string.
            mask_anno['counts'] = base64.b64decode(ann['mask']['counts']).decode()
            mask_anno['size'] = ann['mask']['size']
            data_anno = dict(image_id=image_id, category_id=cat_id, bbox=[x_min, y_min, (x_max - x_min), (y_max - y_min)], area=((x_max - x_min) * (y_max - y_min)), segmentation=mask_anno, iscrowd=0)
            annotations.append(data_anno)
    img_filename = img_info['file_name']
    # NOTE(review): str.replace swaps every 'jpg' occurrence, not just the
    # extension -- fine for nuImages' filename layout, but worth confirming.
    seg_filename = img_filename.replace('jpg', 'png')
    seg_filename = osp.join(seg_root, seg_filename)
    mmcv.imwrite(semseg_mask, seg_filename)
    return (annotations, np.max(semseg_mask))
def _random_crop(image_list, crop_height, crop_width):
    """Randomly crop every image in `image_list` with one shared offset.

    All tensors must be rank-3 (H, W, C) and share the same spatial size;
    this is enforced with runtime tf.Assert ops wired in through
    control_dependencies, so checks fire on the dynamic shapes at graph
    execution time (TF1-style graph code).

    Args:
        image_list: list of 3-D image tensors of identical spatial size.
        crop_height: crop height (must be <= image height).
        crop_width: crop width (must be <= image width).

    Returns:
        List of tensors, all cropped at the same random offset.

    Raises:
        ValueError: if `image_list` is empty (checked eagerly, not in-graph).
    """
    if (not image_list):
        raise ValueError('Empty image_list.')
    # One rank-3 assertion per input tensor.
    rank_assertions = []
    for i in range(len(image_list)):
        image_rank = tf.rank(image_list[i])
        rank_assert = tf.Assert(tf.equal(image_rank, 3), ['Wrong rank for tensor %s [expected] [actual]', image_list[i].name, 3, image_rank])
        rank_assertions.append(rank_assert)
    # Read the reference shape only after the first rank check has run.
    with tf.control_dependencies([rank_assertions[0]]):
        image_shape = tf.shape(image_list[0])
    image_height = image_shape[0]
    image_width = image_shape[1]
    crop_size_assert = tf.Assert(tf.logical_and(tf.greater_equal(image_height, crop_height), tf.greater_equal(image_width, crop_width)), ['Crop size greater than the image size.'])
    asserts = [rank_assertions[0], crop_size_assert]
    # Every further image must match the reference height/width exactly.
    for i in range(1, len(image_list)):
        image = image_list[i]
        asserts.append(rank_assertions[i])
        with tf.control_dependencies([rank_assertions[i]]):
            shape = tf.shape(image)
        height = shape[0]
        width = shape[1]
        height_assert = tf.Assert(tf.equal(height, image_height), ['Wrong height for tensor %s [expected][actual]', image.name, height, image_height])
        width_assert = tf.Assert(tf.equal(width, image_width), ['Wrong width for tensor %s [expected][actual]', image.name, width, image_width])
        asserts.extend([height_assert, width_assert])
    # Sample one offset, uniform over every valid position; the control
    # dependencies force all assertions to run before the reshapes.
    with tf.control_dependencies(asserts):
        max_offset_height = tf.reshape(((image_height - crop_height) + 1), [])
    with tf.control_dependencies(asserts):
        max_offset_width = tf.reshape(((image_width - crop_width) + 1), [])
    offset_height = tf.random_uniform([], maxval=max_offset_height, dtype=tf.int32)
    offset_width = tf.random_uniform([], maxval=max_offset_width, dtype=tf.int32)
    # Same offset applied to every image keeps the list spatially aligned.
    return [_crop(image, offset_height, offset_width, crop_height, crop_width) for image in image_list]
class ModbusBaseSyncClient(ModbusClientMixin, ModbusProtocol):
    """Base Modbus client tying the request mixin to the transport protocol.

    Owns the framer (PDU encode/decode), the transaction manager, and the
    retry/broadcast parameters; concrete subclasses supply the transport
    (TCP/UDP/serial/TLS) by implementing connect().
    """

    class _params():
        # Bag of legacy keyword parameters, populated in __init__.
        retries: (int | None) = None
        retry_on_empty: (bool | None) = None
        close_comm_on_error: (bool | None) = None
        strict: (bool | None) = None
        broadcast_enable: (bool | None) = None
        reconnect_delay: (int | None) = None
        source_address: (tuple[(str, int)] | None) = None
        server_hostname: (str | None) = None

    def __init__(self, framer: Framer, timeout: float=3, retries: int=3, retry_on_empty: bool=False, close_comm_on_error: bool=False, strict: bool=True, broadcast_enable: bool=False, reconnect_delay: float=0.1, reconnect_delay_max: float=300.0, on_reconnect_callback: (Callable[([], None)] | None)=None, no_resend_on_retry: bool=False, **kwargs: Any) -> None:
        """Initialize client state; does not open the connection."""
        ModbusClientMixin.__init__(self)
        ModbusProtocol.__init__(self, CommParams(comm_type=kwargs.get('CommType'), comm_name='comm', source_address=kwargs.get('source_address', None), reconnect_delay=reconnect_delay, reconnect_delay_max=reconnect_delay_max, timeout_connect=timeout, host=kwargs.get('host', None), port=kwargs.get('port', 0), sslctx=kwargs.get('sslctx', None), baudrate=kwargs.get('baudrate', None), bytesize=kwargs.get('bytesize', None), parity=kwargs.get('parity', None), stopbits=kwargs.get('stopbits', None), handle_local_echo=kwargs.get('handle_local_echo', False)), False)
        self.params = self._params()
        self.params.retries = int(retries)
        self.params.retry_on_empty = bool(retry_on_empty)
        self.params.close_comm_on_error = bool(close_comm_on_error)
        self.params.strict = bool(strict)
        self.params.broadcast_enable = bool(broadcast_enable)
        self.on_reconnect_callback = on_reconnect_callback
        self.retry_on_empty: int = 0
        self.no_resend_on_retry = no_resend_on_retry
        self.slaves: list[int] = []
        # Resolve the framer: either a Framer enum key or an already concrete
        # ModbusFramer class.
        self.framer = FRAMER_NAME_TO_CLASS.get(framer, cast(Type[ModbusFramer], framer))(ClientDecoder(), self)
        self.transaction = DictTransactionManager(self, retries=retries, retry_on_empty=retry_on_empty, **kwargs)
        # NOTE(review): self.params.reconnect_delay is never assigned above,
        # so this fallback always yields 0 -- confirm against the original.
        self.reconnect_delay_current = (self.params.reconnect_delay or 0)
        self.use_udp = False
        self.state = ModbusTransactionState.IDLE
        self.last_frame_end: (float | None) = 0
        self.silent_interval: float = 0

    # NOTE(review): upstream pymodbus exposes this as a @property; the
    # decorator may have been lost in this copy.
    def connected(self) -> bool:
        """Return True while the transport is active."""
        return self.is_active()

    def register(self, custom_response_class: ModbusResponse) -> None:
        """Register a custom response class with the framer's decoder."""
        self.framer.decoder.register(custom_response_class)

    def close(self, reconnect: bool=False) -> None:
        """Close the connection.

        With reconnect=True the transport is torn down via connection_lost so
        the protocol's reconnect machinery kicks in; otherwise it is closed
        for good.
        """
        if reconnect:
            self.connection_lost(asyncio.TimeoutError('Server not responding'))
        else:
            self.transport_close()

    def idle_time(self) -> float:
        """Return the timestamp until which the line must stay silent
        (end of last frame plus the silent interval), or 0 when unknown."""
        if ((self.last_frame_end is None) or (self.silent_interval is None)):
            return 0
        return (self.last_frame_end + self.silent_interval)

    def execute(self, request: (ModbusRequest | None)=None) -> ModbusResponse:
        """Synchronously run a request through the transaction manager.

        Raises:
            ConnectionException: if the transport cannot be established.
        """
        if (not self.connect()):
            raise ConnectionException(f'Failed to connect[{self!s}]')
        return self.transaction.execute(request)

    async def async_execute(self, request=None):
        """Send a request and await its response, retrying on timeout.

        Broadcast writes (broadcast_enable with slave id 0) return a marker
        bytes object immediately since no response will arrive.

        Raises:
            ModbusIOException: when every retry times out (connection is
                scheduled for reconnect first).
        """
        request.transaction_id = self.transaction.getNextTID()
        packet = self.framer.buildPacket(request)
        count = 0
        while (count <= self.params.retries):
            # First attempt always sends; resends are skipped when
            # no_resend_on_retry is set.
            if ((not count) or (not self.no_resend_on_retry)):
                self.transport_send(packet)
            if (self.params.broadcast_enable and (not request.slave_id)):
                resp = b'Broadcast write sent - no response expected'
                break
            try:
                req = self._build_response(request.transaction_id)
                resp = (await asyncio.wait_for(req, timeout=self.comm_params.timeout_connect))
                break
            except asyncio.exceptions.TimeoutError:
                count += 1
        if (count > self.params.retries):
            self.close(reconnect=True)
            raise ModbusIOException(f'ERROR: No response received after {self.params.retries} retries')
        return resp

    def callback_data(self, data: bytes, addr: (tuple | None)=None) -> int:
        """Transport callback: feed received bytes into the framer."""
        self.framer.processIncomingPacket(data, self._handle_response, slave=0)
        return len(data)

    def callback_disconnected(self, _reason: (Exception | None)) -> None:
        """Transport callback: fail every pending transaction on disconnect."""
        for tid in list(self.transaction):
            self.raise_future(self.transaction.getTransaction(tid), ConnectionException('Connection lost during request'))

    async def connect(self):
        """Open the connection; implemented by the transport subclass."""

    def raise_future(self, my_future, exc):
        """Set an exception on a future unless it is already resolved."""
        if (not my_future.done()):
            my_future.set_exception(exc)

    def _handle_response(self, reply, **_kwargs):
        """Resolve the pending future matching the reply's transaction id."""
        if (reply is not None):
            tid = reply.transaction_id
            if (handler := self.transaction.getTransaction(tid)):
                if (not handler.done()):
                    handler.set_result(reply)
            else:
                Log.debug('Unrequested message: {}', reply, ':str')

    def _build_response(self, tid):
        """Create and register the future that will receive the response."""
        my_future = asyncio.Future()
        if (not self.transport):
            # Not connected: fail the future immediately instead of hanging.
            self.raise_future(my_future, ConnectionException('Client is not connected'))
        else:
            self.transaction.addTransaction(my_future, tid)
        return my_future

    def send(self, request):
        """Pre-send hook: track the transaction state machine."""
        if (self.state != ModbusTransactionState.RETRYING):
            Log.debug('New Transaction state "SENDING"')
            self.state = ModbusTransactionState.SENDING
        return request

    def recv(self, size):
        """Receive hook: returns the requested size unchanged."""
        return size

    # NOTE(review): takes `cls` -- upstream this is a @classmethod; the
    # decorator appears to be missing in this copy.
    def _get_address_family(cls, address):
        """Return AF_INET6 if `address` parses as IPv6, else AF_INET."""
        try:
            _ = socket.inet_pton(socket.AF_INET6, address)
        except OSError:
            return socket.AF_INET
        return socket.AF_INET6

    def __enter__(self):
        """Sync context manager: connect or raise ConnectionException."""
        if (not self.connect()):
            raise ConnectionException(f'Failed to connect[{self.__str__()}]')
        return self

    async def __aenter__(self):
        """Async context manager: connect or raise ConnectionException."""
        if (not (await self.connect())):
            raise ConnectionException(f'Failed to connect[{self.__str__()}]')
        return self

    def __exit__(self, klass, value, traceback):
        self.close()

    async def __aexit__(self, klass, value, traceback):
        self.close()

    def __str__(self):
        """Readable description: class name plus host:port."""
        return f'{self.__class__.__name__} {self.comm_params.host}:{self.comm_params.port}'
def load_embedding_txt(path):
    """Load a word2vec-style text embedding file.

    The first line (the "<vocab_size> <dim>" header) is skipped; each
    remaining non-empty line is "<token> <v1> <v2> ...".

    Returns:
        (tokens, matrix): the token list and a float array of shape
        (len(tokens), dim).
    """
    tokens = []
    flat_values = []
    with codecs.open(path, 'r', encoding='utf-8') as handle:
        handle.readline()  # skip the header line
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            fields = stripped.split()
            tokens.append(fields[0])
            flat_values.extend(float(v) for v in fields[1:])
    matrix = np.asarray(flat_values).reshape(len(tokens), -1)
    return (tokens, matrix)
class Connection(object):
    """Adapter exposing a socket/pipe-style connection through a queue-like
    put/get interface.

    `put`/`get` forward to the wrapped object's `send`/`recv`; any other
    attribute access is delegated to the wrapped connection untouched.
    """

    def __init__(self, conn):
        self._conn = conn

    def put(self, *args, **kwargs):
        """Alias for the underlying connection's send()."""
        return self._conn.send(*args, **kwargs)

    def get(self, *args, **kwargs):
        """Alias for the underlying connection's recv()."""
        return self._conn.recv(*args, **kwargs)

    def __getattr__(self, name):
        # Only invoked for names not found on Connection itself.
        return getattr(self._conn, name)
class DFN(BaseModel):
    """Doyle-Fuller-Newman lithium-ion battery model.

    Full spatial resolution: electrolyte and electrode variables depend on
    the through-cell coordinate x, and particle concentrations on (x, r) --
    nothing is x-averaged.
    """

    def __init__(self, options=None, name='Doyle-Fuller-Newman model', build=True):
        # DFN resolves the through-cell direction, so no x-averaging.
        self.x_average = False
        super().__init__(options, name)
        self.set_submodels(build)
        pybamm.citations.register('Doyle1993')

    def set_intercalation_kinetics_submodel(self):
        """Register main-reaction kinetics for each porous electrode phase."""
        for domain in ['negative', 'positive']:
            electrode_type = self.options.electrode_types[domain]
            if (electrode_type == 'porous'):
                intercalation_kinetics = self.get_intercalation_kinetics(domain)
                phases = self.options.phases[domain]
                for phase in phases:
                    submod = intercalation_kinetics(self.param, domain, 'lithium-ion main', self.options, phase)
                    self.submodels[f'{domain} {phase} interface'] = submod
                # With multiple phases an extra submodel sums their currents.
                if (len(phases) > 1):
                    self.submodels[f'total {domain} interface'] = pybamm.kinetics.TotalMainKinetics(self.param, domain, 'lithium-ion main', self.options)

    def set_particle_submodel(self):
        """Register the particle transport submodel selected by the
        'particle' option for each non-planar electrode and phase."""
        for domain in ['negative', 'positive']:
            if (self.options.electrode_types[domain] == 'planar'):
                continue
            particle = getattr(self.options, domain)['particle']
            for phase in self.options.phases[domain]:
                if (particle == 'Fickian diffusion'):
                    submod = pybamm.particle.FickianDiffusion(self.param, domain, self.options, phase=phase, x_average=False)
                elif (particle in ['uniform profile', 'quadratic profile', 'quartic profile']):
                    submod = pybamm.particle.PolynomialProfile(self.param, domain, self.options, phase=phase)
                elif (particle == 'MSMR'):
                    submod = pybamm.particle.MSMRDiffusion(self.param, domain, self.options, phase=phase, x_average=False)
                # NOTE(review): an unrecognized 'particle' option would leave
                # `submod` unbound here; presumably options are validated
                # upstream -- confirm.
                self.submodels[f'{domain} {phase} particle'] = submod
                self.submodels[f'{domain} {phase} total particle concentration'] = pybamm.particle.TotalConcentration(self.param, domain, self.options, phase)

    def set_solid_submodel(self):
        """Register full Ohm's-law electrode potential submodels."""
        for domain in ['negative', 'positive']:
            if (self.options.electrode_types[domain] == 'planar'):
                continue
            if (self.options['surface form'] == 'false'):
                submodel = pybamm.electrode.ohm.Full
            else:
                submodel = pybamm.electrode.ohm.SurfaceForm
            self.submodels[f'{domain} electrode potential'] = submodel(self.param, domain, self.options)

    def set_electrolyte_concentration_submodel(self):
        """Register full (x-dependent) electrolyte diffusion."""
        self.submodels['electrolyte diffusion'] = pybamm.electrolyte_diffusion.Full(self.param, self.options)

    def set_electrolyte_potential_submodel(self):
        """Register electrolyte conductivity plus the surface-potential
        formulation chosen by the 'surface form' option.

        Raises:
            pybamm.OptionError: for a conductivity option other than
                'default'/'full' (reduced forms are unsuitable for DFN).
        """
        surf_form = pybamm.electrolyte_conductivity.surface_potential_form
        if (self.options['electrolyte conductivity'] not in ['default', 'full']):
            raise pybamm.OptionError("electrolyte conductivity '{}' not suitable for DFN".format(self.options['electrolyte conductivity']))
        if (self.options['surface form'] == 'false'):
            self.submodels['electrolyte conductivity'] = pybamm.electrolyte_conductivity.Full(self.param, self.options)
        # No surface form -> explicit potential difference; otherwise it is
        # solved as a differential or algebraic equation.
        if (self.options['surface form'] == 'false'):
            surf_model = surf_form.Explicit
        elif (self.options['surface form'] == 'differential'):
            surf_model = surf_form.FullDifferential
        elif (self.options['surface form'] == 'algebraic'):
            surf_model = surf_form.FullAlgebraic
        for domain in ['negative', 'separator', 'positive']:
            if (self.options.electrode_types.get(domain) == 'planar'):
                continue
            self.submodels[f'{domain} surface potential difference'] = surf_model(self.param, domain, self.options)
def _permute_2e_ints(hijkl: np.ndarray, elements: Set[Tuple[(int, ...)]], norb: int, beta: int=0) -> None:
for elem in elements.copy():
shifted = tuple(((e - ((e >= norb) * norb)) for e in elem))
if ((beta != 1) and (elem[::(- 1)] not in elements)):
hijkl[shifted] = hijkl[shifted[::(- 1)]]
elements.remove(elem)
continue
bra_perms = set(itertools.permutations(elem[:2]))
ket_perms = set(itertools.permutations(elem[2:]))
if (beta == 1):
permutations = itertools.product(bra_perms, ket_perms)
else:
permutations = itertools.chain(itertools.product(bra_perms, ket_perms), itertools.product(ket_perms, bra_perms))
for perm in {(e1 + e2) for (e1, e2) in permutations}:
if (perm in elements):
continue
hijkl[shifted] = hijkl[tuple(((e - ((e >= norb) * norb)) for e in perm))]
elements.remove(elem)
break |
class Crossfeed(GStreamerPlugin):
    """Playback plugin exposing the GStreamer `bs2b`
    (Bauer stereophonic-to-binaural) crossfeed element."""
    PLUGIN_ID = _PLUGIN_ID
    PLUGIN_NAME = _('Crossfeed')
    PLUGIN_DESC = _('Mixes the left and right channel in a way that simulates a speaker setup while using headphones, or to adjust for early Stereo recordings.')
    PLUGIN_ICON = 'audio-volume-high'

    # NOTE(review): these take `cls` -- they look like @classmethod
    # definitions whose decorators are missing in this copy; confirm
    # against the original plugin source.
    def setup_element(cls):
        """Create the bs2b element (None when the GStreamer plugin is absent)."""
        return Gst.ElementFactory.make('bs2b', cls.PLUGIN_ID)

    def update_element(cls, element):
        """Push the configured feed level and cutoff frequency to the element."""
        element.set_property('feed', get_cfg('feed'))
        element.set_property('fcut', get_cfg('fcut'))

    def PluginPreferences(cls, window):
        """Build the preferences widget; re-applies settings on any change."""
        prefs = Preferences()
        prefs.connect('changed', (lambda *x: cls.queue_update()))
        return prefs
class Effect1596(BaseEffect):
    """Passive effect: boosts the explosive damage of loaded missile
    charges, scaled by the source skill's level in a skill context."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Skill sources scale the bonus by their trained level; anything
        # else applies the raw attribute.
        level_factor = src.level if 'skill' in context else 1
        bonus = src.getModifiedItemAttr('damageMultiplierBonus') * level_factor
        fit.modules.filteredChargeBoost(
            lambda module: module.charge.requiresSkill('Missile Launcher Operation'),
            'explosiveDamage', bonus, **kwargs)
# NOTE(review): bare call -- this looks like a class decorator that lost its
# leading "@" (streamz registers Stream subclasses via such decorators);
# confirm against the original source.
_api()
class unique(Stream):
    """Stream node that suppresses recently-seen elements.

    With hashable=True (default) membership is tracked in a dict, optionally
    bounded by `maxsize` via a zict LRU -- once a key ages out of the LRU the
    element may be emitted again.  With hashable=False a list kept in
    most-recently-seen-first order is used instead (O(n) lookups, but works
    for unhashable elements).  `key` maps each element to the value used for
    deduplication.
    """

    def __init__(self, upstream, maxsize=None, key=identity, hashable=True, **kwargs):
        self.key = key
        self.maxsize = maxsize
        if hashable:
            self.seen = dict()
            if self.maxsize:
                # Deferred import: zict is only needed for a bounded cache.
                from zict import LRU
                self.seen = LRU(self.maxsize, self.seen)
        else:
            # Unhashable elements: list ordered most-recent-first.
            self.seen = []
        Stream.__init__(self, upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        """Forward `x` downstream unless its key has been seen recently."""
        y = self.key(x)
        emit = True
        if isinstance(self.seen, list):
            # List mode: move the key to the front, suppress the emit when it
            # was already present, and trim to `maxsize` entries.
            if (y in self.seen):
                self.seen.remove(y)
                emit = False
            self.seen.insert(0, y)
            if self.maxsize:
                del self.seen[self.maxsize:]
            if emit:
                return self._emit(x, metadata=metadata)
        # Dict/LRU mode: the sentinel distinguishes "missing" from any
        # stored value.
        elif (self.seen.get(y, '~~not_seen~~') == '~~not_seen~~'):
            self.seen[y] = 1
            return self._emit(x, metadata=metadata)
# NOTE(review): the next line reads like the argument list of a decorator
# (presumably streamlit's "@st.cache(allow_output_mutation=True)") whose name
# was lost; as written it is not valid Python -- restore the decorator from
# the original source.
(allow_output_mutation=True)
def load_indexes():
    """Load the retrieval backends.

    Returns:
        (wiki40b_passages, wiki40b_gpu_index_flat, es_client): the wiki40b
        snippet dataset and a GPU FAISS inner-product index over its dense
        passage representations (both None when LOAD_DENSE_INDEX is off),
        plus an Elasticsearch client for sparse retrieval.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        # Memory-mapped precomputed passage embeddings (num_rows x 128).
        wiki40b_passage_reps = np.memmap('wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128))
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        # Move the flat inner-product index onto GPU 1, then fill it.
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)
    else:
        (wiki40b_passages, wiki40b_gpu_index_flat) = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
# NOTE(review): bare call -- this looks like a decorator application
# ("@_request_params(...)", which copies accepted request parameters into the
# wrapped function's signature/docs) that lost its leading "@"; confirm
# against the original source.
_request_params(docs._search_query, docs._pagination)
def get_places_autocomplete(q: Optional[str]=None, **params) -> JsonResponse:
    """Autocomplete place names via the iNaturalist API.

    Args:
        q: partial place name to match.
        **params: extra request parameters; page='all' fetches every page
            through the dedicated paginator.

    Returns:
        JSON response dict whose 'results' coordinates have been converted
        to numeric (lat, lon) values.
    """
    if (params.get('page') == 'all'):
        # NOTE(review): assumes the paginator's .all() returns a dict with a
        # 'results' key like the raw response -- confirm.
        places = PlaceAutocompletePaginator(q=q, **params).all()
    else:
        places = get(f'{API_V1}/places/autocomplete', q=q, **params).json()
    places['results'] = convert_all_coordinates(places['results'])
    return places
class NetModule():
    """Builds and manages the client-side networks for federated continual
    learning (FedWeIT and non-decomposed baselines).

    In decomposed ('fedweit') mode every body layer is factored into a
    globally shared weight plus per-task adaptive weights, a sigmoid mask, a
    bias, and (for fedweit proper) attention and knowledge-base transfer
    variables.  Otherwise a plain LeNet body is shared across tasks.  Both
    modes use one softmax head per task.
    """

    def __init__(self, args):
        self.args = args
        self.lock = threading.Lock()
        # Single seeded initializer so all weight creation is reproducible.
        self.initializer = tf_initializers.VarianceScaling(seed=args.seed)
        self.state = {}
        self.models = []  # per-task keras models (shared body + task head)
        self.heads = []  # per-task softmax heads
        self.decomposed_layers = {}  # decomposed layers keyed by running layer id
        self.initial_body_weights = []
        self.initial_heads_weights = []
        self.lid = 0  # running layer id used while building bodies
        # Adaptive weights start at shared / adaptive_factor.
        self.adaptive_factor = 3
        self.input_shape = (32, 32, 3)
        if (self.args.base_network == 'lenet'):
            # Kernel shapes of conv1, conv2, dense1, dense2 of the LeNet body.
            self.shapes = [(5, 5, 3, 20), (5, 5, 20, 50), (3200, 800), (800, 500)]
        if (self.args.model in ['fedweit']):
            self.decomposed_variables = {'shared': [], 'adaptive': {}, 'mask': {}, 'bias': {}}
            if (self.args.model == 'fedweit'):
                # fedweit also keeps attention and knowledge-base variables.
                self.decomposed_variables['atten'] = {}
                self.decomposed_variables['from_kb'] = {}

    def init_state(self, cid):
        """Create a fresh in-memory state dict for client `cid`."""
        if (self.args.model in ['fedweit']):
            self.state = {'client_id': cid, 'decomposed_weights': {'shared': [], 'adaptive': {}, 'mask': {}, 'bias': {}}, 'heads_weights': self.initial_heads_weights}
            if (self.args.model == 'fedweit'):
                self.state['decomposed_weights']['atten'] = {}
                self.state['decomposed_weights']['from_kb'] = {}
        else:
            self.state = {'client_id': cid, 'body_weights': self.initial_body_weights, 'heads_weights': self.initial_heads_weights}

    def save_state(self):
        """Snapshot head weights plus body (or decomposed) weights to disk."""
        self.state['heads_weights'] = []
        for h in self.heads:
            self.state['heads_weights'].append(h.get_weights())
        if (self.args.model in ['fedweit']):
            # NOTE(review): the loop variables are unused and the dict below
            # is rebuilt identically on every iteration -- confirm whether
            # this loop is intentional in the original source.
            for (var_type, layers) in self.decomposed_variables.items():
                self.state['decomposed_weights'] = {'shared': [layer.numpy() for layer in self.decomposed_variables['shared']], 'adaptive': {tid: [layer.numpy() for (lid, layer) in self.decomposed_variables['adaptive'][tid].items()] for tid in self.decomposed_variables['adaptive'].keys()}, 'mask': {tid: [layer.numpy() for (lid, layer) in self.decomposed_variables['mask'][tid].items()] for tid in self.decomposed_variables['mask'].keys()}, 'bias': {tid: [layer.numpy() for (lid, layer) in self.decomposed_variables['bias'][tid].items()] for tid in self.decomposed_variables['bias'].keys()}}
                if (self.args.model == 'fedweit'):
                    self.state['decomposed_weights']['from_kb'] = {tid: [layer.numpy() for (lid, layer) in self.decomposed_variables['from_kb'][tid].items()] for tid in self.decomposed_variables['from_kb'].keys()}
                    self.state['decomposed_weights']['atten'] = {tid: [layer.numpy() for (lid, layer) in self.decomposed_variables['atten'][tid].items()] for tid in self.decomposed_variables['atten'].keys()}
        else:
            self.state['body_weights'] = self.model_body.get_weights()
        np_save(self.args.state_dir, '{}_net.npy'.format(self.state['client_id']), self.state)

    def load_state(self, cid):
        """Restore weights for client `cid` from the saved state file."""
        self.state = np_load(os.path.join(self.args.state_dir, '{}_net.npy'.format(cid))).item()
        for (i, h) in enumerate(self.state['heads_weights']):
            self.heads[i].set_weights(h)
        if (self.args.model in ['fedweit']):
            for (var_type, values) in self.state['decomposed_weights'].items():
                if (var_type == 'shared'):
                    # Shared weights are stored as a flat per-layer list.
                    for (lid, weights) in enumerate(values):
                        self.decomposed_variables['shared'][lid].assign(weights)
                else:
                    # Everything else is keyed by task id, then layer id.
                    for (tid, layers) in values.items():
                        for (lid, weights) in enumerate(layers):
                            self.decomposed_variables[var_type][tid][lid].assign(weights)
        else:
            self.model_body.set_weights(self.state['body_weights'])

    def init_global_weights(self):
        """Create the server-side initial weights for the body layers."""
        if (self.args.model in ['fedweit']):
            global_weights = []
            for i in range(len(self.shapes)):
                global_weights.append(self.initializer(self.shapes[i]).numpy())
        else:
            if (self.args.base_network == 'lenet'):
                # Build a throwaway plain body just to harvest its weights.
                body = self.build_lenet_body(decomposed=False)
                global_weights = body.get_weights()
        return global_weights

    def init_decomposed_variables(self, initial_weights):
        """Create shared variables plus all per-task decomposed variables."""
        self.decomposed_variables['shared'] = [tf.Variable(initial_weights[i], name='layer_{}/sw'.format(i)) for i in range(len(self.shapes))]
        for tid in range(self.args.num_tasks):
            for lid in range(len(self.shapes)):
                var_types = (['adaptive', 'bias', 'mask'] if (self.args.model == 'apd') else ['adaptive', 'bias', 'mask', 'atten', 'from_kb'])
                for var_type in var_types:
                    self.create_variable(var_type, lid, tid)

    def create_variable(self, var_type, lid, tid=None):
        """Create one decomposed variable for layer `lid` / task `tid`.

        Initialization per type: 'adaptive' = shared/adaptive_factor;
        'atten' and 'from_kb' = zeros for the first task (and 'from_kb' is
        never trainable); everything else uses the seeded initializer.
        """
        trainable = True
        if (tid not in self.decomposed_variables[var_type]):
            self.decomposed_variables[var_type][tid] = {}
        if (var_type == 'adaptive'):
            init_value = (self.decomposed_variables['shared'][lid].numpy() / self.adaptive_factor)
        elif (var_type == 'atten'):
            # One attention weight per participating client in a round.
            shape = (int(round((self.args.num_clients * self.args.frac_clients))),)
            if (tid == 0):
                trainable = False
                init_value = np.zeros(shape).astype(np.float32)
            else:
                init_value = self.initializer(shape)
        elif (var_type == 'from_kb'):
            # Knowledge-base weights: layer shape extended by a client axis.
            shape = np.concatenate([self.shapes[lid], [int(round((self.args.num_clients * self.args.frac_clients)))]], axis=0)
            trainable = False
            if (tid == 0):
                init_value = np.zeros(shape).astype(np.float32)
            else:
                init_value = self.initializer(shape)
        else:
            # 'bias' / 'mask': one value per output unit of the layer.
            init_value = self.initializer((self.shapes[lid][(- 1)],))
        var = tf.Variable(init_value, trainable=trainable, name='layer_{}/task_{}/{}'.format(lid, tid, var_type))
        self.decomposed_variables[var_type][tid][lid] = var

    def get_variable(self, var_type, lid, tid=None):
        """Look up a decomposed variable ('shared' ignores `tid`)."""
        if (var_type == 'shared'):
            return self.decomposed_variables[var_type][lid]
        else:
            return self.decomposed_variables[var_type][tid][lid]

    def generate_mask(self, mask):
        """Squash raw mask logits into (0, 1) via sigmoid."""
        return tf_activations.sigmoid(mask)

    def get_model_by_tid(self, tid):
        """Return the task-`tid` model, re-wiring decomposed params first."""
        if (self.args.model in ['fedweit']):
            self.switch_model_params(tid)
        return self.models[tid]

    def get_trainable_variables(self, curr_task, head=True):
        """Return the variables to optimize for `curr_task` (optionally
        including the task head)."""
        if (self.args.model in ['fedweit']):
            return self.get_decomposed_trainaible_variables(curr_task, retroactive=False, head=head)
        elif head:
            return self.models[curr_task].trainable_variables
        else:
            return self.model_body.trainable_variables

    def get_decomposed_trainaible_variables(self, curr_task, retroactive=False, head=True):
        """Collect trainable decomposed variables.

        With retroactive=True variables of all tasks up to `curr_task` are
        included (biases only for the current task, attention never for task
        0); otherwise only the current task's variables are returned.
        """
        prev_variables = (['mask', 'bias', 'adaptive'] if (self.args.model == 'apd') else ['mask', 'bias', 'adaptive', 'atten'])
        trainable_variables = [sw for sw in self.decomposed_variables['shared']]
        if retroactive:
            for tid in range((curr_task + 1)):
                for lid in range(len(self.shapes)):
                    for pvar in prev_variables:
                        # Past-task biases stay frozen.
                        if ((pvar == 'bias') and (tid < curr_task)):
                            continue
                        # Task 0 has no (trainable) attention.
                        if ((pvar == 'atten') and (tid == 0)):
                            continue
                        trainable_variables.append(self.get_variable(pvar, lid, tid))
        else:
            for lid in range(len(self.shapes)):
                for pvar in prev_variables:
                    if ((pvar == 'atten') and (curr_task == 0)):
                        continue
                    trainable_variables.append(self.get_variable(pvar, lid, curr_task))
        if head:
            head = self.heads[curr_task]
            trainable_variables.append(head.trainable_weights[0])
            trainable_variables.append(head.trainable_weights[1])
        return trainable_variables

    def get_body_weights(self, task_id=None):
        """Return body weights; for decomposed models, reconstruct the
        effective per-task weights as shared*sigmoid(mask)+adaptive for all
        tasks before `task_id`."""
        if (self.args.model in ['fedweit']):
            prev_weights = {}
            for lid in range(len(self.shapes)):
                prev_weights[lid] = {}
                sw = self.get_variable(var_type='shared', lid=lid).numpy()
                for tid in range(task_id):
                    prev_aw = self.get_variable(var_type='adaptive', lid=lid, tid=tid).numpy()
                    prev_mask = self.get_variable(var_type='mask', lid=lid, tid=tid).numpy()
                    prev_mask_sig = self.generate_mask(prev_mask).numpy()
                    # Effective weight = masked shared + task-adaptive part.
                    prev_weights[lid][tid] = ((sw * prev_mask_sig) + prev_aw)
            return prev_weights
        else:
            return self.model_body.get_weights()

    def set_body_weights(self, body_weights):
        """Write body weights (into the shared variables when decomposed)."""
        if (self.args.model in ['fedweit']):
            for (lid, wgt) in enumerate(body_weights):
                sw = self.get_variable('shared', lid)
                sw.assign(wgt)
        else:
            self.model_body.set_weights(body_weights)

    def switch_model_params(self, tid):
        """Point every decomposed layer at task `tid`'s variables."""
        for (lid, dlay) in self.decomposed_layers.items():
            dlay.sw = self.get_variable('shared', lid)
            dlay.aw = self.get_variable('adaptive', lid, tid)
            dlay.bias = self.get_variable('bias', lid, tid)
            dlay.mask = self.generate_mask(self.get_variable('mask', lid, tid))
            if (self.args.model == 'fedweit'):
                dlay.atten = self.get_variable('atten', lid, tid)
                dlay.aw_kb = self.get_variable('from_kb', lid, tid)

    def add_head(self, body):
        """Attach a fresh softmax head to `body` and return the full model."""
        head = tf_layers.Dense(self.args.num_classes, activation='softmax')
        body_out = body.output
        head_out = head(body_out)
        model = tf.keras.Model(inputs=body.input, outputs=head_out)
        self.heads.append(head)
        self.initial_heads_weights.append(head.get_weights())
        return model

    def build_lenet(self, initial_weights, decomposed=False):
        """Build the LeNet body once plus one headed model per task.

        Guarded by the instance lock since builds mutate shared lists.
        """
        self.lock.acquire()
        self.models = []
        self.model_body = self.build_lenet_body(initial_weights, decomposed=decomposed)
        self.set_body_weights(initial_weights)
        self.initial_body_weights = initial_weights
        for i in range(self.args.num_tasks):
            self.models.append(self.add_head(self.model_body))
        self.lock.release()

    def build_lenet_body(self, initial_weights=None, decomposed=False):
        """Build the LeNet feature extractor, decomposed or plain.

        Both variants follow conv->lrn->pool, conv->lrn->pool, flatten,
        dense(800), dense(500); after two stride-2 pools a 32x32 input
        yields 8*8*50 = 3200 flattened features, matching self.shapes.
        """
        if decomposed:
            self.init_decomposed_variables(initial_weights)
            tid = 0
            model = tf.keras.models.Sequential()
            model.add(tf_keras.Input(shape=self.input_shape))
            for lid in [0, 1]:
                self.decomposed_layers[self.lid] = self.conv_decomposed(lid, tid, filters=self.shapes[lid][(- 1)], kernel_size=(self.shapes[lid][0], self.shapes[lid][1]), strides=(1, 1), padding='same', acti='relu')
                model.add(self.decomposed_layers[self.lid])
                self.lid += 1
                # Local response normalization + pooling after each conv,
                # mirroring the plain body below.
                model.add(tf_layers.Lambda((lambda x: tf.nn.lrn(x, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75))))
                model.add(tf_layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
            model.add(tf_layers.Flatten())
            for lid in [2, 3]:
                self.decomposed_layers[self.lid] = self.dense_decomposed(lid, tid, units=self.shapes[lid][(- 1)], acti='relu')
                model.add(self.decomposed_layers[self.lid])
                self.lid += 1
        else:
            model = tf_models.Sequential()
            model.add(tf_layers.Conv2D(20, kernel_size=(5, 5), use_bias=True, activation='relu', padding='same', kernel_regularizer=tf_regularizers.l2(self.args.wd), input_shape=self.input_shape))
            model.add(tf_layers.Lambda((lambda x: tf.nn.lrn(x, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75))))
            model.add(tf_layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
            model.add(tf_layers.Conv2D(50, kernel_size=(5, 5), use_bias=True, activation='relu', padding='same', kernel_regularizer=tf_regularizers.l2(self.args.wd)))
            model.add(tf_layers.Lambda((lambda x: tf.nn.lrn(x, 4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75))))
            model.add(tf_layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
            model.add(tf_layers.Flatten())
            model.add(tf_layers.Dense(800, activation='relu', kernel_regularizer=tf_regularizers.l2(self.args.wd)))
            model.add(tf_layers.Dense(500, activation='relu', kernel_regularizer=tf_regularizers.l2(self.args.wd)))
        return model

    def conv_decomposed(self, lid, tid, filters, kernel_size, strides, padding, acti):
        """Create a decomposed conv layer wired to layer/task variables."""
        return DecomposedConv(name='layer_{}'.format(lid), filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, activation=acti, lambda_l1=self.args.lambda_l1, lambda_mask=self.args.lambda_mask, shared=self.get_variable('shared', lid), adaptive=self.get_variable('adaptive', lid, tid), from_kb=self.get_variable('from_kb', lid, tid), atten=self.get_variable('atten', lid, tid), bias=self.get_variable('bias', lid, tid), use_bias=True, mask=self.generate_mask(self.get_variable('mask', lid, tid)), kernel_regularizer=tf_regularizers.l2(self.args.wd))

    def dense_decomposed(self, lid, tid, units, acti):
        """Create a decomposed dense layer wired to layer/task variables."""
        return DecomposedDense(name='layer_{}'.format(lid), activation=acti, units=units, lambda_l1=self.args.lambda_l1, lambda_mask=self.args.lambda_mask, shared=self.get_variable('shared', lid), adaptive=self.get_variable('adaptive', lid, tid), from_kb=self.get_variable('from_kb', lid, tid), atten=self.get_variable('atten', lid, tid), bias=self.get_variable('bias', lid, tid), use_bias=True, mask=self.generate_mask(self.get_variable('mask', lid, tid)), kernel_regularizer=tf_regularizers.l2(self.args.wd))
class AnnotationTextEdit(QtWidgets.QTextEdit):
    """Compact text box for entering map-annotation text; shows a help
    tooltip on hover when the window's help mode is enabled."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the edit box compact.
        self.setMaximumHeight(70)

    def enterEvent(self, e):
        # Only pop the tooltip while the window's help mode is active.
        if self.window().showhelp is not True:
            return
        QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Annotation Text</h3>Enter the text that should be displayed as an annotation on the map.<p><ul><li>press <b>shift + enter</b> to add the annotation to the map!</li></ul><p>To enter LaTex symbols and equations, encapsulate the LaTex code in two $ symbols, e.g.: <code>$\\sqrt(x^2)=x$</code>')
class MemoryEfficientSwish(nn.Module):
    """Swish activation (x * sigmoid(x)) with a memory-efficient backward.

    Instead of letting autograd record the full x * sigmoid(x) graph, the
    custom Function saves only the input tensor and recomputes sigmoid(x)
    during backward, trading a little compute for activation memory.
    """

    class F(torch.autograd.Function):
        # Fix: torch.autograd.Function requires forward/backward to be
        # staticmethods (the non-static "legacy" style is rejected by
        # modern PyTorch); the decorators were missing here.
        @staticmethod
        def forward(ctx, x):
            # Save only the raw input; sigmoid is recomputed in backward.
            ctx.save_for_backward(x)
            return (x * torch.sigmoid(x))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
            return (grad_output * (sx * (1 + (x * (1 - sx)))))

    def forward(self, x):
        return self.F.apply(x)
class _DenseLayer(nn.Sequential):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
super(_DenseLayer, self).__init__()
self.add_module('norm.1', nn.BatchNorm3d(num_input_features))
self.add_module('relu.1', nn.ReLU(inplace=True))
self.add_module('conv.1', nn.Conv3d(num_input_features, (bn_size * growth_rate), kernel_size=1, stride=1, bias=False))
self.add_module('norm.2', nn.BatchNorm3d((bn_size * growth_rate)))
self.add_module('relu.2', nn.ReLU(inplace=True))
self.add_module('conv.2', nn.Conv3d((bn_size * growth_rate), growth_rate, kernel_size=3, stride=1, padding=1, bias=False))
self.drop_rate = drop_rate
def forward(self, x):
new_features = super(_DenseLayer, self).forward(x)
if (self.drop_rate > 0):
new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
return torch.cat([x, new_features], 1) |
class ResNet(nn.Module):
    """ResNet backbone with a 3x3 stem and no classification head.

    `block` is a residual block class exposing an `expansion` attribute and
    constructible as block(in_planes, planes, stride); `num_blocks` gives
    the block count for each of the four stages.  forward() returns the
    flattened pooled features.
    """

    def __init__(self, block, num_blocks, in_channel=3, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # He initialization for convs, unit/zero affine params for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual branch so every block starts
            # as an identity mapping.
            for module in self.modules():
                if isinstance(module, Bottleneck):
                    nn.init.constant_(module.bn3.weight, 0)
                elif isinstance(module, BasicBlock):
                    nn.init.constant_(module.bn2.weight, 0)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first one uses `stride`."""
        layers = []
        for per_block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, per_block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.maxpool(F.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        return torch.flatten(out, 1)
class WhisperConfig(PretrainedConfig):
    """Configuration container for a Whisper encoder-decoder model.

    Holds the architecture hyper-parameters (layer counts, hidden sizes),
    regularisation settings (dropout, layerdrop, SpecAugment masking) and the
    audio-classification head options, and forwards the generic token-id and
    generation options to ``PretrainedConfig``.
    """

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50257, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, **kwargs):
        # Vocabulary and input-feature sizes.
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        # Transformer topology.
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        # kept in sync with encoder_layers for generic HF model utilities
        self.num_hidden_layers = encoder_layers
        self.activation_function = activation_function
        # Regularisation.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Initialisation / embedding scaling / caching.
        self.init_std = init_std
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        # Sequence-length limits.
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio-classification head options.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # SpecAugment-style masking applied to the input features.
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)
class DistCrossEntropyFunc(torch.autograd.Function):
    """Cross-entropy over logits whose class dimension is sharded across ranks.

    Each rank holds a slice of the class dimension; the softmax statistics
    (per-row max and exp-sum) and the per-sample target probabilities are
    combined with all-reduce so the result equals cross-entropy over the full
    class set. ``logits`` is modified in place and reused as the softmax
    probabilities for the backward pass.
    """

    # BUG FIX: forward/backward of a torch.autograd.Function must be
    # @staticmethod (the non-static form is the removed legacy API).
    @staticmethod
    def forward(ctx, logits: torch.Tensor, label: torch.Tensor):
        """Return the mean negative log-likelihood over the local batch.

        Label entries of -1 mark samples whose target class lives on another
        rank; their contribution arrives via the all-reduce on ``loss``.
        """
        batch_size = logits.size(0)
        # Numerically stable softmax: subtract the global per-row maximum.
        (max_logits, _) = torch.max(logits, dim=1, keepdim=True)
        distributed.all_reduce(max_logits, distributed.ReduceOp.MAX)
        logits.sub_(max_logits)
        logits.exp_()
        sum_logits_exp = torch.sum(logits, dim=1, keepdim=True)
        distributed.all_reduce(sum_logits_exp, distributed.ReduceOp.SUM)
        # `logits` now holds the local slice of the softmax probabilities.
        logits.div_(sum_logits_exp)
        index = torch.where(label != (-1))[0]
        loss = torch.zeros(batch_size, 1, device=logits.device)
        loss[index] = logits[index].gather(1, label[index])
        distributed.all_reduce(loss, distributed.ReduceOp.SUM)
        ctx.save_for_backward(index, logits, label)
        # clamp avoids log(0) for probabilities that underflowed to zero.
        return loss.clamp_min_(1e-30).log_().mean() * (-1)

    @staticmethod
    def backward(ctx, loss_gradient):
        """Gradient of softmax cross-entropy: (p - one_hot(label)) / batch_size."""
        (index, logits, label) = ctx.saved_tensors
        batch_size = logits.size(0)
        one_hot = torch.zeros(size=[index.size(0), logits.size(1)], device=logits.device)
        one_hot.scatter_(1, label[index], 1)
        logits[index] -= one_hot
        logits.div_(batch_size)
        return (logits * loss_gradient.item()), None
class MaskBase(object):
    """Abstract base class for boolean masks over spectral-cube data.

    Subclasses implement ``_include``; exclusion, logical combination and
    filling/flattening helpers are derived from it.
    """
    # Python 2-style metaclass declaration; inert on Python 3 (kept as found).
    __metaclass__ = abc.ABCMeta

    def include(self, data=None, wcs=None, view=(), **kwargs):
        """Return a boolean array that is True where data is *included*."""
        self._validate_wcs(data, wcs, **kwargs)
        return self._include(data=data, wcs=wcs, view=view)

    def view(self, view=()):
        """Return the exclusion mask over ``view``."""
        return self.exclude(view=view)

    def _validate_wcs(self, new_data=None, new_wcs=None, **kwargs):
        # Hook for subclasses that need to check data/WCS consistency.
        pass

    def _include(self, data=None, wcs=None, view=()):
        # Abstract: subclasses return a boolean inclusion array here.
        pass

    def exclude(self, data=None, wcs=None, view=(), **kwargs):
        """Return a boolean array that is True where data is *excluded*."""
        self._validate_wcs(data, wcs, **kwargs)
        return self._exclude(data=data, wcs=wcs, view=view)

    def _exclude(self, data=None, wcs=None, view=()):
        return np.logical_not(self._include(data=data, wcs=wcs, view=view))

    def any(self):
        """True if any pixel is excluded by this mask."""
        return np.any(self.exclude())

    def _flattened(self, data, wcs=None, view=()):
        """Return the included values of ``data[view]`` as a flat array."""
        mask = self.include(data=data, wcs=wcs, view=view)
        if (isinstance(data, da.Array) and (not isinstance(mask, da.Array))):
            # Boolean-indexing a dask array requires a dask mask.
            mask = da.asarray(mask, name=str(uuid.uuid4()))
        return data[view][mask]

    def _filled(self, data, wcs=None, fill=np.nan, view=(), use_memmap=False, **kwargs):
        """Return ``data[view]`` with excluded values replaced by ``fill``."""
        # Promote to a float-compatible dtype so ``fill`` (default NaN) fits.
        dt = np.result_type(data.dtype, 0.0)
        if (use_memmap and (data.size > 0)):
            # Back the slice with a temporary memmap to limit peak memory use.
            ntf = tempfile.NamedTemporaryFile()
            sliced_data = np.memmap(ntf, mode='w+', shape=data[view].shape, dtype=dt)
            sliced_data[:] = data[view]
        else:
            sliced_data = data[view].astype(dt)
        ex = self.exclude(data=data, wcs=wcs, view=view, **kwargs)
        return np.ma.masked_array(sliced_data, mask=ex).filled(fill)

    def __and__(self, other):
        return CompositeMask(self, other, operation='and')

    def __or__(self, other):
        return CompositeMask(self, other, operation='or')

    def __xor__(self, other):
        return CompositeMask(self, other, operation='xor')

    def __invert__(self):
        return InvertedMask(self)

    # BUG FIX: these are attribute-style accessors — ``ndim`` below reads
    # ``self.shape`` as an attribute — so they must be properties, not
    # plain methods (calling len() on a bound method would fail).
    @property
    def shape(self):
        raise NotImplementedError('{0} mask classes do not have shape attributes.'.format(self.__class__.__name__))

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def size(self):
        return np.prod(self.shape)

    @property
    def dtype(self):
        return np.dtype('bool')

    def __getitem__(self):
        raise NotImplementedError('Slicing not supported by mask class {0}'.format(self.__class__.__name__))

    def quicklook(self, view, wcs=None, filename=None, use_aplpy=True, aplpy_kwargs={}):
        """Plot the 2-D inclusion mask with aplpy, falling back to matplotlib."""
        view_twod = self.include(view=view, wcs=wcs)
        if use_aplpy:
            if (wcs is not None):
                hdu = fits.PrimaryHDU(view_twod.astype(int), wcs.to_header())
            else:
                hdu = fits.PrimaryHDU(view_twod.astype(int))
            try:
                import aplpy
                FITSFigure = aplpy.FITSFigure(hdu, **aplpy_kwargs)
                FITSFigure.show_grayscale()
                FITSFigure.add_colorbar()
                if (filename is not None):
                    FITSFigure.save(filename)
            except (InconsistentAxisTypesError, ImportError):
                # BUG FIX: was ``use_aplpy = True``, which made the
                # matplotlib fallback below unreachable after a failure.
                use_aplpy = False
        if (not use_aplpy):
            from matplotlib import pyplot
            figure = pyplot.imshow(view_twod)
            if (filename is not None):
                figure.savefig(filename)

    def _get_new_wcs(self, unit, velocity_convention=None, rest_value=None):
        """Return a copy of the WCS with its spectral axis converted to ``unit``."""
        from .spectral_axis import convert_spectral_axis, determine_ctype_from_vconv
        out_ctype = determine_ctype_from_vconv(self._wcs.wcs.ctype[self._wcs.wcs.spec], unit, velocity_convention=velocity_convention)
        newwcs = convert_spectral_axis(self._wcs, unit, out_ctype, rest_value=rest_value)
        newwcs.wcs.set()
        return newwcs
    # Appending shared docs requires __doc__ to be a string; the docstring
    # added above fixes the TypeError that ``None += str`` would raise here.
    _get_new_wcs.__doc__ += with_spectral_unit_docs
class Skeleton():
    """Kinematic tree described by a per-joint parent index (-1 marks the root)."""

    def __init__(self, parents, joints_left, joints_right):
        assert len(joints_left) == len(joints_right)
        self._parents = np.array(parents)
        self._joints_left = joints_left
        self._joints_right = joints_right
        self._compute_metadata()

    def num_joints(self):
        """Number of joints in the skeleton."""
        return len(self._parents)

    def parents(self):
        """Array of parent indices, -1 for the root."""
        return self._parents

    def has_children(self):
        """Boolean array: True for joints that have at least one child."""
        return self._has_children

    def children(self):
        """List of child-index lists, one per joint."""
        return self._children

    def remove_joints(self, joints_to_remove):
        """Drop the given joints, re-wiring children to the closest kept ancestor.

        Returns the (old-numbering) indices of the joints that were kept.
        """
        valid_joints = [j for j in range(len(self._parents)) if j not in joints_to_remove]
        # Walk each joint's parent chain up past any removed ancestors.
        for i in range(len(self._parents)):
            while self._parents[i] in joints_to_remove:
                self._parents[i] = self._parents[self._parents[i]]
        # index_offsets[j] = how many removed joints precede (or equal) j,
        # i.e. how much index j shifts down in the new numbering.
        index_offsets = np.zeros(len(self._parents), dtype=int)
        new_parents = []
        for i, parent in enumerate(self._parents):
            if i not in joints_to_remove:
                new_parents.append(parent - index_offsets[parent])
            else:
                index_offsets[i:] += 1
        self._parents = np.array(new_parents)
        if self._joints_left is not None:
            self._joints_left = [j - index_offsets[j] for j in self._joints_left if j in valid_joints]
        if self._joints_right is not None:
            self._joints_right = [j - index_offsets[j] for j in self._joints_right if j in valid_joints]
        self._compute_metadata()
        return valid_joints

    def joints_left(self):
        """Indices of left-side joints."""
        return self._joints_left

    def joints_right(self):
        """Indices of right-side joints."""
        return self._joints_right

    def _compute_metadata(self):
        """Rebuild the has-children flags and per-joint child lists."""
        n = len(self._parents)
        self._has_children = np.zeros(n).astype(bool)
        self._children = [[] for _ in range(n)]
        for child, parent in enumerate(self._parents):
            if parent != (-1):
                self._has_children[parent] = True
                self._children[parent].append(child)
class StandaloneEditor(_KeyValueEditor):
    """Key/value editor persisted to ``<filename>.saved``.

    The on-disk format is pairs of lines (first line, then second line per
    entry); reading swaps each pair when building the rows.
    """

    # BUG FIX: restored @classmethod — the method takes ``cls`` but had no
    # decorator, so calling it on the class would have bound ``filename``
    # to ``cls``.
    @classmethod
    def load_values(cls, filename):
        """Return (second, first) line pairs read from ``filename``."""
        ret = []
        if os.path.exists(filename):
            # Close the file deterministically (was left open before).
            with open(filename, encoding='utf-8') as fileobj:
                lines = list(fileobj.readlines())
            for i in range(len(lines) // 2):
                ret.append((lines[i * 2 + 1].strip(), lines[i * 2].strip()))
        return ret

    def __init__(self, filename, title, initial=None, validator=None):
        self.filename = filename
        self.initial = initial or []
        super().__init__(title, validator)
        # Persist the model whenever the editor window is destroyed.
        connect_obj(self, 'destroy', self.write, True)

    def fill_values(self):
        """Populate the model from ``<filename>.saved``; fall back to ``initial``."""
        filename = self.filename + '.saved'
        if os.path.exists(filename):
            with open(filename, encoding='utf-8') as fileobj:
                lines = list(fileobj.readlines())
            lines.reverse()
            # Consume line pairs from the end so rows end up in file order.
            while len(lines) > 1:
                self.model.prepend(row=[lines.pop(1).strip(), lines.pop(0).strip()])
        if not len(self.model) and self.initial:
            for (k, v) in self.initial:
                self.model.append(row=[v.strip(), k.strip()])

    def write(self, create=True):
        """Persist the model to ``<filename>.saved``; OS errors are ignored."""
        try:
            if create:
                if not os.path.isdir(os.path.dirname(self.filename)):
                    os.makedirs(os.path.dirname(self.filename))
            with open(self.filename + '.saved', 'w', encoding='utf-8') as saved:
                for row in self.model:
                    saved.write(row[0] + '\n')
                    saved.write(row[1] + '\n')
        except OSError:
            pass
def show_transaction(tx: Transaction, *, parent: 'ElectrumWindow', desc=None, prompt_if_unsaved=False):
    """Open a dialog for ``tx``; show an error popup if it cannot be deserialized."""
    try:
        dialog = TxDialog(tx, parent=parent, desc=desc, prompt_if_unsaved=prompt_if_unsaved)
    except SerializationError as e:
        _logger.exception('unable to deserialize the transaction')
        parent.show_critical(_('Electrum was unable to deserialize the transaction:') + '\n' + str(e))
        return
    dialog.show()
class TMP4Datatypes(TMP4, TMP4HasTagsMixin):
    """Tag and datatype checks against the 'has-tags.m4a' fixture.

    The deprecated ``failUnless*`` assertion aliases (removed in Python 3.12)
    are replaced with their modern ``assert*`` equivalents.
    """

    original = os.path.join(DATA_DIR, 'has-tags.m4a')

    def test_has_freeform(self):
        key = '----:com.apple.iTunes:iTunNORM'
        self.assertIn(key, self.audio.tags)
        ff = self.audio.tags[key]
        self.assertEqual(ff[0].dataformat, AtomDataType.UTF8)
        self.assertEqual(ff[0].version, 0)

    def test_has_covr(self):
        self.assertIn('covr', self.audio.tags)
        covr = self.audio.tags['covr']
        self.assertEqual(len(covr), 2)
        self.assertEqual(covr[0].imageformat, MP4Cover.FORMAT_PNG)
        self.assertEqual(covr[1].imageformat, MP4Cover.FORMAT_JPEG)

    def test_pprint(self):
        text = self.audio.tags.pprint().splitlines()
        self.assertIn(u'ART=Test Artist', text)

    def test_get_padding(self):
        self.assertEqual(self.audio._padding, 1634)
def get_config_from_root(root: str) -> VersioneerConfig:
    """Read versioneer configuration from pyproject.toml or setup.cfg under ``root``."""
    root_pth = Path(root)
    pyproject_toml = root_pth / 'pyproject.toml'
    setup_cfg = root_pth / 'setup.cfg'
    section: Union[(Dict[(str, Any)], configparser.SectionProxy, None)] = None
    # Prefer [tool.versioneer] in pyproject.toml when a TOML parser is available.
    if pyproject_toml.exists() and have_tomllib:
        try:
            with open(pyproject_toml, 'rb') as fobj:
                section = tomllib.load(fobj)['tool']['versioneer']
        except (tomllib.TOMLDecodeError, KeyError) as e:
            print(f'Failed to load config from {pyproject_toml}: {e}')
            print('Try to load it from setup.cfg')
    if not section:
        parser = configparser.ConfigParser()
        with open(setup_cfg) as cfg_file:
            parser.read_file(cfg_file)
        # Side effect only: raises early when the mandatory VCS key is absent.
        parser.get('versioneer', 'VCS')
        section = parser['versioneer']
    cfg = VersioneerConfig()
    cfg.VCS = section['VCS']
    cfg.style = section.get('style', '')
    cfg.versionfile_source = cast(str, section.get('versionfile_source'))
    cfg.versionfile_build = section.get('versionfile_build')
    cfg.tag_prefix = cast(str, section.get('tag_prefix'))
    # Treat explicitly quoted-empty or missing prefixes as "no prefix".
    if cfg.tag_prefix in ("''", '""', None):
        cfg.tag_prefix = ''
    cfg.parentdir_prefix = section.get('parentdir_prefix')
    # SectionProxy offers a typed boolean getter; TOML dicts store bools directly.
    if isinstance(section, configparser.SectionProxy):
        cfg.verbose = section.getboolean('verbose')
    else:
        cfg.verbose = section.get('verbose')
    return cfg
class TestCPythonABI():
    """Tests for the debug/pymalloc/wide-unicode suffixes of ``tags._cpython_abis``."""

    # NOTE(review): the bare ``.parametrize(...)`` lines were invalid syntax —
    # clearly stripped ``@pytest.mark.parametrize`` decorators; restored.
    @pytest.mark.parametrize('py_debug,gettotalrefcount,result', [(1, False, True), (0, False, False), (None, True, True)])
    def test_debug(self, py_debug, gettotalrefcount, result, monkeypatch):
        config = {'Py_DEBUG': py_debug, 'WITH_PYMALLOC': 0, 'Py_UNICODE_SIZE': 2}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        if gettotalrefcount:
            monkeypatch.setattr(sys, 'gettotalrefcount', 1, raising=False)
        expected = [('cp37d' if result else 'cp37')]
        assert (tags._cpython_abis((3, 7)) == expected)

    def test_debug_file_extension(self, monkeypatch):
        config = {'Py_DEBUG': None}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        monkeypatch.delattr(sys, 'gettotalrefcount', raising=False)
        monkeypatch.setattr(tags, 'EXTENSION_SUFFIXES', {'_d.pyd'})
        assert (tags._cpython_abis((3, 8)) == ['cp38d', 'cp38'])

    @pytest.mark.parametrize('debug,expected', [(True, ['cp38d', 'cp38']), (False, ['cp38'])])
    def test__debug_cp38(self, debug, expected, monkeypatch):
        config = {'Py_DEBUG': debug}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._cpython_abis((3, 8)) == expected)

    @pytest.mark.parametrize('pymalloc,version,result', [(1, (3, 7), True), (0, (3, 7), False), (None, (3, 7), True), (1, (3, 8), False)])
    def test_pymalloc(self, pymalloc, version, result, monkeypatch):
        config = {'Py_DEBUG': 0, 'WITH_PYMALLOC': pymalloc, 'Py_UNICODE_SIZE': 2}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        base_abi = f'cp{version[0]}{version[1]}'
        expected = [((base_abi + 'm') if result else base_abi)]
        assert (tags._cpython_abis(version) == expected)

    @pytest.mark.parametrize('unicode_size,maxunicode,version,result', [(4, 1114111, (3, 2), True), (2, 65535, (3, 2), False), (None, 1114111, (3, 2), True), (None, 65535, (3, 2), False), (4, 1114111, (3, 3), False)])
    def test_wide_unicode(self, unicode_size, maxunicode, version, result, monkeypatch):
        config = {'Py_DEBUG': 0, 'WITH_PYMALLOC': 0, 'Py_UNICODE_SIZE': unicode_size}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        monkeypatch.setattr(sys, 'maxunicode', maxunicode)
        base_abi = ('cp' + tags._version_nodot(version))
        expected = [((base_abi + 'u') if result else base_abi)]
        assert (tags._cpython_abis(version) == expected)
class PyxParser(object):
    """Extracts include/cimport dependencies from a Cython .pyx source file."""

    def __init__(self, path, unit):
        self._path = path
        self._includes = []
        retargeted = os.path.join(unit.path(), os.path.basename(path))
        with open(path, 'rb') as f:
            (includes, induced, susp_includes) = self.parse_includes(f.readlines())
        # 'from X cimport a, b': X may be a module (-> X.pxd) or a package
        # directory (-> X/a.pxd per imported symbol, '*' skipped).
        for susp_incl in susp_includes:
            incl_path = unit.resolve(os.path.join(unit.path(), susp_incl[0]))
            if (not os.path.isdir(incl_path)):
                includes.append((susp_incl[0] + '.pxd'))
            else:
                for f in susp_incl[1]:
                    if (f != '*'):
                        includes.append((((susp_incl[0] + '/') + f) + '.pxd'))
        self._includes = (unit.resolve_include((([retargeted] + includes) + list(find_init_files(includes, unit)))) if includes else [])
        self._induced = (unit.resolve_include((([retargeted] + induced) + list(find_init_files(induced, unit)))) if induced else [])

    # BUG FIX: restored @staticmethod on the two helpers below — they are
    # invoked as self.parse_includes(lines) / PyxParser.get_perm_includes(),
    # so without the decorator the instance would be bound to the first
    # positional parameter.
    @staticmethod
    def get_perm_includes():
        """Cython utility C sources that any generated module may include."""
        where = 'contrib/tools/cython/Cython/Utility'
        includes = ['Buffer.c', 'CommonTypes.c', 'Exceptions.c', 'Generator.c', 'ModuleSetupCode.c', 'Overflow.c', 'StringTools.c', 'Builtins.c', 'CythonFunction.c', 'ExtensionTypes.c', 'ImportExport.c', 'ObjectHandling.c', 'Printing.c', 'TestUtilityLoader.c', 'Capsule.c', 'Embed.c', 'FunctionArguments.c', 'MemoryView_C.c', 'Optimize.c', 'Profile.c', 'TypeConversion.c']
        return [((where + '/') + x) for x in includes]

    @staticmethod
    def parse_includes(content):
        """Scan source lines; return (includes, induced deps, suspected 'from X cimport' specs)."""
        includes = PyxParser.get_perm_includes()
        induced = []
        susp_includes = []
        for line in content:
            line = line.lstrip()
            incl = INCLUDE_PATTERN.match(line)
            if incl:
                if incl.group(1):
                    includes.append(incl.group(1))
            else:
                ind = INDUCED_PATTERN.match(line)
                if (ind and ind.group(1)):
                    induced.append(ind.group(1))
                else:
                    def filter_known_inner_paths(p):
                        # Skip cimports resolved inside Cython itself.
                        return (p and (p.split('.')[0] not in ('libc', 'libcpp', 'cython', 'cpython')))
                    cimport = CIMPORT_PATTERN.match(line)
                    if cimport:
                        cimport_files = cimport.group(1)
                        cimport_files = [x.strip() for x in cimport_files.split(',')]
                        # Drop 'as alias' parts.
                        cimport_files = [x.split(' ')[0] for x in cimport_files]
                        for cimport_file in cimport_files:
                            if filter_known_inner_paths(cimport_file):
                                includes.append((cimport_file.replace('.', '/') + '.pxd'))
                    else:
                        from_cimport = FROM_CIMPORT_PATTERN.match(line)
                        if from_cimport:
                            cimport_source = from_cimport.group(1)
                            cimport_symbols = (from_cimport.group(2) or '')
                            cimport_symbols = [x.strip() for x in cimport_symbols.split(',')]
                            cimport_symbols = [x.split(' ')[0] for x in cimport_symbols]
                            if filter_known_inner_paths(cimport_source):
                                susp_includes.append((cimport_source.replace('.', '/'), cimport_symbols))
        return (includes, induced, susp_includes)

    def includes(self):
        """Resolved include dependencies."""
        return self._includes

    def induced_deps(self):
        """Induced C++ dependencies (always includes Python.h)."""
        return {'cpp': (['$S/contrib/tools/python/src/Include/Python.h'] + self._induced)}
# NOTE(review): the bare ``.parametrize(`` line was invalid syntax — a
# stripped ``@pytest.mark.parametrize`` decorator; restored.
@pytest.mark.parametrize('version,normalized_version', [('1!2.3.4.5.6a7.post8.dev9+local1.123.abc', '1!2.3.4.5.6a7.post8.dev9+local1.123.abc'), ('1.1RC1', '1.1rc1'), ('00', '0'), ('09000', '9000'), ('1.0+foo0100', '1.0+foo0100'), ('1.1.a1', '1.1a1'), ('1.1-a1', '1.1a1'), ('1.1_a1', '1.1a1'), ('1.1a.1', '1.1a1'), ('1.1a-1', '1.1a1'), ('1.1a_1', '1.1a1'), ('1.1alpha1', '1.1a1'), ('1.1beta2', '1.1b2'), ('1.1c3', '1.1rc3'), ('1.1pre4', '1.1rc4'), ('1.1preview5', '1.1rc5'), ('1.2a', '1.2a0'), ('1.2.post2', '1.2.post2'), ('1.2-post2', '1.2.post2'), ('1.2_post2', '1.2.post2'), ('1.2post.2', '1.2.post2'), ('1.2post-2', '1.2.post2'), ('1.2post_2', '1.2.post2'), ('1.0-r4', '1.0.post4'), ('1.0-rev4', '1.0.post4'), ('1.2.post', '1.2.post0'), ('1.0-1', '1.0.post1'), ('1.2.dev2', '1.2.dev2'), ('1.2-dev2', '1.2.dev2'), ('1.2_dev2', '1.2.dev2'), ('1.2dev.2', '1.2.dev2'), ('1.2dev-2', '1.2.dev2'), ('1.2dev_2', '1.2.dev2'), ('1.2.dev', '1.2.dev0'), ('1.0+ubuntu-1', '1.0+ubuntu.1'), ('1.0+ubuntu_1', '1.0+ubuntu.1'), ('v1.0', '1.0'), (' 1.0 ', '1.0'), ('\t1.0\t', '1.0'), ('\n1.0\n', '1.0'), ('\r\n1.0\r\n', '1.0'), ('\x0c1.0\x0c', '1.0'), ('\x0b1.0\x0b', '1.0')])
def test_to_string_normalizes(version: str, normalized_version: str) -> None:
    """Version.parse(...).to_string() should emit the PEP 440 normalized form."""
    assert (Version.parse(version).to_string() == normalized_version)
def main():
    """Render a top-view image per YCB-Video class in parallel and show the tiled result."""
    models = morefusion.datasets.YCBVideoModels()
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # Class id 0 is skipped (no model rendered for it).
        futures = [
            executor.submit(_get_top_image, class_id)
            for class_id in range(1, models.n_class)
        ]
        viz = [future.result() for future in futures]
    viz = imgviz.tile(viz, shape=(4, 6))
    imgviz.io.pyglet_imshow(viz)
    imgviz.io.pyglet_run()
class FairseqDropout(nn.Module):
    """Dropout module that can optionally stay active at inference time.

    ``make_generation_fast_`` lets selected modules keep dropout enabled
    during generation via ``apply_during_inference``.
    """

    def __init__(self, p, module_name=None):
        super().__init__()
        self.p = p
        self.module_name = module_name
        self.apply_during_inference = False

    def forward(self, x, inplace: bool = False):
        """Apply dropout when p > 0 and we are training (or inference dropout is on)."""
        active = self.p > 0 and (self.training or self.apply_during_inference)
        if not active:
            return x
        return F.dropout(x, p=self.p, training=True, inplace=inplace)

    def make_generation_fast_(self, name: str, retain_dropout: bool = False, retain_dropout_modules: Optional[List[str]] = None, **kwargs):
        """Decide whether this module keeps dropout enabled during generation."""
        if not retain_dropout:
            logger.info('Disabling dropout for module: {}'.format(name))
            return
        if retain_dropout_modules is not None and self.module_name is None:
            # Cannot match against the retain list without a module name.
            logger.warning('Cannot enable dropout during inference for module {} because module_name was not set'.format(name))
        elif retain_dropout_modules is None or self.module_name in retain_dropout_modules:
            logger.info('Enabling dropout during inference for module: {}'.format(name))
            self.apply_during_inference = True
class CPIBase(object):
    """Base class for SAGA CPI adaptor implementations.

    Holds the adaptor, a weak reference to the owning API object, the session
    and an optional task container.
    """

    def __init__(self, api, adaptor):
        self._session = None
        self._adaptor = adaptor
        self._cpi_cname = self.__class__.__name__
        self._logger = ru.Logger('radical.saga.cpi')
        # Weak reference: the API object owns the CPI, not the other way round.
        self._api = weakref.ref(api)
        self._container = None

    def _set_container(self, container=None):
        self._container = container

    def get_cpi_cname(self):
        return self._cpi_cname

    def get_api(self):
        # Dereference the weakref; returns None once the API object is gone.
        return self._api()

    def get_adaptor_name(self):
        return self._adaptor.get_name()

    def _set_session(self, session):
        self._session = session

    def get_session(self):
        return self._session

    # BUG FIX: two bare ``def session`` definitions (getter and setter
    # signatures) shadowed each other — clearly a stripped property/setter
    # pair; restored as a read/write property.
    @property
    def session(self):
        return self._session

    @session.setter
    def session(self, session):
        self._session = session
def analogy_singleseq_encoding_model(inputs, params, is_training, reuse):
    """Encode the A, B and C landmark sequences with one shared RNN encoder.

    All three sequences are encoded under the same 'seq_enc' variable scope
    (weights shared). B's encoder is initialised from A's final state, while
    A and C start from a None (default) initial state. When ``params`` has a
    ``content_dim`` attribute, the encoder's 'content' and 'style' outputs are
    collected for each sequence as well.

    Args:
        inputs: dict with '{A,B,C}_landmarks' and '{A,B,C}_lens' entries.
        params: hyper-parameter object (enc_model, enc_rnn_size,
            use_recurrent_dropout, recurrent_dropout_prob, ...).
        is_training: Python bool; enables recurrent dropout when configured.
        reuse: variable-scope reuse flag for the first encoder invocation.

    Returns:
        dict containing 'C_enc_state' and, if content/style splitting is
        enabled, '{A,B,C}_content' and '{A,B,C}_style'.
    """
    enc_cell_fn = NAME_TO_RNNCELL[params.enc_model]
    # keep_prob of 1.0 disables recurrent dropout outside of training.
    recurrent_dropout_prob = 1.0
    if is_training:
        recurrent_dropout_prob = params.recurrent_dropout_prob
    # This model variant does not support bidirectional encoding.
    assert (not params.use_bidirection_lstm)
    enc_cell = get_rnn_cell(enc_cell_fn, params.enc_rnn_size, use_dropout=(is_training and params.use_recurrent_dropout), keep_prob=recurrent_dropout_prob, is_bidir=False)
    singleseq_encoder = _get_network(SINGLESEQ_ENC_FN)
    outputs = dict()
    with tf.variable_scope('seq_enc', reuse=reuse):
        tmp_outputs = singleseq_encoder(None, inputs['A_landmarks'], inputs['A_lens'], enc_cell, params, is_training=is_training)
        enc_state = tmp_outputs['states']
        if hasattr(params, 'content_dim'):
            outputs['A_content'] = tmp_outputs['content']
            outputs['A_style'] = tmp_outputs['style']
    # reuse=True: B shares the encoder weights and starts from A's final state.
    with tf.variable_scope('seq_enc', reuse=True):
        tmp_outputs = singleseq_encoder(enc_state, inputs['B_landmarks'], inputs['B_lens'], enc_cell, params, is_training=is_training)
        if hasattr(params, 'content_dim'):
            outputs['B_content'] = tmp_outputs['content']
            outputs['B_style'] = tmp_outputs['style']
    # C is encoded from scratch (no initial state) with the same shared weights.
    with tf.variable_scope('seq_enc', reuse=True):
        tmp_outputs = singleseq_encoder(None, inputs['C_landmarks'], inputs['C_lens'], enc_cell, params, is_training=is_training)
        outputs['C_enc_state'] = tmp_outputs['states']
        if hasattr(params, 'content_dim'):
            outputs['C_content'] = tmp_outputs['content']
            outputs['C_style'] = tmp_outputs['style']
    return outputs
def should_do_dim_bucketing(embedding_tables: List[ShardedEmbeddingTable]) -> bool:
    """Return True when any table uses the FUSED_UVM_CACHING kernel and the
    prefetch-sparse-dist pipeline is enabled for the tables.

    Raises:
        AssertionError: if only a subset of the tables has the
            prefetch pipeline enabled (it must be all or none).
    """
    table_pipeline_count = 0
    for table in embedding_tables:
        if table.fused_params is not None and 'prefetch_pipeline' in table.fused_params and table.fused_params['prefetch_pipeline']:
            table_pipeline_count += 1
    if table_pipeline_count > 0 and table_pipeline_count != len(embedding_tables):
        # BUG FIX: the AssertionError was constructed but never raised,
        # silently accepting an inconsistent pipeline configuration.
        raise AssertionError(f'Only {table_pipeline_count} tables have prefetch-sparse-dist pipeline. It should be all {len(embedding_tables)} tables.')
    for table in embedding_tables:
        if table.compute_kernel == EmbeddingComputeKernel.FUSED_UVM_CACHING and table_pipeline_count:
            return True
    return False
class TensorPartContainer():
    """Splits a set of local tensors into flat parts assigned to peers.

    Each peer receives a contiguous share of the concatenated tensor data
    proportional to its entry in ``peer_fractions``. Input parts are handed
    out (optionally compressed) for all-reduce, processed parts are
    registered back per peer in order, and ``iterate_output_tensors``
    reassembles them into tensors with the original shapes.
    """

    def __init__(self, tensors: Sequence[torch.Tensor], peer_fractions: Sequence[float], compression: CompressionBase=NoCompression(), part_size_bytes: int=DEFAULT_PART_SIZE_BYTES, tensor_infos: Optional[Sequence[CompressionInfo]]=None, prefetch: int=5):
        if (tensor_infos is None):
            tensor_infos = tuple((CompressionInfo.from_tensor(x, key=i) for (i, x) in enumerate(tensors)))
        assert (len(tensor_infos) == len(tensors)), 'compression types do not match the number of tensors'
        (self.local_tensors, self.peer_fractions, self.group_size) = (tensors, peer_fractions, len(peer_fractions))
        (self.compression, self.part_size_bytes, self.tensor_infos) = (compression, part_size_bytes, tensor_infos)
        self.total_size = sum((tensor.numel() for tensor in tensors))
        self.prefetch = prefetch
        # Per-peer queues: (part, info) pairs to send out / processed parts received.
        self._input_parts_by_peer = [deque() for _ in range(self.group_size)]
        self._output_parts_by_peer = [deque() for _ in range(self.group_size)]
        self._inputs_consumed_by_peer = [False for _ in range(self.group_size)]
        self._output_part_available = [asyncio.Event() for _ in range(self.group_size)]
        self._outputs_registered_by_peer = [0 for _ in range(self.group_size)]
        self._outputs_consumed = False
        self.finished = asyncio.Event()
        self.num_parts_by_tensor = []
        current_length = 0
        current_peer_index = 0
        # Cumulative boundaries (in elements) of each peer's share of the data.
        pivots = ((np.cumsum(peer_fractions) / np.sum(peer_fractions)) * self.total_size).astype(np.int64)
        pivots[(- 1)] = self.total_size
        for (tensor, info) in zip(self.local_tensors, self.tensor_infos):
            bytes_per_value = (tensor.element_size() * compression.estimate_compression_ratio(info))
            part_size_values = int((part_size_bytes / bytes_per_value))
            tensor_parts = tensor.detach().view((- 1)).split(part_size_values)
            self.num_parts_by_tensor.append(len(tensor_parts))
            for (part_index, part) in enumerate(tensor_parts):
                part_info = info.get_part(part_index, part_size_values)
                if ((current_length + len(part)) > pivots[current_peer_index]):
                    # Part straddles one or more peer boundaries: assign the
                    # whole part to the peer with the largest overlap rather
                    # than splitting it.
                    prev_peer_index = current_peer_index
                    peer_intersections = [(pivots[current_peer_index] - current_length)]
                    while ((current_length + len(part)) > pivots[current_peer_index]):
                        current_peer_index += 1
                        current_peer_part_end = min((current_length + len(part)), pivots[current_peer_index])
                        peer_intersections.append((current_peer_part_end - pivots[(current_peer_index - 1)]))
                    assigned_peer_index = (prev_peer_index + np.argmax(peer_intersections))
                    self._input_parts_by_peer[assigned_peer_index].append((part, part_info))
                else:
                    self._input_parts_by_peer[current_peer_index].append((part, part_info))
                current_length += len(part)
        assert (current_length == self.total_size)
        self.num_parts_by_peer = tuple((len(parts) for parts in self._input_parts_by_peer))

    # BUG FIX: the bare ``_grad()`` calls here (NameError as written) were
    # stripped ``@torch.no_grad()`` decorators; restored on this method and
    # on ``iterate_input_parts_for``.
    @torch.no_grad()
    def get_raw_input_parts(self, peer_index: int) -> Tuple[(torch.Tensor, ...)]:
        """Return (and deallocate) the raw, uncompressed input parts for one peer."""
        assert (not self._inputs_consumed_by_peer[peer_index]), 'input parts of a given peer are already deallocated.'
        self._inputs_consumed_by_peer[peer_index] = True
        input_parts = tuple((part for (part, compression) in self._input_parts_by_peer[peer_index]))
        self._input_parts_by_peer[peer_index].clear()
        return input_parts

    @torch.no_grad()
    async def iterate_input_parts_for(self, peer_index: int) -> AsyncIterator[runtime_pb2.Tensor]:
        """Yield compressed input parts for one peer, compressing in an executor with prefetch."""
        assert (not self._inputs_consumed_by_peer[peer_index]), 'input parts of a given peer are already deallocated.'
        self._inputs_consumed_by_peer[peer_index] = True

        async def _aiterate_parts():
            for _ in range(self.num_parts_by_peer[peer_index]):
                (yield self._input_parts_by_peer[peer_index].popleft())
        async for serialized_part in amap_in_executor((lambda x_and_info: self.compression.compress(*x_and_info)), _aiterate_parts(), max_prefetch=self.prefetch):
            (yield serialized_part)

    def register_processed_part(self, peer_index: int, part_index: int, part: torch.Tensor):
        """Accept a processed part from a peer; parts must arrive strictly in order."""
        if (part_index != self._outputs_registered_by_peer[peer_index]):
            raise ValueError(f'Could not register part #{part_index} from peer #{peer_index}, expected part index: {self._outputs_registered_by_peer[peer_index]}')
        self._output_parts_by_peer[peer_index].append(part)
        self._outputs_registered_by_peer[peer_index] += 1
        self._output_part_available[peer_index].set()

    async def iterate_output_tensors(self) -> AsyncIterable[torch.Tensor]:
        """Reassemble registered parts into tensors shaped like the local tensors."""
        assert (not self._outputs_consumed), 'output tensors are already iterated and no longer available.'
        self._outputs_consumed = True
        peer_index = num_parts_processed = 0
        for tensor_index in range(len(self.local_tensors)):
            tensor_parts = []
            while (len(tensor_parts) < self.num_parts_by_tensor[tensor_index]):
                if (num_parts_processed >= self.num_parts_by_peer[peer_index]):
                    # Exhausted this peer's share; move on to the next peer.
                    num_parts_processed = 0
                    peer_index += 1
                    continue
                if (not self._output_parts_by_peer[peer_index]):
                    # Wait for the next part (or for termination).
                    self._output_part_available[peer_index].clear()
                    (await self._output_part_available[peer_index].wait())
                    if self.finished.is_set():
                        raise AllreduceException('All-reduce was terminated during iteration.')
                tensor_parts.append(self._output_parts_by_peer[peer_index].popleft())
                num_parts_processed += 1
            tensor = torch.cat(tensor_parts)
            del tensor_parts
            (yield tensor.reshape(self.local_tensors[tensor_index].shape))

    def __del__(self):
        self.finalize()

    def finalize(self):
        """Release all buffered parts and unblock any pending waiters."""
        if (not self.finished.is_set()):
            for peer_index in range(self.group_size):
                self._inputs_consumed_by_peer[peer_index] = True
                self._input_parts_by_peer[peer_index].clear()
                self._output_parts_by_peer[peer_index].clear()
                self._output_part_available[peer_index].set()
            self._outputs_consumed = True
            self.finished.set()
# NOTE(review): the following bare lines look like decorator residue whose
# '@...' prefixes were stripped (route registration, cache control, required
# query parameters, auth handling); left untouched pending confirmation
# against the original source.
('/oauth/authorize', methods=['GET'])
_cache
_required('client_id')
_required('redirect_uri')
_required('scope')
_auth_or_cookie
def request_authorization_code():
    """Handle the OAuth authorization endpoint.

    If the current user has not yet granted the requested scopes, render the
    consent page; otherwise issue either an access token (implicit flow,
    response_type='token') or an authorization code.
    """
    provider = FlaskAuthorizationProvider()
    response_type = request.args.get('response_type', 'code')
    client_id = request.args.get('client_id', None)
    redirect_uri = request.args.get('redirect_uri', None)
    scope = request.args.get('scope', None)
    state = request.args.get('state', None)
    # No authenticated user, or the user has not granted these scopes yet:
    # validate the request and render the consent page.
    if ((not current_user.is_authenticated) or (not provider.validate_has_scopes(client_id, current_user.db_user().username, scope))):
        if (not provider.validate_redirect_uri(client_id, redirect_uri)):
            current_app = provider.get_application_for_client_id(client_id)
            if (not current_app):
                abort(404)
            # Redirect back to the app's registered URI with an OAuth error.
            return provider._make_redirect_error_response(current_app.redirect_uri, 'redirect_uri_mismatch')
        scope_info = scopes.get_scope_information(scope)
        if (not scope_info):
            abort(404)
            return
        oauth_app = provider.get_application_for_client_id(client_id)
        app_email = (oauth_app.avatar_email or oauth_app.organization.email)
        oauth_app_view = {'name': oauth_app.name, 'description': oauth_app.description, 'url': oauth_app.application_uri, 'avatar': json.dumps(avatar.get_data(oauth_app.name, app_email, 'app')), 'organization': {'name': oauth_app.organization.username, 'avatar': json.dumps(avatar.get_data_for_org(oauth_app.organization))}}
        has_dangerous_scopes = any([check_scope['dangerous'] for check_scope in scope_info])
        return render_page_template_with_routedata('oauthorize.html', scopes=scope_info, has_dangerous_scopes=has_dangerous_scopes, application=oauth_app_view, enumerate=enumerate, response_type=response_type, client_id=client_id, redirect_uri=redirect_uri, scope=scope, csrf_token_val=generate_csrf_token(), state=state)
    # Already authorized: hand out a token (implicit flow) or an auth code.
    if (response_type == 'token'):
        return provider.get_token_response(response_type, client_id, redirect_uri, scope=scope, state=state)
    else:
        return provider.get_authorization_code(response_type, client_id, redirect_uri, scope=scope, state=state)
# NOTE(review): the bare '_on_failure' and '.parametrize' lines were stripped
# decorators (the latter were invalid syntax); restored as raise_on_failure
# and pytest.mark.parametrize — confirm the first against the original source.
@raise_on_failure
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_channel_with_self(raiden_network: List[RaidenService], settle_timeout, token_addresses):
    """Opening a payment channel with one's own address must be rejected."""
    (app0,) = raiden_network
    registry_address = app0.default_registry.address
    token_address = token_addresses[0]
    current_chanels = views.list_channelstate_for_tokennetwork(views.state_from_raiden(app0), registry_address, token_address)
    assert (not current_chanels)
    token_network_address = app0.default_registry.get_token_network(token_address, BLOCK_ID_LATEST)
    assert token_network_address, 'the token must be registered by the fixtures'
    token_network0 = app0.proxy_manager.token_network(token_network_address, BLOCK_ID_LATEST)
    with pytest.raises(SamePeerAddress):
        token_network0.new_netting_channel(partner=app0.address, settle_timeout=settle_timeout, given_block_identifier=BLOCK_ID_LATEST)
def test_update_empty_directory_blocklist(ad_blocker, config_stub, empty_dir, caplog):
    """An enabled blocklist pointing at an empty directory must read zero filters."""
    tmpdir_url = QUrl.fromLocalFile(str(empty_dir)).toString()
    config_stub.val.content.blocking.adblock.lists = [tmpdir_url]
    config_stub.val.content.blocking.enabled = True
    config_stub.val.content.blocking.whitelist = None
    # Sanity check: the directory really is empty.
    assert not list(empty_dir.iterdir())
    with caplog.at_level(logging.INFO):
        ad_blocker.adblock_update()
    assert_only_one_success_message(caplog.messages)
    assert caplog.messages[-1] == 'braveadblock: Filters successfully read from 0 sources.'
    assert_none_blocked(ad_blocker)
class SawyerDoorOpenV2Policy(Policy):
    """Scripted expert policy for the Sawyer door-open task."""

    # BUG FIX: these decorators were missing. Without @staticmethod,
    # ``self._parse_obs(obs)`` in get_action would bind ``self`` to ``obs``.
    # The ``assert_fully_parsed`` decorator matches the stray ``_fully_parsed``
    # fragment that was left in this class body.
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Slice the flat observation vector into named components."""
        return {'hand_pos': obs[:3], 'gripper': obs[3], 'door_pos': obs[4:7], 'unused_info': obs[7:]}

    def get_action(self, obs):
        """Return the 4-dim action (xyz delta + grab effort) for ``obs``."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        # Gripper effort is constant for this task.
        action['grab_effort'] = 1.0
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Pick the next hand target based on distance to the door handle."""
        pos_curr = o_d['hand_pos']
        pos_door = o_d['door_pos']
        pos_door[0] -= 0.05
        # Far away in the XY plane: hover in front of and above the handle.
        if (np.linalg.norm((pos_curr[:2] - pos_door[:2])) > 0.12):
            return (pos_door + np.array([0.06, 0.02, 0.2]))
        # Aligned in XY but not at handle height yet: move to handle level.
        elif (abs((pos_curr[2] - pos_door[2])) > 0.04):
            return (pos_door + np.array([0.06, 0.02, 0.0]))
        else:
            return pos_door
class NullifyContractUseCase(BaseUseCaseWithNotifications):
    """Nullify a contract and fan out the configured notifications."""

    # Observers triggered after a successful nullification.
    notifications = [
        notifications.NullifiedContractLogger(),
        notifications.RefreshSponsorshipsCache(),
    ]

    def execute(self, contract, **kwargs):
        """Mark ``contract`` as nullified, then notify listeners."""
        contract.nullify()
        self.notify(contract=contract, request=kwargs.get('request'))
def get_formatter(action_type, options):
    """Look up and instantiate the formatter class for the given action type.

    Raises TwitterError when either the action type or the requested format
    is unknown.
    """
    available = formatters.get(action_type)
    if not available:
        raise TwitterError('There was an error finding a class of formatters for your type (%s)' % action_type)
    formatter_cls = available.get(options['format'])
    if not formatter_cls:
        raise TwitterError("Unknown formatter '%s' for status actions" % options['format'])
    return formatter_cls()
_test
def test_merge_mask_3d():
    """Masked 3D tensors can be merged with mode='concat' and trained on."""
    def rand(*shape):
        # Random 0/1 int32 tensor; zeros exercise mask_zero masking.
        return np.asarray(np.random.random(shape) > 0.5, dtype='int32')

    input_a = layers.Input(shape=(3,), dtype='int32')
    input_b = layers.Input(shape=(3,), dtype='int32')
    # Shared embedding/RNN so both branches produce masked 3D sequences.
    embedding = layers.Embedding(3, 4, mask_zero=True)
    rnn = layers.SimpleRNN(3, return_sequences=True)
    rnn_a = rnn(embedding(input_a))
    rnn_b = rnn(embedding(input_b))
    merged_concat = legacy_layers.merge([rnn_a, rnn_b], mode='concat', concat_axis=-1)
    model = models.Model([input_a, input_b], [merged_concat])
    model.compile(loss='mse', optimizer='sgd')
    model.fit([rand(2, 3), rand(2, 3)], [rand(2, 3, 6)])
class TestMerge(unittest.TestCase):
    """Tests for Metadata.merge: inner join on IDs with column concatenation."""

    def setUp(self):
        # Register the dummy plugin so Artifact.import_data works in tests.
        get_dummy_plugin()

    def test_merging_nothing(self):
        """Calling merge() with no arguments is an error."""
        md = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        with self.assertRaisesRegex(ValueError, 'At least one Metadata.*nothing to merge'):
            md.merge()

    def test_merging_two(self):
        """Two Metadata with identical IDs merge into their column union."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [7, 8, 9], 'd': [10, 11, 12]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        obs = md1.merge(md2)
        exp = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9], 'd': [10, 11, 12]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        self.assertEqual(obs, exp)

    def test_merging_three(self):
        """merge() accepts multiple Metadata objects at once."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [7, 8, 9], 'd': [10, 11, 12]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md3 = Metadata(pd.DataFrame({'e': [13, 14, 15], 'f': [16, 17, 18]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        obs = md1.merge(md2, md3)
        exp = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9], 'd': [10, 11, 12], 'e': [13, 14, 15], 'f': [16, 17, 18]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        self.assertEqual(obs, exp)

    def test_merging_unaligned_indices(self):
        """Rows are aligned by ID even when index order differs."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [9, 8, 7], 'd': [12, 11, 10]}, index=pd.Index(['id3', 'id2', 'id1'], name='id')))
        md3 = Metadata(pd.DataFrame({'e': [13, 15, 14], 'f': [16, 18, 17]}, index=pd.Index(['id1', 'id3', 'id2'], name='id')))
        obs = md1.merge(md2, md3)
        exp = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9], 'd': [10, 11, 12], 'e': [13, 14, 15], 'f': [16, 17, 18]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        self.assertEqual(obs, exp)

    def test_inner_join(self):
        """Only IDs present in every Metadata survive the merge."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [7, 8, 9], 'd': [10, 11, 12]}, index=pd.Index(['id2', 'X', 'Y'], name='id')))
        md3 = Metadata(pd.DataFrame({'e': [13, 14, 15], 'f': [16, 17, 18]}, index=pd.Index(['X', 'id3', 'id2'], name='id')))
        obs = md1.merge(md2, md3)
        exp = Metadata(pd.DataFrame({'a': [2], 'b': [5], 'c': [7], 'd': [10], 'e': [15], 'f': [18]}, index=pd.Index(['id2'], name='id')))
        self.assertEqual(obs, exp)
        obs = md1.merge(md3)
        exp = Metadata(pd.DataFrame({'a': [2, 3], 'b': [5, 6], 'e': [15, 14], 'f': [18, 17]}, index=pd.Index(['id2', 'id3'], name='id')))
        self.assertEqual(obs, exp)

    def test_index_and_column_merge_order(self):
        """Result uses the first Metadata's ID order and left-to-right columns."""
        md1 = Metadata(pd.DataFrame([[1], [2], [3], [4]], index=pd.Index(['id1', 'id2', 'id3', 'id4'], name='id'), columns=['a']))
        md2 = Metadata(pd.DataFrame([[5], [6], [7]], index=pd.Index(['id4', 'id3', 'id1'], name='id'), columns=['b']))
        md3 = Metadata(pd.DataFrame([[8], [9], [10]], index=pd.Index(['id1', 'id4', 'id3'], name='id'), columns=['c']))
        obs = md1.merge(md2, md3)
        exp = Metadata(pd.DataFrame([[1, 7, 8], [3, 6, 10], [4, 5, 9]], index=pd.Index(['id1', 'id3', 'id4'], name='id'), columns=['a', 'b', 'c']))
        self.assertEqual(obs, exp)
        obs = md2.merge(md1, md3)
        exp = Metadata(pd.DataFrame([[5, 4, 9], [6, 3, 10], [7, 1, 8]], index=pd.Index(['id4', 'id3', 'id1'], name='id'), columns=['b', 'a', 'c']))
        self.assertEqual(obs, exp)

    def test_id_column_only(self):
        """Column-less Metadata merge to the ID intersection, still column-less."""
        md1 = Metadata(pd.DataFrame({}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({}, index=pd.Index(['id2', 'X', 'id1'], name='id')))
        md3 = Metadata(pd.DataFrame({}, index=pd.Index(['id1', 'id3', 'id2'], name='id')))
        obs = md1.merge(md2, md3)
        exp = Metadata(pd.DataFrame({}, index=pd.Index(['id1', 'id2'], name='id')))
        self.assertEqual(obs, exp)

    def test_merged_id_column_name(self):
        """Differing ID column names collapse to the default 'id'."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2]}, index=pd.Index(['id1', 'id2'], name='sample ID')))
        md2 = Metadata(pd.DataFrame({'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='feature ID')))
        obs = md1.merge(md2)
        exp = Metadata(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
        self.assertEqual(obs, exp)

    def test_merging_preserves_column_types(self):
        """Numeric/categorical typing of each source column survives the merge."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [np.nan, np.nan, np.nan]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': ['1', 'foo', '3'], 'd': np.array([np.nan, np.nan, np.nan], dtype=object)}, index=pd.Index(['id1', 'id4', 'id3'], name='id')))
        obs = md1.merge(md2)
        exp = Metadata(pd.DataFrame({'a': [1, 3], 'b': [np.nan, np.nan], 'c': ['1', '3'], 'd': np.array([np.nan, np.nan], dtype=object)}, index=pd.Index(['id1', 'id3'], name='id')))
        self.assertEqual(obs, exp)
        self.assertEqual(obs.columns['a'].type, 'numeric')
        self.assertEqual(obs.columns['b'].type, 'numeric')
        self.assertEqual(obs.columns['c'].type, 'categorical')
        self.assertEqual(obs.columns['d'].type, 'categorical')

    def test_no_artifacts(self):
        """Merging artifact-free Metadata yields no artifacts."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2]}, index=pd.Index(['id1', 'id2'], name='id')))
        md2 = Metadata(pd.DataFrame({'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
        metadata = md1.merge(md2)
        self.assertEqual(metadata.artifacts, ())

    def test_with_artifacts(self):
        """Artifact provenance is accumulated in merge-argument order."""
        artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
        artifact2 = Artifact.import_data('Mapping', {'d': '4'})
        md_from_artifact1 = artifact1.view(Metadata)
        md_from_artifact2 = artifact2.view(Metadata)
        md_no_artifact = Metadata(pd.DataFrame({'c': ['3', '42']}, index=pd.Index(['0', '1'], name='id')))
        obs_md = md_from_artifact1.merge(md_no_artifact, md_from_artifact2)
        exp_df = pd.DataFrame({'a': '1', 'b': '2', 'c': '3', 'd': '4'}, index=pd.Index(['0'], name='id'))
        exp_md = Metadata(exp_df)
        exp_md._add_artifacts((artifact1, artifact2))
        self.assertEqual(obs_md, exp_md)
        self.assertEqual(obs_md.artifacts, (artifact1, artifact2))

    def test_disjoint_indices(self):
        """Merging Metadata with no shared IDs is an error."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=pd.Index(['id1', 'id2', 'id3'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [7, 8, 9], 'd': [10, 11, 12]}, index=pd.Index(['X', 'Y', 'Z'], name='id')))
        with self.assertRaisesRegex(ValueError, 'no IDs shared'):
            md1.merge(md2)

    def test_duplicate_columns(self):
        """Overlapping column names across Metadata are rejected."""
        md1 = Metadata(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
        md2 = Metadata(pd.DataFrame({'c': [5, 6], 'b': [7, 8]}, index=pd.Index(['id1', 'id2'], name='id')))
        with self.assertRaisesRegex(ValueError, "columns overlap: 'b'"):
            md1.merge(md2)

    def test_duplicate_columns_self_merge(self):
        """Merging a Metadata with itself trips the overlap check on every column."""
        md = Metadata(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=pd.Index(['id1', 'id2'], name='id')))
        with self.assertRaisesRegex(ValueError, "columns overlap: 'a', 'b'"):
            md.merge(md)
.parametrize('required_fixtures', [['git/github.com/demo/pyproject-demo']])
def test_add_directory_with_poetry(app: PoetryTestApplication, repo: TestRepository, tester: CommandTester) -> None:
    """`add <local directory>` resolves a pyproject-based project plus its deps."""
    repo.add_package(get_package('pendulum', '1.4.4'))
    path = '../git/github.com/demo/pyproject-demo'
    tester.execute(path)
    # The lock output prints the resolved absolute POSIX path of the directory.
    demo_path = app.poetry.file.path.parent.joinpath(path).resolve().as_posix()
    expected = f'''
Updating dependencies
Resolving dependencies...
Package operations: 2 installs, 0 updates, 0 removals
- Installing pendulum (1.4.4)
- Installing demo (0.1.2 {demo_path})
Writing lock file
'''
    assert tester.io.fetch_output() == expected
    assert isinstance(tester.command, InstallerCommand)
    assert tester.command.installer.executor.installations_count == 2
class TokenSendLayout(QGridLayout):
    """Grid layout of the token-send form.

    Shows the sender's bound address (read-only), collects recipient,
    amount and optional gas settings, and wires Preview/Cancel/Send buttons
    to ``send_callback``.
    """

    def __init__(self, dialog, token, send_callback):
        # dialog: parent dialog (used for Cancel and error message boxes).
        # token: token descriptor; this layout reads bind_addr, symbol,
        #        decimals and balance from it.
        # send_callback: invoked as send_callback(hash160, amount, gas_limit,
        #                gas_price, preview) — see send() below.
        QGridLayout.__init__(self)
        self.setSpacing(8)
        self.setColumnStretch(3, 1)
        self.dialog = dialog
        self.token = token
        self.send_callback = send_callback
        # Row 1: read-only display of the sender's own token address.
        address_lb = QLabel(_('My Address:'))
        self.address_e = QLineEdit()
        self.address_e.setMinimumWidth(300)
        self.address_e.setReadOnly(True)
        self.address_e.setText(token.bind_addr)
        self.addWidget(address_lb, 1, 0)
        self.addWidget(self.address_e, 1, 1, 1, (- 1))
        # Row 2: recipient address input.
        address_to_lb = QLabel(_('Pay to:'))
        self.address_to_e = QLineEdit()
        self.address_to_e.setMinimumWidth(300)
        self.addWidget(address_to_lb, 2, 0)
        self.addWidget(self.address_to_e, 2, 1, 1, (- 1))
        # Row 3: amount input, denominated in the token's own decimals.
        amount_lb = QLabel(_('Amount:'))
        self.amount_e = AmountEdit((lambda : self.token.symbol), False, None, self.token.decimals, 0)
        self.addWidget(amount_lb, 3, 0)
        self.addWidget(self.amount_e, 3, 1, 1, (- 1))
        # Row 4: optional gas limit / gas price overrides in a sub-layout.
        optional_lb = QLabel(_('Optional:'))
        self.addWidget(optional_lb, 4, 0)
        optional_widget = QWidget()
        optional_layout = QHBoxLayout()
        optional_layout.setContentsMargins(0, 0, 0, 0)
        optional_layout.setSpacing(0)
        gas_limit_lb = QLabel(_('gas limit: '))
        self.gas_limit_e = AmountEdit((lambda : ''), True, None, 0, 0)
        self.gas_limit_e.setText('75000')
        gas_price_lb = QLabel(_('gas price: '))
        self.gas_price_e = AmountEdit((lambda : ''), False, None, 8, 0)
        self.gas_price_e.setText('0.')
        optional_layout.addWidget(gas_limit_lb)
        optional_layout.addWidget(self.gas_limit_e)
        optional_layout.addStretch(1)
        optional_layout.addWidget(gas_price_lb)
        optional_layout.addWidget(self.gas_price_e)
        optional_layout.addStretch(0)
        optional_widget.setLayout(optional_layout)
        self.addWidget(optional_widget, 4, 1, 1, (- 1))
        # Row 5: Cancel / Preview / Send buttons; Send is the default action.
        self.preview_btn = QPushButton(_('Preview'))
        self.preview_btn.setDefault(False)
        self.preview_btn.clicked.connect(self.preview)
        self.cancel_btn = CancelButton(dialog)
        self.send_btn = QPushButton(_('Send'))
        self.send_btn.setDefault(True)
        self.send_btn.clicked.connect(self.send)
        buttons = Buttons(*[self.cancel_btn, self.preview_btn, self.send_btn])
        buttons.addStretch()
        self.addLayout(buttons, 5, 2, 2, (- 1))

    def parse_values(self):
        """Parse the edits into integers: (gas_limit, gas_price, amount).

        gas_limit is taken verbatim, gas_price is scaled by 1e8, and the
        amount by 10**token.decimals. Raises if the amount field is empty.
        """
        if (len(self.amount_e.text()) < 1):
            raise Exception('amount should not be empty')

        def parse_edit_value(edit, times=(10 ** 8)):
            # Convert a decimal edit value into integer base units.
            return int((edit.get_amount() * times))
        return (parse_edit_value(self.gas_limit_e, 1), parse_edit_value(self.gas_price_e), parse_edit_value(self.amount_e, (10 ** self.token.decimals)))

    def get_inputs(self):
        """Validate the form and return (hash160, amount, gas_limit, gas_price).

        Raises on unparsable values, insufficient balance, or an invalid
        recipient address.
        """
        try:
            (gas_limit, gas_price, amount) = self.parse_values()
        except (BaseException,) as e:
            # Re-raised unchanged; kept so parse failures surface to send().
            raise e
        if (self.token.balance < amount):
            raise Exception(_('token not enough'))
        address_to = self.address_to_e.text().strip()
        if is_b58_address(address_to):
            (addr_type, hash160) = b58_address_to_hash160(address_to)
            if (addr_type == constants.net.ADDRTYPE_P2PKH):
                # Only plain P2PKH recipients are accepted for token sends.
                hash160 = hash160.hex()
            else:
                raise Exception(_('invalid address to send to'))
        elif is_hash160(address_to):
            hash160 = address_to.lower()
        else:
            raise Exception(_('invalid address to send to'))
        return (hash160, amount, gas_limit, gas_price)

    def preview(self):
        """Run the send flow in preview-only mode."""
        self.send(preview=True)

    def send(self, preview=False):
        """Validate inputs and hand them to the send callback.

        Any failure is shown to the user via the parent dialog instead of
        propagating.
        """
        try:
            self.send_callback(*self.get_inputs(), preview)
        except BaseException as e:
            self.dialog.show_message(str(e))
class fashionmnist_dataset(Data.Dataset):
    """FashionMNIST with synthetic instance-dependent label noise.

    Noisy labels are generated once at construction time, then the data is
    split into train/validation partitions according to ``split_percentage``.
    """

    def __init__(self, train=True, transform=None, target_transform=None, noise_rate=0.2, split_percentage=0.9, seed=1, num_classes=10, feature_size=784, norm_std=0.1):
        self.transform = transform
        self.target_transform = target_transform
        self.train = train
        images = np.load('data/fashionmnist/train_images.npy')
        labels = np.load('data/fashionmnist/train_labels.npy')
        # The noise generator consumes (feature, label) tensor pairs.
        data = torch.from_numpy(images).float()
        targets = torch.from_numpy(labels)
        noisy_labels = tools.get_instance_noisy_label(noise_rate, zip(data, targets), targets, num_classes, feature_size, norm_std, seed)
        (self.train_data, self.val_data, self.train_labels, self.val_labels) = tools.data_split(images, noisy_labels, split_percentage, seed)

    def __getitem__(self, index):
        """Return the (transformed image, label) pair at ``index``."""
        if self.train:
            img, label = self.train_data[index], self.train_labels[index]
        else:
            img, label = self.val_data[index], self.val_labels[index]
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return img, label

    def __len__(self):
        return len(self.train_data) if self.train else len(self.val_data)
class PointLocator():
    """Deprecated wrapper delegating all point queries to BruteForcePointLocator."""

    def __init__(self, points):
        # The class is deprecated; warn users toward the replacement API.
        warnings.warn(('PointLocator ' + dep_msg), FutureWarning)
        self._locator = BruteForcePointLocator(points)

    def nearest(self, query_point):
        """Return the stored point nearest to ``query_point``."""
        return self._locator.nearest(query_point)

    def region(self, region_rect):
        """Return the stored points falling inside ``region_rect``."""
        return self._locator.region(region_rect)

    # Backwards-compatible alias for region().
    overlapping = region

    def polygon(self, polygon):
        """Return the stored points falling inside ``polygon``.

        NOTE(review): the original method body was missing entirely (a syntax
        error); delegation mirrors the sibling query methods — confirm
        against the upstream BruteForcePointLocator API.
        """
        return self._locator.polygon(polygon)

    def proximity(self, origin, r):
        """Return the stored points within distance ``r`` of ``origin``."""
        return self._locator.proximity(origin, r)
def main():
    """Convert COCO-Text annotations into training/validation instance files."""
    args = parse_args()
    root_path = args.root_path
    # (human-readable split name, split id, output file name)
    splits = [
        ('training', 'train', 'instances_training.json'),
        ('validation', 'val', 'instances_val.json'),
    ]
    for label, split, out_name in splits:
        print(f'Processing {label} set...')
        infos = collect_cocotext_info(root_path, split)
        convert_annotations(infos, osp.join(root_path, out_name))
    print('Finish')
class Entry(cpi_ns.entry.Entry, cpi_att.Attributes):
    """Adaptor-side advert entry CPI.

    Combines the namespace-entry and attributes CPIs; every operation below
    is an unimplemented stub (presumably to be filled in by a concrete
    adaptor — confirm against the adaptor framework's expectations).
    """

    def __init__(self, api, adaptor):
        # Keep an explicit handle to the namespace-entry base and delegate
        # construction to it.
        self._cpi_nsentry = super(Entry, self)
        self._cpi_nsentry.__init__(api, adaptor)

    def init_instance(self, url, flags, session):
        """Initialize this entry instance (stub)."""
        pass

    def init_instance_async(self, url, flags, session):
        """Asynchronous variant of init_instance (stub)."""
        pass

    def set_ttl(self, ttl, ttype=None):
        """Set the entry's time-to-live (stub)."""
        pass

    def set_ttl_async(self, ttl, ttype=None):
        """Asynchronous variant of set_ttl (stub)."""
        pass

    def get_ttl(self, ttype):
        """Return the entry's time-to-live (stub)."""
        pass

    def get_ttl_async(self, ttype):
        """Asynchronous variant of get_ttl (stub)."""
        pass

    def store_object(self, object, ttype):
        """Store an object under this entry (stub)."""
        pass

    def store_object_async(self, object, ttype):
        """Asynchronous variant of store_object (stub)."""
        pass

    def retrieve_object(self, ttype):
        """Retrieve the object stored under this entry (stub)."""
        pass

    def retrieve_object_async(self, ttype):
        """Asynchronous variant of retrieve_object (stub)."""
        pass

    def delete_object(self, ttype):
        """Delete the object stored under this entry (stub)."""
        pass

    def delete_object_async(self, ttype):
        """Asynchronous variant of delete_object (stub)."""
        pass
class TriStageLRScheduleConfig(FairseqDataclass):
    """Configuration for the tri-stage LR schedule: warmup -> hold -> decay.

    NOTE(review): presumably decorated/registered as a dataclass by the
    FairseqDataclass machinery — confirm the @dataclass decorator upstream.
    """

    # Linear warmup phase length, in updates.
    warmup_steps: int = field(default=0, metadata={'help': 'warmup the learning rate linearly for the first N updates'})
    # Constant-LR hold phase length, in updates.
    hold_steps: int = field(default=0, metadata={'help': 'steps in hold stage'})
    # Decay phase length, in updates.
    decay_steps: int = field(default=0, metadata={'help': 'steps in decay stages'})
    # Alternative to the explicit step counts above: fractions of max_update.
    phase_ratio: Optional[Tuple[(float, float, float)]] = field(default=None, metadata={'help': 'if set, automatically sets warmup/hold/decay steps to the ratio specified here from max_updates. the ratios must add up to 1.0'})
    # LR starts at init_lr_scale * lr and ends at final_lr_scale * lr.
    init_lr_scale: float = field(default=0.01, metadata={'help': 'initial learning rate scale during warmup phase'})
    final_lr_scale: float = field(default=0.01, metadata={'help': 'final learning rate scale'})
    # Interpolated from the global optimization config at resolve time.
    max_update: float = II('optimization.max_update')
    lr: List[float] = II('optimization.lr')
def main():
    """Two-stage training: caption pretraining, then task-specific fine-tuning.

    Each stage first tries to resume from a checkpoint and falls back to a
    fresh run when none exists.
    """
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    print('Setup Data')
    Train_dataset = PMC_QA_Dataset(data_args.img_dir, data_args.Train_csv_path, data_args.tokenizer_path, text_type='caption')
    Eval_dataset = PMC_QA_Dataset(data_args.img_dir, data_args.Eval_csv_path, data_args.tokenizer_path, text_type='caption')
    print('Setup Model')
    model = QA_model(model_args)
    run_name_root = training_args.run_name
    output_dir_root = training_args.output_dir
    # ---- Stage 1: caption pretraining ----
    training_args.run_name = (run_name_root + '_caption_pretrain')
    training_args.output_dir = (output_dir_root + '/caption_pretrain/')
    print('Start Pretraining')
    trainer = Trainer(model=model, train_dataset=Train_dataset, eval_dataset=Eval_dataset, args=training_args)
    try:
        trainer.train(resume_from_checkpoint=True)
    except Exception:
        # No checkpoint to resume from (fresh run): train from scratch.
        # (Was a bare ``except:``, which would also swallow KeyboardInterrupt.)
        trainer.train()
    trainer.save_state()
    # ---- Stage 2: fine-tune on the configured prediction target ----
    print('Start training')
    training_args.run_name = (((run_name_root + '_') + data_args.pred_type) + '_training')
    training_args.output_dir = (((output_dir_root + '/') + data_args.pred_type) + '_training/')
    Train_dataset.text_type = data_args.pred_type
    Eval_dataset.text_type = data_args.pred_type
    trainer = Trainer(model=model, train_dataset=Train_dataset, eval_dataset=Eval_dataset, args=training_args)
    try:
        trainer.train(resume_from_checkpoint=True)
    except Exception:
        # Mirror stage 1: fall back to a fresh run when resuming fails.
        # BUG FIX: the original ran BOTH trainings unconditionally, so a
        # successful resumed run was immediately discarded by a scratch run.
        trainer = Trainer(model=model, train_dataset=Train_dataset, eval_dataset=Eval_dataset, args=training_args)
        trainer.train()
    trainer.save_state()
class OperationValidator(KeywordValidator):
    """Validates a single OpenAPI operation object.

    Yields errors for duplicate operation IDs, invalid responses, invalid
    parameters, and path-template parameters that no parameter definition
    resolves.
    """

    def __init__(self, registry: 'KeywordValidatorRegistry'):
        super().__init__(registry)
        # Operation IDs seen so far (shared across calls) for duplicate detection.
        self.operation_ids_registry: Optional[List[str]] = []

    # BUG FIX: these two accessors are invoked below as
    # ``self.responses_validator(responses)`` — attribute access followed by a
    # call — so they must be properties, not plain methods.
    @property
    def responses_validator(self) -> ResponsesValidator:
        return cast(ResponsesValidator, self.registry['responses'])

    @property
    def parameters_validator(self) -> ParametersValidator:
        return cast(ParametersValidator, self.registry['parameters'])

    def __call__(self, url: str, name: str, operation: SchemaPath, path_parameters: Optional[SchemaPath]) -> Iterator[ValidationError]:
        """Yield validation errors for ``operation`` (HTTP method ``name``) at ``url``."""
        assert (self.operation_ids_registry is not None)
        operation_id = operation.getkey('operationId')
        if ((operation_id is not None) and (operation_id in self.operation_ids_registry)):
            (yield DuplicateOperationIDError(f"Operation ID '{operation_id}' for '{name}' in '{url}' is not unique"))
        self.operation_ids_registry.append(operation_id)
        if ('responses' in operation):
            responses = (operation / 'responses')
            (yield from self.responses_validator(responses))
        names = []
        parameters = None
        if ('parameters' in operation):
            parameters = (operation / 'parameters')
            (yield from self.parameters_validator(parameters))
            names += list(self._get_path_param_names(parameters))
        if (path_parameters is not None):
            names += list(self._get_path_param_names(path_parameters))
        all_params = list(set(names))
        # Every templated segment of the URL must resolve to a path parameter.
        for path in self._get_path_params_from_url(url):
            if (path not in all_params):
                (yield UnresolvableParameterError("Path parameter '{}' for '{}' operation in '{}' was not resolved".format(path, name, url)))
        return

    def _get_path_param_names(self, params: SchemaPath) -> Iterator[str]:
        """Yield the names of parameters whose location is the URL path."""
        for param in params:
            if (param['in'] == 'path'):
                (yield param['name'])

    def _get_path_params_from_url(self, url: str) -> Iterator[str]:
        """Extract ``{placeholder}`` names from a URL template."""
        formatter = string.Formatter()
        path_params = [item[1] for item in formatter.parse(url)]
        return filter(None, path_params)
class Pix3DCodeDataset(BaseDataset):
    """Loads pre-extracted VQ codes (sdf volume, z_q, code indices) for Pix3D shapes."""

    def initialize(self, opt, phase='train', cat='all'):
        """Build the (model path, category) lists for ``phase`` and ``cat``."""
        self.opt = opt
        self.max_dataset_size = opt.max_dataset_size
        info_file = json_f_dict[hostname]['pix3d']
        info_path = f'preprocess/info_files/{info_file}'
        with open(info_path) as f:
            self.info = json.load(f)
        # Resolve the category list once (the original computed it twice).
        if (cat == 'all'):
            cats = self.info['all_cats']
        else:
            cats = [cat]
        code_setting = f'{opt.vq_model}-{opt.vq_dset}-{opt.vq_cat}-T{opt.trunc_thres}'
        self.code_dir = f'{dataroot}/extracted_code/{code_setting}'
        assert os.path.exists(self.code_dir), f'{self.code_dir} should exist.'
        self.model_list = []
        self.cats_list = []
        for c in cats:
            with open(f'{dataroot}/pix3d/filelists/{c}_{phase}.lst') as f:
                model_list_s = []
                for l in f.readlines():
                    model_id = l.rstrip('\n')
                    # BUG FIX: was ``self.code_root``, which is never assigned;
                    # the extracted codes live under ``self.code_dir``.
                    path = f'{self.code_dir}/{c}/{model_id}'
                    model_list_s.append(path)
                self.model_list += model_list_s
                self.cats_list += ([c] * len(model_list_s))
                print(('[*] %d samples for %s.' % (len(model_list_s), c)))
        # Identical seeds keep model_list and cats_list aligned after shuffling.
        np.random.default_rng(seed=0).shuffle(self.model_list)
        np.random.default_rng(seed=0).shuffle(self.cats_list)
        cprint(('[*] (Pix3DDataset) there are %d categories.' % len(cats)), 'yellow')
        self.model_list = self.model_list[:self.max_dataset_size]
        cprint(('[*] %d code loaded.' % len(self.model_list)), 'yellow')
        self.N = len(self.model_list)
        self.to_tensor = transforms.ToTensor()
        self.normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

    def __getitem__(self, index):
        """Return the sdf volume, quantized code, code indices and metadata."""
        catname = self.cats_list[index]
        model = self.model_list[index]
        sdf_p = f'{model}/x.npy'
        code_p = f'{model}/code.npy'
        codeix_p = f'{model}/codeix.npy'
        sdf = torch.from_numpy(np.load(sdf_p))
        code = torch.from_numpy(np.load(code_p))
        codeix = torch.from_numpy(np.load(codeix_p))
        ret = {'sdf': sdf, 'z_q': code, 'idx': codeix, 'cat_str': catname, 'path': model}
        return ret

    def __len__(self):
        return self.N

    def name(self):
        return 'Pix3DCodeDataset'
class BuildBackendHookCaller():
    """Run PEP 517 build-backend hooks for a source tree in a subprocess."""

    def __init__(self, source_dir: str, build_backend: str, backend_path: Optional[Sequence[str]]=None, runner: Optional['SubprocessRunner']=None, python_executable: Optional[str]=None) -> None:
        """
        :param source_dir: source directory the hooks operate on.
        :param build_backend: backend spec, e.g. ``module:object``.
        :param backend_path: directories (inside the source tree) to prepend
            to the backend's import path.
        :param runner: callable executing the hook subprocess; defaults to
            ``default_subprocess_runner``.
        :param python_executable: interpreter used to run the hooks; defaults
            to the current interpreter.
        """
        if (runner is None):
            runner = default_subprocess_runner
        self.source_dir = abspath(source_dir)
        self.build_backend = build_backend
        if backend_path:
            # Each backend path must stay within the source tree.
            backend_path = [norm_and_check(self.source_dir, p) for p in backend_path]
        self.backend_path = backend_path
        self._subprocess_runner = runner
        if (not python_executable):
            python_executable = sys.executable
        self.python_executable = python_executable

    def subprocess_runner(self, runner: 'SubprocessRunner') -> Iterator[None]:
        """Context manager temporarily swapping the subprocess runner.

        The previous runner is restored on exit, even if the body raises.
        """
        # Imported locally so this fix does not require touching the
        # module-level imports, which are outside this block.
        from contextlib import contextmanager

        @contextmanager
        def _swapped() -> Iterator[None]:
            prev = self._subprocess_runner
            self._subprocess_runner = runner
            try:
                yield
            finally:
                self._subprocess_runner = prev

        # BUG FIX: this method is a yield/try-finally context manager but was
        # missing @contextmanager, so the bare generator it returned could not
        # be used in a ``with`` statement (no __enter__/__exit__).
        return _swapped()

    def _supported_features(self) -> Sequence[str]:
        """Return the optional hook names the backend declares support for."""
        return self._call_hook('_supported_features', {})

    def get_requires_for_build_wheel(self, config_settings: Optional[Mapping[(str, Any)]]=None) -> Sequence[str]:
        """Return extra requirements needed to build a wheel."""
        return self._call_hook('get_requires_for_build_wheel', {'config_settings': config_settings})

    def prepare_metadata_for_build_wheel(self, metadata_directory: str, config_settings: Optional[Mapping[(str, Any)]]=None, _allow_fallback: bool=True) -> str:
        """Produce the wheel metadata directory; may fall back to a full build."""
        return self._call_hook('prepare_metadata_for_build_wheel', {'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, '_allow_fallback': _allow_fallback})

    def build_wheel(self, wheel_directory: str, config_settings: Optional[Mapping[(str, Any)]]=None, metadata_directory: Optional[str]=None) -> str:
        """Build a wheel into ``wheel_directory``; return its file name."""
        if (metadata_directory is not None):
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_wheel', {'wheel_directory': abspath(wheel_directory), 'config_settings': config_settings, 'metadata_directory': metadata_directory})

    def get_requires_for_build_editable(self, config_settings: Optional[Mapping[(str, Any)]]=None) -> Sequence[str]:
        """Return extra requirements needed for an editable build."""
        return self._call_hook('get_requires_for_build_editable', {'config_settings': config_settings})

    def prepare_metadata_for_build_editable(self, metadata_directory: str, config_settings: Optional[Mapping[(str, Any)]]=None, _allow_fallback: bool=True) -> Optional[str]:
        """Produce metadata for an editable install, if the backend supports it."""
        return self._call_hook('prepare_metadata_for_build_editable', {'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, '_allow_fallback': _allow_fallback})

    def build_editable(self, wheel_directory: str, config_settings: Optional[Mapping[(str, Any)]]=None, metadata_directory: Optional[str]=None) -> str:
        """Build an editable wheel into ``wheel_directory``; return its file name."""
        if (metadata_directory is not None):
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_editable', {'wheel_directory': abspath(wheel_directory), 'config_settings': config_settings, 'metadata_directory': metadata_directory})

    def get_requires_for_build_sdist(self, config_settings: Optional[Mapping[(str, Any)]]=None) -> Sequence[str]:
        """Return extra requirements needed to build an sdist."""
        return self._call_hook('get_requires_for_build_sdist', {'config_settings': config_settings})

    def build_sdist(self, sdist_directory: str, config_settings: Optional[Mapping[(str, Any)]]=None) -> str:
        """Build an sdist into ``sdist_directory``; return its file name."""
        return self._call_hook('build_sdist', {'sdist_directory': abspath(sdist_directory), 'config_settings': config_settings})

    def _call_hook(self, hook_name: str, kwargs: Mapping[(str, Any)]) -> Any:
        """Invoke ``hook_name`` in a subprocess, exchanging args via JSON files.

        Raises UnsupportedOperation / BackendUnavailable / HookMissing based
        on the flags the in-process script writes into output.json.
        """
        extra_environ = {'_PYPROJECT_HOOKS_BUILD_BACKEND': self.build_backend}
        if self.backend_path:
            backend_path = os.pathsep.join(self.backend_path)
            extra_environ['_PYPROJECT_HOOKS_BACKEND_PATH'] = backend_path
        with tempfile.TemporaryDirectory() as td:
            hook_input = {'kwargs': kwargs}
            write_json(hook_input, pjoin(td, 'input.json'), indent=2)
            with _in_proc_script_path() as script:
                python = self.python_executable
                self._subprocess_runner([python, abspath(str(script)), hook_name, td], cwd=self.source_dir, extra_environ=extra_environ)
            data = read_json(pjoin(td, 'output.json'))
            if data.get('unsupported'):
                raise UnsupportedOperation(data.get('traceback', ''))
            if data.get('no_backend'):
                raise BackendUnavailable(data.get('traceback', ''), message=data.get('backend_error', ''), backend_name=self.build_backend, backend_path=self.backend_path)
            if data.get('hook_missing'):
                raise HookMissing((data.get('missing_hook_name') or hook_name))
            return data['return_val']
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (same vocabulary format as BART/GPT-2).

    Text is first mapped to printable unicode characters byte-by-byte, then
    merged with learned BPE ranks; special-token handling follows the
    RoBERTa/BART ``<s> ... </s>`` convention.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        """Load the vocabulary and BPE merge ranks from ``vocab_file``/``merges_file``."""
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        # The mask token strips a leading space so "<mask>" behaves like a word.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        # Byte <-> printable-unicode tables for byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Skip the "#version" header line and the trailing empty line.
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        # Memoizes bpe() results per input token.
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # GPT-2 style pre-tokenization pattern (contractions, letters, digits,
        # punctuation runs, whitespace).
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

    # NOTE(review): in upstream transformers this accessor is a @property —
    # confirm whether the decorator was lost here.
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token->id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to ``token`` and return the space-joined subwords."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the pre-tokenizer, then BPE each byte-encoded piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string (None if unknown)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and decode the byte-level characters back to text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``; return their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Merges are written in rank order; warn if ranks are not contiguous.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask marking special-token positions (1 = special)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return all-zero token type ids (LED does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPE-encodes like a mid-sentence word."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)

    def _pad(self, encoded_inputs: Union[(Dict[(str, EncodedInput)], BatchEncoding)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
        """Pad as the base class does, then pad ``global_attention_mask`` with -1 to match."""
        encoded_inputs = super()._pad(encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        if (return_attention_mask is None):
            return_attention_mask = ('attention_mask' in self.model_input_names)
        if (return_attention_mask and ('global_attention_mask' in encoded_inputs)):
            required_input = encoded_inputs[self.model_input_names[0]]
            needs_to_be_padded = (len(encoded_inputs['global_attention_mask']) != len(required_input))
            if needs_to_be_padded:
                difference = (len(required_input) - len(encoded_inputs['global_attention_mask']))
                # -1 marks padded positions in the global attention mask.
                if (self.padding_side == 'right'):
                    encoded_inputs['global_attention_mask'] = (encoded_inputs['global_attention_mask'] + ([(- 1)] * difference))
                elif (self.padding_side == 'left'):
                    encoded_inputs['global_attention_mask'] = (([(- 1)] * difference) + encoded_inputs['global_attention_mask'])
                else:
                    raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
        return encoded_inputs
class ModbusPlusStatistics():
    """Modbus Plus statistics block (node/network counters).

    Multi-byte fields are stored as fixed-width lists of byte values.

    Bug fixed: the table used to be a *class* attribute, so every instance
    shared (and reset) the same counters; it is now built per instance in
    ``__init__`` so independent handlers do not clobber each other.
    """

    def __init__(self):
        """Initialize a fresh, zeroed statistics table for this instance."""
        self.__data = OrderedDict({
            'node_type_id': ([0] * 2),
            'software_version_number': ([0] * 2),
            'network_address': ([0] * 2),
            'mac_state_variable': ([0] * 2),
            'peer_status_code': ([0] * 2),
            'token_pass_counter': ([0] * 2),
            'token_rotation_time': ([0] * 2),
            'program_master_token_failed': [0],
            'data_master_token_failed': [0],
            'program_master_token_owner': [0],
            'data_master_token_owner': [0],
            'program_slave_token_owner': [0],
            'data_slave_token_owner': [0],
            'data_slave_command_transfer': [0],
            '__unused_10_lowbit': [0],
            'program_slave_command_transfer': [0],
            'program_master_rsp_transfer': [0],
            'program_slave_auto_logout': [0],
            'program_master_connect_status': [0],
            'receive_buffer_dma_overrun': [0],
            'pretransmit_deferral_error': [0],
            'frame_size_error': [0],
            'repeated_command_received': [0],
            'receiver_alignment_error': [0],
            'receiver_collision_abort_error': [0],
            'bad_packet_length_error': [0],
            'receiver_crc_error': [0],
            'transmit_buffer_dma_underrun': [0],
            'bad_link_address_error': [0],
            'bad_mac_function_code_error': [0],
            'internal_packet_length_error': [0],
            'communication_failed_error': [0],
            'communication_retries': [0],
            'no_response_error': [0],
            'good_receive_packet': [0],
            'unexpected_path_error': [0],
            'exception_response_error': [0],
            'forgotten_transaction_error': [0],
            'unexpected_response_error': [0],
            'active_station_bit_map': ([0] * 8),
            'token_station_bit_map': ([0] * 8),
            'global_data_bit_map': ([0] * 8),
            'receive_buffer_use_bit_map': ([0] * 8),
            'data_master_output_path': ([0] * 8),
            'data_slave_input_path': ([0] * 8),
            'program_master_outptu_path': ([0] * 8),
            'program_slave_input_path': ([0] * 8),
        })
        self.reset()

    def __iter__(self):
        """Iterate over ``(name, byte_list)`` statistic pairs."""
        return iter(self.__data.items())

    def reset(self):
        """Zero every counter, preserving each field's byte width."""
        for key in self.__data:
            self.__data[key] = ([0] * len(self.__data[key]))

    def summary(self):
        """Return an iterator over the raw byte-lists only."""
        return iter(self.__data.values())

    def encode(self):
        """Pack the flattened byte values into 16-bit words (big-endian pairs)."""
        (total, values) = ([], sum(self.__data.values(), []))
        for i in range(0, len(values), 2):
            total.append(((values[i] << 8) | values[(i + 1)]))
        return total
def fgraph_from_model(model: Model, inlined_views=False) -> Tuple[(FunctionGraph, Dict[(Variable, Variable)])]:
    """Convert a PyMC ``Model`` into a PyTensor ``FunctionGraph``.

    Returns the graph plus a memo dict mapping original model variables to
    their counterparts in the new graph.  When ``inlined_views`` is True,
    deterministics and named value variables alias the original variables
    instead of copies.

    Raises
    ------
    NotImplementedError: if any RV has a non-default initial value.
    ValueError: if ``model`` is a nested sub-model.
    """
    if any(((v is not None) for v in model.rvs_to_initial_values.values())):
        raise NotImplementedError('Cannot convert models with non-default initial_values')
    if (model.parent is not None):
        raise ValueError('Nested sub-models cannot be converted to fgraph. Convert the parent model instead')
    rvs_to_values = model.rvs_to_values
    rvs = list(rvs_to_values.keys())
    free_rvs = model.free_RVs
    observed_rvs = model.observed_RVs
    potentials = model.potentials
    named_vars = model.named_vars.values()
    old_deterministics = model.deterministics
    # Deterministics (and named value vars below) are shallow-copied unless
    # views are inlined, so wrapping them does not mutate the model's graph.
    deterministics = [(det if inlined_views else det.copy(det.name)) for det in old_deterministics]
    old_value_vars = list(rvs_to_values.values())
    unnamed_value_vars = [val for val in old_value_vars if (val not in named_vars)]
    named_value_vars = [(val if inlined_views else val.copy(val.name)) for val in old_value_vars if (val in named_vars)]
    value_vars = old_value_vars.copy()
    if inlined_views:
        # Substitute the (aliased) named value variables in place.
        for named_val in named_value_vars:
            idx = value_vars.index(named_val)
            value_vars[idx] = named_val
    accounted_for = set(((((free_rvs + observed_rvs) + potentials) + old_deterministics) + old_value_vars))
    other_named_vars = [(var if inlined_views else var.copy(var.name)) for var in named_vars if (var not in accounted_for)]
    # Output order matters: unnamed value vars go last so they can be removed
    # from the tail of fgraph.outputs at the end.
    model_vars = (((((rvs + potentials) + deterministics) + other_named_vars) + named_value_vars) + unnamed_value_vars)
    memo = {}
    # Clone shared variables (RNGs, dim lengths, named shareds) so that
    # compiling/mutating the new graph leaves the original model untouched.
    shared_vars_to_copy = find_rng_nodes(model_vars)
    shared_vars_to_copy += [v for v in model.dim_lengths.values() if isinstance(v, SharedVariable)]
    shared_vars_to_copy += [v for v in model.named_vars.values() if isinstance(v, SharedVariable)]
    for var in shared_vars_to_copy:
        if isinstance(var, ScalarSharedVariable):
            # .item() keeps the scalar type instead of a 0-d array.
            new_var = shared(var.get_value(borrow=False).item())
        else:
            new_var = shared(var.get_value(borrow=False))
        assert (new_var.type == var.type)
        new_var.name = var.name
        new_var.tag = copy(var.tag)
        memo[var] = new_var
    fgraph = FunctionGraph(outputs=model_vars, clone=True, memo=memo, copy_orphans=True, copy_inputs=True)
    fgraph._coords = model._coords.copy()
    fgraph._dim_lengths = {k: memo.get(v, v) for (k, v) in model._dim_lengths.items()}
    rvs_to_transforms = model.rvs_to_transforms
    named_vars_to_dims = model.named_vars_to_dims
    # Re-key the model metadata onto the cloned variables.
    free_rvs_to_transforms = {memo[k]: tr for (k, tr) in rvs_to_transforms.items()}
    free_rvs_to_values = {memo[k]: memo[v] for (k, v) in zip(rvs, value_vars) if (k in free_rvs)}
    observed_rvs_to_values = {memo[k]: memo[v] for (k, v) in zip(rvs, value_vars) if (k in observed_rvs)}
    potentials = [memo[k] for k in potentials]
    deterministics = [memo[k] for k in deterministics]
    named_vars = [memo[k] for k in (other_named_vars + named_value_vars)]
    vars = fgraph.outputs
    new_vars = []
    # Wrap each output in the marker Op that records its model role.
    for var in vars:
        dims = named_vars_to_dims.get(var.name, ())
        if (var in free_rvs_to_values):
            new_var = model_free_rv(var, free_rvs_to_values[var], free_rvs_to_transforms[var], *dims)
        elif (var in observed_rvs_to_values):
            new_var = model_observed_rv(var, observed_rvs_to_values[var], *dims)
        elif (var in potentials):
            new_var = model_potential(var, *dims)
        elif (var in deterministics):
            new_var = model_deterministic(var, *dims)
        elif (var in named_vars):
            new_var = model_named(var, *dims)
        else:
            # Unnamed value variables stay unwrapped; they are removed below.
            new_var = var
        new_vars.append(new_var)
    replacements = tuple(zip(vars, new_vars))
    toposort_replace(fgraph, replacements, reverse=True)
    # Point the memo at the wrapped variables so callers can map original
    # model variables to their final counterparts in the fgraph.
    inverse_memo = {v: k for (k, v) in memo.items()}
    for (var, model_var) in replacements:
        if ((not inlined_views) and (model_var.owner and isinstance(model_var.owner.op, (ModelDeterministic, ModelNamed)))):
            # Resolve through the copy introduced above for these wrappers.
            var = var.owner.inputs[0]
        original_var = inverse_memo[var]
        memo[original_var] = model_var
    # Unnamed value variables were only needed while wiring; drop them from
    # the public outputs (they sit at the tail, see model_vars ordering).
    first_idx_to_remove = (len(fgraph.outputs) - len(unnamed_value_vars))
    for _ in unnamed_value_vars:
        fgraph.remove_output(first_idx_to_remove)
    remove_identity_rewrite.apply(fgraph)
    return (fgraph, memo)
def test_ansible_unavailable(host):
    """Both the ansible call and get_variables must raise outside the
    ansible connection backend, with the explanatory message."""
    expected = 'Ansible module is only available with ansible connection backend'
    for trigger in (lambda: host.ansible('setup'), lambda: host.ansible.get_variables()):
        with pytest.raises(RuntimeError) as excinfo:
            trigger()
        assert (expected in str(excinfo.value))
class TV():
    """Minimal television receiver (facade-pattern demo appliance)."""

    location: str
    channel: int

    def __init__(self, location: str):
        # Where the set is installed; channel stays unset until selected.
        self.location = location

    def on(self) -> None:
        """Power the set on."""
        print('TV is on')

    def off(self) -> None:
        """Power the set off."""
        print('TV is off')

    def setInputChannel(self) -> None:
        """Tune to channel 3, the conventional A/V input for a VCR."""
        self.channel = 3
        print('Channel is set for VCR')
def _get_stations_from(uri: str, on_done: Callable[([Iterable[IRFile], str], None)]) -> None:
    """Fetch/parse internet-radio stations from ``uri`` and call
    ``on_done(stations, uri)`` when finished.

    ``.pls`` / ``.m3u`` / ``.m3u8`` URIs are downloaded and parsed as
    playlists; any other URI is treated as a single station.  Download
    failures are logged and yield an empty station list.
    """
    with Task(_('Internet Radio'), _('Add stations')) as task:
        irfs: Collection[IRFile] = []
        GLib.idle_add(task.pulse)
        if (uri.lower().endswith('.pls') or uri.lower().endswith('.m3u') or uri.lower().endswith('.m3u8')):
            if (not re.match('^([^/:]+)://', uri)):
                # Bug fix: this string literal was truncated/unterminated in
                # this copy; restore the plain-HTTP scheme fallback for
                # scheme-less URIs.
                uri = ('http://' + uri)
                print_d(('Assuming %s' % uri))
            sock = None
            GLib.idle_add(task.pulse)
            (_fn, ext) = splitext(uri.lower())
            try:
                sock = urlopen(uri, timeout=6)
                if (ext == '.pls'):
                    irfs = parse_pls(sock)
                elif (ext in ('.m3u', '.m3u8')):
                    irfs = parse_m3u(sock)
                GLib.idle_add(task.pulse)
            except OSError as e:
                print_e(f"Couldn't download from {uri} ({e})")
            finally:
                if sock:
                    sock.close()
        else:
            try:
                irfs = [IRFile(uri)]
            except ValueError as e:
                print_e(("Can't add URI %s" % uri), e)
        # Always report back, even when irfs is empty.
        on_done(irfs, uri)
def test_complement():
    """complement(f) must return a predicate with the opposite truthiness."""
    # Zero-argument predicates.
    assert complement(lambda: False)()
    assert not complement(lambda: True)()
    # Single argument.
    assert complement(iseven)(1)
    assert not complement(iseven)(2)
    # Double complement round-trips.
    assert complement(complement(iseven))(2)
    assert not complement(complement(isodd))(2)
    # Multiple arguments.
    both_even = lambda a, b: iseven(a) and iseven(b)
    assert complement(both_even)(1, 2)
    assert not complement(both_even)(2, 2)
    # Generic truthiness: any falsy return is complemented to True...
    for falsy in ('', 0, None, []):
        assert complement(lambda v=falsy: v)()
    # ...and any truthy return to False.
    for truthy in ('x', 1, [1]):
        assert not complement(lambda v=truthy: v)()
def is_valid_userid_for_address(user_id: Any, address: Address) -> bool:
    """Return True iff ``user_id`` is a well-formed user id that encodes
    exactly the given ``address``."""
    try:
        typecheck(user_id, T_UserID)
    except ValueError:
        # Not even the right shape for a user id.
        return False
    derived = address_from_userid(user_id)
    return bool(derived) and (address == derived)
@raise_on_failure
@pytest.mark.parametrize('number_of_nodes', [2])
@pytest.mark.parametrize('channels_per_node', [CHAIN])
def test_broadcast_messages_must_be_sent_before_protocol_messages_on_restarts(raiden_network: List[RaidenService], restart_node, number_of_nodes, token_addresses, network_wait):
    """After a restart, queued broadcast messages (monitoring request, PFS fee
    and capacity updates) must be flushed before other protocol traffic.

    NOTE(review): the decorator lines were corrupted in this copy
    ('_on_failure' / bare '.parametrize'); reconstructed as
    ``@raise_on_failure`` + ``@pytest.mark.parametrize`` from the raiden
    test-suite conventions - confirm against upstream.
    """
    (app0, app1) = raiden_network
    app0.config.services.monitoring_enabled = True
    token_address = token_addresses[0]
    amount = PaymentAmount(10)
    payment_id = PaymentID(23)
    # Produce a payment so there is monitoring/PFS state to re-broadcast.
    transfer(initiator_app=app1, target_app=app0, token_address=token_address, amount=amount, identifier=payment_id, timeout=(network_wait * number_of_nodes), routes=[[app1, app0]])
    app0.stop()
    transport = MatrixTransport(config=app0.config.transport, environment=app0.config.environment_type)
    # Silence actual sends; we only inspect the broadcast queue contents.
    transport.send_async = Mock()
    transport._send_raw = Mock()
    old_start_transport = transport.start

    def start_transport(*args, **kwargs):
        # Assert on the queue *before* the transport starts draining it.
        queue_copy = transport._broadcast_queue.copy()
        queued_messages = []
        for _ in range(len(transport._broadcast_queue)):
            queued_messages.append(queue_copy.get())

        def num_matching_queued_messages(room: str, message_type: Type) -> int:
            return len([item for item in queued_messages if ((item[0] == room) and (type(item[1]) == message_type))])
        assert (num_matching_queued_messages(DeviceIDs.MS.value, RequestMonitoring) == 1)
        assert (num_matching_queued_messages(DeviceIDs.PFS.value, PFSFeeUpdate) == 1)
        assert (num_matching_queued_messages(DeviceIDs.PFS.value, PFSCapacityUpdate) == 1)
        old_start_transport(*args, **kwargs)
    transport.start = start_transport
    app0_restart = RaidenService(config=app0.config, rpc_client=app0.rpc_client, proxy_manager=app0.proxy_manager, query_start_block=BlockNumber(0), raiden_bundle=RaidenBundle(app0.default_registry, app0.default_secret_registry), services_bundle=app0.default_services_bundle, transport=transport, raiden_event_handler=RaidenEventHandler(), message_handler=MessageHandler(), routing_mode=RoutingMode.PFS, pfs_proxy=app0.pfs_proxy)
    restart_node(app0_restart)
class CMakeBuild(build_ext):
    """Custom ``build_ext`` that drives CMake to build the IDAKLU native solver."""

    user_options = [*build_ext.user_options, ('suitesparse-root=', None, 'suitesparse source location'), ('sundials-root=', None, 'sundials source location')]

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.suitesparse_root = None
        self.sundials_root = None

    def finalize_options(self):
        build_ext.finalize_options(self)
        # Inherit library locations from whichever command invoked us.
        try:
            self.get_finalized_command('install', create=0)
            calling_cmd = 'install'
        except AttributeError:
            calling_cmd = 'bdist_wheel'
        self.set_undefined_options(calling_cmd, ('suitesparse_root', 'suitesparse_root'), ('sundials_root', 'sundials_root'))
        if (not self.suitesparse_root):
            self.suitesparse_root = os.path.join(default_lib_dir)
        if (not self.sundials_root):
            self.sundials_root = os.path.join(default_lib_dir)

    def get_build_directory(self):
        # On Windows, build one directory up to keep paths shorter
        # (presumably to dodge MAX_PATH limits - confirm).
        if (system() == 'Windows'):
            return Path(self.build_temp).parents[0]
        return self.build_temp

    def run(self):
        """Configure and build the IDAKLU CMake project, then move artifacts
        into the package tree.  Raises RuntimeError if configuration fails."""
        if (not self.extensions):
            return
        if (system() == 'Windows'):
            use_python_casadi = False
        else:
            use_python_casadi = True
        build_type = os.getenv('PYBAMM_CPP_BUILD_TYPE', 'RELEASE')
        cmake_args = [f'-DCMAKE_BUILD_TYPE={build_type}', f'-DPYTHON_EXECUTABLE={sys.executable}', '-DUSE_PYTHON_CASADI={}'.format(('TRUE' if use_python_casadi else 'FALSE'))]
        if self.suitesparse_root:
            cmake_args.append(f'-DSuiteSparse_ROOT={os.path.abspath(self.suitesparse_root)}')
        if self.sundials_root:
            cmake_args.append(f'-DSUNDIALS_ROOT={os.path.abspath(self.sundials_root)}')
        build_dir = self.get_build_directory()
        if (not os.path.exists(build_dir)):
            os.makedirs(build_dir)
        # Remove a stale error log so its presence below reflects *this* run.
        if os.path.isfile(os.path.join(build_dir, 'CMakeError.log')):
            os.remove(os.path.join(build_dir, 'CMakeError.log'))
        # NOTE(review): this aliases (not copies) os.environ, so the vcpkg
        # entries below leak into the parent process environment - confirm
        # whether os.environ.copy() was intended.
        build_env = os.environ
        if os.getenv('PYBAMM_USE_VCPKG'):
            (vcpkg_root_dir, vcpkg_default_triplet, vcpkg_feature_flags) = set_vcpkg_environment_variables()
            build_env['vcpkg_root_dir'] = vcpkg_root_dir
            build_env['vcpkg_default_triplet'] = vcpkg_default_triplet
            build_env['vcpkg_feature_flags'] = vcpkg_feature_flags
        cmake_list_dir = os.path.abspath(os.path.dirname(__file__))
        print(('-' * 10), 'Running CMake for IDAKLU solver', ('-' * 40))
        subprocess.run(['cmake', cmake_list_dir, *cmake_args], cwd=build_dir, env=build_env, check=True)
        if os.path.isfile(os.path.join(build_dir, 'CMakeError.log')):
            # Bug fix: this message literal was unterminated in this copy
            # (truncated after 'See', the trailing URL was stripped);
            # restored as a closed string.
            msg = ('cmake configuration steps encountered errors, and the IDAKLU module could not be built. Make sure dependencies are correctly installed. See the PyBaMM install-from-source documentation.')
            raise RuntimeError(msg)
        else:
            print(('-' * 10), 'Building IDAKLU module', ('-' * 40))
            subprocess.run(['cmake', '--build', '.', '--config', 'Release'], cwd=build_dir, env=build_env, check=True)
            for ext in self.extensions:
                self.move_output(ext)

    def move_output(self, ext):
        # Copy the built extension from the CMake build tree to the location
        # setuptools expects for the final wheel/installed layout.
        build_temp = Path(self.build_temp).resolve()
        dest_path = Path(self.get_ext_fullpath(ext.name)).resolve()
        source_path = (build_temp / os.path.basename(self.get_ext_filename(ext.name)))
        dest_directory = dest_path.parents[0]
        dest_directory.mkdir(parents=True, exist_ok=True)
        self.copy_file(source_path, dest_path)
def timeout_sigalrm(item, settings):
    """Fail the current test after a SIGALRM-based timeout, dumping the
    stacks of all live threads when more than one is running."""
    if (not settings.disable_debugger_detection) and is_debugging():
        # Don't kill a test that is being stepped through in a debugger.
        return
    __tracebackhide__ = True
    multiple_threads = len(threading.enumerate()) > 1
    # Frame the stack dump with a title banner on both sides.
    if multiple_threads:
        write_title('Timeout', sep='+')
    dump_stacks()
    if multiple_threads:
        write_title('Timeout', sep='+')
    pytest.fail('Timeout >%ss' % settings.timeout)
class nnUNetTrainer_probabilisticOversampling_033(nnUNetTrainer_probabilisticOversampling):
    """Trainer variant that fixes the foreground oversampling probability at 33%."""
    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Override after the parent __init__ so this value wins over its default.
        self.oversample_foreground_percent = 0.33
class Parser(unittest.TestCase):
    """versions_from_file must tolerate both LF and CRLF `_version.py` files."""

    def _parse_payload(self, payload):
        # Write the raw bytes to a temporary _version.py and parse it back.
        with tempfile.TemporaryDirectory() as root:
            fn = os.path.join(root, '_version.py')
            with open(fn, 'wb') as f:
                f.write(payload)
            return versions_from_file(fn)

    def test_lf(self):
        data = self._parse_payload(b"version_json = '''\n{}\n''' # END VERSION_JSON\n")
        self.assertEqual(data, {})

    def test_crlf(self):
        data = self._parse_payload(b"version_json = '''\r\n{}\r\n''' # END VERSION_JSON\r\n")
        self.assertEqual(data, {})
class VgmFile(AudioFile):
    """Read-only support for VGM (Video Game Music) files: the track length
    is derived from the 64-byte header, tags from the trailing GD3 block."""

    format = 'VGM'
    mimes: list[str] = []

    def __init__(self, filename):
        with translate_errors():
            with open(filename, 'rb') as h:
                # Fixed 64-byte VGM header; magic is 'Vgm '.
                header = h.read(64)
                if ((len(header) != 64) or (header[:4] != b'Vgm ')):
                    # Include the filename to help diagnose faulty files.
                    raise Exception((filename + ' not a VGM file'))

                def samples_to_sec(s):
                    # VGM sample counts are at a fixed 44.1 kHz rate.
                    return (s / 44100.0)
                samples = struct.unpack('<i', header[24:28])[0]
                loop_offset = struct.unpack('<i', header[28:32])[0]
                loop_samples = struct.unpack('<i', header[32:36])[0]
                length = samples_to_sec(samples)
                if (length <= 0):
                    # Missing/invalid sample count: fall back to 150 seconds.
                    length = 150
                elif loop_offset:
                    # Count one extra pass of the looped section.
                    length += samples_to_sec(loop_samples)
                self['~#length'] = length
                # The GD3 offset is stored relative to its own pointer field.
                gd3_position = struct.unpack('<i', header[GD3_TAG_PTR_POS:(GD3_TAG_PTR_POS + GD3_TAG_PTR_SIZE)])[0]
                h.seek((GD3_TAG_PTR_POS + gd3_position))
                self.update(parse_gd3(h.read()))
        # Default the title to the file name minus its 4-char extension.
        self.setdefault('title', fsn2text(path2fsn(os.path.basename(filename)[:(- 4)])))
        self.sanitize(filename)

    def write(self):
        # Tags are never written back to the file for this format.
        pass

    def can_change(self, k=None):
        """Only the 'title' tag may be edited (kept in the library, not the file)."""
        if (k is None):
            return ['title']
        else:
            return (k == 'title')
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Build a detector from a config (path or Config object), optionally
    load a checkpoint, and return the model in eval mode on ``device``.

    Raises TypeError when ``config`` is neither a filename nor a Config.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    # Inference only: drop pretrained-backbone loading and training config.
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        map_loc = 'cpu' if device == 'cpu' else None
        checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
        meta = checkpoint.get('meta', {})
        if 'CLASSES' in meta:
            model.CLASSES = meta['CLASSES']
        else:
            # No class names saved with the weights: warn once and assume COCO.
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's meta data, use COCO classes by default.")
            model.CLASSES = get_classes('coco')
    # Keep the config on the model for downstream convenience.
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class ThermalParameters(BaseParameters):
    """Thermal parameters for the cell: per-domain containers for negative
    electrode, separator and positive electrode, plus whole-cell effective
    properties."""

    def __init__(self):
        self.geo = pybamm.geometric_parameters
        # Per-domain parameter sub-containers.
        self.n = DomainThermalParameters('negative', self)
        self.s = DomainThermalParameters('separator', self)
        self.p = DomainThermalParameters('positive', self)
        self.domain_params = {'negative': self.n, 'separator': self.s, 'positive': self.p}
        self._set_parameters()

    def _set_parameters(self):
        """Define the symbolic parameters (domain parameters first)."""
        for domain in self.domain_params.values():
            domain._set_parameters()
        self.T_ref = pybamm.Parameter('Reference temperature [K]')
        self.h_total = pybamm.Parameter('Total heat transfer coefficient [W.m-2.K-1]')
        self.T_init = pybamm.Parameter('Initial temperature [K]')

    def T_amb(self, y, z, t):
        """Ambient temperature as a function of position (y, z) and time t."""
        return pybamm.FunctionParameter('Ambient temperature [K]', {'Distance across electrode width [m]': y, 'Distance across electrode height [m]': z, 'Time [s]': t})

    def T_amb_av(self, t):
        """YZ-averaged ambient temperature at time t."""
        y = pybamm.standard_spatial_vars.y
        z = pybamm.standard_spatial_vars.z
        return pybamm.yz_average(self.T_amb(y, z, t))

    def h_edge(self, y, z):
        """Edge heat transfer coefficient at position (y, z)."""
        inputs = {'Distance along electrode width [m]': y, 'Distance along electrode height [m]': z}
        return pybamm.FunctionParameter('Edge heat transfer coefficient [W.m-2.K-1]', inputs)

    def rho_c_p_eff(self, T):
        """Effective volumetric heat capacity: layer-thickness-weighted
        average over current collectors, electrodes and separator."""
        return ((((((self.n.rho_c_p_cc(T) * self.geo.n.L_cc) + (self.n.rho_c_p(T) * self.geo.n.L)) + (self.s.rho_c_p(T) * self.geo.s.L)) + (self.p.rho_c_p(T) * self.geo.p.L)) + (self.p.rho_c_p_cc(T) * self.geo.p.L_cc)) / self.geo.L)

    def lambda_eff(self, T):
        """Effective thermal conductivity: layer-thickness-weighted average
        over current collectors, electrodes and separator."""
        return ((((((self.n.lambda_cc(T) * self.geo.n.L_cc) + (self.n.lambda_(T) * self.geo.n.L)) + (self.s.lambda_(T) * self.geo.s.L)) + (self.p.lambda_(T) * self.geo.p.L)) + (self.p.lambda_cc(T) * self.geo.p.L_cc)) / self.geo.L)
class MagicEncode():
    """Transparently pick printer code pages for outgoing text.

    Text is split into runs that a single encoding can represent; the
    printer's code page is switched as needed.  Characters no encoding can
    represent are replaced by ``defaultsymbol``.
    """

    def __init__(self, driver, encoding=None, disabled=False, defaultsymbol='?', encoder=None):
        if disabled and not encoding:
            raise Error('If you disable magic encode, you need to define an encoding!')
        self.driver = driver
        self.encoder = encoder if encoder is not None else Encoder(driver.profile.get_code_pages())
        self.encoding = self.encoder.get_encoding_name(encoding) if encoding else None
        self.defaultsymbol = defaultsymbol
        self.disabled = disabled

    def force_encoding(self, encoding):
        """Pin a single encoding (disables magic switching); falsy re-enables it."""
        if encoding:
            self.write_with_encoding(encoding, None)
            self.disabled = True
        else:
            self.disabled = False

    def write(self, text):
        """Write ``text``, switching encodings on the fly as required."""
        if self.disabled:
            self.write_with_encoding(self.encoding, text)
            return
        # CJK fast path: send the whole string as GB18030 when it contains
        # characters in the common CJK range.
        if re.findall('[\\u4e00-\\u9fa5]', text):
            self.driver._raw(text.encode('GB18030'))
            return
        # Emit the longest prefix writable in the current encoding first.
        (writable, remaining) = split_writable_text(self.encoder, text, self.encoding)
        if writable:
            self.write_with_encoding(self.encoding, writable)
        while remaining:
            next_encoding = self.encoder.find_suitable_encoding(remaining[0])
            if not next_encoding:
                # No code page covers this character; substitute and move on.
                self._handle_character_failed(remaining[0])
                remaining = remaining[1:]
                continue
            (writable, remaining) = split_writable_text(self.encoder, remaining, next_encoding)
            if writable:
                self.write_with_encoding(next_encoding, writable)

    def _handle_character_failed(self, char):
        """Called when no encoding can represent ``char``; writes the fallback symbol."""
        self.write(self.defaultsymbol)

    def write_with_encoding(self, encoding, text):
        if (text is not None) and (type(text) is not str):
            raise Error(f'The supplied text has to be unicode, but is of type {type(text)}.')
        if encoding != self.encoding:
            # Switch the printer's code page before sending the payload.
            self.encoding = encoding
            self.driver._raw((CODEPAGE_CHANGE + six.int2byte(self.encoder.get_sequence(encoding))))
        if text:
            self.driver._raw(self.encoder.encode(text, encoding))
@pytest.mark.fast
def test_find_first(*args, **kwargs):
    """find_first(a, v) appears to return the index of the first element
    strictly greater than v, or 0 when none qualifies.

    NOTE(review): the decorator line was corrupted in this copy ('.fast');
    reconstructed as ``@pytest.mark.fast`` - confirm against the project's
    marker conventions.
    """
    a = np.arange(10)
    # Values below the array range match index 0.
    assert (find_first(a, (- 1)) == 0)
    assert (find_first(a, 0) == 1)
    assert (find_first(a, 5) == 6)
    assert (find_first(a, 8) == 9)
    # Values at/above the maximum wrap to 0 (no strictly greater element).
    assert (find_first(a, 9) == 0)
    assert (find_first(a, 20) == 0)
    assert (not (find_first(a, (- 1)) == (- 1)))
    assert (not (find_first(a, 0) == 0))
    assert (not (find_first(a, 5) == 5))
    assert (not (find_first(a, 10) == 10))
def bloqs_to_proto(*bloqs: Bloq, name: str='', pred: Callable[([BloqInstance], bool)]=(lambda _: True), max_depth: int=1) -> bloq_pb2.BloqLibrary:
    """Serialize ``bloqs`` (and sub-bloqs reachable through decomposition,
    bounded by ``pred`` and ``max_depth``) into a ``BloqLibrary`` proto.
    """
    bloq_to_idx: Dict[(Bloq, int)] = {}
    # First pass: assign a stable integer id to every reachable bloq.
    for bloq in bloqs:
        _add_bloq_to_dict(bloq, bloq_to_idx)
        _populate_bloq_to_idx(bloq, bloq_to_idx, pred, max_depth)
    # Bloqs that cannot (or were chosen not to) decompose end recursion quietly.
    stop_recursing_exceptions = (DecomposeNotImplementedError, DecomposeTypeError, KeyError)
    library = bloq_pb2.BloqLibrary(name=name)
    for (bloq, bloq_id) in bloq_to_idx.items():
        try:
            cbloq = (bloq if isinstance(bloq, CompositeBloq) else bloq.decompose_bloq())
            decomposition = [_connection_to_proto(cxn, bloq_to_idx) for cxn in cbloq.connections]
        except stop_recursing_exceptions:
            decomposition = None
        try:
            # Sort by count so the serialized order is deterministic.
            bloq_counts = {bloq_to_idx[b]: args.int_or_sympy_to_proto(c) for (b, c) in sorted(bloq.bloq_counts().items(), key=(lambda x: x[1]))}
        except stop_recursing_exceptions:
            bloq_counts = None
        library.table.add(bloq_id=bloq_id, decomposition=decomposition, bloq_counts=bloq_counts, bloq=_bloq_to_proto(bloq, bloq_to_idx=bloq_to_idx))
    return library
class TestFactory(TestCase):
    """MachineFactory must compose the graph/nested/locked mixins correctly."""

    def setUp(self):
        self.factory = MachineFactory()

    def test_mixins(self):
        plain = self.factory.get_predefined()
        self.assertFalse(hasattr(plain, 'set_edge_state'))

        with_graph = self.factory.get_predefined(graph=True)
        self.assertTrue(hasattr(with_graph, '_get_graph'))

        with_nested = self.factory.get_predefined(nested=True)
        self.assertFalse(hasattr(with_nested, '_get_graph'))
        self.assertTrue(hasattr(with_nested, 'get_nested_triggers'))

        with_locked = self.factory.get_predefined(locked=True)
        self.assertFalse(hasattr(with_locked, '_get_graph'))
        self.assertFalse(hasattr(with_locked, 'get_nested_triggers'))
        # Locking works by overriding attribute access on the class itself.
        self.assertIn('__getattribute__', with_locked.__dict__)

        locked_nested = self.factory.get_predefined(nested=True, locked=True)
        self.assertFalse(hasattr(locked_nested, '_get_graph'))
        self.assertTrue(hasattr(locked_nested, 'get_nested_triggers'))
        self.assertEqual(locked_nested.__getattribute__, with_locked.__getattribute__)
        self.assertNotEqual(plain.__getattribute__, with_locked.__getattribute__)

        graph_locked = self.factory.get_predefined(graph=True, locked=True)
        self.assertTrue(hasattr(graph_locked, '_get_graph'))
        self.assertEqual(graph_locked.__getattribute__, with_locked.__getattribute__)

        graph_nested = self.factory.get_predefined(graph=True, nested=True)
        self.assertNotEqual(with_nested._create_transition, graph_nested._create_transition)

        all_mixins = self.factory.get_predefined(nested=True, locked=True, graph=True)
        self.assertNotEqual(all_mixins._create_event, with_graph._create_event)
def parse_mypy_comments(args: list[tuple[(int, str)]], template: Options) -> tuple[(dict[(str, object)], list[tuple[(int, str)]])]:
    """Parse inline ``# mypy: ...`` option comments.

    ``args`` is a list of (line number, option text) pairs and ``template``
    supplies defaults.  Returns (merged option sections, list of
    (line number, error message) pairs).
    """
    errors: list[tuple[(int, str)]] = []
    sections = {}
    for (lineno, line) in args:
        # Funnel each comment through configparser so values are interpreted
        # exactly like an ini-file section.
        parser = configparser.RawConfigParser()
        (options, parse_errors) = mypy_comments_to_config_map(line, template)
        parser['dummy'] = options
        errors.extend(((lineno, x) for x in parse_errors))
        stderr = StringIO()
        strict_found = False

        def set_strict_flags() -> None:
            # Record (rather than apply) a `strict` option so it can be rejected below.
            nonlocal strict_found
            strict_found = True
        (new_sections, reports) = parse_section('', template, set_strict_flags, parser['dummy'], ini_config_types, stderr=stderr)
        # Anything parse_section wrote to stderr becomes per-line errors.
        errors.extend(((lineno, x) for x in stderr.getvalue().strip().split('\n') if x))
        if reports:
            errors.append((lineno, 'Reports not supported in inline configuration'))
        if strict_found:
            errors.append((lineno, 'Setting "strict" not supported in inline configuration: specify it in a configuration file instead, or set individual inline flags (see "mypy -h" for the list of flags enabled in strict mode)'))
        sections.update(new_sections)
    return (sections, errors)
def test_edit_connections(game_editor):
    """edit_connections must replace the requirement between two nodes."""
    area = game_editor.game.region_list.area_by_area_location(AreaIdentifier('Temple Grounds', 'Landing Site'))
    node_from = area.node_with_name('Save Station')
    node_to = area.node_with_name('Door to Service Access')
    # Precondition: the existing connection is non-trivial.
    assert area.connections[node_from][node_to] != Requirement.trivial()
    game_editor.edit_connections(area, node_from, node_to, Requirement.trivial())
    assert area.connections[node_from][node_to] == Requirement.trivial()
def train_CE(train_loader, model, model_ema, optimizer_model, epoch):
    """Train one epoch with plain cross-entropy and maintain an EMA shadow model.

    Returns the mean top-1 accuracy over the processed batches.
    """
    print(('\nEpoch: %d' % epoch))
    train_loss = 0
    train_total = 0
    train_correct = 0
    for (batch_idx, (inputs, targets, index)) in enumerate(train_loader):
        model.train()
        model_ema.train()
        (inputs, targets) = (inputs.cuda(), targets.cuda())
        outputs = model(inputs)
        loss = F.cross_entropy(outputs, targets)
        prec_train = accuracy(outputs.data, targets.data, topk=(1,))[0]
        train_total += 1
        train_correct += prec_train.item()
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()
        # EMA update: ema = 0.999 * ema + 0.001 * param
        for (param, param_ema) in zip(model.module.params(), model_ema.module.params()):
            # Bug fix: Tensor.add_(scalar, tensor) is the removed legacy
            # signature; use the alpha keyword (same arithmetic).
            param_ema.data.mul_(0.999).add_(param.data, alpha=0.001)
        train_loss += loss.item()
        if (((batch_idx + 1) % 10) == 0):
            # Bug fix: the format string was corrupted ('%.4f\ %.2f');
            # restored a tab-separated accuracy field (presumed original
            # label 'Prec@1' - confirm against upstream).
            print(('Epoch: [%d/%d]\tIters: [%d/%d]\tLoss: %.4f\tPrec@1 %.2f' % ((epoch + 1), args.epoch, (batch_idx + 1), (len(train_loader.dataset) / args.batch_size), (train_loss / (batch_idx + 1)), prec_train)))
    train_acc = (float(train_correct) / float(train_total))
    return train_acc
def run_collation():
    """Collate profile/encoding definitions into dist/capabilities.{json,yml}."""
    encodings_raw = load_encodings()
    profiles_raw = load_profiles()
    # Resolve inheritance/substitutions per profile.
    profiles_substituted = {}
    for profile_name in profiles_raw.keys():
        profiles_substituted[profile_name] = substitute_profile(profile_name, profiles_raw, encodings_raw)
    # Keep only the encodings actually referenced by some profile.
    encodings_filtered = filter_encodings(encodings_raw, profiles_substituted)
    capabilities = {'profiles': profiles_substituted, 'encodings': encodings_filtered}
    json_capabilities = json.dumps(capabilities, sort_keys=True, indent=4, separators=(',', ': '))
    dist_prefix = os.path.dirname(__file__) + '/../dist/'
    with open(dist_prefix + 'capabilities.json', 'wb+') as json_f:
        json_f.write(json_capabilities.encode('utf-8'))
    # Round-trip through the sorted JSON so the YAML shares its key order.
    ordered_dict = json.loads(json_capabilities, object_pairs_hook=collections.OrderedDict)
    yml_capabilities = pyaml.dumps(ordered_dict, string_val_style='"', explicit_start=True)
    with open(dist_prefix + 'capabilities.yml', 'wb+') as yml_f:
        yml_f.write(yml_capabilities.encode('utf-8'))
def test_MaxAbsScaler_no_change_original_dm(decision_matrix):
    """transform() must return a new, changed matrix and leave the input intact."""
    dm = decision_matrix(seed=42, min_alternatives=10, max_alternatives=10, min_criteria=20, max_criteria=20, min_objectives_proportion=0.5)
    snapshot = dm.copy()
    transformed = MaxAbsScaler(target='both').transform(dm)
    assert dm.equals(snapshot)
    assert not transformed.equals(snapshot)
    assert dm is not snapshot
def eval_model(model: torch.nn.Module, dataset: EgoDataset, logger: Logger, d_set: str, iter_num: int, num_scenes_to_unroll: int, num_simulation_steps: int=None, enable_scene_type_aggregation: Optional[bool]=False, scene_id_to_type_path: Optional[str]=None) -> None:
    """Closed-loop evaluation: unroll scenes with the ego model, compute
    ADE/FDE plus CLE validator counts, and log everything under ``d_set``.
    """
    model.eval()
    torch.set_grad_enabled(False)
    # Ego is driven by the model; all other agents replay the logged data.
    sim_cfg = SimulationConfig(use_ego_gt=False, use_agents_gt=True, disable_new_agents=False, distance_th_far=30, distance_th_close=15, num_simulation_steps=num_simulation_steps, start_frame_index=0, show_info=False)
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    sim_loop = ClosedLoopSimulator(sim_cfg, dataset, device, model_ego=model, model_agents=None)
    metric_set = CLEMetricSet()
    # Unroll in batches of 100 scenes to bound peak memory.
    batch_unroll = 100
    for start_idx in range(0, num_scenes_to_unroll, batch_unroll):
        end_idx = min(num_scenes_to_unroll, (start_idx + batch_unroll))
        scenes_to_unroll = list(range(start_idx, end_idx))
        sim_outs = sim_loop.unroll(scenes_to_unroll)
        metric_set.evaluator.evaluate(sim_outs)
    (ade, fde) = L5KitEvalCallback.compute_ade_fde(metric_set)
    logger.record(f'{d_set}/ade', round(ade, 3))
    logger.record(f'{d_set}/fde', round(fde, 3))
    validation_results = metric_set.evaluator.validation_results()
    agg = ValidationCountingAggregator().aggregate(validation_results)
    for (k, v) in agg.items():
        logger.record(f'{d_set}/{k}', v.item())
    # Aggregate the three collision directions into one headline number.
    tot_collision = ((agg['collision_front'].item() + agg['collision_side'].item()) + agg['collision_rear'].item())
    logger.record(f'{d_set}/total_collision', tot_collision)
    composite_metric_results = metric_set.evaluator.composite_metric_results()
    comp_agg = CompositeMetricAggregator().aggregate(composite_metric_results)
    for (k, v) in comp_agg.items():
        logger.record(f'{d_set}/{k}', v.item())
    if enable_scene_type_aggregation:
        # Optional per-scene-type breakdown; requires the id->type mapping file.
        assert (scene_id_to_type_path is not None)
        scene_ids_to_scene_types = L5KitEvalCallback.get_scene_types(scene_id_to_type_path)
        scene_type_results = compute_cle_scene_type_aggregations(metric_set, scene_ids_to_scene_types, list_validator_table_to_publish=[])
        for (k, v) in scene_type_results.items():
            logger.record(f'{k}', v)
    logger.record('time/total timesteps', iter_num, exclude='tensorboard')
    logger.dump(iter_num)
    metric_set.evaluator.reset()
    # Restore autograd for any subsequent training.
    torch.set_grad_enabled(True)
class RequirementEditor():
_editor: (((None | ResourceRequirementEditor) | ArrayRequirementEditor) | TemplateRequirementEditor)
    def __init__(self, parent: QWidget, parent_layout: QVBoxLayout, resource_database: ResourceDatabase, *, on_remove=None):
        """Build the editor row: an optional remove ('X') button plus the
        requirement-type combo; the specialized editor widget is attached
        later via ``create_specialized_editor``."""
        self.parent = parent
        self.parent_layout = parent_layout
        self.resource_database = resource_database
        self._editor = None
        # Stash the last state of each requirement kind so switching kinds
        # in the combo round-trips without losing user input.
        self._last_resource = None
        self._last_items = ()
        self._last_comment = None
        self.line_layout = QHBoxLayout()
        self.line_layout.setAlignment(Qt.AlignLeft)
        self.parent_layout.addLayout(self.line_layout)
        if (on_remove is not None):
            self.remove_button = QtWidgets.QToolButton(parent)
            self.remove_button.setText('X')
            self.remove_button.setMaximumWidth(20)
            self.remove_button.clicked.connect(on_remove)
            self.line_layout.addWidget(self.remove_button)
        else:
            self.remove_button = None
        self.requirement_type_combo = QComboBox(parent)
        self.requirement_type_combo.addItem('Resource', ResourceRequirement)
        self.requirement_type_combo.addItem('Or', RequirementOr)
        self.requirement_type_combo.addItem('And', RequirementAnd)
        if resource_database.requirement_template:
            # Only offer the Template option when the database defines templates.
            self.requirement_type_combo.addItem('Template', RequirementTemplate)
        self.requirement_type_combo.setMaximumWidth(75)
        self.requirement_type_combo.activated.connect(self._on_change_requirement_type)
        self.line_layout.addWidget(self.requirement_type_combo)
def create_specialized_editor(self, requirement: Requirement):
if isinstance(requirement, ResourceRequirement):
requirement_type = ResourceRequirement
else:
requirement_type = type(requirement)
signal_handling.set_combo_with_value(self.requirement_type_combo, requirement_type)
if isinstance(requirement, ResourceRequirement):
self._editor = ResourceRequirementEditor(self.parent, self.line_layout, self.resource_database, requirement)
elif isinstance(requirement, RequirementArrayBase):
self._editor = ArrayRequirementEditor(self.parent, self.parent_layout, self.line_layout, self.resource_database, requirement)
elif isinstance(requirement, RequirementTemplate):
self._editor = TemplateRequirementEditor(self.parent, self.line_layout, self.resource_database, requirement)
else:
raise RuntimeError(f'Unknown requirement type: {type(requirement)} - {requirement}')
def _on_change_requirement_type(self):
current_requirement = self.current_requirement
self._editor.deleteLater()
if isinstance(current_requirement, ResourceRequirement):
self._last_resource = current_requirement
elif isinstance(current_requirement, RequirementArrayBase):
self._last_items = current_requirement.items
self._last_comment = current_requirement.comment
elif isinstance(current_requirement, RequirementTemplate):
pass
else:
raise RuntimeError(f'Unknown requirement type: {type(current_requirement)} - {current_requirement}')
new_class = self.requirement_type_combo.currentData()
if (new_class == ResourceRequirement):
if (self._last_resource is None):
new_requirement = _create_default_resource_requirement(self.resource_database)
else:
new_requirement = self._last_resource
elif (new_class == RequirementTemplate):
new_requirement = _create_default_template_requirement(self.resource_database)
else:
new_requirement = new_class(self._last_items, self._last_comment)
self.create_specialized_editor(new_requirement)
def deleteLater(self):
if (self.remove_button is not None):
self.remove_button.deleteLater()
self.requirement_type_combo.deleteLater()
if (self._editor is not None):
self._editor.deleteLater()
def current_requirement(self) -> Requirement:
return self._editor.current_requirement |
def simple_test(env_fn, learn_fn, min_reward_fraction, n_trials=N_TRIALS):
    """Train a model in a single dummy-vectorized env and assert that the
    total reward over `n_trials` steps exceeds `min_reward_fraction * n_trials`.
    """
    # Fixed seeds keep the check deterministic across runs.
    np.random.seed(0)
    np_random.seed(0)
    env = DummyVecEnv([env_fn])
    config = tf.ConfigProto(allow_soft_placement=True)
    # The session must be created while the fresh graph is the default.
    with tf.Graph().as_default(), tf.Session(config=config).as_default():
        tf.set_random_seed(0)
        model = learn_fn(env)
        total_reward = 0
        done = True
        for _ in range(n_trials):
            if done:
                obs = env.reset()
                state = model.initial_state
            # Recurrent policies carry state between steps; others don't.
            if state is None:
                (action, value, _, _) = model.step(obs)
            else:
                (action, value, state, _) = model.step(obs, S=state, M=[False])
            (obs, rew, done, _) = env.step(action)
            total_reward += float(rew)
        print('Reward in {} trials is {}'.format(n_trials, total_reward))
        assert total_reward > (min_reward_fraction * n_trials), 'sum of rewards {} is less than {} of the total number of trials {}'.format(total_reward, min_reward_fraction, n_trials)
class Bits():
    """Fixed-width bit vector with modular (wrap-around) arithmetic.

    The value is stored as an unsigned int ``_uint`` of width ``_nbits``
    (1 <= nbits < 1024).  ``_next`` is the shadow value written by the
    non-blocking ``<<=`` operator and committed into ``_uint`` by ``_flip``.
    Binary operators require an exact bitwidth match between two Bits
    operands (a plain int operand must simply fit in the width) and
    truncate their result back to ``nbits`` bits.  ``_upper`` / ``_lower``
    and ``_new_valid_bits`` are module-level helpers defined elsewhere in
    this module (per-width value bounds and an unchecked constructor).
    """
    __slots__ = ('_nbits', '_uint', '_next')

    @property
    def nbits(self):
        # Fix: restored @property (lost in extraction).  Every consumer in
        # this class reads `v.nbits` / `other.nbits` as an attribute, never
        # calls it; without the decorator each width comparison would compare
        # a bound method with an int and always raise the mismatch error.
        return self._nbits

    def __init__(self, nbits, v=0, trunc_int=False):
        """Build a Bits of width `nbits` holding `v` (int or Bits).

        With trunc_int=True an out-of-range int is silently masked to the
        width instead of raising ValueError.
        """
        nbits = int(nbits)
        if ((nbits < 1) or (nbits >= 1024)):
            raise ValueError(f'Only support 1 <= nbits < 1024, not {nbits}')
        self._nbits = nbits
        if isinstance(v, Bits):
            # Bits-to-Bits construction requires an exact width match.
            if (nbits != v.nbits):
                if (nbits < v.nbits):
                    raise ValueError(f'''The Bits{v.nbits} object on RHS is too wide to be used to construct Bits{nbits}!
- Suggestion: directly use trunc( value, {nbits}/Bits{nbits} )''')
                else:
                    raise ValueError(f'''The Bits{v.nbits} object on RHS is too narrow to be used to construct Bits{nbits}!
- Suggestion: directly use zext/sext(value, {nbits}/Bits{nbits} )''')
            self._uint = v._uint
        else:
            v = int(v)
            up = _upper[nbits]
            if (not trunc_int):
                lo = _lower[nbits]
                if ((v < lo) or (v > up)):
                    raise ValueError(f'''Value {hex(v)} is too wide for Bits{nbits}!
(Bits{nbits} only accepts {hex(lo)} <= value <= {hex(up)})''')
            # Mask to width: negative in-range ints become their 2's complement.
            self._uint = (v & up)

    def __ilshift__(self, v):
        """Non-blocking assignment: stage `v` into `_next` (committed by _flip)."""
        nbits = self._nbits
        try:
            if (v.nbits != nbits):
                if (v.nbits < nbits):
                    raise ValueError(f'''Bitwidth of LHS must be equal to RHS during <<= non-blocking assignment, but here LHS Bits{nbits} > RHS Bits{v.nbits}.
- Suggestion: LHS = zext/sext(RHS, nbits/Type)''')
                else:
                    raise ValueError(f'''Bitwidth of LHS must be equal to RHS during <<= non-blocking assignment, but here LHS Bits{nbits} < RHS Bits{v.nbits}.
- Suggestion: LHS = trunc(RHS, nbits/Type)''')
            self._next = v.to_bits()._uint
        except AttributeError:
            # RHS is a plain int (or int-convertible): range-check then mask.
            v = int(v)
            lo = _lower[nbits]
            up = _upper[nbits]
            if ((v < lo) or (v > up)):
                raise ValueError(f'''RHS value {hex(v)} of <<= is too wide for LHS Bits{nbits}!
(Bits{nbits} only accepts {hex(lo)} <= value <= {hex(up)})''')
            self._next = (v & up)
        return self

    def _flip(self):
        # Commit the staged non-blocking value.
        self._uint = self._next

    def clone(self):
        return _new_valid_bits(self._nbits, self._uint)

    def __deepcopy__(self, memo):
        # Bits holds only ints, so deepcopy degenerates to clone.
        return _new_valid_bits(self._nbits, self._uint)

    def __imatmul__(self, v):
        """Blocking assignment: write `v` into `_uint` immediately."""
        nbits = self._nbits
        try:
            if (v.nbits != nbits):
                if (v.nbits < nbits):
                    raise ValueError(f'''Bitwidth of LHS must be equal to RHS during = blocking assignment, but here LHS Bits{nbits} > RHS Bits{v.nbits}.
- Suggestion: LHS = zext/sext(RHS, nbits/Type)''')
                else:
                    raise ValueError(f'''Bitwidth of LHS must be equal to RHS during = blocking assignment, but here LHS Bits{nbits} < RHS Bits{v.nbits}.
- Suggestion: LHS = trunc(RHS, nbits/Type)''')
            self._uint = v.to_bits()._uint
        except AttributeError:
            v = int(v)
            lo = _lower[nbits]
            up = _upper[nbits]
            if ((v < lo) or (v > up)):
                raise ValueError(f'''RHS value {hex(v)} of = is too wide for LHS Bits{nbits}!
(Bits{nbits} only accepts {hex(lo)} <= value <= {hex(up)})''')
            self._uint = (v & up)
        return self

    def to_bits(self):
        # Already a Bits; identity.
        return self

    def __getitem__(self, idx):
        """Bit/slice read: b[i] -> Bits1, b[lo:hi] -> Bits(hi-lo). No step allowed."""
        if isinstance(idx, slice):
            if idx.step:
                raise IndexError('Index cannot contain step')
            try:
                (start, stop) = (int((idx.start or 0)), int((idx.stop or self._nbits)))
                assert (0 <= start < stop <= self._nbits)
            except:
                raise IndexError(f'Invalid access: [{idx.start}:{idx.stop}] in a Bits{self._nbits} instance')
            nbits = (stop - start)
            return _new_valid_bits((stop - start), ((self._uint >> start) & _upper[nbits]))
        i = int(idx)
        if ((i >= self._nbits) or (i < 0)):
            raise IndexError(f'Invalid access: [{i}] in a Bits{self._nbits} instance')
        return _new_valid_bits(1, ((self._uint >> i) & 1))

    def __setitem__(self, idx, v):
        """Bit/slice write in place; the value must fit the target width."""
        sv = int(self._uint)
        if isinstance(idx, slice):
            if idx.step:
                raise IndexError('Index cannot contain step')
            try:
                (start, stop) = (int((idx.start or 0)), int((idx.stop or self._nbits)))
                assert (0 <= start < stop <= self._nbits)
            except:
                raise IndexError(f'Invalid access: [{idx.start}:{idx.stop}] in a Bits{self._nbits} instance')
            slice_nbits = (stop - start)
            if isinstance(v, Bits):
                if (v.nbits != slice_nbits):
                    if (v.nbits < slice_nbits):
                        raise ValueError(f'''Cannot fit a Bits{v.nbits} object into a {slice_nbits}-bit slice [{start}:{stop}]
- Suggestion: sext/zext the RHS''')
                    else:
                        raise ValueError(f'''Cannot fit a Bits{v.nbits} object into a {slice_nbits}-bit slice [{start}:{stop}]
- Suggestion: trunc the RHS''')
                # Clear the slice bits, then OR in the new value shifted into place.
                self._uint = ((sv & (~ ((1 << stop) - (1 << start)))) | ((v._uint & _upper[slice_nbits]) << start))
            else:
                v = int(v)
                lo = _lower[slice_nbits]
                up = _upper[slice_nbits]
                if ((v < lo) or (v > up)):
                    raise ValueError(f'''Cannot fit {v} into a Bits{slice_nbits} slice
(Bits{slice_nbits} only accepts {hex(lo)} <= value <= {hex(up)})''')
                self._uint = ((sv & (~ ((1 << stop) - (1 << start)))) | ((v & _upper[slice_nbits]) << start))
            return
        i = int(idx)
        if ((i >= self._nbits) or (i < 0)):
            raise IndexError(f'Invalid access: [{i}] in a Bits{self._nbits} instance')
        if isinstance(v, Bits):
            if (v.nbits > 1):
                raise ValueError(f'Cannot fit a Bits{v.nbits} object into the 1-bit slice')
            self._uint = ((sv & (~ (1 << i))) | ((v._uint & 1) << i))
        else:
            v = int(v)
            # Accepts -1/0/1; the final `& 1` maps -1 to 1.
            if (abs(v) > 1):
                raise ValueError(f'''Value {hex(v)} is too big for the 1-bit slice!
''')
            self._uint = ((sv & (~ (1 << i))) | ((int(v) & 1) << i))

    # --- Arithmetic operators: width-checked, result truncated to nbits. ---

    def __add__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '+' (add) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, ((self._uint + other._uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            up = _upper[nbits]
            if ((other < 0) or (other > up)):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
            return _new_valid_bits(nbits, ((self._uint + other) & up))

    def __radd__(self, other):
        # Addition is commutative.
        return self.__add__(other)

    def __sub__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '-' (sub) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, ((self._uint - other._uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            up = _upper[nbits]
            if ((other < 0) or (other > up)):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
            return _new_valid_bits(nbits, ((self._uint - other) & up))

    def __rsub__(self, other):
        # int - Bits: only ints in [0, upper] are accepted.
        nbits = self._nbits
        other = int(other)
        up = _upper[nbits]
        if ((other < 0) or (other > up)):
            raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
        return _new_valid_bits(nbits, ((other - self._uint) & up))

    def __mul__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '*' (mul) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, ((self._uint * other._uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            up = _upper[nbits]
            if ((other < 0) or (other > up)):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
            return _new_valid_bits(nbits, ((self._uint * other) & up))

    def __rmul__(self, other):
        return self.__mul__(other)

    # --- Bitwise operators. ---

    def __and__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '&' (and) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, (self._uint & other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint & other))

    def __rand__(self, other):
        return self.__and__(other)

    def __or__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '|' (or) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, (self._uint | other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint | other))

    def __ror__(self, other):
        return self.__or__(other)

    def __xor__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '^' (xor) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, (self._uint ^ other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint ^ other))

    def __rxor__(self, other):
        return self.__xor__(other)

    def __floordiv__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '//' (div) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, ((self._uint // other._uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint // other))

    def __rfloordiv__(self, other):
        nbits = self._nbits
        other = int(other)
        up = _upper[nbits]
        if ((other < 0) or (other > up)):
            raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
        return _new_valid_bits(nbits, (other // self._uint))

    def __mod__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '%' (mod) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, ((self._uint % other._uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint % other))

    def __rmod__(self, other):
        nbits = self._nbits
        other = int(other)
        up = _upper[nbits]
        if ((other < 0) or (other > up)):
            raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(up)}''')
        return _new_valid_bits(nbits, (other % self._uint))

    def __invert__(self):
        nbits = self._nbits
        return _new_valid_bits(nbits, ((~ self._uint) & _upper[nbits]))

    def __lshift__(self, other):
        # Shift amount >= nbits yields zero (hardware semantics).
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '<<' (lshift) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            uint = other._uint
            if (uint >= nbits):
                return _new_valid_bits(self._nbits, 0)
            return _new_valid_bits(nbits, ((self._uint << uint) & _upper[nbits]))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            if (other >= nbits):
                return _new_valid_bits(self._nbits, 0)
            return _new_valid_bits(nbits, ((self._uint << other) & _upper[nbits]))

    def __rshift__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '>>' (rshift) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(nbits, (self._uint >> other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(nbits, (self._uint >> other))

    # --- Comparisons: return Bits1, not bool (hardware-style). ---

    def __eq__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '==' (eq) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint == other._uint))
        except AttributeError:
            try:
                other = int(other)
            except:
                # Not int-convertible: unequal by definition.
                return _new_valid_bits(1, 0)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(1, (self._uint == other))

    def __ne__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '!=' (ne) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint != other._uint))
        except AttributeError:
            try:
                other = int(other)
            except:
                return _new_valid_bits(1, 1)
            if ((other < 0) or (other > _upper[nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{nbits}!
Suggestion: 0 <= x <= {hex(_upper[nbits])}''')
            return _new_valid_bits(1, (self._uint != other))

    def __lt__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '<' (lt) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint < other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{self._nbits}!
Suggestion: 0 <= x <= {hex(_upper[self._nbits])}''')
            return _new_valid_bits(1, (self._uint < other))

    def __le__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '<=' (le) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint <= other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{self._nbits}!
Suggestion: 0 <= x <= {hex(_upper[self._nbits])}''')
            return _new_valid_bits(1, (self._uint <= other))

    def __gt__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '>' (gt) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint > other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{self._nbits}!
Suggestion: 0 <= x <= {hex(_upper[self._nbits])}''')
            return _new_valid_bits(1, (self._uint > other))

    def __ge__(self, other):
        nbits = self._nbits
        try:
            if (other.nbits != nbits):
                raise ValueError(f'''Operands of '>=' (ge) operation must have matching bitwidth, but here Bits{nbits} != Bits{other.nbits}.
''')
            return _new_valid_bits(1, (self._uint >= other._uint))
        except AttributeError:
            other = int(other)
            if ((other < 0) or (other > _upper[self._nbits])):
                raise ValueError(f'''Integer {hex(other)} is not a valid binop operand with Bits{self._nbits}!
Suggestion: 0 <= x <= {hex(_upper[self._nbits])}''')
            return _new_valid_bits(1, (self._uint >= other))

    # --- Conversions and representations. ---

    def __bool__(self):
        return (self._uint != 0)

    def __int__(self):
        return int(self._uint)

    def int(self):
        """Interpret the stored value as a signed (2's complement) integer."""
        if (self._uint >> (self._nbits - 1)):
            # MSB set: negative value.
            return (- int(((~ self) + 1)))
        return self._uint

    def uint(self):
        return self._uint

    def __index__(self):
        # Allows Bits to be used directly as a sequence index.
        return int(self._uint)

    def __hash__(self):
        return hash((self._nbits, self._uint))

    def __repr__(self):
        return 'Bits{}(0x{})'.format(self._nbits, '{:x}'.format(int(self._uint)).zfill((((self._nbits - 1) // 4) + 1)))

    def __str__(self):
        # Zero-padded hex without prefix (local renamed from `str` to avoid
        # shadowing the builtin).
        text = '{:x}'.format(int(self._uint)).zfill((((self._nbits - 1) // 4) + 1))
        return text

    def bin(self):
        text = '{:b}'.format(int(self._uint)).zfill(self._nbits)
        return ('0b' + text)

    def oct(self):
        text = '{:o}'.format(int(self._uint)).zfill((((self._nbits - 1) // 3) + 1))
        return ('0o' + text)

    def hex(self):
        text = '{:x}'.format(int(self._uint)).zfill((((self._nbits - 1) // 4) + 1))
        return ('0x' + text)
def test_sync_2():
    """Scatter/map/gather round-trip through a local cluster stays in sync."""
    with cluster() as (s, [a, b]):
        with Client(s['address']):
            source = Stream()
            results = source.scatter().map(inc).gather().sink_to_list()
            # Each emit must be reflected in the sink before the next one.
            for value in range(10):
                source.emit(value)
                assert len(results) == value + 1
            assert results == [inc(v) for v in range(10)]
class ToggledPlayOrderMenu(Gtk.Box):
    """Toggle button paired with a drop-down menu of play Order classes.

    Emits `toggled` when the button state changes and `changed` (with the
    Order class) when a different order is chosen from the menu.
    """
    __gsignals__ = {'toggled': (GObject.SignalFlags.RUN_LAST, None, ()), 'changed': (GObject.SignalFlags.RUN_LAST, None, (object,))}

    def __init__(self, icon_name, orders, current_order, enabled=False, tooltip=None, arrow_down=False):
        assert issubclass(current_order, Order)
        if (current_order not in orders):
            raise ValueError(f'{current_order.__name__} is not supported by {orders}')
        super().__init__()
        # Suppress signal emission while we build the initial state.
        self.__inhibit = True
        context = self.get_style_context()
        context.add_class(Gtk.STYLE_CLASS_LINKED)
        self._toggle_button = toggle = HighlightToggleButton(image=SymbolicIconImage(icon_name, Gtk.IconSize.SMALL_TOOLBAR))
        if tooltip:
            toggle.set_tooltip_text(tooltip)
        toggle.set_active(enabled)
        toggle.show_all()
        qltk.remove_padding(toggle)
        toggle.set_size_request(26, 26)
        self.pack_start(toggle, True, True, 0)

        def forward_signal(*args):
            # Re-emit the button's toggled signal unless inhibited.
            if (not self.__inhibit):
                self.emit('toggled')
        toggle.connect('toggled', forward_signal)
        self._toggle_button = toggle
        from quodlibet.qltk.menubutton import MenuButton
        arrow = MenuButton(arrow=True, down=arrow_down)
        arrow.show_all()
        arrow.set_size_request(20, 26)
        qltk.remove_padding(arrow)
        self.pack_start(arrow, True, True, 0)
        self._menu_button = arrow
        self.__current = current_order
        self.__orders = orders
        self.__rebuild_menu()
        self.__inhibit = False

    # Fix: restored @property / @x.setter decorators (lost in extraction).
    # The getter/setter pairs below were plain defs with duplicate names, so
    # each setter silently shadowed its getter, and attribute-style usage
    # elsewhere in the class (`self.enabled = False`, `self.current = cls`,
    # `self.orders`) would overwrite the methods instead of calling them.
    @property
    def enabled(self):
        return self._toggle_button.get_active()

    @enabled.setter
    def enabled(self, value):
        self.__inhibit = True
        self._toggle_button.set_active(bool(value))
        self.emit('toggled')
        self.__inhibit = False

    @property
    def orders(self):
        return self.__orders

    @orders.setter
    def orders(self, values):
        self.__orders = values
        # Reset the selection if the current order is no longer available.
        if (self.__current not in self.orders):
            self.__current = None
            self.enabled = False
        self.__rebuild_menu()

    @property
    def current(self):
        return self.__current

    @current.setter
    def current(self, value):
        if (value not in self.orders):
            orders = ', '.join((o.__name__ for o in self.__orders))
            raise ValueError(f'Unknown order {value}. Try: {orders}')
        self.__current = value
        if (not self.__inhibit):
            self.emit('changed', self.__current)
        self.__rebuild_menu()

    def set_active_by_name(self, name):
        """Select the order whose `name` matches; raise ValueError if unknown."""
        for cls in self.__orders:
            if (cls.name == name):
                self.current = cls
                return
        raise ValueError(f'Unknown order named "{name}". Try: {[o.name for o in self.__orders]}')

    def set_orders(self, orders):
        self.orders = orders

    def __rebuild_menu(self):
        """Recreate the radio-item menu from the current set of orders."""
        def toggled_cb(item, order):
            if item.get_active():
                self.current = order
        menu = Gtk.Menu()
        group = None
        prev_priority = None

        def ui_sorted(items):
            return sorted(items, key=(lambda k: (k.priority, k.display_name)))
        for order in ui_sorted(self.__orders):
            # Separate priority groups visually.
            if (prev_priority and (order.priority > prev_priority)):
                menu.append(SeparatorMenuItem())
            prev_priority = order.priority
            group = RadioMenuItem(label=order.accelerated_name, use_underline=True, group=group)
            group.set_active((order == self.__current))
            group.connect('toggled', toggled_cb, order)
            menu.append(group)
        menu.show_all()
        self._menu_button.set_menu(menu)
class PluginManager():
def __init__(self, group: str, disable_plugins: bool=False) -> None:
self._group = group
self._disable_plugins = disable_plugins
self._plugins: list[Plugin] = []
def load_plugins(self, env: (Env | None)=None) -> None:
if self._disable_plugins:
return
plugin_entrypoints = self.get_plugin_entry_points(env=env)
for ep in plugin_entrypoints:
self._load_plugin_entry_point(ep)
def _is_plugin_candidate(ep: metadata.EntryPoint, env: (Env | None)=None) -> bool:
return ((env is None) or ((ep.dist is not None) and (env.site_packages.find_distribution(ep.dist.name) is not None)))
def get_plugin_entry_points(self, env: (Env | None)=None) -> list[metadata.EntryPoint]:
return [ep for ep in metadata.entry_points(group=self._group) if self._is_plugin_candidate(ep, env)]
def add_plugin(self, plugin: Plugin) -> None:
if (not isinstance(plugin, (Plugin, ApplicationPlugin))):
raise ValueError('The Poetry plugin must be an instance of Plugin or ApplicationPlugin')
self._plugins.append(plugin)
def activate(self, *args: Any, **kwargs: Any) -> None:
for plugin in self._plugins:
plugin.activate(*args, **kwargs)
def _load_plugin_entry_point(self, ep: metadata.EntryPoint) -> None:
logger.debug('Loading the %s plugin', ep.name)
plugin = ep.load()
if (not issubclass(plugin, (Plugin, ApplicationPlugin))):
raise ValueError('The Poetry plugin must be an instance of Plugin or ApplicationPlugin')
self.add_plugin(plugin()) |
class F28_FcoeData(F13_FcoeData):
    """FCoE kickstart data for Fedora 28: adds the ``--autovlan`` flag."""
    removedKeywords = F13_FcoeData.removedKeywords
    removedAttrs = F13_FcoeData.removedAttrs

    def __init__(self, *args, **kwargs):
        F13_FcoeData.__init__(self, *args, **kwargs)
        # Automatic VLAN discovery; disabled unless explicitly requested.
        self.autovlan = kwargs.get('autovlan', False)

    def _getArgsAsStr(self):
        # Start from the F13 argument string and append our new flag.
        args = F13_FcoeData._getArgsAsStr(self)
        if self.autovlan:
            args = args + ' --autovlan'
        return args
class TestListOrValue():
    """Tests for configtypes.ListOrValue.

    NOTE(review): the decorators below were reconstructed -- the extracted
    source had bare `.parametrize(...)` and `(val=strategies...)` statements
    (syntactically invalid) where `@pytest.fixture`,
    `@pytest.mark.parametrize` and `@hypothesis.given` clearly used to be.
    """

    @pytest.fixture
    def klass(self):
        return configtypes.ListOrValue

    @pytest.fixture
    def strtype(self):
        return configtypes.String()

    @pytest.mark.parametrize('val, expected', [('["foo"]', ['foo']), ('["foo", "bar"]', ['foo', 'bar']), ('foo', 'foo')])
    def test_from_str(self, klass, strtype, val, expected):
        assert (klass(strtype).from_str(val) == expected)

    def test_from_str_invalid(self, klass):
        valtype = configtypes.String(minlen=10)
        with pytest.raises(configexc.ValidationError):
            klass(valtype).from_str('123')

    @pytest.mark.parametrize('val, expected', [(['foo'], ['foo']), ('foo', ['foo'])])
    def test_to_py_valid(self, klass, strtype, val, expected):
        assert (klass(strtype).to_py(val) == expected)

    @pytest.mark.parametrize('val', [[42], ['']])
    def test_to_py_invalid(self, klass, strtype, val):
        with pytest.raises(configexc.ValidationError):
            klass(strtype).to_py(val)

    @pytest.mark.parametrize('val', [None, ['foo', 'bar'], 'abcd'])
    def test_to_py_length(self, strtype, klass, val):
        klass(strtype, none_ok=True, length=2).to_py(val)

    @pytest.mark.parametrize('obj, expected', [(['a'], ['a']), ([], []), (None, [])])
    def test_from_obj(self, klass, obj, expected):
        typ = klass(none_ok=True, valtype=configtypes.String())
        assert (typ.from_obj(obj) == expected)

    @pytest.mark.parametrize('val', [['a'], ['a', 'b'], ['a', 'b', 'c', 'd']])
    def test_wrong_length(self, strtype, klass, val):
        with pytest.raises(configexc.ValidationError, match='Exactly 3 values need to be set!'):
            klass(strtype, length=3).to_py(val)

    def test_get_name(self, strtype, klass):
        assert (klass(strtype).get_name() == 'List of String, or String')

    def test_get_valid_values(self, klass):
        valid_values = configtypes.ValidValues('foo', 'bar', 'baz')
        valtype = configtypes.String(valid_values=valid_values)
        assert (klass(valtype).get_valid_values() == valid_values)

    def test_to_str(self, strtype, klass):
        assert (klass(strtype).to_str(['a', True]) == '["a", true]')

    @hypothesis.given(val=strategies.lists(strategies.just('foo')))
    def test_hypothesis(self, strtype, klass, val):
        # Round-trip through to_str/from_str must be lossless for valid values.
        typ = klass(strtype, none_ok=True)
        try:
            converted = typ.to_py(val)
        except configexc.ValidationError:
            pass
        else:
            expected = (converted if converted else [])
            assert (typ.to_py(typ.from_str(typ.to_str(converted))) == expected)

    @hypothesis.given(val=strategies.lists(strategies.just('foo')))
    def test_hypothesis_text(self, strtype, klass, val):
        typ = klass(strtype)
        text = json.dumps(val)
        try:
            typ.to_str(typ.from_str(text))
        except configexc.ValidationError:
            pass

    @pytest.mark.parametrize('val, expected', [(['foo', 'bar'], '\n\n- +pass:[foo]+\n- +pass:[bar]+'), (['foo'], '+pass:[foo]+'), ('foo', '+pass:[foo]+'), ([], 'empty'), (None, 'empty')])
    def test_to_doc(self, klass, strtype, val, expected):
        doc = klass(strtype).to_doc(val)
        print(doc)
        assert (doc == expected)
def RESNET50(include_top=True, weights='vggface', input_tensor=None, input_shape=None, pooling=None, classes=8631):
    """Build the VGGFace ResNet-50 model, optionally loading pretrained weights.

    With include_top=True the network ends in a `classes`-way softmax;
    otherwise `pooling` ('avg'/'max'/None) controls the output head.
    """
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=K.image_data_format(), require_flatten=include_top, weights=weights)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    elif not K.is_keras_tensor(input_tensor):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # BatchNorm axis depends on the channel position.
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    # Stem: 7x7 conv + BN + ReLU + max-pool.
    net = Conv2D(64, (7, 7), use_bias=False, strides=(2, 2), padding='same', name='conv1/7x7_s2')(img_input)
    net = BatchNormalization(axis=bn_axis, name='conv1/7x7_s2/bn')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((3, 3), strides=(2, 2))(net)
    # Stage 2: one conv block (stride 1) + two identity blocks.
    net = resnet_conv_block(net, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1))
    for blk in range(2, 4):
        net = resnet_identity_block(net, 3, [64, 64, 256], stage=2, block=blk)
    # Stage 3: conv block + three identity blocks.
    net = resnet_conv_block(net, 3, [128, 128, 512], stage=3, block=1)
    for blk in range(2, 5):
        net = resnet_identity_block(net, 3, [128, 128, 512], stage=3, block=blk)
    # Stage 4: conv block + five identity blocks.
    net = resnet_conv_block(net, 3, [256, 256, 1024], stage=4, block=1)
    for blk in range(2, 7):
        net = resnet_identity_block(net, 3, [256, 256, 1024], stage=4, block=blk)
    # Stage 5: conv block + two identity blocks.
    net = resnet_conv_block(net, 3, [512, 512, 2048], stage=5, block=1)
    for blk in range(2, 4):
        net = resnet_identity_block(net, 3, [512, 512, 2048], stage=5, block=blk)
    net = AveragePooling2D((7, 7), name='avg_pool')(net)
    if include_top:
        net = Flatten()(net)
        net = Dense(classes, activation='softmax', name='classifier')(net)
    elif pooling == 'avg':
        net = GlobalAveragePooling2D()(net)
    elif pooling == 'max':
        net = GlobalMaxPooling2D()(net)
    # Anchor the model at the original input tensor when one was supplied.
    inputs = get_source_inputs(input_tensor) if input_tensor is not None else img_input
    model = Model(inputs, net, name='vggface_resnet50')
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_resnet50.h5', utils.RESNET50_WEIGHTS_PATH, cache_subdir=utils.VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_resnet50.h5', utils.RESNET50_WEIGHTS_PATH_NO_TOP, cache_subdir=utils.VGGFACE_DIR)
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='classifier')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
        if (K.image_data_format() == 'channels_first') and (K.backend() == 'tensorflow'):
            warnings.warn('You are using the TensorFlow backend, yet you are using the Theano image data format convention (`image_data_format="channels_first"`). For best performance, set `image_data_format="channels_last"` in your Keras config at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)
    return model
class TrainerTest(tf.test.TestCase):
    """Smoke test: configure a trainer from a text proto and run two steps."""

    def test_configure_trainer_and_train_two_steps(self):
        # Adam with a constant learning rate, two augmentation ops, and a
        # hard cap of two training steps.
        train_config_text_proto = '\n    optimizer {\n      adam_optimizer {\n        learning_rate {\n          constant_learning_rate {\n            learning_rate: 0.01\n          }\n        }\n      }\n    }\n    data_augmentation_options {\n      random_adjust_brightness {\n        max_delta: 0.2\n      }\n    }\n    data_augmentation_options {\n      random_adjust_contrast {\n        min_delta: 0.7\n        max_delta: 1.1\n      }\n    }\n    num_steps: 2\n  '
        train_config = train_pb2.TrainConfig()
        text_format.Merge(train_config_text_proto, train_config)
        trainer.train(create_tensor_dict_fn=get_input_function, create_model_fn=FakeDetectionModel, train_config=train_config, master='', task=0, num_clones=1, worker_replicas=1, clone_on_cpu=True, ps_tasks=0, worker_job_name='worker', is_chief=True, train_dir=self.get_temp_dir())
class UCCSD(UCC):
    """UCC ansatz fixed to single and double excitations (the UCCSD ansatz)."""

    def __init__(self, num_spatial_orbitals: (int | None)=None, num_particles: (tuple[(int, int)] | None)=None, qubit_mapper: (QubitMapper | None)=None, *, reps: int=1, initial_state: (QuantumCircuit | None)=None, generalized: bool=False, preserve_spin: bool=True, include_imaginary: bool=False) -> None:
        # Delegate to the generic UCC ansatz, pinning the excitation set to
        # singles-and-doubles with both spin species enabled and no cap on
        # spin excitations.
        fixed_options = dict(excitations='sd', alpha_spin=True, beta_spin=True, max_spin_excitation=None)
        super().__init__(
            num_spatial_orbitals=num_spatial_orbitals,
            num_particles=num_particles,
            qubit_mapper=qubit_mapper,
            generalized=generalized,
            preserve_spin=preserve_spin,
            include_imaginary=include_imaginary,
            reps=reps,
            initial_state=initial_state,
            **fixed_options,
        )
class Test_keyImport(ElectrumTestCase):
    """Tests for Bitcoin private-key / address parsing and serialization helpers.

    ``priv_pub_addr`` is a fixture table: each entry bundles a private key
    (plain WIF or ``<txin_type>:``-prefixed) with every value the helpers
    under test are expected to derive from it — public key, address, script
    type, compression flag, address encoding and scripthash.
    """

    # Fixture table consumed by every test method below. Covers compressed
    # and uncompressed keys, p2pkh / p2wpkh-p2sh / p2wpkh script types,
    # base58 and bech32 address encodings, and one minikey entry.
    priv_pub_addr = ({'priv': 'KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6', 'exported_privkey': 'p2pkh:KzMFjMC2MPadjvX5Cd7b8AKKjjpBSoRKUTpoAtN6B3J9ezWYyXS6', 'pub': '02c6467b7eed3e4835b0b4ab7e35266a2ae1c4f8baa19e9ca', 'address': '17azqT8T16coRmWKYFj3UjzJuxiYrYFRBR', 'minikey': False, 'txin_type': 'p2pkh', 'compressed': True, 'addr_encoding': 'base58', 'scripthash': 'c9aecd1fef8d661a42c560bf75c8163eb8face5ca3d1393a30508a7'}, {'priv': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD', 'exported_privkey': 'p2pkh:Kzj8VjwpZ99bQqVeUiRXrKuX9mLr1o6sWxFMCBJn1umC38BMiQTD', 'pub': '0352d78b4b37e0f6d4ef2925faeca550a88f2821973c41', 'address': '1GXgZ5Qi6gmXTHVSpUPZLy4Ci2nbfb3ZNb', 'minikey': False, 'txin_type': 'p2pkh', 'compressed': True, 'addr_encoding': 'base58', 'scripthash': 'a9b2a76fc196c553b352186dfcca81fcf323a721cd8431328f8e9dc1'}, {'priv': '5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD', 'exported_privkey': 'p2pkh:5Hxn5C4SQuiV6e62A1MtZmbSeQyrLFhu5uYks62pU5VBUygK2KD', 'pub': '04e5fe91a20fac945845a5518450d23405ff3e3e1ce39827b47ee6d5db020a9075422d56a59195ada0035e4a52a238849f68e7a325ba5b2247013e0481c5c7cb3f', 'address': '1GPHVTY8UD9my6jyP4tb2TYJwUbDetyNC6', 'minikey': False, 'txin_type': 'p2pkh', 'compressed': False, 'addr_encoding': 'base58', 'scripthash': 'fe1166f725a5829ff9576d0dbfbf13abd2af7f79473'}, {'priv': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN', 'exported_privkey': 'p2pkh:5KhYQCe1xd5g2tqpmmGpUWDpDuTbA8vnpbiCNDwMPAx29WNQYfN', 'pub': '048f0431b0776e8210376cc2b68be43194cb00bd47b7e9aa66284b713ce09556cde3fee606051a07613f3c159ef3953b8927c96ae3dae94a6ba4182e0e', 'address': '147kiRHHm9fqeMQSgqf4k35XzuWLP9fmmS', 'minikey': False, 'txin_type': 'p2pkh', 'compressed': False, 'addr_encoding': 'base58', 'scripthash': '6dd2e07ad2de9ba8eec4bbe8467eb53f8845acff0d9e6f5627391acc22ff62df'}, {'priv': 'LHJnnvRzsdrTX2j5QeWVsaBkabK7gfMNqNNqxnbBVRaJYfk24iJz', 'exported_privkey': 'p2wpkh-p2sh:Kz9XebiCXL2BZzhYJViiHDzn5iup1povWV8aqstzWU4sz1K5nVva',
                     'pub': '0279ad237ca0d812fb503ab86f25e15ebd5fa5dd95c193639a8a738dcd1acbad81', 'address': '3GeVJB3oKr7psgKR6BTXSxKtWUkfsHHhk7', 'minikey': False, 'txin_type': 'p2wpkh-p2sh', 'compressed': True, 'addr_encoding': 'base58', 'scripthash': 'd7b04e882fa6bac552a2b21461d9152eb00f0a6adb58457a3e63d7c5'}, {'priv': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW', 'exported_privkey': 'p2wpkh-p2sh:L3CZH1pm87X4bbE6mSGvZnAZ1KcFDRomBudUkrkBG7EZhDtBVXMW', 'pub': '0229da20a15b3363b2c28e3c5093c180b56c439df0b968a970366bb1fe', 'address': '3C79goMwT7zSTjXnPoCg6VFGAnUpZAkyus', 'minikey': False, 'txin_type': 'p2wpkh-p2sh', 'compressed': True, 'addr_encoding': 'base58', 'scripthash': '714bf6bfe1083e69539f40d4c7a7dca85d187471b35642e55f20d7e866494cf7'}, {'priv': 'L8g5V8kFFeg2WbecahRSdobARbHz2w2STH9S8ePHVSY4fmia7Rsj', 'exported_privkey': 'p2wpkh:Kz6SuyPM5VktY5dr2d2YqdVgBA6LCWkiHqXJaC3BzxnMPSUuYzmF', 'pub': '03e9f948421aaa89415dc5f281a61b60dde12aae3181b3a76cd2d849b164fc6d0b', 'address': 'bc1qqmpt7u5e9hfznljta5gnvhyvfd2kdd0r90hwue', 'minikey': False, 'txin_type': 'p2wpkh', 'compressed': True, 'addr_encoding': 'bech32', 'scripthash': '1929acaaef3a208c715228e9f1ca0318e3a6b9394ab53c8d026137f847ecf97b'}, {'priv': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo', 'exported_privkey': 'p2wpkh:KyDWy5WbjLA58Zesh1o8m3pADGdJ3v33DKk4m7h8BD5zDKDmDFwo', 'pub': '038cc1f73e34d5b3971d05867d50221ad94980f7e87cbc2344425e6a1e', 'address': 'bc1qpakeeg4d9ydyjxd8paqrw4xy9htsg532xzxn50', 'minikey': False, 'txin_type': 'p2wpkh', 'compressed': True, 'addr_encoding': 'bech32', 'scripthash': '242f02adde84ebb2a7dd778b2f3a81b3826f111da4d8960d826d7a4b816cb261'}, {'priv': 'SzavMBLoXU6kDrqtUVmffv', 'exported_privkey': 'p2pkh:5Kb8kLf9zgWQnogidDA76MzPL6TsZZY36hWXMssSzNydYXYB9KF', 'pub': '04588d202afcc1ee4ab5254c7847ec25b9a135bbda0f2bc69ee1a714749fd77dc9f88ff2a00d7e752d44cbe16e1ebcf0890b76ec7cdee76ccfc8445424', 'address': '1CC3X2gu58d6wXUWMffpuzN9JAfTUWu4Kj', 'minikey': True, 'txin_type': 'p2pkh',
                     'compressed': False, 'addr_encoding': 'base58', 'scripthash': '5b07ddfde826f5125eeceaecead5505a766a07c34445'})

    def test_public_key_from_private_key(self):
        """Deserializing a privkey recovers script type, compression flag, and pubkey."""
        for priv_details in self.priv_pub_addr:
            (txin_type, privkey, compressed) = deserialize_privkey(priv_details['priv'])
            result = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
            self.assertEqual(priv_details['pub'], result)
            self.assertEqual(priv_details['txin_type'], txin_type)
            self.assertEqual(priv_details['compressed'], compressed)

    def test_address_from_private_key(self):
        """The address derived from each privkey matches the fixture address."""
        for priv_details in self.priv_pub_addr:
            addr2 = address_from_private_key(priv_details['priv'])
            self.assertEqual(priv_details['address'], addr2)

    def test_is_valid_address(self):
        """is_address accepts only addresses, and the encoding checks agree with the fixture."""
        for priv_details in self.priv_pub_addr:
            addr = priv_details['address']
            # Privkeys and pubkeys must not be mistaken for addresses.
            self.assertFalse(is_address(priv_details['priv']))
            self.assertFalse(is_address(priv_details['pub']))
            self.assertTrue(is_address(addr))
            # Exactly one of the encoding predicates should hold, per fixture.
            is_enc_b58 = (priv_details['addr_encoding'] == 'base58')
            self.assertEqual(is_enc_b58, is_b58_address(addr))
            is_enc_bech32 = (priv_details['addr_encoding'] == 'bech32')
            self.assertEqual(is_enc_bech32, is_segwit_address(addr))
        self.assertFalse(is_address('not an address'))

    def test_is_address_bad_checksums(self):
        """Flipping the final character of a valid address must invalidate it."""
        self.assertTrue(is_address('1819s5TxxbBtuRPr3qYskMVC8sb1pqapWx'))
        self.assertFalse(is_address('1819s5TxxbBtuRPr3qYskMVC8sb1pqapWw'))
        self.assertTrue(is_address('3LrjLVnngqnaJeo3BQwMBg34iqYsjZjQUe'))
        self.assertFalse(is_address('3LrjLVnngqnaJeo3BQwMBg34iqYsjZjQUd'))
        self.assertTrue(is_address('bc1qxq64lrwt02hm7tu25lr3hm9tgzh58snfe67yt6'))
        self.assertFalse(is_address('bc1qxq64lrwt02hm7tu25lr3hm9tgzh58snfe67yt5'))

    def test_is_private_key(self):
        """is_private_key accepts both raw and exported forms, rejects pubkeys/addresses."""
        for priv_details in self.priv_pub_addr:
            self.assertTrue(is_private_key(priv_details['priv']))
            self.assertTrue(is_private_key(priv_details['exported_privkey']))
            self.assertFalse(is_private_key(priv_details['pub']))
            self.assertFalse(is_private_key(priv_details['address']))
        self.assertFalse(is_private_key('not a privkey'))

    def test_serialize_privkey(self):
        """Round trip: deserialize then serialize yields the exported form."""
        for priv_details in self.priv_pub_addr:
            (txin_type, privkey, compressed) = deserialize_privkey(priv_details['priv'])
            priv2 = serialize_privkey(privkey, compressed, txin_type)
            self.assertEqual(priv_details['exported_privkey'], priv2)

    def test_address_to_scripthash(self):
        """Each address maps to the fixture's expected scripthash."""
        for priv_details in self.priv_pub_addr:
            sh = address_to_scripthash(priv_details['address'])
            self.assertEqual(priv_details['scripthash'], sh)

    def test_is_minikey(self):
        """is_minikey agrees with the fixture's minikey flag for every entry."""
        for priv_details in self.priv_pub_addr:
            minikey = priv_details['minikey']
            priv = priv_details['priv']
            self.assertEqual(minikey, is_minikey(priv))

    def test_is_compressed_privkey(self):
        """is_compressed_privkey agrees with the fixture's compression flag."""
        for priv_details in self.priv_pub_addr:
            self.assertEqual(priv_details['compressed'], is_compressed_privkey(priv_details['priv']))

    def test_segwit_uncompressed_pubkey(self):
        """Segwit script types with an uncompressed key must be rejected."""
        with self.assertRaises(BitcoinException):
            is_private_key('p2wpkh-p2sh:5JKXxT3wAZHcybJ9YNkuHur9vou6uuAnorBV9A8vVxGNFH5wvTW', raise_on_error=True)

    def test_wif_with_invalid_magic_byte_for_compressed_pubkey(self):
        """A WIF whose compression magic byte is invalid must be rejected."""
        with self.assertRaises(BitcoinException):
            is_private_key('KwFAa6AumokBD2dVqQLPou42jHiVsvThY1n25HJ8Ji8REf1wxAQb', raise_on_error=True)
# NOTE(review): the following lines are non-code residue (dataset-viewer page
# text) accidentally pasted into the file; commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.