input
stringlengths
2.65k
237k
output
stringclasses
1 value
from pathlib import Path from itertools import zip_longest import difflib from Framework import Utility_Wrapper from Framework import Plugin_Log from Framework import Print from Framework.File_Manager import XML_File from Framework.File_Manager.Cat_Reader import Get_Hash_String from Framework.File_Manager.XML_Diff import Print as XML_Print # TODO: merge this in with the Game_File system if run as part of # a script and not from the command line launcher, where the outputs # would instead be to the corresponding path in the dest extension. # TODO: maybe add generate_sigs support. @Utility_Wrapper(uses_paths_from_settings = False) def Generate_Diffs( original_dir_path, modified_dir_path, output_dir_path, skip_unchanged = False, verbose = False, ): ''' Generate diffs for changes between two xml containing folders, creating diff patches. * original_dir_path - Path to the original xml file that acts as the baseline. * modified_dir_path - Path to the modified version of the xml file. * output_dir_path - Path to write the diff patch to. * skip_unchanged - Bool, skip output for files that are unchanged (removing any existing diff patch). - Default will generate empty diff patches. * verbose - Bool, print the path of the outputs on succesful writes. ''' # Cast to paths to be safe. original_dir_path = Path(original_dir_path).resolve() modified_dir_path = Path(modified_dir_path).resolve() output_dir_path = Path(output_dir_path).resolve() # Gather all xml files from the input directorys. # Make dicts for ease of use, keyed by relative path from the # base folder. #original_paths = {x.relative_to(original_dir_path) : x for x in original_dir_path.glob('**/*.xml')} modified_paths = {x.relative_to(modified_dir_path) : x for x in modified_dir_path.glob('**/*.xml')} # Pair off the modified files with originals by name. # If an original is not found, error. # Ignore excess originals. 
for rel_path, mod_path in modified_paths.items(): orig_path = original_dir_path / rel_path if not orig_path.exists(): Print('No matching original file found for {}'.format(rel_path.name)) continue # Set up the output. out_path = output_dir_path / rel_path if verbose: Print('Generating diff for {}'.format(rel_path.name)) # Generate the diff. If this errors, the file will be skipped # (due to plugin wrapper). Generate_Diff( original_file_path = orig_path, modified_file_path = mod_path, output_file_path = out_path, skip_unchanged = skip_unchanged, verbose = verbose ) return @Utility_Wrapper(uses_paths_from_settings = False) def Generate_Diff( original_file_path, modified_file_path, output_file_path, skip_unchanged = False, verbose = False, ): ''' Generate a diff of changes between two xml files, creating a diff patch. * original_file_path - Path to the original xml files that act as the baseline. * modified_file_path - Path to the modified versions of the xml files. * output_file_path - Path to write the diff patches to. * skip_unchanged - Bool, skip output for files that are unchanged (removing any existing diff patch). - Default will generate empty diff patches. * verbose - Bool, print the path of the outputs on succesful writes. ''' # Cast to paths to be safe. original_file_path = Path(original_file_path).resolve() modified_file_path = Path(modified_file_path).resolve() output_file_path = Path(output_file_path).resolve() if (original_file_path == modified_file_path or output_file_path == original_file_path or output_file_path == modified_file_path): raise Exception('Path conflict error') # List of messages to print out. messages = [] def Print_Messages(): 'Prints all pending messages.' while messages: message = messages.pop(0) # TODO: maybe allow this if Settings are set up, otherwise # might give an error on eg. missing x4 path. #Plugin_Log.Print(message) if verbose: Print(message) # Load the original. 
base_game_file = XML_File( # Virtual path doesn't matter, though can be useful for debug, # so try to fill in something. virtual_path = output_file_path.name, binary = original_file_path.read_bytes(), # Flag as the source; this will trigger diff patch generation later. from_source = True, ) # Finish initializing it; no diff patches to wait for. # This fills in initial node ids. base_game_file.Delayed_Init() # Load the modified. Just want the xml nodes, but use a game_file # for consistent loading format. temp_game_file = XML_File( virtual_path = '', binary = modified_file_path.read_bytes(), ) # Go ahead and give node ids. Not too important, but might do some # misc formatting, eg. removing tails. temp_game_file.Delayed_Init() # Pick out the roots. original_root = base_game_file.Get_Root() modified_root = temp_game_file.Get_Root() # Start by using a standard text diff library. # This is very good at matching up exact nodes regardless of their # parentage. Not so good at handling attribute changes or data # structure changes. # Returns a dict pairing original with modified nodes. text_based_node_matches, changed = Get_Text_Diff_Matches(original_root, modified_root) # If files match, check arg for skipping the file. if not changed and skip_unchanged: messages.append('File unchanged: {}'.format(modified_file_path)) # Check if an output file already exists and delete it. if output_file_path.exists(): output_file_path.unlink() messages.append('Removing prior diff: {}'.format(output_file_path)) else: # Don't need to put the modified root back if there are no changes. if changed: # Follow up with a manual traversal of the trees, completing matches. Match_Trees(original_root, modified_root, text_based_node_matches) # Put the modified xml back in the game_file. base_game_file.Update_Root(modified_root) # Write to file. This will trigger the diff patch generation, # empty if no changes. # This also makes the directory if needed. 
base_game_file.Write_File(output_file_path) # The above can be handy as a print message to verify the update. messages.append('Generated diff written to: {}'.format(output_file_path)) Print_Messages() return class Element_Wrap: ''' Wrapper on xml elements with custom comparison rules. ''' def __init__(self, xml): self.xml = xml self.tag = xml.tag self.attrib = dict(xml.attrib) self.text = xml.text # String version of this element, flattened, for easy comparison. # TODO: maybe check parent tags as well, for conservative matching. self.hash_str = '{}{{{}}}{}'.format( self.tag, ','.join(['{}:{}'.format(k,v) for k,v in sorted(self.attrib.items())]), self.text) # Hash to be used. # Note: just doing something like id(self) gives horrible results; # the hash appears to be part of the comparison. self.hash_int = hash(self.hash_str) return def __eq__(self, other): return self.hash_str == other.hash_str def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return self.hash_int # Alternative to the above: breaks out the element into a series # of pieces for finer grain matching. class Element_Piece: ''' For better difflib matching, this will be just a piece of an xml element. * text - String, text for this piece. * is_tag - Bool, True if this is the tag string. * xml - Original XML Element. ''' def __init__(self, text, is_tag = False, xml = None): self.text = text self.is_tag = is_tag self.xml = xml # Stich the is_tag flag in the hash/comparison string for # robustness. self.hash_str = f'{"_t_" if is_tag else ""}{text}' self.hash_int = hash(self.hash_str) return def __eq__(self, other): return self.hash_str == other.hash_str def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return self.hash_int def Element_To_Pieces(node): ''' Returns a list of Element_Pieces for a given xml node. 
''' ret_list = [Element_Piece(node.tag, is_tag = True, xml = node)] for k,v in sorted(node.attrib.items()): ret_list.append(Element_Piece(f'{k}:{v}')) if node.text: ret_list.append(Element_Piece(node.text)) return ret_list def Get_Text_Diff_Matches(original_root, modified_root): ''' Identify modifications with the help of a text diff library. Returns a dict matching original elements to modified elements that appear to be the same, along with a bool "changed" flag indicating if any changes were found. ''' # Flatten out all of the nodes, and wrap them with custom # match logic. original_nodes = [Element_Wrap(x) for x in original_root.iter()] modified_nodes = [Element_Wrap(x) for x in modified_root.iter()] # -Removed; expanded style didn't help, and just ran a lot slower. # Alternative: get tighter matching logic by breaking apart tags, # attributes, and tails (eg. an element expands to 2+ subelements), since # the difflib really wants to match sequences, and does poorly at matching # a full xml element that has changes on both sides. #original_nodes = [y for x in original_root.iter() for y in Element_To_Pieces(x)] #modified_nodes = [y for x in modified_root.iter() for y in Element_To_Pieces(x)] # Sequence matcher will pair up the nodes. matcher = difflib.SequenceMatcher( # Lambda function that takes an element and returns if it is ignorable. # Nothing ignorable, so no function. None, original_nodes, modified_nodes, # There is some weird background algorithm that selects elements # to ignore based on frequency? Anyway, in practice on a big # wares file it caused a bunch of matches to be missed, so # disable it.
bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 binary_weight_before_update = torch.sign(p.data) condition_consolidation = (torch.mul(binary_weight_before_update, exp_avg) > 0.0 ) # exp_avg has the same sign as exp_avg/denom #decayed_exp_avg = torch.where(p.data.abs()>group['meta'], torch.zeros_like(p.data), exp_avg) if p.dim()==1: # True if p is bias, false if p is weight p.data.addcdiv_(-step_size, exp_avg, denom) else: decayed_exp_avg = torch.mul(torch.ones_like(p.data)-torch.pow(torch.tanh(group['meta'][p.newname]*torch.abs(p.data)),2), exp_avg) #p.data.addcdiv_(-step_size, exp_avg , denom) #normal update p.data.addcdiv_(-step_size, torch.where(condition_consolidation, decayed_exp_avg, exp_avg), denom) #assymetric lr for metaplasticity return loss class Adam_bk(torch.optim.Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), n_bk=1, ratios=[0], areas=[1], meta = 0.0, feedback=0.0, eps=1e-8, weight_decay=0, amsgrad=False, path='.'): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, n_bk=n_bk, ratios=ratios, areas=areas, meta=meta, feedback=feedback, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, path=path) super(Adam_bk, self).__init__(params, defaults) def __setstate__(self, state): super(Adam_bk, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: n_bk = group['n_bk'] ratios = group['ratios'] areas = group['areas'] meta = group['meta'] feedback = group['feedback'] path = group['path'] for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) # Initializing beakers for bk_idx in range(n_bk+1): if bk_idx==n_bk: # create an additional beaker clamped at 0 state['bk'+str(bk_idx)+'_t-1'] = torch.zeros_like(p) state['bk'+str(bk_idx)+'_t'] = torch.zeros_like(p) else: # create other beakers at equilibrium state['bk'+str(bk_idx)+'_t-1'] = torch.empty_like(p).copy_(p) state['bk'+str(bk_idx)+'_t'] = torch.empty_like(p).copy_(p) state['bk'+str(bk_idx)+'_lvl'] = [] if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 if group['weight_decay'] != 0: grad.add_(group['weight_decay'], p.data) #p.data # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. 
of gradient denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 if p.dim()==1: # True if p is bias, false if p is weight p.data.addcdiv_(-step_size, exp_avg, denom) else: # weight update p.data.addcdiv_(-step_size, exp_avg, denom) p.data.add_((ratios[0]/areas[0])*(state['bk1_t-1']-state['bk0_t-1'])) p.data.add_(torch.where( (state['bk'+str(n_bk-1)+'_t-1'] - state['bk0_t-1']) * state['bk'+str(n_bk-1)+'_t-1'].sign() > 0 , feedback*(state['bk'+str(n_bk-1)+'_t-1'] - state['bk0_t-1']), torch.zeros_like(p.data))) # Update of the beaker levels with torch.no_grad(): for bk_idx in range(1, n_bk): # diffusion entre les bk dans les deux sens + metaplasticité sur le dernier if bk_idx==(n_bk-1): condition = (state['bk'+str(bk_idx-1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1'])*state['bk'+str(bk_idx)+'_t-1'] < 0 decayed_m = 1 - torch.tanh(meta[p.newname]*state['bk'+str(bk_idx)+'_t-1'])**2 state['bk'+str(bk_idx)+'_t'] = torch.where(condition, state['bk'+str(bk_idx)+'_t-1'] + (ratios[bk_idx-1]/areas[bk_idx])*decayed_m*(state['bk'+str(bk_idx-1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1']) + (ratios[bk_idx]/areas[bk_idx])*(state['bk'+str(bk_idx+1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1']), state['bk'+str(bk_idx)+'_t-1'] + (ratios[bk_idx-1]/areas[bk_idx])*(state['bk'+str(bk_idx-1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1']) + (ratios[bk_idx]/areas[bk_idx])*(state['bk'+str(bk_idx+1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1'])) else: state['bk'+str(bk_idx)+'_t'] = state['bk'+str(bk_idx)+'_t-1'] + (ratios[bk_idx-1]/areas[bk_idx])*(state['bk'+str(bk_idx-1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1']) + (ratios[bk_idx]/areas[bk_idx])*(state['bk'+str(bk_idx+1)+'_t-1'] - state['bk'+str(bk_idx)+'_t-1']) # Plotting beaker levels and distributions fig = plt.figure(figsize=(12,9)) for bk_idx in 
range(n_bk): if bk_idx==0: state['bk'+str(bk_idx)+'_t-1'] = p.data else: state['bk'+str(bk_idx)+'_t-1'] = state['bk'+str(bk_idx)+'_t'] if p.size() == torch.empty(4096,4096).size() : state['bk'+str(bk_idx)+'_lvl'].append(state['bk'+str(bk_idx)+'_t-1'][11, 100].detach().item()) if state['step']%600==0: plt.plot(state['bk'+str(bk_idx)+'_lvl']) fig.savefig(path + '/trajectory.png', fmt='png', dpi=300) plt.close() if p.dim()!=1 and state['step']%600==0: fig2 = plt.figure(figsize=(12,9)) for bk_idx in range(n_bk): plt.hist(state['bk'+str(bk_idx)+'_t-1'].detach().cpu().numpy().flatten(), 100, label='bk'+str(bk_idx), alpha=0.5) plt.legend() fig2.savefig(path+'/bk_'+str(bk_idx)+'_'+str(p.size(0))+'-'+str(p.size(1))+'_task'+str((state['step']//48000)%2)+'.png', fmt='png') torch.save(state, path + '/state_'+str(p.size(0))+'-'+str(p.size(1))+'_task'+str((state['step']//48000)%2)+'.tar') plt.close() return loss def train(model, train_loader, current_task_index, optimizer, device, args, prev_cons=None, prev_params=None, path_integ=None, criterion = torch.nn.CrossEntropyLoss()): model.train() for data, target in train_loader: if torch.cuda.is_available(): data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = criterion(output, target) if args.ewc: ewc_loss = EWC_loss(model, prev_cons, prev_params, current_task_index, device, ewc_lambda=args.ewc_lambda) total_loss = loss + ewc_loss elif args.si: p_prev, p_old = prev_params si_loss = SI_loss(model, prev_cons, p_prev, args.si_lambda) total_loss = loss + si_loss else: total_loss = loss total_loss.backward() # This loop is for BNN parameters having 'org' attribute for p in list(model.parameters()): # blocking weights with org value greater than a threshold by setting grad to 0 if hasattr(p,'org'): p.data.copy_(p.org) optimizer.step() if args.si: update_W(model, path_integ, p_old, args) # This loop is only for BNN parameters as they have 'org' attribute for p in list(model.parameters()): # 
updating the org attribute if hasattr(p,'org'): p.org.copy_(p.data) def test(model, test_loader, device, criterion = torch.nn.CrossEntropyLoss(reduction='sum'), verbose = False): model.eval() test_loss = 0 correct = 0 for data, target in test_loader: if torch.cuda.is_available(): data, target = data.to(device), target.to(device) output = model(data) test_loss += criterion(output, target).item() # mean batch loss pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.data.view_as(pred)).cpu().sum() test_loss /= len(test_loader.dataset) test_acc = round( 100. * float(correct) / len(test_loader.dataset) , 2) if verbose : print('Test accuracy: {}/{} ({:.2f}%)'.format( correct, len(test_loader.dataset), test_acc)) return test_acc, test_loss def estimate_fisher(model, dataset, device, num = 1000, empirical = True): # Estimate the FI-matrix for num batches of size 1 loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1) est_fisher_info = {} for n, p in model.named_parameters(): if p.requires_grad: n = n.replace('.', '__') est_fisher_info[n] = p.detach().clone().zero_() model.eval() for index,(x,y) in enumerate(loader): # break from for-loop if max number of samples has been reached if index >= num: break # run forward pass of model x = x.to(device) output = model(x) if empirical: # -use provided label to calculate loglikelihood --> "empirical Fisher": label = torch.LongTensor([y]) if type(y)==int else y label = label.to(device) else: # -use predicted label to calculate loglikelihood: label = output.max(1)[1] # calculate negative log-likelihood negloglikelihood = torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(output, dim=1), label) # Calculate gradient of negative loglikelihood model.zero_grad() negloglikelihood.backward() # Square gradients and keep running sum for n, p in model.named_parameters(): if p.requires_grad: n = n.replace('.', '__') if p.grad is not None: 
est_fisher_info[n] += p.grad.detach() ** 2 est_fisher_info = {n: p/index for n, p in est_fisher_info.items()} return est_fisher_info def EWC_loss(model, previous_tasks_fisher, previous_tasks_parameters, current_task_index, device, ewc_lambda=5000): if current_task_index == 0: #no task to remember -> return 0 return torch.tensor(0.).to(device) else: losses = [] for task_idx in range(current_task_index): # for all previous tasks and parameters for n, p in model.named_parameters(): if ((p.requires_grad) and (n.find('bn') == -1)): n = n.replace('.', '__') mean = previous_tasks_parameters[n][task_idx] fisher = previous_tasks_fisher[n][task_idx] #print('in ewc loss, param =', p[0,0]) losses.append((fisher * (p-mean)**2).sum()) return ewc_lambda*(1./2)*sum(losses) def update_omega(model, omega, p_prev, W, epsilon=0.1): for n, p in model.named_parameters(): if n.find('bn') == -1: # not batchnorm if p.requires_grad: n = n.replace('.', '__') if isinstance(model, BNN): p_current = p.org.detach().clone() # sign() else: p_current = p.detach().clone() p_change = p_current - p_prev[n] omega_add = W[n]/(p_change**2 + epsilon) omega[n] += omega_add print('parameter :\t', n, '\nomega :\t', omega[n]) W[n] = p.data.clone().zero_() return omega def update_W(model, W, p_old, args): for n, p in model.named_parameters(): if p.requires_grad and (n.find('bn')==-1): n = n.replace('.', '__') if p.grad is not None: if isinstance(model, BNN): if args.bin_path: W[n].add_(-p.grad*(p.sign().detach()-p_old[n])) else: W[n].add_(-p.grad*(p.org.detach()-p_old[n])) else: W[n].add_(-p.grad*(p.detach()-p_old[n])) if isinstance(model, BNN): if args.bin_path: p_old[n] = p.sign().detach().clone() else: p_old[n] = p.org.detach().clone() else: p_old[n] = p.detach().clone() def SI_loss(model, omega, prev_params, si_lambda): losses = [] for n, p in model.named_parameters(): if p.requires_grad and (n.find('bn')==-1): n = n.replace('.', '__') if isinstance(model, BNN): losses.append((omega[n] * (p - 
prev_params[n].sign())**2).sum()) #org or sign print('p =\t',p,'\np_prev =\t', prev_params[n]) else: losses.append((omega[n] * (p - prev_params[n])**2).sum()) return si_lambda*sum(losses) def switch_sign_induced_loss_increase(model, loader, bins = 10, sample = 100, layer = 2, num_run = 1, verbose = False): """ The hidden weights of
line: if line.startswith("#"): out_line = "" elif line == "\n": # tweet ends out_line = "\n-DOCSTART-\n\n" else: line_list = line.split("\t") out_line = line_list[1] + "\t" if line_list[3] == "-\n": # no wiki name out_line += "O\n" else: out_line += line_list[2][:2] + line_list[3].split("|")[0].replace(" ", "_") + "\n" write.write(out_line) line = read.readline() os.rename(original_file_path, str(original_file_path) + "_original") super(NEL_ENGLISH_TWEEKI, self).__init__( data_folder, column_format={0: "text", 1: "nel"}, train_file=corpus_file_name, in_memory=in_memory, **corpusargs, ) class NEL_ENGLISH_REDDIT(ColumnCorpus): def __init__( self, base_path: Union[str, Path] = None, in_memory: bool = True, **corpusargs, ): """ Initialize the Reddit Entity Linking corpus containing gold annotations only (https://arxiv.org/abs/2101.01228v2) in the NER-like column format. The first time you call this constructor it will automatically download the dataset. :param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this to point to a different folder but typically this should not be necessary. :param in_memory: If True, keeps dataset in memory giving speedups in training. 
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object """ if not base_path: base_path = flair.cache_root / "datasets" else: base_path = Path(base_path) # this dataset name dataset_name = self.__class__.__name__.lower() data_folder = base_path / dataset_name # download and parse data if necessary reddit_el_path = "https://zenodo.org/record/3970806/files/reddit_el.zip" corpus_file_name = "reddit_el_gold.txt" parsed_dataset = data_folder / corpus_file_name if not parsed_dataset.exists(): reddit_el_zip = cached_path(f"{reddit_el_path}", Path("datasets") / dataset_name) unpack_file(reddit_el_zip, data_folder, "zip", False) with open(data_folder / corpus_file_name, "w", encoding="utf-8") as txtout: # First parse the post titles with open(data_folder / "posts.tsv", "r", encoding="utf-8") as tsvin1, open( data_folder / "gold_post_annotations.tsv", "r", encoding="utf-8" ) as tsvin2: posts = csv.reader(tsvin1, delimiter="\t") self.post_annotations = csv.reader(tsvin2, delimiter="\t") self.curr_annot = next(self.post_annotations) for row in posts: # Go through all the post titles txtout.writelines("-DOCSTART-\n\n") # Start each post with a -DOCSTART- token # Keep track of how many and which entity mentions does a given post title have link_annots = [] # [start pos, end pos, wiki page title] of an entity mention # Check if the current post title has an entity link and parse accordingly if row[0] == self.curr_annot[0]: link_annots.append( ( int(self.curr_annot[4]), int(self.curr_annot[5]), self.curr_annot[3], ) ) link_annots = self._fill_annot_array(link_annots, row[0], post_flag=True) # Post titles with entity mentions (if any) are handled via this function self._text_to_cols( Sentence(row[2], use_tokenizer=True), link_annots, txtout, ) else: self._text_to_cols( Sentence(row[2], use_tokenizer=True), link_annots, txtout, ) # Then parse the comments with open(data_folder / "comments.tsv", "r", encoding="utf-8") as tsvin3, open( 
data_folder / "gold_comment_annotations.tsv", "r", encoding="utf-8" ) as tsvin4: self.comments = csv.reader(tsvin3, delimiter="\t") self.comment_annotations = csv.reader(tsvin4, delimiter="\t") self.curr_annot = next(self.comment_annotations) self.curr_row: Optional[List[str]] = next(self.comments) self.stop_iter = False # Iterate over the comments.tsv file, until the end is reached while not self.stop_iter: txtout.writelines("-DOCSTART-\n") # Start each comment thread with a -DOCSTART- token # Keep track of the current comment thread and its corresponding key, on which the annotations are matched. # Each comment thread is handled as one 'document'. self.curr_comm: str = self.curr_row[4] comm_key = self.curr_row[0] # Python's csv package for some reason fails to correctly parse a handful of rows inside the comments.tsv file. # This if-condition is needed to handle this problem. if comm_key in {"en5rf4c", "es3ia8j", "es3lrmw"}: if comm_key == "en5rf4c": self.parsed_row = (r.split("\t") for r in self.curr_row[4].split("\n")) self.curr_comm = next(self.parsed_row) # type: ignore self._fill_curr_comment(fix_flag=True) # In case we are dealing with properly parsed rows, proceed with a regular parsing procedure else: self._fill_curr_comment(fix_flag=False) link_annots = [] # [start pos, end pos, wiki page title] of an entity mention # Check if the current comment thread has an entity link and parse accordingly, same as with post titles above if comm_key == self.curr_annot[0]: link_annots.append( ( int(self.curr_annot[4]), int(self.curr_annot[5]), self.curr_annot[3], ) ) link_annots = self._fill_annot_array(link_annots, comm_key, post_flag=False) self._text_to_cols( Sentence(self.curr_comm, use_tokenizer=True), link_annots, txtout, ) else: # In two of the comment thread a case of capital letter spacing occurs, which the SegtokTokenizer cannot properly handle. 
# The following if-elif condition handles these two cases and as result writes full capitalized words in each corresponding row, # and not just single letters into single rows. if comm_key == "dv74ybb": self.curr_comm = " ".join( [word.replace(" ", "") for word in self.curr_comm.split(" ")] ) elif comm_key == "eci2lut": self.curr_comm = ( self.curr_comm[:18] + self.curr_comm[18:27].replace(" ", "") + self.curr_comm[27:55] + self.curr_comm[55:68].replace(" ", "") + self.curr_comm[68:85] + self.curr_comm[85:92].replace(" ", "") + self.curr_comm[92:] ) self._text_to_cols( Sentence(self.curr_comm, use_tokenizer=True), link_annots, txtout, ) super(NEL_ENGLISH_REDDIT, self).__init__( data_folder, column_format={0: "text", 1: "nel"}, train_file=corpus_file_name, in_memory=in_memory, **corpusargs, ) def _text_to_cols(self, sentence: Sentence, links: list, outfile): """ Convert a tokenized sentence into column format :param sentence: Flair Sentence object containing a tokenized post title or comment thread :param links: array containing information about the starting and ending position of an entity mention, as well as its corresponding wiki tag :param outfile: file, to which the output is written """ for i in range(0, len(sentence)): # If there are annotated entity mentions for given post title or a comment thread if links: # Keep track which is the correct corresponding entity link, in cases where there is >1 link in a sentence link_index = [ j for j, v in enumerate(links) if (sentence[i].start_pos >= v[0] and sentence[i].end_pos <= v[1]) ] # Write the token with a corresponding tag to file try: if any(sentence[i].start_pos == v[0] and sentence[i].end_pos == v[1] for j, v in enumerate(links)): outfile.writelines(sentence[i].text + "\tS-" + links[link_index[0]][2] + "\n") elif any( sentence[i].start_pos == v[0] and sentence[i].end_pos != v[1] for j, v in enumerate(links) ): outfile.writelines(sentence[i].text + "\tB-" + links[link_index[0]][2] + "\n") elif any( 
sentence[i].start_pos >= v[0] and sentence[i].end_pos <= v[1] for j, v in enumerate(links) ): outfile.writelines(sentence[i].text + "\tI-" + links[link_index[0]][2] + "\n") else: outfile.writelines(sentence[i].text + "\tO\n") # IndexError is raised in cases when there is exactly one link in a sentence, therefore can be dismissed except IndexError: pass # If a comment thread or a post title has no entity link, all tokens are assigned the O tag else: outfile.writelines(sentence[i].text + "\tO\n") # Prevent writing empty lines if e.g. a quote comes after a dot or initials are tokenized # incorrectly, in order to keep the desired format (empty line as a sentence separator). try: if ( (sentence[i].text in {".", "!", "?", "!*"}) and (sentence[i + 1].text not in {'"', "“", "'", "''", "!", "?", ";)", "."}) and ("." not in sentence[i - 1].text) ): outfile.writelines("\n") except IndexError: # Thrown when the second check above happens, but the last token of a sentence is reached. # Indicates that the EOS punctuaion mark is present, therefore an empty line needs to be written below. 
outfile.writelines("\n") # If there is no punctuation mark indicating EOS, an empty line is still needed after the EOS if sentence[-1].text not in {".", "!", "?"}: outfile.writelines("\n") def _fill_annot_array(self, annot_array: list, key: str, post_flag: bool) -> list: """ Fills the array containing information about the entity mention annotations, used in the _text_to_cols method :param annot_array: array to be filled :param key: reddit id, on which the post title/comment thread is matched with its corresponding annotation :param post_flag: flag indicating whether the annotations are collected for the post titles (=True) or comment threads (=False) """ next_annot = None while True: # Check if further annotations belong to the current post title or comment thread as well try: next_annot = next(self.post_annotations) if post_flag else next(self.comment_annotations) if next_annot[0] == key: annot_array.append((int(next_annot[4]), int(next_annot[5]), next_annot[3])) else: self.curr_annot = next_annot break # Stop when the end of an annotation file is reached except StopIteration: break return annot_array def _fill_curr_comment(self, fix_flag: bool): """ Extends the string containing the current comment thread, which is passed to _text_to_cols method, when the comments are parsed. :param fix_flag: flag indicating whether the method is called when the incorrectly imported rows are parsed (=True) or regular rows (=False) """ next_row = None while True: # Check if further annotations belong to the current sentence as well try: next_row = next(self.comments) if not fix_flag else next(self.parsed_row) if len(next_row) <
max(Decimal('0.00'), start) end = min(transcript.length, end) task = transcript.transcribetask_set.create( is_review=is_review, media=None, fragment=fragment, start=fragment.start, end=fragment.end, ) try: task.lock() except locks.LockException: task.delete() raise if not is_review: next = fragment.revisions.create(sequence=1, editor=user) text = '' else: latest = fragment.revisions.latest() text = latest.text next = fragment.revisions.create(sequence=latest.sequence + 1, editor=user) media, created = transcript.media.get_or_create( is_processed=True, is_full_length=False, start=start, end=end, ) task.media = media task.revision = next task.text = text task.prepare() task.assign_to(user) return task class TranscribeTask(Task): TASK_TYPE = 'transcribe' fragment = models.ForeignKey('TranscriptFragment', blank=True, null=True) revision = models.ForeignKey('TranscriptFragmentRevision', blank=True, null=True) text = models.TextField(blank=True, null=True) # Keep start and end even if `revision` goes away. 
start = models.DecimalField(max_digits=8, decimal_places=2) end = models.DecimalField(max_digits=8, decimal_places=2) objects = TranscribeTaskManager() class Meta: ordering = ('-created',) permissions = ( ('add_transcribetask_review', 'Can add review transcribe task'), ) def lock(self): self.fragment.lock() def _assign_to(self): pass def _submit(self): from .tasks import process_transcribe_task result = process_transcribe_task.delay(self.pk) def _validate(self): self.fragment.last_editor = self.assignee self.fragment.unlock() def _invalidate(self): self.revision.delete() self.revision = None self.fragment.unlock() # --------------------- class StitchTaskManager(TaskManager): def _available_stitches(self, user, transcript, is_review, request=None): if not is_review: stitches = transcript.stitches.filter( state='unstitched', lock_state='unlocked', ) else: stitches = transcript.stitches.filter( state='stitched', lock_state='unlocked', ) if settings.TRANSCRIPTS_REQUIRE_TEAMWORK and not request_bypasses_teamwork(request): stitches = stitches.exclude(last_editor=user) return stitches def can_create(self, user, transcript, is_review, request=None): return bool(self._available_stitches(user, transcript, is_review, request).count()) def create_next(self, user, transcript, is_review, request=None): stitch = self._available_stitches(user, transcript, is_review, request).first() if not stitch: return None # Apply overlap. start = stitch.left.start - settings.TRANSCRIPT_FRAGMENT_OVERLAP end = stitch.right.end + settings.TRANSCRIPT_FRAGMENT_OVERLAP # Correct for out of bounds. 
start = max(Decimal('0.00'), start) end = min(transcript.length, end) media, created = transcript.media.get_or_create( is_processed=True, is_full_length=False, start=start, end=end, ) task = transcript.stitchtask_set.create( is_review=is_review, media=media, stitch=stitch, ) try: task.lock() except locks.LockException: task.delete() raise if is_review: task.create_pairings_from_prior_task() task.prepare() task.assign_to(user) return task class StitchTask(Task): TASK_TYPE = 'stitch' stitch = models.ForeignKey('TranscriptStitch', related_name='+') objects = StitchTaskManager() class Meta: get_latest_by = 'created' ordering = ('-created',) permissions = ( ('add_stitchtask_review', 'Can add review stitch task'), ) def lock(self): self.stitch.lock() def _assign_to(self): pass def _submit(self): from .tasks import process_stitch_task process_stitch_task.delay(self.pk) def _validate(self): self.stitch.last_editor = self.assignee self.stitch.unlock() def _invalidate(self): self.stitch.unlock() def create_pairings_from_prior_task(self): # Create StitchTaskPairings based on previous completed task. 
previous_completed_task = StitchTask.objects.filter( state='valid', stitch=self.stitch, ).latest() for previous_pairing in previous_completed_task.pairings.all(): self.pairings.get_or_create( left=previous_pairing.left, right=previous_pairing.right, ) def suggested_pairs(self): """Return a list of suggested (left, right) sentence fragment pairs.""" suggestions = [ # (left_sentence_fragment_id, right_sentence_fragment_id), ] stitch = self.stitch left_sentence_fragments = stitch.left.revisions.latest().sentence_fragments.all() right_sentence_fragments = stitch.right.revisions.latest().sentence_fragments.all() def normify(text): text = unicodedata.normalize('NFKD', text) text = re.sub(ur'[^\w\s:\)-]', u'', text).strip().lower() return re.sub(ur'[ \s]+', u' ', text) for left_sf in left_sentence_fragments: left_text = left_sf.text left_norm = normify(left_text) left_words = left_norm.split(' ') for right_sf in right_sentence_fragments: right_text = right_sf.text if left_text.startswith('[m]') and right_text.startswith('[m]'): # Both start with music; suggest. suggestions.append((left_sf.id, right_sf.id)) continue right_norm = normify(right_text) right_words = right_norm.split(' ') for i in range(len(left_words)): left_partial_norm = ' '.join(left_words[i:]) if right_norm.startswith(left_partial_norm): # Potential overlap of text; suggest. 
suggestions.append((left_sf.id, right_sf.id)) break return suggestions class StitchTaskPairing(models.Model): task = models.ForeignKey('StitchTask', related_name='pairings') left = models.ForeignKey('SentenceFragment', related_name='+') right = models.ForeignKey('SentenceFragment', related_name='+') class Meta: ordering = ('left__revision__fragment__start', 'left__sequence') unique_together = [ ('task', 'left',), ] def __unicode__(self): return 'Pairing between "{self.left.text}" and "{self.right.text}"'.format(**locals()) # --------------------- class CleanTaskManager(TaskManager): def _available_sentences(self, user, transcript, is_review, request=None): if not is_review: sentences = transcript.sentences.filter( state='completed', clean_state='untouched', ) else: sentences = transcript.sentences.filter( state='completed', clean_state='edited', ) if settings.TRANSCRIPTS_REQUIRE_TEAMWORK and not request_bypasses_teamwork(request): sentences = sentences.exclude(clean_last_editor=user) return sentences def can_create(self, user, transcript, is_review, request=None): return bool(self._available_sentences(user, transcript, is_review, request).count()) def create_next(self, user, transcript, is_review, request=None): sentence = self._available_sentences(user, transcript, is_review, request).first() if sentence is None: return None media, created = transcript.media.get_or_create( is_processed=True, is_full_length=False, start=sentence.latest_start, end=sentence.latest_end, ) task = transcript.cleantask_set.create( is_review=is_review, media=media, sentence=sentence, text=sentence.latest_text, ) try: task.lock() except locks.LockException: task.delete() raise task.prepare() task.assign_to(user) return task class CleanTask(Task): TASK_TYPE = 'clean' sentence = models.ForeignKey('Sentence') text = models.TextField() objects = CleanTaskManager() class Meta: ordering = ('-created',) permissions = ( ('add_cleantask_review', 'Can add review clean task'), ) def lock(self): 
self.sentence.lock_clean() def _assign_to(self): if not self.is_review: self.sentence.clean_state = 'editing' else: self.sentence.clean_state = 'reviewing' self.sentence.save() def _submit(self): from .tasks import process_clean_task process_clean_task.delay(self.pk) def _validate(self): if not self.is_review: self.sentence.clean_state = 'edited' else: latest, previous = self.sentence.revisions.order_by('-sequence')[:2] if latest.text.strip() == previous.text.strip(): self.sentence.clean_state = 'reviewed' else: self.sentence.clean_state = 'edited' self.sentence.clean_last_editor = self.assignee self.sentence.unlock_clean() self.sentence.save() self.finish_transcript_if_all_tasks_complete() def _invalidate(self): if not self.is_review: self.sentence.clean_state = 'untouched' else: self.sentence.clean_state = 'edited' self.sentence.unlock_clean() self.sentence.save() # --------------------- class BoundaryTaskManager(TaskManager): def _available_sentences(self, user, transcript, is_review, request=None): if not is_review: sentences = transcript.sentences.filter( state='completed', boundary_state='untouched', ) else: sentences = transcript.sentences.filter( state='completed', boundary_state='edited', ) if settings.TRANSCRIPTS_REQUIRE_TEAMWORK and not request_bypasses_teamwork(request): sentences = sentences.exclude(boundary_last_editor=user) return sentences def can_create(self, user, transcript, is_review, request=None): return bool(self._available_sentences(user, transcript, is_review, request).count()) def create_next(self, user, transcript, is_review, request=None): sentence = self._available_sentences(user, transcript, is_review, request).first() if sentence is None: return None media_start = sentence.fragments.first().revision.fragment.start - settings.TRANSCRIPT_FRAGMENT_OVERLAP media_end = sentence.fragments.last().revision.fragment.end + settings.TRANSCRIPT_FRAGMENT_OVERLAP media_start = max(Decimal(0), media_start) media_end = min(transcript.length, 
media_end) if not is_review: # Apply overlap. start = sentence.latest_start - settings.TRANSCRIPT_FRAGMENT_OVERLAP end = sentence.latest_end + settings.TRANSCRIPT_FRAGMENT_OVERLAP # Correct for out of bounds. start = max(Decimal('0.00'), start) end = min(transcript.length, end) # Find a suggested sentence start: # Look for the end of the most recently bounded sentence # (ending within the maximum region of this sentence) # to try to predict where this sentence will start. bounded_sentences = transcript.sentences.completed().filter( boundary_state__in=['edited', 'reviewed'], latest_end__gt=media_start, latest_end__lt=media_end, ) if bounded_sentences.exists(): latest_bounded = bounded_sentences.order_by('-latest_end')[0] # Suggest the end of the last sentence as the start of this one... suggested_start = latest_bounded.latest_end # ...but only if it comes after the default starting position... suggested_start = max(suggested_start, start) # ...and only if the calculated starting position comes before # the default ending position. if suggested_start > end: suggested_start = start start = suggested_start else: # Reviews pass through. 
start = sentence.latest_start end = sentence.latest_end media, created = transcript.media.get_or_create( is_processed=True, is_full_length=False, start=media_start, end=media_end, ) task = transcript.boundarytask_set.create( is_review=is_review, media=media, sentence=sentence, start=start, end=end, ) try: task.lock() except locks.LockException: task.delete() raise task.prepare() task.assign_to(user) return task class BoundaryTask(Task): TASK_TYPE = 'boundary' sentence = models.ForeignKey('Sentence') start = models.DecimalField(max_digits=8, decimal_places=2) end = models.DecimalField(max_digits=8, decimal_places=2) objects = BoundaryTaskManager() class Meta: ordering = ('-created',) permissions = ( ('add_boundarytask_review', 'Can add review boundary task'), ) def lock(self): self.sentence.lock_boundary() def _assign_to(self): if not self.is_review: self.sentence.boundary_state = 'editing' else: self.sentence.boundary_state = 'reviewing' self.sentence.save() def _submit(self): # See below in `process_boundary_task_synchronously_on_submit`. pass def _validate(self): if not self.is_review: self.sentence.boundary_state = 'edited' else: latest, previous = self.sentence.boundaries.order_by('-sequence')[:2] if (latest.start, latest.end) == (previous.start, previous.end): self.sentence.boundary_state = 'reviewed' else: self.sentence.boundary_state = 'edited' self.sentence.boundary_last_editor = self.assignee self.sentence.unlock_boundary() self.sentence.save() self.finish_transcript_if_all_tasks_complete() def _invalidate(self): if not self.is_review: self.sentence.boundary_state = 'untouched' else: self.sentence.boundary_state = 'edited' self.sentence.unlock_boundary() self.sentence.save() # NOTE: Calls to _submit are normally processed as a celery task, # but we are interested in getting very quick feedback for new tasks # as far as predicting the next sentence start. 
# # Therefore, we are running it synchronously here: # @receiver(post_transition, sender=BoundaryTask) def process_boundary_task_synchronously_on_submit(instance, target, **kwargs): if target == 'submitted': from .tasks import process_boundary_task process_boundary_task(instance.pk) # --------------------- class SpeakerTaskManager(TaskManager): def _available_sentences(self, user, transcript, is_review, request=None): if not is_review: sentences = transcript.sentences.filter( state='completed', speaker_state='untouched', ) else: sentences = transcript.sentences.filter( state='completed', speaker_state='edited', ) if settings.TRANSCRIPTS_REQUIRE_TEAMWORK and not request_bypasses_teamwork(request): sentences = sentences.exclude(speaker_last_editor=user) return sentences def can_create(self, user, transcript, is_review, request=None): return bool(self._available_sentences(user, transcript, is_review, request).count()) def create_next(self, user, transcript, is_review, request=None): sentence = self._available_sentences(user, transcript, is_review, request).first() if sentence is None: return None start = sentence.latest_start media, created = transcript.media.get_or_create( is_processed=True, is_full_length=False, start=sentence.latest_start, end=sentence.latest_end, ) task = transcript.speakertask_set.create( is_review=is_review, media=media, sentence=sentence, speaker=sentence.latest_speaker, ) try: task.lock() except locks.LockException: task.delete() raise task.prepare() task.assign_to(user) return task class SpeakerTask(Task): TASK_TYPE = 'speaker' sentence = models.ForeignKey('Sentence') speaker = models.ForeignKey('Speaker', blank=True, null=True) new_name = models.CharField(max_length=100, blank=True, null=True) objects = SpeakerTaskManager() class Meta: ordering = ('-created',) permissions = ( ('add_speakertask_review', 'Can add review speaker task'), ) def lock(self): self.sentence.lock_speaker() def _assign_to(self): if not self.is_review: 
self.sentence.speaker_state = 'editing' else: self.sentence.speaker_state = 'reviewing' self.sentence.save() def _submit(self): from .tasks import process_speaker_task process_speaker_task.delay(self.pk) def _validate(self): if not self.is_review: self.sentence.speaker_state = 'edited' else: prior_task = self.sentence.speakertask_set.order_by('-created')[1] if self.speaker == prior_task.speaker: # No more changes; finished reviewing. self.sentence.speaker_state = 'reviewed' else: # Speaker changed; need to review again. self.sentence.speaker_state = 'edited' self.sentence.speaker_last_editor = self.assignee self.sentence.unlock_speaker() self.sentence.save() self.finish_transcript_if_all_tasks_complete() def _invalidate(self): if not self.is_review: self.sentence.speaker_state = 'untouched' else: self.sentence.speaker_state =
#The main idea: approximate the light curve by a Fourier series with different
#trial periods and choose the one for which the sum of squared deviations of
#the data points from the approximation is smallest. The program then builds
#the light curve and the phase curve; points that stand out too far from the
#approximation are cut off. The program writes the phase-curve pictures and
#the data with the cut points to files.
"""==========================================="""
"""IMPORTING LIBRUARIES"""
"""==========================================="""
import scipy.optimize as spo    #least-squares fitting
import numpy as np              #array math
import matplotlib.pyplot as plt #plotting
import time                     #to know time of calculations
import tkinter as tnk           #graphic interface (error pop-ups)
import os                       #to work with directories

"""==========================================="""
"""Path to files"""
"""==========================================="""
path_file = os.getcwd()         #path to the folder where the code is stored
# NOTE(review): paths below are built with '\\' separators, so the script is
# Windows-only as written — TODO confirm, or switch to os.path.join.

"""==========================================="""
"""Errors"""
"""==========================================="""
def Error_1():
    """Show pop-up for error #1: no minimum found in the periodogram.

    Raised (in Manual mode) when the user-supplied period or its error does
    not let the program locate a periodogram minimum.  Blocks in mainloop()
    until the window is closed.
    """
    window_error = tnk.Tk()
    bcg_cl = '#ffff00'
    window_error.title("Period D&P V5.4")
    w = 550
    h = 180
    window_error.geometry(str(w) + 'x' + str(h))
    window_error.config(bg=bcg_cl)
    window_error.resizable(width=False, height=False)
    lb_error = tnk.Label(window_error, font = ('Algerian', 19), text = 'Error #1', bg=bcg_cl)
    lb_describtion_1 = tnk.Label(window_error, font = ('Bookman Old Style', 14), text = 'The program has not found minimum in periodogram', bg=bcg_cl) #words and labels
    lb_describtion_2 = tnk.Label(window_error, font = ('Bookman Old Style', 14), text = 'Please try another period or its error', bg=bcg_cl)
    lb_error.place(x = 200, y = 30)            #their place on the window
    lb_describtion_1.place(x = 20, y = 80)
    lb_describtion_2.place(x = 90, y = 110)
    window_error.mainloop()

def Error_2(File):
    """Show pop-up for error #2: a required file was not found.

    :param File: name of the missing file, shown in the message.
    Blocks in mainloop() until the window is closed.
    """
    window_error = tnk.Tk()
    bcg_cl = 'green'
    window_error.title("Period D&P V5.4")
    w = 850
    h = 180
    window_error.geometry(str(w) + 'x' + str(h))
    window_error.config(bg=bcg_cl)
    window_error.resizable(width=False, height=False)
    lb_error = tnk.Label(window_error, font = ('Algerian', 19), text = 'Error #2', bg=bcg_cl)
    error_text = 'The program has not found ' + File
    lb_describtion_1 = tnk.Label(window_error, font = ('Bookman Old Style', 14), text = error_text, bg=bcg_cl) #words and labels
    lb_describtion_2 = tnk.Label(window_error, font = ('Bookman Old Style', 14), text = 'Please check and repeat', bg=bcg_cl)
    lb_error.place(x = 350, y = 30)            #their place on the window
    lb_describtion_1.place(x = 20, y = 80)
    lb_describtion_2.place(x = 240, y = 110)
    window_error.mainloop()

"""==========================================="""
"""TRIGONOMETRIC POLYNOMIAL FUNCTIONS"""
"""==========================================="""
def sin(t, pp, n):
    """Evaluate the truncated Fourier series at every point of array `t`.

    model(t) = pp[0] + sum_{i=1..n} pp[2i] * sin(2*pi*t*i/pp[1] + pp[2i+1])

    :param t:  numpy array of times (x data)
    :param pp: parameters: pp[0] constant, pp[1] period, then
               (amplitude, phase) pairs for each harmonic
    :param n:  number of harmonics
    :returns:  numpy array of model values, same length as `t`
    """
    x = np.zeros(len(t))    #array with the size of the data
    x += pp[0]              #constant term
    for i in range(n):
        x += pp[2*i+2]*np.sin(2*np.pi*t*(i+1)/pp[1]+pp[2*i+3])  # + A*sin(... + phi)
    return x

def sin1(t, pp, n):
    """Same model as `sin`, but for a scalar `t`: returns a single value.

    BUGFIX: the constant term pp[0] was previously added again on every loop
    iteration (``y += pp[0] + ...``), so the result was (n+1)*pp[0] + sum(...)
    instead of pp[0] + sum(...), disagreeing with `sin`.
    """
    y = pp[0]               #constant term, added exactly once
    for i in range(n):
        y += pp[2*i+2]*np.sin(2*np.pi*t/pp[1]*(i+1)+pp[2*i+3])
    return y

"""==========================================="""
"""READING DATA FROM FILE"""
"""==========================================="""
def read_data(name, ftype):
    """Read a three-column (time, magnitude, error) data file.

    The file is expected at <path_file>\\data\\<name><ftype>; lines starting
    with '#' are treated as comments and skipped.

    :param name:  star name (file stem)
    :param ftype: file extension, including the dot
    :returns: (x, y, y_err, Number_of_elements_0, Error_program) where
              Error_program is 0 on success and 1 if the file was not found
              (in which case the arrays are returned as 0).
    """
    Name = path_file + '\\data\\' + name + ftype    #data lives in the "data" subfolder
    try:
        with open(Name, 'r') as file:               #each file is named "Name_of_star.type"
            x, y, y_err = [], [], []
            lines = file.readlines()
            for i in lines:
                if (not i.startswith("#")):         #skip comment lines
                    data = i.split()                #split on whitespace
                    x.append(float(data[0]))
                    y.append(float(data[1]))
                    y_err.append(float(data[2]))
            #numpy arrays are required by the least-squares routine
            x, y, y_err = np.array(x), np.array(y), np.array(y_err)
            Number_of_elements_0 = len(x)
            Error_program = 0
    except FileNotFoundError:
        Error_program = 1
        x = 0
        y = 0
        y_err = 0
        Number_of_elements_0 = 0
    return x, y, y_err, Number_of_elements_0, Error_program

"""==========================================="""
"""READING PARAMENTRS FROM FILE"""
"""==========================================="""
def read_parametrs(Parametrs_file):
    """Read the 13 numeric configuration parameters from a text file.

    Lines starting with '#' are skipped; the remaining lines are read in a
    fixed order (see the assignments below).

    :param Parametrs_file: path to the parameter file
    :returns: the 13 parameters followed by Error_program (0 on success,
              1 if the file was not found, in which case all parameters are 0).
    """
    parametrs = []
    try:
        with open(Parametrs_file, 'r') as file:
            for line in file:
                if (not line.startswith("#")):      #skip comment lines
                    parametrs.append(float(line))
        n_app_T = int(parametrs[0])             #number of Fourier terms in Approximation T
        n_becoming_perfect = int(parametrs[1])  #number of Fourier terms in becoming_perfect
        n_bec_per_sec = int(parametrs[2])       #number of Fourier terms in becoming_perfect_second
        edge_appr_T = float(parametrs[3])       #to cut minimum
        Parametr_sigma = float(parametrs[4])    #to cut phase diagram
        TT_min_par = float(parametrs[5])        #minimum period value in the periodogram
        Presize_appr_T = float(parametrs[6])    #distance between points in the periodogram
        ratio = float(parametrs[7])             #size of phase picture (x:y)
        max_width = float(parametrs[8])         #to cut phase diagram
        N_cutting = int(parametrs[9])           #number of phase-diagram pictures / cut detail
        N_fragmentation = int(parametrs[10])    #detail of cutting the phase diagram
        dpi_picture = int(parametrs[11])        #picture quality
        dots_size = int(parametrs[12])          #size of dots on phase curves
        Error_program = 0
    except FileNotFoundError:
        Error_program = 1
        # BUGFIX: this branch previously read
        #     n_app_T, ..., dots_size = 0
        # which raises TypeError at runtime (an int cannot be unpacked into
        # 13 names), so the error path itself crashed.  Zero them properly:
        (n_app_T, n_becoming_perfect, edge_appr_T, Parametr_sigma,
         TT_min_par, Presize_appr_T, ratio, N_cutting, n_bec_per_sec,
         max_width, N_fragmentation, dpi_picture, dots_size) = (0,) * 13
    return n_app_T, n_becoming_perfect, edge_appr_T, Parametr_sigma, TT_min_par, Presize_appr_T, ratio, N_cutting, n_bec_per_sec, max_width, N_fragmentation, dpi_picture, dots_size, Error_program

"""==========================================="""
"""CALCULATING PRESIZE VALUE OF PERIOD"""
"""==========================================="""
def becoming_perfect(Tappr, A0, x, y, y_err, n_becoming_perfect, name, n_app_T, ans_start, dpi_picture, dots_size, I = 0, Repeats = 0):
    """Refine the period by a full least-squares Fourier fit and plot it.

    Starts from the periodogram solution `ans_start` (fitted with n_app_T
    harmonics), re-fits with n_becoming_perfect harmonics, and saves the
    light-curve pictures into Results\\<name>\\.

    :param Tappr: approximate period from the periodogram (initial guess)
    :param A0: unused here; kept for call-site compatibility
    :param x, y, y_err: data arrays
    :param ans_start: parameter vector of the periodogram best fit
    :param I, Repeats: indices used only to pick a unique figure number
    :returns: (ideal parameter vector, rounded period, rounded period error)
    """
    p0 = np.zeros(2*n_becoming_perfect + 2)     #start conditions
    p0[0] = ans_start[0]                        #constant term from the periodogram fit
    p0[1] = Tappr
    if(n_becoming_perfect > n_app_T):           #reuse the best ApproximationT parameters,
        for i in range(2*n_app_T):              #pad the extra harmonics with 1
            p0[i+2] = ans_start[i+1]
        for i in range(2*n_app_T + 2, 2*n_becoming_perfect + 2):
            p0[i] = 1
    else:
        for i in range(2*n_becoming_perfect + 2):
            p0[i] = ans_start[i]
    fun = lambda pp: (y - sin(x, pp, n_becoming_perfect))/y_err   #weighted residuals
    ans = spo.leastsq(fun, p0, full_output=1)
    # NOTE(review): leastsq returns cov_x=None when the fit fails; np.diag
    # would then raise — TODO confirm whether that case needs handling.
    sigma = np.sum((y - sin(x, ans[0], n_becoming_perfect))**2)/len(x)  #mean squared residual
    error = np.sqrt(np.diag(ans[1]*sigma))
    T_ideal = ans[0][1]
    error_T = error[1]
    ans_ideal = ans[0]                          #ideal parameters
    order_Error = -int(np.log10(error_T))+1     #order of magnitude of the error (for rounding)
    save_path = path_file + '\\Results\\' + name + '\\'   #results go to the "Results" folder
    fig = plt.figure(I*(Repeats+2) + 2)         #plot dots and curve
    plt.gca().invert_yaxis()                    #magnitudes: brighter is up
    fig.set_size_inches(15, 6)
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    plt.plot(x, y, '.b')                        #blue dots
    plt.xlabel('BJD', fontsize = 20)
    plt.ylabel('V mmag', fontsize = 20)
    plt.title('Light curve', fontsize = 20)
    plt.savefig(save_path + name + " light curve.png", dpi = 300)   #without approximation
    xx = np.linspace(min(x), max(x), len(x))    #dense grid to draw the model through data gaps
    plt.plot(xx, sin(xx, ans_ideal, n_becoming_perfect), '-r')
    plt.savefig(save_path + name + " light curve with approximation.png", dpi = dpi_picture)  #with approximation
    plt.show()                                  #show plot during processing
    return ans_ideal, np.round(T_ideal, order_Error), np.round(error_T, order_Error)

def becoming_perfect_second(I, answ, x, y, y_err, n_becoming_perfect, name, ftype, Parametr, n, answ_2, ratio, max_width, N_cutting, N_fragmentation, dpi_picture, dots_size, I_star = 0):
    """Re-fit with n harmonics and build the phase curve.

    Start conditions come from becoming_perfect (I == 0) or from a previous
    call of this function (I != 0).
    NOTE(review): this definition continues beyond the visible chunk; the
    remainder is preserved unchanged.
    """
    p0 = np.zeros(2*n+2)        #ideal start conditions from the previous fit
    if(I == 0):
        if(n > n_becoming_perfect):
            for i in range(2*n_becoming_perfect + 2):
                p0[i] = answ[i]
            for i in range(2*n_becoming_perfect + 2, 2*n+2):
                p0[i] = 1
        else:
            for i in range (2*n+2):
                p0[i] = answ[i]
    else:
        for i in range(2*n+2):
            p0[i] = answ_2[i]
    fun = lambda pp: (y - sin(x, pp, n))/y_err      #weighted residuals
    ans = spo.leastsq(fun, p0, full_output=1)
    # NOTE(review): here sigma is the ROOT mean squared residual (np.sqrt),
    # while becoming_perfect uses the mean squared residual without sqrt —
    # the two covariance scalings disagree; confirm which is intended.
    sigma = np.sqrt(np.sum((y - sin(x, ans[0], n))**2)/len(x))
    error = np.sqrt(np.diag(ans[1]*sigma))
    order_Error = -int(np.log10(error[1]))+1
    T_ideal = ans[0][1]
    Number_of_elements = len(x)
    ans_id = ans[0]
    Number_periods = (x - x[0])/T_ideal     #to build the phase curve (rearrange the x array)
    X_E = np.zeros(Number_of_elements)
    y_max = y[0]
    for i in range(Number_of_elements):     #find the index of the y maximum
        if (y[i] > y_max):
            y_max = y[i]
            I_max = i
    for i in range(Number_of_elements):     #fold times into one period
        X_E[i] = (x[i] - x[0]) - int(Number_periods[i])*T_ideal
    delta = X_E[I_max]
    for i in range(Number_of_elements):     #shift so the plot starts from the minimum
        X_E[i] -= delta
        if (X_E[i] < 0):
            X_E[i] += T_ideal
    save_path = path_file + '\\Results\\' + name + '\\'
    A = max(x) - min(x)
    B = max(y) - min(y)
    N_periods = np.round(((max(x) - min(x))/T_ideal), 1)
    hfont = {'fontname':'Helvetica'}
    fig = plt.figure(3 + I + I_star*(N_cutting + 2))
    plt.gca().invert_yaxis()
    fig.set_size_inches(ratio*7, 7)
    plt.xlim(-0.02, 1.02)
    plt.ylim((max(y) + 0.05*B), (min(y) - 0.1*B))
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
<filename>keystone/notifications.py
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Notifications module for OpenStack Identity Service resources."""

import collections
import functools
import inspect
import socket

from oslo_log import log
import oslo_messaging
from oslo_utils import reflection
import pycadf
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
from pycadf import credential
from pycadf import eventfactory
from pycadf import resource

from keystone.i18n import _, _LE
from keystone.common import dependency
from keystone.common import utils
import keystone.conf


_CATALOG_HELPER_OBJ = None

LOG = log.getLogger(__name__)
# NOTE(gyee): actions that can be notified. One must update this list whenever
# a new action is supported.
_ACTIONS = collections.namedtuple(
    'NotificationActions',
    'created, deleted, disabled, updated, internal')
ACTIONS = _ACTIONS(created='created', deleted='deleted', disabled='disabled',
                   updated='updated', internal='internal')
"""The actions on resources."""

# Mapping of Keystone resource type -> CADF taxonomy target type URI,
# used by _create_cadf_payload; unknown types fall back to taxonomy.UNKNOWN.
CADF_TYPE_MAP = {
    'group': taxonomy.SECURITY_GROUP,
    'project': taxonomy.SECURITY_PROJECT,
    'role': taxonomy.SECURITY_ROLE,
    'user': taxonomy.SECURITY_ACCOUNT_USER,
    'domain': taxonomy.SECURITY_DOMAIN,
    'region': taxonomy.SECURITY_REGION,
    'endpoint': taxonomy.SECURITY_ENDPOINT,
    'service': taxonomy.SECURITY_SERVICE,
    'policy': taxonomy.SECURITY_POLICY,
    'OS-TRUST:trust': taxonomy.SECURITY_TRUST,
    'OS-OAUTH1:access_token': taxonomy.SECURITY_CREDENTIAL,
    'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL,
    'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT,
}

SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0'

# resource types that can be notified
# Structure: {event: {resource_type: set(callbacks)}} — filled by
# register_event_callback, consumed by notify_event_callbacks.
_SUBSCRIBERS = {}
# Lazily-built oslo_messaging Notifier; None = not built yet, False = a
# previous construction attempt failed (see _get_notifier).
_notifier = None
SERVICE = 'identity'

CONF = keystone.conf.CONF

# NOTE(morganfainberg): Special case notifications that are only used
# internally for handling token persistence token deletions
INVALIDATE_USER_TOKEN_PERSISTENCE = 'invalidate_user_tokens'
INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE = 'invalidate_user_project_tokens'
INVALIDATE_USER_OAUTH_CONSUMER_TOKENS = 'invalidate_user_consumer_tokens'


class Audit(object):
    """Namespace for audit notification functions.

    This is a namespace object to contain all of the direct notification
    functions utilized for ``Manager`` methods.
    """

    @classmethod
    def _emit(cls, operation, resource_type, resource_id, initiator, public,
              actor_dict=None):
        """Directly send an event notification.

        :param operation: one of the values from ACTIONS
        :param resource_type: type of resource being affected
        :param resource_id: ID of the resource affected
        :param initiator: CADF representation of the user that created the
                          request
        :param public: If True (default), the event will be sent to the
                       notifier API.  If False, the event will only be sent via
                       notify_event_callbacks to in process listeners
        :param actor_dict: dictionary of actor information in the event of
                           assignment notification
        """
        # NOTE(stevemar): the _send_notification function is
        # overloaded, it's used to register callbacks and to actually
        # send the notification externally. Thus, we should check
        # the desired notification format in the function instead
        # of before it.
        _send_notification(
            operation,
            resource_type,
            resource_id,
            actor_dict,
            public=public)

        if CONF.notification_format == 'cadf' and public:
            outcome = taxonomy.OUTCOME_SUCCESS
            _create_cadf_payload(operation, resource_type, resource_id,
                                 outcome, initiator)

    @classmethod
    def created(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.created, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def updated(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.updated, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def disabled(cls, resource_type, resource_id, initiator=None,
                 public=True):
        cls._emit(ACTIONS.disabled, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def deleted(cls, resource_type, resource_id, initiator=None,
                public=True):
        cls._emit(ACTIONS.deleted, resource_type, resource_id, initiator,
                  public)

    @classmethod
    def added_to(cls, target_type, target_id, actor_type, actor_id,
                 initiator=None, public=True):
        # Assignment notification: emitted as an 'updated' on the target,
        # with the actor carried in actor_dict.
        actor_dict = {'id': actor_id,
                      'type': actor_type,
                      'actor_operation': 'added'}
        cls._emit(ACTIONS.updated, target_type, target_id, initiator, public,
                  actor_dict=actor_dict)

    @classmethod
    def removed_from(cls, target_type, target_id, actor_type, actor_id,
                     initiator=None, public=True):
        actor_dict = {'id': actor_id,
                      'type': actor_type,
                      'actor_operation': 'removed'}
        cls._emit(ACTIONS.updated, target_type, target_id, initiator, public,
                  actor_dict=actor_dict)

    @classmethod
    def internal(cls, resource_type, resource_id):
        # NOTE(lbragstad): Internal notifications are never public and have
        # never used the initiator variable, but the _emit() method expects
        # them. Let's set them here but not expose them through the method
        # signature - that way someone can not do something like send an
        # internal notification publicly.
        initiator = None
        public = False
        cls._emit(ACTIONS.internal, resource_type, resource_id, initiator,
                  public)


def _get_callback_info(callback):
    """Return list containing callback's module and name.

    If the callback is a bound instance method also return the class name.

    :param callback: Function to call
    :type callback: function
    :returns: List containing parent module, (optional class,) function name
    :rtype: list
    """
    module_name = getattr(callback, '__module__', None)
    func_name = callback.__name__
    if inspect.ismethod(callback):
        class_name = reflection.get_class_name(callback.__self__,
                                               fully_qualified=False)
        return [module_name, class_name, func_name]
    else:
        return [module_name, func_name]


def register_event_callback(event, resource_type, callbacks):
    """Register each callback with the event.

    :param event: Action being registered
    :type event: keystone.notifications.ACTIONS
    :param resource_type: Type of resource being operated on
    :type resource_type: str
    :param callbacks: Callback items to be registered with event
    :type callbacks: list
    :raises ValueError: If event is not a valid ACTION
    :raises TypeError: If callback is not callable
    """
    if event not in ACTIONS:
        raise ValueError(_('%(event)s is not a valid notification event, must '
                           'be one of: %(actions)s') %
                         {'event': event, 'actions': ', '.join(ACTIONS)})

    if not hasattr(callbacks, '__iter__'):
        # Accept a single callback as well as a list of them.
        callbacks = [callbacks]

    for callback in callbacks:
        if not callable(callback):
            msg = _('Method not callable: %s') % callback
            LOG.error(msg)
            raise TypeError(msg)
        _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set())
        _SUBSCRIBERS[event][resource_type].add(callback)

        if LOG.logger.getEffectiveLevel() <= log.DEBUG:
            # Do this only if it's going to appear in the logs.
            msg = 'Callback: `%(callback)s` subscribed to event `%(event)s`.'
            callback_info = _get_callback_info(callback)
            callback_str = '.'.join(i for i in callback_info if i is not None)
            event_str = '.'.join(['identity', resource_type, event])
            LOG.debug(msg, {'callback': callback_str, 'event': event_str})


def listener(cls):
    """A class decorator to declare a class to be a notification listener.

    A notification listener must specify the event(s) it is interested in by
    defining a ``event_callbacks`` attribute or property. ``event_callbacks``
    is a dictionary where the key is the type of event and the value is a
    dictionary containing a mapping of resource types to callback(s).

    :data:`.ACTIONS` contains constants for the currently
    supported events. There is currently no single place to find constants for
    the resource types.

    Example::

        @listener
        class Something(object):

            def __init__(self):
                self.event_callbacks = {
                    notifications.ACTIONS.created: {
                        'user': self._user_created_callback,
                    },
                    notifications.ACTIONS.deleted: {
                        'project': [
                            self._project_deleted_callback,
                            self._do_cleanup,
                        ]
                    },
                }

    """

    def init_wrapper(init):
        # Wrap __init__ so registration happens after normal initialization
        # (event_callbacks is typically set inside __init__).
        @functools.wraps(init)
        def __new_init__(self, *args, **kwargs):
            init(self, *args, **kwargs)
            _register_event_callbacks(self)
        return __new_init__

    def _register_event_callbacks(self):
        for event, resource_types in self.event_callbacks.items():
            for resource_type, callbacks in resource_types.items():
                register_event_callback(event, resource_type, callbacks)

    cls.__init__ = init_wrapper(cls.__init__)
    return cls


def notify_event_callbacks(service, resource_type, operation, payload):
    """Send a notification to registered extensions."""
    if operation in _SUBSCRIBERS:
        if resource_type in _SUBSCRIBERS[operation]:
            for cb in _SUBSCRIBERS[operation][resource_type]:
                subst_dict = {'cb_name': cb.__name__,
                              'service': service,
                              'resource_type': resource_type,
                              'operation': operation,
                              'payload': payload}
                LOG.debug('Invoking callback %(cb_name)s for event '
                          '%(service)s %(resource_type)s %(operation)s for '
                          '%(payload)s', subst_dict)
                cb(service, resource_type, operation, payload)


def _get_notifier():
    """Return a notifier object.

    If _notifier is None it means that a notifier object has not been set.
    If _notifier is False it means that a notifier has previously failed to
    construct.
    Otherwise it is a constructed Notifier object.
    """
    global _notifier

    if _notifier is None:
        host = CONF.default_publisher_id or socket.gethostname()
        try:
            transport = oslo_messaging.get_notification_transport(CONF)
            _notifier = oslo_messaging.Notifier(transport,
                                                "identity.%s" % host)
        except Exception:
            LOG.exception(_LE("Failed to construct notifier"))
            _notifier = False

    return _notifier


def clear_subscribers():
    """Empty subscribers dictionary.

    This effectively stops notifications since there will be no subscribers
    to publish to.
    """
    _SUBSCRIBERS.clear()


def reset_notifier():
    """Reset the notifications internal state.

    This is used only for testing purposes.
    """
    global _notifier
    _notifier = None


def _create_cadf_payload(operation, resource_type, resource_id,
                         outcome, initiator):
    """Prepare data for CADF audit notifier.

    Transform the arguments into content to be consumed by the function that
    emits CADF events (_send_audit_notification). Specifically the
    ``resource_type`` (role, user, etc) must be transformed into a CADF
    keyword, such as: ``data/security/role``. The ``resource_id`` is added as a
    top level value for the ``resource_info`` key. Lastly, the ``operation`` is
    used to create the CADF ``action``, and the ``event_type`` name.

    As per the CADF specification, the ``action`` must start with create,
    update, delete, etc... i.e.: created.user or deleted.role

    However the ``event_type`` is an OpenStack-ism that is typically of the
    form project.resource.operation. i.e.: identity.project.updated

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on (role, user, etc)
    :param resource_id: ID of resource being operated on
    :param outcome: outcomes of the operation (SUCCESS, FAILURE, etc)
    :param initiator: CADF representation of the user that created the request
    """
    if resource_type not in CADF_TYPE_MAP:
        target_uri = taxonomy.UNKNOWN
    else:
        target_uri = CADF_TYPE_MAP.get(resource_type)
    target = resource.Resource(typeURI=target_uri, id=resource_id)
    audit_kwargs = {'resource_info': resource_id}
    cadf_action = '%s.%s' % (operation, resource_type)
    event_type = '%s.%s.%s' % (SERVICE, resource_type, operation)

    _send_audit_notification(cadf_action, initiator, outcome, target,
                             event_type, **audit_kwargs)


# NOTE(review): the definition below is truncated by the end of this chunk;
# its remainder (and the closing of its docstring) lies outside this view.
def _send_notification(operation, resource_type, resource_id, actor_dict=None,
                       public=True):
    """Send notification
ne tXr": 1993, "223 X": -1994, "rolcX": 1995, "Xrune": 1996, " zXri": 1997, "herkX": 1998, "Xstah": 1999, "UsXr": -2000, "UnsX": 2001, "iki tXrl": -2002, " sXslu": 2003, " Xmme": 2004, " cXru": 2005, " Xfl": 2006, "OlcX": 2007, "mXsia": 2008, "mXsi": -2009, "Xrub": -2010, "hCX": -2011, " Xvey": 2012, " lXn": -2013, " klX": 2014, " CXl": -2015, " lXc": -2016, "UhX": 2017, "Xpe": 2018, " Xzel": -2019, "sUslX": 2020, "hXzn": 2021, "UmdX": 2022, " mXstak": 2024, "Uz mX ": 2025, " Xnes": -2026, "raSX": 2027, "cXbb": 2028, " etX": 2029, "UmcX": 2030, "OplX": 2031, "Xrad": -2032, " mXhur": 2033, "ohl X": 2034, "Xstad": 2035, " rXsd": 2036, " kamX": -2037, "sedX": 2038, "OkmX": 2039, "0 lXk ": -2040, "zulXm": 2041, " sXk": 2042, "arolX": -2043, " Xto": 2044, "Xsev": -2045, "kXme": 2046, " Xzum": 2047, "USkX": 2048, "Xlisla": 2049, " kXlli": 2050, "mbolX": 2051, "kXrtc": 2052, "OksXz": 2053, "lUzX": -2054, " nXk": 2055, "Xlge": 2056, "Xlha": 2057, "UCsX": 2058, "Uk Xn ": 2059, "OGsX": 2060, "Us X": 2061, "Xlf": 2062, "hXrda": -2063, "edXk": 2064, "aX": -2065, " pXru": 2066, "skXd": 2067, "dXme": 2068, "Xti": -2069, "e bXrun": 2070, "Ur Xn": 2071, " Xzd": 2072, "UrbX": 2073, "OzdX": 2074, "U yX ": 2075, "kXrdi": 2076, "Xrle": 2077, " Xmra": 2078, "Oz X": 2079, " Xslu ": -2080, "Un mX ": 2081, "Xzaf": -2082, "Xnet": -2084, "UsbX": 2085, "kXrsus": 2086, " tXcc": 2087, " mXy": -2088, "rXce": -2089, "zahX": 2090, "Xrsude": 2091, "altXs": 2092, " mXs ": -2093, "Xtle": 2094, "Uz Xn ": 2095, "kXle ": -2096, "OrsX": 2097, " Xlu": -2098, "hXsr": 2099, "sulsX": 2100, "bXyur": -2101, " tXy": 2102, "CXb": -2103, "Xrunde": 2104, "ilgX": 2105, "strXma": 2106, " Xsku": 2107, "kXrsuy": 2108, "rodXk": 2109, "OvdX": 2110, "U mX ": 2111, "pXsku": 2112, "ongX": -2113, "Xndee": -2114, "Xsn": 2115, "zsX": 2116, "kXtuk": 2117, "rabXk": 2118, "tfX ": 2119, "Xrf": -2120, "r tXr ": 2121, "UsnX": 2122, "rtXk": 2123, " usulX": 2124, " Xzuc": 2125, "CXmr": -2126, "Xnsa": 2128, "UlsX": 2129, "Xdurn": 
-2130, "sUtXn": -2131, "UstlX": 2132, "hXsam": 2133, "UrtX": 2134, "rXtb": 2135, "UpX": 2136, "UzdX": 2137, "kabulX": 2138, "mXts": -2139, " nXr": -2140, "Ok X": 2141, "bXlt": 2142, "iyanX": 2143, "esadX": 2144, "UftX": 2145, "Un Xn ": 2146, "UrcX": 2147, "sansX": 2148, " cXz": 2149, "oylX": -2150, "OlmX": 2151, " eymXr ": 2152, "Xhim": 2153, "Xrbet": -2154, "UkrX": 2155, "tokolX": 2156, " dXdu": 2157, "ollX ": 2158, "tXrde": 2159, "ktXe": 2160, "Xnited": -2161, "cavX": 2162, "OpX": 2163, "hXzun": 2164, "erXv": 2165, "OrtlX": 2166, "Xtuph": 2167, "katX": 2168, "UssX": 2169, "tXdy": 2170, "Xrtle": 2171, " tXbi": 2172, "kXtah": 2173, "bXtt": -2174, "UydX": 2175, "Xva": -2178, "UtlX": 2179, "OnmX": 2180, "00 X": 2181, "OktX": 2182, "sXmer": 2183, " jXri": 2184, "Xi": -2185, " Xssu": 2186, "Xcg": 2187, "Xrla": -2188, "OSkX": 2189, "OklX": 2190, " bXk": 2191, "SgXl": -2192, " lX ": -2193, "uyX": -2194, "hXly": 2195, "UttX": 2196, "OmrX": 2197, "umX": -2198, " mXm ": -2199, "sXnn": 2200, "reddX": 2201, "sXret": -2202, "popXl": 2203, "golcX": 2204, "UkmX": 2205, "gXe": -2206, " Xra": -2207, "UktX": 2208, "UmsX": 2209, "abX": -2210, "Xcte": 2211, "3 X": 2212, "dtX": 2213, "kXba": 2214, "hhX": 2215, "USlX": 2216, "ungX": -2217, "Xgl": -2218, "tXzu": 2219, "UlkX": 2220, "UskX": 2221, "elCX": -2222, "mXtf": -2223, "Xkse": 2224, "Xzunt": 2225, "UrmX": 2226, "ercXm": 2227, "opCX": -2228, " CXk": -2229, " Xslu": 2230, " dXk": 2231, " rXy": 2232, "ObX": 2233, "akX ": 2234, "tCX ": -2235, "rXstu ": 2236, "ekX": 2237, "4 X": 2238, "UrkX": 2239, " Xclu": 2240, "Xci": -2241, "Xsv": 2242, "Xtab": -2243, "UfXr": 2244, "UrsX": 2245, "Xkm": 2246, "OGX": 2247, "ukCX": -2248, "Xne": 2249, "kXf": 2250, " eyX": 2251, "tXrb": 2252, "Ul X": 2253, "OncX": 2254, "dXz": 2255, "gXrur": -2256, "Xzul": 2257, " kXrt ": 2258, "ribX": 2259, "hUcX": -2260, "UzlX": 2261, "OnlX": 2262, "OvX": 2263, "yor mX": -2264, "OrlX": 2265, " Xnal": 2266, "USmX": 2267, " vUcX": -2268, " vXc": 2269, "Xmle": 2270, " 
SXk": 2271, " hXc": 2272, "tatX": 2273, "itX": 2274, "UllX": 2275, " sXt": 2277, " tXp": 2278, "OrmX": 2279, " golX": 2280, "ecrX": 2281, "Xph": 2282, "tXsia": 2283, "Xkle": 2284, "lgX": -2285, "komXn": 2286, "UmlX": 2287, "UnmX": 2288, " hXr": 2289, "OykX": 2290, "ordX": -2291, "UlmX": 2293, " dXg": 2294, "dXrust": 2295, "Xhe": 2296, "Xfe": 2297, "mXrat": -2298, "mXl": 2299, "Xny": 2300, "OzlX": 2301, " sXp": 2302, "anXstu": 2303, "hXsey": 2304, "OrtX": 2305, "Urk X": 2306, "OprX": 2307, "UklX": 2308, "UmrX": 2309, "mXmc": -2310, "Xzg": 2311, "OmX": 2312, "Xka": -2313, "sXri": -2314, " lX": 2315, "UbX": 2316, " Xmi": 2317, "OyX": 2318, "bXs": 2320, "OzcX": 2321, "lbXm": 2322, "orgX": -2323, "UntX": 2324, "Xce": 2325, "u tXr ": 2326, "Xa": -2327, "rolX": 2328, "kXz": -2329, "U nX": 2330, "bXro": 2331, " Xcunc": 2332, "OndX": 2333, " mX ": -2334, "mUslX": 2335, " nXf": 2336, " tXke": 2337, "enX": 2338, " Xr": 2339, "UldX": 2340, "tXrl": 2341, "uCX": -2342, "UmkX": 2343, " kulX": 2344, "UClX": 2345, "UncX": 2346, "Xcr": 2347, "UndX": 2348, "OdX": 2349, "Xsta": -2350, "mXtl": -2351, " Xnl": 2352, "OldX": 2353, "UcX": 2354, "urgX": -2356, "OrdX": 2357, "UStX": 2358, "UltX": 2359, " Xni": 2360, "ylX": 2361, "UstX": 2362, "OtX": 2363, " yXru": 2364, "kXcu": 2365, "UsX": 2366, "kXlt": 2367, "UzX": 2368, "UnlX": 2369, "Xle": 2370, "Xh": -2372, " Xc ": 2373, "UrdX": 2374, "UlX": 2375, " mX": 2376, " Xst": 2377, "UrlX": 2378, " tXm": 2379, "UnkX": 2380, "Xye": 2381, " bXt": 2382, "CX": 2383, "OlX": 2384, " sXr": 2385, "UtX": 2386, "OzX": 2387, "ygX": -2388, "UmX": 2389, "UGX": 2390, "USX": 2392, "hXkum": 2393, "yXz": 2394, "dXs": 2395, " dXn": 2396, "UkX": 2397, "Xlk": 2398, "bXyu": 2399, "UrX": 2400, "Xre": 2402, "OrX": 2403, "yXk": 2404, "Xze": 2405, "tXrk": 2406, "gX": 2407, "X": -2408}, u'i': {"n kXsan": -1, " nin Xn": -2, "tIyor X": -3, " armanX": -4, "Xstirab": 5, "aktXgim": 6, "eci Xsi": 7, "er de X": -8, "ere Xsi": 9, "ne takX": 10, "ratan X": -11, "Uyen Xn": 12, " 
alanXs": -13, "sI sXr ": -14, "ivan mX": 15, "iran Xn": 16, "na katX": 17, "ka katX": 18, "fsIz lX": 19, "ble X ": 20, "fbI lX": 21, "Cal mX": 22, " otanX": -23, "yevarX": -24, "c k nX": -25, "when X": 26, "ortarX": -27, "an e X": 28, " mXnc": 29, "le rXo": 30, "und Xn": 31, "zaks X": 32, "lain X": 33, "daS lX": 34, " laklX": -35, "endarX": -36, "rsal X": -37, "nat sX": 38, "tovanX": -39, "Iz mXy": 40, "ilallX": -41, "yanC X": 42, "solarX": -43, "scu sX": -44, "gi acX": -45, "glantX": -46, "inlarX": -47, "glarXz": -48, "ikmamX": -49, "asadXg": -50, "k acXs": -51, "l acXk": -52, "c sayX": -53, "6 yasX": -54, "asamX ": -55, "asamXs": -56, "stayXm": -57, "sXnanm": 58, "r cXne": 59, "in deX": 60, "ears X": -61, "nanayX": -62, " farX ": 63, "az Xnk": 64, " aSXya": -65, "k stXn": 66, "re dX ": 67, "ayaldX": -68, " arXs ": -69, "r mXri": 70, "mhk nX": -71, "Ul clX": 72, "ssa kX": 73, "itladX": -74, "islarX": -75, " sXniz": -76, "tXciyd": -77, "Im dXr": 78, "yara X": -79, "ol Xn ": 80, "ru bX ": 81, "al Xni": 82, "bI Xnt": -83, "nike X": 84, "sisasX": -85, "rodalX": -86, "InC Xn": 87, "kuk tX": 88, "rI rXf": 89, "Ip mX ": 90, "sXzinc": 91, "am akX": -92, "fXtild": 93, "ur anX": 94, "ah mX ": 95, "n hXr ": -96, "kal lX": 97, " kXng": 98, "I aczX": 99, "t mXst": 100, "mu Xnt": 101, " day X": -102, " nasX ": -103, "ars sX": -104, "4 takX": 105, "miranX": -106, "mir nX": 107, "an cX ": 108, "fayI X": 109, "ayrXni": 110, " Xnfo": 111, "cu Xl ": 112, " alXmd": -113, "partlX": -114, "non bX": 115, "k pXli": 116, "ISI lX":
'''
Implements the Domain Specific Pipeline (DSP)

Runs an MCTS modified to implement Eterna player strategies
Second process of the SAP
'''
import sys
import numpy as np
from eterna_score import get_pairmap_from_secstruct
import RNA
from subprocess import Popen, PIPE, STDOUT
import re
from difflib import SequenceMatcher
import copy
import math


def encode_struc(dots):
    '''
    Encode a dot-bracket structure string as a list of ints.

    :param dots: target structure in dot-bracket notation
    :return: list with 1 for '.', 2 for '(' and 3 for ')'; any other
             character is silently dropped (same as the original loop)
    '''
    code = {'.': 1, '(': 2, ')': 3}
    return [code[c] for c in dots if c in code]


def find_parens(s):
    '''
    Map every opening parenthesis index to its matching closing index.

    :param s: structure string containing '(' and ')' (dots ignored)
    :return: dict {open_index: close_index}
    :raises IndexError: if the parentheses are unbalanced
    '''
    toret = {}
    pstack = []

    for i, c in enumerate(s):
        if c == '(':
            pstack.append(i)
        elif c == ')':
            if len(pstack) == 0:
                raise IndexError("No matching closing parens at: " + str(i))
            toret[pstack.pop()] = i

    if len(pstack) > 0:
        raise IndexError("No matching opening parens at: " + str(pstack.pop()))

    return toret


def str_to_num(s):
    '''
    Convert a base letter to its numeric code (A=1, U=2, G=3, C=4).

    Returns None for any other input (original behavior, preserved for
    compatibility with existing callers).
    '''
    codes = {'A': 1, 'U': 2, 'G': 3, 'C': 4}
    return codes.get(s)


def pairmap_from_sequence(seq, vienna_version, vienna_path='../../../EteRNABot/eternabot/./RNAfold'):
    '''
    Fold an RNA sequence and return the pair map of its predicted structure.

    :param seq: RNA sequence (string or list of single-character strings)
    :param vienna_version: 1 to shell out to the Vienna 1.8.5 RNAfold
                           binary, 2 to use the RNA python bindings
    :param vienna_path: path to the Vienna 1.8.5 RNAfold executable
    :return: pair map (list) of the predicted secondary structure
    '''
    new_struc = ''
    if vienna_version == 1:
        if sys.version_info[:3] > (3, 0):
            # Python 3 pipes are binary by default; request text mode.
            p = Popen([vienna_path, '-T', '37.0'], stdout=PIPE, stdin=PIPE,
                      stderr=STDOUT, encoding='utf8')
        else:
            p = Popen([vienna_path, '-T', '37.0'], stdout=PIPE, stdin=PIPE,
                      stderr=STDOUT)
        pair = p.communicate(input=''.join(seq))[0]
        # BUGFIX: use a raw string for the regex. The original plain literal
        # '\s+| \(?\s?' relied on invalid escape sequences being passed
        # through (a DeprecationWarning in Python 3.6+); the compiled
        # pattern, and therefore the behavior, is unchanged.
        formatted = re.split(r'\s+| \(?\s?', pair)
        new_struc = formatted[1]
    elif vienna_version == 2:
        new_struc = RNA.fold(''.join(seq))[0]
    return get_pairmap_from_secstruct(new_struc)


dot_bracket = '.....((((..((((....)))).)))).....'
# Default/demo input: all-A sequence the same length as the target structure.
seq_str = 'A'*len(dot_bracket)


def dsp(dot_bracket, seq_str, vienna_version='1', vienna_path='../../../EteRNABot/eternabot/./RNAfold'):
    # domain specific pipeline
    '''
    Adds player strategies via a MCTS
    :param dot_bracket: The target structure of the RNA in dot-bracket notation
    :param seq_str: The current RNA sequence
    :param vienna_version: Vienna 1.8.5 or Vienna 2
    :param vienna_path: Path to the Vienna 1.8.5 RNAfold
    :return: The updated RNA sequence after the DSP
    '''
    try:
        vienna_version = int(vienna_version)
    except TypeError:
        raise TypeError('Please pass in a valid Vienna version')
    # NOTE(review): asserting on a parenthesized (condition, message) tuple
    # is always truthy, so these two checks can never fire. The intended
    # form is `assert vienna_version <= 2, "..."`. Left unchanged in this
    # documentation-only pass.
    assert(vienna_version <= 2, "Please pass in a valid Vienna version")
    assert(vienna_version >= 1, "Please pass in a valid Vienna version")
    seq = list(seq_str)
    # m accumulates the applied moves as [base_code, 1-based_position] pairs.
    m = []
    # SOLVE is set but not used within this visible portion of the function.
    SOLVE = False
    current_struc,_ = RNA.fold(seq_str)
    # target_struc is computed but not referenced in the visible body.
    target_struc = encode_struc(dot_bracket)
    target_pm = get_pairmap_from_secstruct(dot_bracket)
    current_pm = get_pairmap_from_secstruct(current_struc)
    pairs = find_parens(dot_bracket)
    #print target_pm
    #print current_pm

    """ Correcting incorrect base pairings """
    ############ Comment out from here to remove this strategy #############
    # For every paired position in the target, mutate the 5' base so the
    # pair becomes a canonical (or wobble) pair; untouched cases fall
    # through via `continue`.
    for base1, base2 in pairs.items():  # corrects incorrect base pairings
        #print base1,base2
        if (seq[base1] == 'A' and seq[base2] == 'U') or (seq[base1] == 'U' and seq[base2] == 'A'):
            continue
        elif (seq[base1] == 'G' and seq[base2] == 'U') or (seq[base1] == 'U' and seq[base2] == 'G'):
            continue
        elif (seq[base1] == 'G' and seq[base2] == 'C') or (seq[base1] == 'C' and seq[base2] == 'G'):
            continue
        elif (seq[base1] == 'G' and seq[base2] == 'A'):
            seq[base1] = 'U'
            m.append([2,base1+1])
        elif (seq[base1] == 'A' and seq[base2] == 'G'):
            seq[base1] = 'C'
            m.append([4,base1+1])
        elif (seq[base1] == 'C' and seq[base2] == 'U'):
            seq[base1] = 'A'
            m.append([1,base1+1])
        elif (seq[base1] == 'U' and seq[base2] == 'C'):
            seq[base1] = 'G'
            m.append([3,base1+1])
        elif (seq[base1] == 'A' and seq[base2] == 'C'):
            seq[base1] = 'G'
            m.append([3,base1+1])
        elif (seq[base1] == 'C' and seq[base2] == 'A'):
            seq[base1] = 'U'
            m.append([2,base1+1])
        elif (seq[base1] == 'A' and seq[base2] == 'A'):
            seq[base1] = 'U'
            m.append([2,base1+1])
        elif (seq[base1] == 'U' and seq[base2] == 'U'):
            seq[base1] = 'A'
            m.append([1,base1+1])
        elif (seq[base1] == 'G' and seq[base2] == 'G'):
            seq[base1] = 'C'
            m.append([4,base1+1])
        elif (seq[base1] == 'C' and seq[base2] == 'C'):
            seq[base1] = 'G'
            m.append([3,base1+1])
    #print ''.join(seq)
    # Every unpaired target position (-1 in the pair map) is set to 'A'.
    for i in range(len(target_pm)):
        if target_pm[i] == -1:
            seq[i] = 'A'
            m.append([1,i+1])
        else:
            continue
    ######################################################################

    """ End pairs to G-C """
    ############ Comment out from here to remove this strategy #############
    # Stack-closing pairs (adjacent to a loop or an opposite bracket) are
    # forced to G-C; IndexError from the i-1/i+1 neighbor probes at the
    # string ends is deliberately swallowed.
    for i in range(len(dot_bracket)):
        try:
            if dot_bracket[i] == '(':# or dot_bracket[i] == ')':
                #print dot_bracket[i]
                if dot_bracket[i-1] == '.' or dot_bracket[i-1] == ')' or dot_bracket[i+1] == '.' or dot_bracket[i+1] == ')':
                    #print i
                    if (seq[i] == 'G' and seq[target_pm[i]] == 'C') or (seq[i] == 'C' and seq[target_pm[i]] == 'G'):
                        continue
                    else:
                        seq[i] = 'G'
                        seq[target_pm[i]] = 'C'
                        m.append([3,i+1])
                        m.append([4,target_pm[i]+1])
            # elif dot_bracket[i+1] == '.' and dot_bracket[i+2] == '.' and dot_bracket[i+3] == '.' and dot_bracket[i+4] == '.':
            #     seq[i+1] = 'G'
            elif dot_bracket[i] == ')':# or dot_bracket[i] == ')':
                #print dot_bracket[i]
                if dot_bracket[i-1] == '.' or dot_bracket[i-1] == '(' or dot_bracket[i+1] == '.' or dot_bracket[i+1] == '(':
                    #print i
                    if (seq[i] == 'G' and seq[target_pm[i]] == 'C') or (seq[i] == 'C' and seq[target_pm[i]] == 'G'):
                        continue
                    else:
                        seq[i] = 'G'
                        seq[target_pm[i]] = 'C'
                        m.append([3,i+1])
                        m.append([4,target_pm[i]+1])
        except IndexError:
            continue
    ######################################################################

    """ G External Loop Boost """
    ############ Comment out from here to remove this strategy #############
    # An opening bracket followed by four dots gets a G boost at i+1.
    # NOTE(review): unlike the previous strategy there is no IndexError
    # guard here, so i+4 probes near the string end could raise.
    for i in range(len(dot_bracket)):
        if dot_bracket[i] == '(':
            if dot_bracket[i+1] == '.' and dot_bracket[i+2] == '.' and dot_bracket[i+3] == '.' and dot_bracket[i+4] == '.':
                seq[i+1] = 'G'
                m.append([3,i+2])
            # elif (dot_bracket[i+1] == '.' and dot_bracket[i+2] == '('):
            #     seq[i+1] = 'G'
    ########################################################################

    """ G-A Internal Loop Boost """
    ############ Comment out ######################################################
    for i in range(len(dot_bracket)):
        #pairing = target_pm[i]
        if dot_bracket[i] == '(' and dot_bracket[i+1] == '.':# and dot_bracket[target_pm[i]] == ")" and dot_bracket[target_pm[i-1]] == '.':
            # Collect the run of dots on the 5' side until the next '('.
            leftdots = []
            starter = 0
            for j in range(i+1, len(dot_bracket)):
                if dot_bracket[j] == '(':# or dot_bracket[j] == ')':
                    starter = j
                    break
                leftdots.append(dot_bracket[j])
            # Collect the run of dots on the 3' side (scanning backwards
            # from the partner) until the next ')'.
            rightdots = []
            idx = target_pm[i]
            ender = 0
            for k in range(idx-1,-1,-1):
                if dot_bracket[k] == ')':# or dot_bracket[k] == '(':
                    ender = k
                    break
                rightdots.append(dot_bracket[k])
            if len(leftdots) > 0 and len(rightdots) > 0:
                if (len(leftdots) != 2 or len(rightdots) != 2) and (math.fabs(len(rightdots) - len(leftdots)) <= 5):
                    if target_pm[starter] == starter or target_pm[starter] == ender:
                        seq[i+1] = 'G'
                        seq[ender+1] = 'G'
        ##############################################################################
        # if dot_bracket[i] == ')' and dot_bracket[i+1] == '.' and dot_bracket[target_pm[i]] == "(" and dot_bracket[target_pm[i+1]] == '.':
        #     dots = []
        #     starter = 0
        #     for j in range(i+1, len(dot_bracket)):
        #         if dot_bracket[j] == ')':
        #             starter = j
        #             break
        #         dots.append(dot_bracket[j])
        #
        #     idx = target_pm[i]
        #     ender = 0
        #     for k in range(idx-1,-1,-1):
        #         if dot_bracket[k] == '(':
        #             ender = k
        #             break
        #         dots.append(dot_bracket[k])
        #
        #     if dots.count(dots[0]) == len(dots) and target_pm[ender] == starter:
        #         seq[i+1] = 'G'
        #         seq[ender+1] = 'G'

        """ U-G-U-G Superboost """
        ############ Comment out from here to remove this strategy #############
        if dot_bracket[i] == '(' and dot_bracket[i+1] == '.' and dot_bracket[i+2] == '.' and dot_bracket[i+3] == '(': # UGUG superboost
            idx = target_pm[i]
            dots = []
            starter = 0
            for j in range(i + 1, len(dot_bracket)):
                if dot_bracket[j] == '(':
                    starter = j
                    break
                dots.append(dot_bracket[j])
            idx = target_pm[i]
            ender = 0
            for k in range(idx - 1, -1, -1):
                if dot_bracket[k] == ')':
                    ender = k
                    break
                dots.append(dot_bracket[k])
            # Symmetric 2x2 internal loop gets the full U-G / U-G boost;
            # the asymmetric 2x1 case falls back to a plain G-G boost.
            if dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == '.' and dot_bracket[idx-3] == ')' and target_pm[ender] == starter:
                seq[i+1] = 'U'
                seq[i+2] = 'G'
                seq[idx-2] = 'U'
                seq[idx-1] = 'G'
                m.append([2,i+2])
                m.append([3,i+3])
                m.append([2,idx-1])
                m.append([3,idx])
            elif dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == ')':
                seq[i+1] = 'G'
                seq[idx-1] = 'G'
                m.append([3,i+2])
                m.append([3,idx])
        ######################################################################
        # if dot_bracket[i] == '(' and dot_bracket[i+1] == '.' and dot_bracket[i+2] == '(':  # G-G in 2 pair internal loop
        #     idx = target_pm[i]
        #     if dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == ')':
        #         seq[i+1] = 'G'
        #         seq[idx-1] = 'G'
        #         m.append([3,i+2])
        #         m.append([3,idx])
        #     elif dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == '.' and dot_bracket[idx-3] == ')':
        #         seq[i+1] = 'G'
        #         seq[idx-1] = 'G'
        #         m.append([3,i+2])
        #         m.append([3,idx])
        #
        # if dot_bracket[i] == '(' and dot_bracket[i+1] == '.' and dot_bracket[i+2] == '.' and dot_bracket[i+3] == '.' and dot_bracket[i+4] == '(':  # G-G in 2 pair internal loop
        #     idx = target_pm[i]
        #     if dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == '.' and dot_bracket[idx-3] == '.' and dot_bracket[idx-4] == ')':
        #         seq[i+1] = 'G'
        #         seq[idx-3] = 'G'
        #         m.append([3,i+2])
        #         m.append([3,idx])
        #     elif dot_bracket[idx] == ')' and dot_bracket[idx-1] == '.' and dot_bracket[idx-2] == '.' and dot_bracket[idx-3] == ')':
        #         seq[i+1] = 'G'
        #         seq[idx-1] = 'G'
        #         m.append([3,i+2])
        #         m.append([3,idx])

    ''' Flips base pairs '''
    ############ Comment out from here to remove this strategy #############
    # Refold the mutated sequence and score similarity of the new pair map
    # against the target. (Function continues beyond this chunk.)
    new_pm = pairmap_from_sequence(seq, vienna_version)
    match = SequenceMatcher(None,new_pm,target_pm).ratio()
returns the item information """ params = { "f" : "json", "token" : self._token } uURL = self._url + "/iteminfo" return self._do_get(url=uURL, param_dict=params) #---------------------------------------------------------------------- def addPermission(self, principal, isAllowed=True): """ Assigns a new permission to a role (principal). The permission on a parent resource is automatically inherited by all child resources. Inputs: principal - role to be assigned isAllowed - access of resource by boolean Output: JSON message as dictionary """ uURL = self._url + "/permissions/add" params = { "f" : "json", "token" : self._token, "principal" : principal, "isAllowed" : isAllowed } return self._do_post(url=uURL, param_dict=params) ######################################################################## class Machines(BaseAGSServer): """ his resource represents a collection of all the server machines that have been registered with the site. It other words, it represents the total computing power of your site. A site will continue to run as long as there is one server machine online. For a server machine to start hosting GIS services, it must be grouped (or clustered). When you create a new site, a cluster called 'default' is created for you. The list of server machines in your site can be dynamic. You can register additional server machines when you need to increase the computing power of your site or unregister them if you no longer need them. 
""" _machines = None _proxy_port = None _proxy_url = None #---------------------------------------------------------------------- def __init__(self, url, token_url, username, password, initialize=False, proxy_url=None, proxy_port=None): """Constructor Inputs: url - admin url token_url - url to generate token username - admin username password - <PASSWORD> """ self._proxy_port = proxy_port self._proxy_url = proxy_url self._url = url self._token_url = token_url self._username = username self._password = password self.generate_token() if initialize: self.__init() #---------------------------------------------------------------------- def __init(self): """ populates server admin information """ params = { "f" : "json", "token" : self._token } json_dict = self._do_get(url=self._url, param_dict=params) attributes = [attr for attr in dir(self) if not attr.startswith('__') and \ not attr.startswith('_')] for k,v in json_dict.iteritems(): if k == "machines": self._machines = [] for m in v: self._machines.append( Machine(url=self._url +"/%s" % m['machineName'], token_url=self._token_url, username=self._username, password=self._password, proxy_url=self._proxy_url, proxy_port=self._proxy_port) ) elif k in attributes: setattr(self, "_"+ k, json_dict[k]) else: print k, " - attribute not implmented for Machines" del k del v #---------------------------------------------------------------------- @property def machines(self): """ returns the list of machines in the cluster """ if self._machines is None: self.__init() return self._machines #---------------------------------------------------------------------- def registerMachine(self, machineName, adminURL): """ For a server machine to participate in a site, it needs to be registered with the site. The server machine must have ArcGIS Server software installed and authorized. Registering machines this way is a "pull" approach to growing the site and is a convenient way when a large number of machines need to be added to a site. 
In contrast, a server machine can choose to join a site. Inputs: machineName - name of the server machine adminURL - URL wher ethe Administrator API is running on the server machine. Example: http://<machineName>:6080/arcgis/admin Output: JSON message as dictionary """ params = { "f" : "json", "token" : self._token, "machineName" : machineName, "adminURL" : adminURL } uURL = "%s/register" % self._url return self._do_post(url=uURL, param_dict=params) #---------------------------------------------------------------------- def renameMachine(self, machineName, newMachineName): """ You must use this operation if one of the registered machines has undergone a name change. This operation updates any references to the former machine configuration. By default, when the server is restarted, it is capable of identifying a name change and repairing itself and all its references. This operation is a manual call to handle the machine name change. Input: machineName - The former name of the server machine that is registered with the site. newMachineName - The new name of the server machine. Output: JSON messages as dictionary """ params = { "f" : "json", "token" : self._token, "machineName" : machineName, "newMachineName" : newMachineName } uURL = self._url + "/rename" return self._do_post(url=uURL, param_dict=params) ######################################################################## class Machine(BaseAGSServer): """ A server machine represents a machine on which ArcGIS Server software has been installed and licensed. A site is made up one or more of such machines that work together to host GIS services and data and provide administrative capabilities for the site. Each server machine is capable of performing all these tasks and hence a site can be thought of as a distributed peer-to-peer network of such machines. A server machine communicates with its peers over a range of TCP and UDP ports that can be configured using the edit operation. 
For a server machine to host GIS services, it needs to be added to a cluster. Starting and stopping the server machine enables and disables, respectively, its ability to host GIS services. The administrative capabilities of the server machine are available through the ArcGIS Server Administrator API that can be accessed over HTTP(S). For a server machine to participate in a site, it must be registered with the site. A machine can participate in only one site at a time. To remove a machine permanently from the site, you can use the unregister operation. """ _appServerMaxHeapSize = None _webServerSSLEnabled = None _webServerMaxHeapSize = None _platform = None _adminURL = None _machineName = None _ServerStartTime = None _webServerCertificateAlias = None _socMaxHeapSize = None _synchronize = None _configuredState = None _ports = None _proxy_port = None _proxy_url = None #---------------------------------------------------------------------- def __init__(self, url, token_url, username, password, initialize=False, proxy_url=None, proxy_port=None): """Constructor Inputs: url - admin url token_url - url to generate token username - admin username password - <PASSWORD> """ self._proxy_url = proxy_url self._proxy_port = proxy_port self._url = url self._currentURL = url self._token_url = token_url self._username = username self._password = password self.generate_token() if initialize: self.__init() #---------------------------------------------------------------------- def __init(self): """ populates server admin information """ params = { "f" : "json", "token" : self._token } json_dict = self._do_get(url=self._currentURL, param_dict=params) attributes = [attr for attr in dir(self) if not attr.startswith('__') and \ not attr.startswith('_')] for k,v in json_dict.iteritems(): if k in attributes: setattr(self, "_"+ k, json_dict[k]) else: print k, " - attribute not implmented for Machine" del k del v #---------------------------------------------------------------------- 
    # NOTE(review): every property below follows the same lazy pattern —
    # the backing "_<name>" attribute is populated by self.__init() (which
    # performs an admin REST GET) on first access and cached afterwards.
    @property
    def appServerMaxHeapSize(self):
        """ returns the app server max heap size """
        if self._appServerMaxHeapSize is None:
            self.__init()
        return self._appServerMaxHeapSize
    #----------------------------------------------------------------------
    @property
    def webServerSSLEnabled(self):
        """ returns whether SSL is enabled on the web server """
        if self._webServerSSLEnabled is None:
            self.__init()
        return self._webServerSSLEnabled
    #----------------------------------------------------------------------
    @property
    def webServerMaxHeapSize(self):
        """ returns the web server max heap size """
        if self._webServerMaxHeapSize is None:
            self.__init()
        return self._webServerMaxHeapSize
    #----------------------------------------------------------------------
    @property
    def platform(self):
        """ returns the platform information """
        if self._platform is None:
            self.__init()
        return self._platform
    #----------------------------------------------------------------------
    @property
    def adminURL(self):
        """ returns the administration URL """
        if self._adminURL is None:
            self.__init()
        return self._adminURL
    #----------------------------------------------------------------------
    @property
    def machineName(self):
        """ returns the machine name """
        if self._machineName is None:
            self.__init()
        return self._machineName
    #----------------------------------------------------------------------
    @property
    def ServerStartTime(self):
        """ returns the server start date/time """
        if self._ServerStartTime is None:
            self.__init()
        return self._ServerStartTime
    #----------------------------------------------------------------------
    @property
    def webServerCertificateAlias(self):
        """ returns the webserver cert alias """
        if self._webServerCertificateAlias is None:
            self.__init()
        return self._webServerCertificateAlias
    #----------------------------------------------------------------------
    @property
    def socMaxHeapSize(self):
        """ returns the soc's max heap size """
        if self._socMaxHeapSize is None:
            self.__init()
        return self._socMaxHeapSize
#---------------------------------------------------------------------- @property def synchronize(self): """synchronize value""" if self._synchronize is None: self.__init() return self._synchronize #---------------------------------------------------------------------- @property def ports(self): """ returns the used ports """ if self._ports is None: self.__init() return self._ports #---------------------------------------------------------------------- @property def configuredState(self): """ returns the configured state """ if self._configuredState is None: self.__init() return self._configuredState #---------------------------------------------------------------------- @property def status(self): """ returns the state """ uURL = self._url + "/status" params = { "f" : "json", "token" : self._token } return self._do_get(url=uURL, param_dict=params) #---------------------------------------------------------------------- def startMachine(self): """ Starts the server machine """ params = { "f" : "json", "token" : self._token } uURL = self._url + "/start" return self._do_post(url=uURL, param_dict=params) #---------------------------------------------------------------------- def stopMachine(self): """ Stops the server machine """ params = { "f" : "json", "token" : self._token } uURL = self._url + "/stop" return self._do_post(url=uURL, param_dict=params) #---------------------------------------------------------------------- def unregisterMachine(self): """ This operation causes the server machine to be deleted from the Site. The server machine will
op1, op2) = specSub OPTIONS.debug(2, ".. instr substitution to mne: ", mne, " op1: ", op1, " op2: ", op2) # make up an AssyRec as well assyrec = AssembleRecord(orgpos=self.orgpos, line=line, lineno=lineno, \ instrParts=(label, mne, op1, op2, rest)) # makes only sense WITH an instruction if len(instr) > 0: proceed = True # (1) special instruction? if not self.investigateForSpecialDirective(assyrec, label, mne, op1, op2, rest, stage=1): # error messages already done proceed = False # if no directive, then try normal instruction sequence if assyrec.directive is None: # (2) try find an appropriate opcode definition if not self.findBestOpCodeDefinition(assyrec, mne, op1, op2): # error messages already done proceed = False # (3) try read out numerical values for immediate expressions, displacements and # more und turn them to bit patterns if proceed and not self.substituteExpressionsWithBitPatterns(assyrec): # error messages already done proceed = False # (4) try join and translate bit patterns to a byte stream if proceed and not assyrec.translateBitPatternsIntoBytes(): # error messages already done proceed = False # (5) produce a listing if OPTIONS.listStage1: assyrec.listing() # (9) not to forget about increasing orgpos properly self.orgpos += assyrec.bytesize() # do not forget about the assyrec self.assyrecs.append(assyrec) OPTIONS.debug(1, "Stage 1 completed.") def assembleStage2(self): """ Iterate the existing `self.assyrecs` and revisit the instructions, which feature invalid bytes. Check, that overall byte size remain constants. Check, if still invalid bytes exist. 
""" # start OPTIONS.debug(1, "Starting assemble stage 2 ..") noErr = 0 noInvBytes = 0 self.orgpos = 0 # numerically for ari in range(len(self.assyrecs)): # get access assyrec = self.assyrecs[ari] bl = len(assyrec.bytes) ib = len(assyrec.invalidBytePos) OPTIONS.debug(2, "[%d] bl=%d, ib%d: %s" % (assyrec.lineno, bl, ib, assyrec.line)) OPTIONS.setLineNo(assyrec.lineno) OPTIONS.indentTo(0) # the orgpos is taken from stage 1 self.orgpos = assyrec.orgpos # MARK? if OPTIONS.markAtLineNo is not None: if OPTIONS.markAtLineNo == assyrec.lineno: print("*MARK@STAGE2*") # any real action required proceed = True if ib >= 0: # (1') special instruction? if not self.investigateForSpecialDirective(assyrec, "", "", "", "", "", stage=2): # error messages already done proceed = False # if no directive, then try normal instruction sequence if assyrec.directive is None: # (3') try read out numerical values for immediate expressions, displacements and # more und turn them to bit patterns if not self.substituteExpressionsWithBitPatterns(assyrec, stage=2): # error messages already done proceed = False # (4') try join and translate bit patterns to a byte stream if proceed and not assyrec.translateBitPatternsIntoBytes(): # error messages already done proceed = False # (5') produce a listing if OPTIONS.listStage2: assyrec.listing(indicator=('*' if ib > 0 else ' ')) # stat noInvBytes += len(assyrec.invalidBytePos) noErr += len(assyrec.invalidBytePos) + (1 if not proceed else 0) # some severe error? if len(assyrec.bytes) != bl: OPTIONS.error(112, "Number of bytes per instruction changed for stage 2! Ignore!") continue # write back (not sure, if this make sense) self.assyrecs[ari] = assyrec OPTIONS.debug(1, "Stage 2 completed. %d errors, %d invalid bytes remaining!" % (noErr, noInvBytes)) def evalBoundariesOfAssyRecs(self): """ Evaluates the minmal, maximal address in `self.assyrecs`. 
Returns `(minadr, maxadr)` """ # evaluate minimal org start minadr = sys.maxsize maxadr = -1 for assyrec in self.assyrecs: if len(assyrec.bytes) > 0: # valid info in this assyrec if assyrec.orgpos < minadr: minadr = assyrec.orgpos if assyrec.orgpos + len(assyrec.bytes) - 1 > maxadr: maxadr = assyrec.orgpos + len(assyrec.bytes) - 1 if minadr == sys.maxsize or maxadr < 0: OPTIONS.error(116, "Error evaluating minimal/ maximal org pos. Assuming $0.") minadr = 0 maxadr = 0 return (minadr, maxadr) def compareWithBin(self, fn): """ Compare bytes in the `self.assyrecs` with the contents of an external binary file. """ # start OPTIONS.debug(1, "Comparing assembled bytes with: ", fn) try: fh = open(fn, 'rb') ba = bytearray(fh.read()) except Exception as e: OPTIONS.error(113, "Error accessing %s gave %s" % (fn, str(e))) # outline the binary (minadr, maxadr) = self.evalBoundariesOfAssyRecs() OPTIONS.debug(1, "Found minimal org pos to be $%x" % minadr) # listing with compare stats = [0, 0, 0] # bytes, invalids, diffs for assyrec in self.assyrecs: # MARK? if OPTIONS.markAtLineNo is not None: if OPTIONS.markAtLineNo == assyrec.lineno: print("*MARK*") # list assyrec.listing(compareWithBytes=ba, shiftCompare=minadr) # compare cp = assyrec.compareWithBytes(compareWithBytes=ba, shiftCompare=minadr) for i in (0, 1, 2): stats[i] += cp[i] # done OPTIONS.debug(1, "Compare completed. %d bytes compared, %d invalid bytes, %d differences at all." % tuple(stats)) def outputToBin(self, fn): """ Write binary out """ OPTIONS.debug(1, "Writing binary bytes to: ", fn) # outline the binary (minadr, maxadr) = self.evalBoundariesOfAssyRecs() OPTIONS.debug(1, "Found minimal org pos to be $%x = %d, last byte to be $%x = %d" % (minadr, minadr, maxadr, maxadr)) # make a blob accordingly blob = bytearray() blob.extend(b' ' * (1 + maxadr - minadr)) # zero it! for i in range(len(blob)): blob[i] = 0x00 # simply overwrite bytes for assyrec in self.assyrecs: # MARK? 
if OPTIONS.markAtLineNo is not None: if OPTIONS.markAtLineNo == assyrec.lineno: print("*MARK*") # copy in for i in range(len(assyrec.bytes)): blob[assyrec.orgpos - minadr + i] = assyrec.bytes[i] # IO try: fh = open(fn, 'wb') fh.write(blob) fh.close() except Exception as e: OPTIONS.error(113, "Error accessing %s gave %s" % (fn, str(e))) # done OPTIONS.debug(1, "Writing completed. %d bytes written" % len(blob)) # # RAINBOW Tables # class RainbowEntry: """ Entry of an rainbow table """ def __init__(self, ocd): self.dummy = 1 self.ocd = ocd # opcode def self.symsub = [] # list of (sym, substitution bit pattern) self.bitpattern class RainbowTable: """ Rainbow table. Fast, efficient mapping of byte codes to opcode definitions. `rank` means byte 0,1,2 in opcode. """ def __init__(self, rank=0, rootbytes=[]): self.rank = rank # either 0,1,2 self.rootbytes = rootbytes # the bytes in machine code, which are leading to this table self.title = "" self.entries = {} def add(self, key: int, assyrec: AssembleRecord): """ Puts an entry into the table. The entry is indexed by a int-key, and shall be an `AssembleRecord`. """ self.entries[key] = assyrec def addTable(self, key: int, rb): """ Puts an entry into the table. The entry is indexed by a int-key, and shall be an `RainbowTable`. """ self.entries[key] = rb def get(self, key: int): """ clear """ if not (key in self.entries): return None else: return self.entries[key] class SetOfRainbows: """ Container for rainbow based instruction decoding. """ def __init__(self, opcodes: OpCodeDefList, syms: SymDict): self.opcodes = opcodes self.syms = syms self.rainbows = {} def getOrCreateRainbow(self, rootbytes): """ The set of rainbows maintains one or more rainbow tables. They differ in the way, at which position in the bytecode matching process they are relevant. These indicative bytes are called here 'rootbytes'. 
There shall be exactly one table with no rootbytes, this is the table where the decoding process starts, and exactly one table for each valid sequence of rootbytes. This function always points to an valid `RainbowTable` for certain `rootbytes`. If necessary, this table is created. """ # silently fix errors if rootbytes is None: rootbytes = [] rootbytes = tuple(rootbytes) # immutable # access? if rootbytes in self.rainbows: return self.rainbows[rootbytes] # no, create rb = RainbowTable(len(rootbytes), rootbytes) self.rainbows[rootbytes] = rb return rb def get(self, rootbytes=None, title=None): """ Look for `rootbytes` or for `title`. Return `None`, else. """ # title? if title is not None: for k in self.rainbows.keys(): if self.rainbows[k].title.strip().lower() == title.strip().lower(): return self.rainbows[k] return None # no! # silently fix errors if rootbytes is None: rootbytes = [] rootbytes = tuple(rootbytes) # immutable # access? if rootbytes in self.rainbows: return self.rainbows[rootbytes] # no return None def prepareSingleOpSubstTuples(self, ops, ams): """ If no substitution possible, at least `("","","")` will be in! """ # any symbolic substitution? if " R M RI B MPZA ".find(" "+ams+" ") < 0: return [("","","")] # id # find curly brackets match = re.search(r'({\w+})', ops) if match is None: return [("","","")] # id l = [] # get the key from the match key = match.group(1) rawkey = key[1:-1] (keyStart, keyEnd) = match.span(1) # access corresponding symbol defs foundSyms = self.syms.get(rawkey) if
self.type = gdb.lookup_type(f"arrow::{name}") self.val = cast_to_concrete(val, self.type) @property def fields(self): return FieldVector(self.val['children_']) def _format_type(self): r = type_reprs.get(self.name, self.name) return f"arrow::{r}" def _for_evaluation(self): return for_evaluation(self.val, self.type) class PrimitiveTypePrinter(TypePrinter): """ Pretty-printer for non-parametric types. """ def to_string(self): return f"{self._format_type()}()" class TimeTypePrinter(TypePrinter): """ Pretty-printer for time and duration types. """ def _get_unit(self): return self.val['unit_'] def to_string(self): return f"{self._format_type()}({self._get_unit()})" class TimestampTypePrinter(TimeTypePrinter): """ Pretty-printer for timestamp types. """ def to_string(self): tz = StdString(self.val['timezone_']) if tz: return f'{self._format_type()}({self._get_unit()}, {tz})' else: return f'{self._format_type()}({self._get_unit()})' class FixedSizeBinaryTypePrinter(TypePrinter): """ Pretty-printer for fixed-size binary types. """ def to_string(self): width = int(self.val['byte_width_']) return f"{self._format_type()}({width})" class DecimalTypePrinter(TypePrinter): """ Pretty-printer for decimal types. """ def to_string(self): precision = int(self.val['precision_']) scale = int(self.val['scale_']) return f"{self._format_type()}({precision}, {scale})" class ListTypePrinter(TypePrinter): """ Pretty-printer for list types. """ def _get_value_type(self): fields = self.fields if len(fields) != 1: return None return fields[0].type def to_string(self): child = self._get_value_type() if child is None: return f"{self._format_type()}<uninitialized or corrupt>" else: return f"{self._format_type()}({child})" class FixedSizeListTypePrinter(ListTypePrinter): """ Pretty-printer for fixed-size list type. 
""" def to_string(self): child = self._get_value_type() if child is None: return f"{self._format_type()}<uninitialized or corrupt>" list_size = int(self.val['list_size_']) return f"{self._format_type()}({child}, {list_size})" class MapTypePrinter(ListTypePrinter): """ Pretty-printer for map types. """ def to_string(self): struct_type = self._get_value_type() if struct_type is None: return f"{self._format_type()}<uninitialized or corrupt>" struct_children = FieldVector(struct_type['children_']) if len(struct_children) != 2: return f"{self._format_type()}<uninitialized or corrupt>" key_type = struct_children[0].type item_type = struct_children[1].type return (f"{self._format_type()}({key_type}, {item_type}, " f"keys_sorted={self.val['keys_sorted_']})") class DictionaryTypePrinter(TypePrinter): """ Pretty-printer for dictionary types. """ def to_string(self): index_type = deref(self.val['index_type_']) value_type = deref(self.val['value_type_']) ordered = self.val['ordered_'] return (f"{self._format_type()}({index_type}, {value_type}, " f"ordered={ordered})") class StructTypePrinter(TypePrinter): """ Pretty-printer for struct types. """ def to_string(self): return f"{self._format_type()}({self.fields})" class UnionTypePrinter(TypePrinter): """ Pretty-printer for union types. """ def to_string(self): type_codes = StdVector(self.val['type_codes_']) type_codes = "{" + ", ".join(str(x.cast(gdb.lookup_type('int'))) for x in type_codes) + "}" return f"{self._format_type()}(fields={self.fields}, type_codes={type_codes})" class ExtensionTypePrinter(TypePrinter): """ Pretty-printer for extension types. """ def to_string(self): ext_type = ExtensionType(self.val) return (f"{self._format_type()} {ext_type.to_string().string_literal()} " f"with storage type {ext_type.storage_type}") class ScalarPrinter: """ Pretty-printer for arrow::Scalar and subclasses. 
""" def __new__(cls, val): # Lookup actual (derived) class to instantiate type_id = int(deref(val['type'])['id_']) type_class = lookup_type_class(type_id) if type_class is not None: cls = type_class.scalar_printer assert issubclass(cls, ScalarPrinter) self = object.__new__(cls) self.type_class = type_class self.type_name = type_class.name self.name = scalar_class_from_type(self.type_name) self.type_id = type_id # Cast to concrete Scalar class to access derived attributes. concrete_type = gdb.lookup_type(f"arrow::{self.name}") self.val = cast_to_concrete(val, concrete_type) self.is_valid = bool(self.val['is_valid']) return self @property def type(self): """ The concrete DataTypeClass instance. """ concrete_type = gdb.lookup_type(f"arrow::{self.type_name}") return cast_to_concrete(deref(self.val['type']), concrete_type) def _format_type(self): return f"arrow::{self.name}" def _format_null(self): if self.type_class.is_parametric: return f"{self._format_type()} of type {self.type}, null value" else: return f"{self._format_type()} of null value" def _for_evaluation(self): return for_evaluation(self.val) class NullScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::NullScalar. """ def to_string(self): return self._format_type() class NumericScalarPrinter(ScalarPrinter): """ Pretty-printer for numeric Arrow scalars. """ def to_string(self): if not self.is_valid: return self._format_null() value = self.val['value'] if self.type_name == "HalfFloatType": return (f"{self._format_type()} " f"of value {half_float_value(value)} [{value}]") if self.type_name in ("UInt8Type", "Int8Type"): value = value.cast(gdb.lookup_type('int')) return f"{self._format_type()} of value {value}" class TimeScalarPrinter(ScalarPrinter): """ Pretty-printer for Arrow time-like scalars. 
""" def to_string(self): unit = short_time_unit(self.type['unit_']) if not self.is_valid: return f"{self._format_type()} of null value [{unit}]" value = self.val['value'] return f"{self._format_type()} of value {value}{unit}" class Date32ScalarPrinter(TimeScalarPrinter): """ Pretty-printer for arrow::Date32Scalar. """ def to_string(self): if not self.is_valid: return self._format_null() value = self.val['value'] return f"{self._format_type()} of value {format_date32(value)}" class Date64ScalarPrinter(TimeScalarPrinter): """ Pretty-printer for arrow::Date64Scalar. """ def to_string(self): if not self.is_valid: return self._format_null() value = self.val['value'] return f"{self._format_type()} of value {format_date64(value)}" class TimestampScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::TimestampScalar. """ def to_string(self): unit = short_time_unit(self.type['unit_']) tz = StdString(self.type['timezone_']) tz = tz.string_literal() if tz.size != 0 else "no timezone" if not self.is_valid: return f"{self._format_type()} of null value [{unit}, {tz}]" value = self.val['value'] return f"{self._format_type()} of value {value}{unit} [{tz}]" class MonthIntervalScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::MonthIntervalScalarPrinter. """ def to_string(self): if not self.is_valid: return self._format_null() value = self.val['value'] return f"{self._format_type()} of value {format_month_interval(value)}" class DecimalScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::DecimalScalar and subclasses. 
""" @property def decimal_class(self): return decimal_type_to_class[self.type_name] def to_string(self): ty = self.type precision = int(ty['precision_']) scale = int(ty['scale_']) suffix = f"[precision={precision}, scale={scale}]" if not self.is_valid: return f"{self._format_type()} of null value {suffix}" value = self.decimal_class.from_value(self.val['value'] ).format(precision, scale) return f"{self._format_type()} of value {value} {suffix}" class BaseBinaryScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::BaseBinaryScalar and subclasses. """ def _format_buf(self, bufptr): if 'String' in self.type_name: return utf8_literal(bufptr.data, bufptr.size) else: return bufptr.bytes_literal() def to_string(self): if not self.is_valid: return self._format_null() bufptr = BufferPtr(SharedPtr(self.val['value']).get()) size = bufptr.size if size is None: return f"{self._format_type()} of value <unallocated>" return (f"{self._format_type()} of size {size}, " f"value {self._format_buf(bufptr)}") class FixedSizeBinaryScalarPrinter(BaseBinaryScalarPrinter): """ Pretty-printer for arrow::FixedSizeBinaryScalar. """ def to_string(self): size = self.type['byte_width_'] if not self.is_valid: return f"{self._format_type()} of size {size}, null value" bufptr = BufferPtr(SharedPtr(self.val['value']).get()) if bufptr.data is None: return f"{self._format_type()} of size {size}, <unallocated>" return (f"{self._format_type()} of size {size}, " f"value {self._format_buf(bufptr)}") class DictionaryScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::DictionaryScalar. """ def to_string(self): if not self.is_valid: return self._format_null() index = deref(self.val['value']['index']) dictionary = deref(self.val['value']['dictionary']) return (f"{self._format_type()} of index {index}, " f"dictionary {dictionary}") class BaseListScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::BaseListScalar and subclasses. 
""" def to_string(self): if not self.is_valid: return self._format_null() value = deref(self.val['value']) return f"{self._format_type()} of value {value}" class StructScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::StructScalar. """ def display_hint(self): return 'map' def children(self): eval_fields = StdVector(self.type['children_']) eval_values = StdVector(self.val['value']) for field, value in zip(eval_fields, eval_values): name = StdString(deref(field)['name_']).string_literal() yield ("name", name) yield ("value", deref(value)) def to_string(self): if not self.is_valid: return self._format_null() return f"{self._format_type()}" class UnionScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::UnionScalar and subclasses. """ def to_string(self): type_code = self.val['type_code'].cast(gdb.lookup_type('int')) if not self.is_valid: return (f"{self._format_type()} of type {self.type}, " f"type code {type_code}, null value") value = deref(self.val['value']) return (f"{self._format_type()} of type code {type_code}, " f"value {value}") class MapScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::MapScalar. """ def to_string(self): if not self.is_valid: return self._format_null() array = deref(self.val['value']) data = deref(array['data_']) data_printer = ArrayDataPrinter("arrow::ArrayData", data) return (f"{self._format_type()} of type {self.type}, " f"value {data_printer._format_contents()}") class ExtensionScalarPrinter(ScalarPrinter): """ Pretty-printer for arrow::ExtensionScalar. """ def to_string(self): ext_type = ExtensionType(self.type) if not self.is_valid: return (f"{self._format_type()} of type " f"{ext_type.to_string().string_literal()}, null value") value = deref(self.val['value']) return (f"{self._format_type()} of type " f"{ext_type.to_string().string_literal()}, value {value}") class ArrayDataPrinter: """ Pretty-printer for arrow::ArrayData. 
""" def __new__(cls, name, val): # Lookup actual (derived) class to instantiate type_id = int(deref(val['type'])['id_']) type_class = lookup_type_class(type_id) if type_class is not None: cls = type_class.array_data_printer assert issubclass(cls, ArrayDataPrinter) self = object.__new__(cls) self.name = name self.val = val self.type_class = type_class self.type_name = type_class.name self.type_id = type_id self.offset = int(self.val['offset']) self.length = int(self.val['length']) return self @property def type(self): """ The concrete DataTypeClass instance. """ concrete_type = gdb.lookup_type(f"arrow::{self.type_name}") return cast_to_concrete(deref(self.val['type']), concrete_type) def _format_contents(self): return (f"length {self.length}, " f"offset {self.offset}, " f"{format_null_count(self.val['null_count'])}") def _buffer(self, index, type_id=None): buffers = StdVector(self.val['buffers']) bufptr = SharedPtr(buffers[index]).get() if int(bufptr) == 0: return None if type_id is not None: return TypedBuffer.from_type_id(bufptr.dereference(), type_id) else: return Buffer(bufptr.dereference()) def _buffer_values(self, index, type_id, length=None): """ Return a typed view of values in the buffer with the given index. Values are returned as tuples since some types may decode to multiple values (for example day_time_interval). """ buf = self._buffer(index, type_id) if buf is None: return None if length is None: length = self.length return buf.view(self.offset, length) def _unpacked_buffer_values(self, index, type_id, length=None): """ Like _buffer_values(), but assumes values are 1-tuples and returns them unpacked. 
""" return StarMappedView(identity, self._buffer_values(index, type_id, length)) def _null_bitmap(self): buf = self._buffer(0) if has_null_bitmap(self.type_id) else None return NullBitmap.from_buffer(buf, self.offset, self.length) def _null_child(self, i): return str(i), "null" def _valid_child(self, i, value): return str(i), value def display_hint(self): return None def children(self): return () def to_string(self): ty = self.type return (f"{self.name} of type {ty}, " f"{self._format_contents()}") class NumericArrayDataPrinter(ArrayDataPrinter): """ ArrayDataPrinter specialization for numeric data types. """ _format_value = staticmethod(identity) def _values_view(self): return StarMappedView(self._format_value, self._buffer_values(1, self.type_id)) def display_hint(self): return "array" def children(self): if self.length == 0: return values = self._values_view() null_bits = self._null_bitmap() for i, (valid, value) in enumerate(zip(null_bits, values)): if valid: yield self._valid_child(i, str(value)) else: yield self._null_child(i) class BooleanArrayDataPrinter(NumericArrayDataPrinter): """ ArrayDataPrinter specialization for boolean. """ def _format_value(self, v): return str(v).lower() def _values_view(self): return MappedView(self._format_value, self._buffer_values(1, self.type_id)) class Date32ArrayDataPrinter(NumericArrayDataPrinter): """ ArrayDataPrinter specialization for date32. """ _format_value = staticmethod(format_date32) class Date64ArrayDataPrinter(NumericArrayDataPrinter): """ ArrayDataPrinter specialization for date64. """ _format_value = staticmethod(format_date64) class TimeArrayDataPrinter(NumericArrayDataPrinter): """ ArrayDataPrinter specialization for time32 and time64. """ def __init__(self, name, val): self.unit = self.type['unit_'] self.unit_string = short_time_unit(self.unit) def _format_value(self, val): return f"{val}{self.unit_string}" class TimestampArrayDataPrinter(NumericArrayDataPrinter): """ ArrayDataPrinter
{} query_params = [] if 'states' in params: query_params.append(('states', params['states'])) # noqa: E501 collection_formats['states'] = 'multi' # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['bearerAuth'] # noqa: E501 return self.api_client.call_api( '/slices', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Success', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def slices_modify_slice_id_put(self, body, slice_id, **kwargs): # noqa: E501 """Modify slice # noqa: E501 Request to modify slice as described in the request. Request would be a Graph ML describing the requested resources for slice or a dictionary for sliver. On success, for one or more slivers are modified. This API returns list and description of the resources reserved for the slice in the form of Graph ML. Orchestrator would also trigger provisioning of the new resources on the appropriate sites either now or in the future based as requested. Modify operations may include add/delete/modify a container/VM/Baremetal server/network or other resources to the slice. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_modify_slice_id_put(body, slice_id, async_req=True) >>> result = thread.get() :param async_req bool :param str body: (required) :param str slice_id: Slice identifier as UUID (required) :return: Success If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.slices_modify_slice_id_put_with_http_info(body, slice_id, **kwargs) # noqa: E501 else: (data) = self.slices_modify_slice_id_put_with_http_info(body, slice_id, **kwargs) # noqa: E501 return data def slices_modify_slice_id_put_with_http_info(self, body, slice_id, **kwargs): # noqa: E501 """Modify slice # noqa: E501 Request to modify slice as described in the request. Request would be a Graph ML describing the requested resources for slice or a dictionary for sliver. On success, for one or more slivers are modified. This API returns list and description of the resources reserved for the slice in the form of Graph ML. Orchestrator would also trigger provisioning of the new resources on the appropriate sites either now or in the future based as requested. Modify operations may include add/delete/modify a container/VM/Baremetal server/network or other resources to the slice. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_modify_slice_id_put_with_http_info(body, slice_id, async_req=True) >>> result = thread.get() :param async_req bool :param str body: (required) :param str slice_id: Slice identifier as UUID (required) :return: Success If the method is called asynchronously, returns the request thread. 
""" all_params = ['body', 'slice_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method slices_modify_slice_id_put" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body' is set if ('body' not in params or params['body'] is None): raise ValueError("Missing the required parameter `body` when calling `slices_modify_slice_id_put`") # noqa: E501 # verify the required parameter 'slice_id' is set if ('slice_id' not in params or params['slice_id'] is None): raise ValueError("Missing the required parameter `slice_id` when calling `slices_modify_slice_id_put`") # noqa: E501 collection_formats = {} path_params = {} if 'slice_id' in params: path_params['sliceID'] = params['slice_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in params: body_params = params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['text/plain']) # noqa: E501 # Authentication setting auth_settings = ['bearerAuth'] # noqa: E501 return self.api_client.call_api( '/slices/modify/{sliceID}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Success', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def 
slices_redeem_slice_id_post(self, slice_id, **kwargs): # noqa: E501 """Redeem resources reserved via Create API # noqa: E501 Request that the reserved resources be made provisioned, instantiating or otherwise realizing the resources, such that they have a valid operational status and may possibly be made ready for experimenter use. This operation is synchronous, but may start a longer process, such as creating and imaging a virtual machine. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_redeem_slice_id_post(slice_id, async_req=True) >>> result = thread.get() :param async_req bool :param str slice_id: Slice identifier as UUID (required) :return: Success If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.slices_redeem_slice_id_post_with_http_info(slice_id, **kwargs) # noqa: E501 else: (data) = self.slices_redeem_slice_id_post_with_http_info(slice_id, **kwargs) # noqa: E501 return data def slices_redeem_slice_id_post_with_http_info(self, slice_id, **kwargs): # noqa: E501 """Redeem resources reserved via Create API # noqa: E501 Request that the reserved resources be made provisioned, instantiating or otherwise realizing the resources, such that they have a valid operational status and may possibly be made ready for experimenter use. This operation is synchronous, but may start a longer process, such as creating and imaging a virtual machine. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_redeem_slice_id_post_with_http_info(slice_id, async_req=True) >>> result = thread.get() :param async_req bool :param str slice_id: Slice identifier as UUID (required) :return: Success If the method is called asynchronously, returns the request thread. 
""" all_params = ['slice_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method slices_redeem_slice_id_post" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'slice_id' is set if ('slice_id' not in params or params['slice_id'] is None): raise ValueError("Missing the required parameter `slice_id` when calling `slices_redeem_slice_id_post`") # noqa: E501 collection_formats = {} path_params = {} if 'slice_id' in params: path_params['sliceID'] = params['slice_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['bearerAuth'] # noqa: E501 return self.api_client.call_api( '/slices/redeem/{sliceID}', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Success', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def slices_renew_slice_id_post(self, slice_id, new_lease_end_time, **kwargs): # noqa: E501 """Renew slice # noqa: E501 Request to extend slice be renewed with their expiration extended. If possible, the orchestrator should extend the slivers to the requested expiration time, or to a sooner time if policy limits apply. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_renew_slice_id_post(slice_id, new_lease_end_time, async_req=True) >>> result = thread.get() :param async_req bool :param str slice_id: Slice identifier as UUID (required) :param str new_lease_end_time: New Lease End Time for the Slice (required) :return: Success If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.slices_renew_slice_id_post_with_http_info(slice_id, new_lease_end_time, **kwargs) # noqa: E501 else: (data) = self.slices_renew_slice_id_post_with_http_info(slice_id, new_lease_end_time, **kwargs) # noqa: E501 return data def slices_renew_slice_id_post_with_http_info(self, slice_id, new_lease_end_time, **kwargs): # noqa: E501 """Renew slice # noqa: E501 Request to extend slice be renewed with their expiration extended. If possible, the orchestrator should extend the slivers to the requested expiration time, or to a sooner time if policy limits apply. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.slices_renew_slice_id_post_with_http_info(slice_id, new_lease_end_time, async_req=True) >>> result = thread.get() :param async_req bool :param str slice_id: Slice identifier as UUID (required) :param str new_lease_end_time: New Lease End Time for the Slice (required) :return: Success If the method is called asynchronously, returns the request thread. """ all_params = ['slice_id', 'new_lease_end_time'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in
import logging import cPickle from functools import partial from plow import client from plow.client import DependType from plow.gui.manifest import QtCore, QtGui from plow.gui import util LOGGER = logging.getLogger(__name__) ######################## # FilterManager # class FilterManager(QtGui.QDialog): """ """ def __init__(self, project, *args, **kwargs): super(FilterManager, self).__init__(*args, **kwargs) self._projectLabel = QtGui.QLabel(self) self._filtersList = filt = FiltersList(parent=self) self._matchersList = match = MatchersList(parent=self) self._actionsList = act = ActionsList(parent=self) self._toolbar = tb = QtGui.QToolBar(self) tb.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) tb.setMaximumHeight(32) tb.addAction(QtGui.QIcon(":/images/refresh.png"), "Refresh", self.refresh) self._status = QtGui.QStatusBar(self) h_splitter = QtGui.QSplitter(QtCore.Qt.Horizontal, self) h_splitter.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding) v_splitter = QtGui.QSplitter(QtCore.Qt.Vertical, self) v_splitter.addWidget(self._matchersList) v_splitter.addWidget(self._actionsList) h_splitter.addWidget(self._filtersList) h_splitter.addWidget(v_splitter) tbLayout = QtGui.QHBoxLayout() tbLayout.setContentsMargins(0, 0, 4, 0) tbLayout.addWidget(self._toolbar) tbLayout.addWidget(self._projectLabel) layout = QtGui.QVBoxLayout(self) layout.setContentsMargins(6, 0, 6, 4) layout.setSpacing(2) layout.addLayout(tbLayout) layout.addWidget(h_splitter) layout.addWidget(self._status) v_splitter.setSizes([100,100]) h_splitter.setSizes([100,100]) self.setStyleSheet(""" DragDropItem { border: 1px solid black; border-radius: 4px; background-color: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 rgb(40, 40, 40), stop: 1 rgb(27, 28, 30) ); } DragDropItem:checked { border: 1px solid rgb(100,100,100); background-color: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 1 rgb(40, 40, 40), stop: 0 rgb(27, 28, 30) ); } """) self.setProject(project) 
# Connnections filt.filterSelected.connect(match.setFilterObject) filt.filterSelected.connect(act.setFilterObject) def setProject(self, project): if project: self._filtersList.setProject(project) self._projectLabel.setText("Project: %s" % project.title) def refresh(self): self._filtersList.refresh() ######################## # DragDropList # class DragDropList(QtGui.QFrame): """ A generic list widget that holds DragDropItem instances and supports drag and drop re-ordering. """ ITEM_SPACING = 0 COLUMN_WIDTHS = [] def __init__(self, parent=None): super(DragDropList, self).__init__(parent) self.setFrameStyle(self.Panel|self.Sunken) self.setAcceptDrops(True) self._buttonGroup = QtGui.QButtonGroup(self) self._buttonGroup.setExclusive(True) header = QtGui.QWidget(self) header.setFixedHeight(20) headerLayout = QtGui.QHBoxLayout(header) headerLayout.setSpacing(0) headerLayout.setContentsMargins(8, 0, 8, 0) self._headerLayer = headerLayout self._contentWidget = content = QtGui.QWidget(self) scroll = QtGui.QScrollArea(self) scroll.setFrameStyle(self.Panel|self.Raised) scroll.setWidgetResizable(True) scroll.setWidget(content) scrollLayout = QtGui.QVBoxLayout(content) scrollLayout.setSpacing(0) scrollLayout.setContentsMargins(0, 0, 0, 0) self._itemLayout = QtGui.QVBoxLayout() self._itemLayout.setSpacing(self.ITEM_SPACING) self._itemLayout.setContentsMargins(0, 12, 0, 12) scrollLayout.addLayout(self._itemLayout) scrollLayout.addStretch() layout = QtGui.QVBoxLayout(self) layout.setContentsMargins(4, 4, 4, 4) layout.addWidget(header) layout.addWidget(scroll) # Connections self._buttonGroup.buttonClicked.connect(self.itemClicked) def __iter__(self): layout = self._itemLayout for i in xrange(layout.count()): yield layout.itemAt(i).widget() ######### # Events # def mousePressEvent(self, event): super(DragDropList, self).mousePressEvent(event) self.setFocus() def dragEnterEvent(self, event): source = event.source() if isinstance(source, DragDropItem) and self.isAncestorOf(source): 
event.accept() return event.ignore() def dragMoveEvent(self, event): dropPos = event.pos() dropItem = self.childAt(dropPos) if dropItem and dropItem is event.source(): event.ignore() else: event.accept() def dropEvent(self, event): layout = self._itemLayout parent = layout.parentWidget() or self sourceItem = event.source() sourceIndex = layout.indexOf(sourceItem) dropPos = event.pos() dropItem = parent.childAt(dropPos) insertIndex = None # The item was dropped directly on another item # so figure out if it was a bit above, or a bit below if dropItem: dropIndex = layout.indexOf(dropItem) middle = dropItem.geometry().center().y() if dropPos.y() < middle: # print "Drop source", sourceIndex, "before", dropIndex insertIndex = dropIndex else: # print "Drop source", sourceIndex, "after", dropIndex insertIndex = dropIndex else: itemsRect = self.itemsRect() # The item was dropped somewhere inside the items layout if itemsRect.contains(dropPos): droppedY = dropPos.y() # Figure out which two items it was dropped between for child1, child2 in util.pairwise(self): r1 = child1.geometry() r2 = child2.geometry() r1_bottom = r1.bottomLeft() r2_top = r2.topRight() testRect = QtCore.QRect(r1_bottom, r2_top) if testRect.contains(dropPos): if child1 is sourceItem or child2 is sourceItem: # print "Dropped near original. Ignored." 
event.ignore() return # print "Dropped between", child1, child2 insertIndex = layout.indexOf(child2) break # This item was dropped above the first item # or below the last one else: if dropPos.y() < itemsRect.y(): # print "Dropped above layout" insertIndex = 0 else: # print "Dropped below layout" insertIndex = -1 if insertIndex is not None: if insertIndex > sourceIndex: insertIndex -= 1 # print "Final insert index is", insertIndex, "(from original %d)" % sourceIndex layout.insertWidget(insertIndex, sourceItem) event.acceptProposedAction() else: event.ignore() # ######### def itemsRect(self): """ Return a QRect of the specific boundary of the items in the layout """ count = self._itemLayout.count() if not count: return QtCore.QRect(0,0,0,0) first = self._itemLayout.itemAt(0).widget() if count == 1: rect = first.geometry() return QtCore.QRect(rect.topLeft(), rect.bottomRight()) last = self._itemLayout.itemAt(count-1).widget() return QtCore.QRect(first.geometry().topLeft(), last.geometry().bottomRight()) def clear(self): """ Remove all items """ layout = self._itemLayout buttons = self._buttonGroup while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget: buttons.removeButton(widget) widget.deleteLater() def appendItem(self, item): self._itemLayout.addWidget(item) self._buttonGroup.addButton(item) item.setColumnWidths(self.COLUMN_WIDTHS) def setHeaderLabels(self, labels): layout = self._headerLayer while layout.count(): item = layout.takeAt(0) widget = item.widget() if widget: widget.deleteLater() for i, name in enumerate(labels): label = QtGui.QLabel(name, self) try: label.setFixedWidth(self.COLUMN_WIDTHS[i]) except: pass layout.addWidget(label) def refresh(self): pass def selectedFilter(self): return self._buttonGroup.checkedButton() def itemClicked(self, item): pass ######################## # DragDropItem # class DragDropItem(QtGui.QToolButton): """ A custom widget to be used in a DragDropList """ ITEM_SPACING = 6 def __init__(self, *args, 
**kwargs): super(DragDropItem, self).__init__(*args, **kwargs) self.__dragStartPos = QtCore.QPoint(0,0) self.setMinimumHeight(24) self.setMaximumHeight(50) self.setCheckable(True) self.setFocusPolicy(QtCore.Qt.NoFocus) self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred) wrapperLayout = QtGui.QHBoxLayout(self) wrapperLayout.setSpacing(0) wrapperLayout.setContentsMargins(0,0,0,0) self._widgetLayout = layout = QtGui.QHBoxLayout() layout.setSpacing(self.ITEM_SPACING) layout.setContentsMargins(8,1,8,1) wrapperLayout.addLayout(layout) wrapperLayout.addStretch() def mousePressEvent(self, event): if event.button() == QtCore.Qt.LeftButton: self.__dragStartPos = event.pos() self.setChecked(True) super(DragDropItem, self).mousePressEvent(event) def mouseMoveEvent(self, event): startDrag = QtGui.QApplication.startDragDistance() if (event.pos() - self.__dragStartPos).manhattanLength() < startDrag: return mimeData = QtCore.QMimeData() data = cPickle.dumps(self.mapToParent(self.__dragStartPos)) mimeData.setData("application/x-DragDropList", QtCore.QByteArray(data)) pix = QtGui.QPixmap(self.size()) self.render(pix) drag = QtGui.QDrag(self) drag.setMimeData(mimeData) drag.setPixmap(pix) drag.setHotSpot(event.pos()) drag.exec_(QtCore.Qt.MoveAction) def setColumnWidths(self, widths): layout = self._widgetLayout for i, width in zip(xrange(layout.count()), widths): widget = layout.itemAt(i).widget() widget.setFixedWidth(max(width - self.ITEM_SPACING, 10)) ######################## # FiltersList # class FiltersList(DragDropList): """ Display a list of Filters for a project, and allow them to be managed. 
""" ITEM_SPACING = 10 COLUMN_WIDTHS = [80] filterSelected = QtCore.Signal(object) def __init__(self, project=None, *args, **kwargs): super(FiltersList, self).__init__(*args, **kwargs) self.__project = None self.setHeaderLabels(['Enabled', 'Filter Name']) if project: self.setProject(project) def project(self): return self.__project def setProject(self, project): if not isinstance(project, client.Project): raise TypeError("Invalid type %r. Must provide a Project instance" % type(project)) self.__project = project self.refresh() def refresh(self): layout = self._itemLayout layout.setEnabled(False) try: self.clear() if not self.__project: return widths = self.COLUMN_WIDTHS filters = client.get_filters(self.__project) for f in filters: widget = FilterItem(f, self) self.appendItem(widget) widget.filterUpdated.connect(self._filterUpdated) widget.filterEnabled.connect(self._filterUpdated) finally: layout.setEnabled(True) def itemClicked(self, item): filterObj = item.filterObject() if filterObj: self.filterSelected.emit(filterObj) def _filterUpdated(self): print "refresh" self.refresh() ######################## # FilterItem # class FilterItem(DragDropItem): filterUpdated = QtCore.Signal() filterEnabled = QtCore.Signal(bool) def __init__(self, filterObj=None, *args, **kwargs): super(FilterItem, self).__init__(*args, **kwargs) self.__filter = None self._enabledCheck = check = QtGui.QCheckBox(self) check.setToolTip("Enable or Disable this filter") check.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) self._nameLabel = name = QtGui.QLineEdit(self) name.setPlaceholderText("<Set Filter Name>") name.setFrame(False) name.setReadOnly(True) name.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) name.installEventFilter(self) self._widgetLayout.addWidget(self._enabledCheck) self._widgetLayout.addWidget(self._nameLabel) self.setStyleSheet(""" QCheckBox, QLineEdit {background-color: transparent; } QLineEdit {border: none; } """) if filterObj: 
self.setFilterObject(filterObj) # Connections name.editingFinished.connect(self.__nameEditingFinished) check.toggled.connect(self.__filterEnabled) def __repr__(self): f = self.__filter return "<FilterItem: %s >" % (f.name if f else "") def eventFilter(self, obj, event): if obj is self._nameLabel: typ = event.type() if typ == event.FocusIn: # we will trigger our own focus event self.click() return True elif typ == event.MouseButtonDblClick: obj.setReadOnly(False) obj.focusInEvent(QtGui.QFocusEvent(event.FocusIn, QtCore.Qt.MouseFocusReason)) return super(FilterItem, self).eventFilter(obj, event) def filterObject(self): return self.__filter def setFilterObject(self, filt): self.__filter = filt self._enabledCheck.setChecked(filt.enabled) self._nameLabel.setText(filt.name) def setColumnWidths(self, widths): widgets = (self._enabledCheck, self._nameLabel) for width, widget in zip(widths, widgets): widget.setMinimumWidth(width) def __nameEditingFinished(self): nameLabel = self._nameLabel nameLabel.clearFocus() nameLabel.setReadOnly(True) filt = self.__filter newName = nameLabel.text() if not newName.strip(): nameLabel.setText(filt.name) return if newName != filt.name: filt.set_name(newName) self.filterUpdated.emit() def __filterEnabled(self, enabled): if enabled != self.__filter.enabled: print "Toggle filter status", enabled # self.filterEnabled.emit(enabled) ######################## # MatchersList # class MatchersList(DragDropList): ITEM_SPACING = 6 COLUMN_WIDTHS = [100, 120] def __init__(self, filterObj=None, parent=None): super(MatchersList, self).__init__(parent) self.__filter = None self.setHeaderLabels(['Matcher Field', 'Type', 'Value']) if filterObj: self.setFilterObject(filterObj) def filterObject(self): return self.__filter def setFilterObject(self, filt): if not isinstance(filt, client.Filter): raise TypeError("Invalid type %r. 
Must provide a Filter instance" % type(filt)) self.__filter = filt self.refresh() def refresh(self): layout = self._itemLayout layout.setEnabled(False) try: self.clear() if not self.__filter: return widths = self.COLUMN_WIDTHS matchers = self.__filter.get_matchers() for m in matchers: widget = MatcherItem(m, self) self.appendItem(widget) print "Matcher:", m.field, m.type, m.value finally: layout.setEnabled(True) ######################## # MatcherItem # class MatcherItem(DragDropItem): FIELDS = dict((f.replace('_', ' ').title(), getattr(client.MatcherField, f)) \ for f in dir(client.MatcherField) if not f.startswith('_')) FIELDS_SORTED = None TYPES = dict((t.replace('_', ' ').title(), getattr(client.MatcherType, t)) \ for t in dir(client.MatcherType) if not t.startswith('_')) TYPES_SORTED = None def __init__(self, matcherObj=None, *args, **kwargs): super(MatcherItem, self).__init__(*args, **kwargs) if self.FIELDS_SORTED is None: self.FIELDS_SORTED = sorted(self.FIELDS, key=lambda k: self.FIELDS[k]) if self.TYPES_SORTED is None: self.TYPES_SORTED = sorted(self.TYPES, key=lambda k: self.TYPES[k]) self.__matcher = None self.setMinimumHeight(30) self.setMaximumHeight(50) self._field = field = QtGui.QComboBox(self) field.addItems(self.FIELDS_SORTED) field.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) self._type = typ = QtGui.QComboBox(self) typ.addItems(self.TYPES_SORTED) typ.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) self._value = value = QtGui.QLineEdit(self) value.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) layout = self._widgetLayout layout.setContentsMargins(8, 4, 8, 4) layout.addWidget(field)
""" This file was generated automatically by xsd-to-vol. Do not edit. """ from datetime import datetime import pytz from voluptuous import All, In, Length, Required, Schema, Url def DateTime(dt): dt = pytz.utc.localize(dt) return f"{dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]}{dt.strftime('%z')}" """TariffMetaDataRequest """ TariffMetaDataRequest = Schema({}) """TariffZoneNeighboursRequest Requests all HVV zones and their zone neighbours. """ TariffZoneNeighboursRequest = Schema({}) """TariffRequest """ TariffRequest = Schema({}) """GRRequest """ GRRequest = Schema({}) """ErrorResponse An error occurred on the server side. This type of Response will only be send in combination with the http status code 401 and 403. """ ErrorResponse = Schema({}) """SingleTicketOptimizerRequestLine id: str (None - None) name: str (None - None) """ SingleTicketOptimizerRequestLine = Schema({"id": str, "name": str}) """SingleTicketOptimizerRequestStation id: str (None - None) name: str (None - None) """ SingleTicketOptimizerRequestStation = Schema({"id": str, "name": str}) """SingleTicketOptimizerRequestTrip start: SingleTicketOptimizerRequestStation (None - None) destination: SingleTicketOptimizerRequestStation (None - None) line: SingleTicketOptimizerRequestLine (None - None) vehicleType: str (None - None) """ SingleTicketOptimizerRequestTrip = Schema( { "start": SingleTicketOptimizerRequestStation, "destination": SingleTicketOptimizerRequestStation, "line": SingleTicketOptimizerRequestLine, "vehicleType": str, } ) """TariffRegions regions: str (None - unbounded) """ TariffRegions = Schema({"regions": [str]}) """TariffOptimizerRegions Regions to be covered by tickets zones: TariffRegions (0 - unbounded) rings: TariffRegions (0 - unbounded) counties: TariffRegions (0 - unbounded) """ TariffOptimizerRegions = Schema( {"zones": [TariffRegions], "rings": [TariffRegions], "counties": [TariffRegions]} ) """TariffCounty contains information about a tariff county id: str Unique identifier for 
"""TariffCounty
Information about a single tariff county.
    id: str     unique identifier of the county
    label: str  display label of the county
"""
TariffCounty = Schema({"id": str, "label": str})

"""TimePeriod
The ticket is valid within this period.  Both ends use the format HH:mm
(hour values > 24 mean "next day").
    begin: str
    end: str
"""
TimePeriod = Schema({"begin": str, "end": str})

"""TariffZone
    zone: str          an HVV fare zone
    ring: str          an HVV fare ring
    neighbours: [str]  the neighbouring zones of the current zone
"""
TariffZone = Schema({"zone": str, "ring": str, "neighbours": [str]})

"""Link
    label: str  summarized information about the linked content
                (e.g. "Ersatzfahrplan")
    url: str    link providing further information about an actual notice
"""
Link = Schema({"label": str, "url": str})

"""TimeRange
Null at either end means "open end".
    begin: DateTime
    end: DateTime
"""
TimeRange = Schema({"begin": DateTime, "end": DateTime})

"""StationLight
A light version of a station.
    id: str    the unique id of the station
    name: str  the name of the station
"""
StationLight = Schema({"id": str, "name": str})

"""TariffRegionList
Holds a list of tariff regions (e.g. tariff-zones, tariff-rings).
    regions: [str]  crossed tariff regions
"""
TariffRegionList = Schema({"regions": [str]})

"""ScheduleElementLight
One element of a schedule with from/to station and line IDs; the light
form of a ScheduleElement.  Always a public-transport element (no
footpaths), always station-to-station.
    departureStationId: str
    arrivalStationId: str
    lineId: str
"""
ScheduleElementLight = Schema(
    {"departureStationId": str, "arrivalStationId": str, "lineId": str}
)

"""Property
Key/value pair transferred between server and client via
InitRequest/InitResponse.
    key: str
    value: str
"""
Property = Schema({"key": str, "value": str})

"""Ticket
The ticket information.  DEPRECATED since API Version 13, replaced by
"tariffInfos".
    price: float          ticket price
    reducedPrice: float   reduced price for online shopping (since v16)
    currency: str         currency of the price, default EUR
    type: str             e.g. Einzelfahrt
    level: str            e.g. Nahbereich
    tariff: str           e.g. HVV
    range: str            the stations of the ticket
    ticketRemarks: str    additional information (since v18)
"""
Ticket = Schema(
    {
        "price": float,
        "reducedPrice": float,
        "currency": str,
        "type": str,
        "level": str,
        "tariff": str,
        "range": str,
        "ticketRemarks": str,
    }
)

"""TariffInfoSelector
Selects which tariffs/kinds to return ticket infos for in a schedule.
    tariff: str          tariff of HVV or SH; "all" selects all (default HVV)
    tariffRegions: bool  also return tariff regions (default true)
    kinds: [int]         tariff kinds; empty list returns every valid kind
"""
TariffInfoSelector = Schema({"tariff": str, "tariffRegions": bool, "kinds": [int]})

"""Penalty
A penalty for a GRRequest.
    name: str
    value: str  usually a single int; exceptions:
                DesiredType  string:int        (e.g. bus:10)
                DesiredLine  string,...,string:int (e.g. U1,S21,U3:3)
"""
Penalty = Schema({"name": str, "value": str})

"""DLFilterEntry
Filter for DLRequest. * since Version 20
Either serviceID or stationIDs must be filled in for a Request.
    serviceID: str      ID of the Departure's service
    stationIDs: [str]   stations of which one must be on the journey
                        after (before, if departure=false) the reference
                        station
    label: str          user-facing direction description; may be empty in
                        DLRequest, always filled in DLResponse
    serviceName: str    public name of the service
"""
DLFilterEntry = Schema(
    {"serviceID": str, "stationIDs": [str], "label": str, "serviceName": str}
)

"""GTITime
    date: str  format dd.mm.yyyy
    time: str  format hh:mm
"""
GTITime = Schema({"date": str, "time": str})

"""ContSearchByServiceId
    serviceId: int                 service id of the first/last trip part
                                   with vehicle (required)
    lineKey: str                   line key of that trip part (required)
    plannedDepArrTime: GTITime     planned departure/arrival time at the
                                   start/dest station (required)
    additionalOffset: int          additional footway offset to start/dest;
                                   negative in the dest case (required)
"""
ContSearchByServiceId = Schema(
    {
        Required("serviceId"): int,
        Required("lineKey"): str,
        Required("plannedDepArrTime"): GTITime,
        Required("additionalOffset"): int,
    }
)

"""Attribute
    title: str       title of the attribute
    isPlanned: bool  is the attribute planned?
    value: str       text describing the attribute
    types: [str]     attribute types (since v3), e.g. NORMAL, ANNOUNCEMENT,
                     REALTIME, DIRECTION_NAME, ENTRY_PROHIBITED,
                     EXIT_PROHIBITED, STOP_ON_DEMAND, PLATFORM, NOCHANGE,
                     POSITION_FRONT, POSITION_BACK, POSITION_MIDDLE.
                     New types may appear without a new interface version;
                     clients should be prepared to handle unknown types.
"""
Attribute = Schema({"title": str, "isPlanned": bool, "value": str, "types": [str]})
def apply_wbc_to_patient(inputs):
    """
    wrapper around prediction box consolidation: weighted cluster scoring (wcs).
    processes a single patient. loops over batch elements in patient results
    (1 in 3D, slices in 2D) and foreground classes, aggregates and stores
    results in new list.
    :return. patient_results_list: list over batch elements. each element is
        a list over boxes, where each box is one dictionary:
        [[box_0, ...], [box_n, ...]]. batch elements are slices for 2D
        predictions, and a dummy batch dimension of 1 for 3D predictions.
    :return. pid: string. patient id.
    """
    patient_list, pid, class_dict, wcs_iou, n_ens = inputs
    consolidated = [[] for _ in patient_list]

    for bix, element in enumerate(patient_list):
        for cl in list(class_dict.keys()):
            # Gather this element's detections of the current class.
            dets = [(ix, box) for ix, box in enumerate(element)
                    if box['box_type'] == 'det' and box['box_pred_class_id'] == cl]

            coords = np.array([d[1]['box_coords'] for d in dets])
            scores = np.array([d[1]['box_score'] for d in dets])
            center_factors = np.array([d[1]['box_patch_center_factor'] for d in dets])
            n_overlaps = np.array([d[1]['box_n_overlaps'] for d in dets])
            patch_ids = np.array([d[1]['patch_id'] for d in dets])

            # Only cluster when there is at least one detection.
            if 0 not in scores.shape:
                keep_scores, keep_coords = weighted_box_clustering(
                    np.concatenate((coords, scores[:, None],
                                    center_factors[:, None],
                                    n_overlaps[:, None]), axis=1),
                    patch_ids, wcs_iou, n_ens)

                for kix in range(len(keep_scores)):
                    consolidated[bix].append({'box_type': 'det',
                                              'box_coords': keep_coords[kix],
                                              'box_score': keep_scores[kix],
                                              'box_pred_class_id': cl})

        # add gt boxes back to new output list.
        consolidated[bix].extend(
            box for box in element if box['box_type'] == 'gt')

    return [consolidated, pid]


def merge_2D_to_3D_preds_per_patient(inputs):
    """
    wrapper around 2Dto3D merging operation. Processes a single patient.
    Takes 2D patient results (slices in batch dimension) and returns 3D
    patient results (dummy batch dimension of 1). Applies an adaption of
    Non-Maximum Surpression (detailed methodology is described in nms_2to3D).
    :return. results_dict_boxes: list over batch elements (1 in 3D). each
        element is a list over boxes, where each box is one dictionary:
        [[box_0, ...], [box_n, ...]].
    :return. pid: string. patient id.
    """
    patient_list, pid, class_dict, merge_3D_iou = inputs
    merged = []

    for cl in list(class_dict.keys()):
        # Collect detections over the batch (slice) dimension, remembering
        # which slice each one came from.
        boxes, slice_ids = [], []
        for bix, slice_results in enumerate(patient_list):
            dets = [(ix, box) for ix, box in enumerate(slice_results)
                    if box['box_type'] == 'det' and box['box_pred_class_id'] == cl]
            boxes += dets
            slice_ids += [bix] * len(dets)

        coords = np.array([d[1]['box_coords'] for d in boxes])
        scores = np.array([d[1]['box_score'] for d in boxes])
        slice_ids = np.array(slice_ids)

        if 0 not in scores.shape:
            keep_ix, keep_z = nms_2to3D(
                np.concatenate((coords, scores[:, None], slice_ids[:, None]),
                               axis=1), merge_3D_iou)
        else:
            keep_ix, keep_z = [], []

        # Kept predictions get their z-extent appended to the coordinates.
        for kix, kz in zip(keep_ix, keep_z):
            merged.append({'box_type': 'det',
                           'box_coords': list(coords[kix]) + kz,
                           'box_score': scores[kix],
                           'box_pred_class_id': cl})

    # Carry all ground-truth boxes over unchanged.
    merged += [box for slice_results in patient_list
               for box in slice_results if box['box_type'] == 'gt']

    # add dummy batch dimension 1 for 3D.
    return [[merged], pid]
The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position (1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique patches in the cluster, which did not contribute any predict any boxes. :param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), scores, box_pc_facts, box_n_ovs) :param thresh: threshold for iou_matching. :param n_ens: number of models, that are ensembled. (-> number of expected predicitions per position) :return: keep_scores: (n_keep) new scores of boxes to be kept. :return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept. """ dim = 2 if dets.shape[1] == 7 else 3 y1 = dets[:, 0] x1 = dets[:, 1] y2 = dets[:, 2] x2 = dets[:, 3] scores = dets[:, -3] box_pc_facts = dets[:, -2] box_n_ovs = dets[:, -1] areas = (y2 - y1 + 1) * (x2 - x1 + 1) if dim == 3: z1 = dets[:, 4] z2 = dets[:, 5] areas *= (z2 - z1 + 1) # order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24) order = scores.argsort()[::-1] keep = [] keep_scores = [] keep_coords = [] while order.size > 0: i = order[0] # higehst scoring element xx1 = np.maximum(x1[i], x1[order]) yy1 = np.maximum(y1[i], y1[order]) xx2 = np.minimum(x2[i], x2[order]) yy2 = np.minimum(y2[i], y2[order]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h if dim == 3: zz1 = np.maximum(z1[i], z1[order]) zz2 = np.minimum(z2[i], z2[order]) d = np.maximum(0.0, zz2 - zz1 + 1) inter *= d # overall between currently highest scoring box and all boxes. ovr = inter / (areas[i] + areas[order] - inter) # get all the predictions that match the current box to build one cluster. 
matches = np.argwhere(ovr > thresh) match_n_ovs = box_n_ovs[order[matches]] match_pc_facts = box_pc_facts[order[matches]] match_patch_id = box_patch_id[order[matches]] match_ov_facts = ovr[matches] match_areas = areas[order[matches]] match_scores = scores[order[matches]] # weight all socres in cluster by patch factors, and size. match_score_weights = match_ov_facts * match_areas * match_pc_facts match_scores *= match_score_weights # for the weigted average, scores have to be divided by the number of total expected preds at the position # of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is # multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be # in areas of different overlaps). n_expected_preds = n_ens * np.mean(match_n_ovs) # the number of missing predictions is obtained as the number of patches, # which did not contribute any prediction to the current cluster. n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0])) # missing preds are given the mean weighting # (expected prediction is the mean over all predictions in cluster). denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights) # compute weighted average score for the cluster avg_score = np.sum(match_scores) / denom # compute weighted average of coordinates for the cluster. now only take existing # predictions into account. 
avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores), np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores), np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores), np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)] if dim == 3: avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores)) avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores)) # some clusters might have very low scores due to high amounts of missing predictions. # filter out the with a conservative threshold, to speed up evaluation. if avg_score > 0.01: keep_scores.append(avg_score) keep_coords.append(avg_coords) # get index of all
<gh_stars>1-10 import urllib.request,sys,time from bs4 import BeautifulSoup import requests from datetime import date print("A PRDUCT MADE BY <NAME>"); print("MIT LICENSE 2020. COPYRIGHT <NAME>") while True: # print("\nPLEASE ENTER A SEARCH TERM(PRESS ENTER WHEN DONE).\n (TO EXIT, JUST PRESS ENTER) \n ->>") # Change the terms to show results term = input("\nPLEASE ENTER A SEARCH TERM(PRESS ENTER WHEN DONE).\n (TO EXIT, JUST PRESS ENTER) \n ->>") pagesToGet= 1 # TOI, Hindustan Times, Aaj Tak hindi, Indian Express, NDTV if term: # SET UP THE FILE to store data filename="NEWS " + str(date.today())+ ' ' + term +" .csv" f=open(filename,"w", encoding = 'utf-8') headers="Term,Source,Statement,Content, Date, Link\n" f.write(headers) ####################### # Times of India ##################### for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://timesofindia.indiatimes.com/topic/' + term +'/'+str(pageNo) print(url) try: page=requests.get(url) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('li', attrs={'class': 'article'} ) print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: Term = term.capitalize() Source = 'Times of India' Statement = j.find("meta", attrs={'itemprop': 'name'})['content'].strip() Link = j.find('meta', attrs={'itemprop': 'url'})['content'].strip() Content = j.find('div', attrs={'class': 'content'}).find('a').find('p').text.strip() Date = j.find('div', attrs={'class': 'content'}).find('a').find('span', attrs={'class' : 'meta'}).text.strip() f.write(Term + "," +Source + "," + 
Statement.replace(',', '|') + "," + Content.replace(',', '|') + "," + Date.replace(',', '|') + "," + Link + "\n") # # ####################### # # Hindustan Times # # ####################### for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://www.hindustantimes.com/search?q='+ term #+ "&pageno=" +str(pageNo) print(url) headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"} try: page=requests.get(url,headers=headers) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) print(page) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('div', attrs={'class': 'media-body'} ) print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: Term = term.capitalize() Source = 'Hindustan Times' if(j.find('div', attrs={'class': 'media-heading'})): Statement = j.find('div', attrs={'class': 'media-heading'}).find("a").text.strip() else: continue if (j.find('div', attrs={'class': 'media-heading'})): Link = j.find('div', attrs={'class': 'media-heading'}).find("a")['href'].strip() else: Link = " " if(j.find('div', attrs={'class': 'para-txt'})): Content = j.find('div', attrs={'class': 'para-txt'}).text.strip() else: Content= " " if(j.find('span', attrs={'class': 'time-dt'})): Date = j.find('span', attrs={'class': 'time-dt'}).text.strip() else: Date = " " # print(Source, Statement, Link, Content, Date) f.write(Term 
+ "," + Source + "," + Statement.replace(',', '|') + "," + Content.replace(',', '|') + "," + Date.replace(',', '|') + "," + Link + "\n") # ####################### # <NAME>DIII # ####################### pagesToGet = 2 for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://aajtak.intoday.in/topic/'+ term + "-page-"+ str(pageNo) + ".html" print(url) # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"} try: page=requests.get(url)#,headers=headers) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) # print(page.text) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('div', attrs={'class': 'scc_kv_st'} ) print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: Term = term.capitalize() Source = 'AAJ TAK HINDI' Statement = j.find('div', attrs={'class': 'scc_kv_all'}).find('h3').find("a").text Link = j.find('div', attrs={'class': 'scc_kv_all'}).find('h3').find("a")['href'].strip() Content = j.find('span', attrs={'class':'scc_st'}).text Date = j.find('div', attrs={'class': 'scc_kv_all'}).find('cite').text # print(Source, Statement, Link, Content, Date) f.write(Term + "," + Source + "," + ' '.join(Statement.replace(',', ' ').split()) + "," + ' '.join(Content.replace(',', ' ').split()) + "," + Date.replace(',', ' ') + "," + Link + "\n") # # ####################### # # INdian Express # # 
####################### pagesToGet = 2 for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://indianexpress.com/?s='+ term print(url) headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"} try: page=requests.get(url,headers=headers) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) print(page) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('div', attrs={'class': 'details'} ) print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: Term = term.capitalize() Source = 'Indian Express' Statement = j.find('h3').find('a').text.strip() Link = j.find('h3').find("a")['href'].strip() Content = j.find('p').text.strip() Date = j.find('time').text.strip() # print(Source, Statement, Link, Content, Date) f.write(Term + ',' + Source + "," + " ".join(Statement.replace(',', '|').split()) + "," + " ".join(Content.replace(',', '|').split()) + "," + " ".join(Date.replace(',', '|').split()) + "," + " ".join(Link.split()) + "\n") # # ####################### # # THE HINDU # # ####################### pagesToGet = 2 for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://www.thehindu.com/search/?q=' + term + '&order=DESC&sort=publishdate' print(url) headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 
Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": "en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"} try: page=requests.get(url,headers=headers) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) print(page) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('div', attrs={'class': 'story-card-news'} ) print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: Term = term.capitalize() Source = 'The Hindu' Statement = j.find("a", attrs={'class' : 'story-card75x1-text'}).text.strip() Link = j.find("a", attrs={'class' : 'story-card75x1-text'})['href'] Content = j.find('span', attrs={'class': 'light-gray-color'}).text.strip() Date = j.find('span', attrs={'class': ''}).find('span', attrs={'class' : 'dateline'}).text.strip() # print( Source, Statement, Link, Content, Date) f.write(Term + ',' + Source + "," + " ".join(Statement.replace(',', '|').split()) + "," + " ".join(Content.replace(',', '|').split()) + "," + " ".join(Date.replace(',', '|').split()) + "," + " ".join(Link.split()) + "\n") # # ####################### # # NDTV NEWS # # ####################### pagesToGet = 1 for pageNo in range(1,pagesToGet+1): print('processing page :', pageNo) url = 'https://www.ndtv.com/search?searchtext=' + term print(url) headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36', "Upgrade-Insecure-Requests": "1","DNT": "1","Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8","Accept-Language": 
"en-US,en;q=0.5","Accept-Encoding": "gzip, deflate"} try: page=requests.get(url,headers=headers) except Exception as e: # this describes what to do if an exception is thrown error_type, error_obj, error_info = sys.exc_info() # get the exception information print ('ERROR FOR LINK:',url) #print the link that cause the problem print (error_type, 'Line:', error_info.tb_lineno) #print error info and line that threw the exception continue # Wait for 2 seconds time.sleep(2) print(page) # Get page links soup = BeautifulSoup(page.text, "html.parser") links = soup.find_all('li') print( "Page "+str(pageNo) +" : " + str(len(links)) + " articles") for j in links: # print(j.get('style')) # print(j) if j.get('style') == "padding: 5px;": Term = term.capitalize() Source = 'NDTV NEWS' Statement = j.find("p", attrs={'class' : 'header
# -*- coding: UTF-8 -*- """Learner for StyleGANs. Typical usage example: First configure your desired GAN on the command-line: go to root directory... $ python config.py stylegan $ python data_config.py FFHQ path/to/datasets/ffhq Then write a custom script (or use train.py): from gan_lab import get_current_configuration from gan_lab.utils.data_utils import prepare_dataset, prepare_dataloader from gan_lab.stylegan.learner import StyleGANLearner # get most recent configurations: config = get_current_configuration( 'config' ) data_config = get_current_configuration( 'data_config' ) # get DataLoader(s) train_ds, valid_ds = prepare_dataset( data_config ) train_dl, valid_dl, z_valid_dl = prepare_dataloader( config, data_config, train_ds, valid_ds ) # instantiate StyleGANLearner and train: learner = StyleGANLearner( config ) learner.train( train_dl, valid_dl, z_valid_dl ) # train for config.num_main_iters iterations learner.config.num_main_iters = 300000 # this is one example of changing your instantiated learner's configurations learner.train( train_dl, valid_dl, z_valid_dl ) # train for another 300000 iterations Note that the above custom script is just a more flexible alternative to running train.py (you can, for example, run the above on a Jupyter Notebook). You can always just run train.py. 
""" # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # from .base import StyleGAN from .architectures import StyleAddNoise, StyleGenerator from _int import get_current_configuration, LearnerConfigCopy from progan.architectures import ProDiscriminator from progan.learner import ProGANLearner from utils.latent_utils import gen_rand_latent_vars from utils.backprop_utils import calc_gp, configure_adam_for_gan from utils.custom_layers import Conv2dBias, LinearBias import os import sys from abc import ABC import copy import logging import warnings from pathlib import Path from functools import partial from itertools import accumulate from timeit import default_timer as timer import numpy as np from PIL import Image from indexed import IndexedOrderedDict import matplotlib.pyplot as plt plt.rcParams.update( { 'figure.max_open_warning': 0 } ) import torch from torch import nn import torch.nn.functional as F from torchvision import transforms # from tqdm import tqdm # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # NONREDEFINABLE_ATTRS = ( 'model', 'init_res', 'res_samples', 'res_dataset', 'len_latent', 'num_classes', 'class_condition', 'use_auxiliary_classifier', 'model_upsample_type', 'model_downsample_type', 'align_corners', 'blur_type', 'nonlinearity', 'use_equalized_lr', 'normalize_z', 'use_pixelnorm', 'mbstd_group_size', 'use_ewma_gen', 'use_instancenorm', 'use_noise', 'pct_mixing_reg', 'beta_trunc_trick', 'psi_trunc_trick', 'cutoff_trunc_trick', 'len_dlatent', 'mapping_num_fcs', 'mapping_lrmul', ) REDEFINABLE_FROM_LEARNER_ATTRS = ( 'batch_size', 'loss', 'gradient_penalty', 'optimizer', 'lr_sched', 'latent_distribution', ) COMPUTE_EWMA_VIA_HALFLIFE = True EWMA_SMOOTHING_HALFLIFE = 10. EWMA_SMOOTHING_BETA = .999 # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # class StyleGANLearner( ProGANLearner ): """GAN Learner specifically designed for StyleGAN architectures. 
Once instantiated, the StyleGANLearner object's configuration can be changed, but only via its self.config attribute (i.e. running 'python config.py [model]' post-instantiation will not affect this learner's configuration). """ def __init__( self, config ): super( StyleGANLearner, self ).__init__( config ) if self.model == 'StyleGAN': # If you want to change an attribute in an already-instantiated StyleGANLearner's config or data_config, # change self.config (below) instead of config and self.data_config (also below) instead of data_config: self.config = LearnerConfigCopy( config, self.__class__.__name__, NONREDEFINABLE_ATTRS, REDEFINABLE_FROM_LEARNER_ATTRS ) # pretrained models loaded later on for evaluation should not require data_config.py to have been run: self._is_data_configed = False; self._stats_set = False self._update_data_config( raise_exception = False ) self._latent_distribution = self.config.latent_distribution global StyleGAN, StyleGenerator, ProDiscriminator, StyleDiscriminator StyleGAN = type( 'StyleGAN', ( nn.Module, ABC, ), dict( StyleGAN.__dict__ ) ) StyleGAN.reset_state( ) StyleGenerator = type( 'StyleGenerator', ( StyleGAN, ), dict( StyleGenerator.__dict__ ) ) self.gen_model = StyleGenerator( final_res = self.config.res_samples, latent_distribution = self._latent_distribution, len_latent = self.config.len_latent, len_dlatent = self.config.len_dlatent, mapping_num_fcs = self.config.mapping_num_fcs, mapping_lrmul = self.config.mapping_lrmul, use_instancenorm = self.config.use_instancenorm, use_noise = self.config.use_noise, upsampler = self.gen_model_upsampler, blur_type = self.config.blur_type, nl = self.nl, num_classes = self.num_classes_gen, equalized_lr = self.config.use_equalized_lr, normalize_z = self.config.normalize_z, use_pixelnorm = self.config.use_pixelnorm, pct_mixing_reg = self.config.pct_mixing_reg, truncation_trick_params = { 'beta': self.config.beta_trunc_trick, 'psi': self.config.psi_trunc_trick, 'cutoff_stage': 
self.config.cutoff_trunc_trick } ) # Create `StyleDiscriminator` type object by converting `ProDiscriminator` class into `StyleDiscriminator` class: StyleDiscriminator = type( 'StyleDiscriminator', ( StyleGAN, ), dict( ProDiscriminator.__dict__ ) ) self.disc_model = StyleDiscriminator( final_res = self.config.res_samples, pooler = self.disc_model_downsampler, blur_type = self.config.blur_type, nl = self.nl, num_classes = self.num_classes_disc, equalized_lr = self.config.use_equalized_lr, mbstd_group_size = self.config.mbstd_group_size ) # If one wants to start at a higher resolution than 4: assert self.config.init_res <= self.config.res_samples if self.config.init_res > 4: _init_res_log2 = int( np.log2( self.config.init_res ) ) if float( self.config.init_res ) != 2**_init_res_log2: raise ValueError( 'Only resolutions that are powers of 2 are supported.' ) num_scale_inc = _init_res_log2 - 2 for _ in range( num_scale_inc ): self.gen_model.increase_scale() self.disc_model.increase_scale() self.gen_model.fade_in_phase = False # this applies it to both networks simultaneously # Generator and Discriminator state data must match: assert self.gen_model.cls_base.__dict__ == \ self.disc_model.cls_base.__dict__ # Initialize EWMA Generator Model: self.gen_model_lagged = None if self.config.use_ewma_gen: _orig_mode = self.gen_model.training self.gen_model.train() self.gen_model.to( 'cpu' ) with torch.no_grad(): self.gen_model_lagged = copy.deepcopy( self.gen_model ) # for memory efficiency in GPU self.gen_model_lagged.to( self.config.metrics_dev ) self.gen_model_lagged.train( mode = _orig_mode ) self.gen_model.train( mode = _orig_mode ) self.gen_model.to( self.config.dev ) self.disc_model.to( self.config.dev ) self.batch_size = self.config.bs_dict[ self.gen_model.curr_res ] if self.cond_gen: self.labels_one_hot_disc = self._tensor( self.batch_size, self.num_classes ) self.labels_one_hot_gen = self._tensor( self.batch_size * self.config.gen_bs_mult, self.num_classes ) # Loss 
Function: self._loss = config.loss.casefold() self._set_loss( ) # Gradient Regularizer: self.gp_func = partial( calc_gp, gp_type = self._gradient_penalty, nn_disc = self.disc_model, lda = self.config.lda, gamma = self.config.gamma ) # Optimizer: self._set_optimizer( ) # Epsilon Loss to punish possible outliers from training distribution: self.eps = False if self.config.eps_drift > 0: self.eps = True # Print configuration: print( '-------- Initialized Model Configuration --------' ) print( self.config ) print( '-------------------------------------------------' ) # print( " If you would like to alter any of the above configurations,\n" + \ # " please do so via altering your instantiated StyleGANLearner().config's attributes." ) print( '\n Ready to train!\n' ) def _apply_lagged_weights( self, m ): # TODO: Include support for other learnable layers such as BatchNorm _keys = m.state_dict().keys() if isinstance( m, ( nn.Linear, nn.Conv2d, LinearBias, Conv2dBias, ) ): if 'weight' in _keys: m.weight = nn.Parameter( self.lagged_params.values()[ self._param_tensor_num + 1 ] ) self._param_tensor_num += 1 if 'bias' in _keys: m.bias = nn.Parameter( self.lagged_params.values()[ self._param_tensor_num + 1 ] ) self._param_tensor_num += 1 elif isinstance( m, StyleAddNoise ): if 'noise_weight' in _keys: m.noise_weight = nn.Parameter( self.lagged_params.values()[ self._param_tensor_num + 1 ] ) self._param_tensor_num += 1 elif isinstance( m, StyleGenerator ): if 'const_input' in _keys: m.const_input = nn.Parameter( self.lagged_params.values()[ 0 ] ) self._param_tensor_num += 1 @property def latent_distribution( self ): return self._latent_distribution @latent_distribution.setter def latent_distribution( self, new_latent_distribution ): new_latent_distribution = new_latent_distribution.casefold() self._latent_distribution = new_latent_distribution self.gen_model.latent_distribution = new_latent_distribution if self.config.use_ewma_gen: self.gen_model_lagged.latent_distribution = 
new_latent_distribution # def reset_stylegan_state( self ): # self.gen_model.cls_base.reset_state( ) # this applies to both networks simultaneously # .......................................................................... # @torch.no_grad() def plot_sample( self, z_test, z_mixing = None, style_mixing_stage = None, noise = None, label = None, time_average = True ): """Plots and shows 1 sample from input latent code; offers stylemixing and noise input capabilities.""" if self.ds_mean is None or self.ds_std is None: raise ValueError( "This model does not hold any information about your dataset's mean and/or std.\n" + \ "Please provide these (either from your current data configuration or from your pretrained model)." ) for z in ( z_test, z_mixing, ): if z is not None: if z.dim() == 2: if z.shape[0] != 1: raise IndexError( 'This method only permits plotting 1 generated sample at a time.' ) elif z.dim() != 1: raise IndexError( 'Incorrect dimensions of input latent vector. Must be either `dim == 1` or `dim == 2`.' ) if not self.cond_gen: if z.shape[-1] != self.config.len_latent: message = f"Input latent vector must be of size {self.config.len_latent}." raise IndexError( message ) else: if z.shape[-1] != self.config.len_latent + self.num_classes_gen: message = f"This is a generator class-conditioned model. So please make sure to append a one-hot encoded vector\n" + \ f"of size {self.num_classes_gen} that indicates the to-be generated sample's class to a latent vector of\n" + \ f"size {self.config.len_latent}. Total input size must therefore be {self.config.len_latent + self.num_classes_gen}." 
raise IndexError( message ) # z_test = z_test.to( self.config.dev ) x_test = \ self.gen_model_lagged( z_test, x_mixing = z_mixing, style_mixing_stage = style_mixing_stage, noise = noise ).squeeze() if time_average else \ self.gen_model( z_test, x_mixing = z_mixing, style_mixing_stage = style_mixing_stage, noise = noise ).squeeze() if label is not None: print( f'Label Index for Generated Image: {label}' ) logger = logging.getLogger() _old_level = logger.level logger.setLevel( 100 ) # ignores potential "clipping input data" warning plt.imshow( ( ( ( x_test ) \ .cpu().detach() * self.ds_std ) + self.ds_mean ) \ .numpy().transpose( 1, 2, 0 ), interpolation = 'none' ) logger.setLevel( _old_level ) plt.show() @torch.no_grad() def make_stylemixing_grid( self, zs_sourceb, zs_coarse = [], zs_middle = [], zs_fine = [], labels = None, time_average = True, save_path
<filename>pytransact/test/test_commit.py
# NOTE(review): the "<filename>" line above is a dataset-extraction artifact,
# not valid Python — it marks where this fragment was spliced in; left verbatim.
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bson, logging, os, gridfs, py, pymongo
from bson.objectid import ObjectId
from pymongo.errors import OperationFailure
from pytransact.difftoi import DiffTOI
from pytransact import commit, mongo
from pytransact.contextbroker import ContextBroker
from pytransact.exceptions import *
from pytransact.object.attribute import BlobVal
from pytransact.testsupport import ContextTests, Fake, RuntimeContext, Time
import blm


def setup_module(mod):
    """Register the test BLM package and enable debug logging for commit."""
    from blm import fundamental
    mod.blm = blm
    # The 'blm' subdirectory next to this test file provides the
    # 'testcommit' business-logic module used throughout these tests.
    blm.addBlmPath(os.path.join(os.path.dirname(__file__), 'blm'))
    from blm import testcommit
    logging.basicConfig()
    commit.log.setLevel(logging.DEBUG)


def teardown_module(mod):
    """Undo setup_module: remove the test BLM path and clear loaded BLMs."""
    blm.removeBlmPath(os.path.join(os.path.dirname(__file__), 'blm'))
    blm.clear()


class BaseCommitContextTests(ContextTests):
    """Shared fixtures and helpers for CommitContext tests."""

    def setup_method(self, method):
        # Create a privileged user inside a runtime context, then sync
        # so it is visible to subsequently created contexts.
        super(BaseCommitContextTests, self).setup_method(method)
        with RuntimeContext(self.database):
            self.user = blm.fundamental.AccessHolder(super=[True])
        self.sync()

    def newcontext(self, user=None):
        """Push and return a fresh writable CommitContext (default: self.user)."""
        if user is None:
            user = self.user
        ctx = commit.CommitContext(self.database, user)
        ctx.setMayChange(True)
        ContextBroker().pushContext(ctx)
        return ctx

    def commit(self):
        """Commit the current context, then sync and open a new one."""
        ctx = ContextBroker().context
        ctx.runCommit([], interested=None)
        ContextBroker().popContext()
        self.sync()
        self.newcontext()

    def set_primary(self):
        # Re-open the database handle with PRIMARY read preference so
        # reads observe the writes just made.
        self._database = self.database.client.get_database(
            self.database.name,
            read_preference=pymongo.ReadPreference.PRIMARY)

    def find(self, query, collection=None):
        """Find in `collection` (default: the tois collection)."""
        collection = collection or self.database.tois
        return mongo.find(collection, query)

    def find_one(self, query, collection=None):
        """Find one document in `collection` (default: the tois collection)."""
        collection = collection or self.database.tois
        return mongo.find_one(collection, query)


class TestCommitContext(BaseCommitContextTests):
    """Behavioural tests for commit.CommitContext."""

    def test_wait_for_commit(self):
        # A commit tagged 'interested' should be observable via wait_for_commit.
        self._commit('interested')
        result, error = commit.wait_for_commit(self.database, 'interested',
                                               timeout=1)
        assert result
        assert not error

    def test_wait_for_commit_timeout(self):
        # Default failure is commit.Timeout; onfail= substitutes the exception.
        py.test.raises(commit.Timeout, commit.wait_for_commit,
                       self.database, 'interested', timeout=0.1)

        class MyException(Exception):
            pass

        py.test.raises(MyException, commit.wait_for_commit,
                       self.database, 'interested', onfail=MyException,
                       timeout=0.1)

    def test_simple(self):
        # Construction alone must not raise.
        cctx = commit.CommitContext(self.database)

    def test_createToi(self):
        # A created toi is visible in-context immediately, but only hits
        # the database after runCommit.
        cctx = self.newcontext()
        toi = cctx.createToi(blm.testcommit.Test, cctx.newId(),
                             {'name': ['test']})
        assert toi.name == ['test']
        assert toi.__class__._query(name='test').run()[0] is toi
        assert self.find({'_toc': 'testcommit.Test'}).count() == 0
        ContextBroker().popContext()
        cctx.runCommit([])
        assert self.find({'_toc': 'testcommit.Test'}).count() == 1

    def test_canWrite_new_toi(self):
        # Same as test_createToi, but with a non-super user.
        user = blm.fundamental.AccessHolder()
        cctx = self.newcontext(user=user)
        toi = cctx.createToi(blm.testcommit.Test, cctx.newId(),
                             {'name': ['test']})
        self.sync()
        assert toi.name == ['test']
        assert toi.__class__._query(name='test').run()[0] is toi
        assert self.find({'_toc': 'testcommit.Test'}).count() == 0
        ContextBroker().popContext()
        cctx.runCommit([])
        assert self.find({'_toc': 'testcommit.Test'}).count() == 1

    def test_changeToi(self):
        # Attribute changes are context-local until runCommit persists them.
        toi = blm.testcommit.Test(name=['test'])
        self.sync()
        cctx = self.newcontext()
        # New context, so we have to look it up again
        toi = blm.testcommit.Test._query().run()[0]
        toi(extra=['fOo'])
        assert toi.extra == ['fOo']
        assert toi.__class__._query(extra='fOo').run()[0] is toi
        assert toi.__class__._query(extra=None).run() == []
        dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))
        assert dbtoi.get('extra', []) == []
        ContextBroker().popContext()
        cctx.runCommit([])
        dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))
        assert dbtoi['extra'] == ['fOo']

    def test_changeToi_with_nop_change(self):
        # Changing a value and then restoring it must still commit cleanly.
        toi = blm.testcommit.Test(name=['test'])
        self.sync()
        cctx = self.newcontext()
        # New context, so we have to look it up again
        toi = blm.testcommit.Test._query(_attrList=['name']).run()[0]
        toi(name=['fOo'])
        assert toi.name == ['fOo']
        assert toi.__class__._query(name='fOo').run()[0] is toi
        assert toi.__class__._query(name=None).run() == []
        dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))
        assert dbtoi.get('extra', []) == []
        toi(name=['test'])  # Restore to original value
        ContextBroker().popContext()
        commit = cctx.runCommit([])  # NOTE: local name shadows the module here
        assert commit.state != 'failed'
        dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))
        assert dbtoi['name'] == ['test']

    def test_deleteToi(self):
        # A deleted toi vanishes from queries at once, from the DB on commit.
        toi = blm.testcommit.Test(name=['text'])
        print(toi, toi.__class__)
        self.sync()
        cctx = self.newcontext()
        toi, = blm.testcommit.Test._query().run()
        toi._delete()
        print(toi, toi.__class__)
        assert toi.__class__._query().run() == []
        self.sync()
        assert self.find({'_toc': 'testcommit.Test'}).count() == 1
        ContextBroker().popContext()
        cctx.runCommit([])
        assert self.find({'_toc': 'testcommit.Test'}).count() == 0

    def test_runQuery_simple(self):
        # This is actually already tested by the queries
        # in the above *Toi tests, but we make an explicit test
        # anyway
        blm.testcommit.Test(name=['text'])
        self.sync()
        cctx = self.newcontext()
        toi, = blm.testcommit.Test._query(name='text').run()
        name = toi.name[0]
        assert name == 'text'

    def test_runQuery_subQuery(self):
        # A query may take another query as a toiref condition.
        foo = blm.testcommit.Test(name=['foo'])
        blm.testcommit.Test(name=['text'], toiref=[foo])
        self.commit()
        cctx = self.newcontext()
        q = blm.testcommit.Test._query(
            toiref=blm.testcommit.Test._query(name='foo'))
        toi, = q.run()
        assert toi.name == ['text']

    def test_requestAttribute(self):
        cctx = self.newcontext()
        toi = blm.fundamental.AccessHolder._query().run()[0]
        attrVal = cctx.requestAttribute(toi, blm.fundamental.AccessHolder.super)
        assert attrVal == [True]
        toi = blm.testcommit.Test(name=['text'])
        attrVal = cctx.requestAttribute(toi, blm.testcommit.Test.name)
        assert attrVal == ['text']

    def test_requestAttribute_with_toi_deleted(self):
        # Requesting attributes of a deleted toi is an error.
        cctx = self.newcontext()
        toi = blm.testcommit.Test(name=['foo'])
        toi._delete()
        py.test.raises(RuntimeError, cctx.requestAttribute, toi, None)

    def test_preloadAttributes(self):
        py.test.skip('Not really useful, remove it?')

    def test_validateAttrValues_simple(self):
        # Quantity constraints: exactly one value is accepted for this attr.
        cctx = self.newcontext()
        toi1 = blm.testcommit.RestTest(name=['test'])
        value = ['foo']
        rval = cctx.validateAttrValues(toi1, toi1.name, value)
        assert rval == value
        py.test.raises(ClientError, cctx.validateAttrValues,
                       toi1, toi1.name, [])
        py.test.raises(ClientError, cctx.validateAttrValues,
                       toi1, toi1.name, ['foo', 'bar'])

    def test_validateAttrValues_readonly(self):
        toi1 = blm.testcommit.Test(name=['test'])
        cctx = self.newcontext()
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       blm.testcommit.Test.readonly, ['foo'])
        toi1 = blm.testcommit.Test._query(name='test').run()[0]
        py.test.raises(ClientError, cctx.validateAttrValues, toi1,
                       blm.testcommit.Test.readonly, ['foo'])

    def test_validateAttrValues_computed(self):
        toi1 = blm.testcommit.Test(name=['test'])
        cctx = self.newcontext()
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       blm.testcommit.Test.computed, ['foo'])
        toi1 = blm.testcommit.Test._query(name='test').run()[0]
        py.test.raises(ClientError, cctx.validateAttrValues, toi1,
                       blm.testcommit.Test.computed, ['foo'])

    def test_validateAttrValues_unchangeable(self):
        toi1 = blm.testcommit.Test(name=['test'])
        cctx = self.newcontext()
        value = ['foo']
        rval = cctx.validateAttrValues(None,
                                       blm.testcommit.Test.unchangeable,
                                       ['foo'])
        assert value == rval
        toi1 = blm.testcommit.Test._query(name='test').run()[0]
        # XXX unchangeable is tested against what (if any) change
        # has been made in the toi!
        toi1.unchangeable = ['foo']
        py.test.raises(ClientError, cctx.validateAttrValues, toi1,
                       blm.testcommit.Test.unchangeable, None)

    def test_validateAttrValues_weakref(self):
        # Check that deleted tois are dropped
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'])
        toi2 = blm.testcommit.Test(name=['toi2'])
        toi3 = blm.testcommit.Test(name=['toi3'])
        toi3._delete()
        value = [toi1, toi2, toi3]
        rval = cctx.validateAttrValues(toi1, blm.testcommit.Test.weakref, value)
        assert rval == [toi1, toi2]

    def test_validateAttrValues_reorder(self):
        # Reorder attrs accept permutations of the stored value only.
        toi1 = blm.testcommit.Test(name=['toi1'], reorder=['a', 'b', 'c'])
        self.sync()
        cctx = self.newcontext()
        toi1, = blm.testcommit.Test._query(name='toi1').run()
        py.test.raises(ClientError, cctx.validateAttrValues, toi1,
                       toi1.reorder, ['a'])
        value = ['c', 'b', 'a']
        rval = cctx.validateAttrValues(toi1, toi1.reorder, value)
        assert value == rval

    def test_validateAttrValues_unique(self):
        # A unique value may be kept by its owner but not duplicated elsewhere.
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'], unique=['toi1'])
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       toi1.unique, ['toi1'])
        value = ['toi1']
        rval = cctx.validateAttrValues(toi1, toi1.unique, value)
        assert rval == value

    def test_validateAttrValues_simple_toitype(self):
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'])
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       toi1.simpleToiType, [toi1])
        toi1.name = ['test']
        value = [toi1]
        rval = cctx.validateAttrValues(None, toi1.simpleToiType, value)
        assert value == rval

    def test_validateAttrValues_toiref_exists(self):
        # pre=True allows not-yet-committed (phantom) tois; pre=False does not.
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'])
        phantom = blm.testcommit.Test._create(ObjectId())
        value = [phantom]
        rval = cctx.validateAttrValues(None, toi1.toiref, value, pre=True)
        assert rval == value
        # do not accept phantom tois in database
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       toi1.toiref, value, pre=False)

    def test_validateAttrValues_complex_toitype(self):
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'])
        toi2 = blm.testcommit.Other(name=['toi2'])
        toi3 = blm.testcommit.Test(name=['toi3'], toiref=[toi2])
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       toi1.complexToiType, [toi1])
        toi1.complexToiType = [toi3]
        py.test.raises(ClientError, cctx.validateAttrValues, None,
                       toi1.complexToiType, [toi3])
        q = blm.testcommit.Test._query(
            toiref=blm.testcommit.Other._query(name='test'),
            id=[toi3])
        toi2.name = ['test']
        value = [toi3]
        rval = cctx.validateAttrValues(None, toi1.complexToiType, value)
        assert rval == value

    def test_validateAttrValues_toirefmap(self):
        cctx = self.newcontext()
        toi1 = blm.testcommit.Test(name=['toi1'])
        toi2 = blm.testcommit.Test(name=['toi2'], toirefmap={'toi1': toi1})
        value = {'foo': toi2}
        rval = cctx.validateAttrValues(None, toi1.toirefmap, value)
        assert rval == value

    def test_findRelatedAttr(self):
        cctx = self.newcontext()
        toi1 = blm.testcommit.Other(name=['other'])
        toi2 = blm.testcommit.Related()
        rval = cctx.findRelatedAttr(toi1, toi2, toi1.related)
        assert rval == toi2.other

    def test_updateRelations(self):
        # The commit machinery must keep both ends of a relation in sync.
        toi1 = blm.testcommit.Other(name=['other'])
        toi2 = blm.testcommit.Related(name=['related'], other=[toi1])
        toi1.related = [toi2]  # Simple commit context doesn't fix this
        self.sync()
        cctx = self.newcontext()
        toi1 = blm.testcommit.Other._query().run()[0]
        toi2 = blm.testcommit.Related._query(name=['related']).run()[0]
        toi3 = blm.testcommit.Related(name=['releated3'], other=[toi1])
        self.commit()
        toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]
        assert toi1.related == [toi2, toi3]
        toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]
        toi2.other = []
        self.commit()
        toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]
        assert toi1.related == [toi3]
        toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]
        toi2._orgAttrData['other'] = []  # pretend it changed in DB
        toi2.other = [toi1]
        self.commit()
        toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]
        assert toi1.related == [toi3, toi2]
        toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]
        toi2._orgAttrData['other'] = [toi1]
        toi2.other = []
        toi2._delete()
        self.commit()
        toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]
        assert toi1.related == [toi3]

    def test_commitRelations(self):
        # Strong and weak relations both reflect deletions after commit.
        self.newcontext()
        toi1 = blm.testcommit.Other(name=['other'])
        toi2 = blm.testcommit.Related(name=['related'], other=[toi1])
        self.commit()
        toi1, = blm.testcommit.Other._query(id=toi1.id).run()
        assert toi1.related == [toi2]
        toi2, = blm.testcommit.Related._query(id=toi2.id).run()
        toi2.other = []
        toi2._delete()
        self.commit()
        toi1, = blm.testcommit.Other._query(id=toi1.id).run()
        assert toi1.related == []
        self.newcontext()
        toi1 = blm.testcommit.OtherWeak()
        toi2 = blm.testcommit.Related(name=['related'], weak=[toi1])
        self.commit()
        toi1, = blm.testcommit.OtherWeak._query(id=toi1.id).run()
        assert toi1.related == [toi2]
        toi2, = blm.testcommit.Related._query(id=toi2.id).run()
        toi2._delete()
        self.commit()
        toi1, = blm.testcommit.OtherWeak._query(id=toi1.id).run()
        assert toi1.related == []

    def test_updateBlobs(self):
        # BlobVal reference counts follow toi attribute changes and deletes.
        self.newcontext()
        val1 = BlobVal('foo')
        val1.large_blob = 2
        toi = blm.testcommit.Test(blob=[val1])
        self.commit()
        assert val1.references == {toi.id[0]}
        self.newcontext()
        ref = ObjectId()
        val1.addref(ref)
        self.sync()
        toi, = blm.testcommit.Test._query().run()
        val1 = toi.blob[0]
        val2 = BlobVal('foo')
        val2.large_blob = 2
        toi.blob = [val2]
        self.commit()
        self.sync()
        assert val1.references == {ref}
        assert val2.references == {toi.id[0]}
        self.newcontext()
        toi, = blm.testcommit.Test._query().run()
        val2 = toi.blob[0]
        toi._delete()
        self.commit()
        self.sync()
        assert val2.references == set()
        py.test.raises(Exception,
                       gridfs.GridFS(self.database, 'blobvals').get,
                       val2.value._id)

    def test_runAfterCommit(self):
        py.test.xfail("post-commit hooks not supported")
        callbackCalled = []

        def callback(tid, *args, **kw):
            callbackCalled.append((tid, args, kw))

        class Op(commit.OperateBase):
            def checkPermissions(self, context):
                pass

            def operate(self, context):
                context.runAfterCommit(callback, 42, foo='bar')

        cctx = commit.CommitContext(self.database)
        ContextBroker().pushContext(cctx)
        cctx.setMayChange(True)
        results = cctx.runCommit([Op()])
        assert callbackCalled == [(None, (42,), {'foo': 'bar'})]

    def test_runAfterCommitFailing(self):
        # NOTE(review): source fragment is truncated mid-assert below;
        # trailing text kept exactly as found.
        py.test.xfail("post-commit hooks not supported")
        callbackCalled = []

        def callback(tid, *args, **kw):
            callbackCalled.append((tid, args, kw))
            raise RuntimeError('error')

        def callback2(tid, *args, **kw):
            callbackCalled.append((tid, args, kw))

        class Op(commit.OperateBase):
            def checkPermissions(self, context):
                pass

            def operate(self, context):
                context.runAfterCommit(callback, 42, foo='bar')
                context.runAfterCommit(callback2, 43)

        cctx = commit.CommitContext(self.database)
        ContextBroker().pushContext(cctx)
        cctx.setMayChange(True)
        results = cctx.runCommit([Op()])
        assert callbackCalled == [(None, (42,), {'foo':'bar'}), (None, (43,),
# Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default regression functions for PrettyTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

import tensorflow as tf

from prettytensor import functions
from prettytensor import bookkeeper
from prettytensor import parameters
from prettytensor import pretty_tensor_class as prettytensor
from prettytensor.pretty_tensor_class import Phase
from prettytensor.pretty_tensor_class import PROVIDED


class SoftmaxResult(
    collections.namedtuple('SoftmaxResult', ['softmax', 'loss']),
    prettytensor.PrettyTensorTupleMixin):
  """Holds a softmax activation and a cross entropy loss.

  This also provides binding and construction if the result contains
  a template.
  """
  pass


class SampledSoftmaxResult(
    collections.namedtuple('SoftmaxResult', ['logits', 'loss']),
    prettytensor.PrettyTensorTupleMixin):
  """Holds logits and a sampled cross entropy loss.

  This also provides binding and construction if the result contains
  a template.
  """
  pass


def _convert_and_assert_tensors_compatible(input_, target):
  # Coerce target to input_'s dtype and require compatible static shapes.
  target = tf.convert_to_tensor(target, dtype=input_.dtype)
  if not input_.get_shape().is_compatible_with(target.get_shape()):
    raise ValueError('target and input_ are not compatible: %s != %s' %
                     (input_.get_shape(), target.get_shape()))
  return target


def _convert_and_assert_per_example_weights_compatible(
    input_, per_example_weights, dtype):
  """Converts per_example_weights to a tensor and validates the shape."""
  per_example_weights = tf.convert_to_tensor(
      per_example_weights, name='per_example_weights', dtype=dtype)
  if input_.get_shape().ndims:
    expected_length = input_.get_shape().dims[0]
    message = ('per_example_weights must have rank 1 and length %s, but was: %s'
               % (expected_length, per_example_weights.get_shape()))
  else:
    # Input rank unknown: batch length cannot be checked statically.
    expected_length = None
    message = ('per_example_weights must have rank 1 and length equal to the '
               'first dimension of inputs (unknown), but was: %s'
               % per_example_weights.get_shape())
  if per_example_weights.get_shape().ndims not in (1, None):
    raise ValueError(message)
  if not per_example_weights.get_shape().is_compatible_with(
      (expected_length,)):
    raise ValueError(message)
  return per_example_weights


def apply_regression(input_,
                     regression_fn,
                     target,
                     regression_args=(),
                     regression_kwargs=None,
                     name=PROVIDED,
                     loss_weight=None,
                     per_example_weights=None):
  """Applies the given regression and adds the loss to the bookkeeper.

  This does not change tensor.

  Args:
    input_: A Tensor or a Pretty Tensor holding the input.
    regression_fn: A function that takes (in order) tensor, labels.
    target: The targe of the regression.
    regression_args: Other arguments for the regression.
    regression_kwargs: Keyword args for the regression.
    name: The name, also added to regression_kwargs.
    loss_weight: A scalar multiplier for the loss.
    per_example_weights: A Tensor with a weight per example.
  Returns:
    The loss tensor's name.
  Raises:
    ValueError: If the target is not a compatible shape with input_.
  """
  if regression_kwargs is None:
    regression_kwargs = {}
  if name is not None and 'name' not in regression_kwargs:
    regression_kwargs['name'] = name
  elif name is None:
    name = input_.tensor.op.name

  tensor = input_.tensor
  loss = regression_fn(tensor, target, *regression_args, **regression_kwargs)
  if loss_weight is not None:
    loss *= loss_weight
  if per_example_weights is not None:
    per_example_weights = _convert_and_assert_per_example_weights_compatible(
        input_, per_example_weights, dtype=loss.dtype)
    loss *= per_example_weights
  # Use mean so that the learning rate is independent of the batch size.
  if name is None:
    name = loss.op.name
  if tensor.get_shape()[0].value is not None:
    # Try to use division instead of reduce_mean because reduce_mean doesn't
    # work on GPU.
    avg_loss = tf.reduce_sum(loss) / tensor.get_shape()[0].value
  else:
    avg_loss = tf.reduce_mean(loss)
  return input_.add_loss(avg_loss, name=name)


@prettytensor.Register
def l2_regression(input_,
                  target,
                  name=PROVIDED,
                  loss_weight=None,
                  per_example_weights=None):
  """Applies an L2 Regression (Sum of Squared Error) to the target."""
  target = _convert_and_assert_tensors_compatible(input_, target)
  return apply_regression(input_,
                          functions.l2_regression_sq_loss,
                          target,
                          [],
                          name='%s_loss' % name,
                          loss_weight=loss_weight,
                          per_example_weights=per_example_weights)


@prettytensor.Register
def l1_regression(input_,
                  target,
                  name=PROVIDED,
                  loss_weight=None,
                  per_example_weights=None):
  """Applies an L1 Regression (Sum of Absolute Error) to the target."""
  target = _convert_and_assert_tensors_compatible(input_, target)
  return apply_regression(input_,
                          functions.l1_regression_loss,
                          target,
                          [],
                          name='%s_loss' % name,
                          loss_weight=loss_weight,
                          per_example_weights=per_example_weights)


@prettytensor.Register
def softmax_activation(input_):
  """Computes the softmax.

  Args:
    input_: A rank 2 `Tensor` or a Pretty Tensor holding the logits.
  Returns:
    A new Pretty Tensor with the softmax applied.
  """
  return input_.with_tensor(tf.nn.softmax(input_))


@prettytensor.Register
def cross_entropy(input_,
                  labels,
                  name=PROVIDED,
                  loss_weight=None,
                  per_example_weights=None):
  """Calculates the Cross Entropy of input_ vs labels.

  Args:
    input_: A rank 2 `Tensor` or a Pretty Tensor holding the logits.
    labels: A rank 2 tf.float32 or tf.float64 tensor containing the labels.
    name: The optional name.
    loss_weight: A weight to scale the loss. Used when there are multiple
      losses.
    per_example_weights: A weighting for each example.
  Returns:
    A loss.
  Raises:
    ValueError: if labels is None or the type is not float or double.
  """
  if labels is None:
    raise ValueError('Labels must be set')
  labels = _convert_and_assert_tensors_compatible(input_, labels)
  if per_example_weights is not None:
    per_example_weights = _convert_and_assert_per_example_weights_compatible(
        input_, per_example_weights, dtype=input_.dtype)

  # Track average accuracy alongside the loss when statically computable.
  # NOTE(review): _compute_average_correct is defined elsewhere in this module
  # (outside this fragment).
  correct_predictions, examples = _compute_average_correct(
      input_, labels, per_example_weights)
  correct_ratio = correct_predictions / examples
  if correct_ratio.get_shape().is_fully_defined():
    input_.bookkeeper.add_average_summary(
        correct_ratio, 'average_accuracy_%s' % name)
  return apply_regression(
      input_,
      tf.contrib.nn.deprecated_flipped_softmax_cross_entropy_with_logits,
      labels,
      [],
      name='%s_loss' % name,
      loss_weight=loss_weight,
      per_example_weights=per_example_weights)


@prettytensor.Register
def sparse_cross_entropy(input_,
                         labels,
                         name=PROVIDED,
                         loss_weight=None,
                         per_example_weights=None):
  """Calculates the Cross Entropy of input_ vs labels.

  Args:
    input_: A rank 2 `Tensor` or a Pretty Tensor holding the logits.
    labels: A rank 1 integer `Tensor` with class ordinals
    name: The optional name.
    loss_weight: A weight to scale the loss. Used when there are multiple
      losses.
    per_example_weights: A weighting for each example.
  Returns:
    A loss.
  Raises:
    ValueError: if labels is None or the type is not float or double.
  """
  if labels is None:
    raise ValueError('Labels must be set')
  if per_example_weights is not None:
    per_example_weights = _convert_and_assert_per_example_weights_compatible(
        input_, per_example_weights, dtype=input_.dtype)
  return apply_regression(
      input_,
      tf.contrib.nn
      .deprecated_flipped_sparse_softmax_cross_entropy_with_logits,
      labels,
      [],
      name='%s_loss' % name,
      loss_weight=loss_weight,
      per_example_weights=per_example_weights)


@prettytensor.Register
def binary_cross_entropy_with_logits(input_,
                                     target,
                                     name=PROVIDED,
                                     loss_weight=None,
                                     per_example_weights=None,
                                     per_output_weights=None):
  """Calculates the binary cross entropy of the input_ vs inputs.

  Expects unscaled logits. Do not pass in results of sigmoid operation.

  Args:
    input_: A rank 2 Tensor or a Pretty Tensor holding the logits.
    target: A rank 2 tf.float32 or tf.float64 tensor containing class label
      probabilities. Note that binary cross entropy is equivalent to logistic
      loss.
    name: The optional name.
    loss_weight: A scalar multiplier for the loss.
    per_example_weights: A `Tensor` with a weight per example.
    per_output_weights: A weight `Tensor` that is the same shape as the
      input_ that can be used to scale individual prediction losses. See
      `tf.tile` to turn a per-column weight vector into a
      `per_output_weights` `Tensor`.
  Returns:
    Binary cross entropy loss after sigmoid operation.
  Raises:
    ValueError: if target is None or the type is not float or double.
  """
  if target is None:
    raise ValueError('target must be set')
  target = _convert_and_assert_tensors_compatible(input_, target)

  with tf.name_scope('stats'):
    # Precision/recall summaries at threshold 0 (logit > 0 <=> prob > 0.5).
    # NOTE(review): _compute_precision_recall is defined elsewhere in this
    # module (outside this fragment).
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_, target, 0, per_example_weights)
    precision = selected / sum_retrieved
    recall = selected / sum_relevant
    if precision.get_shape().is_fully_defined():
      input_.bookkeeper.add_average_summary(
          precision, 'average_precision_%s' % name)
    if recall.get_shape().is_fully_defined():
      input_.bookkeeper.add_average_summary(
          recall, 'average_recall_%s' % name)
    input_.bookkeeper.add_scalar_summary(
        tf.reduce_sum(tf.to_float(tf.greater(input_, 0))), 'activations')

  if per_output_weights is not None:
    per_output_weights = tf.convert_to_tensor(
        per_output_weights,
        name='per_output_weights',
        dtype=input_.dtype.base_dtype)
    input_.get_shape().assert_is_compatible_with(
        per_output_weights.get_shape())

  def _batch_sum_bce(x, target, name='binary_cross_entropy'):
    # NOTE: the 'target' parameter shadows the outer one; apply_regression
    # always calls this with the same target captured above.
    logits = functions.binary_cross_entropy_loss_with_logits(x, target,
                                                             name=name)
    if per_output_weights is not None:
      logits *= per_output_weights
    return functions.reduce_batch_sum(logits)

  return apply_regression(
      input_,
      _batch_sum_bce,
      target,
      [],
      name='%s_bce_loss' % name,
      loss_weight=loss_weight,
      per_example_weights=per_example_weights)


@prettytensor.RegisterCompoundOp(assign_defaults=('parameter_modifier',))
def softmax_classifier_with_sampled_loss(inputs,
                                         num_classes,
                                         labels,
                                         num_sampled,
                                         num_true=None,
                                         sampled_values=None,
                                         remove_accidental_hits=True,
                                         loss_weight=None,
                                         per_example_weights=None,
                                         weights=None,
                                         bias=tf.zeros_initializer(),
                                         parameter_modifier=parameters.identity,
                                         name='softmax_classifier'):
  """Applies softmax and if labels is not None, then it adds a sampled loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes. It is generally an underestimate of the full softmax loss.

  At inference time, you can compute full softmax probabilities with the
  expression `tf.nn.softmax(tf.matmul(inputs, weights) + biases)`.

  See `tf.nn.sampled_softmax_loss` for more details.

  Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Note: If you depend on the softmax part of the loss, then you will lose most
  of the speed benefits of sampling the loss. It should be used for evaluation
  only and not executed on every update op.

  Note: This is not checkpoint compatible with `softmax_classifier` since it
  optimizes a transpose by pushing it down to the `fully_connected` layer.

  Args:
    inputs: A `Tensor` of shape `[batch_size, dim]`.  The forward
      activations of the input network.
    num_classes: An `int`. The number of possible classes.
    labels: A `Tensor` of type `int64` and shape `[batch_size,
      num_true]`. The target classes.  Note that this format differs from
      the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
    num_sampled: An `int`.  The number of classes to randomly sample per batch.
    num_true: An `int`.  The number of target classes per training example,
      defaults to the second dim
= True Locations.formDetails[25][2] = True Locations.formDetails[26][2] = True Locations.formDetails[27][2] = True Locations.formDetails[28][2] = True Locations.formDetails[29][2] = True self.action.checkForm(Locations.formDetails, (), () ) sel.type("gis_location_lat", "51") sel.type("gis_location_lon", "1") # Open Converter sel.click("gis_location_converter-btn") # Check it's now visible time.sleep(1) self.failUnless(sel.is_visible("gis-convert-win")) # @ToDo: Use this to do a conversion # Close Converter sel.click("//div[@id='gis-convert-win']/div/div/div/div/div[contains(@class, 'x-tool-close')]") # Check it's not visible self.failIf(sel.is_visible("gis-convert-win")) # Fill in Lat & Lon sel.type("gis_location_lat", "51") sel.type("gis_location_lon", "1") self.action.saveForm("Shelter updated") Locations.line.append(L0b) # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : "%s (N 51.0 E 1.0)" % L0b, }) self.initFormDetails() location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] Locations.formDetails[0][3] = location_id Locations.formDetails[13][2] = True Locations.formDetails[14][2] = True Locations.formDetails[17][2] = True Locations.formDetails[25][3] = '51.0' Locations.formDetails[27][3] = '1.0' self.action.checkForm(Locations.formDetails, (), () ) # Click on 'Details' button sel.click("gis_location_details-btn") Locations.formDetails[20][2] = True Locations.formDetails[20][3] = address Locations.formDetails[21][2] = True Locations.formDetails[22][2] = True Locations.formDetails[23][2] = True Locations.formDetails[28][2] = True Locations.formDetails[29][2] = True self.action.checkForm(Locations.formDetails, (), () ) # Open the Advanced Tab sel.click("gis_location_advanced_checkbox") Locations.formDetails[22][2] = True Locations.formDetails[23][2] = True Locations.formDetails[24][2] = True Locations.formDetails[25][2] = True 
Locations.formDetails[26][2] = True Locations.formDetails[27][2] = True self.action.checkForm(Locations.formDetails, (), () ) self.action.saveForm("Shelter updated") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : "%s (N 51.0 E 1.0)" % L0b, }) # Select the L0 sel.select("gis_location_L0", "label=Haiti") self.initFormDetails() Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True self.action.checkForm(Locations.formDetails, (), () ) self.action.saveForm("Shelter updated") def test_locationL0(self): """ Create a new Shelter with an L0 location """ # Create the name variables shelterName = "Shelter with an L0 Location" L0 = "Haiti" sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=%s" % L0) # Save the form Locations.shelter.append(shelterName) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : L0, }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") self.initFormDetails() location_id = location.split("(")[1].split(")")[0] Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationInL0(self): """ Create a new Shelter inside an L0 location NB This should fail if deployment_settings.gis.strict_hierarchy = True """ # Create the name variables shelterName = "Shelter within L0 Location" L1 = self.makeNameUnique("Specific Location in L0") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # Create a new location sel.click("gis_location_add-btn") sel.type("gis_location_name", L1) # Save the form 
Locations.shelter.append(shelterName) Locations.line.append(L1) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : L1, }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[13][2] = True Locations.formDetails[14][2] = True Locations.formDetails[17][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationL1(self): """ Create a new Shelter with an L1 location """ # Create the name variables shelterName = "Shelter with an L1 Location" L1 = self.makeNameUnique("Ouest") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L1", "label=%s" % L1) break except: time.sleep(1) # Save the form Locations.shelter.append(shelterName) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : "%s (Province, Haiti)" % L1, }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] self.initFormDetails() Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[7][2] = True Locations.formDetails[8][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationInL1(self): """ Create a new Shelter inside an L1 location """ # Create the name variables shelterName = "Shelter within L1 Location" L1a = self.makeNameUnique("Ouest") L1b = self.makeNameUnique("Specific Location in 
L1") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to appear time.sleep(4) for i in range(10): if sel.is_visible("gis_location_L1"): break time.sleep(1) self.assertTrue(sel.is_visible("gis_location_L1")) # Select the L1 sel.select("gis_location_L1", "label=%s" % L1a) # Create a new location sel.click("gis_location_add-btn") sel.type("gis_location_name", L1b) # Save the form Locations.shelter.append(shelterName) Locations.line.append(L1b) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : L1b, }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[7][2] = True Locations.formDetails[8][2] = True Locations.formDetails[13][2] = True Locations.formDetails[14][2] = True Locations.formDetails[17][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationL2(self): """ Create a new Shelter with an L2 location """ # Create the name variables shelterName = "Shelter with an L2 Location" L1 = self.makeNameUnique("Ouest") L2 = self.makeNameUnique("Port-Au-Prince") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L1", "label=%s" % L1) break except: time.sleep(1) # wait for the L2 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L2", "label=%s" % L2) break except: time.sleep(1) # Save the form 
Locations.shelter.append(shelterName) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : "%s" % shelterName, "Location:" : "%s (District, %s)" % (L2, L1), }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] self.initFormDetails() Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[7][2] = True Locations.formDetails[8][2] = True Locations.formDetails[9][2] = True Locations.formDetails[10][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationInL2(self): """ Create a new Shelter inside an L2 location """ # Create the name variables shelterName = "Shelter within L2 Location" L1 = self.makeNameUnique("Ouest") L2a = self.makeNameUnique("Port-Au-Prince") L2b = self.makeNameUnique("Specific Location in L2") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L1", "label=%s" % L1) break except: time.sleep(1) # wait for the L2 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L2", "label=%s" % L2a) break except: time.sleep(1) # Create a new location sel.click("gis_location_add-btn") sel.type("gis_location_name", L2b) # Save the form Locations.shelter.append(shelterName) Locations.line.append(L2b) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : shelterName, "Location:" : L2b, }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] self.initFormDetails() Locations.formDetails[0][3] = location_id 
Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[7][2] = True Locations.formDetails[8][2] = True Locations.formDetails[9][2] = True Locations.formDetails[10][2] = True Locations.formDetails[13][2] = True Locations.formDetails[14][2] = True Locations.formDetails[17][2] = True self.action.checkForm(Locations.formDetails, (), () ) def test_locationL3(self): """ Create a new Shelter with an L3 location """ # Create the name variables shelterName = "Shelter with an L3 Location" L1 = self.makeNameUnique("Ouest") L2 = self.makeNameUnique("Port-Au-Prince") L3 = self.makeNameUnique("Martissant") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L1", "label=%s" % L1) break except: time.sleep(1) # wait for the L2 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L2", "label=%s" % L2) break except: time.sleep(1) # wait for the L3 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L3", "label=%s" % L3) break except: time.sleep(1) # Save the form Locations.shelter.append(shelterName) self.action.saveForm("Shelter added") # Load again self.openRecord(shelterName) self.action.checkHeading({"Name:" : "%s" % shelterName, "Location:" : "%s (Town, %s)" % (L3, L2), }) location = sel.get_attribute("//a[starts-with(@onclick, 's3_viewMap')]/@onclick") location_id = location.split("(")[1].split(")")[0] self.initFormDetails() Locations.formDetails[0][3] = location_id Locations.formDetails[5][2] = True Locations.formDetails[6][2] = True Locations.formDetails[7][2] = True Locations.formDetails[8][2] = True Locations.formDetails[9][2] = True Locations.formDetails[10][2] = True Locations.formDetails[11][2] = True Locations.formDetails[12][2] = 
True self.action.checkForm(Locations.formDetails, (), () ) def test_locationInL3(self): """ Create a new Shelter inside an L3 location """ # Create the name variables shelterName = "Shelter within L3 Location" L1 = self.makeNameUnique("Ouest") L2 = self.makeNameUnique("Port-Au-Prince") L3a = self.makeNameUnique("Martissant") L3b = self.makeNameUnique("Specific Location in L3") sel = self.selenium sel.open("cr/shelter/create") # Fill in the mandatory fields sel.type("cr_shelter_name", shelterName) # Select the L0 sel.select("gis_location_L0", "label=Haiti") # wait for the L1 list to be populated for i in range(10): try: # Select the L1 sel.select("gis_location_L1", "label=%s" % L1) break except: time.sleep(1) # wait for the L2 list to
if taxes_ref is None: taxes_ref = {} if not code_digits: code_digits = self.code_digits AccountTaxObj = self.env['account.tax'] # Generate taxes from templates. generated_tax_res = self.with_context(active_test=False).tax_template_ids._generate_tax(company) taxes_ref.update(generated_tax_res['tax_template_to_tax']) # Generating Accounts from templates. account_template_ref = self.generate_account(taxes_ref, account_ref, code_digits, company) account_ref.update(account_template_ref) # writing account values after creation of accounts for key, value in generated_tax_res['account_dict']['account.tax'].items(): if value['cash_basis_transition_account_id'] or value['cash_basis_base_account_id']: AccountTaxObj.browse(key).write({ 'cash_basis_transition_account_id': account_ref.get(value['cash_basis_transition_account_id'], False), 'cash_basis_base_account_id': account_ref.get(value['cash_basis_base_account_id'], False), }) AccountTaxRepartitionLineObj = self.env['account.tax.repartition.line'] for key, value in generated_tax_res['account_dict']['account.tax.repartition.line'].items(): if value['account_id']: AccountTaxRepartitionLineObj.browse(key).write({ 'account_id': account_ref.get(value['account_id']), }) # Set the company accounts self._load_company_accounts(account_ref, company) # Create Journals - Only done for root chart template if not self.parent_id: self.generate_journals(account_ref, company) # generate properties function self.generate_properties(account_ref, company) # Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates self.generate_fiscal_position(taxes_ref, account_ref, company) # Generate account operation template templates self.generate_account_reconcile_model(taxes_ref, account_ref, company) return account_ref, taxes_ref def _load_company_accounts(self, account_ref, company): # Set the default accounts on the company accounts = { 'default_cash_difference_income_account_id': 
self.default_cash_difference_income_account_id.id, 'default_cash_difference_expense_account_id': self.default_cash_difference_expense_account_id.id, 'account_default_pos_receivable_account_id': self.default_pos_receivable_account_id.id, } values = {} # The loop is to avoid writing when we have no values, thus avoiding erasing the account from the parent for key, account in accounts.items(): if account_ref.get(account): values[key] = account_ref.get(account) company.write(values) def create_record_with_xmlid(self, company, template, model, vals): return self._create_records_with_xmlid(model, [(template, vals)], company).id def _create_records_with_xmlid(self, model, template_vals, company): """ Create records for the given model name with the given vals, and create xml ids based on each record's template and company id. """ if not template_vals: return self.env[model] template_model = template_vals[0][0] template_ids = [template.id for template, vals in template_vals] template_xmlids = template_model.browse(template_ids).get_external_id() data_list = [] for template, vals in template_vals: module, name = template_xmlids[template.id].split('.', 1) xml_id = "%s.%s_%s" % (module, company.id, name) data_list.append(dict(xml_id=xml_id, values=vals, noupdate=True)) return self.env[model]._load_records(data_list) @api.model def _load_records(self, data_list, update=False): # When creating a chart template create, for the liquidity transfer account # - an account.account.template: this allow to define account.reconcile.model.template objects refering that liquidity transfer # account although it's not existing in any xml file # - an entry in ir_model_data: this allow to still use the method create_record_with_xmlid() and don't make any difference between # regular accounts created and that liquidity transfer account records = super(AccountChartTemplate, self)._load_records(data_list, update) account_data_list = [] for data, record in zip(data_list, records): # Create the 
transfer account only for leaf chart template in the hierarchy. if record.parent_id: continue if data.get('xml_id'): account_xml_id = data['xml_id'] + '_liquidity_transfer' if not self.env.ref(account_xml_id, raise_if_not_found=False): account_vals = record._prepare_transfer_account_template() account_data_list.append(dict( xml_id=account_xml_id, values=account_vals, noupdate=data.get('noupdate'), )) self.env['account.account.template']._load_records(account_data_list, update) return records def _get_account_vals(self, company, account_template, code_acc, tax_template_ref): """ This method generates a dictionary of all the values for the account that will be created. """ self.ensure_one() tax_ids = [] for tax in account_template.tax_ids: tax_ids.append(tax_template_ref[tax.id]) val = { 'name': account_template.name, 'currency_id': account_template.currency_id and account_template.currency_id.id or False, 'code': code_acc, 'user_type_id': account_template.user_type_id and account_template.user_type_id.id or False, 'reconcile': account_template.reconcile, 'note': account_template.note, 'tax_ids': [(6, 0, tax_ids)], 'company_id': company.id, 'tag_ids': [(6, 0, [t.id for t in account_template.tag_ids])], 'group_id': account_template.group_id.id, } return val def generate_account(self, tax_template_ref, acc_template_ref, code_digits, company): """ This method generates accounts from account templates. :param tax_template_ref: Taxes templates reference for write taxes_id in account_account. :param acc_template_ref: dictionary containing the mapping between the account templates and generated accounts (will be populated) :param code_digits: number of digits to use for account code. :param company_id: company to generate accounts for. :returns: return acc_template_ref for reference purpose. 
:rtype: dict """ self.ensure_one() account_tmpl_obj = self.env['account.account.template'] acc_template = account_tmpl_obj.search([('nocreate', '!=', True), ('chart_template_id', '=', self.id)], order='id') template_vals = [] for account_template in acc_template: code_main = account_template.code and len(account_template.code) or 0 code_acc = account_template.code or '' if code_main > 0 and code_main <= code_digits: code_acc = str(code_acc) + (str('0'*(code_digits-code_main))) vals = self._get_account_vals(company, account_template, code_acc, tax_template_ref) template_vals.append((account_template, vals)) accounts = self._create_records_with_xmlid('account.account', template_vals, company) for template, account in zip(acc_template, accounts): acc_template_ref[template.id] = account.id return acc_template_ref def _prepare_reconcile_model_vals(self, company, account_reconcile_model, acc_template_ref, tax_template_ref): """ This method generates a dictionary of all the values for the account.reconcile.model that will be created. 
""" self.ensure_one() return { 'name': account_reconcile_model.name, 'sequence': account_reconcile_model.sequence, 'has_second_line': account_reconcile_model.has_second_line, 'company_id': company.id, 'account_id': acc_template_ref[account_reconcile_model.account_id.id], 'label': account_reconcile_model.label, 'to_check': account_reconcile_model.to_check, 'amount_type': account_reconcile_model.amount_type, 'force_tax_included': account_reconcile_model.force_tax_included, 'amount': account_reconcile_model.amount, 'tax_ids': [[4, tax_template_ref[tax.id], 0] for tax in account_reconcile_model.tax_ids], 'second_account_id': account_reconcile_model.second_account_id and acc_template_ref[account_reconcile_model.second_account_id.id] or False, 'second_label': account_reconcile_model.second_label, 'second_amount_type': account_reconcile_model.second_amount_type, 'force_second_tax_included': account_reconcile_model.force_second_tax_included, 'second_amount': account_reconcile_model.second_amount, 'rule_type': account_reconcile_model.rule_type, 'auto_reconcile': account_reconcile_model.auto_reconcile, 'match_journal_ids': [(6, None, account_reconcile_model.match_journal_ids.ids)], 'match_nature': account_reconcile_model.match_nature, 'match_amount': account_reconcile_model.match_amount, 'match_amount_min': account_reconcile_model.match_amount_min, 'match_amount_max': account_reconcile_model.match_amount_max, 'match_label': account_reconcile_model.match_label, 'match_label_param': account_reconcile_model.match_label_param, 'match_note': account_reconcile_model.match_note, 'match_note_param': account_reconcile_model.match_note_param, 'match_transaction_type': account_reconcile_model.match_transaction_type, 'match_transaction_type_param': account_reconcile_model.match_transaction_type_param, 'match_same_currency': account_reconcile_model.match_same_currency, 'match_total_amount': account_reconcile_model.match_total_amount, 'match_total_amount_param': 
account_reconcile_model.match_total_amount_param, 'match_partner': account_reconcile_model.match_partner, 'match_partner_ids': [(6, None, account_reconcile_model.match_partner_ids.ids)], 'match_partner_category_ids': [(6, None, account_reconcile_model.match_partner_category_ids.ids)], 'second_tax_ids': [[4, tax_template_ref[tax.id], 0] for tax in account_reconcile_model.second_tax_ids], } def generate_account_reconcile_model(self, tax_template_ref, acc_template_ref, company): """ This method creates account reconcile models :param tax_template_ref: Taxes templates reference for write taxes_id in account_account. :param acc_template_ref: dictionary with the mapping between the account templates and the real accounts. :param company_id: company to create models for :returns: return new_account_reconcile_model for reference purpose. :rtype: dict """ self.ensure_one() account_reconcile_models = self.env['account.reconcile.model.template'].search([ ('chart_template_id', '=', self.id) ]) for account_reconcile_model in account_reconcile_models: vals = self._prepare_reconcile_model_vals(company, account_reconcile_model, acc_template_ref, tax_template_ref) self.create_record_with_xmlid(company, account_reconcile_model, 'account.reconcile.model', vals) # Create a default rule for the reconciliation widget matching invoices automatically. 
self.env['account.reconcile.model'].sudo().create({ "name": _('Invoices Matching Rule'), "sequence": '1', "rule_type": 'invoice_matching', "auto_reconcile": False, "match_nature": 'both', "match_same_currency": True, "match_total_amount": True, "match_total_amount_param": 100, "match_partner": True, "company_id": company.id, }) return True def _get_fp_vals(self, company, position): return { 'company_id': company.id, 'sequence': position.sequence, 'name': position.name, 'note': position.note, 'auto_apply': position.auto_apply, 'vat_required': position.vat_required, 'country_id': position.country_id.id, 'country_group_id': position.country_group_id.id, 'state_ids': position.state_ids and [(6,0, position.state_ids.ids)] or [], 'zip_from': position.zip_from, 'zip_to': position.zip_to, } def generate_fiscal_position(self, tax_template_ref, acc_template_ref, company): """ This method generates Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates. :param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax. :param acc_template_ref: Account templates reference for generating account.fiscal.position.account. 
:param company_id: the company to generate fiscal position data for :returns: True """ self.ensure_one() positions = self.env['account.fiscal.position.template'].search([('chart_template_id', '=', self.id)]) # first create fiscal positions in batch template_vals = [] for position in positions: fp_vals = self._get_fp_vals(company, position) template_vals.append((position, fp_vals)) fps = self._create_records_with_xmlid('account.fiscal.position', template_vals, company) # then create fiscal position taxes and accounts tax_template_vals = [] account_template_vals = [] for position, fp in zip(positions, fps): for tax in position.tax_ids: tax_template_vals.append((tax, { 'tax_src_id': tax_template_ref[tax.tax_src_id.id], 'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False, 'position_id': fp.id, })) for acc in position.account_ids: account_template_vals.append((acc, { 'account_src_id': acc_template_ref[acc.account_src_id.id], 'account_dest_id': acc_template_ref[acc.account_dest_id.id], 'position_id': fp.id, })) self._create_records_with_xmlid('account.fiscal.position.tax', tax_template_vals, company) self._create_records_with_xmlid('account.fiscal.position.account', account_template_vals, company) return True class AccountTaxTemplate(models.Model): _name = 'account.tax.template' _description = 'Templates for Taxes' _order = 'id' chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True) name = fields.Char(string='Tax Name', required=True) type_tax_use = fields.Selection(TYPE_TAX_USE, string='Tax Scope', required=True, default="sale", help="Determines where the tax is selectable. 
Note : 'None' means a tax can't be used by itself, however it can still be used in a group.") amount_type = fields.Selection(default='percent', string="Tax Computation", required=True, selection=[('group', 'Group of Taxes'), ('fixed', 'Fixed'), ('percent', 'Percentage of Price'), ('division', 'Percentage of Price Tax Included')]) active = fields.Boolean(default=True, help="Set active to false to hide the tax without removing it.") children_tax_ids = fields.Many2many('account.tax.template', 'account_tax_template_filiation_rel', 'parent_tax', 'child_tax', string='Children Taxes') sequence = fields.Integer(required=True, default=1, help="The sequence field is used to define order in which the tax lines are applied.") amount = fields.Float(required=True, digits=(16, 4), default=0) description = fields.Char(string='Display on Invoices') price_include = fields.Boolean(string='Included in Price', default=False, help="Check this if the price you use on the product and invoices includes this tax.") include_base_amount = fields.Boolean(string='Affect Subsequent Taxes', default=False, help="If set, taxes which are computed after this one will be computed based on the price tax included.") analytic = fields.Boolean(string="Analytic Cost", help="If set, the amount computed by this tax will be assigned to the same analytic account as the invoice line (if any)") invoice_repartition_line_ids = fields.One2many(string="Repartition for Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="invoice_tax_id", copy=True, help="Repartition when the tax is used on an invoice") refund_repartition_line_ids = fields.One2many(string="Repartition for Refund Invoices", comodel_name="account.tax.repartition.line.template", inverse_name="refund_tax_id", copy=True, help="Repartition when the tax is used on a refund") tax_group_id = fields.Many2one('account.tax.group', string="Tax Group") tax_exigibility = fields.Selection( [('on_invoice', 'Based on Invoice'), ('on_payment', 
'Based on Payment'), ], string='Tax Due', default='on_invoice', help="Based on Invoice: the tax is due as soon as the
<gh_stars>10-100 # Copyright 2018 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Contains the prepare_molecules definition which reads, prepares, and writes small molecules. """ import __future__ import sys import json import os from datetime import datetime from collections import OrderedDict import gypsum_dl.Utils as Utils from gypsum_dl.Parallelizer import Parallelizer from gypsum_dl.Parallelizer import flatten_list try: from rdkit.Chem import AllChem from rdkit import Chem except: Utils.exception("You need to install rdkit and its dependencies.") try: import numpy except: Utils.exception("You need to install numpy and its dependencies.") try: from scipy.cluster.vq import kmeans2 except: Utils.exception("You need to install scipy and its dependencies.") from gypsum_dl.MolContainer import MolContainer from gypsum_dl.Steps.SMILES.PrepareSmiles import prepare_smiles from gypsum_dl.Steps.ThreeD.PrepareThreeD import prepare_3d from gypsum_dl.Steps.IO.ProcessOutput import proccess_output from gypsum_dl.Steps.IO.LoadFiles import load_smiles_file from gypsum_dl.Steps.IO.LoadFiles import load_sdf_file # see http://www.rdkit.org/docs/GettingStartedInPython.html#working-with-3d-molecules def prepare_molecules(args): """A function for preparing small-molecule models for docking. To work, it requires that the python module rdkit be installed on the system. :param args: The arguments, from the commandline. :type args: dict """ # Keep track of the tim the program starts. 
start_time = datetime.now() # A list of command-line parameters that will be ignored if using a json # file. json_warning_list = [ "source", "output_folder", "num_processors", "min_ph", "max_ph", "delta_ph_increment", "thoroughness", "max_variants_per_compound", "pka_precision", ] # Whether to warn the user that the above parameters, if specified, will # be ignored. need_to_print_override_warning = False if "json" in args: # "json" is one of the parameters, so we'll be ignoring the rest. try: params = json.load(open(args["json"])) except: Utils.exception("Is your input json file properly formed?") params = set_parameters(params) if [i for i in json_warning_list if i in list(args.keys())]: need_to_print_override_warning = True else: # We're actually going to use all the command-line parameters. No # warning necessary. params = set_parameters(args) # If running in serial mode, make sure only one processor is used. if params["job_manager"] == "serial": if params["num_processors"] != 1: Utils.log( "Because --job_manager was set to serial, this will be run on a single processor." ) params["num_processors"] = 1 # Handle mpi errors if mpi4py isn't installed if params["job_manager"] == "mpi": # Before executing Parallelizer with mpi4py (which override python raise Exceptions) # We must check that it is being run with the "-m mpi4py" runpy flag sys_modules = sys.modules if "runpy" not in sys_modules.keys(): printout = "\nTo run in mpi mode you must run with -m flag. ie) mpirun -n $NTASKS python -m mpi4py run_gypsum_dl.py\n" print(printout) Utils.exception(printout) # Check mpi4py import try: import mpi4py except: printout = "\nmpi4py not installed but --job_manager is set to mpi. \n Either install mpi4py or switch job_manager to multiprocessing or serial.\n" print(printout) Utils.exception(printout) # Check mpi4py import version. 
This must be at version 2.1.0 and higher mpi4py_version = mpi4py.__version__ mpi4py_version = [int(x) for x in mpi4py_version.split(".")] if mpi4py_version[0] == 2: if mpi4py_version[1] < 1: printout = "\nmpi4py version 2.1.0 or higher is required. Use the 'python -m mpi4py' flag to run in mpi mode.\nPlease update mpi4py to a newer version, or switch job_manager to multiprocessing or serial.\n" print(printout) Utils.exception(printout) elif mpi4py_version[0] < 2: printout = "\nmpi4py version 2.1.0 or higher is required. Use the 'python -m mpi4py' flag to run in mpi mode.\nPlease update mpi4py to a newer version, or switch job_manager to multiprocessing or serial.\n" print(printout) Utils.exception(printout) # Throw a message if running on windows. Windows doesn't deal with with # multiple processors, so use only 1. if sys.platform == "win32": Utils.log( "WARNING: Multiprocessing is not supported on Windows. Tasks will be run in Serial mode." ) params["num_processors"] = 1 params["job_manager"] = "serial" # Launch mpi workers if that's what's specified. if params["job_manager"] == "mpi": params["Parallelizer"] = Parallelizer( params["job_manager"], params["num_processors"] ) else: # Lower-level mpi (i.e. making a new Parallelizer within an mpi) has # problems with importing the MPI environment and mpi4py. So we will # flag it to skip the MPI mode and just go to multiprocess/serial. # This is a saftey precaution params["Parallelizer"] = Parallelizer( params["job_manager"], params["num_processors"], True ) # Let the user know that their command-line parameters will be ignored, if # they have specified a json file. if need_to_print_override_warning == True: Utils.log("WARNING: Using the --json flag overrides all other flags.") # If running in mpi mode, separate_output_files must be set to true. if params["job_manager"] == "mpi" and params["separate_output_files"] == False: Utils.log( "WARNING: Running in mpi mode, but separate_output_files is not set to True. 
Setting separate_output_files to True anyway." ) params["separate_output_files"] = True # Outputing HTML files not supported in mpi mode. if params["job_manager"] == "mpi" and params["add_html_output"] == True: Utils.log( "WARNING: Running in mpi mode, but add_html_output is set to True. HTML output is not supported in mpi mode." ) params["add_html_output"] = False # Warn the user if he or she is not using the Durrant lab filters. if params["use_durrant_lab_filters"] ==- False: Utils.log( "WARNING: Running Gypsum-DL without the Durrant-lab filters. In looking over many Gypsum-DL-generated " + "variants, we have identified a number of substructures that, though technically possible, strike us " + "as improbable or otherwise poorly suited for virtual screening. We strongly recommend removing these " + "by running Gypsum-DL with the --use_durrant_lab_filters option.", trailing_whitespace="\n" ) # Load SMILES data if isinstance(params["source"], str): Utils.log("Loading molecules from " + os.path.basename(params["source"]) + "...") # Smiles must be array of strs. src = params["source"] if src.lower().endswith(".smi") or src.lower().endswith(".can"): # It's an smi file. smiles_data = load_smiles_file(src) elif params["source"].lower().endswith(".sdf"): # It's an sdf file. Convert it to a smiles. smiles_data = load_sdf_file(src) else: smiles_data = [params["source"]] else: pass # It's already in the required format. # Make the output directory if necessary. 
if os.path.exists(params["output_folder"]) == False: os.mkdir(params["output_folder"]) if os.path.exists(params["output_folder"]) == False: Utils.exception("Output folder directory couldn't be found or created.") # For Debugging # print("") # print("###########################") # print("num_procs : ", params["num_processors"]) # print("chosen mode : ", params["job_manager"]) # print("Parallel style: ", params["Parallelizer"].return_mode()) # print("Number Nodes: ", params["Parallelizer"].return_node()) # print("###########################") # print("") # Make the molecule containers. contnrs = [] idx_counter = 0 for i in range(0, len(smiles_data)): try: smiles, name, props = smiles_data[i] except: msg = 'Unexpected error. Does your "source" parameter specify a ' msg = msg + "filename that ends in a .can, .smi, or .sdf extension?" Utils.exception(msg) if detect_unassigned_bonds(smiles) is None: Utils.log( "WARNING: Throwing out SMILES because of unassigned bonds: " + smiles ) continue new_contnr = MolContainer(smiles, name, idx_counter, props) if ( new_contnr.orig_smi_canonical == None or type(new_contnr.orig_smi_canonical) != str ): Utils.log( "WARNING: Throwing out SMILES because of it couldn't convert to mol: " + smiles ) continue contnrs.append(new_contnr) idx_counter += 1 # Remove None types from failed conversion contnrs = [x for x in contnrs if x.orig_smi_canonical != None] if len(contnrs) != idx_counter: Utils.exception("There is a corrupted container") # In multiprocessing mode, Gypsum-DL parallelizes each small-molecule # preparation step separately. But this scheme is inefficient in MPI mode # because it increases the amount of communication required between nodes. # So for MPI mode, we will run all the preparation steps for a given # molecule container on a single thread. if params["Parallelizer"].return_mode() != "mpi": # Non-MPI (e.g., multiprocessing) execute_gypsum_dl(contnrs, params) else: # MPI mode. 
Group the molecule containers so they can be passed to the # parallelizer. job_input = [] temp_param = {} for key in list(params.keys()): if key == "Parallelizer": temp_param["Parallelizer"] = None else: temp_param[key] = params[key] for contnr in contnrs: contnr.contnr_idx = 0 # Because each container being run in isolation. job_input.append(tuple([[contnr], temp_param])) job_input = tuple(job_input) params["Parallelizer"].run(job_input, execute_gypsum_dl) # Calculate the total run time. end_time = datetime.now() run_time = end_time - start_time params["start_time"] = str(start_time) params["end_time"] = str(end_time) params["run_time"] = str(run_time) Utils.log("\nStart time at: " +
object, the associated policies are sent to agent. Agent will use this information to find out the list of policies to be applied and their sequence during flow evaluation. User can attach application tag to allowed objects (Project, VN, VM or VMI).</p> <h3 id="jd0e297">Policy-management Configuration Object</h3> <p>Policy-management is a global container object for all policy-related configuration. </p> <div style=""> <p>Policy-management object contains</p> </div> <ul> <li style=""> <p>network-policies (NPs)</p> </li> <li style=""> <p>firewall-policies (FWPs)</p> </li> <li style=""> <p>application-policy-sets</p> </li> <li style=""> <p>global-policy objects</p> </li> <li style=""> <p>global-policy-apply objects</p> </li> <li style=""> <p>NPs - List of contrail networking policy objects</p> </li> <li style=""> <p>FWPs - List of new firewall policy objects</p> </li> <li style=""> <p>Application-policies - List of Application-policy objects </p> </li> <li style=""> <p>Global-policies - List of new firewall policy objects, that are defined for global access</p> </li> <li style=""> <p>Global-policy-apply - List of global policies in a sequence, and these policies applied during flow evaluation.</p> </li> <li style=""> <p>Network Policies (NP) references are available, as they are today.</p> </li> </ul> <h3 id="jd0e339">Firewall-policy Configuration Object</h3> <p> <code class="inline" v-pre="">Firewall-policy </code> is a new policy object that contains a list of firewall-rule-objects and audited flag. Firewall-policy can be project or global scoped depending on usage. Includes an audited Boolean flag to indicate that the owner of the policy indicated that the policy is audited. Default is False, and will have to explicitly be set to True after review. Generates a log event for audited with timestamp and user details. </p> <h3 id="jd0e346">Firewall-rule Configuration Object</h3> <p>Firewall-rule is a new rule object, which contains the following fields. 
The syntax is to give information about their layout inside the rule.</p> <ul> <li style=""> <p> &lt;sequence number&gt; <br> </br> There is a string object sequence number on the link from firewall-policy to firewall-policy-rule objects. The sequence number decides the order in which the rules are applied. </p> </li> <li style=""> <p>[&lt; id &gt;] </p> <p>uuid</p> </li> <li style=""> <p>[name &lt; name &gt;] </p> <p>Unique name selected by user</p> </li> <li style=""> <p>[description &lt; description &gt;] </p> </li> <li style=""> <p>public</p> </li> <li style=""> <p>{permit | deny} </p> </li> <li style=""> <p>[ protocol {&lt; protocol-name &gt; | any } destination-port { &lt; port range &gt; | any } [ source-port { &lt; port range &gt; | any} ] ] | service-group &lt; name &gt; </p> </li> <li style=""> <p>endpoint-1 { [ip &lt; prefix &gt; ] | [virtual-network &lt; vnname &gt;] | [address-group &lt; group name &gt;] | [tags T1 == V1 &amp;&amp; T2 == V2 &#8230; &amp;&amp; Tn == Vn &amp;&amp; label == label name...] | any} </p> </li> <li style=""> <p>{ -&gt; | &lt;- | &lt;-&gt; } </p> <p>Specifies connection direction. All the rules are connection oriented and this option gives the direction of the connection.</p> </li> <li style=""> <p>endpoint-2 { [ip &lt; prefix &gt; ] | [virtual-network &lt; vnname &gt;] | [address-group &lt; group name &gt;] | [tags T1 == V1 &amp;&amp; T2 == V2 &#8230; &amp;&amp; Tn == Vn &amp;&amp; label == label name...] | any } </p> <p>Tags at endpoints support an expression of tags. We support only &#8216;==&#8216; and &#8216;&amp;&amp;&#8217; operators. User can specify labels also as part the expression. Configuration object contains list of tag names (or global:tag-name in case of global tags) for endpoints. </p> </li> <li style=""> <p>[ match_tags {T1 &#8230;. Tn} | none} ] </p> <p>List of tag types or none. User can specify either match with list of tags or none. 
Match with a list of tags means that source and destination tag values should match for the rule to take effect.</p> </li> <li style=""> <p>[ log| mirror | alert | activate | drop | reject | sdrop ] </p> <p>complex actions</p> </li> <li style=""> <p>{ enable | disable }</p> <p>A boolean flag to indicate whether the rule is enabled or disabled. This facilitates selectively turning off rules without removing them from the policy. Default is True.</p> </li> <li style=""> <p>filter </p> </li> </ul> <h4 id="jd0e410">Compilation of Rules</h4> <p>Whenever the API server receives a request to create/update a firewall policy rule object, it analyzes the object data to make sure that all virtual-networks, address-group, tag objects exist. If any of them do not exist, the request will be rejected. In addition, it will actually create a reference to those objects mentioned in the two endpoints. This achieves two purposes. First, we don't allow users to name non-existent objects in the rule and second, the user is not allowed to delete those objects without first removing them from all rules that are referring to them. </p> <p></p> <h2 id="jd0e416">Using the Contrail Web User Interface to Manage Security Policies</h2> <ul> <li style=""> <p> <a href="security-policy-enhancements.html#jd0e421">Adding Security Policies</a> </p> </li> <li style=""> <p> <a href="security-policy-enhancements.html#jd0e495">Managing Policy Tags</a> </p> </li> <li style=""> <p> <a href="security-policy-enhancements.html#jd0e528">Viewing Global Policies</a> </p> </li> <li style=""> <p> <a href="security-policy-enhancements.html#jd0e566">Visualizing Traffic Groups</a> </p> </li> </ul> <h3 id="jd0e421">Adding Security Policies</h3> <div style=""></div> <ol type="1"> <li id="jd0e426" style=""> To add a security policy, go to <strong v-pre="">Configure &gt; Security &gt; Global Policies</strong> . Near the upper right, click the button <strong v-pre="">Firewall Policy Wizard</strong> . 
The <strong v-pre="">Firewall Policy Wizard</strong> appears, where you can create your new firewall policy by adding or selecting an application policy set. See <a href="security-policy-enhancements.html#fw1">Figure&nbsp;1</a> . <figure id="fw1"> <figurecaption>Figure 1: Firewall Policy Wizard</figurecaption> <div class="graphic"> <img alt="Firewall Policy Wizard" src="/documentation/images/s019913.png" style=""> </img> </div> </figure> </li> <li id="jd0e444" style=""> Click the large + on the Firewall Policy Wizard screen to view the <strong v-pre=""> Application Policy Sets</strong> window. The existing application policy sets are displayed. See <a href="security-policy-enhancements.html#fw2">Figure&nbsp;2</a> . <figure id="fw2"> <figurecaption>Figure 2: Application Policy Sets</figurecaption> <div class="graphic"> <img alt="Application Policy Sets" src="/documentation/images/s019914.png" style=""> </img> </div> </figure> </li> <li id="jd0e456" style=""> To create a new firewall policy, click the application policy set in the list to which the new firewall policy will belong. The <strong v-pre="">Edit Application Policy Sets </strong> window appears, displaying a field for the description of the selected policy set and listing firewall policies associated with the set. See <a href="security-policy-enhancements.html#fw3">Figure&nbsp;3</a> , where the <strong v-pre="">HRPolicySet</strong> has been selected. <figure id="fw3"> <figurecaption>Figure 3: Edit Application Policy Sets</figurecaption> <div class="graphic"> <img alt="Edit Application Policy Sets" src="/documentation/images/s019915.png" style=""> </img> </div> </figure> </li> <li id="jd0e471" style=""> To view all firewall policies, click the Application Policy Sets link in the left side. <p> See <a href="security-policy-enhancements.html#fw4">Figure&nbsp;4</a> . 
</p> <figure id="fw4"> <figurecaption>Figure 4: All Firewall Policies</figurecaption> <div class="graphic"> <img alt="All Firewall Policies" src="/documentation/images/s019916.png" style=""> </img> </div> </figure> </li> <li id="jd0e482" style=""> Select any listed firewall policy to view or edit the rules associated with that policy. See <a href="security-policy-enhancements.html#fw5">Figure&nbsp;5</a> , where all the rules for the <strong v-pre="">AdminPolicy</strong> are listed. Use the dropdown menus in each field to add or change policy rules, and use the +, - icons to the right of each rule to add or delete the rule. <figure id="fw5"> <figurecaption>Figure 5: Firewall Policy Rules</figurecaption> <div class="graphic"> <img alt="Firewall Policy Rules" src="/documentation/images/s019917.png" style=""> </img> </div> </figure> </li> </ol> <h3 id="jd0e495">Managing Policy Tags</h3> <p>You can use the Contrail web user interface to create and manage the tags used to provide granularity to security policies. You can have global tags, applicable to the entire system, or project tags, defined for specific uses in specific projects.</p> <ol type="1"> <li id="jd0e501" style=""> To manage policy tags, go to <strong v-pre="">Configure &gt; Tags &gt; Global Tags</strong> . The <strong v-pre="">Tags</strong> window appears, listing all of the tags in use in the system, with the associated virtual networks, ports, and projects for each tag. Tags are defined first by type, such as application, deployment, site, tier, and so on. See <a href="security-policy-enhancements.html#fw6">Figure&nbsp;6</a> . <figure id="fw6">
        \mat{J} \ddvec{q}

        where

        .. math::

            \dmat{J} = \mat{H} \dvec{q}

        and :math:`\mat{H} \in \mathbb{R}^{6\times n \times n}` is the
        Hessian tensor. The elements of the Hessian are

        .. math::

            \mat{H}_{i,j,k} = \frac{d^2 u_i}{d q_j d q_k}

        where :math:`u = \{t_x, t_y, t_z, r_x, r_y, r_z\}` are the elements
        of the spatial velocity vector.

        Similarly, we can write

        .. math::

            \mat{J}_{i,j} = \frac{d u_i}{d q_j}

        :references:
            - Kinematic Derivatives using the Elementary Transform Sequence,
              <NAME> and <NAME>
        """
        # Delegate to the ETS spanning start..end; hessiane computes the
        # Hessian expressed in the end-effector frame.
        return self.ets(start, end).hessiane(q, Je=Je, tool=tool)

    def partial_fkine0(
        self,
        q: ArrayLike,
        n: int = 3,
        end: Union[str, Link, Gripper, None] = None,
        start: Union[str, Link, Gripper, None] = None,
        tool: Union[ndarray, SE3, None] = None,
    ):
        r"""
        Manipulator Forward Kinematics nth Partial Derivative

        Computes the nth partial derivative of the manipulator forward
        kinematics with respect to the joint coordinates, based on the ETS
        of the robot. For ``n == 2`` this is the manipulator Hessian, which
        maps joint acceleration to end-effector spatial acceleration.

        :param q: The joint angles/configuration of the robot (Optional,
            if not supplied will use the stored q values).
        :param n: order of the partial derivative to return (2 gives the
            Hessian, 3 the third derivative, ...); defaults to 3
        :param end: the final link/Gripper which the Hessian represents
        :param start: the first link which the Hessian represents
        :param tool: a static tool transformation matrix to apply to the
            end of end, defaults to None
            NOTE(review): ``tool`` is accepted but never used in the body
            below — confirm whether it should be forwarded to jacob0/hessian0.

        :return: The nth Partial Derivative of the forward kinematics

        :references:
            - Kinematic Derivatives using the Elementary Transform Sequence,
              <NAME> and <NAME>
        """
        # Resolve string/Gripper end-points into concrete links.
        end, start, _ = self._get_limit_links(end, start)

        def cross(a, b):
            # 3-vector cross product, expanded by hand.
            x = a[1] * b[2] - a[2] * b[1]
            y = a[2] * b[0] - a[0] * b[2]
            z = a[0] * b[1] - a[1] * b[0]
            return array([x, y, z])

        # nl is the number of links/joints along the kinematic path.
        _, nl, _ = self.get_path(end, start)

        # Seed the derivative chain with the first two orders.
        J = self.jacob0(q, end=end, start=start)
        H = self.hessian0(q, end=end, start=start, J0=J)

        d = [J, H]          # d[k] holds the (k+1)th-order derivative tensor
        size = [6, nl, nl]  # shape of the next tensor to be computed
        count = array([0, 0])
        c = 2

        def add_indices(indices, c):
            # Expand the index lists that select operands of the cross
            # products for the next derivative order.
            # (len(indices * 2) == 2 * len(indices): list repeat, then len.)
            total = len(indices * 2)
            new_indices = []
            for i in range(total):
                j = i // 2
                new_indices.append([])
                new_indices[i].append(indices[j][0].copy())
                new_indices[i].append(indices[j][1].copy())
                # if even number
                if i % 2 == 0:
                    new_indices[i][0].append(c)
                # if odd number
                else:
                    new_indices[i][1].append(c)
            return new_indices

        def add_pdi(pdi):
            # Expand the derivative-order pairs (which tensors in d feed
            # each cross product) for the next order.
            total = len(pdi * 2)
            new_pdi = []
            for i in range(total):
                j = i // 2
                new_pdi.append([])
                new_pdi[i].append(pdi[j][0])
                new_pdi[i].append(pdi[j][1])
                # if even number
                if i % 2 == 0:
                    new_pdi[i][0] += 1
                # if odd number
                else:
                    new_pdi[i][1] += 1
            return new_pdi

        # these are the indices used for the hessian
        indices = [[[1], [0]]]

        # these are the pd indices used in the cross prods
        pdi = [[0, 0]]

        # Each pass computes the next-order derivative tensor and (on the
        # trailing chunk of this function) appends it to d, so the loop
        # terminates once the nth derivative exists.
        while len(d) != n:
            size.append(nl)
            # NOTE(review): concatenating a bare scalar 0 looks suspect —
            # numpy.concatenate expects a sequence of array-likes of matching
            # dimension; verify this path actually runs for n > 3.
            count = concatenate((count, 0))
            indices = add_indices(indices, c)
            pdi = add_pdi(pdi)
            c += 1

            pd = zeros(size)

            # Enumerate every joint-index combination of the output tensor;
            # count acts as a mixed-radix (base nl) odometer over the axes.
            for i in range(nl**c):
                rot = zeros(3)
                trn = zeros(3)

                for j in range(len(indices)):
                    pdr0 = d[pdi[j][0]]
                    pdr1 = d[pdi[j][1]]
                    idx0 = count[indices[j][0]]
                    idx1 = count[indices[j][1]]

                    # Angular rows (3:6) couple with angular rows; the
                    # translational rows (0:3) couple with angular rows.
                    rot += cross(pdr0[(slice(3, 6), *idx0)], pdr1[(slice(3, 6), *idx1)])
                    trn += cross(pdr0[(slice(3, 6), *idx0)], pdr1[(slice(0, 3), *idx1)])

                pd[(slice(0, 3), *count)] =
trn pd[(slice(3, 6), *count)] = rot count[0] += 1 for j in range(len(count)): if count[j] == nl: count[j] = 0 if j != len(count) - 1: count[j + 1] += 1 d.append(pd) return d[-1] def link_collision_damper( self, shape, q=None, di=0.3, ds=0.05, xi=1.0, end=None, start=None, collision_list=None, ): """ Formulates an inequality contraint which, when optimised for will make it impossible for the robot to run into a collision. Requires See examples/neo.py for use case :param ds: The minimum distance in which a joint is allowed to approach the collision object shape :type ds: float :param di: The influence distance in which the velocity damper becomes active :type di: float :param xi: The gain for the velocity damper :type xi: float :param from_link: The first link to consider, defaults to the base link :type from_link: Link :param to_link: The last link to consider, will consider all links between from_link and to_link in the robot, defaults to the end-effector link :type to_link: Link :returns: Ain, Bin as the inequality contraints for an omptimisor :rtype: ndarray(6), ndarray(6) """ end, start, _ = self._get_limit_links(start=start, end=end) links, n, _ = self.get_path(start=start, end=end) # if q is None: # q = copy(self.q) # else: # q = getvector(q, n) j = 0 Ain = None bin = None def indiv_calculation(link, link_col, q): d, wTlp, wTcp = link_col.closest_point(shape, di) if d is not None: lpTcp = -wTlp + wTcp norm = lpTcp / d norm_h = expand_dims(concatenate((norm, [0, 0, 0])), axis=0) # tool = (self.fkine(q, end=link).inv() * SE3(wTlp)).A[:3, 3] # Je = self.jacob0(q, end=link, tool=tool) # Je[:3, :] = self._T[:3, :3] @ Je[:3, :] # n_dim = Je.shape[1] # dp = norm_h @ shape.v # l_Ain = zeros((1, self.n)) Je = self.jacobe(q, start=self.base_link, end=link, tool=link_col.T) n_dim = Je.shape[1] dp = norm_h @ shape.v l_Ain = zeros((1, n)) l_Ain[0, :n_dim] = norm_h @ Je l_bin = (xi * (d - ds) / (di - ds)) + dp else: l_Ain = None l_bin = None return l_Ain, l_bin for link in 
links: if link.isjoint: j += 1 if collision_list is None: col_list = link.collision else: col_list = collision_list[j - 1] for link_col in col_list: l_Ain, l_bin = indiv_calculation(link, link_col, q) if l_Ain is not None and l_bin is not None: if Ain is None: Ain = l_Ain else: Ain = concatenate((Ain, l_Ain)) if bin is None: bin = array(l_bin) else: bin = concatenate((bin, l_bin)) return Ain, bin def vision_collision_damper( self, shape, camera=None, camera_n=0, q=None, di=0.3, ds=0.05, xi=1.0, end=None, start=None, collision_list=None, ): """ Formulates an inequality contraint which, when optimised for will make it impossible for the robot to run into a line of sight. See examples/fetch_vision.py for use case :param camera: The camera link, either as a robotic link or SE3 pose :type camera: ERobot or SE3 :param camera_n: Degrees of freedom of the camera link :type camera_n: int :param ds: The minimum distance in which a joint is allowed to approach the collision object shape :type ds: float :param di: The influence distance in which the velocity damper becomes active :type di: float :param xi: The gain for the velocity damper :type xi: float :param from_link: The first link to consider, defaults to the base link :type from_link: ELink :param to_link: The last link to consider, will consider all links between from_link and to_link in the robot, defaults to the end-effector link :type to_link: ELink :returns: Ain, Bin as the inequality contraints for an omptimisor :rtype: ndarray(6), ndarray(6) """ if start is None: start = self.base_link if end is None: end = self.ee_link links, n, _ = self.get_path(start=start, end=end) j = 0 Ain = None bin = None def rotation_between_vectors(a, b): a = a / npnorm(a) b = b / npnorm(b) angle = arccos(dot(a, b)) axis = cross(a, b) return SE3.AngleAxis(angle, axis) if isinstance(camera, ERobot): wTcp = camera.fkine(camera.q).A[:3, 3] elif isinstance(camera, SE3): wTcp = camera.t wTtp = shape.T[:3, -1] # Create line of sight object 
los_mid = SE3((wTcp + wTtp) / 2) los_orientation = rotation_between_vectors(array([0.0, 0.0, 1.0]), wTcp - wTtp) los = Cylinder( radius=0.001, length=npnorm(wTcp - wTtp), base=(los_mid * los_orientation), ) def indiv_calculation(link, link_col, q): d, wTlp, wTvp = link_col.closest_point(los, di) if d is not None: lpTvp = -wTlp + wTvp norm = lpTvp / d norm_h = expand_dims(concatenate((norm, [0, 0, 0])), axis=0) tool = SE3((inv(self.fkine(q, end=link).A) @ SE3(wTlp).A)[:3, 3]) Je = self.jacob0(q, end=link, tool=tool.A) Je[:3, :] = self._T[:3, :3] @ Je[:3, :] n_dim = Je.shape[1] if isinstance(camera, ERobot): Jv = camera.jacob0(camera.q) Jv[:3, :] = self._T[:3, :3] @ Jv[:3, :] Jv *= npnorm(wTvp - shape.T[:3, -1]) / los.length dpc = norm_h @ Jv dpc = concatenate( ( dpc[0, :-camera_n], zeros(self.n - (camera.n - camera_n)), dpc[0, -camera_n:], ) ) else: dpc = zeros((1, self.n + camera_n)) dpt = norm_h @ shape.v dpt *= npnorm(wTvp
return redirect('/run/download_' + run_id + '/') elif "run_atacseq_advanced" in request.POST: command = [] if "run_name" in request.POST: run_name = request.POST['run_name'] command.extend(['-name', '%s' % run_name]) else: run_name = None if "config_file" in request.FILES: config_file = request.FILES['config_file'] handle_uploaded_file(config_file, run_id) command.extend(['-profile', '%s' % config_file]) else: config_file = None if "design_file" in request.FILES: design_file = request.FILES['design_file'] handle_uploaded_file(design_file, run_id) command.extend(['--input', '%s' % design_file]) else: print("Here be dragons") raise Http404 if "file_folder" in request.FILES: file_folder = request.FILES['file_folder'] file_folder_name = "" if file_folder.name[-4:] == ".zip": handle_and_unzip(file_folder, run_id) file_folder_name = zipfile.ZipFile.namelist(file_folder)[0] elif file_folder.name[-7:] == ".tar.gz": handle_and_untar(file_folder, run_id) tar = tarfile.open(file_folder, "r:gz") file_folder_name = tar.getnames()[0] else: file_folder_name = None if "single_end" in request.POST: command.extend(['--single_end']) single_end = request.POST['single_end'] if "fragment_size" in request.POST: fragment_size = request.POST['fragment_size'] command.extend(['--fragment_size', '%s' % fragment_size]) else: fragment_size = None if "seq_center" in request.POST: seq_center = request.POST['seq_center'] command.extend(['--seq_center', '%s' % seq_center]) else: seq_center = None if "email" in request.POST: email = request.POST['email'] command.extend(['--email', '%s' % email]) else: email = None if "genome_reference" in request.POST: genome_reference = request.POST['genome_reference'] command.extend(['--genome', '%s' % genome_reference]) else: genome_reference = None if "fasta_file" in request.FILES: fasta_file = request.FILES['fasta_file'] handle_uploaded_file(fasta_file, run_id) command.extend(['--fasta', '%s' % fasta_file]) else: fasta_file = None if "gtf_annotation" in 
request.FILES: gtf_annotation = request.FILES['gtf_annotation'] handle_uploaded_file(gtf_annotation) command.extend(['--gtf', '%s' % gtf_annotation]) else: gtf_annotation = None if "bwa_index" in request.FILES: bwa_index = request.FILES['bwa_index'] if bwa_index.name[-4:] == ".zip": handle_and_unzip(bwa_index, run_id) bwa_index_name = zipfile.ZipFile.namelist(bwa_index)[0] command.extend(['--bwa_index', '%s' % bwa_index_name]) elif bwa_index.name[-7:] == ".tar.gz": handle_and_untar(bwa_index, run_id) tar = tarfile.TarFile.open(bwa_index, "r:gz") bwa_index_name = tar.getnames()[0] command.extend(['--bwa_index', '%s' % bwa_index_name]) else: bwa_index_name = None if "gene_bed" in request.FILES: gene_bed = request.FILES['gene_bed'] handle_uploaded_file(gene_bed, run_id) command.extend(['--gene_bed', '%s' % gene_bed]) else: gene_bed = None if "tss_bed" in request.FILES: tss_bed = request.FILES['tss_bed'] handle_uploaded_file(tss_bed, run_id) command.extend(['--tss_bed', '%s' % tss_bed]) else: tss_bed = None if "macs_gsize" in request.POST: macs_gsize = request.POST['macs_gsize'] command.extend(['--macs_gsize', '%s' % macs_gsize]) else: macs_gsize = None if "blacklist" in request.FILES: blacklist = request.FILES['blacklist'] handle_uploaded_file(blacklist, run_id) command.extend(['--blacklist', '%s' % blacklist]) else: blacklist = None if "mito_name" in request.POST: mito_name = request.POST['mito_name'] command.extend(['--mito_name', '%s' % mito_name]) else: mito_name = None if 'save_reference' in request.POST: command.extend(['--save_reference']) save_reference = request.POST['save_reference'] if "clip_r1" in request.POST: clip_r1 = request.POST['clip_r1'] command.extend(['--clip_r1', '%s' % clip_r1]) else: clip_r1 = None if "clip_r2" in request.POST: clip_r2 = request.POST['clip_r2'] command.extend(['--clip_r2', '%s' % clip_r2]) else: clip_r2 = None if "three_prime_clip_r1" in request.POST: three_prime_clip_r1 = request.POST['three_prime_clip_r1'] 
command.extend(['--three_prime_clip_r1', '%s' % three_prime_clip_r1]) else: three_prime_clip_r1 = None if "three_prime_clip_r2" in request.POST: three_prime_clip_r2 = request.POST['three_prime_clip_r2'] command.extend(['--three_prime_clip_r2', '%s' % three_prime_clip_r2]) else: three_prime_clip_r2 = None if "trim_nextseq" in request.POST: trim_nextseq = request.POST['trim_nextseq'] command.extend(['--trim_nextseq', '%s' % trim_nextseq]) else: trim_nextseq = None if 'skip_trimming' in request.POST: command.extend(['--skip_trimming']) skip_trimming = request.POST['skip_trimming'] if 'save_trimmed' in request.POST: command.extend(['--save_trimmed']) save_trimmed = request.POST['save_trimmed'] if 'keep_mito' in request.POST: command.extend(['--keep_mito']) keep_mito = request.POST['keep_mito'] if request.POST['keep_dups'] is True: command.extend(['--keep_dups']) keep_dups = request.POST['keep_dups'] if request.POST['keep_multi_map'] is True: command.extend(['--keep_multi_map']) keep_multi_map = request.POST['keep_multi_map'] if "bwa_min_score" in request.POST: bwa_min_score = request.POST['bwa_min_score'] command.extend(['--bwa_min_score', '%s' % bwa_min_score]) else: bwa_min_score = None if request.POST['skip_merge_replicates'] is True: command.extend(['--skip_merge_replicates']) skip_merge_replicates = request.POST['skip_merge_replicates'] if request.POST['save_align_intermeds'] is True: command.extend(['--save_align_intermeds']) save_align_intermeds = request.POST['save_align_intermeds'] if request.POST['narrow_peaks']: command.extend(['--narrow_peak']) narrow_peak = request.POST['narrow_peaks'] if "broad_cutoff" in request.POST: broad_cutoff = request.POST['broad_cutoff'] command.extend(['--broad_cutoff', '%s' % broad_cutoff]) else: broad_cutoff = None if "macs_fdr" in request.POST: macs_fdr = request.POST['macs_fdr'] command.extend(['--macs_fdr', '%s' % macs_fdr]) else: macs_fdr = None if "macs_pvalue" in request.POST: macs_pvalue = request.POST['macs_pvalue'] 
command.extend(['--macs_pvalue', '%s' % macs_pvalue]) else: macs_pvalue = None if "min_reps_consensus" in request.POST: min_reps_consensus = request.POST['min_reps_consensus'] command.extend(['--min_reps_consensus', '%s' % min_reps_consensus]) else: min_reps_consensus = None save_macs_pileup = request.POST['save_macs_pileup'] if save_macs_pileup: command.extend(['--save_macs_pileup']) skip_peak_qc = request.POST['skip_peak_qc'] if skip_peak_qc: command.extend(['--skip_peak_qc']) skip_peak_annotation = request.POST['skip_peak_annotation'] if skip_peak_annotation: command.extend(['--skip_peak_annotation']) skip_consensus_peaks = request.POST['skip_consensus_peaks'] if skip_consensus_peaks: command.extend(['--skip_consensus_peaks']) deseq2_vst = request.POST['deseq2_vst'] if deseq2_vst: command.extend(['--deseq2_vst']) skip_diff_analysis = request.POST['skip_diff_analysis'] if skip_diff_analysis: command.extend(['--skip_diff_analysis']) skip_fastqc = request.POST['skip_fastqc'] if skip_fastqc: command.extend(['--skip_fastqc']) skip_picard_metrics = request.POST['skip_picard_metrics'] if skip_picard_metrics: command.extend(['--skip_picard_metrics']) skip_preseq = request.POST['skip_preseq'] if skip_preseq: command.extend(['--skip_preseq']) skip_plot_profile = request.POST['skip_plot_profile'] if skip_plot_profile: command.extend(['--skip_plot_profile']) skip_plot_fingerprint = request.POST['skip_plot_fingerprint'] if skip_plot_fingerprint: command.extend(['--skip_plot_fingerprint']) skip_ataqv = request.POST['skip_ataqv'] if skip_ataqv: command.extend(['--skip_ataqv']) skip_igv = request.POST['skip_igv'] if skip_igv: command.extend(['--skip_igv']) skip_multiqc = request.POST['skip_multiqc'] if skip_multiqc: command.extend(['--skip_multiqc']) command.extend(['--skip_multiqc']) os.chdir(id_path) run = Run(run_id=run_id, pipeline="nf-core/ATAC-Seq", start_time=datetime.now()) run.save() # import and run pipeline call from .scripts.nfcore.start_pipeline import 
atacseq_advanced result = atacseq_advanced(run_name, config_file, design_file, single_end, fragment_size, seq_center, email, genome_reference, fasta_file, gtf_annotation, gene_bed, tss_bed, macs_gsize, blacklist, mito_name, save_reference, clip_r1, clip_r2, three_prime_clip_r1, three_prime_clip_r2, trim_nextseq, skip_trimming, save_trimmed, keep_mito, keep_dups, keep_multi_map, bwa_min_score, skip_merge_replicates, save_align_intermeds, narrow_peak, broad_cutoff, macs_fdr, macs_pvalue, min_reps_consensus, save_macs_pileup, skip_peak_qc, skip_peak_annotation, skip_consensus_peaks, deseq2_vst, skip_diff_analysis, skip_fastqc, skip_picard_metrics, skip_preseq, skip_plot_profile, skip_plot_fingerprint, skip_ataqv, skip_igv, skip_multiqc, run=run) # compress results from .tasks import zip_file, tar_file tar_file("results.tar.gz", "results/") zip_file("results.zip", "results/") from .tasks import del_file # deleting progress file del_file([".inprogress.txt"]) # remove large folders to save space clean_wd() # redirect to download or fail page, based on pipeline results if result != 0: result = str(result) from .tasks import create_crash_file create_crash_file(id_path, result) return redirect('/run/fail_' + run_id + '_' + result + '/') else: # redirect to download page # return redirect('/run/nfcore/download_' + run_id + '/') return redirect('/run/download_' + run_id + '/') # class function for the nf-core ChIP-Seq pipeline class ChipSeqRun(View): # set template for pipeline page template_name = 'run/run_chipseq_html.html' # get function def get(self, request, *args, **kwargs): # set run_id run_id = generate_and_check_id() # render pipeline page return render(request, self.template_name, {'run_id': run_id}) # post function def post(self, request, *args, **kwargs): # set variables # run_id = kwargs['run_id'] run_id = request.POST['run_id'] id_path = get_id_path(run_id) # id_path is nextflow's working directory in the media/run directory # check if directory already exists 
check_for_run_dir(run_id) # check if directory already exists print("starting 'check_for_run_dir'") # taken = check_for_run_dir(run_id) # if taken is True: # return redirect('run:idTaken', run_id) if check_for_run_dir(run_id): return redirect('run:idTaken', run_id) # create working directory create_directory(id_path) # create progress file create_progress_file(id_path) # get organism_name # organism_name = request.POST['organism_name'] # get design_file and handle file design_file = request.FILES['design_file'] handle_uploaded_file(design_file, run_id) # get file_folder, handle and decompress if 'file_folder' in request.FILES: file_folder = request.FILES['file_folder'] if file_folder is not None: if file_folder.name[-4:] == ".zip": handle_and_unzip(file_folder, run_id) elif file_folder.name[-7:] == ".tar.gz": handle_and_untar(file_folder, run_id) else: pass # get single_end value single_end = request.POST['single_end'] # get igenome_reference if request.POST['igenome_reference'] != "": igenome_reference = request.POST['igenome_reference'] else: igenome_reference = None print("igenome reference:", igenome_reference) # get fasta_file and handle file if "fasta_file" in request.FILES: fasta_file = request.FILES['fasta_file'] handle_uploaded_file(fasta_file, run_id) else: fasta_file = None # get gtf_file and handle file if "gtf_file" in request.FILES: gtf_file = request.FILES['gtf_file'] handle_uploaded_file(gtf_file, run_id) else: gtf_file = None # get bed_file and handle file if "bed_file" in request.FILES: bed_file = request.FILES['bed_file'] handle_uploaded_file(bed_file, run_id) else: bed_file = None # get mac_size if request.POST['macs_size'] != "": macs_size = request.POST['macs_size'] else: macs_size = None # get narrow_peaks if 'narrow peaks' in request.POST: narrow_peaks = True else: narrow_peaks = False # change to working directory os.chdir(id_path) if 'post_chipseq' in request.POST: post_chipseq = True else: post_chipseq = False from django.utils import 
timezone run = Run(run_id=run_id, pipeline="nf-core/ChIP-Seq", start_time=timezone.now()) run.save() # import and run pipeline call from .scripts.nfcore.start_pipeline import chipseq result = chipseq(design_file=design_file, single_end=single_end, igenome_reference=igenome_reference, fasta_file=fasta_file, gtf_file=gtf_file, bed_file=bed_file, macs_size=macs_size, narrow_peaks=narrow_peaks, run=run) # compress results from .tasks import zip_file, tar_file tar_file("results.tar.gz", "results/") zip_file("results.zip", "results/") if post_chipseq is True: if result == 0: # prepare work directory from distutils.dir_util import copy_tree from .tasks import del_file copy_tree(id_path + "results/bwa/mergedLibrary/bigwig", "bigwig/") # copy_file(id_path + "results/bwa/*/bigwig", ".") # copy_file("results/bwa/*/bigwig", ".") bigwig_dir = str(id_path) + "/bigwig/" if 'ext_chr' in request.POST: ext_chr = request.POST['ext_chr'] else: ext_chr = None computation_method = request.POST['computation_method'] if 'upstream' in request.POST: upstream = request.POST['upstream'] else: upstream = None if 'downstream' in request.POST: downstream = request.POST['downstream'] else: downstream = None if 'regions_length' in request.POST: regions_length = request.POST['regions_length'] else: regions_length = None if 'ref_point' in request.POST: ref_point = request.POST['ref_point'] if ref_point not in ['TSS', 'TES', 'center']: ref_point = 'TSS' else: ref_point = 'TSS' if 'collect' in request.POST: collect = True else: collect = False if bed_file is not None: post_bed_file = bed_file else: from .tasks import get_genes_bed # , copy_file from distutils.file_util import copy_file post_bed_file = get_genes_bed(run_id) copy_file("results/genome/" + post_bed_file, ".") # post_annotation_file = gtf_file if gtf_file is not None: post_annotation_file = gtf_file else: from .tasks import get_gtf from distutils.file_util import copy_file post_annotation_file = get_gtf(run_id) copy_file("results/genome/"
rect.left+ rect.width//2 - self.rect.width//2 self.rect.bottom = rect.bottom self.coolDown = 0 self.damage = 4 self.doom = 0 def move(self, delay, sprites): self.checkHitBack() # deal move if not delay % 2: self.rect.left += self.speed if (getPos(self,0.75,0)[0] >= self.scope[1] and self.speed > 0) or (getPos(self,0.25,0)[0] <= self.scope[0] and self.speed < 0): self.alterSpeed(-self.speed) if not delay % 8: self.imgIndx = (self.imgIndx+1) % len(self.imgLeftList) self.image = self.imgLeftList[self.imgIndx] if self.speed<0 else self.imgRightList[self.imgIndx] # deal attack if self.coolDown == 0: for each in sprites: if ( pygame.sprite.collide_mask(self, each) ): self.coolDown = 60 if (self.coolDown > 0): self.coolDown -= 1 if ( self.coolDown == 45 ): cldList( self, sprites ) def level(self, dist): self.rect.left += dist self.scope = (self.scope[0]+dist, self.scope[1]+dist) # ----------------------------------- class Bowler(Monster): def __init__(self, wallGroup, onlayer): # calculate its position Monster.__init__(self, "bowler", (10,60,80,240), 45, 0, 4, onlayer, 3, 2) # note: 这里bowler的onlayer,以及其stone的onlayer值均为砖的行数,并非自身的行数,使用时不需要-1操作 self.wallList = [] # 存储本行的所有砖块; for aWall in wallGroup: # 由于spriteGroup不好进行索引/随机选择操作,因此将其中的sprite逐个存入列表中存储 self.wallList.append(aWall) wall = choice(self.wallList) # initialize the sprite self.imgLeftList = [ pygame.image.load("image/stg2/bowler0.png").convert_alpha(), pygame.image.load("image/stg2/bowler0.png").convert_alpha(), \ pygame.image.load("image/stg2/bowler0.png").convert_alpha(), pygame.image.load("image/stg2/bowler1.png").convert_alpha() ] self.imgRightList = [ pygame.transform.flip(self.imgLeftList[0], True, False), pygame.transform.flip(self.imgLeftList[1], True, False), \ pygame.transform.flip(self.imgLeftList[2], True, False), pygame.transform.flip(self.imgLeftList[3], True, False) ] self.throwLeft = pygame.image.load("image/stg2/bowlerThrow.png").convert_alpha() self.throwRight = pygame.transform.flip(self.throwLeft, 
class Stone(pygame.sprite.Sprite):
    """A rolling stone projectile.

    It rolls horizontally, bounces off walls a limited number of times
    (``duration``), falls under gravity, and finally plays a crack
    animation (``doom`` counter) before disappearing.
    """

    # Crack-animation frames, loaded once and shared by every Stone
    # instance instead of re-reading the files from disk on each frame.
    _crack_frames = None

    def __init__(self, pos, onlayer, direction):
        """Create a stone at (left, bottom) = *pos* on layer *onlayer*,
        moving towards *direction* ("left" or "right")."""
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("image/stg2/stone.png").convert_alpha()
        self.oriImage = self.image            # unrotated master image
        self.deg = 0                          # current rotation angle
        self.rect = self.image.get_rect()
        self.mask = pygame.mask.from_surface(self.image)
        self.damage = 0.5
        self.category = "stone"
        self.rect.left = pos[0]
        self.rect.bottom = pos[1]
        self.onlayer = int(onlayer)
        self.gravity = 1
        self.speed = -2 if direction == "left" else 2   # negative = leftwards
        self.duration = 3                     # remaining horizontal bounces
        self.doom = 0                         # >0 once the crack animation runs
        if Stone._crack_frames is None:
            Stone._crack_frames = [
                pygame.image.load("image/stg2/crack%d.png" % i)
                for i in range(4)
            ]

    def move(self, delay, sideWalls, downWalls, keyLine, sprites, canvas):
        """Advance the stone by one tick.

        Returns False at the moment the stone lands on a wall after a
        fall; otherwise returns None.
        """
        self.rect.left += self.speed
        if self.doom > 0:
            # Crack animation in progress.
            trPos = [self.rect.left + self.rect.width // 2, self.rect.bottom]
            if self.doom == 21:
                self.kill()
                return
            # doom 1-5 -> crack0, 6-10 -> crack1, 11-15 -> crack2, 16-20 -> crack3
            self.image = Stone._crack_frames[min((self.doom - 1) // 5, 3)]
            self.rect = self.image.get_rect()
            self.rect.left = trPos[0] - self.rect.width // 2
            self.rect.bottom = trPos[1]
            self.doom += 1
        else:
            # Spin the stone in the direction of travel.
            if not (delay % 4):
                self.deg = (self.deg + 20) % 360
                self.image = rot_center(self.oriImage, self.deg) if self.speed <= 0 else rot_center(self.oriImage, -self.deg)
            # Horizontal collision with heroes.
            for each in sprites:
                if pygame.sprite.collide_mask(self, each):
                    pos = getPos(self, 0, 0.6) if self.speed < 0 else getPos(self, 1, 0.6)
                    canvas.addSpatters(2, (2, 3, 4), (6, 7, 8), (40, 40, 40, 255), pos)
                    push = -6 if (self.speed < 0) else 6
                    each.hitted(self.damage, push)
            # Wall collision: same trick as the hero — lift by 4px and
            # re-test, so any ledge up to 4px high is rolled over.
            if (pygame.sprite.spritecollide(self, sideWalls, False, pygame.sprite.collide_mask)
                    or pygame.sprite.spritecollide(self, downWalls, False, pygame.sprite.collide_mask)):
                self.rect.top -= 4
                if getCld(self, downWalls, ["lineWall", "specialWall"]) or getCld(self, sideWalls, ["baseWall", "sideWall"]):
                    pos = getPos(self, 0, 0.6) if self.speed < 0 else getPos(self, 1, 0.6)
                    canvas.addSpatters(4, (2, 4, 6), (6, 7, 8), (40, 40, 40, 255), pos)
                    self.rect.top += 4
                    self.rect.left -= self.speed
                    self.speed = -self.speed          # bounce back
                    self.duration -= 1
                    if self.duration <= 0:            # out of bounces: crack
                        self.doom += 1
            # Fall.
            self.rect.bottom += self.gravity
            while pygame.sprite.spritecollide(self, downWalls, False, pygame.sprite.collide_mask):
                self.rect.bottom -= 1
                if not pygame.sprite.spritecollide(self, downWalls, False, pygame.sprite.collide_mask):
                    # Just landed on top of a wall.
                    canvas.addSpatters(1, (2, 3, 4), (5, 6, 7), (40, 40, 40, 255), getPos(self, 0.5, 1))
                    self.gravity = 1
                    return False
            if self.gravity <= 6:
                self.gravity += 1
            if self.rect.top >= keyLine:
                self.onlayer -= 2
                # Guard against area switches / bottom-layer removal
                # driving onlayer below the valid range.
                if self.onlayer < -1:
                    self.kill()

    def lift(self, dist):
        """Shift the stone vertically by *dist* pixels (screen scroll)."""
        self.rect.bottom += dist

    def level(self, dist):
        """Shift the stone horizontally by *dist* pixels (screen scroll)."""
        self.rect.left += dist


# ------------------------------------------- Assistant function for stone.fall
def getCld(core, group, cateList):
    """Return the sprites in *group* that mask-collide with *core* and
    whose ``category`` is one of *cateList*."""
    return [
        item
        for item in pygame.sprite.spritecollide(core, group, False, pygame.sprite.collide_mask)
        if item.category in cateList
    ]
pygame.transform.flip(self.headLeft[3], True, False) ] self.headR = { "left":[ (0.12,0.6), (0.2,0.7), (0.2,0.7), (0.2,0.7) ], "right":[ (0.88,0.6), (0.8,0.7), (0.8,0.7), (0.8,0.7) ] } self.head = Ajunction( self.headLeft[0], getPos(self, self.headR[self.direction][0][0], self.headR[self.direction][0][1]) ) # ------------- front Legs part -------------- self.frontLeft = [ pygame.image.load("image/stg2/frontLeg0.png").convert_alpha(), pygame.image.load("image/stg2/frontLeg1.png").convert_alpha(), \ pygame.image.load("image/stg2/frontLeg0.png").convert_alpha(), pygame.image.load("image/stg2/frontLeg2.png").convert_alpha() ] self.frontRight = [ pygame.transform.flip(self.frontLeft[0], True, False), pygame.transform.flip(self.frontLeft[1], True, False), \ pygame.transform.flip(self.frontLeft[2], True, False), pygame.transform.flip(self.frontLeft[3], True, False) ] self.frontR = { "left":[ (0.84,0.5), (0.82,0.5), (0.84,0.5), (0.82,0.5) ], "right":[ (0.14,0.5), (0.16,0.5), (0.14,0.5), (0.16,0.5) ] } # 分别为左和右时的位置信息 self.front = Ajunction( self.frontLeft[2], getPos(self, self.frontR[self.direction][0][0], self.frontR[self.direction][0][1]) ) #self.frontJmpLeft = [ pygame.image.load("image/stg2/frontJmp0.png").convert_alpha(), pygame.image.load("image/stg2/frontJmp1.png").convert_alpha() ] #self.frontJmpRight = [ pygame.transform.flip(self.frontJmpLeft[0], True, False), pygame.transform.flip(self.frontJmpLeft[1], True, False) ] # ------------- rear legs part ---------------- self.rearLeft = [ pygame.image.load("image/stg2/rearLeg0.png").convert_alpha(), pygame.image.load("image/stg2/rearLeg1.png").convert_alpha(), pygame.image.load("image/stg2/rearLeg0.png").convert_alpha(), pygame.image.load("image/stg2/rearLeg2.png").convert_alpha() ] self.rearRight = [ pygame.transform.flip(self.rearLeft[0], True, False), pygame.transform.flip(self.rearLeft[1], True, False), pygame.transform.flip(self.rearLeft[2], True, False), pygame.transform.flip(self.rearLeft[3], True, False) ] 
self.rearR = { "left":[ (0.5,0.5), (0.5,0.5), (0.5,0.5), (0.5,0.5) ], "right":[ (0.5,0.5), (0.5,0.5), (0.5,0.5), (0.5,0.5) ] } self.rear = Ajunction( self.rearLeft[0], getPos(self, self.rearR[self.direction][0][0], self.rearR[self.direction][0][1]) ) # ------------------- 本boss比较特殊的一点:前后腿的图片数量相同,动画顺序相同 ------------------ self.legIndx = 0 self.headIndx = 0 # ----------- other attributes ------------------------- self.damage = 8 self.coolDown = 0 self.doom = 0 self.alterSpeed( choice( [-1,1] ) ) self.cnt = 0 # count for the loop of shift position self.coolDown = 0 # count for attack coolDown self.growlSnd = pygame.mixer.Sound("audio/redDragonGrowl.wav") self.moanSnd = pygame.mixer.Sound("audio/redDragonMoan.wav") self.upDown = 3 def move(self, delay, sprites, canvas): self.checkHitBack() if (self.speed): # 排除self.speed = 0的情况 self.rect.left += self.speed if (self.rect.right >= self.scope[1] and self.speed > 0) or (self.rect.left <= self.scope[0] and self.speed < 0): self.alterSpeed( -self.speed ) if not delay % 2: if not delay%20: self.rect.top += self.upDown self.upDown = -self.upDown # Decide whether to Jump #if not self.onlayer== # move horizent if not ( delay % 12 ): if self.direction == "left": self.image = self.bodyLeft self.mask = pygame.mask.from_surface(self.image) # deal legs: self.legIndx = (self.legIndx+1) % len(self.frontLeft) self.front.updateImg( self.frontLeft[self.legIndx] ) self.rear.updateImg( self.rearLeft[self.legIndx] ) self.head.updateImg( self.headLeft[self.headIndx] ) elif self.direction == "right": self.image = self.bodyRight self.mask = pygame.mask.from_surface(self.image) # deal legs: self.legIndx = (self.legIndx+1) % len(self.frontLeft) self.front.updateImg( self.frontRight[self.legIndx] ) self.rear.updateImg( self.rearRight[self.legIndx] ) self.head.updateImg( self.headRight[self.headIndx] ) self.front.updatePos( getPos(self, self.frontR[self.direction][self.legIndx][0], self.frontR[self.direction][self.legIndx][1]) ) 
            # (tail of GiantSpider.move) Re-anchor the rear-leg and head
            # attachment sprites at their ratio positions for the current
            # facing and animation frame.
            self.rear.updatePos( getPos(self, self.rearR[self.direction][self.legIndx][0], self.rearR[self.direction][self.legIndx][1]) )
            self.head.updatePos( getPos(self, self.headR[self.direction][self.headIndx][0], self.headR[self.direction][self.headIndx][1]) )
        # Touching a hero starts the 60-tick attack cooldown.
        if self.coolDown == 0:
            for each in sprites:
                if ( pygame.sprite.collide_mask(self, each) ):
                    self.coolDown = 60
        # While cooling down, tick the counter and run the scratch attack.
        if (self.coolDown > 0):
            self.coolDown -= 1
            self.cratch(sprites)

    def cratch(self, sprites):
        """Scratch attack, driven by the coolDown counter counting down
        from 60: head frames 1→2→3 between ticks 54 and 42, damage is
        dealt once at tick 42, and the head rests from tick 40 down."""
        if (self.coolDown <= 40):
            self.headIndx = 0
            return
        if (self.coolDown >= 54):
            self.headIndx = 1
        elif (self.coolDown >= 48):
            self.headIndx = 2
        elif (self.coolDown >= 42):
            self.headIndx = 3
        if ( self.coolDown == 42 ):
            # Damage frame: hit every hero overlapping the head sprite.
            for each in sprites:
                if pygame.sprite.collide_mask( self.head, each ):
                    each.hitted( self.damage, self.push )

    # Since this object's construction is quite complex, a dedicated
    # drawing interface is provided: pass a surface to this function and
    # the complete object is blitted onto it (rear legs behind the body,
    # head and front legs in front).
    def paint(self, screen):
        screen.blit( self.rear.image, self.rear.rect )
        screen.blit( self.image, self.rect )
        screen.blit( self.head.image, self.head.rect )
        screen.blit( self.front.image, self.front.rect )

    def erase(self):
        """Play the death moan and remove the spider and its attachment
        sprites from all groups.  Always returns True (dead)."""
        self.moanSnd.play(0)
        self.front.kill()
        del self.front
        self.rear.kill()
        del self.rear
        self.kill()
        del self
        return True  # dead

# ===========================================================================
# -------------------------------- stage3 -----------------------------------
# ===========================================================================
class Mist():
    # Full-screen alpha canvas used to darken/fog the scene.
    canvas = None
    canvasRect = None
    def __init__(self, bg_size):
        self.canvas = pygame.Surface(bg_size).convert_alpha()
        self.canvasRect = self.canvas.get_rect()
        self.canvasRect.left =
import os
from functools import cmp_to_key

import numpy

from . import cInterface
from .extraData import ExtraDataUnpacker


class UMFileException(Exception):
    """Base exception for errors raised while reading UM files."""

    pass


class File:
    """A class for a UM file that gives a view of the file including
    sets of PP records combined into variables."""

    def __init__(
        self, path, byte_ordering=None, word_size=None, fmt=None, parse=True
    ):
        """Open and parse a UM file.

        The optional *byte_ordering*, *word_size* and *fmt* arguments
        specify the file type.  If all three are set, then this forces
        the file type; otherwise, the file type is autodetected and any
        of them that are set are ignored.

        :Parameters:

            path: `str`
                The name of the UM file.

            byte_ordering: `str`, optional
                'little_endian' or 'big_endian'

            word_size: `int`, optional
                4 or 8

            fmt: `str`, optional
                'FF' or 'PP'

            parse: `bool`, optional
                The default action is to open the file, store the file
                type from the arguments or autodetection as described
                above, and then parse the contents, giving a tree of
                variables and records under the `File` object.
                However, if *parse* is False, then the last step is
                omitted, so only the file type is stored, and there are
                no variables under it.  Such an object can be passed
                when instantiating `Rec` objects, and contains
                sufficient info about the file type to ensure that the
                `get_data` method of those `Rec` objects will work.
        """
        c = cInterface.CInterface()
        self._c_interface = c
        self.path = path
        self.fd = None
        self.open_fd()

        if byte_ordering and word_size and fmt:
            # All three supplied: force the file type.
            self.fmt = fmt
            self.byte_ordering = byte_ordering
            self.word_size = word_size
        else:
            self._detect_file_type()

        file_type_obj = c.create_file_type(
            self.fmt, self.byte_ordering, self.word_size
        )

        # Set the word size used to interpret file pointers
        c.set_word_size(file_type_obj)

        if parse:
            # --------------------------------------------------------
            # Work out information from the file and store it in the
            # `vars` attribute.
            #
            # Note that the word size used to interpret file pointers
            # needs to have been previously set.
            # --------------------------------------------------------
            info = c.parse_file(self.fd, file_type_obj)
            self.vars = info["vars"]
            self._add_back_refs()

    def open_fd(self):
        """(Re)open the low-level file descriptor.

        :Returns:

            `int`
                The file descriptor.
        """
        if self.fd is None:
            self.fd = os.open(self.path, os.O_RDONLY)

        return self.fd

    def close_fd(self):
        """Close the low-level file descriptor.

        :Returns:

            `None`
        """
        # Compare against None rather than testing truthiness:
        # descriptor 0 is falsy but valid.
        if self.fd is not None:
            os.close(self.fd)

        self.fd = None

    def _detect_file_type(self):
        """Autodetect and store the format, byte ordering and word size
        of the file (`!fmt`, `!byte_ordering`, `!word_size`).

        :Returns:

            `None`
        """
        c = self._c_interface
        try:
            file_type_obj = c.detect_file_type(self.fd)
        except Exception as error:
            self.close_fd()
            raise IOError(
                "File {} has unsupported format".format(self.path)
            ) from error

        d = c.file_type_obj_to_dict(file_type_obj)
        self.fmt = d["fmt"]
        self.byte_ordering = d["byte_ordering"]
        self.word_size = d["word_size"]

    def _add_back_refs(self):
        """Add file attribute to `Var` objects, and both `!file` and
        `!var` attributes to `Rec` objects.

        The important one is the file attribute in the `Rec` object, as
        this is used when reading data.  The others are provided for
        extra convenience.

        :Returns:

            `None`
        """
        for var in self.vars:
            var.file = self
            for rec in var.recs:
                rec.var = var
                rec.file = self


class Var:
    """Container for some information about variables."""

    def __init__(self, recs, nz, nt, supervar_index=None):
        self.recs = recs
        self.nz = nz
        self.nt = nt
        self.supervar_index = supervar_index

    @staticmethod
    def _compare(x, y):
        """Method equivalent to the Python 2 'cmp'.

        Note that (x > y) - (x < y) is equivalent but not as performant
        since it would not short-circuit.

        :Returns:

            `int`
        """
        if x == y:
            return 0
        elif x > y:
            return 1
        else:
            return -1

    def _compare_recs_by_extra_data(self, a, b):
        """cmp-style comparison of two records by their extra data.

        :Returns:

            `int`
        """
        return self._compare(a.get_extra_data(), b.get_extra_data())

    def _compare_recs_by_orig_order(self, a, b):
        """cmp-style comparison of two records by their position in the
        `!recs` attribute.

        :Returns:

            `int`
        """
        return self._compare(self.recs.index(a), self.recs.index(b))

    def group_records_by_extra_data(self):
        """Returns a list of (sub)lists of records where each records
        within each sublist has matching extra data (if any), so if the
        whole variable has consistent extra data then the return value
        will be of length 1.

        Within each group, the ordering of returned records is the same
        as in the `!recs` attribute.

        :Returns:

            `list`
        """
        compare = self._compare_recs_by_extra_data
        recs = self.recs[:]
        n = len(recs)
        if n == 0:
            # shouldn't have a var without records, but...
            return []

        recs.sort(key=cmp_to_key(compare))

        # optimise simple case - if two ends of a sorted list match,
        # the whole list matches
        if not compare(recs[0], recs[-1]):
            return [self.recs[:]]

        groups = []
        this_grp = []
        for i, rec in enumerate(recs):
            this_grp.append(rec)
            if i == n - 1 or compare(rec, recs[i + 1]):
                # BUGFIX: the comparator takes two arguments, so it must
                # be wrapped with cmp_to_key before being used as key=
                # (passing it directly raised TypeError at runtime).
                this_grp.sort(
                    key=cmp_to_key(self._compare_recs_by_orig_order)
                )
                groups.append(this_grp)
                this_grp = []

        return groups


class Rec:
    """Container for some information about records."""

    def __init__(
        self,
        int_hdr,
        real_hdr,
        hdr_offset,
        data_offset,
        disk_length,
        file=None,
    ):
        """Default instantiation, which stores the supplied headers and
        offsets.

        :Parameters:

            file: `File`, optional
                Used to set the `!file` attribute.  Does not need to be
                supplied, but if it is not then it will have to be set
                on the returned `Rec` object before calling `get_data`
                will work.  If set it should be set to the `File`
                object that contains the returned `Rec` object.
                Normally this would be done by the calling code
                instantiating via `File` rather than directly.
        """
        self.int_hdr = int_hdr
        self.real_hdr = real_hdr
        self.hdr_offset = hdr_offset
        self.data_offset = data_offset
        self.disk_length = disk_length
        self._extra_data = None
        if file:
            self.file = file

    @classmethod
    def from_file_and_offsets(cls, file, hdr_offset, data_offset, disk_length):
        """Instantiate a `Rec` object from the `File` object and the
        header and data offsets.  The headers are read in, and also the
        record object is ready for calling `get_data`.

        :Parameters:

            file: `File`
                A view of a file including sets of PP records combined
                into variables.

            hdr_offset: `int`
                The start word in the file of the header.

            data_offset: `int`
                The start word in the file of the data.

            disk_length: `int`
                The length in words of the data in the file.

        :Returns:

            `Rec`
        """
        c = file._c_interface
        int_hdr, real_hdr = c.read_header(
            file.fd, hdr_offset, file.byte_ordering, file.word_size
        )
        return cls(
            int_hdr, real_hdr, hdr_offset, data_offset, disk_length, file=file
        )

    def read_extra_data(self):
        """Read the extra data associated with the record.

        :Returns:

            `numpy.ndarray`
        """
        c = self.file._c_interface
        file = self.file
        (
            extra_data_offset,
            extra_data_length,
        ) = c.get_extra_data_offset_and_length(
            self.int_hdr, self.data_offset, self.disk_length
        )
        raw_extra_data = c.read_extra_data(
            file.fd,
            extra_data_offset,
            extra_data_length,
            file.byte_ordering,
            file.word_size,
        )
        edu = ExtraDataUnpacker(
            raw_extra_data, file.word_size, file.byte_ordering
        )
        return edu.get_data()

    def get_extra_data(self):
        """Get extra data associated with the record, either by reading
        or using cached read.

        :Returns:

            `numpy.ndarray`
        """
        if self._extra_data is None:
            self._extra_data = self.read_extra_data()

        return self._extra_data

    def get_type_and_num_words(self):
        """Get the data type (as numpy type) and number of words.

        :Returns:

            `numpy.dtype`, `int`
        """
        c = self.file._c_interface
        ntype, num_words = c.get_type_and_num_words(self.int_hdr)
        if ntype == "integer":
            dtype = numpy.dtype(c.file_data_int_type)
        elif ntype == "real":
            dtype = numpy.dtype(c.file_data_real_type)
        else:
            # Previously an unknown type fell through and raised an
            # opaque UnboundLocalError; fail with a meaningful message.
            raise UMFileException(
                "Unsupported data type {!r} in record header".format(ntype)
            )

        return dtype, num_words

    def get_data(self):
        """Get the data array associated with the record.

        :Returns:

            `numpy.ndarray`
        """
        c = self.file._c_interface
        file = self.file
        data_type, nwords = c.get_type_and_num_words(self.int_hdr)
        return c.read_record_data(
            file.fd,
            self.data_offset,
            self.disk_length,
            file.byte_ordering,
            file.word_size,
            self.int_hdr,
            self.real_hdr,
            data_type,
            nwords,
        )


if __name__ == "__main__":
    import sys

    path = sys.argv[1]
    f = File(path)
    print(f.fmt, f.byte_ordering, f.word_size)
    print("num variables: %s" % len(f.vars))
    for varno, var in enumerate(f.vars):
        print()
        print("var %s: nz = %s, nt = %s" % (varno, var.nz, var.nt))
        for recno, rec in enumerate(var.recs):
            print("var %s record %s" % (varno, recno))
            print("hdr offset: %s" % rec.hdr_offset)
            print("data offset: %s" % rec.data_offset)
            print("disk length: %s" % rec.disk_length)
            print("int hdr: %s" % rec.int_hdr)
            print("real hdr: %s" % rec.real_hdr)
            print("data: %s" % rec.get_data())
            print("extra_data: %s" % rec.get_extra_data())
            print("type %s, num words: %s" % rec.get_type_and_num_words())
            # if recno == 1:
            #     rec._extra_data['y'] += .01
            # print("massaged_extra_data: %s" % rec.get_extra_data())
            print("-----------------------")
        print("all records", var.recs)
        print(
            "records grouped by extra data ",
            var.group_records_by_extra_data()
        )
        print("===============================")
    f.close_fd()

    # also read a record using saved metadata
    if f.vars:
        fmt = f.fmt
        byte_ordering = f.byte_ordering
        word_size = f.word_size
        myrec = f.vars[0].recs[0]
        hdr_offset = myrec.hdr_offset
        data_offset = myrec.data_offset
        disk_length = myrec.disk_length
        del f
        fnew = File(
            path,
            fmt=fmt,
            byte_ordering=byte_ordering,
            word_size=word_size,
            parse=False,
        )
        rnew = Rec.from_file_and_offsets(
            fnew, hdr_offset, data_offset, disk_length
        )
        print("record read using saved file type and offsets:")
        print("int hdr: %s" % rnew.int_hdr)
        print("real hdr: %s" % rnew.real_hdr)
        print("data: %s" % rnew.get_data())
        print("extra data: %s" % rnew.get_extra_data())
        print("nx = %s" % rnew.int_hdr[18])
        print("ny = %s" % rnew.int_hdr[17])
        # BUGFIX: use a context manager so the output file is flushed
        # and closed (previously opened and never closed).
        with open("recdata0.txt", "w") as rdata:
            for value in rnew.get_data():
                rdata.write("%s\n" % value)
<reponame>paquet-a/netptune_concon # # Copyright (c) 2019, Neptune Labs Sp. z o.o. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import base64 import os import sys import threading import traceback import pandas as pd import six from pandas.errors import EmptyDataError from neptune.api_exceptions import ExperimentAlreadyFinished from neptune.exceptions import FileNotFound, InvalidChannelValue, NoChannelValue, NoExperimentContext from neptune.internal.channels.channels import ChannelValue from neptune.internal.channels.channels_values_sender import ChannelsValuesSender from neptune.internal.storage.storage_utils import upload_to_storage from neptune.internal.utils.image import get_image_content from neptune.utils import align_channels_on_x, is_float class Experiment(object): """It contains all the information about a Neptune Experiment This class lets you extract experiment by, short experiment id, names of all the channels, system properties and other properties, parameters, numerical channel values, information about the hardware utilization during the experiment Args: client(`neptune.Client'): Client object leaderboard_entry(`neptune.model.LeaderboardEntry`): LeaderboardEntry object Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. 
>>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] >>> experiment Experiment(SAL-1609) Todo: Column sorting """ def __init__(self, client, _id, internal_id, project_full_id): self._client = client self._id = _id self._internal_id = internal_id self._project_full_id = project_full_id self._channels_values_sender = ChannelsValuesSender(self) self._ping_thread = None self._hardware_metric_thread = None self._aborting_thread = None self._stdout_uploader = None self._stderr_uploader = None self._uncaught_exception_handler = sys.__excepthook__ @property def id(self): """ Experiment short id Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get experiment short id. >>> experiment.id 'SAL-1609' """ return self._id @property def name(self): return self._client.get_experiment(self._internal_id).name @property def state(self): return self._client.get_experiment(self._internal_id).state @property def internal_id(self): return self._internal_id def get_system_properties(self): """Retrieve system properties like owner, times of creation and completion, worker type, etc. Returns: dict: A dictionary mapping a property name to value. Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. 
>>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get experiment system properties. >>> experiment.get_system_properties Note: The list of supported system properties may change over time. """ experiment = self._client.get_experiment(self._internal_id) return { 'id': experiment.shortId, 'name': experiment.name, 'created': experiment.timeOfCreation, 'finished': experiment.timeOfCompletion, 'running_time': experiment.runningTime, 'owner': experiment.owner, 'size': experiment.storageSize, 'tags': experiment.tags, 'notes': experiment.description } def get_tags(self): return self._client.get_experiment(self._internal_id).tags def append_tag(self, tag): self._client.update_tags(experiment=self, tags_to_add=[tag], tags_to_delete=[]) def remove_tag(self, tag): self._client.update_tags(experiment=self, tags_to_add=[], tags_to_delete=[tag]) def get_channels(self): """Retrieve all channel names along with their representations for this experiment. Returns: dict: A dictionary mapping a channel name to channel. Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get experiment channels. 
>>> experiment.get_channels() """ experiment = self._client.get_experiment(self.internal_id) channels_last_values_by_name = dict((ch.channelName, ch) for ch in experiment.channelsLastValues) channels = dict() for ch in experiment.channels: last_value = channels_last_values_by_name.get(ch.name, None) if last_value: ch.x = last_value.x ch.y = last_value.y else: ch.x = None ch.y = None channels[ch.name] = ch return channels def upload_source_files(self, source_files): """ Raises: `StorageLimitReached`: When storage limit in the project has been reached. """ files_list = [] for source_file in source_files: if not os.path.exists(source_file): raise FileNotFound(source_file) files_list.append((os.path.abspath(source_file), source_file)) upload_to_storage(files_list=files_list, upload_api_fun=self._client.upload_experiment_source, upload_tar_api_fun=self._client.extract_experiment_source, experiment=self) def send_metric(self, channel_name, x, y=None, timestamp=None): x, y = self._get_valid_x_y(x, y) if not is_float(y): raise InvalidChannelValue(expected_type='float', actual_type=type(y).__name__) value = ChannelValue(x, dict(numeric_value=y), timestamp) self._channels_values_sender.send(channel_name, 'numeric', value) def send_text(self, channel_name, x, y=None, timestamp=None): x, y = self._get_valid_x_y(x, y) if not isinstance(y, six.string_types): raise InvalidChannelValue(expected_type='str', actual_type=type(y).__name__) value = ChannelValue(x, dict(text_value=y), timestamp) self._channels_values_sender.send(channel_name, 'text', value) def send_image(self, channel_name, x, y=None, name=None, description=None, timestamp=None): x, y = self._get_valid_x_y(x, y) input_image = dict( name=name, description=description, data=base64.b64encode(get_image_content(y)).decode('utf-8') ) value = ChannelValue(x, dict(image_value=input_image), timestamp) self._channels_values_sender.send(channel_name, 'image', value) def send_artifact(self, artifact): """ Raises: 
`StorageLimitReached`: When storage limit in the project has been reached. """ if not os.path.exists(artifact): raise FileNotFound(artifact) upload_to_storage(files_list=[(os.path.abspath(artifact), artifact)], upload_api_fun=self._client.upload_experiment_output, upload_tar_api_fun=self._client.extract_experiment_output, experiment=self) def send_graph(self, graph_id, value): """Upload a tensorflow graph for this experiment. Args: graph_id: a string UUID identifying the graph (managed by user) value: a string representation of Tensorflow graph Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Send graph to experiment. >>> import uuid >>> experiment.send_graph(str(uuid.uuid4()), str("tf.GraphDef instance")) """ self._client.put_tensorflow_graph(self, graph_id, value) def get_parameters(self): """Retrieve parameters for this experiment. Returns: dict: A dictionary mapping a parameter name to value. Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get experiment parameters. >>> experiment.get_parameters() """ experiment = self._client.get_experiment(self.internal_id) return dict((p.name, p.value) for p in experiment.parameters) def get_properties(self): """Retrieve user-defined properties for this experiment. Returns: dict: A dictionary mapping a property key to value. Examples: Instantiate a session. 
>>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get experiment properties. >>> experiment.get_properties """ experiment = self._client.get_experiment(self.internal_id) return dict((p.key, p.value) for p in experiment.properties) def set_property(self, key, value): properties = {p.key: p.value for p in self._client.get_experiment(self.internal_id).properties} properties[key] = value return self._client.update_experiment( experiment=self, properties=properties ) def remove_property(self, key): properties = {p.key: p.value for p in self._client.get_experiment(self.internal_id).properties} del properties[key] return self._client.update_experiment( experiment=self, properties=properties ) def get_hardware_utilization(self): """Retrieve RAM, CPU and GPU utilization throughout the experiment. The returned DataFrame contains 2 columns (x_*, y_*) for each of: RAM, CPU and each GPU. The x_ column contains the time (in milliseconds) from the experiment start, while the y_ column contains the value of the appropriate metric. RAM and GPU memory usage is returned in gigabytes. CPU and GPU utilization is returned as a percentage (0-100). E.g. For an experiment using a single GPU, this method will return a DataFrame of the following columns: x_ram, y_ram, x_cpu, y_cpu, x_gpu_util_0, y_gpu_util_0, x_gpu_mem_0, y_gpu_mem_0 The following values denote that after 3 seconds, the experiment used 16.7 GB of RAM. x_ram, y_ram = 3000, 16.7 The returned DataFrame may contain NaNs if one of the metrics has more values than others. Returns: `pandas.DataFrame`: Dataframe containing the hardware utilization metrics throughout the experiment. Examples: Instantiate a session. 
>>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> experiment = experiments[0] Get hardware utilization channels. >>> experiment.get_hardware_utilization """ metrics_csv = self._client.get_metrics_csv(self) try: return pd.read_csv(metrics_csv) except EmptyDataError: return pd.DataFrame() def get_numeric_channels_values(self, *channel_names): """ Retrieve values of specified numeric channels. The returned DataFrame contains 1 additional column x along with the requested channels. E.g. get_numeric_channels_values('loss', 'auc') will return a DataFrame of the following structure: x, loss, auc The returned DataFrame may contain NaNs if one of the channels has more values than others. Args: *channel_names: variable length list of names of the channels to retrieve values for. Returns: `pandas.DataFrame`: Dataframe containing the values for the requested numerical channels. Examples: Instantiate a session. >>> from neptune.sessions import Session >>> session = Session() Fetch a project and a list of experiments. >>> project = session.get_projects('neptune-ml')['neptune-ml/Salt-Detection'] >>> experiments = project.get_experiments(state=['aborted'], owner=['neyo'], min_running_time=100000) Get an experiment instance. >>> exp =
# <filename>code/RB_tree.py
import Color
import node


class RBTree(object):
    """A red-black tree built from `node.RBNode` instances, using a
    shared sentinel `nil` node for the leaves."""

    RBNode = node.RBNode

    def __init__(self, new_node=RBNode):
        """setters"""
        self._nil = new_node(data=None)  # leaves are nil and always black
        self._root = self.nil  # initially the root is the nil sentinel
        self._new_node = new_node  # callable that creates a node

    """getters"""
    @property
    def root(self):
        return self._root

    @property
    def nil(self):
        return self._nil

    def _grandfather(self, node):  # return the node's grandparent
        if node != self.nil and node.parent != self.nil:
            return node.parent.parent
        else:
            return self.nil  # mb None

    def _uncle(self, node):  # return the node's uncle
        g = self._grandfather(node)
        if g == self.nil:
            return self.nil
        else:
            if node.parent == g.leftChild:
                return g.rightChild
            else:
                return g.leftChild

    def _brother(self, node):  # return the node's sibling (left or right)
        assert node.parent != self.nil
        if node == node.parent.leftChild:
            return node.parent.rightChild
        else:
            return node.parent.leftChild

    def min_data(self, node=None):  # find the minimum in the subtree of node x
        if node is None:
            node = self.root
        while node.leftChild != self.nil:
            node = node.leftChild
        return node.data

    def max_data(self, node=None):  # find the maximum in the subtree of node x
        if node is None:
            node = self.root
        while node.rightChild != self.nil:
            node = node.rightChild
        # NOTE(review): returns the node itself, while min_data returns
        # node.data — asymmetric with its own name; likely a bug, check
        # callers before changing.
        return node

    def delete_data(self, data):  # run deletion for the node holding `data`
        node = self.find(data)
        if node == self.nil:
            return False
        self.delete_node(node)
        return True

    def delete_node(self, node):
        # Standard RB delete: y_node is the node physically removed
        # (node itself, or its in-order successor), x is y_node's child
        # that is spliced into its place.
        c = Color.Color()
        if not node or node == self.nil:
            return
        if node.leftChild == self.nil or node.rightChild == self.nil:
            y_node = node
        else:
            y_node = node.rightChild
            while y_node.leftChild != self.nil:
                y_node = y_node.leftChild
        if y_node.leftChild != self.nil:
            x = y_node.leftChild
        else:
            x = y_node.rightChild
        x._parent = y_node.parent
        if y_node.parent:
            if y_node == y_node.parent.leftChild:
                y_node.parent._leftChild = x
            else:
                y_node.parent._rightChild = x
        else:
            self._root = x
        if y_node != 
node: node._data = y_node.data if y_node.color == c.BLACK: self._delete_fix(x) def _delete_fix(self, node): c = Color.Color() while node.color == c.BLACK and node != self.root: b = self._brother(node) if b.color == c.RED: b._color = c.BLACK node.parent._color = c.RED self._turn_left(node.parent) if node == node.parent.leftChild else self._turn_right(node.parent) b = self._brother(node) if b.leftChild.color == c.BLACK and b.rightChild.color == c.BLACK: b._color = c.RED node = node.parent else: if node == node.parent.leftChild: if b.rightChild.color == c.BLACK: b.leftChild._color = c.BLACK b._color = c.RED self._turn_right(b) b = self._brother(node) else: if b.leftChild.color == c.BLACK: b.rightChild._color = c.BLACK b._color = c.RED self._turn_left(b) b = self._brother(node) b._color = node.parent.color node.parent._color = c.BLACK if node == node.parent.leftChild: b.rightChild._color = c.BLACK self._turn_left(node.parent) else: b.leftChild._color = c.BLACK self._turn_right(node.parent) node = self.root node._color = c.BLACK def find(self, data, node=None): # находит узел с параметром data, если такой есть if node is None: node = self.root while node != self.nil and data != node.data: if data < node.data: node = node.leftChild else: node = node.rightChild return node def add_data(self, data): self.add_node(self._new_node(data=data)) def add_node(self, node): # добавление узла node в дерево c = Color.Color() par = self.nil ch = self.root while ch != self.nil: par = ch if node.data < ch.data: ch = ch.leftChild else: ch = ch.rightChild node._parent = par if par == self.nil: self._root = node elif node.data < par.data: par._leftChild = node else: par._rightChild = node node._leftChild = self.nil node._rightChild = self.nil node._color = c.RED self._add_fix(node) def _add_fix(self, node): # восстановление свойств красно-черного дерева c = Color.Color() while node.parent.color: u = self._uncle(node) if u.color: node.parent._color = c.BLACK u._color = c.BLACK 
self._grandfather(node)._color = c.RED node = self._grandfather(node) else: if node.parent == node.parent.parent.leftChild: if node == node.parent.rightChild: node = node.parent self._turn_left(node) node.parent._color = c.BLACK self._grandfather(node)._color = c.RED self._turn_right(self._grandfather(node)) else: if node == node.parent.leftChild: node = node.parent self._turn_right(node) node.parent._color = c.BLACK self._grandfather(node)._color = c.RED self._turn_left(self._grandfather(node)) self.root._color = c.BLACK def tree_black_height(self): node = self.root count = 0 while node is not None: if not node.color or node == self.nil: count += 1 node = node.leftChild return count def tree_height(self, node=None, l_height=0, r_height=0): if node is None: node = self.root if node.leftChild is None and node.rightChild is None: return 1 else: if node.leftChild is not None: l_height = self.tree_height(node.leftChild, l_height, r_height) if node.rightChild is not None: r_height = self.tree_height(node.rightChild, l_height, r_height) if l_height > r_height: return l_height + 1 else: return r_height + 1 def _turn_left(self, node): # выполнить левый поворот узла ch = node.rightChild node._rightChild = ch.leftChild if ch.leftChild != self.nil: ch.leftChild._parent = node ch._parent = node.parent if node.parent == self.nil: self._root = ch elif node == node.parent.leftChild: node.parent._leftChild = ch else: node.parent._rightChild = ch ch._leftChild = node node._parent = ch def _turn_right(self, node): # выполнить правый поворот узла ch = node.leftChild node._leftChild = ch.rightChild if ch.rightChild != self.nil: ch.rightChild._parent = node ch._parent = node.parent if node.parent == self.nil: self._root = ch elif node == node.parent.rightChild: node.parent._rightChild = ch else: node.parent._leftChild = ch ch._rightChild = node node._parent = ch def check_prop(self): # returns True if RBTree is ok def check(x): if (x.leftChild and not x.rightChild) or (x.rightChild and 
not x.leftChild): return 0, False if not x.leftChild and not x.rightChild and x.color: return 0, False if x.color and x.leftChild and x.rightChild: if x.leftChild.color or x.rightChild.color: return 0, False if x.leftChild and x.rightChild: if x.leftChild != self.nil and x != x.leftChild.parent: return 0, False if x.rightChild != self.nil and x != x.rightChild.parent: return 0, False l_count, l_ok = check(x.leftChild) if not l_ok: return 0, False r_count, r_ok = check(x.rightChild) if not r_ok: return 0, False if l_count != r_count: return 0, False return l_count, True else: return 0, True num_black, is_ok = check(self.root) return is_ok and not self.root.color def save(t, f,): # writing file in a file f.dot def node_c(x): if x.color: return "RED" else: return "BLACK" def writing(x): # BFA pre-order search f.write(" data=\"%s\", color=\"%s\" \t[" % (x, node_c(x))) if x.leftChild != t.nil: f.write("leftChild = \"%s\" " % (x.leftChild)) if x.rightChild != t.nil: f.write("rightChild = \"%s\"" % (x.rightChild)) f.write("]") f.write("\n") if x.leftChild: if x.leftChild != t.nil: writing(x.leftChild) if x.rightChild: if x.rightChild != t.nil: writing(x.rightChild) f.write("Red black tree" + '\n') writing(t.root) def test_add(t): # Insert datas one by one checking prop datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23] for i, data in enumerate(datas): t.add_data(data) assert t.check_prop() def test_min_max(t): datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23] m_datas = [5, 3, 21, 10, 32] for i, data in enumerate(datas): t.add_data(data) for i, m_data in enumerate(m_datas): if t.find(m_data).data is not None: print("максимум в поддереве узла", m_data, " = ", t.max_data(t.find(m_data))) print("минимум в поддереве узла", m_data, " = ", t.min_data(t.find(m_data))) print("") else: print("нет узла", m_data, "в дереве") print("") def test_find(t): datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23] s_datas = [6, 3, 24, 23, 99, 101] for i, data in enumerate(datas): t.add_data(data) for 
i, s_data in enumerate(s_datas): if t.find(s_data).data is not None: print("data", s_data, "exists") else: print("data", s_data, "is not exist") def test_random_insert(t, s): max_data = 2000 r.seed(2) rand_datas = list(r.SystemRandom().sample(range(max_data), s)) for i, data in enumerate(rand_datas): t.add_data(data) assert t.check_prop() def test_delete(t): datas = [5, 3, 6, 7, 2, 4, 21, 8, 99, 9, 32, 23] ddatas = [3, 21, 7, 32] for i, data in enumerate(datas): t.add_data(data) for i, ddata in enumerate(ddatas): t.delete_data(ddata) for k, data in enumerate(datas): if t.find(data).data is not None: print("%d" % data, end=' ') print("") assert t.check_prop() if '__main__' == __name__: import os import random as r def save_tree(tree, filename): f = open('%s.txt' % filename, 'w') save(tree, f) f.close() os.system('txt %s.txt -T' % filename) r.seed(2) t = RBTree() print("Введите цифру 1, если хотите построить дерево со случайным набором ключей и определить его высоту") print("Введите цифру 2, если хотите построить дерево с заданным набором ключей, чтобы проверить вставку") print("Введите цифру 3, если хотите протестировать удаление узлов") print("Введите цифру 4, если хотите протестировать max и min") print("Введите цифру 5, если хотите протестировать поиск") a = int(input()) if a == 1: for size in range(30, 101, 10): h_1, h_2, hh_1, hh_2, c_1, c_2, c_3, c_4 = 0, 0, 0, 0, 0, 0, 0, 0 for i in range(1000): t = RBTree() test_random_insert(t, size) if i == 0: h_1 = t.tree_height() h_2 = t.tree_black_height() if t.tree_height() == h_1:
the the betweenness centrality in parallel at if the number of nodes in the graph is less than this value it will run in a single thread. The default value is 50 :returns: A dictionary mapping each node index to its betweenness centrality. :rtype: dict """ raise TypeError("Invalid input type %s for graph" % type(graph)) @betweenness_centrality.register(PyDiGraph) def _digraph_betweenness_centrality(graph, normalized=True, endpoints=False, parallel_threshold=50): return digraph_betweenness_centrality( graph, normalized=normalized, endpoints=endpoints, parallel_threshold=parallel_threshold, ) @betweenness_centrality.register(PyGraph) def _graph_betweenness_centrality(graph, normalized=True, endpoints=False, parallel_threshold=50): return graph_betweenness_centrality( graph, normalized=normalized, endpoints=endpoints, parallel_threshold=parallel_threshold, ) @functools.singledispatch def vf2_mapping( first, second, node_matcher=None, edge_matcher=None, id_order=True, subgraph=False, induced=True, call_limit=None, ): """ Return an iterator over all vf2 mappings between two graphs. This funcion will run the vf2 algorithm used from :func:`~retworkx.is_isomorphic` and :func:`~retworkx.is_subgraph_isomorphic` but instead of returning a boolean it will return an iterator over all possible mapping of node ids found from ``first`` to ``second``. If the graphs are not isomorphic then the iterator will be empty. A simple example that retrieves one mapping would be:: graph_a = retworkx.generators.path_graph(3) graph_b = retworkx.generators.path_graph(2) vf2 = retworkx.vf2_mapping(graph_a, graph_b, subgraph=True) try: mapping = next(vf2) except StopIteration: pass :param first: The first graph to find the mapping for :param second: The second graph to find the mapping for :param node_matcher: An optional python callable object that takes 2 positional arguments, one for each node data object in either graph. 
If the return of this function evaluates to True then the nodes passed to it are viewed as matching. :param edge_matcher: A python callable object that takes 2 positional one for each edge data object. If the return of this function evaluates to True then the edges passed to it are viewed as matching. :param bool id_order: If set to ``False`` this function will use a heuristic matching order based on [VF2]_ paper. Otherwise it will default to matching the nodes in order specified by their ids. :param bool subgraph: If set to ``True`` the function will return the subgraph isomorphic found between the graphs. :param bool induced: If set to ``True`` this function will check the existence of a node-induced subgraph of first isomorphic to second graph. Default: ``True``. :param int call_limit: An optional bound on the number of states that VF2 algorithm visits while searching for a solution. If it exceeds this limit, the algorithm will stop. Default: ``None``. :returns: An iterator over dicitonaries of node indices from ``first`` to node indices in ``second`` representing the mapping found. 
:rtype: Iterable[NodeMap] """ raise TypeError("Invalid Input Type %s for graph" % type(first)) @vf2_mapping.register(PyDiGraph) def _digraph_vf2_mapping( first, second, node_matcher=None, edge_matcher=None, id_order=True, subgraph=False, induced=True, call_limit=None, ): return digraph_vf2_mapping( first, second, node_matcher=node_matcher, edge_matcher=edge_matcher, id_order=id_order, subgraph=subgraph, induced=induced, call_limit=call_limit, ) @vf2_mapping.register(PyGraph) def _graph_vf2_mapping( first, second, node_matcher=None, edge_matcher=None, id_order=True, subgraph=False, induced=True, call_limit=None, ): return graph_vf2_mapping( first, second, node_matcher=node_matcher, edge_matcher=edge_matcher, id_order=id_order, subgraph=subgraph, induced=induced, call_limit=call_limit, ) @functools.singledispatch def union( first, second, merge_nodes=False, merge_edges=False, ): """Return a new graph by forming a union from two input graph objects The algorithm in this function operates in three phases: 1. Add all the nodes from ``second`` into ``first``. operates in :math:`\\mathcal{O}(n_2)`, with :math:`n_2` being number of nodes in ``second``. 2. Merge nodes from ``second`` over ``first`` given that: - The ``merge_nodes`` is ``True``. operates in :math:`\\mathcal{O}(n_1 n_2)`, with :math:`n_1` being the number of nodes in ``first`` and :math:`n_2` the number of nodes in ``second`` - The respective node in ``second`` and ``first`` share the same weight/data payload. 3. Adds all the edges from ``second`` to ``first``. If the ``merge_edges`` parameter is ``True`` and the respective edge in ``second`` and ``first`` share the same weight/data payload they will be merged together. :param first: The first graph object :param second: The second graph object :param bool merge_nodes: If set to ``True`` nodes will be merged between ``second`` and ``first`` if the weights are equal. Default: ``False``. 
:param bool merge_edges: If set to ``True`` edges will be merged between ``second`` and ``first`` if the weights are equal. Default: ``False``. :returns: A new graph object that is the union of ``second`` and ``first``. It's worth noting the weight/data payload objects are passed by reference from ``first`` and ``second`` to this new object. :rtype: :class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph` """ raise TypeError("Invalid Input Type %s for graph" % type(first)) @union.register(PyDiGraph) def _digraph_union( first, second, merge_nodes=False, merge_edges=False, ): return digraph_union(first, second, merge_nodes=merge_nodes, merge_edges=merge_edges) @union.register(PyGraph) def _graph_union( first, second, merge_nodes=False, merge_edges=False, ): return graph_union(first, second, merge_nodes=merge_nodes, merge_edges=merge_edges) @functools.singledispatch def tensor_product( first, second, ): """Return a new graph by forming the tensor product from two input graph objects :param first: The first graph object :param second: The second graph object :returns: A new graph object that is the tensor product of ``second`` and ``first``. It's worth noting the weight/data payload objects are passed by reference from ``first`` and ``second`` to this new object. A read-only dictionary of the product of nodes is also returned. The keys are a tuple where the first element is a node of the first graph and the second element is a node of the second graph, and the values are the map of those elements to node indices in the product graph. 
For example:: { (0, 0): 0, (0, 1): 1, } :rtype: Tuple[:class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`, :class:`~retworkx.ProductNodeMap`] """ raise TypeError("Invalid Input Type %s for graph" % type(first)) @tensor_product.register(PyDiGraph) def _digraph_tensor_product( first, second, ): return digraph_tensor_product(first, second) @tensor_product.register(PyGraph) def _graph_tensor_product( first, second, ): return graph_tensor_product(first, second) @functools.singledispatch def cartesian_product( first, second, ): """Return a new graph by forming the cartesian product from two input graph objects :param first: The first graph object :param second: The second graph object :returns: A new graph object that is the union of ``second`` and ``first``. It's worth noting the weight/data payload objects are passed by reference from ``first`` and ``second`` to this new object. A read-only dictionary of the product of nodes is also returned. The keys are a tuple where the first element is a node of the first graph and the second element is a node of the second graph, and the values are the map of those elements to node indices in the product graph. For example:: { (0, 0): 0, (0, 1): 1, } :rtype: Tuple[:class:`~retworkx.PyGraph` or :class:`~retworkx.PyDiGraph`, :class:`~retworkx.ProductNodeMap`] """ raise TypeError("Invalid Input Type %s for graph" % type(first)) @cartesian_product.register(PyDiGraph) def _digraph_cartesian_product( first, second, ): return digraph_cartesian_product(first, second) @cartesian_product.register(PyGraph) def _graph_cartesian_product( first, second, ): return graph_cartesian_product(first, second) @functools.singledispatch def bfs_search(graph, source, visitor): """Breadth-first traversal of a directed/undirected graph. The pseudo-code for the BFS algorithm is listed below, with the annotated event points, for which the given visitor object will be called with the appropriate method. 
:: BFS(G, s) for each vertex u in V color[u] := WHITE end for color[s] := GRAY EQUEUE(Q, s) discover vertex s while (Q != Ø) u := DEQUEUE(Q) for each vertex v in Adj[u] (u,v) is a tree edge if (color[v] = WHITE) color[v] = GRAY else (u,v) is a non - tree edge if (color[v] = GRAY) (u,v) has a gray target ... else if (color[v] = BLACK) (u,v) has a black target ... end for color[u] := BLACK finish vertex u end while If an exception is raised inside the callback function, the graph traversal will be stopped immediately. You can exploit this to exit early by raising a :class:`~retworkx.visit.StopSearch` exception, in which case the search function will return but without raising back the exception. You can also prune part of the search tree by raising :class:`~retworkx.visit.PruneSearch`. In the following example we keep track of the tree edges: .. jupyter-execute:: import retworkx from retworkx.visit import BFSVisitor class TreeEdgesRecorder(BFSVisitor): def __init__(self): self.edges = [] def tree_edge(self, edge): self.edges.append(edge) graph = retworkx.PyDiGraph() graph.extend_from_edge_list([(1, 3), (0, 1), (2, 1), (0, 2)]) vis = TreeEdgesRecorder() retworkx.bfs_search(graph, [0], vis) print('Tree edges:', vis.edges) .. note:: Graph can **not** be mutated while traversing. :param graph: The graph to be used. This can be a :class:`~retworkx.PyGraph` or a :class:`~retworkx.PyDiGraph` :param List[int] source: An optional list of node indices to use as the starting nodes for the breadth-first search. If this is not specified then a source will be chosen arbitrarly
logger.debug("%r._before_start()", self) self.before_start() def _process_started(self, protocol): """ Called by :meth:`.ProcessProtocol.connectionMade` when a process has started running. """ logger.debug("%r._process_started(%r)", self, protocol) logpool.log(self.uuid, "internal", "Started %r" % protocol, protocol.pid) process_data = self.processes[protocol.uuid] process_data.started.callback(protocol) if not self.stop_called: self.process_started(protocol) else: self.stop() def _process_stopped(self, protocol, reason): """ Internal implementation for :meth:`process_stopped`. If ``--capture-process-output`` was set when the agent was launched all standard output from the process will be sent to the stdout of the agent itself. In all other cases we send the data to the logger pool so it can be stored in a file without blocking the event loop. """ logger.info("%r stopped (code: %r)", protocol, reason.value.exitCode) process_data = self.processes.pop(protocol.uuid) try: successful = self.is_successful(protocol, reason) except Exception as e: message = ("Exception caught from is_successful(): %r. " "Assuming not successful." % e) logger.error(message) self._log(message) successful = False if successful: logpool.log( self.uuid, "internal", "Process has terminated successfully, code %s" % reason.value.exitCode, protocol.pid) else: self.failed_processes.add((protocol, reason)) logpool.log( self.uuid, "internal", "Process has not terminated successfully, code %s" % reason.value.exitCode, protocol.pid) try: self.process_stopped(protocol, reason) except Exception as e: logger.error("Exception caught from process_stopped: %s", e) process_data.stopped.callback(reason) # If there are no processes running at this point, we assume # the assignment is finished if len(self.processes) == 0: self.stopped_deferred.callback(None) return succeed([]) def _spawn_process(self, command): """ Starts one child process using input from :meth:`command_data`. 
Job types should never start child processes through any other means. The only exception to this rule is code that resides in :meth:`prepare_for_job`, which should use :meth:`spawn_persistent_job_process` instead. :raises OSError: Raised if `working_dir` was provided but the provided path does not exist :raises EnvironmentError: Raised if an attempt is made to change the user or group without root access. This error will only occur on Linux or Unix platforms. """ process_protocol = self.PROCESS_PROTOCOL(self) process_protocol.id = getattr(command, "id", None) if not isinstance(process_protocol, ProcessProtocol): raise TypeError("Expected ProcessProtocol for `protocol`") # The first argument should always be the command name by convention. # Under Windows, this needs to be the whole path, under POSIX only the # basename. if WINDOWS: arguments = [command.command] + list(command.arguments) else: arguments = [basename(command.command)] + list(command.arguments) # WARNING: `env` should always be None to ensure the same operation # of the environment setup across platforms. See Twisted's # documentation for more information on why `env` should be None: # http://twistedmatrix.com/documents/current/api/ # twisted.internet.interfaces.IReactorProcess.spawnProcess.html kwargs = {"args": arguments, "env": None} uid, gid = self.get_uid_gid(command.user, command.group) if uid is not None: kwargs.update(uid=uid) if gid is not None: kwargs.update(gid=gid) # Capture the protocol instance so we can keep track # of the process we're about to spawn. self.processes[process_protocol.uuid] = ProcessData( protocol=process_protocol, started=Deferred(), stopped=Deferred()) return self._spawn_twisted_process(command, process_protocol, kwargs) def _process_output(self, protocol, output, stream): """ Called by :meth:`.ProcessProtocol.outReceived` and :meth:`.ProcessProtocol.errReceived` whenever output is produced by a process. 
This method will wire up the proper calls under the hood to process the output. """ if stream == STDOUT: line_fragments = self._stdout_line_fragments line_handler = self.handle_stdout_line elif stream == STDERR: line_fragments = self._stderr_line_fragments line_handler = self.handle_stderr_line else: raise ValueError("Expected STDOUT or STDERR for `stream`") self.process_output(protocol, output, line_fragments, line_handler) def _has_running_processes(self): """ Internal functionto determine whether the batch represented by this instance still has running child processes. """ for process in self.processes.values(): if process.protocol.running(): return True return False def _register_logfile_on_master(self, log_path): def post_logfile(task, log_path, post_deferred=None, num_retry_errors=0, delay=0): deferred = post_deferred or Deferred() url = "%s/jobs/%s/tasks/%s/attempts/%s/logs/" % ( config["master_api"], self.assignment["job"]["id"], task["id"], task["attempt"]) data = {"identifier": log_path, "agent_id": self.node()["id"]} post_func = partial( post, url, data=data, callback=lambda x: result_callback(task, log_path, deferred, x), errback=lambda x: error_callback(task, log_path, deferred, num_retry_errors, x)) reactor.callLater(delay, post_func) return deferred def result_callback(task, log_path, deferred, response): if 500 <= response.code < 600: delay = http_retry_delay() logger.error( "Server side error while registering log file %s for " "task %s (frame %s) in job %s (id %s), status code: %s. " "Retrying in %s seconds", log_path, task["id"], task["frame"], self.assignment["job"]["title"], self.assignment["job"]["id"], response.code, delay) post_logfile(task, log_path, post_deferred=deferred, delay=delay) # The server will return CONFLICT if we try to register a logfile # twice. elif response.code not in [OK, CONFLICT, CREATED]: # Nothing else we could do about that, this is # a problem on our end. 
logger.error( "Could not register logfile %s for task %s (frame %s) in " "job %s (id %s), status code: %s. This is a client side " "error, giving up.", log_path, task["id"], task["frame"], self.assignment["job"]["title"], self.assignment["job"]["id"], response.code) deferred.errback(None) else: logger.info("Registered logfile %s for task %s on master", log_path, task["id"]) deferred.callback(None) def error_callback(task, log_path, deferred, num_retry_errors, failure_reason): if num_retry_errors > config["broken_connection_max_retry"]: logger.error( "Error while registering logfile %s for task %s on master. " "Maximum number of retries reached. Not retrying the " "request.", log_path, task["id"]) deferred.errback(None) else: if (failure_reason.type in (ResponseNeverReceived, RequestTransmissionFailed)): logger.debug( "Error while registering logfile %s for task %s on " "master: %s, retrying immediately", log_path, task["id"], failure_reason.type.__name__) post_logfile(task, log_path, post_deferred=deferred) else: delay = http_retry_delay() logger.error( "Error while registering logfile %s for task %s on " "master: %r, retrying in %s seconds.", log_path, task["id"], failure_reason, delay) post_logfile(task, log_path, post_deferred=deferred, delay=delay) deferreds = [] for task in self.assignment["tasks"]: deferreds.append(post_logfile(task, log_path)) return DeferredList(deferreds) def _upload_logfile(self): path = join(config["jobtype_task_logs"], self.log_identifier) url = "%s/jobs/%s/tasks/%s/attempts/%s/logs/%s/logfile" % ( config["master_api"], self.assignment["job"]["id"], self.assignment["tasks"][0]["id"], self.assignment["tasks"][0]["attempt"], self.log_identifier) upload_deferred = Deferred() def upload(url, log_identifier, delay=0): logfile = open(path, "rb") if delay != 0: reactor.callLater(delay, upload, url, log_identifier=log_identifier) else: # FIXME persistent=False is a workaround to help with some # problems in unit testing. 
deferred = treq.put(url=url, data=logfile, headers={"Content-Type": ["text/csv"]}, persistent=False) deferred.addCallback(lambda x: result_callback( url, log_identifier, x)) deferred.addErrback(lambda x: error_callback( url, log_identifier, x)) def result_callback(url, log_identifier, response): if 500 <= response.code < 600: delay = http_retry_delay() logger.error( "Server side error while uploading log file %s, " "status code: %s. Retrying. in %s seconds", log_identifier, response.code, delay) upload(url, log_identifier, delay=delay) elif response.code not in [OK, CREATED, ACCEPTED]: # Nothing else we could do about that, this is # a problem on our end. logger.error( "Could not upload logfile %s status code: %s. " "This is a client side error, giving up.", log_identifier, response.code) try: upload_deferred.errback(ValueError( "Bad return code on uploading logfile: %s" % response.code)) except Exception as e: logger.error( "Caught exception calling upload_deferred.errback: %s", e) else: logger.info("Uploaded logfile %s for to master", log_identifier) try: upload_deferred.callback(None) except Exception as e: logger.error( "Caught exception calling upload_deferred.callback: %s", e) def error_callback(url, log_identifier, failure_reason): if (failure_reason.type in (ResponseNeverReceived, RequestTransmissionFailed)): logger.debug( "Error while uploading logfile %s to master: " "%s, retrying immediately", log_identifier, failure_reason.type.__name__) upload(url, log_identifier) else: delay = http_retry_delay() logger.error( "Error while uploading logfile %s to master: " "%r, retrying in %s seconds.", log_identifier, failure_reason, delay) upload(url, log_identifier, delay=delay) logger.info("Uploading log file %s to master, URL %r", self.log_identifier, url) upload(url, self.log_identifier) return upload_deferred class System(object): # overridden in the job type _tempdirs = NotImplemented uuid = NotImplemented def _get_uid_gid_value(self, value, value_name, 
func_name, module, module_name): """ Internal function which handles both user name and group conversion. """ # This platform does not implement the module if module is NotImplemented: logger.warning( "This platform does not implement the %r module, skipping " "%s()", module_name, func_name) # Convert a user/group string to an integer elif isinstance(value, STRING_TYPES): try: if module_name == "pwd": return pwd.getpwnam(value).pw_uid elif module_name == "grp": return grp.getgrnam(value).gr_gid else: raise ValueError( "Internal error, failed to get module to use for " "conversion. Was given %r" % module) except KeyError: logger.error( "Failed to convert %s to a %s", value, func_name.split("_")[1]) if not config.get("jobtype_ignore_id_mapping_errors"): raise # Verify that the provided user/group string is real elif isinstance(value, INTEGER_TYPES): try: if module_name == "pwd": pwd.getpwuid(value) elif module_name == "grp": grp.getgrgid(value) else: raise ValueError( "Internal error, failed to get module to use for " "conversion. Was given %r" % module) # Seems to check out, return the original value return value except KeyError: logger.error( "%s %s does not seem to exist", value_name, value) if not config.get("jobtype_ignore_id_mapping_errors"): raise else: raise TypeError( "Expected an integer or string for `%s`" % value_name) def _remove_directories(self, directories, retry_on_exit=True): """ Removes multiple multiple directories at once,
TypeError: print('Код после ошибки') # Вывод: Код после ошибки - была обработана первая ошибка. Следующая строка в блоке Try print(my_var) не выполняется. # Если мы поменяем местами принты, будет выведен NameError has happend! потому что вторая строка в коде не срабатывает. # После того, как произошла ошибка, будет выведен соответствующий типу ошибки блок except # и затем идет продолжение выполнения кода. # Рассмотрим это на примере реальной ф-и. # У нас есть словарь, в котором содержатся какие-то данные о пользователе user_dictionary = {'first_name': 'Jack', 'last_name': 'White', 'age': 24} # Выводим данные из словаря # print(user_dictionary['first_name']) # но, если мы попробуем сделать то же самое, но по отсутствующему ключу # print(user_dictionary['name']) # получим сообщение об ошибке: KeyError: 'name' - ошибка по ключу. Такого ключа нет. # Но, если мы получаем это значение при помощи ф-и get(), которая существует для словаря # print(user_dictionary.get('first_name')) # Jack # print(user_dictionary.get('ame')) # None # При использовании ф-и get() в случае ошибки получаем None, То есть, мы не получаем ошибку # В этом методе ошибки обрабатываются. # Напишем похожую ф-ю с обработкой ошибки. 
# C помощью этой ф-и будем получать значения из словаря по ключу # def get_dict_value(dict, key): # будем передавать ф-и два параметра: dict и key # return dict[key] # вызываем эту ф-ю для словаря user_dictionary # print(get_dict_value(user_dictionary, 'age')) # 24 # Если мы передадим что-то не валидное, то получим ошибку # print(get_dict_value(user_dictionary, 'a')) # KeyError: 'a' # И мы можем обработать это при помощи блока try def get_dict_value(dict, key): ''' # Если ф-я не получает нужный ключ, то будет передано None :param dict: :param key: :return: ''' try: return dict[key] except KeyError: # в этом случае возвращаем значение None return None print(get_dict_value(user_dictionary, 'age')) # 24 print(get_dict_value(user_dictionary, 'a')) # KeyError: 'a' # сделаем еще один вызов, чтобы проверить, что код работает после вывода ошибки print(get_dict_value(user_dictionary, 'first_name')) # получаем Jack # Мы обработали ситуацию. И теперь при ошибке никакого сообщения не выводится, а ф-я возвращает значение None # И мы указали это в спецификации к этой ф-и # Блок try - except самая короткая запись. # Есть расширени для этого блока с помощью дополнений else и finally # Рассмотрим пример. number = input('Введите число') # print(number / 2) # здесь происходит математическая операция, что требует два объекта типа int, # но операнд number имеет тип 'str',так как ф-я input() всегда возвращает строку. Делить строку не чаисло не допустимо, # поэтому возвращается ошибка. Эту ошибку можно исправить, если привести number к типу int: int(number) # Но, может возникнуть ситуация, когда введенная строка не может быть приведена к типу int. 
Допустим, ввели строку # В этом случае получим ошибку ValueError, так как строка не может быть приведена к типу int # Здесь приходит на помощью блок try - except try: print(int(number) / 2) except: print('Вы должны ввести число!') # дальше мы можем указать (не обязательно) блок else else: print('Блок else срабатывает, когда в коде нет ошибки и блок except не срабатывает') finally: print('Финальный блок выполняется в любой случае, даже если была возвращена ошибка.') # Как это использовать? while True: # создаем бесконечный цикл, который будет продолжаться, пока пользователь не введет число try: number = int(input('Введите число!')) print(number / 2) except: print('Вы должны ввести число!') else: print('Все верно!') break # здесь выходим из бесконечного цикла при условии, что пользователь ввел число finally: # в этом блоке можем делать операции, которые необходимы в любом случае, есть ошибка или нет. # например, закрывать файл, который был открыт на этапе try print('Финальный блок выполняется в любой случае, даже если была возвращена ошибка.') print('Следующий код после блока try - finally') try: # выполняет код внутри своего тела и натыкается на исключение k = 1 / 0 # если возникает какое-то исключение, мы должны знать название исключения, которое возникло except ArithmeticError: # названия можно указывать в самом общем виде. # Чем выше название, тем менее точно мы можем сказать, что произошло # В данном случае указываем, что это ArithmeticError - арефметическая ошибка. k = 0 # при получении ошибки # дальше менее используемые ветви програрммы else и finally else: # выполняется в том случае, если не было исключений print('All right') finally: # выполняется всегда, не зависимо от сценария работы программы. print('Division complete!') # Сценарий # try: # исполняем какой-то код # except Exception as e: - в переменной 'e' будет находится информация об этом исключении, текст самой ошибки, # обработка исключения. 
# Here we can use the error description captured in variable 'e':
# print some information, store logs, or anything else.
# The remaining clauses:
#   else:    code that runs only when no exception is raised;
#   finally: code that runs always.
#
# Worked example (commented out - it reads from stdin):
# a = int(input('Введите первое число: '))
# b = int(input('Введите второе число: '))
# print(a / b)          # dividing by 0 on purpose raises:
#                       # ZeroDivisionError: division by zero
# Handled with try - except:
# try:
#     a = int(input('Введите первое число: '))
#     b = int(input('Введите второе число: '))
#     print(a / b)
# except ZeroDivisionError as e:
#     print('Так больше не делай: ', e)
#     # prints the exception text: division by zero
# else:
#     print('Все хорошо!')
# finally:
#     print('Это было что-то!')


# Example with two distinct error kinds intercepted.
def divide(x, y):
    """Print x / y, reporting division-by-zero and wrong-type errors.

    Demonstrates multiple except clauses together with else and finally.
    Note: if the try body used ``return`` instead of ``print``, the else
    branch would never run - only finally would execute.
    """
    try:
        print(x / y)
    except TypeError as err:
        print(err)
        print('Вводимые значения долджны быть числами!')
    except ZeroDivisionError as err:
        print(err)
        print('Вы не можете делить на ноль!')
    else:
        print('х был поделен на у')
    finally:
        print('Блок finally выполняется в любом случае')


# Next: a script that reads data from a file.  There is a file 'data'
# holding numbers.  Opening the file:
# f = open('data')   # a real file located in the same directory
# It contains numbers.
# ...so we create a list and read the numbers into it:
# int_arr_list = []
# for line in f:
#     int_arr_list.append(int(line))  # cast to int - the file holds numbers
# print(int_arr_list)  # [123, 1234, 12345]
# But if something that is not a number gets into the file, we get an
# exception:
# ValueError: invalid literal for int() with base 10: 'выаыв\т'
# Handle that error with try - except:

f = open('data')
int_arr_list = []
try:
    for line in f:
        # Cast each line to int - the file is expected to hold numbers.
        int_arr_list.append(int(line))
    print(int_arr_list)  # [123, 1234, 12345]
except ValueError:
    print('У вас там кроме чисел еще что-то непонятное!')
else:
    print('Все прошло хорошо!')
finally:
    f.close()  # a file that was opened must always be closed when done!
print(int_arr_list)

# In the same way you can write your own exceptions.  That is useful when
# the running program has to report some information: you define an
# exception with a message that is shown when a particular event occurs.

# ---------------- 2 SEMI-AUTOMATIC TESTING METHODS -------------------------
# The approach: hand-written stand-alone functions that perform a check
# against criteria chosen by you.
# def test_function():
#     if something:
#         print('Тест пройден!')
#     else:
#         print('Тест не пройден')
# The code is written by hand, by you, while the check itself runs
# automatically - hence "semi-automatic" testing.
# Let's see how this works in practice and write a function to test.
# Передаем список list, функция очищает лист от значений, которые не являются числами и передает данные в виде list def test_function(list_enter): ''' вход: list выход: list, содержащий только числа :return: ''' list_temp = [] i = 0 while (type(list_enter[i]) == int): list_temp.append(list_enter[i]) i += 1 return list_temp list_temp = [1, 2, 3, 'abc'] print(test_function(list_temp)) # Пишем полуавтоматическую ф-ю def function_test_1(): # на вход ничего не принемает list_temp = [1, 2, 3, 'abc'] # ф-я должна вернуть [1, 2, 3] list_out = test_function(list_temp) if list_out == [1, 2, 3]: print('Тест 1 пройден!') else: print('Тест 1 не пройден!') function_test_1() # Запускаем, проверяем - Тест 1 пройден! # Теперь в той же ф-и допустим ошибку def function_test_2(): list_temp = [1, 2, 3, 'abc', 4] # Добавляем 4 и ф-я должна вернуть [1, 2, 3, 4] list_out = test_function(list_temp) if list_out == [1, 2, 3, 4]: print('Тест 2 пройден!') else: print('Тест 2 не пройден!') function_test_2() # Запускаем, проверяем - Тест 2 не пройден! # Почему не правильно? Потому что мы в ней использовали цикл while, который, выполняя условие, будет работать до # первого непопадания и затем прекращает работу. # Поэтому, переделываем на цикл for def test_function_2(list_in): ''' вход: list выход: list, содержащий только числа :return: ''' list_temp = [] i = 0 for i in range(len(list_in)): if type(list_in[i]) == int: list_temp.append(list_in[i]) return list_temp # и снова запустим тест def function_test_3(): list_temp = [1, 2, 3, 'abc', 4] # Добавляем 4 и ф-я должна вернуть
from os import kill import pygame import random from collections import deque import sys import grequests from sprites import (MasterSprite, Ship2, Ship3, Alien, Missile, BombPowerup, DistPowerup, ShieldPowerup, DoublemissilePowerup, Explosion, Siney, Spikey, Fasty, Roundy, Crawly) from database import Database from load import load_image, load_sound, load_music if not pygame.mixer: print('Warning, sound disabled') if not pygame.font: print('Warning, fonts disabled') # BLUE = (0, 0, 255) RED = (255, 0, 0) BLACK= (0, 0, 0) WHITE= (255, 255, 255) GREEN= (0, 255, 0) YELLOW = (255, 255, 0) url = "http://osspcshooting.shop" class Button: def __init__(self, gameDisplay, img_in, x, y, width, height, img_act, x_act, y_act, action = None): self.lvl_size = 0 mouse = pygame.mouse.get_pos() click = pygame.mouse.get_pressed() if x + width > mouse[0] > x and y + height > mouse[1] > y: gameDisplay.blit(img_act,(x_act, y_act)) if click[0] and action == 'quitgame': pygame.quit() sys.exit() elif click[0] and action == 'mode_one': self.lvl_size = -1 elif click[0] and action == 'shooting_game': self.lvl_size = -2 else: gameDisplay.blit(img_in,(x,y)) class Keyboard(object): keys = {pygame.K_a: 'A', pygame.K_b: 'B', pygame.K_c: 'C', pygame.K_d: 'D', pygame.K_e: 'E', pygame.K_f: 'F', pygame.K_g: 'G', pygame.K_h: 'H', pygame.K_i: 'I', pygame.K_j: 'J', pygame.K_k: 'K', pygame.K_l: 'L', pygame.K_m: 'M', pygame.K_n: 'N', pygame.K_o: 'O', pygame.K_p: 'P', pygame.K_q: 'Q', pygame.K_r: 'R', pygame.K_s: 'S', pygame.K_t: 'T', pygame.K_u: 'U', pygame.K_v: 'V', pygame.K_w: 'W', pygame.K_x: 'X', pygame.K_y: 'Y', pygame.K_z: 'Z'} def main(scr, level, id, language): scr_size, level_size = scr, level user_size = round(scr_size / level_size) id = id language = language main_lvl_size = 2 mode1_lvl_size = 3 mode2_lvl_size = 1.6 class size : x_background_ratio = 2 x_background = scr_size*x_background_ratio speed = scr_size*0.004 background = scr_size*4 backgroundLoc = scr_size*3 star_seq = 
round(scr_size*0.06) star_s = round(scr_size*0.004) star_l = round(scr_size*0.01) font_eng = round(scr_size*0.065) font_kor = round(scr_size*0.040) toppos = scr_size*0.2 ratio = scr_size*0.002 middletoppos = scr_size*0.35 topendpos = scr_size*0.15 middlepos = scr_size*0.5 achievement = scr_size/3000 achievementpos = scr_size*0.25 hi_achievement = scr_size*0.0001 hi_achievementx = scr_size*0.3 hi_achievementx2 = scr_size*0.35 hi_achievementy = scr_size*0.16 hi_achievementy_seq = scr_size*0.043 selectitemposx = scr_size*0.2 selectitemposy = scr_size*0.5 button1pos_1 = round(x_background*0.08) button2pos_1 = round(x_background*0.43) button3pos_1 = round(x_background*0.86) buttonpos_2 = round(scr_size*0.9) buttonpos_3 = round(scr_size*0.25) buttonpos_4 = round(scr_size*0.1) button1pos_1_ad = round(x_background*0.07) button2pos_1_ad = round(x_background*0.42) button3pos_1_ad = round(x_background*0.85) button_ad = round(scr_size*0.896) lifex = scr_size * 0.80 lifey = scr_size * 0.01 def kill_alien(alien, aliensLeftThisWave, kill_count, score) : aliensLeftThisWave -= 1 kill_count += 1 #score differentiation by Alien color #wave1 aliens if alien.pType == 'green' or alien.pType == 'orange': score += 1 #wave2 alien elif alien.pType == 'white': score += 2 #wave3 alien elif alien.pType == 'red': score += 4 #wave4 alien elif alien.pType == 'yellow': score += 8 return aliensLeftThisWave, kill_count, score def background_update(screen, background, backgroundLoc) : screen.blit( background, (0, 0), area=pygame.Rect( 0, backgroundLoc, size.x_background, scr_size)) backgroundLoc -= speed if backgroundLoc - speed <= speed: backgroundLoc = size.backgroundLoc return screen, background, backgroundLoc # 인게임에서 배경색으로 플레이어 영역 구분 def background_update_half(screen, background, backgroundLoc) : screen.blit( background, (0, 0), area=pygame.Rect( 0, backgroundLoc, size.x_background, scr_size)) screen.fill((80, 20, 30, 125),(0, 0, screen.get_width()//size.x_background_ratio, screen.get_height()), 
special_flags = 1) # special_flags = 3 : 별 색깔만 바뀜 backgroundLoc -= speed if backgroundLoc - speed <= speed: backgroundLoc = size.backgroundLoc return screen, background, backgroundLoc def background_update_half_two(screen, background, backgroundLoc) : screen.blit( background, (0, 0), area=pygame.Rect( 0, backgroundLoc, size.x_background, scr_size)) screen.fill((80, 20, 30, 125),(screen.get_width()//size.x_background_ratio, 0, screen.get_width()//size.x_background_ratio, screen.get_height()), special_flags = 1) backgroundLoc -= speed if backgroundLoc - speed <= speed: backgroundLoc = size.backgroundLoc return screen, background, backgroundLoc def set_language(lan) : # 언어 설정 if language == "ENG": #기본 설정 영어 return text_eng_set else : return text_kor_set def beforegame_text_update(language) : if language == "ENG" : return [font.render("PLAYER 1" , 1, WHITE), font.render("PLAYER 2" , 1, WHITE), font.render("START : Press U" , 1, WHITE), font.render("Switch Player : Press L" , 1, WHITE)] else : return [font2.render("플레이어1" , 1, WHITE), font2.render("플레이어2" , 1, WHITE), font2.render("시작버튼: U키" , 1, WHITE), font2.render("플레이어 위치 변경: L키 ", 1, WHITE)] def ingame_text_update(language) : if language == "ENG" : return [font.render("Wave: " + str(wave), 1, WHITE), font.render("Aliens Left: " + str(aliensLeftThisWave), 1, WHITE), font.render("Score: " + str(score), 1, WHITE), font.render("Score: " + str(score2), 1, WHITE), font.render("Bombs: " + str(bombsHeld), 1, WHITE), font.render("Bombs: " + str(bombsHeld2), 1, WHITE), font.render('PLAYER 1 WIN!', 1, WHITE), font.render('PLAYER 2 WIN!', 1, WHITE), font.render('DRAW!', 1, WHITE)] else : return [font2.render("웨이브: " + str(wave), 1, WHITE), font2.render("남은 적: " + str(aliensLeftThisWave), 1, WHITE), font2.render("점수: " + str(score), 1, WHITE), font2.render("점수: " + str(score2), 1, WHITE), font2.render("폭탄: " + str(bombsHeld), 1, WHITE), font2.render("폭탄: " + str(bombsHeld2), 1, WHITE), font2.render("플레이어 1 승!", 1, WHITE), 
font2.render("플레이어 2 승!", 1, WHITE), font2.render('무승부!', 1, WHITE)] direction = {None: (0, 0), pygame.K_UP: (0, -size.speed), pygame.K_DOWN: (0, size.speed), pygame.K_LEFT: (-size.speed, 0), pygame.K_RIGHT: (size.speed, 0)} direction2 = {None: (0, 0), pygame.K_w: (0, -size.speed), pygame.K_s: (0, size.speed), pygame.K_a: (-size.speed, 0), pygame.K_d: (size.speed, 0)} # Initialize everything pygame.mixer.pre_init(11025, -16, 2, 512) pygame.init() screen = pygame.display.set_mode((size.x_background, scr_size), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE) pygame.display.set_caption('Shooting Game') pygame.mouse.set_visible(True) # Create the background which will scroll and loop over a set of different # size stars background = pygame.Surface((size.x_background, size.background)) background = background.convert() background.fill(BLACK) backgroundLoc = size.backgroundLoc finalStars = deque() for y in range(0, size.backgroundLoc, size.star_seq): starsize = random.randint(size.star_s, size.star_l) x = random.randint(0, size.x_background - starsize) if y <= scr_size: finalStars.appendleft((x, y + size.backgroundLoc, starsize)) pygame.draw.rect( background, RED, pygame.Rect(x, y, starsize, starsize)) while finalStars: x, y, starsize = finalStars.pop() pygame.draw.rect( background, RED, pygame.Rect(x, y, starsize, starsize)) # Display the background screen.blit(background, (0, 0)) pygame.display.flip() # Prepare game objects speed = 1.5 MasterSprite.speed = speed alienPeriod = 60 / speed clockTime = 60 # maximum FPS clock = pygame.time.Clock() ship = Ship2() ship2 = Ship3() initialAlienTypes = (Siney, Spikey) powerupTypes = (BombPowerup, ShieldPowerup, DoublemissilePowerup, DistPowerup) # Sprite groups alldrawings = pygame.sprite.Group() allsprites = pygame.sprite.RenderPlain((ship, ship2)) MasterSprite.allsprites = allsprites Alien.pool = pygame.sprite.Group( [alien() for alien in initialAlienTypes for _ in range(5)]) Alien.active = pygame.sprite.Group() 
Missile.pool = pygame.sprite.Group([Missile() for _ in range(10)]) Missile.active = pygame.sprite.Group() Explosion.pool = pygame.sprite.Group([Explosion() for _ in range(10)]) Explosion.active = pygame.sprite.Group() bombs = pygame.sprite.Group() bombs2 = pygame.sprite.Group() powerups = pygame.sprite.Group() # Sounds missile_sound = load_sound('missile.ogg') bomb_sound = load_sound('bomb.ogg') alien_explode_sound = load_sound('alien_explode.ogg') ship_explode_sound = load_sound('ship_explode.ogg') load_music('music_loop.ogg') aliennum = 10 # 아이템 나오는 alien 숫자(aliennum 이상 남은 경우) setaliennum = 10 # 4웨이브마다 초기 웨이브 수 speedup = 0.5 # 웨이브마다 speed += speedup aliennumup = 2 # 4웨이브 주기로 alienthiswave = int(alienthiswave * aliennumup) finalwave = 4 alienPeriod = clockTime // 2 curTime = 0 aliensThisWave, aliensLeftThisWave, Alien.numOffScreen = 10, 10, 10 wave = 1 bombsHeld = 3 doublemissile = False #doublemissile아이템이 지속되는 동안(5초) 미사일이 두배로 발사됨 Itemdouble = False score = 0 bombsHeld2 = 3 doublemissile2 = False Itemdouble2 = False score2 = 0 missilesFired = 0 powerupTime = 1 * clockTime powerupTimeLeft = powerupTime betweenWaveTime = 5 * clockTime betweenWaveCount = betweenWaveTime betweenDoubleTime = 2 * clockTime betweenDoubleCount = betweenDoubleTime betweenDoubleCount2 = betweenDoubleTime font = pygame.font.Font(None, size.font_eng) font2 = pygame.font.SysFont('nanumgothic', size.font_kor) inMenu = True half_tf = True distTime = 2 * clockTime # 2초동안 화면이 안보임 distItem = 0 # 화면 안보이는 시간(distItem = distTime) distItem2 = 0 before_game = True hiScores = Database.getScores() highScoreTexts = [font.render("NAME", 1, RED), font.render("SCORE", 1, RED), font.render("ACCURACY", 1, RED)] highScorePos = [highScoreTexts[0].get_rect( topleft=screen.get_rect().inflate(-size.toppos, -size.toppos).topleft), highScoreTexts[1].get_rect( midtop=screen.get_rect().inflate(-size.toppos, -size.toppos).midtop), highScoreTexts[2].get_rect( topright=screen.get_rect().inflate(-size.toppos, 
-size.toppos).topright)] for hs in hiScores: highScoreTexts.extend([font.render(str(hs[x]), 1, WHITE) for x in range(3)]) highScorePos.extend([highScoreTexts[x].get_rect( topleft=highScorePos[x].bottomleft) for x in range(-3, 0)]) ######## title, titleRect = load_image('title_mode2.png') title = pygame.transform.scale(title, (round(title.get_width()*size.ratio), round(title.get_height()*size.ratio))) titleRect = pygame.Rect(0, 0, title.get_width(), title.get_height()) pause,pauseRect = load_image('pause.png',WHITE) pause = pygame.transform.scale(pause, (round(pause.get_width()*size.ratio), round(pause.get_height()*size.ratio))) pauseRect = pygame.Rect(0, 0, pause.get_width(), pause.get_height()) titleRect.midtop = screen.get_rect().inflate(0, -size.middletoppos).midtop pauseRect.midtop = screen.get_rect().inflate(0, -size.middletoppos).midtop dist, distRect = load_image('black.png', WHITE) dist = pygame.transform.scale(dist, (scr_size, scr_size)) distRect = pygame.Rect(0, 0, dist.get_width(), dist.get_height()) distRect.midtop = screen.get_rect().inflate(0, 0).midtop dist2, distRect2 = load_image('black.png', WHITE) dist2 = pygame.transform.scale(dist2, (scr_size, scr_size)) distRect2 = pygame.Rect(0, 0, dist2.get_width(), dist2.get_height()) distRect2.midtop = screen.get_rect().inflate(0, 0).midtop text_eng_set = [font.render('START GAME', 1, WHITE), font.render('SOUND FX', 1, WHITE), font.render(' ON', 1, RED), font.render(' OFF', 1, RED), font.render('MUSIC', 1, WHITE), font.render(' ON', 1, RED), font.render(' OFF', 1, RED), font.render('QUIT', 1, WHITE), font.render('RESTART', 1, WHITE), font.render('LANGUAGE', 1, WHITE), font.render('GAME OVER', 1, WHITE), font.render('SPEED UP!', 1, RED)] text_kor_set
#!/usr/bin/env python #coding: utf-8 from collections import Counter from util import Document from util import Query import re class Parser: def __init__(self, stopWordsPath="sw.txt"): self.stopWords = self.readStopWords(stopWordsPath) self.cfcCollectionAttrs = [ "PN", # paper number "RN", # doc id in the collection "AN", # super collection id i guess "AU", # authors "TI", # title "SO", # source "MJ", # major subjects "MN", # minor subjects "AB", # abstract when present, or excerpt otherwise "EX", # abstract when present, or excerpt otherwise "RF", # list of references used in the doc "CT", # citation list to the doc ] self.cfcQueryAttrs = [ "QN", # query number "QU", # proper query "NR", # number of relevant docs "RD", # relevant documents ] def initializeLastItem(self, attrList, lastItem) : """ Helper method, to reinitialize a dictionary containing the data from a CFC collection document or query. Used by the file parsers param doc: a dict. return: the reinitialized dict. """ for attr in attrList: lastItem[attr] = '' lastItem["lastAttr"] = '' return lastItem def parseCFCFile(self, path, regex, lastItemAttrs, treatLastItemFunction): """ CFC Collection specific file parser. It's a internal generic file parser, users should use the parseFile or parseQueryFile methods instead of this one. If it fails to open the file, does not attempt to treat the exception. param path: string containig the path to the file to parse. param regex: a string containing a regex to separate attributes and content. The regex must contain a named attribute called "attr" and another called "content". param lastItemAttrs: a list containing the attributes of the items present in the file for usel of self.initializeLastItem method. param treatLastItemFunction: a function to be called when we finish parsing an item from the path. The function should receive a dict containing the data of the file, and return a result to be yielded by this method. 
yield: results of treatLastItemFunction for each item in the file on the param path. """ fin = open(path) # helper funcion to reset the dict used for parsing the fin. Last doc # holds the temporary data of the current document being parsed lastItem = self.initializeLastItem(lastItemAttrs, {}) for line in fin: line = line.strip() # if there's content in the line we haven't finished parsing a doc if line and fin: # add the content of the line to the correct attr in the # lastItem dict lastItem = self.parseLine(line, lastItem, regex) # else we finished reading a doc else: if self.isEmptyItem(lastItem): continue result = treatLastItemFunction(lastItem) lastItem = self.initializeLastItem(lastItemAttrs, lastItem) yield result fin.close() def parseFile(self, path): """ Wrapper method for the self.parseCFCFile method, for parsing the proper file containng the documents from the CFC collection. Does not treat the exception that may be raised when opening the file in the path. param path: string containing the path to the file. yield: each query found in the file, the returned objects are tuples of the kind (util.Document, collections.Counter). The counter is a dict with word keys and frequency values. """ print("Processing file: {}".format(path)) # regex for separating the attributes of the document from content regex = r"^((?P<attr>(PN|RN|AN|AU|TI|SO|MJ|MN|AB|EX|RF|CT))\s+)?(?P<content>(.*\w+.*)*)" # attrs present in the cfc collection documents attrs = self.cfcCollectionAttrs # helper function to deal with the parsed data. Transforms the data # parsed in a tpuple of util.Document object and a Counter with the # frequency of the words in the document function = self.treatLastDoc for result in self.parseCFCFile(path, regex, attrs, function): yield result def parseLine(self, line, lastItem, regex): """ Parse a single line of a CFC file, adding the content of the line to the last seen attribute. The regex should have a named field called "attr" and another "content". 
If an attr is found in the line, updates a "lastItem" entry in the lastItem dict, with the attr found. param line: a string containing the line to be parsed. param lastItem: a dict that will contain the temporary data of the item being parsed. param regex: a string containing a regex to parse the line. Must have a named fields "attr" and "content". return: the param lastItem dict, updated with the param line. """ assert type(lastItem) == dict sep = re.compile(regex) # separate a possible attribute from content, with a regex match = sep.match(line) assert match # groups named in the sep regex attr = match.group("attr") content = match.group("content") # in the case there's an attribute in the line, we know we have # finished the last attribute we have seen, otherwise we append to # the last attribute seen if attr: lastItem["lastAttr"] = attr lastAttr = lastItem["lastAttr"] # assert lastAttr # buggy because of strange ^Z lines in the end of some files # add the content of the line to the lastAttr seen if lastAttr: lastItem[lastAttr] = (' '.join([lastItem[lastAttr], content.strip()])).strip() return lastItem def parseQueryFile(self, path): """ Wrapper method for the self.parseCFCFile method, for parsing the query file from the CFC collection. Does not treat the exception that may be raised when opening the file in the path. param path: string containing the path to the file. yield: each query found in the file, the returned objects are util.Query objects. 
""" # regex for separating the attributes from the content regex = r"^\s*(?P<attr>QN|QU|NR|RD)?\s*(?P<content>(.*\w+.*)*)" # list of attributes present in the cfc query file attrs = self.cfcQueryAttrs # helper function that deals with the data parsed and transforms it on # util.Query objects function = self.treatLastQuery for result in self.parseCFCFile(path, regex, attrs, function): yield result def tokenize(self, string, regex=r"[a-zA-Z']+"): """ Get a list with the words in the string, while also removing the stop words defined in the creation of the class. param string: string with the content to be tokenized. param regex: string containing a regex of what is considered a word. return: a list of strings containing the non stop words words, from the string param. """ # regex for separating the words in the content tokenizer = re.compile(regex) # get the words that match the regex and set them to lower case words = [word.lower() for word in tokenizer.findall(string)] # removal of the stop words defined in the __init__ method from the # list of words for sw in self.stopWords.intersection(words): while sw in words: words.remove(sw) return words def readStopWords(self, path): """ Used in the __init__ method to load the stop words from a file. If the file is empty returns an empty set. The file must contain words separated by white space characters, and must be lower case. If it fails to open the file the exception is not handled. param path: string containing the path to the file containg the stop words. 
return: a set containig the stop words from the file """ fin = open(path) # place the stop words in a set for faster access sws = set() for line in fin: line = line.strip() for word in line.split(): sws.add(word.lower()) fin.close() return sws def treatLastDoc(self, lastDoc): """ Helper method that transforms the data in the lastDoc dict into a tuple of util.Document object and a Counter containing the frequencies of the words in the document param lastDoc: a dict containing the data parsed. return: a tuple(util.Document, collections.Counter). The counter is a dict with word keys and frequency values. """ total = Counter() # the list of relevant attributes to tokenize. Tokenize also # removes stop words defined in the init method relevant = ["TI", "AB", "EX", "MJ", "MN"] for attr in relevant: content = lastDoc[attr] assert type(content) == str words = self.tokenize(content) counter = Counter(words) total += counter # form the Document object return docId = int(lastDoc["RN"]) # get the year of publishment regex = r"(?P<year>\d{2})(?P<idInYear>\d{3})" sep = re.compile(regex) match = sep.match(lastDoc["PN"]) year = int(match.group("year")) title = lastDoc["TI"] authors = lastDoc["AU"] tempNorm = 0 # irrelevant norm to be udated in the future doc = Document(docId,
import os import sys import time import math import torch.nn.functional as F from datetime import datetime import random import logging from collections import OrderedDict import numpy as np import cv2 import torch from torchvision.utils import make_grid from shutil import get_terminal_size import yaml try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper def OrderedYaml(): '''yaml orderedDict support''' _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG def dict_representer(dumper, data): return dumper.represent_dict(data.items()) def dict_constructor(loader, node): return OrderedDict(loader.construct_pairs(node)) Dumper.add_representer(OrderedDict, dict_representer) Loader.add_constructor(_mapping_tag, dict_constructor) return Loader, Dumper #################### # miscellaneous #################### def get_timestamp(): return datetime.now().strftime('%y%m%d-%H%M%S') def mkdir(path): if not os.path.exists(path): os.makedirs(path) def mkdirs(paths): if isinstance(paths, str): mkdir(paths) else: for path in paths: mkdir(path) def mkdir_and_rename(path): if os.path.exists(path): new_name = path + '_archived_' + get_timestamp() print('Path already exists. Rename it to [{:s}]'.format(new_name)) logger = logging.getLogger('base') logger.info('Path already exists. 
Rename it to [{:s}]'.format(new_name)) os.rename(path, new_name) os.makedirs(path) def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False): '''set up logger''' lg = logging.getLogger(logger_name) formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s', datefmt='%y-%m-%d %H:%M:%S') lg.setLevel(level) if tofile: log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp())) fh = logging.FileHandler(log_file, mode='w') fh.setFormatter(formatter) lg.addHandler(fh) if screen: sh = logging.StreamHandler() sh.setFormatter(formatter) lg.addHandler(sh) #################### # image convert #################### def crop_border(img_list, crop_border): """Crop borders of images Args: img_list (list [Numpy]): HWC crop_border (int): crop border for each end of height and weight Returns: (list [Numpy]): cropped image list """ if crop_border == 0: return img_list else: return [v[crop_border:-crop_border, crop_border:-crop_border] for v in img_list] def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): ''' Converts a torch Tensor into an image Numpy array Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) ''' tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 2: img_np = tensor.numpy() else: raise TypeError( 'Only support 4D, 3D and 2D tensor. 
But received with dimension: {:d}'.format(n_dim)) if out_type == np.uint8: img_np = (img_np * 255.0).round() # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. return img_np.astype(out_type) def save_img(img, img_path, mode='RGB'): cv2.imwrite(img_path, img) def DUF_downsample(x, scale=4): """Downsamping with Gaussian kernel used in the DUF official code Args: x (Tensor, [B, T, C, H, W]): frames to be downsampled. scale (int): downsampling factor: 2 | 3 | 4. """ assert scale in [2, 3, 4], 'Scale [{}] is not supported'.format(scale) def gkern(kernlen=13, nsig=1.6): import scipy.ndimage.filters as fi inp = np.zeros((kernlen, kernlen)) # set element at the middle to one, a dirac delta inp[kernlen // 2, kernlen // 2] = 1 # gaussian-smooth the dirac, resulting in a gaussian filter mask return fi.gaussian_filter(inp, nsig) B, T, C, H, W = x.size() x = x.view(-1, 1, H, W) pad_w, pad_h = 6 + scale * 2, 6 + scale * 2 # 6 is the pad of the gaussian filter r_h, r_w = 0, 0 if scale == 3: r_h = 3 - (H % 3) r_w = 3 - (W % 3) x = F.pad(x, [pad_w, pad_w + r_w, pad_h, pad_h + r_h], 'reflect') gaussian_filter = torch.from_numpy(gkern(13, 0.4 * scale)).type_as(x).unsqueeze(0).unsqueeze(0) x = F.conv2d(x, gaussian_filter, stride=scale) x = x[:, :, 2:-2, 2:-2] x = x.view(B, T, C, x.size(2), x.size(3)) return x def single_forward(model, inp): """PyTorch model forward (single test), it is just a simple warpper Args: model (PyTorch model) inp (Tensor): inputs defined by the model Returns: output (Tensor): outputs of the model. 
float, in CPU """ with torch.no_grad(): model_output = model(inp) if isinstance(model_output, list) or isinstance(model_output, tuple): output = model_output[0] else: output = model_output output = output.data.float().cpu() return output #################### # metric #################### def calculate_psnr(img1, img2): # img1 and img2 have range [0, 255] img1 = img1.astype(np.float64) img2 = img2.astype(np.float64) mse = np.mean((img1 - img2)**2) if mse == 0: return float('inf') return 20 * math.log10(255.0 / math.sqrt(mse)) def ssim(img1, img2): C1 = (0.01 * 255)**2 C2 = (0.03 * 255)**2 img1 = img1.astype(np.float64) img2 = img2.astype(np.float64) kernel = cv2.getGaussianKernel(11, 1.5) window = np.outer(kernel, kernel.transpose()) mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] mu1_sq = mu1**2 mu2_sq = mu2**2 mu1_mu2 = mu1 * mu2 sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) return ssim_map.mean() def calculate_ssim(img1, img2): '''calculate SSIM the same outputs as MATLAB's img1, img2: [0, 255] ''' if not img1.shape == img2.shape: raise ValueError('Input images must have the same dimensions.') if img1.ndim == 2: return ssim(img1, img2) elif img1.ndim == 3: if img1.shape[2] == 3: ssims = [] for i in range(3): ssims.append(ssim(img1, img2)) return np.array(ssims).mean() elif img1.shape[2] == 1: return ssim(np.squeeze(img1), np.squeeze(img2)) else: raise ValueError('Wrong input image dimensions.') class ProgressBar(object): '''A progress bar which can print the progress modified from https://github.com/hellock/cvbase/blob/master/cvbase/progress.py ''' def __init__(self, task_num=0, bar_width=50, start=True): self.task_num = task_num 
max_bar_width = self._get_max_bar_width() self.bar_width = (bar_width if bar_width <= max_bar_width else max_bar_width) self.completed = 0 if start: self.start() def _get_max_bar_width(self): terminal_width, _ = get_terminal_size() max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50) if max_bar_width < 10: print('terminal width is too small ({}), please consider widen the terminal for better ' 'progressbar visualization'.format(terminal_width)) max_bar_width = 10 return max_bar_width def start(self): if self.task_num > 0: sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:\n{}\n'.format( ' ' * self.bar_width, self.task_num, 'Start...')) else: sys.stdout.write('completed: 0, elapsed: 0s') sys.stdout.flush() self.start_time = time.time() def update(self, msg='In progress...'): self.completed += 1 elapsed = time.time() - self.start_time fps = self.completed / elapsed if self.task_num > 0: percentage = self.completed / float(self.task_num) eta = int(elapsed * (1 - percentage) / percentage + 0.5) mark_width = int(self.bar_width * percentage) bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width) sys.stdout.write('\033[2F') # cursor up 2 lines sys.stdout.write('\033[J') # clean the output (remove extra chars since last display) sys.stdout.write('[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s\n{}\n'.format( bar_chars, self.completed, self.task_num, fps, int(elapsed + 0.5), eta, msg)) else: sys.stdout.write('completed: {}, elapsed: {}s, {:.1f} tasks/s'.format( self.completed, int(elapsed + 0.5), fps)) sys.stdout.flush() def get_patch(*args, patch_size=17, scale=1): """ Get patch from an image """ ih, iw, _ = args[0].shape ip = patch_size tp = scale * ip ix = random.randrange(0, iw - ip + 1) iy = random.randrange(0, ih - ip + 1) tx, ty = scale * ix, scale * iy ret = [ args[0][iy:iy + ip, ix:ix + ip, :], *[a[ty:ty + tp, tx:tx + tp, :] for a in args[1:]] ] return ret def np2Tensor(*args, rgb_range=255, n_colors=1): def _np2Tensor(img): img = 
img.astype('float64') np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1))) # NHWC -> NCHW tensor = torch.from_numpy(np_transpose).float() # numpy -> tensor tensor.mul_(rgb_range / 255) # (0,255) -> (0,1) return tensor return [_np2Tensor(a) for a in args] def data_augment(*args, hflip=True, rot=True): hflip = hflip and random.random() < 0.5 vflip = rot and random.random() < 0.5 rot90 = rot and random.random() < 0.5 def _augment(img): if hflip: img = img[:, ::-1, :] if vflip: img = img[::-1, :, :] if rot90: img = np.rot90(img) return img def postprocess(*images, rgb_range, ycbcr_flag, device): def _postprocess(img, rgb_coefficient, ycbcr_flag, device): if ycbcr_flag: out = img.mul(rgb_coefficient).clamp(16, 235) else: out = img.mul(rgb_coefficient).clamp(0, 255).round() return out rgb_coefficient = 255 / rgb_range return [_postprocess(img, rgb_coefficient, ycbcr_flag, device) for img in images] def calc_psnr(img1, img2, rgb_range=1., shave=4): if isinstance(img1, torch.Tensor): img1 = img1[:, :, shave:-shave, shave:-shave] img1 = img1.to('cpu').numpy() if isinstance(img2, torch.Tensor): img2 = img2[:, :, shave:-shave, shave:-shave] img2 = img2.to('cpu').numpy() mse = np.mean((img1 / rgb_range - img2 / rgb_range) ** 2) if mse == 0: return 100 PIXEL_MAX = 1 return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) def calc_grad_sobel(img, device='cuda'): if not isinstance(img, torch.Tensor): raise Exception("Now just support torch.Tensor. See the Type(img)={}".format(type(img))) if not img.ndimension() == 4: raise Exception("Tensor ndimension must equal to 4. See
# Lint as: python3 # Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training utilities for Global Features model.""" import os import pickle import time import numpy as np import tensorflow as tf from delf.python import whiten from delf.python.datasets.revisited_op import dataset as test_dataset from delf.python.datasets.sfm120k import sfm120k from delf.python.training import global_features_utils from delf.python.training.model import global_model def _compute_loss_and_gradient(criterion, model, input, target, neg_num=5): """Records gradients and loss through the network. Args: criterion: Loss function. model: Network for the gradient computation. input: Tuple of query, positive and negative images. target: List of indexes to specify queries (-1), positives(1), negatives(0). neg_num: Integer, number of negatives per a tuple. Returns: loss: Loss for the training step. gradients: Computed gradients for the network trainable variables. """ # Record gradients and loss through the network. with tf.GradientTape() as tape: descriptors = tf.zeros(shape=(0, model.meta['outputdim']), dtype=tf.float32) for img in input: # Compute descriptor vector for each image. 
o = model(tf.expand_dims(img, axis=0), training=True) descriptors = tf.concat([descriptors, o], 0) queries = descriptors[target == -1] positives = descriptors[target == 1] negatives = descriptors[target == 0] negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num, model.meta['outputdim']]) # Loss calculation. loss = criterion(queries, positives, negatives) return loss, tape.gradient(loss, model.trainable_variables) def train_val_one_epoch( loader, model, criterion, optimizer, epoch, train=True, batch_size=5, query_size=2000, neg_num=5, update_every=1, debug=False): """Executes either training or validation step based on `train` value. Args: loader: Training/validation iterable dataset. model: Network to train/validate. criterion: Loss function. optimizer: Network optimizer. epoch: Integer, epoch number. train: Bool, specifies training or validation phase. batch_size: Integer, number of (q,p,n1,...,nN) tuples in a mini-batch. query_size: Integer, number of queries randomly drawn per one training epoch. neg_num: Integer, number of negatives per a tuple. update_every: Integer, update model weights every N batches, used to handle relatively large batches batch_size effectively becomes update_every x batch_size. debug: Bool, whether debug mode is used. Returns: average_epoch_loss: Average epoch loss. """ batch_time = global_features_utils.AverageMeter() data_time = global_features_utils.AverageMeter() losses = global_features_utils.AverageMeter() # Retrieve all trainable variables we defined in the graph. tvs = model.trainable_variables accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs] end = time.time() batch_num = 0 print_frequency = 10 all_batch_num = query_size // batch_size state = 'Train' if train else 'Val' global_features_utils.debug_and_log('>> {} step:'.format(state)) # For every batch in the dataset; Stops when all batches in the dataset have # been processed. 
while True: data_time.update(time.time() - end) if train: try: # Train on one batch. # Each image in the batch is loaded into memory consecutively. for _ in range(batch_size): # Because the images are not necessarily of the same size, we can't # set the batch size with .batch(). batch = loader.get_next() input_tuple = batch[0:-1] target_tuple = batch[-1] loss_value, grads = _compute_loss_and_gradient( criterion, model, input_tuple, target_tuple, neg_num) losses.update(loss_value) # Accumulate gradients. accum_grads += grads # Perform weight update if required. if (batch_num + 1) % update_every == 0 or ( batch_num + 1) == all_batch_num: # Do one step for multiple batches. Accumulated gradients are # used. optimizer.apply_gradients( zip(accum_grads, model.trainable_variables)) accum_grads = [tf.zeros_like(tv.read_value()) for tv in tvs] # We break when we run out of range, i.e., we exhausted all dataset # images. except tf.errors.OutOfRangeError: break else: # Validate one batch. # We load full batch into memory. input = [] target = [] try: for _ in range(batch_size): # Because the images are not necessarily of the same size, we can't # set the batch size with .batch(). batch = loader.get_next() input.append(batch[0:-1]) target.append(batch[-1]) # We break when we run out of range, i.e., we exhausted all dataset # images. except tf.errors.OutOfRangeError: break descriptors = tf.zeros(shape=(0, model.meta['outputdim']), dtype=tf.float32) for input_tuple in input: for img in input_tuple: # Compute the global descriptor vector. model_out = model(tf.expand_dims(img, axis=0), training=False) descriptors = tf.concat([descriptors, model_out], 0) # No need to reduce memory consumption (no backward pass): # Compute loss for the full batch. 
queries = descriptors[target == -1] positives = descriptors[target == 1] negatives = descriptors[target == 0] negatives = tf.reshape(negatives, [tf.shape(queries)[0], neg_num, model.meta['outputdim']]) loss = criterion(queries, positives, negatives) # Record loss. losses.update(loss / batch_size, batch_size) # Measure elapsed time. batch_time.update(time.time() - end) end = time.time() # Record immediate loss and elapsed time. if debug and ((batch_num + 1) % print_frequency == 0 or batch_num == 0 or (batch_num + 1) == all_batch_num): global_features_utils.debug_and_log( '>> {0}: [{1} epoch][{2}/{3} batch]\t Time val: {' 'batch_time.val:.3f} ' '(Batch Time avg: {batch_time.avg:.3f})\t Data {' 'data_time.val:.3f} (' 'Time avg: {data_time.avg:.3f})\t Immediate loss value: {' 'loss.val:.4f} ' '(Loss avg: {loss.avg:.4f})'.format( state, epoch, batch_num + 1, all_batch_num, batch_time=batch_time, data_time=data_time, loss=losses), debug=True, log=False) batch_num += 1 return losses.avg def test_retrieval(datasets, net, epoch, writer=None, model_directory=None, precompute_whitening=None, data_root='data', multiscale=[1.], test_image_size=1024): """Testing step. Evaluates the network on the provided test datasets by computing single-scale mAP for easy/medium/hard cases. If `writer` is specified, saves the mAP values in a tensorboard supported format. Args: datasets: List of dataset names for model testing (from `_TEST_DATASET_NAMES`). net: Network to evaluate. epoch: Integer, epoch number. writer: Tensorboard writer. model_directory: String, path to the model directory. precompute_whitening: Dataset used to learn whitening. If no precomputation required, then `None`. Only 'retrieval-SfM-30k' and 'retrieval-SfM-120k' datasets are supported for whitening pre-computation. data_root: Absolute path to the data folder. multiscale: List of scales for multiscale testing. test_image_size: Integer, maximum size of the test images. 
""" global_features_utils.debug_and_log(">> Testing step:") global_features_utils.debug_and_log( '>> Evaluating network on test datasets...') # Precompute whitening. if precompute_whitening is not None: # If whitening already precomputed, load it and skip the computations. filename = os.path.join( model_directory, 'learned_whitening_mP_{}_epoch.pkl'.format(epoch)) filename_layer = os.path.join( model_directory, 'learned_whitening_layer_config_{}_epoch.pkl'.format( epoch)) if tf.io.gfile.exists(filename): global_features_utils.debug_and_log( '>> {}: Whitening for this epoch is already precomputed. ' 'Loading...'.format(precompute_whitening)) with tf.io.gfile.GFile(filename, 'rb') as learned_whitening_file: learned_whitening = pickle.load(learned_whitening_file) else: start = time.time() global_features_utils.debug_and_log( '>> {}: Learning whitening...'.format(precompute_whitening)) # Loading db. db_root = os.path.join(data_root, 'train', precompute_whitening) ims_root = os.path.join(db_root, 'ims') db_filename = os.path.join(db_root, '{}-whiten.pkl'.format(precompute_whitening)) with tf.io.gfile.GFile(db_filename, 'rb') as f: db = pickle.load(f) images = [sfm120k.id2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))] # Extract whitening vectors. global_features_utils.debug_and_log( '>> {}: Extracting...'.format(precompute_whitening)) wvecs = global_model.extract_global_descriptors_from_list(net, images, test_image_size) # Learning whitening. global_features_utils.debug_and_log( '>> {}: Learning...'.format(precompute_whitening)) wvecs = wvecs.numpy() mean_vector, projection_matrix = whiten.whitenlearn(wvecs, db['qidxs'], db['pidxs']) learned_whitening = {'m': mean_vector, 'P': projection_matrix} global_features_utils.debug_and_log( '>> {}: Elapsed time: {}'.format(precompute_whitening, global_features_utils.htime( time.time() - start))) # Save learned_whitening parameters for a later use. 
with tf.io.gfile.GFile(filename, 'wb') as learned_whitening_file: pickle.dump(learned_whitening, learned_whitening_file) # Saving whitening as a layer. bias = -np.dot(mean_vector.T, projection_matrix.T) whitening_layer = tf.keras.layers.Dense( net.meta['outputdim'], activation=None, use_bias=True, kernel_initializer=tf.keras.initializers.Constant( projection_matrix.T), bias_initializer=tf.keras.initializers.Constant(bias) ) with tf.io.gfile.GFile(filename_layer, 'wb') as learned_whitening_file: pickle.dump(whitening_layer.get_config(), learned_whitening_file) else: learned_whitening = None # Evaluate on test datasets. for dataset in datasets: start = time.time() # Prepare config structure for the test dataset. cfg = test_dataset.CreateConfigForTestDataset(dataset, os.path.join(data_root)) images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])] qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])] bounding_boxes = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])] # Extract database and query vectors. global_features_utils.debug_and_log( '>> {}: Extracting database images...'.format(dataset)) vecs = global_model.extract_global_descriptors_from_list( net, images, test_image_size, scales=multiscale) global_features_utils.debug_and_log( '>> {}: Extracting query images...'.format(dataset)) qvecs = global_model.extract_global_descriptors_from_list( net, qimages, test_image_size, bounding_boxes, scales=multiscale) global_features_utils.debug_and_log('>> {}: Evaluating...'.format(dataset)) # Convert the obtained descriptors to numpy. vecs = vecs.numpy() qvecs = qvecs.numpy() # Search, rank and print test set metrics. _calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg, writer, epoch, whiten=False) if learned_whitening is not None: # Whiten the vectors. 
mean_vector = learned_whitening['m'] projection_matrix = learned_whitening['P'] vecs_lw = whiten.whitenapply(vecs, mean_vector, projection_matrix) qvecs_lw = whiten.whitenapply(qvecs, mean_vector, projection_matrix) # Search, rank, and print. _calculate_metrics_and_export_to_tensorboard( vecs_lw, qvecs_lw, dataset, cfg, writer, epoch, whiten=True) global_features_utils.debug_and_log( '>> {}: Elapsed time: {}'.format( dataset, global_features_utils.htime(time.time() - start))) def _calculate_metrics_and_export_to_tensorboard(vecs, qvecs, dataset, cfg, writer, epoch, whiten=False): """ Calculates metrics and exports
= [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return a def func_c851643bc0a541aa8275ef2fddd6c1c0(s, q, r, testCase, p, N): t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return b def func_99ed9c98b565492ea90e4a6364d35576(s, q, r, testCase, p, N): t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return i def func_4efbc6814e8244a79cab2cf8d68c8973(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return s def func_3b6ef64e62094b2197d6bd4fdc837a8e(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return best def 
func_c45fa7fd64d8486aa278d5d72a330e06(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return t def func_4a13e52b15be49a1b47ae55ea86e275f(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return q def func_d0ac9f3ead7b4a3598b16cbc82f8e0b6(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return a def func_49fb1def787c4019bdd74a7e6cfda98a(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return p def func_71bb3d45584d4c2cab1a8b21268ae2c5(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if 
b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return c def func_89217ecf92574925aa21e5e7d55388c7(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return total def func_a67fde96b9944fb1bdedb57b37f370ed(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return N def func_88ad799150474423b1dbc8eacd7c196e(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a], c[b] - c[a], total - c[b])))( 'Case #%d: %.10f' % (testCase, float(best) / float(total))) return r def func_359feb9d47704306ac8776237331c9df(infile, testCase): N, p, q, r, s = get(infile) t = [((i * p + q) % r + s) for i in range(N)] total = sum(t) c = [0] for x in t: c.append(c[-1] + x) a = b = 0 best = 0 while b <= N: if a == b or c[b] - c[a] <= total / 3: b += 1 if b > N: break else: a += 1 best = max(best, total - max((c[a],
Leakage': 0.00611897, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781, 'Renaming Unit/Peak Dynamic': 4.56169, 'Renaming Unit/Runtime Dynamic': 1.05357, 'Renaming Unit/Subthreshold Leakage': 0.070483, 'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779, 'Runtime Dynamic': 11.555, 'Subthreshold Leakage': 6.21877, 'Subthreshold Leakage with power gating': 2.58311}, {'Area': 32.0201, 'Execution Unit/Area': 7.68434, 'Execution Unit/Complex ALUs/Area': 0.235435, 'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646, 'Execution Unit/Complex ALUs/Peak Dynamic': 0.187522, 'Execution Unit/Complex ALUs/Runtime Dynamic': 0.349977, 'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111, 'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163, 'Execution Unit/Floating Point Units/Area': 4.6585, 'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156, 'Execution Unit/Floating Point Units/Peak Dynamic': 1.00371, 'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033, 'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829, 'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061, 'Execution Unit/Gate Leakage': 0.120359, 'Execution Unit/Instruction Scheduler/Area': 1.66526, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.332035, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453, 'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519, 'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913, 'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223, 
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562, 'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763, 'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.535561, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755, 'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964, 'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262, 'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388, 'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608, 'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451, 'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.270333, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853, 'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446, 'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.13793, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892, 'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346, 'Execution Unit/Integer ALUs/Area': 0.47087, 'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291, 'Execution Unit/Integer ALUs/Peak Dynamic': 0.225869, 'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344, 'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222, 'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833, 'Execution Unit/Peak Dynamic': 6.03865, 'Execution Unit/Register Files/Area': 0.570804, 'Execution Unit/Register Files/Floating Point RF/Area': 0.208131, 'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788, 'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.189622, 'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.013927, 'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698, 
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968, 'Execution Unit/Register Files/Gate Leakage': 0.000622708, 'Execution Unit/Register Files/Integer RF/Area': 0.362673, 'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992, 'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.17128, 'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.102999, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175, 'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675, 'Execution Unit/Register Files/Peak Dynamic': 0.360901, 'Execution Unit/Register Files/Runtime Dynamic': 0.116926, 'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387, 'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643, 'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912, 'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402, 'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.407729, 'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.854021, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478, 'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543, 'Execution Unit/Runtime Dynamic': 2.86423, 'Execution Unit/Subthreshold Leakage': 1.79543, 'Execution Unit/Subthreshold Leakage with power gating': 0.688821, 'Gate Leakage': 0.368936, 'Instruction Fetch Unit/Area': 5.85939, 'Instruction Fetch Unit/Branch Predictor/Area': 0.138516, 'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00103665, 'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch 
Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00103665, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719, 'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000912801, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344, 'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000358764, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347, 'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045, 'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838, 'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732, 'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05, 'Instruction 
Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602, 'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00147959, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505, 'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733, 'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00446569, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703, 'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282, 'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954, 'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758, 'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867, 'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00958634, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682, 'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357, 'Instruction Fetch Unit/Gate Leakage': 0.0589979, 'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323, 'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05, 'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827, 'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0990156, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885, 'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682, 'Instruction Fetch Unit/Instruction Cache/Area': 3.14635, 'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931, 'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.29824, 'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.242189, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022, 'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386, 'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799, 
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493, 'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404, 'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.336302, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943, 'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104, 'Instruction Fetch Unit/Peak Dynamic': 8.82242, 'Instruction Fetch Unit/Runtime Dynamic': 0.691558, 'Instruction Fetch Unit/Subthreshold Leakage': 0.932286, 'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843, 'L2/Area': 4.53318, 'L2/Gate Leakage': 0.015464, 'L2/Peak Dynamic': 0.0538906, 'L2/Runtime Dynamic': 0.00520913, 'L2/Subthreshold Leakage': 0.834142, 'L2/Subthreshold Leakage with power gating': 0.401066, 'Load Store Unit/Area': 8.80901, 'Load Store Unit/Data Cache/Area': 6.84535, 'Load Store Unit/Data Cache/Gate Leakage': 0.0279261, 'Load Store Unit/Data Cache/Peak Dynamic': 3.36699, 'Load Store Unit/Data Cache/Runtime Dynamic': 1.02829, 'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675, 'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085, 'Load Store Unit/Gate Leakage': 0.0350888, 'Load Store Unit/LoadQ/Area': 0.0836782, 'Load Store Unit/LoadQ/Gate Leakage': 0.00059896, 'Load Store Unit/LoadQ/Peak Dynamic': 0.0689063, 'Load Store Unit/LoadQ/Runtime Dynamic': 0.0689062, 'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961, 'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918, 'Load Store Unit/Peak Dynamic': 3.69238, 'Load Store Unit/Runtime Dynamic': 1.43701, 'Load Store Unit/StoreQ/Area': 0.322079, 'Load Store Unit/StoreQ/Gate Leakage': 0.00329971, 'Load Store Unit/StoreQ/Peak Dynamic': 0.169911, 'Load Store Unit/StoreQ/Runtime Dynamic': 0.339822, 'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621, 'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004, 'Load Store 
Unit/Subthreshold Leakage': 0.591321, 'Load Store Unit/Subthreshold Leakage with power gating': 0.283293, 'Memory Management Unit/Area': 0.4339, 'Memory Management Unit/Dtlb/Area': 0.0879726, 'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729, 'Memory Management Unit/Dtlb/Peak Dynamic': 0.060302, 'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0611009, 'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699, 'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485, 'Memory Management Unit/Gate Leakage': 0.00808595, 'Memory Management Unit/Itlb/Area': 0.301552, 'Memory Management Unit/Itlb/Gate Leakage': 0.00393464, 'Memory Management Unit/Itlb/Peak Dynamic': 0.391602, 'Memory Management Unit/Itlb/Runtime Dynamic': 0.039734, 'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758, 'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842, 'Memory Management Unit/Peak Dynamic': 0.651299, 'Memory Management Unit/Runtime Dynamic': 0.100835, 'Memory Management Unit/Subthreshold Leakage': 0.0766103, 'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333, 'Peak Dynamic': 22.8481, 'Renaming Unit/Area': 0.303608, 'Renaming Unit/FP Front End RAT/Area': 0.131045, 'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123, 'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468, 'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.498808, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571, 'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885, 'Renaming Unit/Free List/Area': 0.0340654, 'Renaming Unit/Free List/Gate Leakage': 2.5481e-05, 'Renaming Unit/Free List/Peak Dynamic': 0.0306032, 'Renaming Unit/Free List/Runtime Dynamic': 0.0210509, 'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144, 'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064, 'Renaming Unit/Gate Leakage': 0.00708398, 'Renaming Unit/Int Front End 
RAT/Area': 0.0941223, 'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242, 'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965, 'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.158193, 'Renaming Unit/Int Front End RAT/Subthreshold Leakage':
urllib.request.urlopen('https://ipapi.co/postal'): # Reads the page source of the url, grabs the postal code the user resides in based upon their IP Address. ipPostalOutput = ipPostal.decode('utf-8') # Decodes the postal code to avoid unwanted prefixes. for ipLatLong in urllib.request.urlopen('https://ipinfo.io/loc'): # Reads the page source of the url, grabs the lattitude and longitude of the user based upon the geo location of the user's ip address. ipLatLongOutput = ipLatLong.decode('utf-8') # Decodes the lattitude and longitude to avoid unwanted prefixes. latOutput = float(ipLatLongOutput[0:7]) # Seperates the lattitude from the longitude in the string and converts it to a float value. longOutput = float(ipLatLongOutput[8:16]) # Seperates the longitude from the lattitude in the string and converts it to a float value. #--- Haversine Formula Pre-requisites. logging.debug('Haversine formula variable assignment.') R = 6371 # Radius of the Earth (KM). #assert R == 6371, 'Incorrect radius!' lat1 = radians(38.895370) # Lattitude for FBI Headquarters, converts value to a radian. #assert lat1 == radians(38.895370), 'Incorrect lattitude for headquarters!' lon1 = radians(-77.024971) # Longitude for FBI Headquarters, converts value to a radian. #assert lon1 == radians(-77.024971), 'Incorrect longitude for headquarters!' lat2 = radians(latOutput) # Converts the lattitude of the user to a radian. lon2 = radians(longOutput) # Converts the longitude of the user to a radian. dlon = lon2 - lon1 # Subtracts the 2nd longitude point from the first longitude point, assigns it to a variable. dlat = lat2 - lat1 # Subtracts the 2nd lattitude point from the first lattitude point, assigns it to a vairable. #--- Haversine Formula ''' Formula to calculate the distance (KM) between two lattitude and longitude points. 
This formula is used to calculate the distance between the lattitude and longitude coordinates of the FBI Headquarters and the users geo location determined by their IP Address. Source: http://mathforum.org/library/drmath/view/51879.html (Explanation and demonstration of the Formula) ''' logging.debug('Haversine formula variable execution.') a = (sin(dlat/2))**2 + cos(lat1) * cos(lat2) * (sin(dlon/2))**2 # Straight line distance. c = 2 * atan2(sqrt(a), sqrt(1-a)) # Great circle distance. distance = R * c # Total distance between the coordinates. distanceOutput = str(R * c) # Converts the total distance to a string. travelTime = str(distance/100) # Determines the time in hours for the evacuation team to arrive, assuming the 'backup' is travelling 100 KM/h print() # Prints blank space. print('IP Address: ' + ipAddressOutput) # Prints the user's decoded IP Address. print() # Prints a blank space. print('Coordinates: ' + str(latOutput) + ', ' + str(longOutput)) # Prints the users decoded lattitude and longitude coordinates based upon the geo location of their ip address. print('Location: ' + str(ipCountryOutput) + ', ' + str(ipRegionOutput) + ', ' + str(ipCityOutput) + ', ' + str(ipPostalOutput) + '.') # Prints the users decoded country, region, city and postal code. print('Distance from FBI Headquarters: ' + str(distanceOutput[0:5]) + ' KM') # Prints the distance the user is from the FBI Headquarters to the nearest hundreth. print() # Prints blank space. print('An evacuation team is on the way, ETA: ' + str(travelTime[0:5]) + ' Hours' ) # Prints the time estimated in hours till the evacuation team arrives. elif backupAsk.lower() not in acceptableBackupMenu and acceptableMenu: # If the user doesn't enter a word in either acceptableBackupMenu and acceptableMenu, the following code is executed. print() # Prints a blank space. print(Fore.RED + 'Error: ' + Style.RESET_ALL + 'Unacceptable input, restarting function.') # Prints an error message. 
print() # Prints a blank space. time.sleep(2)# Adds a 2 second delay before the next line of code is executed. print() # Prints a blank space. os.system('cls') # Clears the console. print(launchScreen) # Prints the launchScreen backupEvac() # Calls the function backupEvac. print() # Prints a blank space. logging.debug('End of evacuation portion.') #--- MENU RETURN logging.debug('Requesting menu return input.') backupEvacMenu = input('Would you like to go back to the main menu?: ').strip(' ') # Asks if the user would like to go back to the main menu. #assert isinstance(backupEvacMenu, str), 'Expecting string!' if backupEvacMenu.lower() in acceptableReturnMenu: # If the user enters "Yes" or "yes" (acceptableReturnMenu) the following code is executed. os.system('cls') # Clears the terminal/console. print(launchScreen) # Prints the launch screen. menu() # Calls the menu function. elif backupEvacMenu.lower() in remain: # If the user enters "No" or "no" (remain) the following code is executed. print() # Prints a blank space. print(Style.BRIGHT + Fore.GREEN + 'Notification: ' + Style.RESET_ALL + 'Restarting function, standby.') # Prints a notification message. time.sleep(2) # Adds a 2 second delay before the next line of code is executed. os.system('cls') # Clears the console. print(launchScreen) # Prints the launch screen. backupEvac() # Calls the function backupEvac else: # If the user's input isn't in acceptableReturnMenu or remain the following code is executed. print() # Prints a blank space. print(Fore.RED + 'Error: ' + Style.RESET_ALL + 'Unacceptable input, returning to menu.') # Prints an error message. print() # Prints a blank message. time.sleep(2) # Adds a 2 second delay before the next line of code is executed. os.system('cls') # Clears the onsole. print() # Prints a blank space print(launchScreen) # Prints the launch screen. menu() # Calls the function menu. 
return backupEvacMenu # Returns backupEvacMenu (Exits the function) logging.debug('End of function backupEvac.') #--- Track an IP Address logging.debug('Start of function ip locate.') def ipLocate(): ''' Determines the geolocation of an IP Address. This function will prompt the user to input an IP Address, the function will then output information about the geolocation of said IP Address (Approximate coordinates, country, city, region, postal code.) Parameters ---------- none Returns ------- string The input of ipLocateMenu Raises (this section is only applicable if your function raises an exception) ------ TypeError If the users input is less than 7 characters, minimum number of charaters for a dns server (i.e 8.8.8.8) TypeError If the users input contains characters from the alphabet, presumably indiciating the users provided an IPV6 address. ''' logging.debug('Requesting user for ip input.') ipLocateAsk = input("Please enter the IP Address you would like to locate: ").strip(' ') # Prompts the user to input an IP Address. #assert isinstance (ipLocateAsk, str), 'Expecting string!' if ipLocateAsk in acceptableMenu: print() # Prints blank message. os.system('cls') # Clears console. print(launchScreen) # Prints launchScreen. menu() # Calls menu function. elif ipLocateAsk.isspace() or ipLocateAsk == str(''): print(Style.BRIGHT + Fore.RED + 'Error: ' + Style.RESET_ALL + 'No IP Address entered, restarting function.') time.sleep(2) # Delays execution of next line of code by 2 seconds. os.system('cls') # Clears console. print(launchScreen) # Prints launchScreen. ipLocate() # Calls function ipLocate try: # The program will attempt to run the following code, if an error is given the code under "except:" will be executed. if len(ipLocateAsk) < 7: # If ipLocateAsk is less than 7 chars the following code is executed. raise TypeError('An invalid ip address was given.') # Raises TypeError. 
elif ipLocateAsk.isalpha(): # If ipLocateAsk contains chars from the alphabet, the following code is executed. raise TypeError("IPV6 Address' are not supported at this time.") # Raises TypeError logging.debug('Fetching ip location data from API.') for ipCountry in urllib.request.urlopen('https://ipapi.co/' + ipLocateAsk + '/country_name'): # Reads the page source of the url, grabs the country of the ip address given. ipCountryOutput = ipCountry.decode('utf-8') # Decodes the country name to avoid unwanted prefixes. for ipCity in urllib.request.urlopen('https://ipapi.co/' + ipLocateAsk + '/city'): # Reads the page source of the url, grabs the city of the ip address given. ipCityOutput = ipCity.decode('utf-8') # Decodes the city name to avoid unwanted prefixes. for ipRegion in urllib.request.urlopen('https://ipapi.co/' + ipLocateAsk + '/region'): # Reads the page source of the url, grabs the region of the ip address given. ipRegionOutput = ipRegion.decode('utf-8') # Decodes the region name to avoid unwanted prefixes. for ipPostal in urllib.request.urlopen('https://ipapi.co/' + ipLocateAsk + '/postal'): # Reads the page source of the url, grabs the postal code
selected row. This function should be triggered only in a row click event as the this (self) object is used. :link Datatable website: https://datatables.net/reference/api/row() """ if isPyData: jsData = json.dumps(jsData) if jsDataKey is not None: jsData = "%s.%s" % (jsData, jsDataKey) updateFnc = self.jsDraw('page') if update else "" return "var row = %(jsTableId)s.row(%(rowIdx)s); var rowNode = row.node(); row.remove();%(jsDraw)s" % {"rowIdx": jsData, 'jsTableId': self.jsTableId, "jsDraw": updateFnc} def jsUpdateCell(self, jsData='data', jsDataKey='cell', isPyData=False, update=True): """ :category: Javascript - Datatable Refresh :rubric: JS :example: >>> myTable.jsUpdateCell() ; :example: >>> myTable.jsUpdateCell( jsData= {'cell': ["A", "B"], 'col_id': 1, 'row_id': 1 }, isPyData=True) ; :dsc: Function to update the value of a cell. Can be the current one or another. This information should be defined in the jsDataKey object. :link Datatable website: https://datatables.net/reference/api/cell() """ if isPyData: jsData = json.dumps(jsData) updateFnc = self.jsDraw('page') if update else "" return "%(jsTableId)s.cell( %(jsData)s['row_id'], %(jsData)s['col_id'] ).data(%(jsData)s['%(cell)s']);%(jsDraw)s" % {'jsTableId': self.jsTableId, 'jsData': jsData, 'cell': jsDataKey, "jsDraw": updateFnc} def jsCellGoTo(self, url=None, jsData='data', jsDataKey='cell', jsCellCode='cell', isPyData=False): """ :category: Javascript function :rubric: JS :type: Cell event :example: >>> myObj.jsCellGoTo( 'http://www.google.fr' ) :dsc: The href property sets or returns the entire URL of the current page. 
:return: A string representing the Javascript fragment to be added to the page to go to another web page :link W3C Documentation: https://www.w3schools.com/jsref/prop_loc_href.asp """ if isPyData: jsData = json.dumps(jsData) if url is None: return "%s;location.href=buildBreadCrum();" % self.jsAddUrlParam(jsCellCode, "%s.%s" %(jsData, jsDataKey), isPyData=False) return 'window.location.href="%s?%s=" + %s;' % (url, jsCellCode, "%s.%s" %(jsData, jsDataKey)) def jsUpdateRow(self, jsData='data', jsDataKey='row', isPyData=False, update=True): """ :category: Javascript - Datatable Refresh :rubric: JS :example: >>> myTable.jsUpdateRow() ; :example: >>> myTable.jsUpdateCell( jsData= {'row': ["A", "B"], 'row_id': 1 }, isPyData=True) ; :dsc: Function to update a row in a table. This can work very well with a clickRow event as the object will already have the expected format. So by returning from a ajax call from this kind of data and calling this function the source row will be changed :link Datatable website: https://datatables.net/reference/api/row() """ if isPyData: jsData = json.dumps(jsData) updateFnc = self.jsDraw('page') if update else "" return "%(jsTableId)s.row( %(jsData)s['row_id']).data( %(jsData)s['%(jsDataKey)s']);%(jsDraw)s" % {'jsTableId': self.jsTableId, 'jsData': jsData, 'jsDataKey': jsDataKey, "jsDraw": updateFnc} def jsAddRow(self, jsData='data', uniqKey=None, jsDataKey=None, pyCssCls='CssTableNewRow', isPyData=False): """ :category: Javascript - Datatable Refresh :rubric: JS :type: Table Event :example: >>> myTable.jsAddRow() :example: >>> .click(myTable.jsAddRow([{'direction': 'test', 'dn': -11}], isPyData=True)) :example: >>> myTable.jsAddRow( [{}], isPyData=True ) :dsc: Function to add a row to a table. This will use the internal Javascript data object generated automatically from the event. Even a service call will return a data object as a dictionary. The jsDataKey is the key in the data dictionary where the relevant row information are. 
:link Datatable website: https://datatables.net/reference/api/row() """ if pyCssCls == 'CssTableNewRow': # Add the class to the Python factory and create the reference to it pyCssCls = self.addPyCss(pyCssCls) if isPyData: jsData = json.dumps(jsData) return ''' var uniqKeys = %(uniqKey)s ; var rows = %(rows)s; var keys = {} ; if (%(jsDataKey)s != null) { rows = rows[%(jsDataKey)s] ;}; if (uniqKeys != null) { rows.forEach( function(rec) { var newKey = [] ; uniqKeys.forEach( function(code) { newKey.push( rec[code] ) ; }) ; keys[ newKey.join('#') ] = true ; }) ; var rowToBeDeleted = -1; %(jsTableId)s.rows().every( function ( rowIdx, tableLoop, rowLoop ) { var data = this.data(); var newKey = [] ; uniqKeys.forEach( function(code) { newKey.push( data[code] ) ; }) ; if ( newKey in keys) { rowToBeDeleted = rowIdx; } } ); if (rowToBeDeleted != -1) { %(jsTableId)s.row( rowToBeDeleted ).remove().draw() } ; } %(jsTableId)s.rows.add( rows ).draw().nodes().to$().addClass( '%(pyCssCls)s' ); %(extraJsInitFnc)s; if (typeof data != 'undefined') { data.uniqKeys = uniqKeys; data.row = JSON.stringify(%(rows)s) ; }; ''' % {'jsTableId': self.jsTableId, 'uniqKey': json.dumps(uniqKey), 'rows': jsData, 'pyCssCls': pyCssCls, 'jsDataKey': json.dumps(jsDataKey), 'extraJsInitFnc': ";".join(self.extraJsInitFnc)} # # def jsLoadFromSrc(self, jsDataKey=None): # return ''' # $('#%(htmlId)s_loading_icon').show() ; $('#%(htmlId)s').hide(); $('#%(htmlId)s_loading').show(); # %(ajax)s ; # ''' % {"ajax": self.aresObj.jsPost(self.dataSrc['script'], jsData=self.dataSrc.get('htmlObjs'), htmlCodes=self.dataSrc.get('htmlCodes'), # jsFnc=["$('#%(htmlId)s').show(); $('#%(htmlId)s_loading').hide(); $('#%(htmlId)s_loading_icon').hide() ; " % {"htmlId": self.htmlId}, # self.jsLoad('data', jsDataKey=jsDataKey), self.jsLastUpdate()] ), # 'htmlId': self.htmlId} def jsSetRowSelected(self, colNames, jsValue='data.row', jsDataKey=None, isPyData=False, pyCssCls='CssTableSelected'): """ :category: Javascript - Datatable 
Selections :rubric: JS :type: Table Event :example: >>> click(tb.jsSetRowSelected(['direction'], {'direction': 'Increasing'}, isPyData=True)) :example: >>> button.click(t3.jsSetRowSelected(["C"], {"C": 1}, isPyData=True)) :dsc: Force the row selection based on a list of value per columns in the table. this event should be defined in a Javascript event but as usual parameters can be both Javascript and Python. :tip: You can get hold of the selected row at any point of time in the Javascript by using jsClickState() in a js Event :return: The javascript fragment to select the matching rows and unselect the rest """ pyCss = self.cssSelection(pyCssCls) if isPyData: jsValue = json.dumps(jsValue) if jsValue == 'data.row': jsValue = "JSON.parse(%s)" % jsValue if jsDataKey is not None: # Here we do not consider the default value of the jsValue as this is coming from jsDataKey jsValue = "data['%s']" % jsDataKey return ''' if (DATATABLES_STATE['#%(htmlId)s'] != undefined) { DATATABLES_STATE['#%(htmlId)s'].forEach( function(rec) {$(%(jsTableId)s.row(rec.row_id).node()).removeClass('%(pyCss)s')})} ; DATATABLES_STATE['#%(htmlId)s'] = [] ; %(jsTableId)s.rows().every( function (rowIdx, tableLoop, rowLoop) { var dataRow = this.data(); var isSelected = true; console.log(dataRow); %(colName)s.forEach( function(col) {if (dataRow[col] != %(jsValue)s[col] ) {isSelected = false}}); if (isSelected) { $( %(jsTableId)s.row(rowIdx).node() ).addClass( '%(pyCss)s' ); DATATABLES_STATE['#%(htmlId)s'].push( {row: JSON.stringify(%(jsTableId)s.rows( $(this) ).data()[0]), row_id: %(jsTableId)s.row( $(this) ).index() } ) }})''' % {'jsTableId': self.jsTableId, 'colName': json.dumps(colNames), 'jsValue': jsValue, 'htmlId': self.htmlId, 'pyCss': pyCss} def jsDestroy(self): """ :category: Javascript - Datatable Refresh :rubric: JS :type: Table Event :example: >>> myTable.jsDetroy() ; :dsc: Function to fully detroy the table definition. 
once this function is trigger there is no definition at all of this object and the datatable needs to be fully redefined (with the column headers, the styles...) :return: The javascript string fragment to destroy the table :link Datatable Documentation: https://datatables.net/reference/api/destroy() """ return "%s.destroy()" % self.jsTableId def jsGetData(self): """ :category: Javascript - Datatable Data Retrieval :rubric: JS :example: >>> myTable.jsGetData() ; :dsc: Function to get the datatable data in a table :return: The javascript string fragment to destroy the table """ return 'GetTableData(%s)' % self.jsTableId def jsGetSize(self): """ :category: Javascript function :rubric: JS :type: Table Event :example: >>> myTable.jsGetSize() ; :dsc: Function to get the number of rows in the javascript side :return: The Javascript string function to get the number of rows as an integer """ return '%s.rows().data().length' % self.jsTableId def jsGetRow(self, jsData='data', jsDataKey=None, isPyData=False): """ :category: Javascript function :rubric: JS :type: Table Event :example: >>> myTable.jsGetRow( 1, isPyData=True ) ; :dsc: Function to get the row in the datatable from the row ID :return: The Javascript string function to get the row as an javascript Array """ if isPyData: jsData = json.dumps(jsData) if jsDataKey is not None: jsData = "%s.%s" % (jsData, jsDataKey) return '%s.rows().data()[%s]' % (self.jsTableId, jsData) def jsGetCol(self, jsData='data', jsDataKey=None, removeDuplicate=True, isPyData=False): """ :category: Javascript function :rubric: JS :type: Table Event :example: >>> click(aresObj.jsConsole(tb.jsGetCol('direction', isPyData=True))) :dsc: Function to get the column in the datatable from the column name. This will return a list with the distinct values or the full column. 
By default distinct values are removed :return: The Javascript string function to get the column as an javascript Array """ if isPyData: jsData = json.dumps(jsData) if jsDataKey is not None: jsData = "%s.%s" % (jsData, jsDataKey) return ''' function(){ var columnName = %(jsData)s; var columnNames = []; %(jsTableId)s.settings().context[0].aoColumns.forEach(function(col){columnNames.push(col.data)}); return %(jsTableId)s.column(columnNames.indexOf(columnName)).data().toArray()%(uniqueVals)s}() ''' % {'jsData': jsData, 'jsTableId': self.jsTableId, 'uniqueVals': '.unique()' if removeDuplicate else ''} # ----------------------------------------------------------------------------------------- # ADD SYSTEM EVENT COLUMNS # ----------------------------------------------------------------------------------------- # def addEventCol(self, icon, jsEvent, eventName=None, tooltip=''): # if isinstance(jsEvent, list): # jsEvent = ";".join(jsEvent) # pyCssCls = self.addPyCss('CssTableColumnSystem') # colReference = icon.replace(" ", "").replace("-", '') if eventName is None else eventName # self.__table.header.append({'data': colReference, 'className': '%s %s' % (pyCssCls, colReference), 'title': '<div class=\'%s\'></div>' % icon, 'width': '5px', # 'dsc': tooltip, 'format': '"<div name=\'%s\' title=\'%s\' style=\'cursor:pointer\' class=\'%s\'></div>"' % (self.htmlId, tooltip, icon)}) # self.aresObj.jsOnLoadFnc.add(''' # $(document).on('click', '.%(colReference)s', function() { # var
pulumi.get(self, "cpu_architecture") @cpu_architecture.setter def cpu_architecture(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cpu_architecture", value) @property @pulumi.getter(name="operatingSystemFamily") def operating_system_family(self) -> Optional[pulumi.Input[str]]: """ If the `requires_compatibilities` is `FARGATE` this field is required; must be set to a valid option from the [operating system family in the runtime platform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) setting """ return pulumi.get(self, "operating_system_family") @operating_system_family.setter def operating_system_family(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "operating_system_family", value) @pulumi.input_type class TaskDefinitionVolumeArgs: def __init__(__self__, *, name: pulumi.Input[str], docker_volume_configuration: Optional[pulumi.Input['TaskDefinitionVolumeDockerVolumeConfigurationArgs']] = None, efs_volume_configuration: Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationArgs']] = None, fsx_windows_file_server_volume_configuration: Optional[pulumi.Input['TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs']] = None, host_path: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] name: Name of the volume. This name is referenced in the `sourceVolume` parameter of container definition in the `mountPoints` section. :param pulumi.Input['TaskDefinitionVolumeDockerVolumeConfigurationArgs'] docker_volume_configuration: Configuration block to configure a docker volume. Detailed below. :param pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationArgs'] efs_volume_configuration: Configuration block for an EFS volume. Detailed below. :param pulumi.Input['TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs'] fsx_windows_file_server_volume_configuration: Configuration block for an FSX Windows File Server volume. Detailed below. 
:param pulumi.Input[str] host_path: Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished. """ pulumi.set(__self__, "name", name) if docker_volume_configuration is not None: pulumi.set(__self__, "docker_volume_configuration", docker_volume_configuration) if efs_volume_configuration is not None: pulumi.set(__self__, "efs_volume_configuration", efs_volume_configuration) if fsx_windows_file_server_volume_configuration is not None: pulumi.set(__self__, "fsx_windows_file_server_volume_configuration", fsx_windows_file_server_volume_configuration) if host_path is not None: pulumi.set(__self__, "host_path", host_path) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Name of the volume. This name is referenced in the `sourceVolume` parameter of container definition in the `mountPoints` section. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter(name="dockerVolumeConfiguration") def docker_volume_configuration(self) -> Optional[pulumi.Input['TaskDefinitionVolumeDockerVolumeConfigurationArgs']]: """ Configuration block to configure a docker volume. Detailed below. """ return pulumi.get(self, "docker_volume_configuration") @docker_volume_configuration.setter def docker_volume_configuration(self, value: Optional[pulumi.Input['TaskDefinitionVolumeDockerVolumeConfigurationArgs']]): pulumi.set(self, "docker_volume_configuration", value) @property @pulumi.getter(name="efsVolumeConfiguration") def efs_volume_configuration(self) -> Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationArgs']]: """ Configuration block for an EFS volume. Detailed below. 
""" return pulumi.get(self, "efs_volume_configuration") @efs_volume_configuration.setter def efs_volume_configuration(self, value: Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationArgs']]): pulumi.set(self, "efs_volume_configuration", value) @property @pulumi.getter(name="fsxWindowsFileServerVolumeConfiguration") def fsx_windows_file_server_volume_configuration(self) -> Optional[pulumi.Input['TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs']]: """ Configuration block for an FSX Windows File Server volume. Detailed below. """ return pulumi.get(self, "fsx_windows_file_server_volume_configuration") @fsx_windows_file_server_volume_configuration.setter def fsx_windows_file_server_volume_configuration(self, value: Optional[pulumi.Input['TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs']]): pulumi.set(self, "fsx_windows_file_server_volume_configuration", value) @property @pulumi.getter(name="hostPath") def host_path(self) -> Optional[pulumi.Input[str]]: """ Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished. """ return pulumi.get(self, "host_path") @host_path.setter def host_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "host_path", value) @pulumi.input_type class TaskDefinitionVolumeDockerVolumeConfigurationArgs: def __init__(__self__, *, autoprovision: Optional[pulumi.Input[bool]] = None, driver: Optional[pulumi.Input[str]] = None, driver_opts: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, scope: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[bool] autoprovision: If this value is `true`, the Docker volume is created if it does not already exist. *Note*: This field is only used if the scope is `shared`. :param pulumi.Input[str] driver: Docker volume driver to use. 
The driver value must match the driver name provided by Docker because it is used for task placement. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] driver_opts: Map of Docker driver specific options. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Map of custom metadata to add to your Docker volume. :param pulumi.Input[str] scope: Scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops. """ if autoprovision is not None: pulumi.set(__self__, "autoprovision", autoprovision) if driver is not None: pulumi.set(__self__, "driver", driver) if driver_opts is not None: pulumi.set(__self__, "driver_opts", driver_opts) if labels is not None: pulumi.set(__self__, "labels", labels) if scope is not None: pulumi.set(__self__, "scope", scope) @property @pulumi.getter def autoprovision(self) -> Optional[pulumi.Input[bool]]: """ If this value is `true`, the Docker volume is created if it does not already exist. *Note*: This field is only used if the scope is `shared`. """ return pulumi.get(self, "autoprovision") @autoprovision.setter def autoprovision(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "autoprovision", value) @property @pulumi.getter def driver(self) -> Optional[pulumi.Input[str]]: """ Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. """ return pulumi.get(self, "driver") @driver.setter def driver(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "driver", value) @property @pulumi.getter(name="driverOpts") def driver_opts(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Map of Docker driver specific options. 
""" return pulumi.get(self, "driver_opts") @driver_opts.setter def driver_opts(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "driver_opts", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Map of custom metadata to add to your Docker volume. """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def scope(self) -> Optional[pulumi.Input[str]]: """ Scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops. """ return pulumi.get(self, "scope") @scope.setter def scope(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "scope", value) @pulumi.input_type class TaskDefinitionVolumeEfsVolumeConfigurationArgs: def __init__(__self__, *, file_system_id: pulumi.Input[str], authorization_config: Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs']] = None, root_directory: Optional[pulumi.Input[str]] = None, transit_encryption: Optional[pulumi.Input[str]] = None, transit_encryption_port: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[str] file_system_id: The Amazon FSx for Windows File Server file system ID to use. :param pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs'] authorization_config: Configuration block for authorization for the Amazon FSx for Windows File Server file system detailed below. :param pulumi.Input[str] root_directory: The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host. 
:param pulumi.Input[str] transit_encryption: Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: `ENABLED`, `DISABLED`. If this parameter is omitted, the default value of `DISABLED` is used. :param pulumi.Input[int] transit_encryption_port: Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. """ pulumi.set(__self__, "file_system_id", file_system_id) if authorization_config is not None: pulumi.set(__self__, "authorization_config", authorization_config) if root_directory is not None: pulumi.set(__self__, "root_directory", root_directory) if transit_encryption is not None: pulumi.set(__self__, "transit_encryption", transit_encryption) if transit_encryption_port is not None: pulumi.set(__self__, "transit_encryption_port", transit_encryption_port) @property @pulumi.getter(name="fileSystemId") def file_system_id(self) -> pulumi.Input[str]: """ The Amazon FSx for Windows File Server file system ID to use. """ return pulumi.get(self, "file_system_id") @file_system_id.setter def file_system_id(self, value: pulumi.Input[str]): pulumi.set(self, "file_system_id", value) @property @pulumi.getter(name="authorizationConfig") def authorization_config(self) -> Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs']]: """ Configuration block for authorization for the Amazon FSx for Windows File Server file system detailed below. 
""" return pulumi.get(self, "authorization_config") @authorization_config.setter def authorization_config(self, value: Optional[pulumi.Input['TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs']]): pulumi.set(self, "authorization_config", value) @property @pulumi.getter(name="rootDirectory") def root_directory(self) -> Optional[pulumi.Input[str]]: """ The directory within the Amazon FSx for Windows File Server file system to mount as the root directory inside the host. """ return pulumi.get(self, "root_directory") @root_directory.setter def root_directory(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "root_directory", value) @property @pulumi.getter(name="transitEncryption") def transit_encryption(self) -> Optional[pulumi.Input[str]]: """ Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: `ENABLED`, `DISABLED`. If this parameter is omitted, the default value of `DISABLED` is used. """ return pulumi.get(self, "transit_encryption") @transit_encryption.setter def transit_encryption(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "transit_encryption", value) @property @pulumi.getter(name="transitEncryptionPort") def transit_encryption_port(self) -> Optional[pulumi.Input[int]]: """ Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the Amazon EFS mount helper uses. 
""" return pulumi.get(self, "transit_encryption_port") @transit_encryption_port.setter def transit_encryption_port(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "transit_encryption_port", value) @pulumi.input_type class TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs: def __init__(__self__, *, access_point_id: Optional[pulumi.Input[str]] = None, iam: Optional[pulumi.Input[str]] = None): """ :param pulumi.Input[str] access_point_id: Access point ID to use. If an access point is specified, the root directory value will be relative to the directory set for the access point. If specified, transit encryption must be enabled in the EFSVolumeConfiguration. :param pulumi.Input[str] iam: Whether or not to use the Amazon ECS
# MIT License # # Copyright (c) 2020-2022, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import subprocess import numpy as np from mako.template import Template from mbgdml import utils class CalcTemplate: """Contains all quantum chemistry templates for mako. Parameters ---------- package : :obj:`str` Computational chemistry package to perform calculations. 
""" def __init__(self, package): if package.lower() == 'orca': self.input = ( "# ${job_name}\n" "${command_signal} ${theory} ${basis_set} ${calc_type} ${options}\n" "\n" "${control_signal}pal\n" " nprocs ${cores}\n" "end\n" "\n" "${control_blocks}\n" "\n" "*xyz ${charge} ${multiplicity}\n" "${coords}" "*\n" ) self.submit = ( "#!/bin/bash\n" "#SBATCH --job-name=${job_name}\n" "#SBATCH --output=${output_name}.out\n" "#SBATCH --nodes=${nodes}\n" "#SBATCH --ntasks-per-node=${cores}\n" "#SBATCH --time=${days}-${hours}:00:00\n" "#SBATCH --cluster=${cluster}\n" "\n" "${submit_script}\n" ) self.add_job="\n\n$new_job\n\n" class ORCA: """Prepares, writes, and submits ORCA 4 calculations. """ def __init__( self, job_name, input_name, output_name ): """ Parameters ---------- job_name : :obj:`str` Name of the job for SLURM input file. input_name : :obj:`str` File name for the input file. output_name : :obj:`str` File name for the output file. """ templates = CalcTemplate('orca') # ORCA properties self.command_signal = '!' self.control_signal = '%' self.input_extension = 'inp' self.progression_parameters = '' self.template_input = templates.input self.template_submit = templates.submit # Calculation properties self.job_name = job_name self.input_name = input_name self.output_name = output_name def input( self, calc_type, coords, theory, basis_set, charge, multiplicity, cores, options='', control_blocks='', write=True, write_dir='.', input_extension='inp' ): """Rendered input file as string. Parameters ---------- calc_type : :obj:`str` Type of calculation. Options are ``'SP'``, ``'ENGRAD'``, ``'OPT'``, ``'FREQ'``, ``'NUMFREQ'``. Note that analytical frequency calculation is called with ``'FREQ'``. coords : :obj:`str` XYZ atomic coordinates as a string. A water molecule for example, ``'O 0.00000 0.00000 0.11779\\nH 0.00000 0.75545 -0.47116\\nH 0.00000 -0.75545 -0.47116'``. theory : :obj:`str` The level of theory for the calculations. For example, ``'B3LYP'`` or ``'MP2'``. 
basis_set : :obj:`str` The basis set to be used in the calculations. For example, ``'def2-TZVP'``. charge : :obj:`int` System charge. multiplicity : :obj:`int` System multiplicity. cores : :obj:`int` Number of requested cores per node. options : :obj:`str` Other calculations options such as implicit solvents, convergence criteria, etc. For example, ``'CPCM(water) Grid4 TightSCF'``. control_blocks : :obj:`str`, optional All options that control the calculation. For example ``'%scf\\n ConvForced true\\nend\\n%maxcore 8000\\n'``. write : :obj:`bool`, optional Whether or not to write the file. Defaults to ``True``. write_dir : :obj:`str`, optional Directory to write the input file. Defaults to ``'.'`` input_extension: :obj:`str`, optional File extension for ORCA input. Defaults to ``'inp'``. """ self.calc_type = calc_type self.charge = charge self.multiplicity = multiplicity self.coords = coords self.theory = theory self.basis_set = basis_set if calc_type.lower() in ['sp', 'engrad', 'opt', 'freq', 'numfreq']: self.calc_type = calc_type else: raise ValueError(f'{calc_type} is unsupported.') self.options = options self.control_blocks = control_blocks self.cores = cores templateOrca = Template(self.template_input) self.input_check() rendered = templateOrca.render( job_name=self.job_name, command_signal=self.command_signal, control_signal=self.control_signal, theory=self.theory, basis_set=self.basis_set, calc_type=self.calc_type, options=self.options, cores=str(self.cores), charge=str(self.charge), multiplicity=str(self.multiplicity), control_blocks=self.control_blocks, coords=self.coords ) filename = str(self.input_name).replace(' ', '-') \ + '.' + self.input_extension if write: if write_dir[-1] != '/': write_dir += '/' with open(write_dir + filename, 'w') as inputFile: inputFile.write(rendered) return filename, rendered def submit( self, cluster, nodes, cores, days, hours, submit_script, write=True, write_dir='.' ): """Prepare submission script. 
Parameters ---------- cluster : :obj:`str` Name of cluster for calculations. For example, ``'smp'``. nodes : :obj:`int` Number of requested nodes. cores : :obj:`int` Number of requested cores per node. days : :obj:`int` Requested run time days. hours : :obj:`int` Requested run time hours. write : :obj:`bool`, optional Whether or not to write the file. Defaults to ``True``. write_dir : :obj:`str`, optional Directory to write the input file. Defaults to ``'.'`` Returns ------- :obj:`str` File name (with extension) of the submission script. :obj:`str` Rendered submission script. """ # Run options self.cluster = cluster self.nodes = nodes self.cores = cores self.days = days self.hours = hours self.submit_script = submit_script templateOrcaSubmit = Template(self.template_submit) rendered = templateOrcaSubmit.render( job_name = self.job_name, output_name = self.output_name, cluster = self.cluster, nodes = self.nodes, cores = self.cores, days = str(self.days), hours = str(self.hours), input_name = self.input_name, submit_script=self.submit_script ) file_name = 'submit-orca.420.slurm' if write: if write_dir[-1] != '/': write_dir += '/' with open(write_dir + file_name, 'w') as inputFile: inputFile.write(rendered) return file_name, rendered def input_check(self): """Performs checks on input specifications. """ # ORCA requires numerical frequencies if using frozencore and MP2. if ' frozencore' in self.options.lower() and self.calc_type == 'freq' \ and 'mp2' in self.theory.lower(): self.calc_type = 'NumFreq' def slurm_engrad_calculation( package, z, R, job_name, input_name, output_name, theory='MP2', basis_set='def2-TZVP', charge=0, multiplicity=1, cluster='smp', nodes=1, cores=12, days=0, hours=24, calc_dir='.', options='', control_blocks='', submit_script='', write=True, submit=False ): """Generates a quantum chemistry Slurm job for multiple energy+gradient calculations of different configurations of the same system. 
Parameters ---------- package : :obj:`str` Specifies the quantum chemistry program. ``'ORCA'`` is currently the only package directly supported. z : :obj:`numpy.ndarray` A ``(n,)`` or ``(m, n)`` shape array of type :obj:`numpy.int32` containing atomic numbers of atoms in the order as they appear for every ``m`` structure. R : :obj:`numpy.ndarray` A :obj:`numpy.ndarray` with shape of ``(n, 3)`` or ``(m, n, 3)`` where ``m`` is the number of structures and ``n`` is the number of atoms. job_name : :obj:`str` A unique name for the Slurm job. input_name : :obj:`str` Desired file name of the input file. output_name : :obj:`str` Desired name of the output file specified by Slurm. theory : :obj:`str`, optional Keword that specifies the level of theory used for energy+gradient calculations (specific to the ``package``). For example, ``'MP2'``, ``'BP86'``, ``'B3LYP'``, ``'CCSD'``, etc. Defaults to ``'MP2'``. basis_set : :obj:`str`, optional Keyword that specifies the desired basis set (specific to the ``package``). For example, ``'def2-SVP''`, ``'def2-TZVP'``, ``'cc-pVQZ'``, etc. Defaults to ``'def2-TZVP'``. charge : :obj:`int`, optional System charge. Defaults to ``0``. multiplicity : :obj:`int`, optional System multiplicity. Defaults to ``1``. cluster : :obj:`str`, optional Name of the Slurm computing cluster for calculations. Defaults to ``'smp'``. nodes : :obj:`int`, optional Number of requested nodes. Defaults to ``1``. cores : :obj:`int`, optional Number of processing cores for the calculation. Defaults to ``12``. days : :obj:`int`, optional Requested run time days. Defaults to ``0``. hours : :obj:`int`, optional Requested run time hours. Defaults to ``24``. calc_dir : :obj:`str`, optional Path to write calculation. Defaults to current directory (``'.'``). options : :obj:`str`, optional All option keywords for the energy+gradient calculation (e.g., SCF convergence criteria, algorithms, etc.) specific for the package. 
For example, ``'TightSCF FrozenCore'`` for ORCA 4.2.0. Defaults to `''`. control_blocks : :obj:`str`, optional Options that will be directly added to the input file (stuff that does not have a keyword). For example, ``'%maxcore 8000'``. Defaults to ``''``. submit_script : :obj:`str`, optional The Slurm submission script content excluding . Defaults to ``pitt_crc_orca_420_submit``. write : :obj:`bool`, optional Whether or not to write the calculation files. Defaults to ``True``. submit : :obj:`bool`, optional Controls whether the calculation is submitted using the ``sbatch`` command. Defaults to ``False``. Returns ------- :obj:`str` The SLURM submission script. :obj:`str` The input file. """ if calc_dir[-1] != '/': calc_dir += '/' os.makedirs(calc_dir, exist_ok=True) if z.ndim == 1: z = np.array([z]) if R.ndim == 2: R = np.array([R]) # Prepares calculation
chance that this method might result in a 404 Not Found for messages that were sent recently (such as when using the realtime websocket API (:py:class:`pyryver.ryver_ws.RyverWS`) to respond to messages), as those messages have not been fully added to Ryver's database yet. You can use :py:func:`pyryver.util.retry_until_available()` to wrap around this coroutine to get around this. The message with the given ID is also included as a part of the result. :param msg_id: The ID of the message to use as the reference point. :param before: How many messages to retrieve before the specified one (optional). :param after: How many messages to retrieve after the specified one (optional). :return: The messages requested, including the reference point message. """ url = self.get_api_url( f"Chat.History.Message(id='{msg_id}',before={before},after={after})", format="json") async with self._ryver._session.get(url) as resp: messages = (await resp.json())["d"]["results"] return [ChatMessage(self._ryver, data) for data in messages] async def get_task_board(self) -> typing.Optional["TaskBoard"]: """ Get the task board of this chat. If tasks are not set up for this chat, this will return None. This method works on users too. If used on a user, it will get their personal task board. :return: The task board of this chat. """ url = self.get_api_url(action="board") async with self._ryver._session.get(url, raise_for_status=False) as resp: # No task board if resp.status == 404: return None resp.raise_for_status() return TaskBoard(self._ryver, (await resp.json())["d"]["results"]) async def delete_task_board(self) -> bool: """ Delete (or "reset", according to the UI) the task board of this chat. This method will not yield an error even if there is no task board set up. In those cases, it will simply return false. :return: Whether the task board was deleted. 
""" url = self.get_api_url(action="TaskBoard.Delete()") async with self._ryver._session.post(url) as resp: return (await resp.json())["d"] async def create_task_board(self, board_type: str, prefix: typing.Optional[str] = None, categories: typing.Optional[typing.List[typing.Union[str, typing.Tuple[str, str]]]] = None, uncategorized_name: typing.Optional[str] = None) -> "TaskBoard": """ Create the task board for this chat if it has not yet been set up. The board type should be one of the :py:class:`TaskBoard` ``BOARD_TYPE_`` constants; it specified whether this task board should be a simple list or a board with categories. You can also specify a list of category names and optional category types to pre-populate the task board with categories. Each entry in the list should either be a string, which specifies the category name, or a tuple of the name and the type of the category (a ``CATEGORY_TYPE_`` constant). The default category type is :py:attr:`TaskCategory.CATEGORY_TYPE_OTHER`. An "uncategorized" category is always automatically added. Therefore, the type :py:attr:`TaskCategory.CATEGORY_TYPE_UNCATEGORIZED` cannot be used in the list. You can, however, change the name of the default "Uncategorized" category by specifying ``uncategorized_name``. Categories should not be specified if the type of the task board is :py:attr:`TaskBoard.BOARD_TYPE_LIST`. :param board_type: The type of the task board. :param prefix: The task prefix (optional). :param categories: A list of categories and optional types to pre-populate the task board with (see above) (optional). :param uncategorized_name: The name for the default "Uncategorized" category. 
""" data = { "board": { "type": board_type, "prefix": prefix } } if categories or uncategorized_name is not None: cats = [ { "categoryType": TaskCategory.CATEGORY_TYPE_UNCATEGORIZED, "name": uncategorized_name if uncategorized_name is not None else "Uncategorized", "position": 0, } ] if categories: for i, category in enumerate(categories): if isinstance(category, tuple): cats.append({ "categoryType": category[1], "name": category[0], "position": i + 1, }) else: cats.append({ "categoryType": TaskCategory.CATEGORY_TYPE_OTHER, "name": category, "position": i + 1, }) data["board"]["categories"] = { "results": cats } url = self.get_api_url(action="TaskBoard.Create()") async with self._ryver._session.post(url, json=data) as resp: return TaskBoard(self._ryver, (await resp.json())) async def delete_avatar(self) -> None: """ Delete the avatar of this chat. """ url = self.get_api_url(action="Contatta.Storage.DeleteAvatars()") await self._ryver._session.post(url) async def set_avatar(self, filename: str, filedata: typing.Any, filetype: typing.Optional[str] = None) -> None: """ Set the avatar of this chat. A wrapper for :py:meth:`Storage.make_avatar_of()` and :py:meth:`Ryver.upload_file()`. :param filename: The filename of the image. :param filedata: The image's raw data, sent directly to :py:meth:`aiohttp.FormData.add_field`. :param filetype: The MIME type of the file. """ img = await self._ryver.upload_file(filename, filedata, filetype) await img.make_avatar_of(self) class User(Chat): """ A Ryver user. :cvar ROLE_USER: Regular organization member. Admins also have this role in addition to ``ROLE_ADMIN``. :cvar ROLE_ADMIN: An org admin. :cvar ROLE_GUEST: A guest. :cvar USER_TYPE_MEMBER: A member. :cvar USER_TYPE_GUEST: A guest. """ __slots__ = () _OBJ_TYPE = TYPE_USER ROLE_USER = "ROLE_USER" ROLE_ADMIN = "ROLE_ADMIN" ROLE_GUEST = "ROLE_GUEST" USER_TYPE_MEMBER = "member" USER_TYPE_GUEST = "guest" def get_username(self) -> str: """ Get the username of this user. 
:return: The username of this user. """ return self._data["username"] def get_display_name(self) -> str: """ Get the display name of this user. :return: The display name of this user. """ return self._data["displayName"] def get_name(self) -> str: """ Get the display name of this user (same as the display name). :return: The name of this user. """ return self._data["displayName"] def get_role(self) -> str: """ Get this user's role in their profile. .. note:: This is different from :py:meth:`get_roles()`. While this one gets the "Role" of the user from the profile, ``get_roles()`` gets the user's roles in the organization (user, guest, admin). :return: The user's "Role" as described in their profile. """ return self._data["description"] def get_about(self) -> str: """ Get this user's About. :return: The user's "About" as described in their profile. """ return self._data["aboutMe"] def get_time_zone(self) -> str: """ Get this user's Time Zone. :return: The user's time zone. """ return self._data["timeZone"] def get_email_address(self) -> str: """ Get this user's Email Address. :return: The user's email address. """ return self._data["emailAddress"] def get_activated(self) -> bool: """ Get whether this user's account is activated. :return: Whether this user's account is activated (enabled). """ return self._data["active"] def get_roles(self) -> typing.List[str]: """ Get this user's role in the organization. .. note:: This is different from :py:meth:`get_role()`. While this one gets the user's roles in the organization (user, guest, admin), ``get_role()`` gets the user's role from their profile. :return: The user's roles in the organization. """ return self._data["roles"] def get_user_type(self) -> str: """ Get the type of this user (member or guest). The returned value will be either :py:attr:`User.USER_TYPE_MEMBER` or :py:attr:`User.USER_TYPE_GUEST`. :return: The type of the user. 
""" return self._data["type"] def is_admin(self) -> bool: """ Get whether this user is an org admin. :return: Whether the user is an org admin. """ return User.ROLE_ADMIN in self.get_roles() def accepted_invite(self) -> bool: """ Get whether this user has accepted their user invite. :return: Whether the user has accepted their invite. """ return not self._data["newUser"] async def set_profile(self, display_name: typing.Optional[str] = None, role: typing.Optional[str] = None, about: typing.Optional[str] = None) -> None: """ Update this user's profile. If any of the arguments are None, they will not be changed. .. note:: This also updates these properties in this object. :param display_name: The user's new display_name. :param role: The user's new role, as described in :py:meth:`get_role()`. :param about: The user's new "about me" blurb. """ url = self.get_api_url() data = { "aboutMe": about if about is not None else self.get_about(), "description": role if role is not None else self.get_role(), "displayName": display_name if display_name is not None else self.get_display_name(), } await self._ryver._session.patch(url, json=data) self._data["aboutMe"] = data["aboutMe"] self._data["description"] = data["description"] self._data["displayName"] = data["displayName"] async def set_activated(self, activated: bool) -> None: """ Activate or deactivate the user. Requires admin. .. note:: This also updates these properties in this object. """ url = self.get_api_url( f"User.Active.Set(value='{'true' if activated else 'false'}')") await self._ryver._session.post(url) self._data["active"] = activated async def set_org_role(self, role: str) -> None: """ Set a user's role in this organization, as described in :py:meth:`get_roles()`. This can be either ``ROLE_USER``, ``ROLE_ADMIN`` or ``ROLE_GUEST``. .. 
note:: Although for org admins, :py:meth:`get_roles()` will return both ``ROLE_USER`` and ``ROLE_ADMIN``, to make someone an org admin you only need to pass ``ROLE_ADMIN`` into this method. .. note:: This also updates these properties in this object. """ url = self.get_api_url(f"User.Role.Set(role='{role}')") await self._ryver._session.post(url) self._data["roles"] =
# <gh_stars>0  -- BUG FIX: corpus metadata tag was bare text (SyntaxError); kept as a comment.
#!/usr/bin/env python
# coding: utf-8

# In[65]:

# NOTE(review): exact-duplicate import lines removed; no import was dropped,
# every module imported before is still imported here.
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn import metrics
from sklearn.metrics import roc_auc_score, accuracy_score
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import warnings
from sklearn.linear_model import LogisticRegression
from nltk.tokenize import word_tokenize
import re
import nltk
import emoji
import string
from textblob import TextBlob
import langid
from nltk.corpus import stopwords
from gensim import models, corpora
from sklearn.model_selection import train_test_split
warnings.filterwarnings('ignore')


# In[66]:

from bs4 import BeautifulSoup
import jsonpickle
import requests
from datetime import datetime, timedelta
from textblob import TextBlob
from productClass import Product


def main():
    """Scrape Amazon India search pages for a product category, then fetch
    every review page of every found product and dump the resulting
    ``Product`` objects (with reviews attached) to ``filepath.json``.
    """
    baseUrl = "https://www.amazon.in"
    mainCategory = "electronics"
    productCategory = "Samsung SSD"
    pagesToFetch = 51
    productObjectDataset = []
    print("Processing...")
    # Iterate over Amazon result pages; we do not know the true page count,
    # so stop as soon as a page returns no product links.
    for i in range(1, pagesToFetch + 1):
        urlToFetch = baseUrl + "/s?k=" + productCategory + "&i=" + mainCategory
        if i > 1:
            urlToFetch += "&page=" + str(i)
        res = requests.get(urlToFetch)
        soup = BeautifulSoup(res.text, 'html.parser')
        content = soup.find_all('a', class_='a-link-normal a-text-normal', href=True)
        print("Fetching: " + urlToFetch)
        # Breaking the loop if the page has no results (past the last page).
        if len(content) == 0:
            print("Nothing found in: " + str(i))
            break
        for title in content:
            productUrl = baseUrl + title.get('href')
            productTitle = title.text
            productObject = Product(productTitle, productUrl)
            productObjectDataset.append(productObject)

    # For each product, walk its review pages until an empty page appears.
    for productObject in productObjectDataset:
        reviews = []
        needToReplace = "/product-reviews/"
        for i in range(1, 1000000):
            urlToFetch = extract_url(productObject).replace(
                "/dp/", needToReplace) + "?pageNumber=" + str(i)
            res = requests.get(urlToFetch)
            soup = BeautifulSoup(res.text, 'html.parser')
            content = soup.find_all(
                'span', class_='a-size-base review-text review-text-content')
            if len(content) == 0:
                break
            for title in content:
                reviews.append(title.text.strip())
        productObject.add_reviews(reviews)
        print(
            extract_url(productObject) + ": status completed!, review found :"
            + str(len(reviews)))

    print(len(productObjectDataset))
    jsonProductObjectDataset = jsonpickle.encode(productObjectDataset)
    # BUG FIX: use a context manager so the file is closed even on error
    # (was open()/write()/close() with no try/finally).
    with open('filepath.json', 'w') as outputFile:
        outputFile.write(jsonProductObjectDataset)


def extract_title(productObject):
    """Return the title of a ``Product``."""
    return productObject.title


def extract_url(productObject):
    """Return the URL of a ``Product``."""
    return productObject.url


def extract_review_list(productObject):
    """Return the review list of a ``Product``."""
    return productObject.review_list


if __name__ == "__main__":
    main()


#############################################################################
import requests
from bs4 import BeautifulSoup

# Links and headers: pretend to be a desktop browser so Amazon serves HTML.
HEADERS = ({'User-Agent':
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
            'Accept-Language': 'en-US, en;q=0.5'})

# Link to the amazon product reviews (page number is appended per request).
url = 'https://www.amazon.in/Samsung-Internal-Solid-State-MZ-V7S500BW/product-reviews/B07MFBLN7K/ref=cm_cr_arp_d_paging_btm_next_2?ie=UTF8&reviewerType=all_reviews&pageNumber='

review_list = []


def retrieve_reviews(soup):
    """Extract title/rating/body from every review block on a parsed page
    and append the resulting dicts to the module-level ``review_list``.
    """
    # Only divs carrying data-hook="review" are individual reviews.
    reviews = soup.find_all("div", {'data-hook': "review"})
    for item in reviews:
        review = {
            # Title of the review.
            'title': item.find("a", {'data-hook': "review-title"}).text.strip(),
            # Rating text is like "4.5 out of 5 stars"; keep only the number.
            'rating': item.find("i", {'data-hook': "review-star-rating"}).text.replace("out of 5 stars", "").strip(),
            # The actual review body text.
            'review_text': item.find("span", {'data-hook': "review-body"}).text.strip()
        }
        review_list.append(review)


# Fetch review pages 1..50 from Amazon.
# BUG FIX (comment only): the old comment said "43 pages" but the loop
# actually visits 50; the comment now matches the code.
for pageNumber in range(1, 51):
    raw_text = requests.get(url=url + (str(pageNumber)), headers=HEADERS)
    soup = BeautifulSoup(raw_text.text, 'lxml')
    retrieve_reviews(soup)

# Print out all the reviews inside of review_list.
for index in range(len(review_list)):
    print(f"{index+1}) {review_list[index]}")
    print("")

import csv
import pandas as pd

# Create a dataframe out of all the reviews and write it to an Excel file.
reviews_df = pd.DataFrame(review_list)
reviews_df.to_excel('samsung.xlsx', index=False)
print("Done.")


# In[67]:


def remove_emojis(text):
    """Return *text* with all emoji characters removed."""
    reg = emoji.get_emoji_regexp()
    emoji_free_text = reg.sub(r'', text)
    return emoji_free_text


# Cleaning function
def preprocess(input_text):
    """Lower-case, strip mentions/digits/stopwords/emojis/punctuation and
    short words from one review string; return the cleaned token list.
    """
    # BUG FIX: was `review.lower()` -- `review` is not defined here; the
    # parameter is `input_text`.
    lower_text = input_text.lower()
    punctuations = '''`!()-[]{};:'"\,<>./?@#$%^&*_~=+°'''
    lower_text = re.sub(r"@[A-Za-z0-9]+", "", lower_text)  # Remove @mentions
    lower_text = re.sub(r"[0-9]+", "", lower_text)         # Remove digits
    # Tokenization.
    tokens = word_tokenize(lower_text)
    # BUG FIX: the original assigned `stopwords = stopwords.words(...)`,
    # shadowing the imported module and raising UnboundLocalError; use a
    # distinct local name instead.
    stop_words = stopwords.words("english")
    # Remove stopwords; also drop empty tokens here instead of the original
    # remove-while-iterating loop, which skips elements.
    filtered_text = [word for word in tokens
                     if word not in stop_words and word != ""]
    filtered_text = ' '.join(filtered_text)
    clean_text = remove_emojis(filtered_text)
    # Removing punctuation characters one by one.
    for ele in clean_text:
        if ele in punctuations:
            clean_text = clean_text.replace(ele, "")
    # Removing small words with length less than 3.
    clean_text = ' '.join(t for t in clean_text.split() if len(t) >= 3)
    return word_tokenize(clean_text)


# In[70]:

reviews = pd.read_excel("samsung.xlsx")
reviews.head()

# In[71]:

reviews.shape

# In[72]:

plt.figure(figsize=(7, 7))
sns.countplot(reviews["rating"])

# In[73]:

rating_count = pd.DataFrame(reviews["rating"].value_counts().reset_index())
rating_count

# In[74]:

explode = [0.05, 0.04, 0, 0.02, 0]
names = ["Rating 5.0", "Rating 4.0", "Rating 1.0", "Rating 3.0", "Rating 2.0"]
plt.figure(figsize=(10, 10))
plt.pie(rating_count["rating"],
        labels=names,
        labeldistance=1.05,
        wedgeprops={'linewidth': 1.5, 'edgecolor': 'white'},
        explode=explode,
        autopct='%.2f%%',
        shadow=True,
        pctdistance=.85,
        textprops={"fontsize": 14, "color": 'w'},
        rotatelabels=True,
        radius=1.3)
plt.show()

# The most given ratings for the product are 5.0 and 4.0, so the product
# appears to be working fine for most buyers.
# In[75]:

review_text = list(reviews["review_text"])
review_text[:5]

# In[76]:

reviews_df.shape

# In[77]:

product_review = list(reviews_df["review_text"])

# In[78]:

product_review[0]

# In[79]:

import emoji


def remove_emojis(text):
    """Return *text* with all emoji characters removed."""
    reg = emoji.get_emoji_regexp()
    emoji_free_text = reg.sub(r'', text)
    return emoji_free_text


# In[80]:

# Cleaning function
def preprocess(reviews, stopwords):
    """Clean every review string: lower-case, strip @mentions, digits,
    stopwords, emojis, punctuation and words shorter than 3 characters.

    :param reviews: iterable of raw review strings.
    :param stopwords: list/set of stopwords to drop.
    :return: list of cleaned review strings (same order as input).
    """
    cleaned_reviews = []
    for review in reviews:
        lower_text = review.lower()
        punctuations = '''`!()-[]{};:'"\,<>./?@#$%^&*_~=+°'''
        lower_text = re.sub(r"@[A-Za-z0-9]+", "", lower_text)  # @mentions
        lower_text = re.sub(r"[0-9]+", "", lower_text)         # digits
        # Tokenization.
        tokens = word_tokenize(lower_text)
        # BUG FIX: drop empty tokens inside the comprehension; the original
        # removed items from `filtered_text` while iterating over it, which
        # silently skips elements.
        filtered_text = [word for word in tokens
                         if word not in stopwords and word != ""]
        filtered_text = ' '.join(filtered_text)
        clean_text = remove_emojis(filtered_text)
        # Removing punctuation characters one by one.
        for ele in clean_text:
            if ele in punctuations:
                clean_text = clean_text.replace(ele, "")
        # Removing small words with length less than 3.
        clean_text = ' '.join(t for t in clean_text.split() if len(t) >= 3)
        cleaned_reviews.append(clean_text)
    return cleaned_reviews


# In[81]:

from nltk.corpus import stopwords
stopwords = stopwords.words("english")
len(stopwords)

# #### Call the preprocess function and pass the text string to clean data

# In[82]:

clean_reviews = preprocess(product_review, stopwords)
clean_reviews

# #### Stemming and Lemmatization

# In[83]:

wn_lem = nltk.wordnet.WordNetLemmatizer()
stemmer = nltk.stem.PorterStemmer()


def lemmatization(reviews):
    """Lemmatize then Porter-stem every token of every review string."""
    lemmatized_reviews = []
    for review in reviews:
        # Tokenize, then normalize each token (lemmatize first, stem second,
        # matching the original order of operations).
        tokens = [stemmer.stem(wn_lem.lemmatize(token))
                  for token in word_tokenize(review)]
        lemmatized_reviews.append(' '.join(tokens))
    return lemmatized_reviews


# In[84]:

clean_reviews = lemmatization(clean_reviews)
# Show 5 reviews from the list.
for index in range(5):
    print(f"{index+1}) {clean_reviews[index]}\n")

# ### Frequencies

# In[85]:

from collections import Counter
frequencies = Counter(' '.join(clean_reviews).split())
frequencies.most_common(10)

# In[86]:

# Words with the least frequency, i.e. used exactly once.
singletons = [k for k, v in frequencies.items() if v == 1]
singletons[0:10]

# In[87]:

print(f"Total words used once are {len(singletons)} out of {len(frequencies)}")

# In[88]:


def remove_useless_words(reviews, useless_words):
    """Return *reviews* with every token in *useless_words* removed.

    PERF FIX: membership is tested against a set (O(1)) instead of the raw
    list -- with ~1000 singleton words the original list scan made this
    O(tokens x useless_words).
    """
    useless = set(useless_words)
    filtered_reviews = []
    for single_review in reviews:
        tokens = word_tokenize(single_review)
        usefull_text = ' '.join(word for word in tokens
                                if word not in useless)
        filtered_reviews.append(usefull_text)
    return filtered_reviews


# In[89]:

# BUG FIX: take an actual copy -- the original `clean_reviews_copy =
# clean_reviews` only aliased the same list, contrary to its own comment.
clean_reviews_copy = list(clean_reviews)

# In[90]:

clean_reviews = remove_useless_words(clean_reviews, singletons)
# Show 5 reviews from the list.
for index in range(5):
    print(f"{index+1}) {clean_reviews[index]}\n")

# In[91]:

# count vectoriser tells the frequency of a word.
# Bag-of-words counts: how often each unigram occurs across the cleaned
# reviews (terms in more than 90% of documents are discarded).
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df = 1, max_df = 0.9)
X = vectorizer.fit_transform(clean_reviews)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# newer versions use get_feature_names_out() — confirm the pinned version.
word_freq_df = pd.DataFrame({'term': vectorizer.get_feature_names(),
                             'occurrences': np.asarray(X.sum(axis=0)).ravel().tolist()})
# Relative frequency of each term over the whole corpus.
word_freq_df['frequency'] = word_freq_df['occurrences']/np.sum(word_freq_df['occurrences'])

# In[92]:

# Most frequent terms first.
word_freq_df = word_freq_df.sort_values(by="occurrences", ascending = False)
word_freq_df.head()

# #### TfidfVectorizer

# In[93]:

# TF-IDF weighting: down-weights terms that appear in many documents
# (max_df=0.5 additionally drops terms present in >50% of the reviews).
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(stop_words='english', max_df = 0.5, smooth_idf=True)
doc_vec = vectorizer.fit_transform(clean_reviews)
names_features = vectorizer.get_feature_names()
# Dense document-term matrix as a DataFrame (memory-heavy for big corpora).
dense = doc_vec.todense()
denselist = dense.tolist()
df = pd.DataFrame(denselist, columns = names_features)
df.head()

# # N-gram

# In[94]:

#Bi-gram
def get_top_n2_words(corpus, n=None):
    # Count the 2000 most frequent bigrams over the corpus and return
    # the top n (bigram, count) pairs sorted by count, descending.
    # n=None returns the full sorted list.
    vec1 = CountVectorizer(ngram_range=(2,2), #for tri-gram, put ngram_range=(3,3)
                           max_features=2000).fit(corpus)
    bag_of_words = vec1.transform(corpus)
    # Column sums = total occurrences of each bigram across all documents.
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
    words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
    return words_freq[:n]

# In[95]:

top2_words = get_top_n2_words(clean_reviews, n=200) #top 200
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
top2_df.head()

# In[96]:

#Bi-gram plot
import matplotlib.pyplot as plt
import seaborn as sns
# Bar chart of the 20 most frequent bigrams, labels rotated for legibility.
top20_bigram = top2_df.iloc[0:20,:]
fig = plt.figure(figsize = (10, 5))
plot=sns.barplot(x=top20_bigram["Bi-gram"],y=top20_bigram["Freq"])
plot.set_xticklabels(rotation=45,labels = top20_bigram["Bi-gram"])

# In[97]:

#Tri-gram
def get_top_n3_words(corpus, n=None):
    # Same as get_top_n2_words but for trigrams.
    # NOTE(review): definition is truncated at this chunk boundary.
    vec1 = CountVectorizer(ngram_range=(3,3),
                           max_features=2000).fit(corpus)
    bag_of_words = vec1.transform(corpus)
    sum_words = bag_of_words.sum(axis=0)
    words_freq = [(word, sum_words[0, idx]) for word,
                  idx in
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This generates data for the Flight Network Heat Map described here:
https://docs.google.com/document/d/1ajv6hJ_lz9JNpzsNjoKLUhPt7pfv2bmh63J_EhycoYA/edit
The goal is to create a heatmap for where an infectious disease could spread to
given our knowledge of the air traffic network.
"""
import pymongo
from dateutil import parser as dateparser
import datetime
from geopy.distance import great_circle
import math
import random
from pylru import lrudecorator
from collections import defaultdict
import numpy

# Parameters derived from fit_flight_parameters.py.
# The load ratio (passengers / seats) is modeled as a linear function of
# the seat count: load_ratio = A_load_ratio * seats + b_load_ratio.
A_load_ratio = 0.000861
b_load_ratio = 0.674728


def compute_direct_seat_flows(db, match_query):
    """Sum the seat capacity of direct flights between every airport pair.

    db          -- pymongo database with a `flights` collection
    match_query -- aggregation $match filter selecting which flights to count
    Returns a nested dict: result[origin][destination] -> total seats.
    Pairs whose total is zero are omitted.
    """
    result = defaultdict(dict)
    for pair in db.flights.aggregate([
        {
            '$match': match_query
        }, {
            # Group by an "ORIGIN-DESTINATION" key and sum the seats.
            '$group': {
                '_id': {
                    '$concat': ['$departureAirport', '-', '$arrivalAirport']
                },
                'totalSeats': {
                    '$sum': '$totalSeats'
                }
            }
        }
    ]):
        if pair['totalSeats'] > 0:
            origin, destination = pair['_id'].split('-')
            result[origin][destination] = pair['totalSeats']
    return result


def compute_direct_passenger_flows(
        db, match_query,
        A_load_ratio_p=A_load_ratio,
        b_load_ratio_p=b_load_ratio):
    """Estimate passenger counts on direct flights between airport pairs.

    Passengers per flight are estimated inside the aggregation as
    (A_load_ratio_p * totalSeats + b_load_ratio_p) * totalSeats, i.e. the
    fitted load ratio times the seat count (mirrors LightweightFlight).
    Returns result[origin][destination] -> estimated passengers; pairs
    with zero passengers are omitted.
    """
    result = defaultdict(dict)
    for pair in db.flights.aggregate([
        {
            '$match': match_query
        }, {
            '$group': {
                '_id': {
                    '$concat': ['$departureAirport', '-', '$arrivalAirport']
                },
                'totalPassengers': {
                    '$sum': {
                        # (A * seats + b) * seats
                        '$multiply': [
                            {
                                '$sum': [
                                    {
                                        '$multiply' : [
                                            A_load_ratio_p,
                                            '$totalSeats'
                                        ]
                                    },
                                    b_load_ratio_p
                                ]
                            },
                            '$totalSeats'
                        ]
                    }
                }
            }
        }
    ]):
        if pair['totalPassengers'] > 0:
            origin, destination = pair['_id'].split('-')
            result[origin][destination] = pair['totalPassengers']
    return result


def compute_airport_distances(airport_to_coords_items):
    """
    :param airport_to_coords_items: A array of airports and their coordinates
        alphabetically sorted by code.
    :return: A distance matrix where the row/column index corresponds to the
        index of the airport in the array.
    """
    # Fill only the upper triangle (j >= idx), then mirror it below.
    dist_mat = numpy.zeros(shape=(len(airport_to_coords_items),
                                  len(airport_to_coords_items)))
    for idx, (airport_a, (airport_a_long, airport_a_lat)) in enumerate(airport_to_coords_items):
        j = idx
        for airport_b, (airport_b_long, airport_b_lat) in airport_to_coords_items[idx:]:
            # Coordinates are stored (longitude, latitude); great_circle
            # expects (latitude, longitude).
            dist_mat[idx, j] = great_circle(
                (airport_a_lat, airport_a_long),
                (airport_b_lat, airport_b_long)).kilometers
            j += 1
    # Make distance matrix symmetrical.  The diagonal holds zero (distance
    # of an airport to itself), so the addition doubles nothing real.
    dist_mat += dist_mat.T
    return dist_mat


def is_logical(airport_distance_matrix, airport_a, airport_b, intermediate_airport):
    # In logical layovers the intermediate airport is closer to the destination or
    # it is closer to the origin than the destination is to the origin.
    # All arguments are row/column indices into the distance matrix.
    ab_distance = airport_distance_matrix.item(airport_a, airport_b)
    ia_distance = airport_distance_matrix.item(airport_a, intermediate_airport)
    ib_distance = airport_distance_matrix.item(airport_b, intermediate_airport)
    return ib_distance < ab_distance or ia_distance < ab_distance


# Memoization speeds up the simulation but its use is limited by memory consumption.
# Using slotted objects reduces the size of the flights stored in memory
# allowing more of them to be cached.
class LightweightFlight(object):
    # __slots__ removes the per-instance __dict__ to save memory.
    __slots__ = [
        'passengers',
        'total_seats',
        'departure_datetime',
        'arrival_datetime',
        'arrival_airport']

    def __init__(self, flight_dict):
        # Estimated load ratio from the fitted linear model: A * seats + b.
        load_ratio = A_load_ratio * flight_dict['totalSeats'] + b_load_ratio
        self.passengers = load_ratio * flight_dict['totalSeats']
        self.total_seats = flight_dict['totalSeats']
        self.departure_datetime = flight_dict['departureDateTime']
        self.arrival_datetime = flight_dict['arrivalDateTime']
        self.arrival_airport = flight_dict['arrivalAirport']


class AirportFlowCalculator(object):
    # Assumption: We will assume that the probability distribution for the
    # number of legs in a journey is homogenous across point of origin and
    # time of travel.
    # We assume the probability distribution has the following values:
    # (probability that a journey has exactly N legs, N = 0..10)
    LEG_PROBABILITY_DISTRIBUTION = {
        0: 0.0,
        1: 0.6772732,
        2: 0.2997706,
        3: 0.0211374,
        4: 0.0016254,
        5: 0.0001632,
        6: 0.0000215,
        7: 0.0000072,
        8: 0.0000012,
        9: 0.0000002,
        10: 0.0000001
    }
    # Mean of the Poisson distribution used to weight layover durations
    # (see layover_pmf in calculate_itins).
    MEAN_LAYOVER_DELAY_HOURS = 2

    def __init__(self, db,
                 weight_by_departure_time=True,
                 aggregated_seats=None,
                 use_schedules=True,
                 use_layover_checking=True):
        """Set up flight indexes and (optionally) the airport distance matrix.

        db                       -- pymongo database (`flights`, `airports`)
        weight_by_departure_time -- weight connections by layover duration
        aggregated_seats         -- optional origin->destination seat totals;
                                    restricts the distance matrix to airports
                                    that actually appear in it
        use_layover_checking     -- reject geographically illogical layovers
        """
        self.use_schedules = use_schedules
        self.db = db
        # Indexes backing get_flights_from_airport queries.
        self.db.flights.ensure_index('departureAirport')
        self.db.flights.ensure_index(
            [('departureAirport', pymongo.ASCENDING),
             ('departureDateTime', pymongo.ASCENDING)])
        self.use_layover_checking = use_layover_checking
        if self.use_layover_checking:
            if aggregated_seats:
                # Only airports that occur in the aggregated flows need
                # coordinates / distance-matrix entries.
                active_airports = set()
                for origin, destinations in aggregated_seats.items():
                    active_airports.add(origin)
                    active_airports.update(destinations)
                airport_to_coords = {}
                for airport in self.db.airports.find():
                    if airport['_id'] in active_airports:
                        airport_to_coords[airport['_id']] = airport['loc']['coordinates']
            else:
                airport_to_coords = {airport['_id']: airport['loc']['coordinates']
                                     for airport in self.db.airports.find()}
            # Sort by airport code so matrix indices are deterministic.
            airport_to_coords_items = sorted(airport_to_coords.items(),
                                             key=lambda x: x[0])
            self.airport_to_coords_items = airport_to_coords_items
            self.airport_to_idx = {airport: idx
                                   for idx, (airport, noop) in enumerate(airport_to_coords_items)}
            self.airport_distance_matrix = compute_airport_distances(airport_to_coords_items)
        self.weight_by_departure_time = weight_by_departure_time
        self.aggregated_seats = aggregated_seats
        # LEG_PROBABILITY_DISTRIBUTION shows the probability of ending a journey
        # at each leg given one is at the start of the journey.
        # TERMINAL_LEG_PROBABILITIES shows the probability of ending a journey
        # at each leg given one has already reached it.
        self.TERMINAL_LEG_PROBABILITIES = {
            leg_num: (
                leg_prob / (1.0 - sum([
                    self.LEG_PROBABILITY_DISTRIBUTION[n]
                    for n in range(1, leg_num)])))
            for leg_num, leg_prob in self.LEG_PROBABILITY_DISTRIBUTION.items()}
        self.max_legs = len(self.LEG_PROBABILITY_DISTRIBUTION) - 1

    def get_itinerary_distance(self, itinerary):
        # Sum the great-circle distances between consecutive airports of the
        # itinerary whose coordinates are known.
        # NOTE(review): written for Python 2 — under Python 3 `filter`
        # returns an iterator so the zip/slice below would fail.  Also
        # `filter(lambda x: x, ...)` drops index 0 along with None — verify.
        idx_itinerary = [self.airport_to_idx.get(airport)
                         for airport in itinerary]
        idx_itinerary = filter(lambda x: x, idx_itinerary)
        total_distance = 0.0
        for a, b in zip(idx_itinerary, idx_itinerary[1:]):
            total_distance += self.airport_distance_matrix.item(a, b)
        return total_distance

    def check_logical_layovers(self, itinerary):
        """
        Check that the last 3 airports in the itinerary form a logical layover
        and that every airport in the itinerary is a logical layover between
        first and final airports.
        The criteria used to determine if a layover is logical is essentially
        to draw a circle around the start and end airports with a radius equal
        to the distance between them and only allow layovers airports within
        at least one of the two circles.
        Another way to put it is that if a layover flight leg both takes longer
        than a direct flight to the destination would and puts the passenger
        at a location further from the destination than they were initially
        it is illogical.
        """
        # Map airport codes to distance-matrix indices (None if unknown).
        idx_itinerary = [self.airport_to_idx.get(airport)
                         for airport in itinerary]
        origin = idx_itinerary[0]
        destination = idx_itinerary[-1]
        if destination is None:
            # When the airport location is unknown the layover cannot be checked.
            return True
        if origin == destination:
            return False
        # NOTE(review): Python-2 filter again; also drops index 0 — verify.
        layovers = filter(lambda x: x, idx_itinerary[1:-1])
        # Check last 3 airports in long itineraries.
        if len(layovers) > 2 and not is_logical(self.airport_distance_matrix,
                                                layovers[-2],
                                                destination,
                                                layovers[-1]):
            return False
        if origin is None:
            return True
        result = all([
            is_logical(self.airport_distance_matrix,
                       origin,
                       destination,
                       intermediate)
            for intermediate in layovers])
        return result

    @lrudecorator(30000)
    def get_flights_from_airport(self, airport, date):
        """
        Retrieve all the flight that that happened up to 2 days after the given
        date from the database then return them in an array with a distinct
        element for each flight.
        Notes:
        * This function is memoized to redues the number of database queries
          needed.
        """
        # NOTE(review): the query window below is 1 day ($lte date + 1),
        # not the 2 days the docstring claims — confirm which is intended.
        query_results = self.db.flights.find({
            "departureAirport": airport,
            "totalSeats": {"$gt": 0},
            "departureDateTime": {
                "$gte": date,
                "$lte": date + datetime.timedelta(1)
            }
        }, {
            "_id": 1,
            "departureDateTime": 1,
            "arrivalDateTime": 1,
            "arrivalAirport": 1,
            "totalSeats": 1,
        })
        flights = []
        for result in query_results:
            flights.append(LightweightFlight(result))
        # print "Flights:", len(flights)
        return flights

    def calculate_itins(self, starting_airport,
                        simulated_passengers=100,
                        start_date=datetime.datetime.now(),
                        end_date=datetime.datetime.now()):
        """
        Calculate the probability of a given passenger reaching each
        destination from the departure airport by simulating several voyages.
        """
        # NOTE(review): the datetime.now() defaults are evaluated once at
        # import time, not per call — a known mutable-default pitfall.
        def layover_pmf(hours):
            # Implementation of Poisson PMF based on:
            # http://stackoverflow.com/questions/280797/calculate-poisson-probability-percentage
            p = math.exp(-self.MEAN_LAYOVER_DELAY_HOURS)
            for i in range(int(hours)):
                p *= self.MEAN_LAYOVER_DELAY_HOURS
                p /= i + 1
            return p

        def simulate_passenger(itin_sofar, departure_airport_arrival_time):
            """
            This function simulates a passenger then returns their
            the airports they stop at.
            It is a recusive function that calls itself to simulate transfers
            on multi-leg flights.
""" departure_airport = itin_sofar[-1] flights = self.get_flights_from_airport(departure_airport, datetime.datetime( departure_airport_arrival_time.year, departure_airport_arrival_time.month, departure_airport_arrival_time.day)) if len(itin_sofar) - 1 >= self.max_legs: return itin_sofar if self.use_layover_checking: # only include flights with logical layovers flights = [ flight for flight in flights if self.check_logical_layovers(itin_sofar + [flight.arrival_airport])] # only include flights that the passenger arrived prior to flights = [ flight for flight in flights if departure_airport_arrival_time < flight.departure_datetime] # Weight flights from the origin city (A1) based on the summed # direct flow between A and all other destinations (B1). # However, these situations might cause some error since # there is nowhere for the passengers we expect to transfer # to go. cumulative_outbound_passengers = sum([ flight.passengers for flight in flights]) # Assumption: People are likely to take flights that occur shortly # after they arrived at an airport. This may differ for, say, # flights crossing an administrative boundary, but at first pass, # we will assume that it is the same for all flights. # If person x, is arriving in FOO from destination unknown, # and is going to catch a connecting flight, # it is more likely that they are there to catch the connecting # flight to BAR which leaves an hour after their arrival than # the connecting flight to BAZ which leaves twelve hours after their arrival. # So, the airport inflows on multileg journeys are weighted by # where the layover time falls on the poisson distribution. 
if self.weight_by_departure_time: layover_probs = [ layover_pmf( float((flight.departure_datetime - departure_airport_arrival_time).total_seconds()) / 3600) for flight in flights] time_weighted_cumulative_outbound_passengers = sum([ flight.passengers * prob for flight, prob in zip(flights, layover_probs)]) # Filter out flights with a zero probability filtered_flights
4),('zi', 3),('chuan', 2),('fen', 1),('fang', 1), ('lin', 2),('ying', 3),('mei', 3),('ren', 2),('zai', 4),('bai', 2),('di', 4), ('miao', 4),('wu', 3),('ci', 3),('qu', 3),('shen', 2),('yang', 2),('yang', 2), ('yu', 3),('yu', 2),('wen', 4),('da', 2),('ji', 4),('you', 2),('yi', 3), ('gan', 3),('shi', 2),('fu', 3),('shi', 4),('zeng', 1),('wan', 3),('shang', 1), ('xian', 1),('di', 4),('shi', 4),('nv', 3),('ba', 1),('qian', 1),('ren', 2), ('gong', 1),('sun', 1),('jian', 4),('qi', 4),('chu', 1),('di', 4),('yi', 1), ('wu',3), ('shi',2), ('nian',2), ('jian',1),('si',4),('fan',2),('zhang',3), ('feng',1),('chen',2),('hong',4),('tong',2),('hun',1),('wang',2),('shi',4), ('li', 2),('yuan', 2),('zi', 3),('di', 4),('san', 4),('ru', 2),('yan', 1), ('nv', 3),('yue', 4),('yu', 2),('zi', 1),('ying', 4),('han', 2),('ri', 4), ('jin', 1),('su', 4),('dui', 1),('qian', 2),('mu', 4),('yi', 2),('gong', 3), ('qu', 2),('tang', 2),('shi', 2),('cheng', 2),('cao', 3),('xiao', 1),('se', 4), ('dai', 4),('yan', 2),('ji', 2),('guan', 3),('qu', 3),('fu', 4),('zhong', 1), ('le', 4),('ji', 2),('ai', 1),('lai', 2),('yue', 4),('dong', 1),('chu', 1), ('lao', 3),('fu', 1),('bu', 4),('zhi', 1),('qi', 2),('suo', 2),('wang', 3), ('zu', 2),('jian', 3),('huang', 1),('shan', 1),('zhuan', 3),('chou', 2),('ji', 2), ]), (7,[('shan', 1),('shi', 2),('luo', 4),('que', 4),('xing', 2),('jing', 4),('wei', 1), ('huang', 2),('hun', 1),('dao', 4),('si', 4),('bian', 1),('fu', 2),('fei', 1), ('sheng', 1),('tang', 2),('zuo', 4),('jie', 1),('xin', 1),('yu', 3),('zu', 2), ('ba', 1),('jiao', 1),('ye', 4),('da', 4),('zhi', 1),('zi', 3),('fei', 2), ('seng', 1),('yan', 2),('gu', 3),('bi', 4),('fo', 2),('hua', 4),('hao', 3), ('yi', 2),('huo', 3),('lai', 2),('zhao', 4),('suo', 3),('jian', 4),('xi', 1), ('pu', 1),('chuang', 2),('fu', 2),('xi', 2),('zhi', 4),('geng', 1),('fan', 4), ('shu', 1),('li', 4),('yi', 4),('zu', 2),('bao', 2),('wo', 3),('ji', 1), ('ye', 4),('shen', 1),('jing', 4),('wo', 4),('bai', 3),('chong', 2),('jue', 2), 
('qing', 1),('yue', 4),('chu', 1),('ling', 3),('guang', 1),('ru', 4),('fei', 1), ('tian', 1),('ming', 2),('du', 2),('qu', 4),('wu', 2),('dao', 4),('lu', 4), ('chu', 1),('ru', 4),('gao', 1),('xia', 4),('qiong', 2),('yan', 1),('fei', 1), ('shan', 1),('hong', 2),('jian', 4),('bi', 4),('fen', 1),('lan', 4),('man', 4), ('shi', 2),('jian', 4),('song', 1),('li', 4),('jie', 1),('shi', 2),('wei', 2), ('dang', 1),('liu', 2),('chi', 4),('zu', 2),('ta', 4),('jian', 4),('shi', 2), ('shui', 3),('sheng', 1),('ji', 1),('ji', 1),('feng', 1),('chui', 1),('yi', 1), ('ren',2),('sheng',1),('ru',2), ('ci',3), ('zi',4), ('ke',3), ('le',4), ('qi',3), ('bi',4), ('ju',2), ('shu',4), ('wei',2), ('ren',2), ('ji',1), ('jie', 1),('zai', 1),('wu', 2),('dang', 3),('er', 4),('san', 1),('zi', 3), ('an', 1),('de', 2),('zhi', 4),('lao', 3),('bu', 2),('geng', 4),('gui', 1) ]), (7,[('xian', 1),('yun', 2),('si', 4),('juan', 3),('tian', 1),('wu', 2),('he', 2), ('qing', 1),('feng', 1),('chui', 1),('kong', 1),('yue', 4),('shu', 1),('bo', 1), ('sha', 1),('ping', 2),('shui', 3),('xi', 1),('sheng', 1),('ying', 3),('jue', 2), ('yi', 4),('bei', 1),('xiang', 1),('shu', 3),('jun', 1),('dang', 1),('ge', 1), ('jun', 1),('ge', 1),('sheng', 1),('suan', 1),('ci', 2),('qie', 2),('ku', 3), ('bu', 4),('neng', 2),('ting', 1),('zhong', 1),('lei', 4),('ru', 2),('yu', 3), ('dong', 4),('ting', 2),('lian', 2),('tian', 1),('jiu', 3),('yi', 2),('gao', 1), ('jiao', 1),('long', 2),('chu', 1),('mo', 4),('xing', 1),('wu', 2),('hao', 2), ('shi', 2),('sheng', 1),('jiu', 2),('si', 3),('dao', 4),('guan', 1),('suo', 3), ('you', 1),('ju', 1),('mo', 4),('mo', 4),('ru', 2),('cang', 2),('tao', 2), ('xia', 4),('chuang', 2),('wei', 4),('she', 2),('shi', 2),('wei', 4),('yao', 4), ('hai', 3),('qi', 4),('shi', 1),('zhe', 2),('xun', 1),('xing', 1),('sao', 4), ('zuo', 2),('zhe', 3),('zhou', 1),('qian', 2),('chui', 2),('da', 4),('gu', 3), ('si', 4),('huang', 2),('ji', 4),('sheng', 4),('deng', 1),('kui', 2),('gao', 1), ('she', 4),('shu', 1),('yi', 
2),('ri', 4),('xing', 2),('wan', 4),('li', 3), ('zui', 4),('cong', 2),('da', 4),('pi', 4),('jie', 1),('chu', 2),('si', 3), ('qian', 1),('zhe', 3),('zhui', 1),('hui', 2),('liu', 2),('zhe', 3),('huan', 2), ('di', 2),('xia', 2),('dang', 4),('gou', 4),('qing', 1),('chao', 2),('ban', 1), ('zhou', 1),('jia', 1),('shen', 1),('ming', 2),('shi', 3),('jia', 1),('yi', 4), ('kan', 3),('ke', 1),('zhi', 3),('de', 2),('yi', 2),('jing', 1),('man', 2), ('pan', 4),('si', 1),('bei', 1),('guan', 1),('bu', 4),('kan', 1),('shuo', 1), ('wei', 4),('mian', 3),('chui', 2),('chu', 3),('chen', 2),('ai', 1),('jian', 1), ('tong', 2),('shi', 2),('bei', 4),('liu', 2),('duo', 1),('shang', 4),('dao', 4), ('tian', 1),('lu', 4),('you', 1),('xian', 3),('nan', 2),('zhui', 1),('pan', 1), ('jun', 1),('ge', 1),('qie', 3),('xiu', 1),('ting', 1),('wo', 3),('ge', 1), ('wo', 3),('ge', 1),('jin', 1),('yu', 3),('jun', 1),('shu', 1),('ke', 1), ('yi', 4),('nian', 2),('ming', 2),('yue', 4),('jin', 1),('xiao', 1),('duo', 1), ('ren', 2),('sheng', 1),('you', 2),('ming', 4),('fei', 1),('you', 2),('ta', 1), ]), (7,[('wu', 3),('yue', 4),('ji', 4),('zhi', 4),('jie', 1),('san', 1),('gong', 1), ('si', 4),('fang', 1),('huan', 2),('zhen', 4),('song', 1),('dang', 1),('zhong', 1), ('huo', 3),('wei', 2),('di', 4),('huang', 1),('zu', 2),('yao', 1),('guai', 4), ('tian', 1),('jia', 3),('shen', 2),('bing', 3),('zhuan', 1),('qi', 2),('xiong', 2), ('pen', 1),('yun', 2),('xie', 4),('wu', 4),('cang', 2),('ban', 4),('fu', 4), ('sui', 1),('you', 3),('jue', 2),('ding', 3),('shui', 2),('neng', 2),('qiong', 2), ('wo', 3),('lai', 2),('zheng', 4),('feng', 2),('qiu', 1),('yu', 3),('jie', 2), ('yin', 1),('qi', 4),('hui', 4),('mei', 4),('wu', 2),('qing', 1),('feng', 1), ('qian', 2),('xin', 1),('mo', 4),('dao', 3),('ruo', 4),('you', 3),('ying', 4), ('qi', 3),('fei', 1),('zheng', 4),('zhi', 2),('neng', 2),('gan', 3),('tong', 1), ('xu', 1),('yu', 2),('jing', 4),('sao', 3),('zhong', 4),('feng', 1),('chu', 1), ('yang', 3),('jian', 4),('tu', 1),('wu', 
4),('cheng', 1),('qing', 1),('kong', 1), ('zi', 3),('gai', 4),('lian', 2),('yan', 2),('jie', 1),('tian', 1),('zhu', 4), ('shi', 2),('lin', 3),('teng', 2),('zhi', 4),('dui', 1),('zhu', 4),('rong', 2), ('sen', 1),('ran', 2),('po', 4),('dong', 4),('xia', 4),('ma', 3),('bai', 4), ('song', 1),('bai', 3),('yi', 2),('jing', 4),('qu', 1),('ling', 2),('gong', 1), ('fen', 3),('qiang', 2),('dan', 1),('zhu', 4),('dong', 4),('guang', 1),('cai', 3), ('gui', 3),('wu', 4),('tu', 2),('hua', 4),('tian', 2),('qing', 1),('hong', 2), ('sheng', 1),('jie', 1),('yu', 2),('lv', 3),('jian', 4),('pu', 2),('jiu', 3), ('yu', 4),('yi', 3),('fei', 3),('bo', 2),('ming', 2),('qi', 2),('zhong', 1), ('miao', 4),('nei', 4),('lao', 3),('ren', 2),('shi', 2),('shen', 2),('yi', 4), ('sui', 1),('xu', 1),('zhen', 1),('si', 4),('neng', 2),('ju', 1),('gong', 1), ('shou',3),('chi',2),('bei',1),('jiao',4),('dao',2),('wo',3),('zhi',4), ('yun',2),('ci',3),('zui',4),('ji',2),('yu',2),('nan',2),('tong',2), ('cuan', 4),('zhu', 2),('man', 2),('huang', 1),('xing', 4),('bu', 4),('si', 3), ('yi', 1),('shi', 2),('cai', 2),('zu', 2),('gan', 1),('chang', 2),('zhong', 1), ('hou', 2),('wang', 2),('jiang', 4),('xiang', 4),('wang', 4),('jiu', 3),('jue', 2), ('shen', 2),('zong', 4),('yu', 4),('fu', 2),('nan', 2),('wei', 2),('gong', 1), ('ye',4), ('tou',2), ('fo',2), ('si',4), ('shang',4), ('gao',1), ('ge',2), ('xing',1), ('yue',4), ('yan',3), ('ying',4), ('yun',2), ('tong',2), ('long',2), ('yuan', 2),('ming', 2),('zhong', 1),('dong', 4),('bu', 4),('zhi', 1),('shu', 3), ('gao', 2),('gao', 3),('han', 2),('ri', 4),('sheng', 1),('yu', 2),('dong', 1) ]), (7,[('zhang', 1),('sheng', 1),('shou', 3),('chi', 2),('shi', 2),('gu', 3),('wen', 2), ('quan', 4),('wo', 3),('shi', 4),('zuo', 4),('shi', 2),('gu', 3),('ge', 1), ('shao', 4),('ling', 2),('wu', 2),('ren', 2),('zhe', 2),('xian', 1),('si', 3), ('cai', 2),('bo', 2),('jiang', 1),('nai', 4),('shi', 2),('gu', 3),('he', 2), ('zhou', 1),('gang', 1),('ling', 2),('chi', 2),('si', 4),('hai', 
3),('fei', 4), ('xuan', 1),('wang', 2),('fen', 4),('qi', 3),('hui', 1),('tian', 1),('ge', 1), ('da', 4),('kai', 1),('ming', 2),('tang', 2),('shou', 4),('chao', 2),('he', 4), ('zhu', 1),('hou', 2),('jian', 4),('pei', 4),('ming', 2),('xiang', 1),('mo', 2), ('sou', 1),('yu', 2),('qi', 2),('yang', 2),('cheng', 3),('xiong', 2),('jun', 4), ('wan', 4),('li', 3),('qin', 2),('shou', 4),('jie', 1),('zhe', 1),('luo', 2), ('juan', 1),('gong', 1),('le', 4),('cheng', 2),('gao', 4),('wan', 4),('shi', 4), ('zao', 2),('shi', 2),('zuo', 4),('gu', 3),('hui', 1),('cuo', 2),('e', 2), ('cong', 2),('chen', 2),('cai', 2),('yi', 4),('xian', 2),('di', 4),('yi', 1), ('jian', 2),('xuan', 3),('zhuan', 4),('ke', 4),('liu', 2),('shan', 1),('e', 1), ('yu',3), ('lin',2), ('ri',4), ('jiu',3), ('ye',2), ('huo',3), ('liao',2), ('gui',3), ('wu',4), ('shou',3), ('hu',4), ('fan',2), ('hui',1), ('he',1), ('gong', 1),('cong', 2),('he', 2),('chu', 4),('de', 2),('zhi', 2),('ben', 3), ('hao', 2),('fa', 4),('jin', 4),('bei', 4),('wu', 2),('cha', 1),('e', 2), ('ci', 2),('yan', 2),('yi', 4),('mi', 4),('du', 2),('nan', 2),('xiao', 3), ('zi', 4),('ti', 3),('bu', 2),('lei', 4),('li', 4),('yu', 3),('ke', 1), ('nian', 2),('shen', 1),('qi', 2),('mian', 3),('you', 3),('que', 1),('hua', 4), ('kuai', 4),('jian', 4),('kan', 3),('duan', 4),('sheng', 1),('jiao', 1),('tuo', 2), ('luan', 2),('xiang', 2),('feng', 4),('zhu', 4),('zhong', 4),('xian', 1),('xia', 4), ('shan', 1),('hu', 2),('bi', 4),('shu', 4),('jiao', 1),('zhi', 1),('ke', 1), ('jin', 1),('sheng', 2),('tie', 2),('suo', 3),('suo', 2),('niu', 3),('zhuang', 4), ('gu', 2),('ding', 3),('yue', 4),('shui', 3),('long', 2),('teng', 2),('suo', 1), ('lou', 4),('ru', 2),('bian', 1),('shi', 1),('bu', 4),('shou', 1),('ru', 4), ('er', 4),('ya', 3),('bian', 3),('po', 4),('wu', 2),('wei', 1),('yi', 2), ('kong', 3),('zi', 3),('xi', 1),('xing', 2),('bu', 2),('dao', 4),('qin', 2), ('ji', 3),('zhi', 2),('xing', 1),('xiu', 4),('yi', 2),('xi', 1),('e', 2), ('jie', 1),('yu', 2),('hao', 
4),('gu', 3),('sheng', 1),('ku', 2),('wan', 3), ('dui', 4),('ci', 3),('ti', 4),('lei', 4),('shuang', 1),('pang', 1),('tuo', 2), ('yi', 4),('xi', 1),('chu', 1),('meng', 2),('bo', 2),('shi', 4),('zheng', 1), ('qi', 2),('nian', 2),('shi', 2),('gai', 3),('cheng', 1),('yuan', 2),('he', 2), ('gu', 4),('ren', 2),('cong', 2),('jun', 1),('zai', 4),('you', 4),('fu', 3), ('wei', 2),('wo', 3),('du', 4),('liang', 4),('jue', 2),('jiu', 4),('ke', 1), ('zhuo', 2),('guan', 4),('mu', 4),('yu', 4),('gao', 4),('ji', 4),('jiu', 3), ('ru', 2),('ci', 3),('zhi', 4),('bao', 3),('cun', 2),('qi', 3),('duo', 1), ('zhan', 1),('bao', 1),('xi', 2),('guo', 3),('ke', 3),('li', 4),('zhi', 4), ('shi', 2),('gu', 3),('zhi', 3),('zai', 4),('shu', 4),('luo', 4),('tuo', 2), ('jian', 4),('zhu', 1),('tai', 4),('miao', 4),('bi', 3),('gao', 4),('ding', 3), ('guang', 1),('jia', 4),('qi', 2),('zhi', 3),('bai', 3),('bei', 4),('guo', 4), ('sheng', 4),('en', 1),('ruo', 4),('xu', 3),('liu', 2),('tai', 4),('xue', 2), ('zhu', 1),('sheng', 1),('jiang', 2),('jie', 3),('de', 2),('qie', 1),('cuo', 1), ('guan', 1),('jing', 1),('hong', 2),('du', 1),('shang', 4),('tian', 2),('yan', 4), ('zuo', 4),('jian', 4),('ju', 3),('guo', 2),('lai', 2),('ben', 1),('bo', 1), ('wan', 1),('tai', 2),('ti', 1),('xian', 3),('lu', 4),('jie', 2),('jiao', 3), ('an', 1),('zhi', 4),('tuo', 3),('tie', 4),('ping', 2),('bu', 4),('po', 1), ('da', 4),('sha', 4),('shen', 1),('yan', 2),('yu', 3),('gai', 4),('fu', 4), ('jing', 1),('li', 4),('jiu', 2),('yuan', 3),('qi', 1),('wu', 2),('tuo', 2), ('zhong',1), ('chao',2), ('da', 4), ('guan',1), ('lao',3), ('yu',2),('shi',4), ('ju',4), ('ken',3), ('gan',3), ('ji',1), ('tu',2 ), ('an',1), ('e',1), ('mu', 4),('tong', 2),('qiao', 1),('huo', 3),('niu', 2),('li', 4),('jiao', 3), ('shui', 2),('fu', 4),('zhu', 4),('shou', 3),('wei', 2),('mo', 2),('suo', 1), ('ri', 4),('xiao', 1),('yue', 4),('shuo', 4),('jiu', 4),('mai', 2),('mo', 4), ('liu', 4),('nian', 2),('xi', 1),('gu', 4),('kong', 1),('yin', 2),('e', 2), ('xi', 
1),('zhi', 1),('su', 2),('shu', 1),('chen', 4),('zi', 1),('mei', 4), ('shu', 4),('zhi', 3),('shang', 4),('ke', 3),('bo', 2),('bai', 2),('e', 2), ('ji', 4),('zhou', 1),('ba', 1),('dai', 4),('zheng', 1),('zhan', 4),('ba', 4), ('wu', 2),('ren', 2),('shou', 1),('shi', 2),('li', 3),('ze', 2),('ne', 4), ('fang', 1),('jin', 1),('tai', 4),('ping', 2),('ri', 4),('wu', 2),('shi', 4), ('bing', 3),('ren', 4),('ru', 2),('shu', 4),('chong', 2),('qiu', 1),('ke', 1), ('an', 1),('neng', 2),('yi', 2),('ci', 3),('shang', 4),('lun', 4),('lie', 4), ('yuan', 4),('jie', 4),('bian', 4),('kou', 3),('ru', 2),('xuan', 2),('he', 2), ('shi', 2),('gu', 3),('zhi', 1),('ge', 1),('zhi', 3),('yu', 2),('ci', 3), ('wu', 1),('hu', 1),('wu', 2),('yi', 4),('qi', 2),('cuo', 1),('tuo', 2), ]), (7,[('yu', 2),('weng', 1),('ye', 4),('bang', 4),('xi', 1),('yan', 2),('su', 4), ('xiao', 3),('ji', 2),('qing', 1),('xiang', 1),('ran', 2),('chu', 3),('zhu', 2), ('yan',1), ('xiao',1), ('ri',4), ('chu',1), ('bu',2), ('jian',4), ('ren',2), ('ai',2), ('nai',3), ('yi',4), ('sheng',1), ('shan',1), ('shui',3), ('lv',4), ('hui', 2),('kan', 4),('tian', 1),('ji', 4),('xia', 4),('zhong', 1),('liu', 2), ('yan', 2),('shang', 4),('wu', 2),('xin', 1),('yun', 2),('xiang', 1),('zhu', 2) ]), (7,[('han', 4),('huang', 2),('zhong', 4),('se', 4),('si', 1),('qing', 1),('guo', 2), ('yu', 4),('yu', 3),('duo', 1),('nian', 2),('qiu', 2),('bu', 4),('de', 2), ('yang', 2),('jia', 1),('you', 2),('nv', 3),('chu', 1),('zhang', 3),('cheng', 2), ('yang', 3),('zai', 4),('shen', 1),('gui', 1),('ren', 2),('wei', 4),('shi', 2), ('tian', 1),('sheng', 1),('li', 4),('zhi', 4),('nan', 2),('zi', 4),('qi', 4), ('yi', 4),('zhao', 1),('xuan', 3),('zai', 4),('jun', 1),('wang', 2),('ce', 4), ('hui', 2),('mou', 2),('yi', 2),('xiao', 4),('bai', 3),('mei', 4),('sheng', 1), ('liu', 4),('gong', 1),('fen', 3),('dai', 4),('wu', 2),('yan', 2),('se', 4), ('chun', 1),('han', 2),('ci', 4),('yu', 4),('hua', 2),('qing', 1),('chi', 2), ('wen', 1),('quan', 2),('shui', 
3),('hua', 2),('xi', 3),('ning', 2),('zhi', 1), ('shi', 4),('er', 2),('fu', 2),('qi', 3),('jiao', 1),('wu', 2),('li', 4), ('shi', 3),('shi', 4),('xin', 1),('cheng', 2),('en', 1),('ze', 2),('shi', 2), ('yun', 2),('bin', 4),('hua', 1),('yan', 2),('jin', 1),('bu', 4),('yao', 2), ('fu', 2),('rong', 2),('zhang', 4),('nuan', 3),('du', 4),('chun', 1),('xiao', 1), ('chun', 1),('xiao', 1),('ku', 2),('duan', 3),('ri', 4),('gao', 1),('qi', 3), ('cong', 2),('ci', 3),('jun', 1),('wang', 2),('bu', 4),('zao', 3),('chao', 2), ('cheng', 2),('huan', 1),('shi', 4),('yan', 4),('wu', 2),('xian', 2),('xia', 2), ('chun', 1),('cong', 2),('chun', 1),('you', 2),('ye', 4),('zhuan', 1),('ye', 4), ('hou', 4),('gong',
            not is_not_loaded(self.frame_set_hash)

    @staticmethod
    def build_frame_set_hash(top_dir, language_id, lemma_hash={}):
        """ Read all the frame files from disk and return a hash of
        :class:`frame_set` instances

        top_dir     -- corpus root; frames live under <top_dir>/metadata/frames
        language_id -- "en", "ar" (lemma from file name) or "ch" (lemma
                       parsed out of the frame file's <id> element)
        lemma_hash  -- if non-empty, only lemmas present in it are loaded
        """
        # NOTE(review): Python 2 code (`except X, e`, has_key); the mutable
        # default for lemma_hash is shared across calls but is only read here.
        frame_set_hash = {}
        sys.stderr.write("reading the frames files ....")

        def list_frames(basedir):
            # Collect (file name, full path) for every .xml under basedir.
            frame_sets = []
            for curpath, curdirs, curfiles in os.walk(basedir):
                for curfile in curfiles:
                    if curfile.endswith(".xml"):
                        frame_sets.append((curfile, os.path.join(curpath, curfile)))
            return frame_sets

        #---- lets process the framesets ----#
        for frame_set_file_name, frame_set_file_name_full in list_frames("%s/metadata/frames" % top_dir):
            # Default part of speech is verb unless the file name carries a
            # -n / -j / -v suffix.
            prop_type = "v"
            fname_lemma = frame_set_file_name.replace(".xml", "")
            for x in ["n",  # noun
                      "j",  # adj
                      "v",  # verb
                      ]:
                if frame_set_file_name.endswith("-%s.xml" % x):
                    prop_type = x
                    fname_lemma = frame_set_file_name.replace("-%s.xml" % x, "")
            if language_id in ["en", "ar"]:
                #---- check if we want to add this lemma (whether there are any instances annotated in the frame bank ----#
                a_lemma = fname_lemma
            elif language_id == "ch":
                # Chinese frame files carry the lemma in an <id> element.
                frame_set_file = codecs.open(frame_set_file_name_full, "r", "utf-8")
                try:
                    frame_set_file_string = frame_set_file.read()
                except UnicodeDecodeError, e:
                    continue
                try:
                    a_lemma = re.findall("<id>\s+(.*?)\s+</id>", frame_set_file_string)[0]
                except Exception, e:
                    continue
            else:
                on.common.log.error("please change this code to address the new langauge (given %s)" % language_id, False)
                break
            lemma_pos = "%s-%s" % (a_lemma, prop_type)
            # Skip lemmas not mentioned in lemma_hash (when one was given).
            if lemma_hash and not lemma_hash.has_key(a_lemma):
                on.common.log.debug("skipping %s ...." % (a_lemma),
                                    on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
                continue
            else:
                on.common.log.debug("adding %s ...." % (a_lemma),
                                    on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
            on.common.log.debug("processing %s ...." % (frame_set_file_name),
                                on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
            sys.stderr.write(".")
            with codecs.open(frame_set_file_name_full, "r", "utf-8") as frame_set_file:
                try:
                    frame_set_file_string = frame_set_file.read()
                    a_frame_set = frame_set(frame_set_file_string, lang_id=language_id)
                    on.common.log.debug(a_frame_set, on.common.log.MAX_VERBOSITY)
                    # Trust the lemma we derived over the one in the file.
                    if a_frame_set.lemma != a_lemma:
                        a_frame_set.lemma = a_lemma
                    frame_set_hash[lemma_pos] = a_frame_set
                except Exception, e:
                    on.common.log.report("prop", "found some problem processing frame file",
                                         fname=frame_set_file_name)
        sys.stderr.write("\n")
        return frame_set_hash

    def is_valid_argument_number(self, lemma, pos, frameset, argument_number):
        # True when ARG<argument_number> is licensed for lemma.frameset
        # according to the database; permissive on any lookup problem.
        if argument_number is None:
            return True  # unnumbered arguments are always fine
        if not is_db_ref(self.frame_set_hash):
            return True  # this was already checked when ran the data through the db
        a_cursor = self.frame_set_hash["DB"]
        try:
            a_cursor.execute("""SELECT argument_type
                                FROM pb_sense_type_argument_type
                                WHERE pb_sense_type = '%s.%s'""" % esc(lemma, frameset))
            allowed_argument_types = [row['argument_type'] for row in a_cursor.fetchall()]
            given_argument_type = "ARG%s" % (argument_number)
            return given_argument_type in allowed_argument_types
        except MySQLdb.Error:
            on.common.log.report("proposition", "issue with lemma db argument number lookup",
                                 lemma=lemma, fsid=frameset)
            return True

    def is_valid_frameset(self, lemma, pos, frameset):
        # A missing frameset id is never valid.
        if frameset is None:
            return False
        return self.is_valid_frameset_helper(self.frame_set_hash, lemma, pos, frameset)

    def is_valid_lemma(self, lemma, pos):
        # Valid when the lemma has at least one frameset (frameset=None
        # makes the helper check lemma existence only).
        return self.is_valid_frameset_helper(self.frame_set_hash, lemma, pos)

    @classmethod
    def is_valid_frameset_helper(cls, a_frame_set_hash, lemma, pos, frameset=None):
        # Works against either a DB-backed hash ({"DB": cursor}) or an
        # in-memory frame_set hash.
        if not lemma or (not frameset and frameset is not None):
            return False
        if is_db_ref(a_frame_set_hash):
            a_cursor = a_frame_set_hash["DB"]
            try:
                if frameset:
                    a_cursor.execute("""SELECT id
                                        FROM pb_sense_type
                                        WHERE id = '%s.%s'""" % esc(lemma, frameset))
                else:
                    # No frameset given: lemma is valid if any sense
                    # "<lemma>.0x" exists.
                    a_cursor.execute("""SELECT id
                                        FROM pb_sense_type
                                        WHERE id regexp '^%s.0'""" % esc(lemma))
                return a_cursor.fetchall()
            except MySQLdb.Error:
                on.common.log.report("proposition", "issue with lemma db lookup",
                                     lemma=lemma, fsid=frameset)
                return False
        return frameset in cls.list_valid_frameset_helper(a_frame_set_hash, lemma, pos)

    @classmethod
    def list_valid_frameset_helper(cls, a_frame_set_hash, lemma, pos):
        # In-memory only; DB-backed hashes must go through is_valid_frameset.
        if is_db_ref(a_frame_set_hash):
            raise Exception("Not supported -- use is_valid_frameset")
        lemma_pos = "%s-%s" % (lemma, pos)
        if lemma_pos not in a_frame_set_hash:
            return []
        # Sense ids are stored as "<lemma>.<frameset>"; return the framesets.
        return [x.split(".")[1]
                for x in a_frame_set_hash[lemma_pos].argument_composition_hash]

    def list_valid_framesets(self, lemma, pos):
        """ given a lemma, return a list of the valid frame references.

        For example: get_valid_frame_references('keep') ==
            [ '01', '02', '03', '04', '05', '06', '08'].
        """
        return self.list_valid_frameset_helper(self.frame_set_hash, lemma, pos)

    def check_proposition(self, a_proposition, mode="normal", document_id=None, ignore_errors=False):
        """ if ignore_errors is set, ignore minor errors

        Validates a proposition against its tree; logs (and possibly
        invalidates) it via on.common.log when problems are found.
        """
        pripred=None
        if not a_proposition.get_primary_predicate():
            a_subtree = None
        else:
            pripred=a_proposition.get_primary_predicate()
            a_subtree = a_proposition.get_primary_predicate().subtree
        if not document_id:
            document_id = a_proposition.document_id

        def reject(errcomms, full_reject=False):
            # Log the error list, then invalidate the proposition if any
            # entry is a real error (not just a warning).
            where = ["docid", document_id, "prop"]
            dropped_from = "prop"
            if mode == "normal" or full_reject:
                on.common.log.reject(where, dropped_from, errcomms,
                                     a_proposition.original_enc_prop)
            else:
                on.common.log.adjust(where, dropped_from, errcomms,
                                     a_proposition.original_enc_prop,
                                     a_proposition.path_enc_prop)

            def is_warning(errcode):
                # errors are in the form [prefix]+[suffix] where
                # prefix is two digits and suffix is three.  If the
                # suffix starts with 5, then this is a real error,
                # otherwise it's just a warning.
                #
                # We use dropped_from and errcode to look up the
                # appropriate suffix in the ERRS table in
                # on/common/log.py
                suffix = on.common.log.ERRS[dropped_from][1][errcode][0]
                return suffix[0] != "5"

            dropme = any(not is_warning(errcode)
                         for errcode, comments in errcomms)
            if dropme:
                a_proposition.valid = False
            return None

        if not a_subtree:
            reject([["nopripred", ["pripred: %s" % pripred]]], full_reject=True)
            return

        errcomms = []
        def adderr(errcode, *comments):
            errcomms.append([errcode, comments])

        if a_subtree.language == "en" and a_subtree.is_aux(prop=True):
            adderr("notinc", "auxilliary verb")

        def is_vp_directed_trace(a_leaf):
            # A trace counts as verb-like when it points at a VP subtree.
            if not a_leaf.identity_subtree:
                return False
            return a_leaf.identity_subtree.tag.startswith("VP")

        nounable = any(a_leaf.is_noun() for a_leaf in a_subtree)
        verbable = any(a_leaf.is_verb() for a_leaf in a_subtree) or \
                   any(is_vp_directed_trace(a_leaf) for a_leaf in a_subtree)

        # The encoded prop carries "-n " / "-v " markers that must agree
        # with the subtree's actual part of speech.
        if "-n " in a_proposition.enc_prop and not nounable:
            adderr("nnotn")
        elif "-v " in a_proposition.enc_prop and not verbable:
            adderr("vnotv")

        if not verbable:
            adderr("notinc", "coverage calculated only on verbs")

        # The primary predicate is expected to be a leaf (height 0).
        if a_subtree.get_height() != 0:
            adderr("hnotzero", "height: %s" % a_subtree.get_height())

        if a_proposition.args_overlap(ignore_traces=True):
            adderr("ovargnt", "debug output: %s" % a_proposition.args_overlap(ignore_traces=True))
        elif a_proposition.args_overlap():
            adderr("ovarg", "debug output: %s" % a_proposition.args_overlap())

        # For Chinese and Arabic the leaf lemma must match the prop lemma.
        if a_subtree.language in ["ch", "ar"]:
            leaf_lemma = a_subtree[0].get_lemma()
            if leaf_lemma is not None and leaf_lemma != a_proposition.lemma:
                adderr("badlemma",
                       "leaf_lemma='%s'" % leaf_lemma,
                       "prop_lemma='%s'" % a_proposition.lemma)

        for a_node in a_proposition.all_nodes():
            if a_node.errcomms:
                errcomms += a_node.errcomms
            if not a_node.subtree:
                adderr("invrgp", "node id: %s" % a_node.node_id)

        if a_proposition.pb_sense_num == "XX":
            # Sense XX on a bare Chinese VV leaf is a modal; anywhere else
            # it is an invalid sense.
            if (a_subtree.language == "ch"
                and a_subtree.is_leaf()
                and a_subtree.part_of_speech == "VV"
                and not a_proposition.argument_analogues):
                adderr("modalXX")
            else:
                adderr("invsenseXX")

        errcomms += a_proposition.errcomms
        assert a_proposition.valid # nothing should have invalidated the prop before we got it

        def is_serious(errcom):
            """ copy to new trees errors are not serious """
            errcode, comments = errcom
            serious = errcode not in ["notarget", "notracetarget", "leafdiff",
                                      "nosubtree", "spanstrees", "modtok",
                                      "modinstrace", "deltrace",
                                      "prop_modinstrace", "prop_deltrace",
                                      "invrgp", "invsenseXX", "badargtype",
                                      "notinc", "vnotv"]
            return serious

        if errcomms:
            if ignore_errors and not any(is_serious(errcomm) for errcomm in errcomms):
                pass
            else:
                reject(errcomms)

        if a_proposition.valid and not ignore_errors:
            for a_node in a_proposition.all_nodes():
                assert a_node.subtree

    def enrich_treebank(self, a_treebank, a_cursor=None, ignore_errors=False):
        # Attach each proposition to its tree, then validate it; invalid
        # propositions are dropped by check_proposition.
        abstract_bank.enrich_treebank(self, a_treebank)
        for a_proposition_document in self:
            sys.stderr.write(".")
            for a_proposition in a_proposition_document:
                a_tree = a_proposition_document.tree_document.tree_hash[a_proposition.tree_id]
                if not a_proposition.get_primary_predicate():
                    self.check_proposition(a_proposition, ignore_errors=ignore_errors) # will drop it
                    continue
                old_enc_prop = a_proposition.enc_prop
                try:
                    a_proposition.enrich_tree(a_tree)
                except Exception:
                    # Surface which encoded prop blew up before re-raising.
                    print old_enc_prop
                    raise
                self.check_proposition(a_proposition, ignore_errors=ignore_errors)
        sys.stderr.write("\n")
        return a_treebank

    # SQL metadata used by the abstract_bank persistence machinery.
    sql_table_name = "proposition_bank"
    sql_exists_table = "proposition"
    sql_create_statement = \
"""
create table proposition_bank
(
  id varchar(255) not null collate utf8_bin primary key,
  subcorpus_id varchar(255) not null,
  tag varchar (255) not null,
  foreign key (subcorpus_id) references subcorpus.id
)
default character set utf8;
"""

    sql_insert_statement = \
"""
insert into proposition_bank
(
  id,
  subcorpus_id,
  tag
) values(%s, %s, %s)
"""

    def write_to_db(self, a_cursor):
        abstract_bank.write_to_db(self, a_cursor)
self.write_frame_set_hash_to_db(self.frame_set_hash, a_cursor) @classmethod def write_frame_set_hash_to_db(cls, a_frame_set_hash, a_cursor): if a_frame_set_hash and not is_not_loaded(a_frame_set_hash) and not is_db_ref(a_frame_set_hash): for a_frame_set in a_frame_set_hash.itervalues(): a_frame_set.write_to_db(a_cursor) @classmethod def from_db(cls, a_subcorpus, tag, a_cursor, affixes=None): #---- create an empty proposition bank ----# sys.stderr.write("reading the proposition bank ....") a_proposition_bank = proposition_bank(a_subcorpus, tag, a_cursor) #---- now get document ids for this treebank ----# a_cursor.execute("""select document.id from document where subcorpus_id = '%s';""" % (a_subcorpus.id)) document_rows = a_cursor.fetchall() #---- and process each document ----# for document_row in document_rows: a_proposition_document = proposition_document(document_row["id"], a_proposition_bank.extension) if not on.common.util.matches_an_affix(a_proposition_document.document_id, affixes): continue sys.stderr.write(".") #---- process each proposition in this document ----# a_cursor.execute("""select * from proposition where document_id = '%s';""" % (a_proposition_document.document_id)) for a_proposition_row in a_cursor.fetchall(): #---- create an empty proposition object ----# a_proposition = proposition("", a_subcorpus.id, a_proposition_document.document_id, a_proposition_bank) a_proposition.quality = a_proposition_row["quality"] #--- process each predicate in this proposition ----# a_cursor.execute("""select * from predicate where proposition_id = '%s' order by index_in_parent asc;""" % (a_proposition_row["id"])) for a_predicate_row in a_cursor.fetchall(): if not a_proposition.predicate: predicate_analogue("", a_predicate_row["type"], sentence_index=None, token_index=None, a_proposition=a_proposition) a_proposition.lemma = a_predicate_row["lemma"] a_proposition.pb_sense_num = a_predicate_row["pb_sense_num"] else: assert a_proposition.lemma == 
a_predicate_row["lemma"] assert a_proposition.pb_sense_num == a_predicate_row["pb_sense_num"] assert a_proposition.predicate.type == a_predicate_row["type"] a_predicate = predicate("", sentence_index=None, token_index=None, a_predicate_analogue=a_proposition.predicate) #---- get the predicate part information ----# a_cursor.execute("""select * from predicate_node where predicate_id = '%s' order by index_in_parent asc;""" % (a_predicate_row["id"])) for predicate_node_row in a_cursor.fetchall(): token_index, height = predicate_node_row["node_id"].split(":") a_predicate_node = predicate_node(a_predicate_row["sentence_index"], token_index, height, a_predicate, bool(predicate_node_row["primary_flag"])) assert predicate_node_row["index_in_parent"] == a_predicate_node.index_in_parent assert predicate_node_row["node_id"] == a_predicate_node.node_id assert bool(predicate_node_row["primary_flag"]) == a_predicate_node.primary maxes = {} for a_table, a_index in [["argument", "argument_analogue_index"], ["proposition_link", "link_analogue_index"]]: a_cursor.execute("""select max(%s) from %s where proposition_id = '%s';""" % (a_index, a_table, a_proposition.id)) max_rows = a_cursor.fetchall() assert len(max_rows) == 1 max_index = max_rows[0]["max(%s)" % a_index] maxes[a_index] = -1 if max_index is None else int(max_index) for a_argument_analogue_index in range(maxes["argument_analogue_index"] + 1): a_argument_analogue = argument_analogue("", a_proposition) assert a_argument_analogue_index == a_argument_analogue.index_in_parent a_cursor.execute("""select * from argument where proposition_id = '%s' and argument_analogue_index = '%s';""" % ( a_proposition.id, a_argument_analogue_index)) for argument_row in a_cursor.fetchall():
# %% import enum import functools import itertools import re import subprocess import uuid from pathlib import Path from typing import Callable, List, NewType, Tuple, Union import bokeh.plotting as bplotting import cmocean import geopandas import numpy as np import pandas as pd from bokeh.colors import RGB from bokeh.embed import autoload_static from bokeh.io import export_png from bokeh.layouts import column as layout_column from bokeh.layouts import gridplot from bokeh.layouts import row as layout_row from bokeh.models import ( BoxZoomTool, CDSView, ColorBar, ColumnDataSource, CustomJS, DateSlider, GroupFilter, HoverTool, LogColorMapper, PanTool, RadioButtonGroup, Range1d, ResetTool, Title, Toggle, ZoomInTool, ZoomOutTool, ) from bokeh.models.formatters import NumeralTickFormatter, PrintfTickFormatter from bokeh.models.tickers import FixedTicker from bokeh.resources import CDN from IPython.display import display # noqa F401 from shapely.geometry import mapping as shapely_mapping from typing_extensions import Literal from constants import USA_STATE_CODES, Columns, Counting, DiseaseStage, Paths, Select from plotting_utils import resize_to_even_dims GEO_DATA_DIR = Paths.DATA / "Geo" GEO_FIG_DIR: Path = Paths.FIGURES / "Geo" PNG_SAVE_ROOT_DIR: Path = GEO_FIG_DIR / "BokehInteractiveStatic" PNG_SAVE_ROOT_DIR.mkdir(parents=True, exist_ok=True) Polygon = NewType("Polygon", List[Tuple[float, float]]) MultiPolygon = NewType("MultiPolygon", List[Tuple[Polygon]]) DateString = NewType("DateString", str) BokehColor = NewType("BokehColor", str) InfoForAutoload = NewType("InfoForAutoload", Tuple[str, str]) LAT_COL = "Lat_" LONG_COL = "Long_" REGION_NAME_COL = "Region_Name_" class WorldCRS(enum.Enum): EQUIRECTANGULAR: "WorldCRS" = "EPSG:4087" ECKERT_IV: "WorldCRS" = "ESRI:54012" LOXIMUTHAL: "WorldCRS" = "ESRI:54023" # If you don't care about faithfully representing data and hate the truth in # general, you can use this as a case # WEB_MERCATOR: "WorldCRS" = "EPSG:3857" 
@staticmethod def default() -> "WorldCRS": """Get the default CRS (effectively a file-wide constant, except you can't define constants in enums b/c they'll be interpreted as cases) :return: The default case :rtype: WorldCRS """ return WorldCRS.EQUIRECTANGULAR def get_axis_info(self) -> dict: """Get axis parameters appropriate for this CRS CRSes project the world into different coordinate systems (like, some are long/lat, some are numbers in the hundreds of thousands). This function maps CRSes to appropriate axis kwargs for plotting a choropleth in the given CRS. The kwargs are used to construct Bokeh's Range1d, but there isn't a direct 1:1 correspondence between kwarg keys and Range1d's parameters (so you can use any keys you want, but you then have to map them back to Range1d parameters). :raises NotImplementedError: If `self` is anything other than EQUIRECTANGULAR :return: The arguments to be :rtype: dict """ if self is WorldCRS.EQUIRECTANGULAR: return { "x_range": (-2.125e7, 2.125e7), "y_range": (-7e6, 1e7), "min_visible_y_range": 1e6, "plot_aspect_ratio": 2, } raise NotImplementedError( "Just use WorldCRS.EQUIRECTANGULAR; it's the best EPSG" ) def get_longs_lats(geo_df: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame: """Given a geopandas.GeoDataFrame, add two columns, long and lat, containing the coordinates of the geometry's (multi)polygons in a format appropriate for Bokeh :param geo_df: The GeoDataFrame for the region of interest (e.g., the world, the US) :type geo_df: geopandas.GeoDataFrame :return: The same GeoDataFrame with two additional columns, one with long and one with lat. These columns' elements are lists of (multi)polygon vertices. 
    :rtype: geopandas.GeoDataFrame
    """

    geo_df = geo_df.copy()

    # geopandas gives us geometry as (Multi)Polygons
    # bokeh expects two lists, lat and long, each of which is a 1-D list of floats with
    # "NaN" used to separate the discontiguous regions of a multi-polygon
    # No that's not a typo, it's really the string "NaN"

    # Contrary to the usual English pairing "latitude/longitude", we always have long
    # precede lat here, as long is the x and lat is the y (and in this sense the
    # usual English specification is backwards)
    longs = []
    lats = []
    for multipoly in geo_df.geometry:
        # Per-region accumulators; one entry per vertex, flattened across
        # the region's polygons with "NaN" separators added below.
        multipoly_vertex_longs = []
        multipoly_vertex_lats = []

        # Shapely Polygons are mapped as 1-tuples containing a list of (x,y) 2-tuples
        # representing the vertices
        # MultiPolygons are lists thereof
        # I don't know why they use 1-tuples instead of just tup[0], but they do
        shape_info: dict = shapely_mapping(multipoly)
        shape_type: str = shape_info["type"]
        # Another option would be Point, but our geo data doesn't have locations
        # like that
        assert shape_type in ["Polygon", "MultiPolygon"]
        polygons = shape_info["coordinates"]
        if shape_type == "Polygon":
            # Turn Polygon into 1-list of Polygons
            polygons = [polygons]

        polygons: MultiPolygon
        for poly_index, poly_tup in enumerate(polygons):
            # Extract the sole element of the 1-tuple
            poly: Polygon = poly_tup[0]
            # zip(*poly) unzips the (x, y) vertex pairs into two parallel
            # tuples: one of longs (x) and one of lats (y).
            polygon_vertex_longs, polygon_vertex_lats = zip(*poly)

            multipoly_vertex_longs.extend(polygon_vertex_longs)
            multipoly_vertex_lats.extend(polygon_vertex_lats)

            # Add the nan dividers (but not after the last polygon)
            if poly_index < len(polygons) - 1:
                multipoly_vertex_longs.append("NaN")
                multipoly_vertex_lats.append("NaN")

        longs.append(multipoly_vertex_longs)
        lats.append(multipoly_vertex_lats)

    geo_df[LONG_COL] = longs
    geo_df[LAT_COL] = lats

    return geo_df


# Cached: the shapefile read and reprojection only ever need to run once.
@functools.lru_cache(None)
def get_usa_states_geo_df() -> geopandas.GeoDataFrame:
    """Get geometry and long/lat coords for each US state

    :return: GeoDataFrame containing, for each US state: 2-letter state code, geometry
(boundary), and lists of long/lat coords in bokeh-compatible format :rtype: geopandas.GeoDataFrame """ geo_df: geopandas.GeoDataFrame = geopandas.read_file( GEO_DATA_DIR / "cb_2017_us_state_20m" / "cb_2017_us_state_20m.shp" ).to_crs( "EPSG:2163" # US National Atlas Equal Area (Google it) ).rename( columns={"STUSPS": REGION_NAME_COL}, errors="raise" ) return get_longs_lats(geo_df) @functools.lru_cache(None) def get_countries_geo_df() -> geopandas.GeoDataFrame: """Get geometry and long/lat coords for world countries The country names in the returned GeoDataFrame must match those in the COVID data source; if not, they must be remapped here. :return: GeoDataFrame containing, for each country: name, geometry (boundary), and lists of long/lat coords in bokeh-compatible format :rtype: geopandas.GeoDataFrame """ geo_df: geopandas.GeoDataFrame = geopandas.read_file( GEO_DATA_DIR / "ne_110m_admin_0_map_units" / "ne_110m_admin_0_map_units.shp" ).to_crs(WorldCRS.default().value) geo_df = geo_df.rename(columns={"ADMIN": REGION_NAME_COL}, errors="raise") # Keys are what's in the geo df, values are what we want to rename them to # Values must match the names in the original data source. If you don't like those # names, change them there and then come back and change the values here. geo_df[REGION_NAME_COL] = ( geo_df[REGION_NAME_COL] .map( { "Central African Republic": "Central African Rep.", "Democratic Republic of the Congo": "Dem. Rep. Congo", "Equatorial Guinea": "Eq. Guinea", "eSwatini": "Eswatini", "Georgia (Country)": "Georgia (country)", "South Sudan": "S. Sudan", "United Arab Emirates": "UAE", "United Kingdom": "Britain", "Western Sahara": "W. 
Sahara", "United States of America": "United States", } ) .fillna(geo_df[REGION_NAME_COL]) ) return get_longs_lats(geo_df) def __make_daybyday_interactive_timeline( df: pd.DataFrame, *, geo_df: geopandas.GeoDataFrame, value_col: str, transform_df_func: Callable[[pd.DataFrame], pd.DataFrame] = None, stage: Union[DiseaseStage, Literal[Select.ALL]] = Select.ALL, count: Union[Counting, Literal[Select.ALL]] = Select.ALL, out_file_basename: str, subplot_title_prefix: str, plot_aspect_ratio: float = None, cmap=None, n_cbar_buckets: int = None, n_buckets_btwn_major_ticks: int = None, n_minor_ticks_btwn_major_ticks: int = None, per_capita_denominator: int = None, x_range: Tuple[float, float], y_range: Tuple[float, float], min_visible_y_range: float, should_make_video: bool, ) -> InfoForAutoload: """Create the bokeh interactive timeline plot(s) This function takes the given DataFrame, which must contain COVID data for locations on different dates, and a GeoDataFrame, which contains the long/lat coords for those locations, and creates an interactive choropleth of the COVID data over time. :param df: The COVID data DataFrame :type df: pd.DataFrame :param geo_df: The geometry GeoDataFrame for the locations in `df` :type geo_df: geopandas.GeoDataFrame :param value_col: The column of `df` containing the values to plot in the choropleth; should be something like "Case_Counts" or "Case_Diff_From_Prev_Day" :type value_col: str :param stage: The DiseaseStage to plot, defaults to Select.ALL. If ALL, then all stages are plotted and are stacked vertically. :type stage: Union[DiseaseStage, Literal[Select.ALL]], optional :param count: The Counting to plot, defaults to Select.ALL. If ALL, then all count types are plotted and are stacked horizontally. 
:type count: Union[Counting, Literal[Select.ALL]], optional :param out_file_basename: The basename of the file to save the interactive plots to (there are two components, the JS script and the HTML <div>) :type out_file_basename: str :param subplot_title_prefix: What the first part of the subplot title should be; probably a function of `value_col` (if value_col is "Case_Counts" then this param might be "Cases" or "# of Cases") :type subplot_title_prefix: str :param x_range: The range of the x-axis as (min, max) :type x_range: Tuple[float, float] :param y_range: The range of the y-axis as (min, max) :type y_range: Tuple[float, float] :param min_visible_y_range: The minimum height (in axis units) of the y-axis; it will not be possible to zoom in farther than this on the choropleth. :type min_visible_y_range: float :param should_make_video: Optionally run through the timeline day by day, capture a screenshot for each day, and then stitch the screenshots into a video. The video shows the same info as the interactive plots, but not interactively. This easily takes 20x as long as just making the graphs themselves, so use with caution. :type should_make_video: bool :param transform_df_func: This function expects data in a certain format, and does a
import re from flask import Blueprint, request, jsonify from .models import User, Note, Video from . import db from firebase import Firebase from flask_cors import cross_origin from werkzeug.utils import secure_filename from pytube import YouTube import os import shutil import math import datetime import ssl from google.cloud import storage import requests from google.cloud import speech import time import cv2 import ffmpeg import subprocess import openai # Load your API key from an environment variable or secret management service openai.api_key = "<KEY>" views = Blueprint('views', __name__) os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'deeped-keys.json' storage_client = storage.Client() BUCKET_NAME = 'deeped-videostorage' def upload_blob(bucket_name, source_file_name, destination_blob_name): """Uploads a file to the bucket.""" # The ID of your GCS bucket # bucket_name = "your-bucket-name" # The path to your file to upload # source_file_name = "local/path/to/file" # The ID of your GCS object # destination_blob_name = "storage-object-name" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) blob = bucket.blob(destination_blob_name) blob.upload_from_filename(source_file_name) print( "File {} uploaded to {}.".format( source_file_name, destination_blob_name ) ) def download_blob(bucket_name, source_blob_name, destination_file_name): """Downloads a blob from the bucket.""" # bucket_name = "your-bucket-name" # source_blob_name = "storage-object-name" # destination_file_name = "local/path/to/file" storage_client = storage.Client() bucket = storage_client.bucket(bucket_name) # Construct a client side representation of a blob. # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve # any content from Google Cloud Storage. As we don't need additional data, # using `Bucket.blob` is preferred here. 
blob = bucket.blob(source_blob_name) blob.download_to_filename(destination_file_name) print( "Blob {} downloaded to {}.".format( source_blob_name, destination_file_name ) ) config = { "apiKey": "<KEY>", "authDomain": "fir-auth-tutorial-a425c.firebaseapp.com", "databaseURL": "https://fir-auth-tutorial-a425c-default-rtdb.firebaseio.com/", "storageBucket": "" } firebase = Firebase(config) auth = firebase.auth() from google.cloud import storage import datetime def combine_audio(vidname, audname, outpath, fps=60): subprocess.run("ffmpeg -i '{}' -i '{}' -c:v copy -c:a aac -map 0:v:0 -map 1:a:0 '{}' -shortest".format(vidname, audname, outpath), shell=True) def translate_text(target, text): print("translating...") """Translates text into the target language. Target must be an ISO 639-1 language code. See https://g.co/cloud/translate/v2/translate-reference#supported_languages """ import six from google.cloud import translate_v2 as translate translate_client = translate.Client() if isinstance(text, six.binary_type): text = text.decode("utf-8") # Text can also be a sequence of strings, in which case this method # will return a sequence of results for each text. 
result = translate_client.translate(text, target_language=target) print(u"Text: {}".format(result["input"])) print(u"Translation: {}".format(result["translatedText"])) translatedText1 = result["translatedText"] print(u"Detected source language: {}".format(result["detectedSourceLanguage"])) return translatedText1 MAX = 1000 languageDict = { "Afrikaans": ["af-ZA", "af-ZA-Standard-A", "FEMALE"], "Arabic": ["ar-XA", " ar-XA-Wavenet-C", "MALE"] , "Bengali": ["bn-IN", "bn-IN-Wavenet-B", "MALE"] , "Bulgarian": ["bg-BG", "bg-bg-Standard-A", "FEMALE"], "Catalan": ["ca-ES","ca-es-Standard-A", "FEMALE"] , "Chinese": ["yue-HK","yue-HK-Standard-D", "MALE"], "Czech": ["cs-CZ", "cs-CZ-Wavenet-A", "FEMALE"], "Danish": ["da-DK", "da-DK-Wavenet-C", "MALE"], "Dutch": ["ni-NL", "nl-NL-Wavenet-C", "MALE"], "English": ["en-US", "en-US-Wavenet-J", "MALE"], "Finnish": ["fi-FI", "fi-FI-Wavenet-A", "FEMALE"], "Filipino": ["fil-PH", "fil-PH-Wavenet-D", "MALE"], "French": ["fr-FR", "fr-FR-Wavenet-D", "MALE"], "German" : ["de-DE", "de-DE-Wavenet-E", "MALE"], "Greek": ["el-GR", "el-GR-Wavenet-A", "FEMALE"], "Gujarati": ["gu-IN", "gu-IN-Wavenet-B", "MALE"], "Hindi": ["hi-IN", "hi-IN-Wavenet-C", "MALE"], "Hungarian": ["hu-HU", "hu-HU-Wavenet-A", "FEMALE"], "Icelandic": ["is-IS", "is-is-Standard-A", "FEMALE"], "Indonesian": ["id-ID", "id-ID-Wavenet-C", "MALE"], "Italian": ["it-IT", "it-IT-Wavenet-D", "MALE"], "Japanese": ["ja-JP", "ja-JP-Wavenet-D", "MALE"], "Kannada": ["kn-IN", " kn-IN-Wavenet-B", "MALE"], "Korean": ["ko-KR", "ko-KR-Wavenet-D", "MALE"], "Latvian": ["lv-LV", "lv-lv-Standard-A", "MALE"], "Malayalam": ["ml-IN", "ml-IN-Wavenet-B", "MALE"], "Mandarin": ["cmn-TW", "cmn-TW-Wavenet-C", "MALE"], "Polish": ["pl-PL", "pl-PL-Wavenet-C", "MALE"], "Portruguese": ["pt-PT", "pt-PT-Wavenet-C", "MALE"], "Romanian": ["ro-RO", "ro-RO-Wavenet-A", "FEMALE"], "Russian": ["ru-RU", "ru-RU-Wavenet-D", "MALE"], "Slovak": ["sk-SK", "sk-SK-Wavenet-A", "FEMALE"], "Serbian": ["sr-RS", "sr-rs-Standard-A", 
"FEMALE"], "Spanish": ["es-US", "es-US-Wavenet-C", "MALE"], "Swedish": ["sv-SE", "sv-SE-Wavenet-A", "FEMALE"], "Tamil": ["ta-IN", "ta-IN-Wavenet-B", "MALE"], "Thai": ["th-TH", "th-TH-Standard-A", "FEMALE"], "Telegu": ["te-IN", "te-IN-Standard-B", "MALE"], "Turkish": ["tr-TR", "tr-TR-Wavenet-E", "MALE"], "Ukrainian": ["uk-UA", "uk-UA-Wavenet-A", "MALE"], "Vietnam": ["vi-VN", "vi-VN-Wavenet-D", "MALE"], } def replaceSpaces(string): # Remove remove leading and trailing spaces string = string.strip() i = len(string) # count spaces and find current length space_count = string.count(' ') # Find new length. new_length = i + space_count * 2 # New length must be smaller than length # of string provided. if new_length > MAX: return -1 # Start filling character from end index = new_length - 1 string = list(string) # Fill string array for f in range(i - 2, new_length - 2): string.append('0') # Fill rest of the string from end for j in range(i - 1, 0, -1): # inserts %20 in place of space if string[j] == ' ': string[index] = '0' string[index - 1] = '2' string[index - 2] = '%' index = index - 3 else: string[index] = string[j] index -= 1 return ''.join(string) def generate_url(video_title): video_title = replaceSpaces(video_title) return "https://storage.cloud.google.com/sharp-vidstorage/{}".format(video_title) user = None @views.route("/home", methods=['POST']) @cross_origin(supports_credentials=True) def home(): global user_info token = request.get_json() user_token = token["token"]["i"] user_t = user_token user_info = auth.get_account_info(user_t) user_info = user_info['users'][0] user = User.query.filter_by(email=user_info["email"]).first() if user: # session["user_id"] = user.id print("User already exists!") else: if "photoURL" not in user_info.keys(): new_user = User(id=user_info["localId"], email=user_info["email"], name=user_info["displayName"], photoURL="") else: new_user = User(id=user_info["localId"], email=user_info["email"], name=user_info["displayName"], 
photoURL=user_info["photoUrl"]) db.session.add(new_user) db.session.commit() print("User Added!") user = User.query.filter_by(email=user_info["email"]).first() likes_per_post = [i.likes for i in user.notes] posts = [(i.text, i.date, x, i.category) for i in user.notes for x in likes_per_post] return jsonify({"data": posts}) @views.route("/add-post", methods=['POST']) @cross_origin(supports_credentials=True) def add_post(): global user_info post_content = request.get_json() post = Note(text=post_content["text"], category=post_content["noteCategory"], video=post_content["video"]) user = User.query.filter_by(email=post_content["email"]).first() user.notes.append(post) db.session.commit() allPostTexts = [i.text for i in user.notes] allPostDates = [i.date for i in user.notes] likes_per_post = [i.likes for i in user.notes] categories_per_post = [i.category for i in user.notes] video_names = [i.video for i in user.notes] videos = [i for i in user.videos] final_videos = [(i.title, i.original_url, i.translated_url, i.transcript, i.date, i.summary) for i in videos] return jsonify({"data" : (allPostTexts, allPostDates, likes_per_post, categories_per_post, video_names, final_videos)}) @views.route("/get-posts", methods=['GET', 'POST']) @cross_origin(supports_credentials=True) def get_posts(): global user_info print(user_info["email"]) current_user = User.query.filter_by(email=user_info["email"]).first() print(current_user) if current_user: print("Hello") user = current_user allPostTexts = [i.text for i in user.notes] allPostDates = [i.date for i in user.notes] likes_per_post = [i.likes for i in user.notes] categories_per_post = [i.category for i in user.notes] video_names = [i.video for i in user.notes] videos = [i for i in user.videos] final_videos = [(i.title, i.original_url, i.translated_url, i.transcript, i.date, i.summary) for i in videos] return jsonify({"data" : (allPostTexts, allPostDates, likes_per_post, categories_per_post, video_names, final_videos)}) else: return 
jsonify({"data" : "error"}) @views.route("/like", methods=['GET', 'POST']) @cross_origin(supports_credentials=True) def like(): global user_info print(user_info["email"]) current_user = User.query.filter_by(email=user_info["email"]).first() print(current_user) post_data = request.get_json() if current_user: curr_post = Note.query.filter_by(user_id=user_info["localId"], text=post_data["text"]).first() curr_post.likes = curr_post.likes+1 db.session.commit() return jsonify({"data" : curr_post.likes}) else: return jsonify({"data" : "error"}) @views.route("/translate", methods=['GET','POST']) @cross_origin(supports_credentials=True) def translate(): # get curr Video object data = request.form video_name = data["title"] video_url = data["url"] translate_lang = data["language"] #figure out how to get video object from front end form n send to backend, hardcoded for testing purposes rn if video_url: payload = {"url" : video_url, "title" : video_name} cloud_function = "https://us-west2-deepeducation-316206.cloudfunctions.net/DeepEdPyTube" r = requests.post(cloud_function, data=payload) print ("Status Code ", r.status_code) funcStatus = "Success" if r.ok else "Failed" print (funcStatus) client = speech.SpeechClient() audio = speech.RecognitionAudio(uri="gs://deeped-videostorage/{}.wav".format(video_name)) config = speech.RecognitionConfig( language_code="en-US", # Automatically transcribe punctuation enable_automatic_punctuation=False, model="video") res = client.long_running_recognize(config=config, audio=audio).result() print("transcript: ") text = "" for result in res.results: print(result.alternatives[0].transcript) text += result.alternatives[0].transcript print(text) response = openai.Completion.create( engine="davinci", prompt= (text + "tl;dr:"), temperature=0.3, max_tokens=300, top_p=1, frequency_penalty=0, presence_penalty=0 ) summary=response["choices"][0]["text"] translatedText2 = translate_text(languageDict[translate_lang][0][0:2], text) summary = 
translate_text(languageDict[translate_lang][0][0:2], summary) from google.cloud import texttospeech # Instantiates a client client = texttospeech.TextToSpeechClient() # Set the text input to be synthesized synthesis_input = texttospeech.SynthesisInput(text=translatedText2) # Build the voice request, select the language code ("en-US") and the ssml # voice gender ("neutral") voice2 = texttospeech.VoiceSelectionParams( language_code=languageDict[translate_lang][0], name = languageDict[translate_lang][1], ssml_gender=languageDict[translate_lang][2] ) # Select the type of audio file you want returned audio_config = texttospeech.AudioConfig( audio_encoding=texttospeech.AudioEncoding.MP3 ) # Perform the text-to-speech request on the text input with the selected # voice parameters and audio file type response = client.synthesize_speech( input=synthesis_input, voice=voice2, audio_config=audio_config ) source_blob_name = video_name + ".mp4" source_audname = video_name + ".mp3" destination_file_name = "/Users/t/Sharp/{}.mp4".format(video_name) download_blob(BUCKET_NAME, source_blob_name, destination_file_name) with open(source_audname, "wb") as out: # Write the response to the output file. out.write(response.audio_content) print(f'Audio content written to file {source_audname}') destination_blob_name = video_name + "_translated.mp4" combine_audio(source_blob_name, source_audname, destination_blob_name, fps=60) upload_blob(BUCKET_NAME, destination_blob_name, destination_blob_name) translated_url = generate_url(destination_blob_name) return {"translated_url": translated_url, "transcript" : translatedText2, "summary" : summary} return 'video url seems to be missing!?' 
@views.route("/add-video", methods=['GET','POST']) @cross_origin(supports_credentials=True) def add_video(): global user_info print(user_info["email"]) video_info = request.get_json() user = User.query.filter_by(email=video_info["email"]).first() video = Video(original_url=video_info["url"], translated_url="", title=video_info["title"], transcript="", summary="") r = requests.post("http://localhost:5000/translate", data={"url" : video_info["url"], "title" : video.title, "language" : video_info["language"]}) print ("Status Code ", r.status_code) res = r.json() print(res) video.transcript = res["transcript"] video.translated_url = res["translated_url"] video.summary = res["summary"] user.videos.append(video) db.session.commit() allPostTexts = [i.text for i in user.notes] allPostDates = [i.date
Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) 
plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() # plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = 
slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, 
improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') 
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = 
minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = 
np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic 
Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = 
CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients =
<gh_stars>0 #!/usr/bin/env python3 # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Evaluates the trained model on the given test set.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from enum import Enum from collections import OrderedDict from absl import app, flags import numpy as np from official.nlp.data import tagging_data_lib from official.nlp.tasks import utils from official.nlp.tasks.tagging import TaggingTask from official.nlp.data import tagging_dataloader from seqeval.scheme import IOB2 from seqeval.metrics import classification_report import tensorflow as tf from tensorflow.io.gfile import GFile import orbit from com_google_research_bert import tokenization from training.model_setup_config import ModelSetupConfig, ModelSize from training.utils import (ADDITIONAL_LABELS, LABELS, LABEL_ID_MAP, LABEL_OUTSIDE, MAIN_LABELS, LabeledExample, add_tfrecord_label, get_tokenizer, remove_whitespace_and_parse, write_example_to_file, get_tagging_config) from training.file_reader import get_file_reader import protocol_buffer.documents_pb2 as proto_documents flags.DEFINE_enum("size", "base", ["base", "tiny"], "The size of the BERT model.") flags.DEFINE_bool("pretrained", True, "If set, the pretrained model is loaded from the TF hub.") flags.DEFINE_bool("case_sensitive", False, "If set, the model is case sensitive.") flags.DEFINE_string("model_path", None, "The path to the 
trained model.") flags.DEFINE_multi_string( "input_paths", [], "The paths to the test data in .tfrecord format (to be labeled) or a " "folder containing .lftxt files with precomputed labels.") flags.DEFINE_multi_string( "raw_paths", [], "The paths to the test data in its original .binproto or .lftxt format.") flags.DEFINE_string( "visualisation_folder", None, "If set, a comparison of the target/hypothesis labeling is saved in .html " "format") flags.DEFINE_boolean( "strict_eval", False, "Only used for scoring. If True, a label must not begin with an 'I-' tag.") flags.DEFINE_boolean( "train_with_additional_labels", False, "Needs to be set if the flags other than address/phone were used for " "training, too.") flags.DEFINE_multi_enum( "save_output_formats", [], ["lftxt", "binproto", "tfrecord"], "If set, the hypotheses are saved in the corresponding formats.") flags.DEFINE_string("output_directory", None, "Controls where to save the hypotheses.") flags.DEFINE_integer( "moving_window_overlap", 20, "The size of the overlap for a moving window. " "Setting it to zero restores the default behaviour of hard splitting.") flags.DEFINE_integer( "max_seq_length", 128, "The maximal sequence length. Longer sequences are split.") flags.DEFINE_integer("batch_size", 64, "The number of samples per batch.") flags.DEFINE_string( "tpu_address", None, "The internal address of the TPU node, including 'grpc://'. If not set, no " "tpu is used.") flags.DEFINE_multi_integer( "unlabeled_sentence_filters", [1], "For each entry, a copy of the hypothesis is saved (if activated). Only " "every nth sentence without any labels is kept, the other unlabeled " "sentences are dropped. Sentences with at least one predicted label are " "always kept.") flags.DEFINE_boolean( "eval", True, "If set, scores are computed. 
Disabling this is useful if the hypotheses " "are saved and reused later, so no scores are needed.") flags.DEFINE_boolean("viterbi_decoding", True, "If set, Viterbi is used for decoding.") FLAGS = flags.FLAGS LabelType = Enum("LabelType", "OUTSIDE BEGINNING INSIDE") def _assert_same_length(sequences, sentence_id=None): lengths = [len(sequence) for sequence in sequences] if len(lengths) == 0: return if sentence_id is not None: additional_information = "(sentence id %d)" % sentence_id else: additional_information = "" for length in lengths[1:]: assert length == lengths[ 0], "Not all sequences have the same length: %s %s" % ( str(lengths), additional_information) def _predict(task, params, model): """Predicts on the input data. Similiar to official.nlp.tasks.tagging.predict, but returns the logits instead of the final label. Args: task: A `TaggingTask` object. params: A `cfg.DataConfig` object. model: A keras.Model. Returns: A list of tuple. Each tuple contains `sentence_id`, `sub_sentence_id` and a list of predicted ids. 
""" def _predict_step(inputs): """Replicated prediction calculation.""" x, y = inputs sentence_ids = x.pop("sentence_id") sub_sentence_ids = x.pop("sub_sentence_id") outputs = task.inference_step(x, model) logits = outputs["logits"] label_mask = tf.greater_equal(y, 0) return dict(logits=logits, label_mask=label_mask, sentence_ids=sentence_ids, sub_sentence_ids=sub_sentence_ids) def _aggregate_fn(state, outputs): """Concatenates model's outputs.""" if state is None: state = [] for (batch_logits, batch_label_mask, batch_sentence_ids, batch_sub_sentence_ids) in zip(outputs["logits"], outputs["label_mask"], outputs["sentence_ids"], outputs["sub_sentence_ids"]): batch_probs = tf.keras.activations.softmax(batch_logits) for (tmp_prob, tmp_label_mask, tmp_sentence_id, tmp_sub_sentence_id) in zip(batch_probs.numpy(), batch_label_mask.numpy(), batch_sentence_ids.numpy(), batch_sub_sentence_ids.numpy()): real_probs = [] assert len(tmp_prob) == len(tmp_label_mask) _assert_same_length([tmp_prob, tmp_label_mask], tmp_sentence_id) for i in range(len(tmp_prob)): # Skip the padding label. if tmp_label_mask[i]: real_probs.append(tmp_prob[i]) state.append( (tmp_sentence_id, tmp_sub_sentence_id, real_probs)) return state dataset = orbit.utils.make_distributed_dataset( tf.distribute.get_strategy(), task.build_inputs, params) outputs = utils.predict(_predict_step, _aggregate_fn, dataset) return sorted(outputs, key=lambda x: (x[0], x[1])) def _viterbi_decoding(probabilities, train_with_additional_labels): """"Applies the viterbi algorithm. This searches for the most likely valid label sequence. 
""" labels = LABELS if train_with_additional_labels: labels += ADDITIONAL_LABELS path_probabilities = np.full(len(labels), -np.inf) label_outside_index = labels.index(LABEL_OUTSIDE) path_probabilities[label_outside_index] = 0.0 path_pointers = [] for prob_token in probabilities: prev_path_probabilities = path_probabilities.copy() path_probabilities = np.zeros(len(labels)) new_pointers = [len(labels) + 1] * len( labels) # An invalid value ensures it will be updated. for current_label_id in range(len(labels)): current_label_name = labels[current_label_id] if _is_label_type(current_label_name, LabelType.INSIDE): current_main_label_name = current_label_name[2:] valid_prev_label_names = [("B-%s" % current_main_label_name), ("I-%s" % current_main_label_name)] mask = np.full(len(labels), True) for prev_label_name in valid_prev_label_names: prev_label_id = LABEL_ID_MAP[prev_label_name] mask[prev_label_id] = False masked_prev_path_probabilities = prev_path_probabilities.copy() masked_prev_path_probabilities[mask] = -np.inf else: masked_prev_path_probabilities = prev_path_probabilities total_prob = masked_prev_path_probabilities + np.log( prob_token[current_label_id]) max_prob_index = np.argmax(total_prob) path_probabilities[current_label_id] = total_prob[max_prob_index] new_pointers[current_label_id] = max_prob_index path_pointers.append(new_pointers) most_likely_path = [] most_likely_end = np.core.fromnumeric.argmax(np.array(path_probabilities)) while path_pointers: pointers = path_pointers.pop() most_likely_path.insert(0, most_likely_end) most_likely_end = pointers[most_likely_end] return most_likely_path def _greedy_decoding(probabilities): """"Applies greedy search. For each position, the most likely label is selected. This may lead to invalid sequences (entities starting with an "I-" label). 
""" path = [] for prob_token in probabilities: path.append(np.argmax(prob_token)) return path def _get_model_and_task(model_config, model_path): """Returns the loaded model and corresponding task.""" labels = LABELS if model_config.train_with_additional_labels: labels += ADDITIONAL_LABELS tagging_config = get_tagging_config(model_config, label_list=labels) task = TaggingTask(tagging_config) if model_path: model = task.build_model() model.load_weights(model_path) else: model = None return model, task def _infer(model, task, test_data_path, train_with_additional_labels, batch_size): """Computes the predicted label sequence using the trained model.""" test_data_config = tagging_dataloader.TaggingDataConfig( input_path=test_data_path, seq_length=128, global_batch_size=batch_size, is_training=False, include_sentence_id=True, drop_remainder=False) predictions = _predict(task, test_data_config, model) merged_probabilities = [] for _, part_id, predicted_probabilies in predictions: if part_id == 0: merged_probabilities.append(predicted_probabilies) else: merged_probabilities[-1].extend(predicted_probabilies) merged_predictions = [] for i, probabilities in enumerate(merged_probabilities): assert not np.isnan(probabilities).any(), ( "There was an error during decoding. Try reducing the batch size." 
" First error in sentence %d" % i) if FLAGS.viterbi_decoding: prediction = _viterbi_decoding(probabilities, train_with_additional_labels) else: prediction = _greedy_decoding(probabilities) merged_predictions.append(prediction) return merged_predictions def _visualise(test_name, characterwise_target_labels_per_sentence, characterwise_predicted_labels_per_sentence, characters_per_sentence, words_per_sentence, visualised_label, visualisation_folder): """Generates a .html file comparing the hypothesis/target labels.""" _assert_same_length([ characterwise_target_labels_per_sentence, characterwise_predicted_labels_per_sentence, characters_per_sentence ]) number_of_sentences = len(characterwise_target_labels_per_sentence) directory = os.path.join(visualisation_folder, test_name) if not os.path.exists(directory): os.makedirs(directory) file_name = os.path.join(directory, "%s.html" % visualised_label.lower()) with GFile(file_name, "w") as file: file.write("%s labels in %s <br>\n" % (visualised_label, test_name)) file.write("<font color='green'>Correct labels</font> <br>\n") file.write("<font color='blue'>Superfluous labels</font> <br>\n") file.write("<font color='red'>Missed labels</font> <br>\n") file.write("<br>\n") for i in range(number_of_sentences): characterwise_target_labels = ( characterwise_target_labels_per_sentence[i]) characterwise_predicted_labels = ( characterwise_predicted_labels_per_sentence[i]) characters = characters_per_sentence[i] words = words_per_sentence[i] characterwise_target_labels_length = len( characterwise_target_labels) characterwise_predicted_labels_length = len( characterwise_predicted_labels) characters_length = len(characters) assert ( characterwise_target_labels_length == characterwise_predicted_labels_length == characters_length ), ("Hypotheses/targets have different lengths: %d, %d, %d" " (sentence %d)") % (characterwise_target_labels_length, characterwise_predicted_labels_length, characters_length, i) word_index = 0 word_position = 0 
            # Walk the sentence character by character, colour-coding the
            # agreement between target and hypothesis for the visualised
            # label (green = both, red = target only, blue = hypothesis
            # only), re-inserting a space after each completed word.
            for target_label, predicted_label, character in zip(
                    characterwise_target_labels,
                    characterwise_predicted_labels, characters):
                if target_label.endswith(
                        visualised_label) and predicted_label.endswith(
                            visualised_label):
                    file.write("<font color='green'>" + character + "</font>")
                elif target_label.endswith(visualised_label):
                    file.write("<font color='red'>" + character + "</font>")
                elif predicted_label.endswith(visualised_label):
                    file.write("<font color='blue'>" + character + "</font>")
                else:
                    file.write(character)
                word_position += 1
                if word_position == len(words[word_index]):
                    # End of the current word: move on to the next word and
                    # restore the whitespace between words.
                    word_index += 1
                    word_position = 0
                    file.write(" ")
            file.write("<br>\n")


def _score(characterwise_target_labels_per_sentence,
           characterwise_predicted_labels_per_sentence, use_strict_mode):
    """Computes the precision, recall and f1 scores of the hypotheses."""
    # In strict mode a valid entity must start with a "B-" tag (IOB2
    # scheme); otherwise seqeval's default lenient matching is used.
    if use_strict_mode:
        return classification_report(
            characterwise_target_labels_per_sentence,
            characterwise_predicted_labels_per_sentence,
            mode="strict",
            scheme=IOB2)
    else:
        return classification_report(
            characterwise_target_labels_per_sentence,
            characterwise_predicted_labels_per_sentence)


def _is_label_type(label_name, label_type):
    """Checks whether the label is of the specified type."""
    # Map the textual label to its positional type: the outside label,
    # "B-" (beginning), or -- asserted as the only remaining case --
    # "I-" (inside).
    if label_name == LABEL_OUTSIDE:
        real_label_type = LabelType.OUTSIDE
    elif label_name.startswith("B-"):
        real_label_type = LabelType.BEGINNING
    else:
        assert label_name.startswith("I-")
        real_label_type = LabelType.INSIDE
    return label_type == real_label_type


def _transform_wordwise_labels_to_characterwise_labels(
        words_per_sentence, predicted_label_ids_per_sentence):
    """Duplicates the labels such that each character is assigned a label.

    For "B-" labels, only the first character of the word is assigned the
    "B-" label, all other characters are assigned the corresponding "I-"
    label.
""" characterwise_predicted_label_ids_per_sentence = [] _assert_same_length([words_per_sentence, predicted_label_ids_per_sentence]) for i, (words, predicted_label_ids) in enumerate( zip(words_per_sentence, predicted_label_ids_per_sentence)): characterwise_predicted_label_ids = [] _assert_same_length([words, predicted_label_ids], i) for word, label_id in zip(words, predicted_label_ids): if _is_label_type(LABELS[label_id], LabelType.BEGINNING): characterwise_predicted_label_ids += [ label_id ] +
tree["meshes"].append({ "name": name, "primitives": [{ "attributes": {"POSITION": len(tree["accessors"])}, "mode": 1, # mode 1 is GL_LINES "material": len(tree["materials"])}]}) # if units are defined, store them as an extra: # https://github.com/KhronosGroup/glTF/tree/master/extensions if path.units is not None and 'meter' not in path.units: tree["meshes"][-1]["extras"] = {"units": str(path.units)} tree["accessors"].append( { "bufferView": len(buffer_items), "componentType": 5126, "count": vxlist[0], "type": "VEC3", "byteOffset": 0, "max": path.vertices.max(axis=0).tolist(), "min": path.vertices.min(axis=0).tolist()}) # TODO add color support to Path object # this is just exporting everying as black tree["materials"].append(_default_material) # data is the second value of the fifth field # which is a (data type, data) tuple buffer_items.append(_byte_pad( vxlist[4][1].astype(float32).tobytes())) # add color to attributes tree["meshes"][-1]["primitives"][0]["attributes"]["COLOR_0"] = len(tree["accessors"]) # the vertex color accessor data tree["accessors"].append({ "bufferView": len(buffer_items), "componentType": 5121, "count": vxlist[0], "normalized": True, "type": "VEC4", "byteOffset": 0}) # the actual color data buffer_items.append(_byte_pad( np.array(vxlist[5][1]).astype(uint8).tobytes())) def _append_point(points, name, tree, buffer_items): """ Append a 2D or 3D pointCloud to the scene structure and put the data into buffer_items. 
    Parameters
    -------------
    points : trimesh.PointCloud
      Source geometry
    name : str
      Name of geometry
    tree : dict
      glTF header tree; will be updated with data from points
    buffer_items
      Will have buffer appended with points data
    """
    # convert the points to the unnamed args for
    # a pyglet vertex list
    vxlist = rendering.points_to_vertexlist(points=points.vertices,
                                            colors=points.colors)

    tree["meshes"].append({
        "name": name,
        "primitives": [{
            "attributes": {"POSITION": len(tree["accessors"])},
            "mode": 0,  # mode 0 is GL_POINTS
            "material": len(tree["materials"])}]})

    # position accessor: componentType 5126 is FLOAT per the glTF 2.0 spec
    tree["accessors"].append(
        {
            "bufferView": len(buffer_items),
            "componentType": 5126,
            "count": vxlist[0],
            "type": "VEC3",
            "byteOffset": 0,
            "max": points.vertices.max(axis=0).tolist(),
            "min": points.vertices.min(axis=0).tolist()})

    # TODO add color support to Points object
    # this is just exporting everying as black
    tree["materials"].append(_default_material)

    # data is the second value of the fifth field
    # which is a (data type, data) tuple
    buffer_items.append(_byte_pad(
        vxlist[4][1].astype(float32).tobytes()))

    # add color to attributes
    tree["meshes"][-1]["primitives"][0]["attributes"]["COLOR_0"] = len(tree["accessors"])
    # the vertex color accessor data
    # componentType 5121 is UNSIGNED_BYTE; normalized maps 0-255 to 0.0-1.0
    tree["accessors"].append({
        "bufferView": len(buffer_items),
        "componentType": 5121,
        "count": vxlist[0],
        "normalized": True,
        "type": "VEC4",
        "byteOffset": 0})
    # the actual color data
    buffer_items.append(_byte_pad(
        np.array(vxlist[5][1]).astype(uint8).tobytes()))


def _parse_materials(header, views, resolver=None):
    """
    Convert materials and images stored in a GLTF header
    and buffer views to PBRMaterial objects.
Parameters ------------ header : dict Contains layout of file views : (n,) bytes Raw data Returns ------------ materials : list List of trimesh.visual.texture.Material objects """ try: import PIL.Image except ImportError: log.warning("unable to load textures without pillow!") return None # load any images images = None if "images" in header: # images are referenced by index images = [None] * len(header["images"]) # loop through images for i, img in enumerate(header["images"]): # get the bytes representing an image if 'bufferView' in img: blob = views[img["bufferView"]] elif 'uri' in img: # will get bytes from filesystem or base64 URI blob = _uri_to_bytes(uri=img['uri'], resolver=resolver) else: log.warning('unable to load image from: {}'.format( img.keys())) continue # i.e. 'image/jpeg' # mime = img['mimeType'] try: # load the buffer into a PIL image images[i] = PIL.Image.open(util.wrap_as_stream(blob)) except BaseException: log.error("failed to load image!", exc_info=True) # store materials which reference images materials = [] if "materials" in header: for mat in header["materials"]: # flatten key structure so we can loop it loopable = mat.copy() # this key stores another dict of crap if "pbrMetallicRoughness" in loopable: # add keys of keys to top level dict loopable.update(loopable.pop("pbrMetallicRoughness")) # save flattened keys we can use for kwargs pbr = {} for k, v in loopable.items(): if not isinstance(v, dict): pbr[k] = v elif "index" in v: # get the index of image for texture idx = header["textures"][v["index"]]["source"] # store the actual image as the value pbr[k] = images[idx] # create a PBR material object for the GLTF material materials.append(visual.material.PBRMaterial(**pbr)) return materials def _read_buffers(header, buffers, mesh_kwargs, merge_primitives=False, resolver=None): """ Given a list of binary data and a layout, return the kwargs to create a scene object. 
Parameters ----------- header : dict With GLTF keys buffers : list of bytes Stored data passed : dict Kwargs for mesh constructors Returns ----------- kwargs : dict Can be passed to load_kwargs for a trimesh.Scene """ if "bufferViews" in header: # split buffer data into buffer views views = [None] * len(header["bufferViews"]) for i, view in enumerate(header["bufferViews"]): if "byteOffset" in view: start = view["byteOffset"] else: start = 0 end = start + view["byteLength"] views[i] = buffers[view["buffer"]][start:end] assert len(views[i]) == view["byteLength"] # load data from buffers into numpy arrays # using the layout described by accessors access = [None] * len(header['accessors']) for index, a in enumerate(header["accessors"]): # number of items count = a['count'] # what is the datatype dtype = _dtypes[a["componentType"]] # basically how many columns per_item = _shapes[a["type"]] # use reported count to generate shape shape = np.append(count, per_item) # number of items when flattened # i.e. 
a (4, 4) MAT4 has 16 per_count = np.abs(np.product(per_item)) if 'bufferView' in a: # data was stored in a buffer view so get raw bytes data = views[a["bufferView"]] # is the accessor offset in a buffer if "byteOffset" in a: start = a["byteOffset"] else: # otherwise assume we start at first byte start = 0 # length is the number of bytes per item times total length = np.dtype(dtype).itemsize * count * per_count # load the bytes data into correct dtype and shape access[index] = np.frombuffer( data[start:start + length], dtype=dtype).reshape(shape) else: # a "sparse" accessor should be initialized as zeros access[index] = np.zeros( count * per_count, dtype=dtype).reshape(shape) # load images and textures into material objects materials = _parse_materials( header, views=views, resolver=resolver) mesh_prim = collections.defaultdict(list) # load data from accessors into Trimesh objects meshes = collections.OrderedDict() if "meshes" in header: for index, m in enumerate(header["meshes"]): metadata = {} try: # try loading units from the GLTF extra metadata['units'] = str(m["extras"]["units"]) except BaseException: # GLTF spec indicates the default units are meters metadata['units'] = 'meters' for j, p in enumerate(m["primitives"]): # if we don't have a triangular mesh continue # if not specified assume it is a mesh if "mode" in p and p["mode"] != 4: log.warning('skipping primitive with mode {}!'.format(p['mode'])) continue # store those units kwargs = {"metadata": {}} kwargs.update(mesh_kwargs) kwargs["metadata"].update(metadata) # get vertices from accessors kwargs["vertices"] = access[p["attributes"]["POSITION"]] # get faces from accessors if 'indices' in p: kwargs["faces"] = access[p["indices"]].reshape((-1, 3)) else: # indices are apparently optional and we are supposed to # do the same thing as webGL drawArrays? 
kwargs['faces'] = np.arange( len(kwargs['vertices']), dtype=np.int64).reshape((-1, 3)) # do we have UV coordinates visuals = None if "material" in p: if materials is None: log.warning('no materials! `pip install pillow`') else: uv = None if "TEXCOORD_0" in p["attributes"]: # flip UV's top- bottom to move origin to lower-left: # https://github.com/KhronosGroup/glTF/issues/1021 uv = access[p["attributes"]["TEXCOORD_0"]].copy() uv[:, 1] = 1.0 - uv[:, 1] # create a texture visual visuals = visual.texture.TextureVisuals( uv=uv, material=materials[p["material"]]) if 'COLOR_0' in p['attributes']: try: # try to load vertex colors from the accessors colors = access[p['attributes']['COLOR_0']] if len(colors) == len(kwargs['vertices']): if visuals is None: # just pass to mesh as vertex color kwargs['vertex_colors'] = colors else: # we ALSO have texture so save as vertex attribute visuals.vertex_attributes['color'] = colors except BaseException: # survive failed colors log.debug('failed to load colors', exc_info=True) if visuals is not None: kwargs['visual'] = visuals # create a unique mesh name per- primitive if "name" in m: name = m["name"] else: name = "GLTF_geometry" # make name unique across multiple meshes if name in meshes: name += "_{}".format(util.unique_id()) # each primitive gets it's own Trimesh object if len(m["primitives"]) > 1: name += "_{}".format(j) custom_attrs = [attr for attr in p["attributes"] if attr.startswith("_")] if len(custom_attrs): vertex_attributes = {} for attr in custom_attrs: vertex_attributes[attr] = access[p["attributes"][attr]] kwargs["vertex_attributes"] = vertex_attributes kwargs["process"] = False meshes[name] = kwargs mesh_prim[index].append(name) # sometimes GLTF "meshes" come with multiple "primitives" # by default we return one Trimesh object per "primitive" # but if merge_primitives is True we combine the primitives # for the "mesh" into a single Trimesh object if merge_primitives: # if we are only returning one Trimesh object # 
replace `mesh_prim` with updated values mesh_prim_replace = dict() mesh_pop = [] for
Thus, it is not used for VNF chaining cp_list.pop(0) # gets all virtual links in VNFFGD vnffgd_vls = topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'] # getting the previous network_name for correct VNF chaining previous_net_name = '' if vnffgd_vls: previous_vl = vnffgd_vls[-1] # gets the current last VL in VNFFG # gets the current last VNF Name in VNFFGD previous_vnfd_name = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'][-1] previous_vnf_pkg = database.list_catalog(vnfd_name=previous_vnfd_name) previous_vnfp_dir = previous_vnf_pkg[0]['dir_id'] # gets all connection points data from previous VNFD previous_vnfd_cps = self.list_vnf_pkg_cps(previous_vnfp_dir) for cp in previous_vnfd_cps: if previous_vnfd_cps[cp]['virtual_link'] == previous_vl: previous_net_name = previous_vnfd_cps[cp]['network_name'] break cp_in, cp_out = "", "" # including cp_input for cp in cp_list: if vnffgd_vls: # if there are previous Virtual Links included in VNFFGD # cp_in is valid just if it is connected to the same network_name from previous VNF output if vnf_pkg_cps[cp]['network_name'] == previous_net_name: cp_in = cp break else: # if this VNF is the first one being included in VNFFGD cp_in = cp break if not cp_in: raise NFVOAgentsException(ERROR, 'There is no suitable CP for chaining with the previous VNF!') # including cp_output num_cps = len(cp_list) if num_cps == 1: cp_out = cp_in else: # num_cps surely will be > 1, because previous return # output CP requirements are dependent of NFVO capabilities, thus it was implemented in the related agent cp_out = self.select_and_validate_cp_out(options_cp_out, vnf_pkg_cps, cp_in) if cp_in == cp_out: capability = [cp_in] else: capability = [cp_in, cp_out] for cp in capability: # including connection points topology_template['groups']['VNFFG1']['properties']['connection_point'].append(cp) # including dependent virtual links virtual_link = vnf_pkg_cps[cp]['virtual_link'] # if virtual_link not in 
topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']: topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'].append(virtual_link) # including constituent VNFs topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'].append(vnfd_name) vnf_end_points = len(capability) if vnf_end_points == 1: capability = capability[0] else: capability = ','.join(capability) # including number of endpoints topology_template['groups']['VNFFG1']['properties']['number_of_endpoints'] += vnf_end_points # TODO we disabled sfc_encap since our VNFs are NSH-unaware (i.e. VNFs are receiving MPLS packets from OVS) # As a result we are only creating SFCs using NSH-unaware VNFs. NSH-aware VNFs still need to be implemented path = {"forwarder": vnfd_name, "capability": capability, "sfc_encap": False} # hard coded # including VNF forwarding path topology_template['node_templates']['Forwarding_path1']['properties']['path'].append(path) return sfc_descriptor def get_vnf_nfvo_resource_id(self, vnf_id, resource_name): """Retrieves the NFVO resource ID (such as VDU and CP) from from a particular VNF :param vnf_id: :param resource_name: the resource name to get the ID Raises ------ NFVOAgentsException """ resources = self.list_vnf_nfvo_resources(vnf_id) for resource in resources: if resource['name'] == resource_name: return resource['id'] raise NFVOAgentsException(ERROR, 'VNF Resource ID not found!') def get_sfc_traffic_origin(self, core): # fields defines which information should be shown dynamically by client applications fields = [ {'id': 'ID'}, {'name': 'Name'}, {'instance': 'Instance Name'}, {'address': 'Mgmt Address'}, {'status': 'Status'}, {'platform': 'Platform'} ] vnfs = self.list_vnfs() src_vnfs = [] for vnf in vnfs: src_vnf = { 'id': vnf.get('vnf_id'), 'name': vnf.get('vnf_name'), 'instance': vnf.get('instance_name'), 'address': vnf.get('mgmt_url'), 'status': vnf.get('vnf_status'), 'platform': TACKER_NFVO } src_vnfs.append(src_vnf) 
return fields, src_vnfs def configure_traffic_src_policy(self, sfc_descriptor, origin, src_id, cp_out, database): """ Includes ACL criteria according to INTERNAL or EXTERNAL traffic source INTERNAL traffic is sourced from VNFs managed by NFVO, while EXTERNAL traffic is sourced from everything out from NFVO networks. This function also includes specific requirements to select the source port for Tacker. Tacker has the requirement for 'network_source_port_id' in ACL criteria, which is included in VNFFGD by this function. One important rule is applied: 1. Tacker's network_name from the origin VNF CP must be the same as the input CP of the first VNF in the chain. If there are more CPs than 1, then a message with status OPTIONS and a cp_list is replied to the user to inform a desirable connection point. :param sfc_descriptor: :param origin: INTERNAL or EXTERNAL as in *utils module* :param src_id: the Tacker's VNF ID of the VNF which generates the SFC incoming traffic :param cp_out: :param database: :return: the VNFFGD being composed Raises ------ NFVOAgentsException, NFVOAgentOptions ReRaises ------ DatabaseException """ net_src_port_id = None topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template'] # sfp = service function path sfp_cps = topology_template['groups']['VNFFG1']['properties']['connection_point'] sfp_vnfs = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'] # network_src_port_id is a requirement for Tacker NFVO criteria = topology_template['node_templates']['Forwarding_path1']['properties']['policy']['criteria'] catalog = database.list_catalog(vnfd_name=sfp_vnfs[0]) sfp_first_pkg_dir_id = catalog[0]['dir_id'] sfp_first_vnf_cps = self.list_vnf_pkg_cps(sfp_first_pkg_dir_id) if origin == INTERNAL: data = database.list_vnf_instances(vnf_id=src_id) # Only VNFs instantiated by this framework can be used as origin, # as we need get information of its CP on VNF Packages if not data: raise 
NFVOAgentsException(ERROR, 'The chosen VNF was not instantiated by this framework!') vnf_pkg_id = data[0]['vnf_pkg_id'] catalog = database.list_catalog(vnf_pkg_id=vnf_pkg_id) vnf_pkg_dir = catalog[0]['dir_id'] vnf_pkg_cps = self.list_vnf_pkg_cps(vnf_pkg_dir) # Leave just the CPs that are in the same subnet of the first VNF CP_in of the SFC cps = vnf_pkg_cps.keys() cps = list(cps) for cp in cps: if vnf_pkg_cps[cp]['network_name'] != sfp_first_vnf_cps[sfp_cps[0]]['network_name']: vnf_pkg_cps.pop(cp) if cp_out is None: # Selects the suitable CP_out automatically if not vnf_pkg_cps: raise NFVOAgentsException(ERROR, 'No suitable CP on this VNF!') if len(vnf_pkg_cps) == 1: cp_name = list(vnf_pkg_cps.keys())[0] else: raise NFVOAgentOptions('Choose an CP!', vnf_pkg_cps) else: cp_name = cp_out if cp_name not in vnf_pkg_cps: raise NFVOAgentsException(ERROR, 'Invalid CP!') net_src_port_id = self.get_vnf_nfvo_resource_id(src_id, cp_name) elif origin == EXTERNAL: net_src_port_id = self.get_fip_router_interface_id(sfp_first_vnf_cps[sfp_cps[0]]['network_name']) else: raise NFVOAgentsException(ERROR, 'SFC network traffic should be INTERNAL or EXTERNAL.') if not net_src_port_id: logger.error('Unable to get a value for network_src_port_id') raise NFVOAgentsException(ERROR, 'Unable to get the source port id to configure the SFC classifier') # currently multi-sfc uses only one network_src_port_id for all classifiers (the same) # the configure_policies gets this value and uses for all subsequent classifiers for classifier in criteria: classifier['classifier']['network_src_port_id'] = net_src_port_id return sfc_descriptor def acl_criteria_parser(self, acl): """Parses all ACL criteria according of Tacker NFVO requirements. It parses from strings to ints all ACL criteria to match the NFVO requirements. 
        :param acl: a dict with the acl criteria
        :return: a dict with the parsed acl criteria

        Raises
        ------
        NFVOAgentsException
        """
        # the valid ACL keys, their types and ranges come from the Tacker
        # NFV definitions file shipped alongside this agent
        with open('tacker_nfv_defs.yaml', 'r') as defs_file:
            acl_defs = defs_file.read()
        acl_defs = yaml.full_load(acl_defs)
        acl_types = acl_defs['data_types']['tosca.nfv.datatypes.aclType']['properties']

        # iterate a copy so `acl` itself can be mutated (int conversion below)
        tmp_acl = acl.copy()
        for k, v in tmp_acl.items():
            if k not in acl_types:
                msg = 'Invalid ACL criteria "%s"!' % k
                logger.error(msg)
                raise NFVOAgentsException(ERROR, msg)

            # enforce the in_range constraint when the definition declares one
            if 'constraints' in acl_types[k]:
                item_range = acl_types[k]['constraints'][0]['in_range']
                start, end = item_range
                if int(v) not in range(start, end+1):
                    msg = "Invalid value for ACL criteria '%s'! Use a value between %s and %s." % (k, start, end)
                    logger.error(msg)
                    raise NFVOAgentsException(ERROR, msg)

            # Tacker expects integer-typed criteria as real ints, not strings
            if acl_types[k]['type'] == 'integer':
                acl[k] = int(v)

        return acl

    def configure_policies(self, sfc_descriptor, policies):
        """Configure ACL rules for all Tacker SFC classifiers

        :param sfc_descriptor: the VNFFGD being composed
        :param policies: list of ACL dicts, one per classifier
        :return: the VNFFGD with all classifier criteria rebuilt
        """
        topology_template = sfc_descriptor['vnffgd']['template']['vnffgd']['topology_template']
        criteria = topology_template['node_templates']['Forwarding_path1']['properties']['policy']['criteria']

        # keep the port id configured earlier by configure_traffic_src_policy,
        # then rebuild the whole criteria list in one shot (simplify)
        net_src_port_id = criteria[0]['classifier'].get('network_src_port_id')
        criteria.clear()  # remove the partial data of the first classifier to build all of them in one shot (simplify)

        for acl in policies:
            # acl is the content of a policy
            acl = self.acl_criteria_parser(acl)
            # currently the same source port id is reused for every classifier
            acl['network_src_port_id'] = net_src_port_id

            classifier = {
                "classifier": acl
                # classifier unique name is configured in create_sfc function, since the SFC name is required
                # "name": ""
            }
            criteria.append(classifier)

        return sfc_descriptor

    def set_next_vnffgd_path_id(self, vnffgd):
        """Set up the next VNFFGD SFP id in the SFC being composed

        Retrieves the largest number of the SFP ID in the vnffgd catalog from
        the NFVO and sets the next one in the currently vnffgd being composed.
:return: the vnffgd begin composed ReRaises ------ NFVOAgentsException """ data = self.list_vnffgds() last_path_id = 0 for item in data: path_id = item['template']['vnffgd']['topology_template'][ 'node_templates']['Forwarding_path1']['properties']['id'] if path_id > last_path_id: last_path_id = path_id vnffgd['vnffgd']['template']['vnffgd']['topology_template'][ 'node_templates']['Forwarding_path1']['properties']['id'] = last_path_id + 1 return vnffgd def destroy_sfc_actions(self, destroy_vnf_fn, vnf_instance_ids=None, vnffgd_id=None, vnffg_id=None): """Executes the required actions do destroy an SFC This function can be employed on a regular workflow to destroy SFCs, and on rollback actions due to errors while creating an SFC :param destroy_vnf_fn: callback function from the core module :param vnf_instance_ids: a list of vnf instance ids to destroy, only required if vnffg_id is None :param vnffgd_id: the vnffg descriptor id to remove, only required if vnffg_id is None :param vnffg_id: the vnffg id to remove, if any Raises ------ NFVOAgentsException ReRaises ------ NFVOAgentsException """ vnffg_vnfs = [] # list of vnf ids to destroy if vnffg_id: data = self.show_vnffg(vnffg_id) vnffgd_id = data['vnffgd_id'] vnf_mapping =
a Settings object is passed, detect the settings that differ # from defaults, collect them into a dict, and apply them using `source`. # This comes up in `wandb.init(settings=wandb.Settings(...))` and # seems like the behavior that the user would expect when calling init that way. defaults = Settings() settings_dict = dict() for k, v in settings.__dict__.items(): if isinstance(v, Property): if v._value != defaults.__dict__[k]._value: settings_dict[k] = v._value # todo: store warnings from the passed Settings object, if any, # to collect telemetry on validation errors and unexpected args. # remove this once strict checking is enforced. for attr in ( "_Settings__unexpected_args", "_Settings__preprocessing_warnings", "_Settings__validation_warnings", ): getattr(self, attr).update(getattr(settings, attr)) # replace with the generated dict settings = settings_dict # add kwargs to settings settings = settings or dict() # explicit kwargs take precedence over settings settings = {**settings, **kwargs} unknown_properties = [] for key in settings.keys(): # only allow updating known Properties if key not in self.__dict__ or not isinstance(self.__dict__[key], Property): unknown_properties.append(key) if unknown_properties: raise KeyError(f"Unknown settings: {unknown_properties}") # only if all keys are valid, update them for key, value in settings.items(): self.__dict__[key].update(value, source) # todo: this is to collect stats on preprocessing and validation errors if self.__dict__[key].__dict__["_Property__failed_preprocessing"]: self.__preprocessing_warnings[key] = str(self.__dict__[key]._value) else: self.__preprocessing_warnings.pop(key, None) if self.__dict__[key].__dict__["_Property__failed_validation"]: self.__validation_warnings[key] = str(self.__dict__[key]._value) else: self.__validation_warnings.pop(key, None) def items(self) -> ItemsView[str, Any]: return self.make_static().items() def get(self, key: str, default: Any = None) -> Any: return 
self.make_static().get(key, default) def freeze(self) -> None: object.__setattr__(self, "_Settings__frozen", True) def unfreeze(self) -> None: object.__setattr__(self, "_Settings__frozen", False) def is_frozen(self) -> bool: return self.__frozen def make_static(self) -> Dict[str, Any]: """Generate a static, serializable version of the settings.""" # get attributes that are instances of the Property class: attributes = { k: v.value for k, v in self.__dict__.items() if isinstance(v, Property) } return attributes # apply settings from different sources # TODO(dd): think about doing some|all of that at init def _apply_settings( self, settings: "Settings", _logger: Optional[_EarlyLogger] = None, ) -> None: """Apply settings from a Settings object.""" if _logger is not None: _logger.info(f"Applying settings from {settings}") attributes = { k: v for k, v in settings.__dict__.items() if isinstance(v, Property) } for k, v in attributes.items(): # note that only the same/higher priority settings are propagated self.update({k: v._value}, source=v.source) # todo: this is to pass on info on unexpected args in settings if settings.__dict__["_Settings__unexpected_args"]: self.__dict__["_Settings__unexpected_args"].update( settings.__dict__["_Settings__unexpected_args"] ) @staticmethod def _load_config_file(file_name: str, section: str = "default") -> dict: parser = configparser.ConfigParser() parser.add_section(section) parser.read(file_name) config: Dict[str, Any] = dict() for k in parser[section]: config[k] = parser[section][k] # TODO (cvp): we didn't do this in the old cli, but it seems necessary if k == "ignore_globs": config[k] = config[k].split(",") return config def _apply_config_files(self, _logger: Optional[_EarlyLogger] = None) -> None: # TODO(jhr): permit setting of config in system and workspace if self.settings_system is not None: if _logger is not None: _logger.info(f"Loading settings from {self.settings_system}") self.update( 
self._load_config_file(self.settings_system), source=Source.SYSTEM, ) if self.settings_workspace is not None: if _logger is not None: _logger.info(f"Loading settings from {self.settings_workspace}") self.update( self._load_config_file(self.settings_workspace), source=Source.WORKSPACE, ) def _apply_env_vars( self, environ: Mapping[str, Any], _logger: Optional[_EarlyLogger] = None, ) -> None: env_prefix: str = "WANDB_" special_env_var_names = { "WANDB_TRACELOG": "_tracelog", "WANDB_REQUIRE_SERVICE": "_require_service", "WANDB_SERVICE_TRANSPORT": "_service_transport", "WANDB_DIR": "root_dir", "WANDB_NAME": "run_name", "WANDB_NOTES": "run_notes", "WANDB_TAGS": "run_tags", "WANDB_JOB_TYPE": "run_job_type", } env = dict() for setting, value in environ.items(): if not setting.startswith(env_prefix): continue if setting in special_env_var_names: key = special_env_var_names[setting] else: # otherwise, strip the prefix and convert to lowercase key = setting[len(env_prefix) :].lower() if key in self.__dict__: if key in ("ignore_globs", "run_tags"): value = value.split(",") env[key] = value elif _logger is not None: _logger.warning(f"Unknown environment variable: {setting}") if _logger is not None: _logger.info( f"Loading settings from environment variables: {_redact_dict(env)}" ) self.update(env, source=Source.ENV) def _infer_settings_from_environment( self, _logger: Optional[_EarlyLogger] = None ) -> None: """Modify settings based on environment (for runs and cli).""" settings: Dict[str, Union[bool, str, Sequence]] = dict() # disable symlinks if on windows (requires admin or developer setup) settings["symlink"] = True if self._windows: settings["symlink"] = False # TODO(jhr): this needs to be moved last in setting up settings ? # (dd): loading order does not matter as long as source is set correctly # For code saving, only allow env var override if value from server is true, or # if no preference was specified. 
if (self.save_code is True or self.save_code is None) and ( os.getenv(wandb.env.SAVE_CODE) is not None or os.getenv(wandb.env.DISABLE_CODE) is not None ): settings["save_code"] = wandb.env.should_save_code() settings["disable_git"] = wandb.env.disable_git() # Attempt to get notebook information if not already set by the user if self._jupyter and (self.notebook_name is None or self.notebook_name == ""): meta = wandb.jupyter.notebook_metadata(self.silent) settings["_jupyter_path"] = meta.get("path") settings["_jupyter_name"] = meta.get("name") settings["_jupyter_root"] = meta.get("root") elif ( self._jupyter and self.notebook_name is not None and os.path.exists(self.notebook_name) ): settings["_jupyter_path"] = self.notebook_name settings["_jupyter_name"] = self.notebook_name settings["_jupyter_root"] = os.getcwd() elif self._jupyter: wandb.termwarn( "WANDB_NOTEBOOK_NAME should be a path to a notebook file, " f"couldn't find {self.notebook_name}.", ) # host and username are populated by apply_env_vars if corresponding env # vars exist -- but if they don't, we'll fill them in here if self.host is None: settings["host"] = socket.gethostname() # type: ignore if self.username is None: try: # type: ignore settings["username"] = getpass.getuser() except KeyError: # getuser() could raise KeyError in restricted environments like # chroot jails or docker containers. Return user id in these cases. 
settings["username"] = str(os.getuid()) settings["_executable"] = sys.executable settings["docker"] = wandb.env.get_docker(wandb.util.image_id_from_k8s()) # TODO: we should use the cuda library to collect this if os.path.exists("/usr/local/cuda/version.txt"): with open("/usr/local/cuda/version.txt") as f: settings["_cuda"] = f.read().split(" ")[-1].strip() settings["_args"] = sys.argv[1:] settings["_os"] = platform.platform(aliased=True) settings["_python"] = platform.python_version() # hack to make sure we don't hang on windows if self._windows and self._except_exit is None: settings["_except_exit"] = True # type: ignore if _logger is not None: _logger.info( f"Inferring settings from compute environment: {_redact_dict(settings)}" ) self.update(settings, source=Source.ENV) def _infer_run_settings_from_environment( self, _logger: Optional[_EarlyLogger] = None, ) -> None: """Modify settings based on environment (for runs only).""" # If there's not already a program file, infer it now. settings: Dict[str, Union[bool, str, None]] = dict() program = self.program or _get_program() if program is not None: program_relpath = self.program_relpath or _get_program_relpath_from_gitrepo( program, _logger=_logger ) settings["program_relpath"] = program_relpath else: program = "<python with no main file>" settings["program"] = program if _logger is not None: _logger.info( f"Inferring run settings from compute environment: {_redact_dict(settings)}" ) self.update(settings, source=Source.ENV) def _apply_setup( self, setup_settings: Dict[str, Any], _logger: Optional[_EarlyLogger] = None ) -> None: if _logger: _logger.info(f"Applying setup settings: {_redact_dict(setup_settings)}") self.update(setup_settings, source=Source.SETUP) def _apply_user( self, user_settings: Dict[str, Any], _logger: Optional[_EarlyLogger] = None ) -> None: if _logger: _logger.info(f"Applying user settings: {_redact_dict(user_settings)}") self.update(user_settings, source=Source.USER) def _apply_init(self, 
init_settings: Dict[str, Union[str, int, None]]) -> None: # prevent setting project, entity if in sweep # TODO(jhr): these should be locked elements in the future if self.sweep_id: for key in ("project", "entity", "id"): val = init_settings.pop(key, None) if val: wandb.termwarn( f"Ignored wandb.init() arg {key} when running a sweep." ) if self.launch: for key in ("project", "entity", "id"): val = init_settings.pop(key, None) if val: wandb.termwarn( "Project, entity and id are ignored when running from wandb launch context. " f"Ignored wandb.init() arg {key} when running running from launch.", ) # strip out items where value is None param_map = dict( name="run_name", id="run_id", tags="run_tags", group="run_group", job_type="run_job_type", notes="run_notes", dir="root_dir", ) init_settings = { param_map.get(k, k): v for k, v in init_settings.items() if v is not None } # fun logic to convert the resume init arg if init_settings.get("resume"): if isinstance(init_settings["resume"], str): if init_settings["resume"] not in ("allow", "must", "never", "auto"): if init_settings.get("run_id") is None: # TODO: deprecate or don't support init_settings["run_id"] = init_settings["resume"] init_settings["resume"] = "allow" elif init_settings["resume"] is True: init_settings["resume"] = "auto" # update settings self.update(init_settings, source=Source.INIT) # handle auto resume logic if self.resume == "auto": if os.path.exists(self.resume_fname): with open(self.resume_fname) as f: resume_run_id = json.load(f)["run_id"] if self.run_id is None: self.update({"run_id": resume_run_id}, source=Source.INIT) # type: ignore elif self.run_id != resume_run_id: wandb.termwarn( "Tried to auto resume run with " f"id {resume_run_id} but id {self.run_id} is set.", )
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.source_stats(source, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str source: (required)
        :return: SystemMetricsOut
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return only the deserialized data unless the caller asked for the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.source_stats_with_http_info(source, **kwargs)  # noqa: E501
        else:
            (data) = self.source_stats_with_http_info(source, **kwargs)  # noqa: E501
            return data

    def source_stats_with_http_info(self, source, **kwargs):  # noqa: E501
        """Print basic source statistics.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.source_stats_with_http_info(source, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str source: (required)
        :return: SystemMetricsOut
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = ['source']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject any keyword argument the endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method source_stats" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'source' is set
        if ('source' not in local_var_params or
                local_var_params['source'] is None):
            raise ValueError("Missing the required parameter `source` when calling `source_stats`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'source' in local_var_params:
            path_params['source'] = local_var_params['source']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api2/json/sourceStats/{source}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SystemMetricsOut',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def stats(self, **kwargs):  # noqa: E501
        """Print basic system statistics.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.stats(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: SystemMetricsOut
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.stats_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.stats_with_http_info(**kwargs)  # noqa: E501
            return data

    def stats_with_http_info(self, **kwargs):  # noqa: E501
        """Print basic system statistics.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.stats_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: SystemMetricsOut
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method stats" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api2/json/stats', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='SystemMetricsOut',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def stripe_connect(self, **kwargs):  # noqa: E501
        """Connects a Stripe Account.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.stripe_connect(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope:
        :param str code:
        :param str error:
        :param str error_description:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.stripe_connect_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.stripe_connect_with_http_info(**kwargs)  # noqa: E501
            return data

    def stripe_connect_with_http_info(self, **kwargs):  # noqa: E501
        """Connects a Stripe Account.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.stripe_connect_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str scope:
        :param str code:
        :param str error:
        :param str error_description:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = ['scope', 'code', 'error', 'error_description']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method stripe_connect" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # All four Stripe OAuth callback fields are optional query parameters.
        query_params = []
        if 'scope' in local_var_params:
            query_params.append(('scope', local_var_params['scope']))  # noqa: E501
        if 'code' in local_var_params:
            query_params.append(('code', local_var_params['code']))  # noqa: E501
        if 'error' in local_var_params:
            query_params.append(('error', local_var_params['error']))  # noqa: E501
        if 'error_description' in local_var_params:
            query_params.append(('error_description', local_var_params['error_description']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api2/json/stripeConnect', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def subscribe_plan(self, plan_name, token, **kwargs):  # noqa: E501
        """Subscribe to a give API plan, using the user's preferred or default currency.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.subscribe_plan(plan_name, token, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str plan_name: (required)
        :param str token: (required)
        :return: APIPlanSubscriptionOut
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.subscribe_plan_with_http_info(plan_name, token, **kwargs)  # noqa: E501
        else:
            (data) = self.subscribe_plan_with_http_info(plan_name, token, **kwargs)  # noqa: E501
            return data

    def subscribe_plan_with_http_info(self, plan_name, token, **kwargs):  # noqa: E501
        """Subscribe to a give API plan, using the user's preferred or default currency.  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.subscribe_plan_with_http_info(plan_name, token, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str plan_name: (required)
        :param str token: (required)
        :return: APIPlanSubscriptionOut
                 If the method is called asynchronously,
                 returns the request thread.
        """

        local_var_params = locals()

        all_params = ['plan_name', 'token']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method subscribe_plan" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'plan_name' is set
        if ('plan_name' not in local_var_params or
                local_var_params['plan_name'] is None):
            raise ValueError("Missing the required parameter `plan_name` when calling `subscribe_plan`")  # noqa: E501
        # verify the required parameter 'token' is set
        if ('token' not in local_var_params or
                local_var_params['token'] is None):
            raise ValueError("Missing the required parameter `token` when calling `subscribe_plan`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'plan_name' in local_var_params:
            path_params['planName'] = local_var_params['plan_name']  # noqa: E501
        if 'token' in local_var_params:
            path_params['token'] = local_var_params['token']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api2/json/subscribePlan/{planName}/{token}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='APIPlanSubscriptionOut',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def subscribe_plan_on_behalf(self, plan_name, api_key, **kwargs):  # noqa: E501
        """Subscribe to a give API plan, using the user's preferred or default currency (admin only).  # noqa: E501

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.subscribe_plan_on_behalf(plan_name, api_key, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str plan_name: (required)
        :param str api_key: (required)
        :return: APIPlanSubscriptionOut
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.subscribe_plan_on_behalf_with_http_info(plan_name, api_key, **kwargs)  # noqa: E501
        else:
            (data) = self.subscribe_plan_on_behalf_with_http_info(plan_name, api_key, **kwargs)  # noqa: E501
            return data

    # NOTE(review): the method below is truncated in this chunk by an
    # extraction discontinuity; only its signature and part of its docstring
    # survive here.
    def subscribe_plan_on_behalf_with_http_info(self, plan_name, api_key, **kwargs):  # noqa: E501
        """Subscribe to a give API plan, using the user's preferred or default currency (admin only). #
            # NOTE(review): this region's original line breaks (including those
            # inside the generated-code template strings) were destroyed by
            # whitespace mangling; the layout below is a reconstruction —
            # verify against the original template file.
            i += 1
        print("<br>pValues: "+str(r.pValues)+"<br>")
        print("degreesOfFreedom: "+str(r.degreesOfFreedom)+"<br>")
        print("statistics: "+str(new_statistic)+"<br>")
        """)
        return code

    def normalizer(self, table_name, input_col, output_col, p):
        """Return PySpark code (as a string) that applies a Normalizer stage
        to `table_name` and prints the result as an HTML table."""
        # input_columns_str = ",".join(input_columns)
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import Normalizer
        import io, pandas as pd
        str_io = io.StringIO()
        Normalizer_""" + table_name + """ = Normalizer( inputCol ='""" + input_col + """', outputCol='""" + output_col + """', p=""" + p + """)
        append_stage('Normalizer_""" + table_name + """')
        norm_""" + table_name + """ = Normalizer_""" + table_name + """.transform(""" + table_name + """)
        norm_""" + table_name + """.createOrReplaceTempView('norm_""" + table_name + """')
        norm_""" + table_name + """.name = 'norm_""" + table_name + """'
        print("<b>Dataframe name : "), norm_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(norm_""" + table_name + """.count())+'</b>')
        # norm_""" + table_name + """.show()
        df_ = norm_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def pca(self, table_name, num_of_components, input_col, output_col):
        """Return PySpark code that fits/applies a PCA stage to `table_name`."""
        # input_columns_str = ",".join(input_columns)
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import PCA
        import io, pandas as pd
        str_io = io.StringIO()
        PCA_""" + table_name + """ = PCA( k=""" + num_of_components + """, inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('PCA_""" + table_name + """')
        pca_""" + table_name + """ = PCA_""" + table_name + """.fit(""" + table_name + """)
        pca_""" + table_name + """ = pca_""" + table_name + """.transform(""" + table_name + """)
        pca_""" + table_name + """.createOrReplaceTempView('pca_""" + table_name + """')
        pca_""" + table_name + """.name = 'pca_""" + table_name + """'
        print("<b>Dataframe name : "), pca_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(pca_""" + table_name + """.count())+'</b>')
        # pca_""" + table_name + """.show()
        df_ = pca_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def bucketizer(self, table_name, split, input_col, output_col):
        """Return PySpark code that buckets `input_col` of `table_name`
        around `split` (with +/-inf as the outer edges)."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import Bucketizer
        import io, pandas as pd
        str_io = io.StringIO()
        Bucketizer_""" + table_name + """ = Bucketizer(splits=[-float("inf"),""" + split + """, float("inf")], inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('Bucketizer_""" + table_name + """')
        bucket_""" + table_name + """ = Bucketizer_""" + table_name + """ .setHandleInvalid("keep").transform(""" + table_name + """)
        bucket_""" + table_name + """.createOrReplaceTempView('bucket_""" + table_name + """')
        bucket_""" + table_name + """.name = 'bucket_""" + table_name + """'
        print("<b>Dataframe name : "), bucket_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(bucket_""" + table_name + """.count())+'</b>')
        # bucket_""" + table_name + """.show()
        df_ = bucket_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def standardscaler(self, table_name, input_col, output_col, withStd, withMean):
        """Return PySpark code that fits/applies a StandardScaler stage.
        Note: the generated code rebinds StandardScaler_<table> to the fitted
        model after append_stage() — preserved as-is from the original."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import StandardScaler
        import io, pandas as pd
        str_io = io.StringIO()
        StandardScaler_""" + table_name + """ = StandardScaler(withMean=""" + withMean + """, withStd=""" + withStd + """, inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('StandardScaler_""" + table_name + """')
        StandardScaler_""" + table_name + """ = StandardScaler_""" + table_name + """.fit(""" + table_name + """)
        stdscaler_""" + table_name + """ = StandardScaler_""" + table_name + """.transform(""" + table_name + """)
        stdscaler_""" + table_name + """.createOrReplaceTempView('stdscaler_""" + table_name + """')
        stdscaler_""" + table_name + """.name = 'stdscaler_""" + table_name + """'
        print("<b>Dataframe name : "), stdscaler_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(stdscaler_""" + table_name + """.count())+'</b>')
        # stdscaler_""" + table_name + """.show()
        df_ = stdscaler_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def tokenizer(self, table_name, input_col, output_col):
        """Return PySpark code that applies a Tokenizer stage to `table_name`."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import Tokenizer
        import io, pandas as pd
        str_io = io.StringIO()
        fe_tknzr_""" + table_name + """ = Tokenizer(inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('fe_tknzr_""" + table_name + """')
        tokenizer_""" + table_name + """ = fe_tknzr_""" + table_name + """.transform(""" + table_name + """)
        tokenizer_""" + table_name + """.createOrReplaceTempView('tokenizer_""" + table_name + """')
        tokenizer_""" + table_name + """.name = 'tokenizer_""" + table_name + """'
        print("<b>Dataframe name : "), tokenizer_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(tokenizer_""" + table_name + """.count())+'</b>')
        # tokenizer_""" + table_name + """.show()
        df_ = tokenizer_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def missingval(self, table_name, radioValue):
        """Return PySpark code that handles missing values in `table_name`
        per `radioValue`: 'remove' drops NA rows, 'average' fills with the
        column means, anything else copies the data unchanged."""
        code = self.session_assertion() + textwrap.dedent("""
        import numpy as np
        import io, pandas as pd
        str_io = io.StringIO()
        # radioValue=""
        df_contents=""
        if '""" + radioValue + """'=='remove':
            df = """ + table_name + """.toPandas()
            df_contents = df.dropna(axis=0)
            missingval_""" + table_name + """ = spark.createDataFrame(df_contents)
        elif '""" + radioValue + """'=='average':
            df = """ + table_name + """.toPandas()
            df_contents = df.fillna(df.mean())
            missingval_""" + table_name + """ = spark.createDataFrame(df_contents)
        else:
            df = """ + table_name + """.toPandas()
            missingval_""" + table_name + """ = spark.createDataFrame(df)
        missingval_""" + table_name + """.createOrReplaceTempView('missingval_""" + table_name + """')
        missingval_""" + table_name + """.name = 'missingval_""" + table_name + """'
        print("<b>Dataframe name : "), missingval_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(missingval_""" + table_name + """.count())+'</b>')
        # missingval_""" + table_name + """.show()
        df_ = missingval_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def stopword(self, table_name, input_col, output_col):
        """Return PySpark code that applies a StopWordsRemover stage."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import StopWordsRemover
        import io, pandas as pd
        str_io = io.StringIO()
        fe_stpwd_""" + table_name + """ = StopWordsRemover(inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('fe_stpwd_""" + table_name + """')
        stopword_""" + table_name + """ = fe_stpwd_""" + table_name + """.transform(""" + table_name + """)
        stopword_""" + table_name + """.createOrReplaceTempView('stopword_""" + table_name + """')
        stopword_""" + table_name + """.name = 'stopword_""" + table_name + """'
        print("<b>Dataframe name : "), stopword_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(stopword_""" + table_name + """.count())+'</b>')
        # stopword_""" + table_name + """.show()
        df_ = stopword_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def stringindexer(self, table_name, input_col, output_col):
        """Return PySpark code that string-indexes each column in `input_col`
        (a list) via a Pipeline of StringIndexer stages, collecting the label
        mapping per column."""
        input_columns_str = ",".join(input_col)
        # input_col = []
        # print(input_col)
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import StringIndexer
        import io, pandas as pd
        str_io = io.StringIO()
        from pyspark.ml import Pipeline
        # columnList = [item[0] for item in """ + table_name + """.dtypes if item[1].startswith('string')]
        # columnList=['cabin', 'embarked']
        columnList=[""" + input_columns_str + """]
        # print(columnList)
        StringIndexer_""" + table_name + """ = [StringIndexer(inputCol=column, outputCol=column+"_index").fit(""" + table_name + """) for column in columnList]
        pipeline_idx = Pipeline(stages=StringIndexer_""" + table_name + """)
        append_stage('StringIndexer_""" + table_name + """')
        indexed_""" + table_name + """ = pipeline_idx.fit(""" + table_name + """)
        category_name_""" + table_name + """ = dict()
        for idx, col in enumerate(columnList):
            current_col = indexed_""" + table_name + """.stages[idx]
            category_name_""" + table_name + """[col] = current_col.labels
        # print(category_name_""" + table_name + """)
        indexed_""" + table_name + """ = indexed_""" + table_name + """ .transform(""" + table_name + """)
        indexed_""" + table_name + """.createOrReplaceTempView('indexed_""" + table_name + """')
        indexed_""" + table_name + """.name = 'indexed_""" + table_name + """'
        print("<b>Dataframe name : "), indexed_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(indexed_""" + table_name + """.count())+'</b>')
        # indexed_""" + table_name + """.show()
        df_ = indexed_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        # df_r.show()
        """)
        return code

    def labelindexer(self, table_name, input_col, output_col, handleInvalid, stringOrderType):
        """Return PySpark code that fits/applies a label StringIndexer
        (with explicit handleInvalid / stringOrderType) and captures its labels."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import StringIndexer, IndexToString
        import io, pandas as pd
        str_io = io.StringIO()
        LabelIndexer_""" + table_name + """ = StringIndexer(inputCol='""" + input_col + """', outputCol='""" + output_col + """', handleInvalid='""" + handleInvalid + """', stringOrderType='""" + stringOrderType + """')
        labels = LabelIndexer_""" + table_name + """.fit(""" + table_name + """).labels
        append_stage('LabelIndexer_""" + table_name + """')
        model = LabelIndexer_""" + table_name + """.fit(""" + table_name + """)
        labelindexed_""" + table_name + """ = model.transform(""" + table_name + """)
        labelindexed_""" + table_name + """.createOrReplaceTempView('labelindexed_""" + table_name + """')
        labelindexed_""" + table_name + """.name = 'labelindexed_""" + table_name + """'
        print("<b>Dataframe name : "), labelindexed_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(labelindexed_""" + table_name + """.count())+'</b>')
        # indexed_""" + table_name + """.show()
        df_ = labelindexed_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def labelconverter(self, table_name, input_col, output_col, labels):
        """Return PySpark code that maps index values back to label strings
        via IndexToString."""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import IndexToString
        import io, pandas as pd
        str_io = io.StringIO()
        LabelConverter_""" + table_name + """ = IndexToString(inputCol='""" + input_col + """', outputCol='""" + output_col + """', labels=""" + labels + """)
        append_stage('LabelConverter_""" + table_name + """')
        converted_""" + table_name + """ = LabelConverter_""" + table_name + """.transform(""" + table_name + """)
        converted_""" + table_name + """.createOrReplaceTempView('converted_""" + table_name + """')
        converted_""" + table_name + """.name = 'converted_""" + table_name + """'
        print("<b>Dataframe name : "), converted_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(converted_""" + table_name + """.count())+'</b>')
        # converted_""" + table_name + """.show()
        df_ = converted_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str = str_io.getvalue()
        print(html_str)
        """)
        return code

    def onehot(self, table_name, input_col, output_col):
        """Return PySpark code that applies a OneHotEncoder stage.
        (Truncated in this chunk by an extraction discontinuity.)"""
        code = self.session_assertion() + textwrap.dedent("""
        from pyspark.ml.feature import OneHotEncoder
        import io, pandas as pd
        str_io = io.StringIO()
        OneHotEncoder_""" + table_name + """ = OneHotEncoder(inputCol='""" + input_col + """', outputCol='""" + output_col + """')
        append_stage('OneHotEncoder_""" + table_name + """')
        encoded_""" + table_name + """ = OneHotEncoder_""" + table_name + """ .transform(""" + table_name + """)
        encoded_""" + table_name + """.createOrReplaceTempView('encoded_""" + table_name + """')
        encoded_""" + table_name + """.name = 'encoded_""" + table_name + """'
        print("<b>Dataframe name : "), encoded_""" + table_name + """.name+"</b>"
        print('<br><b>Number of rows: </b>')
        print('<b>'+str(encoded_""" + table_name + """.count())+'</b>')
        # encoded_""" + table_name + """.show()
        df_ = encoded_""" + table_name + """.limit(100).toPandas()
        df_.to_html(buf=str_io, classes='table dataframe', index=False)
        html_str
"name", name) pulumi.set(__self__, "values", values) if regex is not None: pulumi.set(__self__, "regex", regex) @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def values(self) -> Sequence[str]: return pulumi.get(self, "values") @property @pulumi.getter def regex(self) -> Optional[bool]: return pulumi.get(self, "regex") @pulumi.output_type class GetLogGroupsLogGroupResult(dict): def __init__(__self__, *, compartment_id: str, defined_tags: Mapping[str, Any], description: str, display_name: str, freeform_tags: Mapping[str, Any], id: str, state: str, time_created: str, time_last_modified: str): """ :param str compartment_id: Compartment OCID to list resources in. See compartmentIdInSubtree for nested compartments traversal. :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}` :param str description: Description for this resource. :param str display_name: Resource name :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` :param str id: The OCID of the resource. :param str state: The log group object state. :param str time_created: Time the resource was created. :param str time_last_modified: Time the resource was last modified. 
""" pulumi.set(__self__, "compartment_id", compartment_id) pulumi.set(__self__, "defined_tags", defined_tags) pulumi.set(__self__, "description", description) pulumi.set(__self__, "display_name", display_name) pulumi.set(__self__, "freeform_tags", freeform_tags) pulumi.set(__self__, "id", id) pulumi.set(__self__, "state", state) pulumi.set(__self__, "time_created", time_created) pulumi.set(__self__, "time_last_modified", time_last_modified) @property @pulumi.getter(name="compartmentId") def compartment_id(self) -> str: """ Compartment OCID to list resources in. See compartmentIdInSubtree for nested compartments traversal. """ return pulumi.get(self, "compartment_id") @property @pulumi.getter(name="definedTags") def defined_tags(self) -> Mapping[str, Any]: """ Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}` """ return pulumi.get(self, "defined_tags") @property @pulumi.getter def description(self) -> str: """ Description for this resource. """ return pulumi.get(self, "description") @property @pulumi.getter(name="displayName") def display_name(self) -> str: """ Resource name """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="freeformTags") def freeform_tags(self) -> Mapping[str, Any]: """ Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` """ return pulumi.get(self, "freeform_tags") @property @pulumi.getter def id(self) -> str: """ The OCID of the resource. """ return pulumi.get(self, "id") @property @pulumi.getter def state(self) -> str: """ The log group object state. 
""" return pulumi.get(self, "state") @property @pulumi.getter(name="timeCreated") def time_created(self) -> str: """ Time the resource was created. """ return pulumi.get(self, "time_created") @property @pulumi.getter(name="timeLastModified") def time_last_modified(self) -> str: """ Time the resource was last modified. """ return pulumi.get(self, "time_last_modified") @pulumi.output_type class GetLogSavedSearchesFilterResult(dict): def __init__(__self__, *, name: str, values: Sequence[str], regex: Optional[bool] = None): """ :param str name: Resource name """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "values", values) if regex is not None: pulumi.set(__self__, "regex", regex) @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter def values(self) -> Sequence[str]: return pulumi.get(self, "values") @property @pulumi.getter def regex(self) -> Optional[bool]: return pulumi.get(self, "regex") @pulumi.output_type class GetLogSavedSearchesLogSavedSearchSummaryCollectionResult(dict): def __init__(__self__, *, items: Sequence['outputs.GetLogSavedSearchesLogSavedSearchSummaryCollectionItemResult']): pulumi.set(__self__, "items", items) @property @pulumi.getter def items(self) -> Sequence['outputs.GetLogSavedSearchesLogSavedSearchSummaryCollectionItemResult']: return pulumi.get(self, "items") @pulumi.output_type class GetLogSavedSearchesLogSavedSearchSummaryCollectionItemResult(dict): def __init__(__self__, *, compartment_id: str, defined_tags: Mapping[str, Any], description: str, freeform_tags: Mapping[str, Any], id: str, name: str, query: str, state: str, time_created: str, time_last_modified: str): """ :param str compartment_id: Compartment OCID to list resources in. See compartmentIdInSubtree for nested compartments traversal. :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. 
For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}` :param str description: Description for this resource. :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` :param str id: The OCID of the resource. :param str name: Resource name :param str query: The search query that is saved. :param str state: The state of the LogSavedSearch :param str time_created: Time the resource was created. :param str time_last_modified: Time the resource was last modified. """ pulumi.set(__self__, "compartment_id", compartment_id) pulumi.set(__self__, "defined_tags", defined_tags) pulumi.set(__self__, "description", description) pulumi.set(__self__, "freeform_tags", freeform_tags) pulumi.set(__self__, "id", id) pulumi.set(__self__, "name", name) pulumi.set(__self__, "query", query) pulumi.set(__self__, "state", state) pulumi.set(__self__, "time_created", time_created) pulumi.set(__self__, "time_last_modified", time_last_modified) @property @pulumi.getter(name="compartmentId") def compartment_id(self) -> str: """ Compartment OCID to list resources in. See compartmentIdInSubtree for nested compartments traversal. """ return pulumi.get(self, "compartment_id") @property @pulumi.getter(name="definedTags") def defined_tags(self) -> Mapping[str, Any]: """ Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}` """ return pulumi.get(self, "defined_tags") @property @pulumi.getter def description(self) -> str: """ Description for this resource. 
""" return pulumi.get(self, "description") @property @pulumi.getter(name="freeformTags") def freeform_tags(self) -> Mapping[str, Any]: """ Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` """ return pulumi.get(self, "freeform_tags") @property @pulumi.getter def id(self) -> str: """ The OCID of the resource. """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter def query(self) -> str: """ The search query that is saved. """ return pulumi.get(self, "query") @property @pulumi.getter def state(self) -> str: """ The state of the LogSavedSearch """ return pulumi.get(self, "state") @property @pulumi.getter(name="timeCreated") def time_created(self) -> str: """ Time the resource was created. """ return pulumi.get(self, "time_created") @property @pulumi.getter(name="timeLastModified") def time_last_modified(self) -> str: """ Time the resource was last modified. 
""" return pulumi.get(self, "time_last_modified") @pulumi.output_type class GetLogsFilterResult(dict): def __init__(__self__, *, name: str, values: Sequence[str], regex: Optional[bool] = None): pulumi.set(__self__, "name", name) pulumi.set(__self__, "values", values) if regex is not None: pulumi.set(__self__, "regex", regex) @property @pulumi.getter def name(self) -> str: return pulumi.get(self, "name") @property @pulumi.getter def values(self) -> Sequence[str]: return pulumi.get(self, "values") @property @pulumi.getter def regex(self) -> Optional[bool]: return pulumi.get(self, "regex") @pulumi.output_type class GetLogsLogResult(dict): def __init__(__self__, *, compartment_id: str, configuration: 'outputs.GetLogsLogConfigurationResult', defined_tags: Mapping[str, Any], display_name: str, freeform_tags: Mapping[str, Any], id: str, is_enabled: bool, log_group_id: str, log_type: str, retention_duration: int, state: str, tenancy_id: str, time_created: str, time_last_modified: str): """ :param str compartment_id: The OCID of the compartment that the resource belongs to. :param 'GetLogsLogConfigurationArgs' configuration: Log object configuration. :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}` :param str display_name: Resource name :param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` :param str id: The OCID of the resource. :param bool is_enabled: Whether or not this resource is currently enabled. :param str log_group_id: OCID of a log group to work with. 
:param str log_type: The logType that the log object is for, whether custom or service. :param int retention_duration: Log retention duration in 30-day increments (30, 60, 90 and so on). :param str state: Lifecycle state of the log object :param str tenancy_id: The OCID of the tenancy. :param str time_created: Time the resource was created. :param str time_last_modified: Time the resource was last modified. """ pulumi.set(__self__, "compartment_id", compartment_id) pulumi.set(__self__, "configuration", configuration) pulumi.set(__self__, "defined_tags", defined_tags) pulumi.set(__self__, "display_name", display_name) pulumi.set(__self__, "freeform_tags", freeform_tags) pulumi.set(__self__, "id", id) pulumi.set(__self__, "is_enabled", is_enabled) pulumi.set(__self__, "log_group_id", log_group_id) pulumi.set(__self__, "log_type", log_type) pulumi.set(__self__, "retention_duration", retention_duration) pulumi.set(__self__, "state", state) pulumi.set(__self__, "tenancy_id", tenancy_id) pulumi.set(__self__, "time_created", time_created) pulumi.set(__self__, "time_last_modified", time_last_modified) @property @pulumi.getter(name="compartmentId") def compartment_id(self) -> str: """ The OCID of the compartment that the resource belongs to. """ return pulumi.get(self, "compartment_id") @property @pulumi.getter def configuration(self) -> 'outputs.GetLogsLogConfigurationResult': """ Log object configuration. """ return pulumi.get(self, "configuration") @property @pulumi.getter(name="definedTags") def defined_tags(self) -> Mapping[str, Any]: """ Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). 
Example: `{"Operations.CostCenter": "42"}` """ return pulumi.get(self, "defined_tags") @property @pulumi.getter(name="displayName") def display_name(self) -> str: """ Resource name """ return pulumi.get(self, "display_name") @property @pulumi.getter(name="freeformTags") def freeform_tags(self) -> Mapping[str, Any]: """ Free-form tags
tupV = ExternalAnnotation(name=name, value=sV["String"], dataType="string", featureType=ky, provReferenceId=referenceNumber, provSourceName=reference) pcD.setdefault(pcKy, {}).setdefault(ky, set()).update([tupV]) elif "Number" in infD["Value"]: units = infD["Value"]["Unit"] if "Unit" in infD["Value"] else "" for nV in infD["Value"]["Number"]: tupV = ExternalAnnotation(name=name, value=nV, dataType="number", units=units, featureType=ky, provReferenceId=referenceNumber, provSourceName=reference) # tV = str(str(sV) + " " + units).strip() pcD.setdefault(pcKy, {}).setdefault(ky, set()).update([tupV]) except Exception as e: logger.exception("Failing with %s", str(e)) return pcD # --- def __parsePubChemRecord(self, recordD): """Parse selected content from a PubChem PUG API full RECORD response data. Args: recordD (dict): response from a PubChem PUG API record request with potentially multiple compound records Returns: list : [{'cid': <pc_cid>, ....}, ...] """ retL = [] try: tDL = recordD["PC_Compounds"] if "PC_Compounds" in recordD else [] for tD in tDL: rD = {} tId = self.__getKeyValue(tD, "id.id.cid") rD["cid"] = str(tId) if tId else None logger.debug("cId = %r", rD["cid"]) elementCountD = defaultdict(int) elements = self.__getKeyValue(tD, "atoms.element") for el in elements: elementCountD[str(el)] += 1 rD["elementCounts"] = elementCountD rD["formalCharge"] = self.__getKeyValue(tD, "charge") pDL = self.__getKeyValue(tD, "props") for pD in pDL: pName = self.__getKeyValue(pD, "urn.name") pLabel = self.__getKeyValue(pD, "urn.label") if pName == "XLogP3" and pLabel == "Log P": rD["XlogP"] = self.__getKeyValue(pD, "value.fval") elif pLabel == "Topological" and pName == "Polar Surface Area": rD["polarSurfaceArea"] = self.__getKeyValue(pD, "value.fval") elif pLabel == "IUPAC Name" and pName == "Preferred": rD["iupacName"] = self.__getKeyValue(pD, "value.sval") elif pLabel == "Molecular Formula": rD["formula"] = self.__getKeyValue(pD, "value.sval") elif pLabel == "InChIKey": 
rD["InChiKey"] = self.__getKeyValue(pD, "value.sval") retL.append(rD) except Exception as e: logger.exception("Failing with %s", str(e)) return retL def __parsePubChemXrefs(self, recordD): """Parse selected content from a PubChem PUG API xrefs response data. Args: recordD (dict): response from a PubChem PUG API record request with potentially multiple records Returns: list : [{'cid': <pc_cid>, ....}, ...] { "InformationList": { "Information": [ { "CID": 2244, "RegistryID": [ ], "RN": [ "11126-35-5", ] } } } """ retL = [] try: tDL = self.__getKeyValue(recordD, "InformationList.Information") for tD in tDL: rD = {} tId = self.__getKeyValue(tD, "CID") rD["cid"] = str(tId) if tId else None logger.debug("cId = %r", rD["cid"]) # rnL = self.__getKeyValue(tD, "RN") rnD = {rn: True for rn in rnL} if rnL else {} rIdL = self.__getKeyValue(tD, "RegistryID") # This is a laundry list of IDs and it only possible to pull out clearly prefixed idcodes. for rId in rIdL: if rId in rnD: rD.setdefault("CAS", []).append(rId) if rId.startswith("CHEMBL"): rD.setdefault("ChEMBL", []).append(rId) if rId.startswith("CHEBI"): rD.setdefault("ChEBI", []).append(rId) if rId.startswith("HMDB"): rD.setdefault("HMDB", []).append(rId) retL.append(rD) except Exception as e: logger.exception("Failing with %s", str(e)) return retL def __parsePubChemProperties(self, recordD): """Parse selected content from a PubChem PUG API properties response data. Args: recordD (dict): response from a PubChem PUG API record request with potentially multiple records Returns: list : [{'cid': <pc_cid>, ....}, ...] 
{ "PropertyTable": { "Properties": [ { "CID": 2244, "MolecularFormula": "C9H8O4", "XLogP": 1.2, "TPSA": 63.6, "Volume3D": 136 } ] } } """ retL = [] try: tDL = self.__getKeyValue(recordD, "PropertyTable.Properties") for tD in tDL: rD = {} tId = self.__getKeyValue(tD, "CID") rD["cid"] = str(tId) if tId else None logger.debug("cId = %r", rD["cid"]) # rD["formula"] = self.__getKeyValue(tD, "MolecularFormula") rD["XLogP"] = self.__getKeyValue(tD, "XLogP") rD["polarSurfaceArea"] = self.__getKeyValue(tD, "TPSA") rD["volume3D"] = self.__getKeyValue(tD, "Volume3D") retL.append(rD) except Exception as e: logger.exception("Failing with %s", str(e)) return retL def __parsePubChemSynonyms(self, recordD): """Parse selected content from a PubChem PUG API synonyms response data. Args: recordD (dict): response from a PubChem PUG API record request with potentially multiple records Returns: list : [{'cid': <pc_cid>, ....}, ...] { "InformationList": { "Information": [ { "CID": 2244, "Synonym": [ "aspirin", "ACETYLSALICYLIC ACID", "50-78-2", "2-Acetoxybenzoic acid", ] }, """ retL = [] try: tDL = self.__getKeyValue(recordD, "InformationList.Information") for tD in tDL: rD = {} tId = self.__getKeyValue(tD, "CID") rD["cid"] = str(tId) if tId else None logger.debug("cId = %r", rD["cid"]) rD["synonyms"] = self.__getKeyValue(tD, "Synonym") retL.append(rD) except Exception as e: logger.exception("Failing with %s", str(e)) return retL def __parsePubChemClassifications(self, cId, recordD): """Parse selected content from a PubChem PUG API classification response data. Args: recordD (dict): response from a PubChem PUG API record request with potentially multiple records Returns: list : [{'cid': <pc_cid>, ....}, ...] { "Hierarchies": { "Hierarchy": [ { "SourceName": "WHO ATC", "SourceID": "ATCTree", "RootID": "root", "HID": 79, "Information": { "Name": "ATC Code", "Description": [ "In the World Health Organization (WHO) Anatomical Therapeutic Chemical (ATC) ..." 
], "Comments": [ "Drugs are classified in groups at five different levels. The drugs are divided into fourteen main groups .." "Medicinal products are classified according to the main therapeutic use of the main active ingredient, on the ..." ], "URL": "https://www.whocc.no/atc_ddd_index/", "HNID": 1949731, "ChildID": [ "node_1", "node_803", "node_1105", "node_1888", "node_2369", "node_2727", "node_2844", "node_3500", "node_3931", "node_4209", "node_4938", "node_5112", "node_5565", "node_5911" ], "HasCountsOfType": [ "CID", "SID" ], "Counts": [ { "Type": "CID", "Count": 15837 }, { "Type": "SID", "Count": 178510 } ] }, "Node": [ { "NodeID": "node_41", "ParentID": [ "node_38" ], "Information": { "Name": "A01AD05 - Acetylsalicylic acid", "URL": "https://www.whocc.no/atc_ddd_index/?code=A01AD05", "HNID": 1949772, "Match": true, "Counts": [ { "Type": "CID", "Count": 4 }, { "Type": "SID", "Count": 84 } ] } }, { "NodeID": "node_38", "ParentID": [ "node_3" ], "Information": { "Name": "A01AD - Other agents for local oral treatment", "URL": "https://www.whocc.no/atc_ddd_index/?code=A01AD", "HNID": 1949769, "ChildID": [ "node_39", "node_40", "node_41", "node_42", "node_43", "node_44", "node_45" ], "Counts": [ { "Type": "CID", "Count": 20 }, { "Type": "SID", "Count": 303 } ] } }, """ retL = [] rD = {} rD["cid"] = cId try: tDL = self.__getKeyValue(recordD, "Hierarchies.Hierarchy") for tD in tDL: sName = self.__getKeyValue(tD, "SourceName") sId = self.__getKeyValue(tD, "SourceID") logger.debug("Finding sourceName %rsourceId %r", sName, sId) # provName = self.__getKeyValue(tD, "Information.Name") provDescr = self.__getKeyValue(tD, "Information.Description") provComments = self.__getKeyValue(tD, "Information.Description") provUrl = self.__getKeyValue(tD, "Information.URL") provD = {"source": provName, "description": provDescr, "details": provComments, "url": provUrl} # logger.debug("++++>KEY (%r,%r)", sName, provName) if (sName, provName) in [ ("WHO ATC", "ATC Code"), ("FDA Pharm 
Classes", "FDA Pharmacological Classification"), ("IUPHAR/BPS Guide to PHARMACOLOGY", "Target Classification"), # ("ChEMBL", "Target Tree"), # ("ChemIDplus", "ChemIDplus Chemical Information Classification"), ("ChEBI", "ChEBI Ontology"), ("MeSH", "MeSH Tree"), ]: nDL = self.__getKeyValue(tD, "Node") cD = {} for nD in nDL: nodeId = self.__getKeyValue(nD, "NodeID") parentL = self.__getKeyValue(nD, "ParentID") childL = self.__getKeyValue(nD, "Information.ChildID") childCount = len(childL) if childL else 0 nm = self.__getKeyValue(nD, "Information.Name") descr = self.__getKeyValue(nD, "Information.Description") url = self.__getKeyValue(nD, "Information.URL") idCode = None # --- Add a bit to separate unique identifiers - try: if provName in ["ATC Code"] and nm: fL = nm.split("-") idCode = fL[0].strip() nm = fL[1].strip() elif provName in ["MeSH Tree"] and url: ff = url.split("/") idCode = ff[-1].strip() elif sName == "IUPHAR/BPS Guide to PHARMACOLOGY" and url: ff = url.split("=") idCode = ff[-1].strip() except Exception as e: logger.exception("%s failing with %s", provName, str(e)) # --- cD[nodeId] = {"parentList": parentL, "childCount": childCount, "idCode": idCode, "name": nm, "description": descr, "url": url} rD[sName] = {"data": cD, "provenance": provD} # retL.append(rD) except Exception as e: logger.exception("Failing with %s", str(e)) # --- Process the extracted classification data ---- try: ontClassL = [] for ret in retL: logger.debug("Classification keys %r", ret.keys()) ontClassD = {} ontClassD["cid"] = cId for ont, oD in ret.items(): childD = {} parentD = {} # if ont in ["cid"]: continue logger.debug("%s node count %d", ont, len(oD["data"])) rootNodeL = [] for nm, nD in oD["data"].items(): if "parentList" in nD and len(nD["parentList"]) > 1: logger.info("Multiple parents for %s nd %s", ont, nm) continue pN = nD["parentList"][0] if pN not in oD["data"]: rootNodeL.append(nm) childD.setdefault(nm, []).append(pN) parentD.setdefault(pN, []).append(nm) # 
logger.debug("%s root nodes (%d)", ont, len(rootNodeL)) # for ch, prL in childD.items(): if len(prL) > 1: logger.info("%s multiple parents for node %s %d", ont, ch, len(prL)) # -- enumerate leaf nodes - nodes without children leafL = [] for nm in oD["data"]: if nm not in parentD: logger.debug("%s leaf node %r - %s", ont, nm, oD["data"][nm]["name"]) leafL.append(nm) # linD = {} for nm in leafL: pN = childD[nm][0] if nm in childD else None linD.setdefault(nm, []).append(nm) while pN and pN in oD["data"]: linD[nm].append(pN) pN = childD[pN][0] if pN in childD else None # for leafN,
<filename>synapse/state/v2.py # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import heapq import itertools import logging from typing import ( Any, Awaitable, Callable, Collection, Dict, Generator, Iterable, List, Mapping, Optional, Sequence, Set, Tuple, overload, ) from typing_extensions import Literal, Protocol from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap logger = logging.getLogger(__name__) class Clock(Protocol): # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests. # We only ever sleep(0) though, so that other async functions can make forward # progress without waiting for stateres to complete. def sleep(self, duration_ms: float) -> Awaitable[None]: ... class StateResolutionStore(Protocol): # This is usually synapse.state.StateResolutionStore, but it's replaced with a # TestStateResolutionStore in tests. def get_events( self, event_ids: Collection[str], allow_rejected: bool = False ) -> Awaitable[Dict[str, EventBase]]: ... def get_auth_chain_difference( self, room_id: str, state_sets: List[Set[str]] ) -> Awaitable[Set[str]]: ... # We want to await to the reactor occasionally during state res when dealing # with large data sets, so that we don't exhaust the reactor. 
This is done by # awaiting to reactor during loops every N iterations. _AWAIT_AFTER_ITERATIONS = 100 __all__ = [ "resolve_events_with_store", ] async def resolve_events_with_store( clock: Clock, room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm Args: clock room_id: the room we are working in room_version: The room version state_sets: List of dicts of (type, state_key) -> event_id, which are the different state groups to resolve. event_map: a dict from event_id to event, for any events that we happen to have in flight (eg, those currently being persisted). This will be used as a starting point for finding the state we need; any missing events will be requested via state_res_store. If None, all events will be fetched via state_res_store. state_res_store: Returns: A map from (type, state_key) to event_id. """ logger.debug("Computing conflicted state") # We use event_map as a cache, so if its None we need to initialize it if event_map is None: event_map = {} # First split up the un/conflicted state unconflicted_state, conflicted_state = _seperate(state_sets) if not conflicted_state: return unconflicted_state logger.debug("%d conflicted state entries", len(conflicted_state)) logger.debug("Calculating auth chain difference") # Also fetch all auth events that appear in only some of the state sets' # auth chains. 
auth_diff = await _get_auth_chain_difference( room_id, state_sets, event_map, state_res_store ) full_conflicted_set = set( itertools.chain( itertools.chain.from_iterable(conflicted_state.values()), auth_diff ) ) events = await state_res_store.get_events( [eid for eid in full_conflicted_set if eid not in event_map], allow_rejected=True, ) event_map.update(events) # everything in the event map should be in the right room for event in event_map.values(): if event.room_id != room_id: raise Exception( "Attempting to state-resolve for room %s with event %s which is in %s" % ( room_id, event.event_id, event.room_id, ) ) full_conflicted_set = {eid for eid in full_conflicted_set if eid in event_map} logger.debug("%d full_conflicted_set entries", len(full_conflicted_set)) # Get and sort all the power events (kicks/bans/etc) power_events = ( eid for eid in full_conflicted_set if _is_power_event(event_map[eid]) ) sorted_power_events = await _reverse_topological_power_sort( clock, room_id, power_events, event_map, state_res_store, full_conflicted_set ) logger.debug("sorted %d power events", len(sorted_power_events)) # Now sequentially auth each one resolved_state = await _iterative_auth_checks( clock, room_id, room_version, sorted_power_events, unconflicted_state, event_map, state_res_store, ) logger.debug("resolved power events") # OK, so we've now resolved the power events. Now sort the remaining # events using the mainline of the resolved power level. 
set_power_events = set(sorted_power_events) leftover_events = [ ev_id for ev_id in full_conflicted_set if ev_id not in set_power_events ] logger.debug("sorting %d remaining events", len(leftover_events)) pl = resolved_state.get((EventTypes.PowerLevels, ""), None) leftover_events = await _mainline_sort( clock, room_id, leftover_events, pl, event_map, state_res_store ) logger.debug("resolving remaining events") resolved_state = await _iterative_auth_checks( clock, room_id, room_version, leftover_events, resolved_state, event_map, state_res_store, ) logger.debug("resolved") # We make sure that unconflicted state always still applies. resolved_state.update(unconflicted_state) logger.debug("done") return resolved_state async def _get_power_level_for_sender( room_id: str, event_id: str, event_map: Dict[str, EventBase], state_res_store: StateResolutionStore, ) -> int: """Return the power level of the sender of the given event according to their auth events. Args: room_id event_id event_map state_res_store Returns: The power level. """ event = await _get_event(room_id, event_id, event_map, state_res_store) pl = None for aid in event.auth_event_ids(): aev = await _get_event( room_id, aid, event_map, state_res_store, allow_none=True ) if aev and (aev.type, aev.state_key) == (EventTypes.PowerLevels, ""): pl = aev break if pl is None: # Couldn't find power level. 
Check if they're the creator of the room for aid in event.auth_event_ids(): aev = await _get_event( room_id, aid, event_map, state_res_store, allow_none=True ) if aev and (aev.type, aev.state_key) == (EventTypes.Create, ""): if aev.content.get("creator") == event.sender: return 100 break return 0 level = pl.content.get("users", {}).get(event.sender) if level is None: level = pl.content.get("users_default", 0) if level is None: return 0 else: return int(level) async def _get_auth_chain_difference( room_id: str, state_sets: Sequence[Mapping[Any, str]], event_map: Dict[str, EventBase], state_res_store: StateResolutionStore, ) -> Set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some but not all of the auth chains. Args: state_sets event_map state_res_store Returns: Set of event IDs """ # The `StateResolutionStore.get_auth_chain_difference` function assumes that # all events passed to it (and their auth chains) have been persisted # previously. This is not the case for any events in the `event_map`, and so # we need to manually handle those events. # # We do this by: # 1. calculating the auth chain difference for the state sets based on the # events in `event_map` alone # 2. replacing any events in the state_sets that are also in `event_map` # with their auth events (recursively), and then calling # `store.get_auth_chain_difference` as normal # 3. adding the results of 1 and 2 together. # Map from event ID in `event_map` to their auth event IDs, and their auth # event IDs if they appear in the `event_map`. This is the intersection of # the event's auth chain with the events in the `event_map` *plus* their # auth event IDs. 
events_to_auth_chain: Dict[str, Set[str]] = {} for event in event_map.values(): chain = {event.event_id} events_to_auth_chain[event.event_id] = chain to_search = [event] while to_search: for auth_id in to_search.pop().auth_event_ids(): chain.add(auth_id) auth_event = event_map.get(auth_id) if auth_event: to_search.append(auth_event) # We now a) calculate the auth chain difference for the unpersisted events # and b) work out the state sets to pass to the store. # # Note: If the `event_map` is empty (which is the common case), we can do a # much simpler calculation. if event_map: # The list of state sets to pass to the store, where each state set is a set # of the event ids making up the state. This is similar to `state_sets`, # except that (a) we only have event ids, not the complete # ((type, state_key)->event_id) mappings; and (b) we have stripped out # unpersisted events and replaced them with the persisted events in # their auth chain. state_sets_ids: List[Set[str]] = [] # For each state set, the unpersisted event IDs reachable (by their auth # chain) from the events in that set. unpersisted_set_ids: List[Set[str]] = [] for state_set in state_sets: set_ids: Set[str] = set() state_sets_ids.append(set_ids) unpersisted_ids: Set[str] = set() unpersisted_set_ids.append(unpersisted_ids) for event_id in state_set.values(): event_chain = events_to_auth_chain.get(event_id) if event_chain is not None: # We have an event in `event_map`. We add all the auth # events that it references (that aren't also in `event_map`). set_ids.update(e for e in event_chain if e not in event_map) # We also add the full chain of unpersisted event IDs # referenced by this state set, so that we can
from copy import deepcopy import os from typing import Callable, Optional from urllib.request import Request import pystac import pystac.stac_object import pystac.validation from pystac import STAC_IO from pystac_client.conformance import ConformanceClasses from pystac_client.exceptions import ConformanceError from pystac_client.item_search import ( BBoxLike, CollectionsLike, DatetimeLike, IDsLike, IntersectsLike, QueryLike, ItemSearch, ) from pystac_client.stac_api_object import STACAPIObjectMixin class Client(pystac.Catalog, STACAPIObjectMixin): """Instances of the ``Client`` class inherit from :class:`pystac.Catalog` and provide a convenient way of interacting with Catalogs OR APIs that conform to the `STAC API spec <https://github.com/radiantearth/stac-api-spec>`_. In addition to being a valid `STAC Catalog <https://github.com/radiantearth/stac-spec/blob/master/catalog-spec/catalog-spec.md>`_ the API must have a ``"conformsTo"`` property that lists the conformance URIs. All :class:`~pystac_client.Client` instances must be given a ``conformance`` argument at instantiation, and when calling the :meth:`~pystac_client.Client.from_dict` method the dictionary must contain a ``"conformsTo"`` attribute. If this is not true then a :exc:`KeyError` is raised. In addition to the methods and attributes inherited from :class:`pystac.Catalog`, this class offers some convenience methods to testing conformance to various specs. Attributes ---------- conformance : List[str] The list of conformance URIs detailing the capabilities of the service. This object adheres to the `OGC API - Features conformance declaration <http://docs.opengeospatial.org/is/17-069r3/17-069r3.html#_declaration_of_conformance_classes>`_. 
""" def __init__(self, id, description, title=None, stac_extensions=None, extra_fields=None, href=None, catalog_type=None, conformance=None, headers=None): super().__init__(id=id, description=description, title=title, stac_extensions=stac_extensions, extra_fields=extra_fields, href=href, catalog_type=catalog_type) self.conformance = conformance # Check that the API conforms to the STAC API - Core spec (or ignore if None) if conformance is not None and not self.conforms_to(ConformanceClasses.STAC_API_CORE): allowed_uris = "\n\t".join(ConformanceClasses.STAC_API_CORE.all_uris) raise ConformanceError( 'API does not conform to {ConformanceClasses.STAC_API_CORE}. Must contain one of the following ' f'URIs to conform (preferably the first):\n\t{allowed_uris}.') self.headers = headers or {} def __repr__(self): return '<Catalog id={}>'.format(self.id) @classmethod def open(cls, url=None, headers=None): """Alias for PySTAC's STAC Object `from_file` method Parameters ---------- url : str, optional The URL of a STAC Catalog. If not specified, this will use the `STAC_URL` environment variable. Returns ------- catalog : Client """ import pystac_client.stac_io if url is None: url = os.environ.get("STAC_URL") if url is None: raise TypeError( "'url' must be specified or the 'STAC_URL' environment variable must be set.") def read_text_method(url): request = Request(url, headers=headers or {}) return pystac_client.stac_io.read_text_method(request) old_read_text_method = STAC_IO.read_text_method STAC_IO.read_text_method = read_text_method try: catalog = cls.from_file(url) finally: STAC_IO.read_text_method = old_read_text_method catalog.headers = headers return catalog @classmethod def from_dict( cls, d, href=None, root=None, ): """Overwrites the :meth:`pystac.Catalog.from_dict` method to add the ``user_agent`` initialization argument and to check if the content conforms to the STAC API - Core spec. 
Raises ------ pystac_client.exceptions.ConformanceError If the Catalog does not publish conformance URIs in either a ``"conformsTo"`` attribute in the landing page response or in a ``/conformance``. According to the STAC API - Core spec, services must publish this as part of a ``"conformsTo"`` attribute, but some legacy APIs fail to do so. """ catalog_type = pystac.CatalogType.determine_type(d) d = deepcopy(d) id = d.pop('id') description = d.pop('description') title = d.pop('title', None) stac_extensions = d.pop('stac_extensions', None) links = d.pop('links') # allow for no conformance, for now conformance = d.pop('conformsTo', None) d.pop('stac_version') catalog = cls( id=id, description=description, title=title, stac_extensions=stac_extensions, conformance=conformance, extra_fields=d, href=href, catalog_type=catalog_type, ) for link in links: if link['rel'] == 'root': # Remove the link that's generated in Catalog's constructor. catalog.remove_links('root') if link['rel'] != 'self' or href is None: catalog.add_link(pystac.Link.from_dict(link)) return catalog def get_collections_list(self): """Gets list of available collections from this Catalog. Alias for get_child_links since children of an API are always and only ever collections """ return self.get_child_links() def search(self, *, limit: Optional[int] = None, bbox: Optional[BBoxLike] = None, datetime: Optional[DatetimeLike] = None, intersects: Optional[IntersectsLike] = None, ids: Optional[IDsLike] = None, collections: Optional[CollectionsLike] = None, query: Optional[QueryLike] = None, max_items: Optional[int] = None, method: Optional[str] = 'POST', next_resolver: Optional[Callable] = None) -> ItemSearch: """Query the ``/search`` endpoint using the given parameters. This method returns an :class:`~pystac_client.ItemSearch` instance, see that class's documentation for details on how to get the number of matches and iterate over results. 
All keyword arguments are passed directly to the :class:`~pystac_client.ItemSearch` instance. .. warning:: This method is only implemented if the API conforms to the `STAC API - Item Search <https://github.com/radiantearth/stac-api-spec/tree/master/item-search>`__ spec *and* contains a link with a ``"rel"`` type of ``"search"`` in its root catalog. If the API does not meet either of these criteria, this method will raise a :exc:`NotImplementedError`. Parameters ---------- limit : int, optional The maximum number of items to return *per page*. Defaults to ``None``, which falls back to the limit set by the service. bbox: list or tuple or Iterator or str, optional May be a list, tuple, or iterator representing a bounding box of 2D or 3D coordinates. Results will be filtered to only those intersecting the bounding box. datetime: str or datetime.datetime or list or tuple or Iterator, optional Either a single datetime or datetime range used to filter results. You may express a single datetime using a :class:`datetime.datetime` instance, a `RFC 3339-compliant <https://tools.ietf.org/html/rfc3339>`__ timestamp, or a simple date string (see below). Instances of :class:`datetime.datetime` may be either timezone aware or unaware. Timezone aware instances will be converted to a UTC timestamp before being passed to the endpoint. Timezone unaware instances are assumed to represent UTC timestamps. You may represent a datetime range using a ``"/"`` separated string as described in the spec, or a list, tuple, or iterator of 2 timestamps or datetime instances. For open-ended ranges, use either ``".."`` (``'2020-01-01:00:00:00Z/..'``, ``['2020-01-01:00:00:00Z', '..']``) or a value of ``None`` (``['2020-01-01:00:00:00Z', None]``). If using a simple date string, the datetime can be specified in ``YYYY-mm-dd`` format, optionally truncating to ``YYYY-mm`` or just ``YYYY``. 
Simple date strings will be expanded to include the entire time period, for example: - ``2017`` expands to ``2017-01-01T00:00:00Z/2017-12-31T23:59:59Z`` - ``2017-06`` expands to ``2017-06-01T00:00:00Z/2017-06-30T23:59:59Z`` - ``2017-06-10`` expands to ``2017-06-10T00:00:00Z/2017-06-10T23:59:59Z`` If used in a range, the end of the range expands to the end of that day/month/year, for example: - ``2017/2018`` expands to ``2017-01-01T00:00:00Z/2018-12-31T23:59:59Z`` - ``2017-06/2017-07`` expands to ``2017-06-01T00:00:00Z/2017-07-31T23:59:59Z`` - ``2017-06-10/2017-06-11`` expands to ``2017-06-10T00:00:00Z/2017-06-11T23:59:59Z`` intersects: str or dict, optional A GeoJSON-like dictionary or JSON string. Results will be filtered to only those intersecting the geometry ids: list, optional List of Item ids to return. All other filter parameters that further restrict the number of search results (except ``limit``) are ignored. collections: list, optional List of one or more Collection IDs or :class:`pystac.Collection` instances. Only Items in one of the provided Collections will be searched max_items : int or None, optional The maximum number of items to return from the search. *Note that this is not a STAC API - Item Search parameter and is instead used by the client to limit the total number of returned items*. method : str or None, optional The HTTP method to use when making a request to the service. This must be either ``"GET"``, ``"POST"``, or ``None``. If ``None``, this will default to ``"POST"`` if the ``intersects`` argument is present and ``"GET"`` if not. If a ``"POST"`` request receives a ``405`` status for the response, it will automatically retry with a ``"GET"`` request for all subsequent requests. next_resolver: Callable, optional A callable that will be used to construct the next request based on a "next" link and the previous request. Defaults to using the :func:`~pystac_client.paging.simple_stac_resolver`. 
Returns ------- results : ItemSearch Raises ------ NotImplementedError If the API does not conform to the `Item Search spec <https://github.com/radiantearth/stac-api-spec/tree/master/item-search>`__ or does not have a link with a ``"rel"`` type of ``"search"``. """ if self.conformance is not None and not self.conforms_to( ConformanceClasses.STAC_API_ITEM_SEARCH): spec_name = ConformanceClasses.STAC_API_ITEM_SEARCH.name spec_uris = '\n\t'.join(ConformanceClasses.STAC_API_ITEM_SEARCH.all_uris) msg = f'This service does not conform to the {spec_name} spec and therefore the search method is not ' \ f'implemented. Services must publish one of the following conformance URIs in order to conform to ' \ f'this spec (preferably the first one):\n\t{spec_uris}' raise NotImplementedError(msg) search_link = self.get_single_link('search') if search_link is None: raise NotImplementedError( 'No link with a "rel" type of "search" could be found in this services\'s ' 'root catalog.') return ItemSearch(search_link.target, limit=limit, bbox=bbox, datetime=datetime, intersects=intersects, ids=ids, collections=collections, query=query, max_items=max_items, method=method,
<gh_stars>10-100 #! /usr/bin/env python2.7 # -*- coding: utf-8 -*- import datetime import difflib import json import logging import Queue as Q import random import re import socket import sys import threading import time import traceback import isodate import pytz import requests import modules.irc as irc import salty_listener as SaltyListener RESTART = "<restart>" CHECK = "<check threads>" UPDATE = "<update>" TYPE = 0 DATA = 1 interface = Q.Queue() #Set up all the global variables with open('general_config.json', 'r') as data_file: general_config = json.load(data_file, encoding='utf-8') # API keys and sensitive info lol_api_key = general_config['general_info']['lol_api_key'] youtube_api_key = general_config['general_info']['youtube_api_key'] osu_api_key = general_config['general_info']['osu']['osu_api_key'] osu_irc_nick = general_config['general_info']['osu']['osu_irc_nick'] osu_irc_pass = general_config['general_info']['osu']['osu_irc_pass'] db_url = general_config['general_info']['db_url'] default_nick = general_config['general_info']['default_nick'] default_oauth = general_config['general_info']['default_oauth'] # Debugging prints more stuff, development may do more in the future debuging = general_config["general_info"]["debugging"] development = general_config["general_info"]["development"] # IP and port to listen for the website to talk to it when there is an update to the data web_listen_ip = general_config["general_info"]["web_listen_ip"] web_listen_port = general_config['general_info']["web_listen_port"] web_secret = general_config["general_info"]["web_secret"] # super users are used for bot breaking commands and beta commands SUPER_USER = general_config['general_info']['super_users'] logging.basicConfig( filename='debug.log', filemode='w', level=logging.DEBUG, format="[%(levelname)s %(asctime)s] %(message)s", datefmt="%m-%d %H:%M:%S" ) logging.getLogger("requests").setLevel(logging.WARNING) class SaltyBot(object): # Possible values for "user-type" tag in 
messages elevated_user = ["staff", "admin", "global_mod", "mod"] def __init__(self, config_data, debug = False, irc_obj = None): # Default rate limits for non-mods is 20 messages per 30 seconds. # Start with this and elevate if find as mod self.message_limit = 30 self.is_mod = False # If the bot does not have a nickname set use the nick "TheSaltyBot" # Oauth cannot be set unless it is valid, so only check for that if config_data["bot_oauth"] == None: config_data["bot_nick"] = default_nick config_data["bot_oauth"] = default_oauth self.twitch_nick = config_data["bot_nick"] self.twitch_oauth = config_data["bot_oauth"] if not self.twitch_oauth.startswith("oauth:"): self.twitch_oauth = "oauth:" + self.twitch_oauth self.__DB = debug # Used to stop the bot from reviving itself if the user wishes for it to leave self.running = True # Used for social message tracking self.messages_received = 0 # Used to stop us from getting globaled self.rate_limit = 0 # Session key from the website, used to change settings and add/del stuff self.session = config_data["session"] # Associated ID of the user on the website self.user_id = config_data["id"] # Keep the rest of the data around just in case self.config_data = config_data # Only create a new IRC connection if the old one is toast if not irc_obj: self.irc = irc.IRC("irc.twitch.tv", 6667, self.twitch_nick, self.twitch_oauth) else: self.irc = irc_obj self.channel = config_data["twitch_name"] if config_data["speedruncom_nick"]: self.speedruncom_nick = config_data["speedruncom_nick"].lower() else: self.speedruncom_nick = self.channel # Various channel information from twitch self.game = "" self.title = "" self.time_start = None self.stream_online = False # Active commands, custom commands, and commands that are admin only self.commands = [] self.admin_commands = [] self.custom_commands = [] # User blacklist, bot will ignore all users in this list self.blacklist = [] # Voting system built into the bot self.votes = {} # Highlight 
timestamp system self.to_highlight = [] self.review = {"quote": [], "pun": []} # Make sure quotes/puns aren't used twice in a row self.last_text = {"quote": "", "pun": ""} self.t_trig = None with open('blacklists/{}_blacklist.txt'.format(self.channel), 'a+') as data_file: blacklist = data_file.readlines() for i in blacklist: self.blacklist.append(i.split('\n')[0]) # Command rate limiting self.command_times = {} self.custom_command_times = {} def start(self): # Bots are started by calling this method after being initializedd self.thread = threading.Thread(target=self.twitch_run) self.thread.setDaemon(True) self.thread.start() return self.thread def twitch_info(self, game, title, live, online_status): # Called by the auto-updater only, sets the game playing, current title, # if the stream is live, and when it started if not game: game = "" if not title: title = "" self.game = game.lower() self.game_normal = game self.title = title.lower() self.time_start = live self.stream_online = online_status def twitch_connect(self): # Connect to Twitch IRC for new IRC instances if not self.irc.connected: if self.__DB: print "Joining {} as {}.\n".format(self.channel, self.twitch_nick) try: #If it fails to conenct try again in 60 seconds self.irc.connect() self.irc.recv(4096) except Exception: print '{} failed to connect.'.format(self.channel) traceback.print_exc(limit=4) time.sleep(60) self.twitch_connect() # Request the needed capabilites to function correctly self.irc.capability("twitch.tv/tags twitch.tv/commands") self.irc.recv(1024) self.irc.join(self.channel) time.sleep(.5) self.irc.recv(4096) else: if self.__DB: print "{} already connected.\n".format(self.channel) def twitch_commands(self): # Initialize all commands self.command_times["!bot_info"] = {"last": 0, "limit": 300} self.commands.append("!bot_info") self.command_times["!help"] = {"last": 0, "limit": 2} self.commands.append("!help") for i in self.config_data["commands"]: if i["on"]: curr_com = "!" 
+ i["name"] if i["admin"]: self.admin_commands.append(curr_com) else: self.commands.append(curr_com) self.command_times[curr_com] = {"last": 0, "limit": i["limit"] or 30} # Setup the social feature if active if self.config_data["social_active"]: self.command_times["social"] = {"time_last": int(time.time()), "messages": self.config_data["social_messages"] or 0, "messages_last": self.messages_received, "time": self.config_data["social_time"] or 0} self.social_text = self.config_data["social_output"] # Setup the toobou feature if active if self.config_data["toobou_active"]: self.t_trig = self.config_data["toobou_trigger"] self.command_times["toobou"] = {"trigger": self.t_trig, "last": 0, "limit": self.config_data["toobou_limit"] or 1} # Initialize all custom commands for i in self.config_data["custom_commands"]: if i["on"]: self.custom_commands.append("!{}".format(i["trigger"])) self.custom_command_times["!{}".format(i["trigger"])] = {"last": 0, "limit": i["limit"] or 30, "output": i["output"], "admin": i["admin"]} def live_commands(self): # Remove any commands that would not currently work when !commands is used # Type cast to not mess with the original lists active_commands = list(self.commands) + list(self.custom_commands) admin_commands_tmp = list(self.admin_commands) if not self.time_start: try: active_commands.remove('!uptime') except Exception: pass if self.config_data["voting_active"] and self.votes: active_commands.append("!checkvotes") else: try: active_commands.remove("!vote") except Exception: pass if self.config_data["voting_active"]: if self.config_data["voting_mods"]: admin_commands_tmp.append("!createvote") admin_commands_tmp.append("!endvote") if self.game == '': try: active_commands.remove('!leaderboard') except Exception: pass if self.game != 'osu!': try: active_commands.remove('!rank') except Exception: pass try: active_commands.remove('!song') except Exception: pass if self.game != 'league of legends': try: active_commands.remove('!runes') except 
Exception: pass try: active_commands.remove('!masteries') except Exception: pass if 'race' not in self.title and 'racing' not in self.title: try: active_commands.remove('!race') except Exception: pass command_string = ', '.join(sorted(active_commands)) if self.admin_commands: command_string += " | Mod Only Commands: " + ", ".join(sorted(admin_commands_tmp)) if command_string == '!bot_info, !commands': self.twitch_send_message('There are no current active commands.', '!commands') else: self.twitch_send_message(command_string, '!commands') def clear_limiter(self): # Called every 30 seconds by the helper thread # Resets how many messages have been sent self.rate_limit = 0 def twitch_send_message(self, response, command = None): # Sending any message to chat goes through this function try: response = response.encode('utf-8') except Exception: pass if response.startswith('/me') or response.startswith('.me'): # Grant exception for /me because it can't do any harm pass elif response.startswith('.') or response.startswith('/'): # Prevent people from issuing server commands since bot is usually mod (aka /ban) response = "Please stop trying to abuse me BibleThump (messages cannot start with '/' or '.')" command = '' if self.rate_limit < self.message_limit: # Make sure we don't get globaled (this is a re-occuring theme), send message if under limit self.irc.privmsg(self.channel, response) self.rate_limit += 1 else: print "{} has exceeded the IRC rate limit".format(self.channel) return if self.__DB == True: try: db_message = '#' + self.channel + ' ' + self.twitch_nick + ": " + response.decode('utf-8') db_message = db_message.encode('utf-8') print datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S] ') + db_message except Exception: print("Message contained unicode, could not display in terminal\n\n") if command: # Update when the command was last used for rate limiting self.command_times[command]['last'] = int(time.time()) def command_check(self, c_msg, command): # Determines 
if a user can use a given command, and the command is off cooldown # Super users and channel owners do bypass cooldowns and admin if command in self.commands or command in self.admin_commands: if c_msg["sender"] == self.channel or c_msg["sender"] in SUPER_USER: return True if command in self.admin_commands: if c_msg["tags"]["user-type"] in self.elevated_user: return True else: if self.time_check(command): return True return False def time_check(self, command): # Determines if the command is off cooldown, True=available False=cooldown return int(time.time()) - self.command_times[command]['last'] >= self.command_times[command]['limit'] def api_caller(self, url, headers = None): # Call JSON api's for other functions if self.__DB: print url try: data = requests.get(url, headers=headers) if data.status_code == 200: data_decode = data.json() return data_decode else: print data print data.text return False except Exception: traceback.print_exc(limit=2) return False def osu_api_user(self, c_msg): # Retrieve basic information about the osu player (!rank) try: user = c_msg["message"].split("rank ")[1] except IndexError: user = "" osu_nick = user or self.config_data["osu_nick"] url = 'https://osu.ppy.sh/api/get_user?k={}&u={}'.format(osu_api_key, osu_nick) data_decode = self.api_caller(url) if data_decode == False: return try: data_decode = data_decode[0] except IndexError: self.twitch_send_message('User with name "{}" not found.'.format(user)) return username = data_decode['username'] level = data_decode['level'] level = round(float(level)) level = int(level) accuracy = data_decode['accuracy'] accuracy = round(float(accuracy), 2) pp_rank = "{:,}".format(int(data_decode['pp_rank'])) response = '{} is level
from typing import List, Dict BOOLEAN = 0 NATURAL = 1 INDENT = " " * 4 types = {BOOLEAN, NATURAL} ID: int = 0 class Symbol: def __init__(self, line: int = None, name: str = None, type: int = None): self.line = line self.name = name self.symbol_type = type self.label = next_label() def declare(self): if self.name in symbol_table: error(self.line, "Re-declared variable: {0}".format(self.name)) symbol_table[self.name] = self def get_code(self) -> str: return "{0}: resb {1} \t; variable: {2}\n".format(self.label, self.get_size(), self.name) def get_size(self) -> int: if self.symbol_type == BOOLEAN: return 1 else: return 4 symbol_table: Dict[str, Symbol] = {} value_table: Dict[str, int] = {} class Expression: def get_type(self) -> int: pass def get_code(self) -> str: pass def get_value(self) -> int: pass def print(self): print(self.to_string()) def to_string(self) -> str: pass class NumberExpression(Expression): def __init__(self, text: str): self.value = int(text) def get_type(self) -> int: return NATURAL def get_code(self) -> str: return "mov eax,{0}\n".format(self.value) def get_value(self) -> int: return self.value def to_string(self) -> str: return str(self.value) class BooleanExpression(Expression): def __init__(self, value: bool): self.value = value def get_type(self) -> int: return BOOLEAN def get_code(self) -> str: return "mov al,{0}\n".format(1 if self.value else 0) def get_value(self) -> int: return int(self.value) def to_string(self) -> str: return "true" if self.value else "false" class IdExpression(Expression): def __init__(self, line: int, name: str): self.line = line self.name = name def get_type(self) -> int: if self.name not in symbol_table: error(self.line, "Undefined variable: {0}".format(self.name)) return symbol_table[self.name].symbol_type def get_code(self) -> str: if self.name not in symbol_table: error(self.line, "Undefined variable: {0}".format(self.name)) return "mov eax,[{0}]\n".format(symbol_table[self.name].label) def get_value(self) -> 
int: if self.name not in symbol_table: error(self.line, "Variable has not been initialized {0}".format(self.name)) return value_table[self.name] def to_string(self) -> str: return self.name class BinopExpression(Expression): def __init__(self, line: int, op: str, left: Expression, right: Expression): self.line = line self.op = op self.left = left self.right = right def get_type(self) -> int: if self.op == "=": if self.left.get_type() != self.right.get_type(): error(self.line, "Left and right operands of '=' have different types.") else: if self.left.get_type() != operand_type(self.op): error(self.line, "Left operand of '{0}' has unexpected type.".format(self.op)) if self.right.get_type() != operand_type(self.op): error(self.line, "Right operand of '{0}' has unexpected type.".format(self.op)) return return_type(self.op) def get_code(self) -> str: s = self.left.get_code() s += "push eax\n" s += self.right.get_code() s += "pop eax\n" s += eq_code(self.left.get_type() if self.op == "=" else operator_code(self.op)) return s def get_value(self) -> int: left_value: int = self.left.get_value() right_value: int = self.right.get_value() if self.op == "+": return left_value + right_value elif self.op == "-": return left_value - right_value elif self.op == "*": return left_value * right_value elif self.op == "/": return left_value // right_value elif self.op == "%": return left_value % right_value elif self.op == "<": return left_value < right_value elif self.op == ">": return left_value > right_value elif self.op == "<=": return left_value <= right_value elif self.op == ">=": return left_value <= right_value elif self.op == "and": return left_value and right_value elif self.op == "or": return left_value or right_value elif self.op == "=": return left_value == right_value else: error(self.line, "Unkonwn operator: {0}".format(self.op)) def to_string(self) -> str: return "({0}) {1} ({2})".format(self.left.to_string(), self.op, self.right.to_string()) class 
NotExpression(Expression): def __init__(self, line: int, op: str, operand: Expression): self.line = line self.op = op self.operand = operand def get_type(self) -> int: if self.operand.get_type() != BOOLEAN: error(self.line, "Operand of 'not' is not boolean.") return BOOLEAN def get_code(self) -> str: s = self.operand.get_code() s += "xor al,1\n" return s def get_value(self) -> int: return int(not self.operand.get_value()) def to_string(self) -> str: return "{0} ({1})".format(self.op, self.operand.to_string()) class TernaryExpression(Expression): def __init__(self, line: int, condition: Expression, true_expression: Expression, false_expression: Expression): self.line = line self.condition = condition self.true_expression = true_expression self.false_expression = false_expression def get_type(self) -> int: if self.condition.get_type() != BOOLEAN: error(self.line, "Condition of '?:' expression is not boolean.") if self.true_expression.get_type() != self.false_expression.get_type(): error(self.line, "The sides of '?:' expression are not the same type.") return self.true_expression.get_type() def get_code(self) -> str: else_label = next_label() end_label = next_label() s = self.condition.get_code() s += "cmp al,1\n" s += "jne near {0}\n".format(else_label) s += self.true_expression.get_code() s += "jmp {0}\n".format(end_label) s += "{0}:\n".format(else_label) s += self.false_expression.get_code() s += "{0}:\n".format(end_label) return s def get_value(self) -> int: if self.condition.get_value(): return self.true_expression.get_value() else: return self.false_expression.get_value() def to_string(self) -> str: return "({0} ? 
{1} : {2})".format(self.condition.to_string(), self.true_expression.to_string(), self.false_expression.to_string()) class Instruction: def __init__(self, line: int): self.line = line def type_check(self): pass def get_code(self) -> str: pass def execute(self): pass def get_line(self): return self.line def print(self, indent_level: int): pass class AssignInstruction(Instruction): def __init__(self, line: int, left: str, right: Expression): super().__init__(line) self.left = left self.right = right def type_check(self): if self.left not in symbol_table: error(self.line, "Undefined variable: {0}".format(self.left)) if symbol_table[self.left].symbol_type != self.right.get_type(): error(self.line, "Left and right hand sides of assignment are of different types.") def get_code(self) -> str: s = self.right.get_code() s += "mov [{0}], {1}\n".format(symbol_table[self.left].label, get_register(symbol_table[self.left].symbol_type)) return s def execute(self): value_table[self.left] = self.right.get_value() def print(self, indent_level: int): indent(indent_level) print("{0} := {1}".format(self.left, self.right.to_string())) class ReadInstruction(Instruction): def __init__(self, line: int, id: str): super().__init__(line) self.id = id def type_check(self): if self.id not in symbol_table: error(self.line, "Undefined variable: {0}".format(self.id)) def get_code(self) -> str: t = symbol_table[self.id].symbol_type s = "call read_{0}\n".format(get_type_name(t)) s += "mov [{0}],{1}\n".format(symbol_table[self.id].label, get_type_name(t)) return s def execute(self): input_line = input() if symbol_table[self.id].symbol_type == NATURAL: value_table[self.id] = int(input_line) elif symbol_table[self.id].symbol_type == BOOLEAN: if input_line == "true": value_table[self.id] = 1 else: value_table[self.id] = 0 def print(self, indent_level: int): indent(indent_level) print("read({0})".format(self.id)) class WriteInstruction(Instruction): def __init__(self, line: int, exp: Expression): 
super().__init__(line) self.exp = exp self.exp_type = None def type_check(self): self.exp_type = self.exp.get_type() def get_code(self) -> str: s = self.exp.get_code() if self.exp_type == BOOLEAN: s += "and eax,1\n" s += "push eax\n" s += "call write_{0}\n".format(get_type_name(self.exp_type)) return s def execute(self): if self.exp_type == NATURAL: print(self.exp.get_value()) else: print("true" if self.exp.get_value() else "false") def print(self, indent_level: int): indent(indent_level) print("write({0})".format(self.exp.to_string())) class IfInstruction(Instruction): def __init__(self, line: int, condition: Expression, true_branch: List[Instruction], false_branch: List[Instruction]): super().__init__(line) self.condition = condition self.true_branch = true_branch self.false_branch = false_branch def type_check(self): if self.condition.get_type() != BOOLEAN: error(self.line, "Condition of 'if' instruction is not boolean.") type_check_commands(self.true_branch) type_check_commands(self.false_branch) def get_code(self) -> str: else_label = next_label() end_label = next_label() s = self.condition.get_code() s += "cmp al,1\n" s += "jne near {0}\n".format(else_label) s += generate_code_of_commands(self.true_branch) s += "jmp {0}\n".format(end_label) s += "{0}:\n".format(else_label) s += generate_code_of_commands(self.false_branch) s += "{0}:\n".format(end_label) return s def execute(self): if self.condition.get_value(): execute_commands(self.true_branch) else: execute_commands(self.false_branch) def print(self, indent_level: int): indent(indent_level) print("if {0} then".format(self.condition.to_string())) print_commands(indent_level + 1, self.true_branch) if self.false_branch: indent(indent_level) print("else") print_commands(indent_level + 1, self.false_branch) indent(indent_level) print("endif") class WhileInstruction(Instruction): def __init__(self, line: int, condition: Expression, body: List[Instruction]): super().__init__(line) self.condition = condition 
self.body = body def type_check(self): if self.condition.get_type() != BOOLEAN: error(self.line, "Condition of 'while' instruction is not boolean.") type_check_commands(self.body) def get_code(self) -> str: begin_label = next_label() end_label = next_label() s = "{0}:\n".format(begin_label) s += self.condition.get_code() s += "cmp al,1\n" s += "jne near {0}\n".format(end_label) s += generate_code_of_commands(self.body) s += "jmp {0}\n".format(begin_label) s += "{0}:\n".format(end_label) return s def execute(self): while self.condition.get_value(): execute_commands(self.body) def print(self, indent_level: int): indent(indent_level) print("while {0} do".format(self.condition.to_string())) print_commands(indent_level + 1, self.body) indent(indent_level) print("done") class RepeatInstruction(Instruction): def __init__(self, line: int, count: Expression, body: List[Instruction]): super().__init__(line) self.count = count self.body = body def type_check(self): if self.count.get_type() != NATURAL: error(self.line, "Count of 'repeat' instruction is not natural.") type_check_commands(self.body) def get_code(self) -> str: begin_label = next_label() s = self.count.get_code() s += "mov ecx,eax\n" s += "{0}:\n".format(begin_label) s += "push ecx\n" s += generate_code_of_commands(self.body) s += "pop ecx\n" s += "loop {0}\n".format(begin_label) return s def execute(self): for i in range(self.count.get_value(), 0, -1): execute_commands(self.body) def print(self, indent_level: int): indent(indent_level) print("repeat {0} do".format(self.count.get_value())) print_commands(indent_level + 1, self.body) indent(indent_level) print("done") def type_check_commands(commands: List[Instruction]): for it in commands: it.type_check() def execute_commands(commands: List[Instruction]): for command in commands: command.execute() def print_program(name: str, commands: List[Instruction]): print("program {0}".format(name)) for value in symbol_table.values(): print("{0}{1} {2}".format(INDENT, 
"boolean" if value.symbol_type == 0 else "natural", value.name)) print("begin") print_commands(1, commands) print("end") def error(line: int, text: str): print("Line {1}: Error: {1}".format(line, text)) exit(1) def print_commands(indent_level: int, commands: List[Instruction]): for it in commands: it.print(indent_level) def indent(indent_level: int): print(INDENT * indent_level, end="") def next_label() -> str: global ID ID += 1 return "label{0}".format(ID) def generate_code(commands: List[Instruction]): s = "global main\n" s += "extern write_natural\n" s += "extern read_natural\n" s += "extern write_boolean\n" s += "extern read_boolean\n" s +=
<reponame>kanz76/PaddleHelix # Copyright (c) 2021 PaddlePaddle Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data transforms""" import numpy as np from alphafold_paddle.common import residue_constants # ================================================== # helper function # ================================================== def curry1(f): """Supply all arguments except the first.""" def fc(*args, **kwargs): return lambda x: f(x, *args, **kwargs) return fc @curry1 def compose(x, fs): for f in fs: x = f(x) return x def shape_list(x): """Return list of dimensions of an array.""" x = np.array(x) if x.ndim is None: return x.shape static = x.shape ret = [] for _, dim in enumerate(static): ret.append(dim) return ret def one_hot(depth, indices): res = np.eye(depth)[indices.reshape(-1)] return res.reshape(list(indices.shape) + [depth]) def shaped_categorical(probs): ds = shape_list(probs) num_classes = ds[-1] probs = np.reshape(probs, (-1, num_classes)) nums = list(range(num_classes)) counts = [] for prob in probs: counts.append(np.random.choice(nums, p=prob)) return np.reshape(np.array(counts, np.int32), ds[:-1]) class SeedMaker(object): """Return unique seeds.""" def __init__(self, initial_seed=0): self.next_seed = initial_seed def __call__(self): i = self.next_seed self.next_seed += 1 return i NUM_RES = 'num residues placeholder' NUM_MSA_SEQ = 'msa placeholder' NUM_EXTRA_SEQ = 'extra msa placeholder' NUM_TEMPLATES = 'num templates placeholder' MS_MIN32 = -2147483648 MS_MAX32 
= 2147483647 _MSA_FEATURE_NAMES = [ 'msa', 'deletion_matrix', 'msa_mask', 'msa_row_mask', 'bert_mask', 'true_msa' ] Seed_maker = SeedMaker() def make_random_seed(size, low=MS_MIN32, high=MS_MAX32): global Seed_maker np.random.seed(Seed_maker()) return np.random.uniform(size=size, low=low, high=high) # ================================================== # transform for nonensembled data # ================================================== def correct_msa_restypes(protein): """Correct MSA restype to have the same order as residue_constants.""" new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE new_order = np.array(new_order_list, dtype=protein['msa'].dtype) protein['msa'] = new_order[protein['msa']] perm_matrix = np.zeros((22, 22), dtype=np.float32) perm_matrix[range(len(new_order_list)), new_order_list] = 1. return protein @curry1 def add_distillation_flag(protein, distillation): protein['is_distillation'] = np.array( float(distillation), dtype=np.float32) return protein def cast_64bit_ints(protein): for k, v in protein.items(): if v.dtype == np.int64: protein[k] = v.astype(np.int32) return protein def squeeze_features(protein): """Remove singleton and repeated dimensions in protein features.""" protein['aatype'] = np.argmax(protein['aatype'], axis=-1) for k in ['msa', 'num_alignments', 'seq_length', 'sequence', 'superfamily', 'deletion_matrix', 'resolution', 'between_segment_residues', 'residue_index', 'template_all_atom_masks']: if k in protein: final_dim = shape_list(protein[k])[-1] if isinstance(final_dim, int) and final_dim == 1: protein[k] = np.squeeze(protein[k], axis=-1) for k in ['seq_length', 'num_alignments']: if k in protein: # Remove fake sequence dimension protein[k] = protein[k][0] return protein @curry1 def randomly_replace_msa_with_unknown(protein, replace_proportion): """Replace a proportion of the MSA with 'X'.""" msa_mask = np.random.uniform(size=shape_list(protein['msa']), low=0, high=1) < replace_proportion x_idx, gap_idx = 20, 
21 msa_mask = np.logical_and(msa_mask, protein['msa'] != gap_idx) protein['msa'] = np.where( msa_mask, np.ones_like(protein['msa']) * x_idx, protein['msa']) aatype_mask = np.random.uniform(size=shape_list(protein['aatype']), low=0, high=1) < replace_proportion protein['aatype'] = np.where( aatype_mask, np.ones_like(protein['aatype']) * x_idx, protein['aatype']) return protein def make_seq_mask(protein): protein['seq_mask'] = np.ones(shape_list(protein['aatype']), dtype=np.float32) return protein def make_msa_mask(protein): """Mask features are all ones, but will later be zero-padded.""" protein['msa_mask'] = np.ones(shape_list(protein['msa']), dtype=np.float32) protein['msa_row_mask'] = np.ones(shape_list(protein['msa'])[0], dtype=np.float32) return protein def make_hhblits_profile(protein): """Compute the HHblits MSA profile if not already present.""" if 'hhblits_profile' in protein: return protein protein['hhblits_profile'] = np.mean(one_hot(22, protein['msa']), axis=0) return protein def make_random_crop_to_size_seed(protein): """Random seed for cropping residues and templates.""" protein['random_crop_to_size_seed'] = np.array( make_random_seed([2]), np.int32) return protein def fix_templates_aatype(protein): """Fixes aatype encoding of templates.""" protein['template_aatype'] = np.argmax(protein['template_aatype'], axis=-1).astype(np.int32) new_order_list = residue_constants.MAP_HHBLITS_AATYPE_TO_OUR_AATYPE new_order = np.array(new_order_list, np.int32) protein['template_aatype'] = new_order[protein['template_aatype']] return protein def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): """Create pseudo beta features.""" is_gly = np.equal(aatype, residue_constants.restype_order['G']) ca_idx = residue_constants.atom_order['CA'] cb_idx = residue_constants.atom_order['CB'] pseudo_beta = np.where( np.tile(is_gly[..., None].astype("int32"), [1,] * len(is_gly.shape) + [3,]).astype("bool"), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, 
:]) if all_atom_masks is not None: pseudo_beta_mask = np.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx]) pseudo_beta_mask = pseudo_beta_mask.astype(np.float32) return pseudo_beta, pseudo_beta_mask return pseudo_beta @curry1 def make_pseudo_beta(protein, prefix=''): """Create pseudo-beta (alpha for glycine) position and mask.""" assert prefix in ['', 'template_'] pseudo_beta, pseudo_beta_mask = pseudo_beta_fn( protein['template_aatype' if prefix else 'all_atom_aatype'], protein[prefix + 'all_atom_positions'], protein['template_all_atom_masks' if prefix else 'all_atom_mask']) protein[prefix + 'pseudo_beta'] = pseudo_beta protein[prefix + 'pseudo_beta_mask'] = pseudo_beta_mask return protein def make_atom14_masks(protein): """Construct denser atom positions (14 dimensions instead of 37).""" restype_atom14_to_atom37 = [] restype_atom37_to_atom14 = [] restype_atom14_mask = [] for rt in residue_constants.restypes: atom_names = residue_constants.restype_name_to_atom14_names[ residue_constants.restype_1to3[rt]] restype_atom14_to_atom37.append([ (residue_constants.atom_order[name] if name else 0) for name in atom_names]) atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)} restype_atom37_to_atom14.append([ (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in residue_constants.atom_types]) restype_atom14_mask.append([ (1. if name else 0.) for name in atom_names]) restype_atom14_to_atom37.append([0] * 14) restype_atom37_to_atom14.append([0] * 37) restype_atom14_mask.append([0.] 
* 14) restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, np.int32) restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, np.int32) restype_atom14_mask = np.array(restype_atom14_mask, np.float32) residx_atom14_to_atom37 = restype_atom14_to_atom37[protein['aatype']] residx_atom14_mask = restype_atom14_mask[protein['aatype']] protein['atom14_atom_exists'] = residx_atom14_mask protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37 residx_atom37_to_atom14 = restype_atom37_to_atom14[protein['aatype']] protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14 restype_atom37_mask = np.zeros([21, 37], np.float32) for restype, restype_letter in enumerate(residue_constants.restypes): restype_name = residue_constants.restype_1to3[restype_letter] atom_names = residue_constants.residue_atoms[restype_name] for atom_name in atom_names: atom_type = residue_constants.atom_order[atom_name] restype_atom37_mask[restype, atom_type] = 1 residx_atom37_mask = restype_atom37_mask[protein['aatype']] protein['atom37_atom_exists'] = residx_atom37_mask return protein # ================================================== # transform for ensembled data # ================================================== @curry1 def sample_msa(protein, max_seq, keep_extra): """Sample MSA randomly, remaining sequences are stored as `extra_*`.""" num_seq = protein['msa'].shape[0] shuffled = list(range(1, num_seq)) np.random.shuffle(shuffled) shuffled.insert(0, 0) index_order = np.array(shuffled, np.int32) num_sel = min(max_seq, num_seq) sel_seq = index_order[:num_sel] not_sel_seq = index_order[num_sel:] is_sel = num_seq - num_sel for k in _MSA_FEATURE_NAMES: if k in protein: if keep_extra and not is_sel: new_shape = list(protein[k].shape) new_shape[0] = 1 protein['extra_' + k] = np.zeros(new_shape) elif keep_extra and is_sel: protein['extra_' + k] = protein[k][not_sel_seq] if k == 'msa': protein['extra_msa'] = protein['extra_msa'].astype(np.int32) protein[k] = protein[k][sel_seq] return 
protein @curry1 def make_masked_msa(protein, config, replace_fraction): """Create data for BERT on raw MSA.""" random_aa = np.array([0.05] * 20 + [0., 0.], dtype=np.float32) categorical_probs = config.uniform_prob * random_aa + \ config.profile_prob * protein['hhblits_profile'] + \ config.same_prob * one_hot(22, protein['msa']) pad_shapes = [[0, 0] for _ in range(len(categorical_probs.shape))] pad_shapes[-1][1] = 1 mask_prob = 1. - config.profile_prob - config.same_prob - \ config.uniform_prob assert mask_prob >= 0. categorical_probs = np.pad(categorical_probs, pad_shapes, constant_values=(mask_prob,)) mask_position = np.random.uniform(size=shape_list(protein['msa']), low=0, high=1) < replace_fraction bert_msa = shaped_categorical(categorical_probs) bert_msa = np.where(mask_position, bert_msa, protein['msa']) protein['bert_mask'] = mask_position.astype(np.int32) protein['true_msa'] = protein['msa'] protein['msa'] = bert_msa return protein @curry1 def nearest_neighbor_clusters(protein, gap_agreement_weight=0.): """Assign each extra MSA sequence to its nearest neighbor in sampled MSA.""" weights = np.concatenate([ np.ones(21), gap_agreement_weight * np.ones(1), np.zeros(1)], 0) sample_one_hot = protein['msa_mask'][:, :, None] * \ one_hot(23, protein['msa']) num_seq, num_res, _ = shape_list(sample_one_hot) array_extra_msa_mask = protein['extra_msa_mask'] if array_extra_msa_mask.any(): extra_one_hot = protein['extra_msa_mask'][:, :, None] * \ one_hot(23, protein['extra_msa']) extra_num_seq, _, _ = shape_list(extra_one_hot) agreement = np.matmul( np.reshape(extra_one_hot, [extra_num_seq, num_res * 23]), np.reshape(sample_one_hot * weights, [num_seq, num_res * 23]).T) protein['extra_cluster_assignment'] = np.argmax(agreement, axis=1) else: protein['extra_cluster_assignment'] = np.array([]) return protein @curry1 def summarize_clusters(protein): """Produce profile and deletion_matrix_mean within each cluster.""" num_seq = shape_list(protein['msa'])[0] def _csum(x): result 
= [] for i in range(num_seq): result.append(np.sum(x[np.where( protein['extra_cluster_assignment'] == i)], axis=0)) return np.array(result) mask = protein['extra_msa_mask'] mask_counts = 1e-6 + protein['msa_mask'] + _csum(mask) # Include center msa_sum = _csum(mask[:, :, None] * np.zeros(mask.shape + (23,), np.float32)) msa_sum += one_hot(23, protein['msa']) # Original sequence protein['cluster_profile'] = msa_sum / mask_counts[:, :, None] del msa_sum del_sum = _csum(mask * protein['extra_deletion_matrix']) del_sum += protein['deletion_matrix'] # Original sequence protein['cluster_deletion_mean'] = del_sum / mask_counts del del_sum return protein @curry1 def crop_extra_msa(protein, max_extra_msa): """MSA features are cropped so only `max_extra_msa` sequences are kept.""" if protein['extra_msa'].any(): num_seq = protein['extra_msa'].shape[0] num_sel = np.minimum(max_extra_msa, num_seq) shuffled = list(range(num_seq)) np.random.shuffle(shuffled) select_indices = shuffled[:num_sel] for k in _MSA_FEATURE_NAMES: if 'extra_' + k in protein: protein['extra_' + k] = protein['extra_' + k][ select_indices] return protein def delete_extra_msa(protein): for k in _MSA_FEATURE_NAMES: if 'extra_' + k in protein: del protein['extra_' + k] return protein @curry1 def make_msa_feat(protein): """Create and concatenate MSA features.""" has_break = np.clip(protein['between_segment_residues'].astype( np.float32), np.array(0), np.array(1)) aatype_1hot = one_hot(21, protein['aatype']) target_feat = [np.expand_dims(has_break, axis=-1), aatype_1hot] msa_1hot = one_hot(23, protein['msa']) has_deletion = np.clip(protein['deletion_matrix'], np.array(0), np.array(1)) c = 2. / np.pi deletion_value = np.arctan(protein['deletion_matrix'] / 3.) * c msa_feat = [msa_1hot, np.expand_dims(has_deletion, axis=-1), np.expand_dims(deletion_value, axis=-1)] if 'cluster_profile' in protein: deletion_mean_value = ( np.arctan(protein['cluster_deletion_mean'] / 3.) 
* c) msa_feat.extend([protein['cluster_profile'], np.expand_dims(deletion_mean_value, axis=-1)]) if 'extra_deletion_matrix' in protein: protein['extra_has_deletion'] = np.clip( protein['extra_deletion_matrix'], np.array(0), np.array(1)) protein['extra_deletion_value'] = np.arctan( protein['extra_deletion_matrix'] / 3.) * c protein['msa_feat'] = np.concatenate(msa_feat, axis=-1) protein['target_feat'] = np.concatenate(target_feat, axis=-1) return protein @curry1 def select_feat(protein, feature_list): return {k: v for k, v in protein.items() if k in feature_list} @curry1 def random_crop_to_size(protein, crop_size, max_templates,
#!/usr/bin/python3 # -*- coding: utf-8 -*- # *****************************************************************************/ # * Authors: <NAME>, <NAME> # *****************************************************************************/ ## @package telemetryRNN from __future__ import absolute_import, division, print_function, \ unicode_literals # , nested_scopes, generators, generator_stop, with_statement, annotations """ Brief: Library for Recurrent Neural Networks time series analysis proposed by Hochreiter and Schmidhuber, 1997. Description: Class library is designed to be used as a tracking template file to ease use across all files and ensure tracking. Requires: Python 3 Usage Documentation: https://www.tensorflow.org/tutorials/keras """ class Transformer: @staticmethod def get(transformType='AsIs'): if transformType == 'AsIs': return 1 if transformType == 'Noise': return 2 elif transformType == 'Chaos': return 3 elif transformType == 'Novel': return 4 else: return 1 class itemObject(object): def __init__(self, data=None, itemFrequency=None, itemInterval=None, itemOffset=None, itemMin=None, itemMax=None): if itemFrequency is None: itemFrequency = 1 elif itemFrequency < 1: itemFrequency = 1 if itemMin is None: itemMin = 0 if itemMax is None: itemMax = 1 if itemMin > itemMax: itemMin = (itemMax - 1) if itemInterval is None: itemInterval = [itemMin, itemMax] self.interval = itemInterval if itemOffset is None: itemOffset = itemInterval.min else: itemOffset = (itemOffset % itemInterval.max) + itemInterval.min self.itemSet = [itemFrequency, itemInterval, itemOffset] self.payload = data def __getitem__(self): return self @staticmethod def getType(): return 'itemObject' def applyEntropy(self, newItem=None, transformType='AsIs'): return newItem class itemSet(itemObject): def __init__(self, data, superSet=None, item=None): super().__init__(data) if superSet is None: itemSetObject = itemObject(item) self.superSet = [itemSetObject, itemSetObject, itemSetObject] else: 
self.superSet = superSet return def getType(self): return 'itemSet' def applyEntropy(self, newItem=itemObject(), transformType='AsIs'): import numpy if transformType == 'Noise' or transformType == 'Chaos' or transformType == 'Novel': totalItems = len(newItem) arraySet = [] for iterator in range(totalItems): arraySet.append(float(newItem.payload)) averageSet = numpy.average(arraySet) standardDeviationSet = numpy.std(arraySet) noise = numpy.random.normal(averageSet, standardDeviationSet, totalItems) for iterator in range(totalItems): newItem[iterator].payload = newItem.payload + noise else: # 'As Is' means to not modify the item. newItem = newItem return newItem class itemSequence(itemObject, itemSet): def __init__(self, superSet=None): self.data = superSet @staticmethod def getType(): return 'itemSequence' def applyEntropy(self, newItem, signature): return class seriesRNNPredict(object): import ctypes, datetime, os maxPath = 256 maxTime = 32 maxName = 16 _pack_ = 1 _fields_ = [ ("absPath", ctypes.c_wchar * maxPath, ctypes.sizeof(ctypes.c_char * maxPath)), # Path to the file. ("filename", ctypes.c_uint32, ctypes.sizeof(ctypes.c_uint32)), # Identification file. ("major", ctypes.c_uint16, ctypes.sizeof(ctypes.c_uint16)), # Major version number of the file. ("minor", ctypes.c_uint16, ctypes.sizeof(ctypes.c_uint16)), # Minor version number of the file. ("time", ctypes.c_wchar * maxTime, ctypes.sizeof(ctypes.c_char * maxTime)), # Time of execution or creation. ("user", ctypes.c_wchar * maxName, ctypes.sizeof(ctypes.c_char * maxName)), # Name of the creator. 
] def __init__(self, absPath=os.path.abspath(os.getcwd()), filename=__file__, major=0, minor=0, time=datetime.datetime.now(), user="jdtarang", uid=1, trainingInFileName="data/training/data.cvs", delimiterToken=',', headerRowLocation=0, missingDataFill='0', dateLabel='year-month-day_hour-minute-second-milliseconds', dateFormat='%Y-%m-%d_%H-%M-%S-%f', trainingDataStream=None, dataSetEnumerationMap=None, dataSetEnumerationMapTranslated=None, vocabulary=None, sequenceLength=100, predictionLength=1, trainingNetworkInput=None, trainingNetworkOutput=None, patterns=None, hiddenLayers=128, dropoutRate=0.2, model=None, epochs=200, batch_size=32, bestWeightsFile=None, trainingHistoryCheckpoint=None, checkpoint=None, seedDataPoint=None, generateWidth=500, predictionOutput=None, predictTrainingData='data/trainingData/dataSeries_predicted.dat', splittingDelimiter='.', transformType='AsIs', predictedDataStream=None): """ Initalizes an object. The parameters have default values to ensure all fields have a setting. The setting of variable by the user will allow for customization on tracking of meta data. Args: uid: Applicaiton Unique Identifier. absPath: absoilute path of current file. filename: curernt file name. major: The major field is used for detection of structural ordering. minor: The minor field is used for detection of extensions. time: Creation time. user: The organization user information. trainingInFileName: Name of the cvs format file with training data. delimiterToken = Token in delimiting the data. headerRowLocation: Indication of labels for data set missingDataFill: In the case data is missing fill it with this field. dateLabel: Data label to look for in columns dateFormat: Format parsing function for data column. trainingDataStream: Prepared data stream of values. dataSetEnumerationMap: Conversion of data to strings in a unique list. dataSetEnumerationMapTranslated: Encoding of data into one-hot encoding. vocabulary: the scope of generating from input set. 
sequenceLength: based on the properties of the data, define the sequence length for the best nearest neighbor sequence prediction. predictionLength: Forward window of prediction based on trained inputs. trainingNetworkInput: Neural network input size of sequences. trainingNetworkOutput: Neural network input size of sequences. patterns: Sequence encoding length. hiddenLayers: Depth of the neural network. dropoutRate: Rate define to ensure over fitting does not occur. model: Neural network construction for the application. epochs: Forward and backward pass of all training data. batch_size: The number of training data samples in one forward and backwards pass. Note the larger the value the more memory consumed. bestWeightsFile: Hierarchical Data Format version 5 (HDF5). trainingHistoryCheckpoint: History table of the data in the case of failure. checkpoint: Neural network snapshot in the case of failure. seedDataPoint: The starting data point in which prediction is to occur. generateWidth: In the prediction process, we want to specify the width of data generated. predictionOutput: Prediction stream generated. predictTrainingData: Prediction training data stream generation file save location. splittingDelimiter: Depending on the data layout, if subsequences dual data exist then we will want to know the delimiter. If this does not exist ignore. transformType: When predicting data, we may want to apply a transformation on the data to generate simularity, chaos in noise, or nothing. Specify the types here. Returns: None. 
Raises: None Format: Contents: Labels Data Example: year-month-day_hour-minute-second-milliseconds, dataStructure.itemOne, dataStructure.itemTwo, dataStructure.itemN 2009-01-06_15-08-24-789150, 1, 100, 15, 1 Examples: $ python telemetryRNN.py """ import os __origin__ = '' self.uid = uid self.absPath = absPath self.filename = filename self.major = major self.minor = minor self.time = time self.user = user self.debugStatus = True print("Filepath ", absPath) print("Origin ", __origin__) print("Absolute Path ", absPath) print("Filename ", filename) print("Major ", major) print("Minor ", minor) print("Time ", time) print("User ", user) print("Debug Status", self.debugStatus) self.debugPath = os.path.join(absPath, "CrashDump") self.trainingInFileName = trainingInFileName self.delimiterToken = delimiterToken self.headerRowLocation = headerRowLocation self.missingDataFill = missingDataFill self.dateLabel = dateLabel self.dateFormat = dateFormat self.trainingDataStream = trainingDataStream self.dataSetEnumerationMap = dataSetEnumerationMap self.dataSetEnumerationMapTranslated = dataSetEnumerationMapTranslated self.vocabulary = vocabulary self.sequenceLength = sequenceLength self.predictionLength = predictionLength self.trainingNetworkInput = trainingNetworkInput self.trainingNetworkOutput = trainingNetworkOutput self.patterns = patterns self.hiddenLayers = hiddenLayers self.dropoutRate = dropoutRate self.model = model self.epochs = epochs self.batch_size = batch_size if bestWeightsFile is None: self.bestWeightsFile = (self.dataFormatter() + '.weights.best.hdf5') self.trainingHistoryCheckpoint = trainingHistoryCheckpoint self.checkpoint = checkpoint self.seedDataPoint = seedDataPoint self.generateWidth = generateWidth self.predictionOutput = predictionOutput self.predictTrainingData = predictTrainingData self.splittingDelimiter = splittingDelimiter self.transformType = transformType self.predictedDataStream = predictedDataStream return def enableDebug(self): self.debugStatus 
= True return def disableDebug(self): self.debugStatus = True return def executeAll(self, trainingInFileName="data/training/data.cvs", delimiterToken=',', headerRowLocation=0, missingDataFill='0', dateLabel='year-month-day_hour-minute-second-milliseconds', dateFormat='%Y-%m-%d_%H-%M-%S-%f', trainingDataStream=None, dataSetEnumerationMap=None, dataSetEnumerationMapTranslated=None, vocabulary=None, sequenceLength=100, predictionLength=1, trainingNetworkInput=None, trainingNetworkOutput=None, patterns=None, hiddenLayers=128, dropoutRate=0.2, model=None, epochs=200, batch_size=32, bestWeightsFile=('UnknownTime.{epoch:02d}-{val_accuracy:.2f}.weights.best.hdf5'), trainingHistoryCheckpoint=None, checkpoint=None, seedDataPoint=None, generateWidth=500, predictionOutput=None, predictTrainingData='run/predicted/dataSeries_predicted.dat', splittingDelimiter='.', transformType='AsIs'): trainingDataStream = self.prepareCVS(trainingInFileName, delimiterToken, headerRowLocation, missingDataFill, dateLabel='year-month-day_hour-minute-second-milliseconds', dateFormat='%Y-%m-%d_%H-%M-%S-%f') (dataSetEnumerationMap, dataSetEnumerationMapTranslated, vocabulary) = self.dataMaps(trainingDataStream) (trainingNetworkInput, trainingNetworkOutput, patterns) = self.normalizeEncodingSequence(trainingDataStream, dataSetEnumerationMap, dataSetEnumerationMapTranslated, vocabulary, sequenceLength, predictionLength) model = self.createRecurrentNeuralNetworkModel(trainingNetworkInput, vocabulary, trainingNetworkOutput, hiddenLayers, dropoutRate) (trainingHistoryCheckpoint, checkpoint) = self.fitRecurrentNeuralNetworkModel(model, trainingNetworkInput, trainingNetworkOutput, epochs, batch_size, bestWeightsFile) predictionOutput = self.generatePredictSequences(seedDataPoint, generateWidth, dataSetEnumerationMapTranslated, trainingNetworkInput, patterns, vocabulary, model) predictedDataStream = self.predictionTransformAndSave(predictTrainingData, predictionOutput, splittingDelimiter, transformType) 
return predictedDataStream @staticmethod def dataFormatter(timeValue=None, dateFormat='%Y-%m-%d_%H-%M-%S-%f'): import datetime, time if timeValue is None: timeValue = datetime.datetime.now() # Note: dateLabel = 'year-month-day_hour-minute-second-milliseconds' return time.strftime(dateFormat, timeValue) def weightFileName(self, time=None): dateTag = self.dataFormatter(time) filename = (dateTag + "-{epoch:02d}-{val_accuracy:.2f}") return filename @staticmethod def loadModelFolder(modelFolder): import tensorflow model = tensorflow.keras.models.load_model(modelFolder) # Check its architecture model.summary() return model @staticmethod def loadModelFile(weightsFile): import tensorflow model = tensorflow.keras.Model().load_weights(filepath=weightsFile) # Check its architecture model.summary() return model @staticmethod def showDataStream(trainingDataStream): for batch, label in trainingDataStream.take(1): for key, value in batch.items(): print("{:20s}: {}".format(key, value.numpy())) return def prepareCVS(self, filename="data.cvs", delimiterToken=',', headerRowLocation=0, missingDataFill='0', dateLabel='year-month-day_hour-minute-second-milliseconds', dateFormat='%Y-%m-%d_%H-%M-%S-%f'): import pandas ''' Example File Format, with all data in integer form. 
year-month-day_hour-minute-second-milliseconds, dataStructure.itemOne, dataStructure.itemTwo, dataStructure.itemN 2009-01-06_15-08-24-789150, 1, 100, 15, 1 ''' dataStream = pandas.read_csv(filename, delimiter=delimiterToken, header=headerRowLocation, na_values=[missingDataFill], verbose=self.debugStatus) dataStream[dateLabel] = pandas.to_datetime(dataStream[dateLabel], format=dateFormat) pandas.setIndex(dateLabel) trainingDataStream = dataStream if self.debugStatus is True: rowStart = 0 rowStop = 2 print("Sample rows \n", trainingDataStream[rowStart:rowStop:]) self.showDataStream(trainingDataStream) return trainingDataStream @staticmethod def dataTypeEncoding(encoderType=None): # @todo in progress # Integer Encoding: Where each unique label is mapped to an integer. Used when fields have relation. if encoderType == 'Integer': return # One Hot Encoding: Where each label is mapped to a binary vector. Used for no relationship # Two main drawbacks: # For high-cardinality variables —
in dimension estimate" ## print " ... current dim = %.3f, old dim = %.3f, error = %.3f"%(dim,old_dim,err) ## not_done = False ## continue ## if in_zone: ## zone_err = abs(dim-ref_dim)/ref_dim ## if in_zone and zone_err > tol_up: ## not_done = False # end neighbourhood growth ## if logd[hiix]-logd[loix] > log_max_radius_ratio: ## not_done = False # end neighbourhood growth ## if not quiet: ## print "Radius too large: %.4f/%.4f"%(exp(logd[hiix]),exp(logd[loix])) ## elif err < tol_down: ## in_zone = True ## ref_dim = dim ## if not quiet: ## print " - entered zone at ix", hiix, " - with ref dim ", ref_dim old_residual = residual old_dim = dim nhd_too_small = nhd_too_small or hiix-loix < min_nhd_size if nhd_too_small: print("Neighbourhood too small. Try a different starting index or a new reference point ...") print("Dim found over ixs [%i, %i] = %.4f"%(loix,hiix,dim)) raise RuntimeError else: if not quiet: print("\nDimension = %f"%dim) print("Found best fit line from relative ix %i to %i (radius %f)"%(loix, hiix, d[hiix])) # consolidate results in terms of global index positions in data covered_ixs = [d_inv(d[ix])[0] for ix in range(loix, hiix+1)] covered_ixs.append(refptix) covering[refptix] = (dim, d[loix], d[hiix], covered_ixs) for ix in covered_ixs: if covered[ix] is None: covered[ix] = [refptix] else: covered[ix].append(refptix) return (covered, covering) def do_stats(covering, covered, maxD, bin_width=1, fignum=1, num_panels=4, s_frac=0.5, nhd_size_thresh=None, nhd_max_plot_size=None, minD=None, D_estimate_method="absolute", weight_by_size=False, force_xaxis_limits=False, radius_yaxis_limits=None, save=''): """Produces statistics of pointwise dimension estimates of a data set, including a histogram of the distribution of neighbourhood dimensions. <NAME>, June 2006. INPUTS: maxD sets the maximum dimension to cater for. bin_width selects the width of bins for dimensions (default 1). fignum forces a particular figure number to be used for display. 
(Use fignum=0 to suppress plotting of the results and just return the statistics.) num_panels argument must be 2 (for just nhd size and bin histo) or 4 (also include min and max nhd radius). s_frac selects the fraction (or multiple) of the standard deviation of neighbourhood sizes to use as the cutoff for the "bulk" of the data set, in order to avoid including large outliers in determining nhd_size_threshold (default 0.5). nhd_size_threshold selects the smallest neighbourhood size to be included in histogram of neighbourhood dimensionalities (default None => automatic selection, i.e., the mean of the first s_frac of the neighbourhood sizes' standard deviation). Set to zero to prevent it being used or plotted. nhd_max_plot_size selects the y-axis limit on the plot of neighbourhood sizes (default None => automatic selection). D_estimate_method can be 'absolute' (uses cutoff of size = 1 to estimate D in histogram tail), or 'relative' (uses a cutoff = %age of peak binned neighbourhoods) force_xaxis_limits (Boolean) determines whether x-axis limits should be forced to be the value of the (minD,maxD) parameters. minD sets the minimum dimension to cater for (default to auto-choice). radius_yaxis_limits (pair) sets the r_min and r_max y-axis upper limits, respectively. save (string) sets a filename to use for saving the figure in available formats (so don't include a file extension). 
OUTPUTS: d_stats: dictionary {"D_tail": estimated D from tail, "D_mean": histo mean, "D_std": histo std dev} dbins: Pointset of D bins cover_by_dimix: dictionary of covers by index of dimensions binned (i.e., [0, bin_width, 2*bin_width, ..., maxD+1]) cover_by_size: list of neighbourhood coverings ordered by their size cbu: array indicating how many points in overlap between neighbourhoods (array index corresponds to a number of points shared, value = number of nhds with that number of points shared) ixs: list of indices of neighbourhoods of size > nhd_size_threshold (integral: internal diagnostic feature) """ dx = zeros((len(covering),1),'f') ly = zeros((len(covering),1),'f') rloy = zeros((len(covering),1),'f') rhiy = zeros((len(covering),1),'f') drange = arange(0, maxD+1, bin_width, 'd').tolist() dbins = data_bins('D', drange) cover_by_dimix = {}.fromkeys(range(len(drange))) for dix in range(len(drange)): cover_by_dimix[dix] = [] cover_by_size_dict = {}.fromkeys(range(len(covered)+1)) ix = 0 integral = 0 largest = 0 try: for p, (d, rlo, rhi, l) in covering.items(): dx[ix] = d rloy[ix] = rlo rhiy[ix] = rhi lenl = len(l) if lenl > largest: largest = lenl try: cover_by_size_dict[lenl].append(p) except AttributeError: cover_by_size_dict[lenl] = [p] ly[ix] = lenl integral += lenl ix += 1 except ValueError: # No max radius information available! 
# compatible with old version of covering that does not return rhi for p, (d, rlo, l) in covering.items(): dx[ix] = d rloy[ix] = rlo rhiy[ix] = 0 lenl = len(l) if lenl > largest: largest = lenl try: cover_by_size_dict[lenl].append(p) except AttributeError: cover_by_size_dict[lenl] = [p] ly[ix] = lenl integral += lenl ix += 1 print("\nNeighbourhood statistics:") print("There are %i neighbourhoods to this covering"%len(covering)) print("Largest neighbourhood has %i points"%largest) cover_by_size = [di for di in cover_by_size_dict.items() if di[1] is not None] cover_by_size.sort() csizes = [c[0] for c in cover_by_size] if nhd_size_thresh is None: s = std(csizes) sm = s_frac*s m = mean(csizes) print("Std. dev. of cover_by_size =", s) print("Max size found =", max(csizes)) print("Mean size found =", m) ## # find largest index of set of covering nhds such that its size ## # is smaller than s_frac% of the std deviation of the sizes ## for i in range(len(cover_by_size)): ## if csizes[i] > sm: ## break ## try: ## nhd_size_thresh = mean(csizes[:i]) ## except ZeroDivisionError: ## nhd_size_thresh = 0 ## print "N'hood size threshold used = mean(cover_by_sizes restricted to %.3f of 1st std. dev.)"%s_frac ## print " =", nhd_size_thresh nhd_size_thresh = m-sm print("N'hood size threshold used = mean - %.3f of std. 
dev."%s_frac) print(" =", nhd_size_thresh) else: if nhd_size_thresh > 0: print("N'hood size threshold set by user =", nhd_size_thresh) if nhd_max_plot_size is None: ## # 2 * original set sizes mean ## try: ## nhd_max_plot_size = 2*mean(csizes) ## except ZeroDivisionError: ## nhd_max_plot_size = largest ## print "Using max plot nhd size = 2*mean(cover_by_sizes) =", nhd_max_plot_size nhd_max_plot_size = largest print("Using max plot nhd size of largest neighbourhood found =", nhd_max_plot_size) else: print("Using max plot nhd size set by user =", nhd_max_plot_size) # reverse so that largest first for returning to user cover_by_size.reverse() filtered_ixs = [] try: for p, (d, rlo, rhi, l) in covering.items(): if d <= maxD: dix = dbins.resolve_bin_index(d) if len(l) > nhd_size_thresh: if weight_by_size: inc = len(l)/largest else: inc = 1 dbins.increment(d, inc) filtered_ixs.append(p) cover_by_dimix[dix].append(p) except ValueError: # compatible with old version of covering that does not return rhi for p, (d, rlo, l) in covering.items(): if d <= maxD: dix = dbins.resolve_bin_index(d) if len(l) > nhd_size_thresh: if weight_by_size: inc = len(l)/largest else: inc = 1 dbins.increment(d, inc) filtered_ixs.append(p) cover_by_dimix[dix].append(p) # decide on single dimension estimate ks=dbins.midpoints d_est = 0 # initial value if D_estimate_method == "absolute": # based on largest D > 1 in # tail of dbins histogram (assumes unimodal) if len(covering) < 3: dbin_thresh = 0 else: dbin_thresh = 1 for k in ks: n = dbins(k) if d_est > 0 and n == 0: # stop before a second spike might occur break if n > dbin_thresh: d_est = k elif D_estimate_method == "relative": peak = 0 last_peak = 0 for k in ks: n = dbins(k) if n > peak and n > 1: peak = n last_peak = n if d_est > 0 and n == 0: # stop before a second spike might occur break if peak > 0: # only applies if peak already found if n > .2*last_peak: # discard bins if smaller than 20% of last peak d_est = k # only change last peak 
record if new one is actually smaller! last_peak = n else: raise ValueError("Invalid dimension estimation method name") if fignum > 0: fontmath = args(fontsize=22,fontname='Times') fonttext = args(fontsize=18,fontname='Times') # make plot print("Plotting histograms to figure", fignum) assert num_panels in [2,4], "num_panels argument must be 2 or 4" figure(fignum) if num_panels == 4: subplot(2,2,1) plot(dx, rhiy, 'ro') glines = getp(gca(), 'xticklines')+getp(gca(), 'yticklines') setp(glines, 'linewidth', 2) ## title('nhd max radius') xlabel(r'$\rm{Dimension}$', fonttext) ylabel(r'$r_{max}$', fontmath) if num_panels == 4: subplot(2,2,2) else: subplot(2,1,1) plot(dx, ly, 'ko') glines = getp(gca(), 'xticklines')+getp(gca(), 'yticklines') setp(glines, 'linewidth', 2) dmin, dmax = figure(fignum).axes[0].get_xlim() if dmax > maxD or force_xaxis_limits: dmax = maxD if dmin < minD or force_xaxis_limits: if minD is not None: dmin = minD
<gh_stars>0 from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.db import models, IntegrityError from django.db.models.signals import post_save from django.utils import timezone from django.utils.translation import ugettext as _ # Create your models here. from ASHMC.main.models import GradYear, Utility from ASHMC.roster.models import Dorm, DormRoom, UserRoom import datetime import logging logger = logging.getLogger(__name__) class IntegerRangeField(models.IntegerField): def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs): self.min_value, self.max_value = min_value, max_value models.IntegerField.__init__(self, verbose_name, name, **kwargs) def formfield(self, **kwargs): defaults = {'min_value': self.min_value, 'max_value': self.max_value} defaults.update(kwargs) return super(IntegerRangeField, self).formfield(**defaults) class InstantRerunVotingRound(models.Model): """ A single round of IRV voting. See en.wikipedia.org/wiki/Instant-runoff_voting for more. """ number = models.IntegerField(default=1) ballot = models.ForeignKey("Ballot") class Meta: unique_together = (('ballot', 'number'),) def __unicode__(self): return "Round {} for {}".format(self.number, self.ballot) class IRVCandidate(models.Model): """A wrapper around a candidate in an IRV round.""" irv_round = models.ForeignKey(InstantRerunVotingRound) candidate = models.ForeignKey("Candidate") # Need to track votes between rounds and candidates. votes = models.ManyToManyField("PreferentialVote") def __unicode__(self): return "{}".format(self.candidate) class Ballot(models.Model): """For example, a ballot for ASHMC President would have candidates (actually PersonCandidates). Multiple ballots can appear in a measure; that is, you can have a ballot for ASHMC President election and one for VP election in the same measure. 
""" VOTE_TYPES = Utility.enum('POPULARITY', 'PREFERENCE', 'SELECT_X', 'INOROUT', type_name='BallotVoteType') TYPES = ( (VOTE_TYPES.POPULARITY, "Popularity"), (VOTE_TYPES.PREFERENCE, 'Preference'), (VOTE_TYPES.SELECT_X, 'Select Top X'), (VOTE_TYPES.INOROUT, 'Yes or No'), ) vote_type = models.SmallIntegerField(default=0, choices=TYPES) number_to_select = models.PositiveIntegerField(blank=True, null=True) measure = models.ForeignKey('Measure', null=True) display_position = models.IntegerField(default=1) title = models.CharField(max_length=50) blurb = models.TextField() can_write_in = models.BooleanField(default=False) can_abstain = models.BooleanField(default=True) is_secret = models.BooleanField(default=False) is_irv = models.BooleanField(default=False, help_text='Only applies to Preferential type; changes the way winners are calculated.', ) def get_winners(self): """does not break ties.""" if self.candidate_set.count() == 0: return [] if self.vote_type == self.VOTE_TYPES.POPULARITY or self.vote_type == self.VOTE_TYPES.INOROUT: max_choices = max( self.candidate_set.annotate(pv_max=models.Count('popularityvote')).values_list('pv_max', flat=True) ) return self.candidate_set.annotate(models.Count('popularityvote')).filter(popularityvote__count=max_choices) elif self.vote_type == self.VOTE_TYPES.PREFERENCE: if not self.is_irv: # The lower the sum of the ranks of a candidate, the better they're doing overall. min_choices = min(self.candidate_set.annotate(pf_sum=models.Sum('preferentialvote__amount')).values_list('pf_sum', flat=True)) return self.candidate_set.annotate(models.Sum('preferentialvote__amount')).filter(preferentialvote__amount=min_choices) # IRV voting is a little more intense. # we do rounds - until one candidate has a majority, we eliminate the # least popular candidate and apply the voters' other votes to their # next favorite choice. 
# http://en.wikipedia.org/wiki/Instant-runoff_voting # Since each candidate gets a vote with a different ballot, # we normalize the number of votes by the number of voters -- # each voter should have contributed the same number of votes per # ballot (e.g., the number of candidates on the ballot) # If no more votes can come in, then don't calculate again. # Don't do anything while votes can still come in. if not self.measure.voting_closed: return Candidate.objects.none() # Don't re-calculate if it's been calculated. if InstantRerunVotingRound.objects.filter(ballot=self).count() != 0: irvr = InstantRerunVotingRound.objects.filter(ballot=self).order_by('-number')[0] try: max_votes = max(IRVCandidate.objects.filter( irv_round=irvr, ).annotate(models.Count('votes')).values_list( 'votes__count', flat=True, )) return irvr.irvcandidate_set.annotate(models.Count('votes')).filter(votes__count=max_votes) except ValueError: return IRVCandidate.objects.none() logger.debug("Calcluating IRV for BALLOT %s", self) total_votes = self.measure.vote_set.count() candidates = list(self.candidate_set.all()) logger.debug("total votes: %s", total_votes) the_round = InstantRerunVotingRound.objects.create( number=1, ballot=self, ) for candidate in candidates: logger.debug("Creating initial IRVCandidate for %s", candidate) irvc = IRVCandidate.objects.create( candidate=candidate, irv_round=the_round, # Get the FIRST-CHOICE votes for every candidate. ) irvc.votes.add(*list(self.preferentialvote_set.filter( candidate=candidate, amount=1, ))) # Do rounds until *someone* has a majority. 
logger.debug("calculating starting max votes") try: max_votes = max(IRVCandidate.objects.filter( irv_round=the_round, ).annotate(models.Count('votes')).values_list( 'votes__count', flat=True, )) logger.debug("max_votes: %s", max_votes) except ValueError: Candidate.objects.none() while max_votes <= total_votes / 2: logger.debug("%s out of %s", max_votes, total_votes) old_candidates = IRVCandidate.objects.filter(irv_round=the_round).annotate(pv_count=models.Count('votes')).order_by('pv_count') logger.debug("old candidates: %s", old_candidates) the_round = InstantRerunVotingRound.objects.create( ballot=self, number=the_round.number + 1, ) logger.debug("creating new round: %s", the_round.number) # TODO: break ties LOLOLOL loser = old_candidates[0] logger.debug("found loser: %s", loser) # Move all the surviving votes over, and delete the # loser from the running. for irvcandidate in old_candidates[1:]: irvc = IRVCandidate.objects.create( irv_round=the_round, candidate=irvcandidate.candidate, ) irvc.votes.add(*list(irvcandidate.votes.all())) # Attach losers votes to their next preference for pvote in loser.votes.all(): logger.debug("attaching loser's votes to other candidates") v = pvote.vote try: next_pvote = v.preferentialvote_set.get( amount=pvote.amount + 1, ) except PreferentialVote.ObjectDoesNotExist: logger.info("losing a preference due to lack of ranking") # If there are no more preferences, that's fine. # their vote no longer contributes to the total, # and they don't get a candidate assigned to them. total_votes -= 1 continue next_irc = IRVCandidate.objects.get( irv_round=the_round, candidate=next_pvote.candidate, ) next_irc.votes.add(next_pvote) max_votes = max(IRVCandidate.objects.filter( irv_round=the_round, ).annotate(models.Count('votes')).values_list( 'votes__count', flat=True )) # The rounds are over! Time to find the max votes and return the list # of winners. 
return IRVCandidate.objects.annotate(models.Count('votes')).filter(votes__count=max_votes, irv_round=the_round) elif self.vote_type == self.VOTE_TYPES.SELECT_X: max_choices = self.candidate_set.annotate(pv_max=models.Count('popularityvote')).order_by('-pv_max').values_list('pv_max', flat=True)[0] return self.candidate_set.annotate(models.Count('popularityvote')).filter(popularityvote__count=max_choices) def ordered_candidates(self): if self.vote_type == self.VOTE_TYPES.POPULARITY or self.vote_type == self.VOTE_TYPES.INOROUT: return self.candidate_set.annotate(votes=models.Count('popularityvote')).order_by('-votes') elif self.vote_type == self.VOTE_TYPES.PREFERENCE: return self.candidate_set.annotate(votes=models.Sum('preferentialvote__amount')).order_by('votes') elif self.vote_type == self.VOTE_TYPES.SELECT_X: return self.candidate_set.annotate(votes=models.Count('popularityvote')).order_by('-votes') def __unicode__(self): return u"Ballot #{}: {}".format(self.id, self.title) class Meta: unique_together = (('measure', 'title'), ) def save(self, *args, **kwargs): if self.vote_type == self.VOTE_TYPES.SELECT_X and self.number_to_select is None: raise IntegrityError("Can't have a SELECT_X vote type and no number_to_select.") super(Ballot, self).save(*args, **kwargs) if self.vote_type == self.VOTE_TYPES.INOROUT: # create the two candidates now. yes, _ = Candidate.objects.get_or_create( title="Yes", ballot=self, ) no, _ = Candidate.objects.get_or_create( title="No", ballot=self, ) class Measure(models.Model): """A collection of ballots. 
This is probably where you'd want to calculate things like quorum.""" name = models.CharField(max_length=50) summary = models.TextField() vote_start = models.DateTimeField(default=datetime.datetime.now) vote_end = models.DateTimeField(null=True, blank=True, help_text="""If you don't specify an end time, the measure will automatically close the midnight after quorum is reached.""", ) is_open = models.BooleanField(default=True) real_type = models.ForeignKey(ContentType, editable=False, null=True) banned_accounts = models.ManyToManyField(User, null=True, blank=True) quorum = IntegerRangeField(default=50, help_text="Integer value between 0 and 100; what percentage of student response is quorum for this ballot?", max_value=100, min_value=0, ) @property def actual_quorum(self): return (float(Vote.objects.filter(measure=self).count()) / self.eligible_voters.count()) * 100 @property def has_reached_quorum(self): return self.quorum <= self.actual_quorum @property def eligible_voters(self): if self.restrictions is None: return User.objects.filter(inactive=False) return self.restrictions.get_grad_year_users() & self.restrictions.get_dorm_users() @property def voting_closed(self): return (not self.is_open) or (self.vote_end is not None and self.vote_end < timezone.now()) class Meta: verbose_name = _('Measure') verbose_name_plural = _('Measures') def save(self, *args, **kwargs): if self.vote_start is None: raise IntegrityError("vote_measure.vote_start may not be NULL") if self.vote_end is not None: # Ensure that the measure is open for at least 2 days. if self.vote_end - self.vote_start < datetime.timedelta(days=2): self.vote_end = self.vote_start + datetime.timedelta(days=2) super(Measure, self).save(*args, **kwargs) # Ensures there's a restrictions object to check against in views. 
try: self.restrictions except models.ObjectDoesNotExist: try: Restrictions.objects.create(restricted_to=self) except IntegrityError: # This means that someone built in restrictions at creation time. pass def __unicode__(self): return u"{}".format(self.name) def destroy_user_associations(self): """This method *should* destroy the links between users and their votes.""" for ballot in self.ballot_set.filter(is_secret=True): for vote in ballot.popularityvote_set.all(): vote.vote = None vote.save() for vote in ballot.preferentialvote_set.all(): vote.vote = None vote.save() class Restrictions(models.Model): gradyears = models.ManyToManyField(GradYear, null=True, blank=True, help_text="Only these gradyears will be able to see this measure. If none are selected, visible to all gradyears." ) dorms = models.ManyToManyField(Dorm, null=True, blank=True, help_text="Only residents of these dorms will be able to see this measure. If none are selected, visible to all dorms." ) restricted_to = models.OneToOneField(Measure) class Meta: verbose_name_plural = _('Restrictions') def get_grad_year_users(self): if self.gradyears.all().count() == 0: return User.objects.all() user_ids = self.gradyears.all().values_list('student__user__id', flat=True) return User.objects.filter(id__in=[x for x in user_ids if x is not None]) def get_dorm_users(self): if self.dorms.count() == 0: return User.objects.all() dormrooms = DormRoom.objects.filter(dorm__in=self.dorms.all()) user_ids = UserRoom.objects.filter(room__in=dormrooms).values_list('user__id', flat=True) return User.objects.filter(id__in=user_ids) def __unicode__(self): return u"Restrictions for {}".format(self.restricted_to) class Vote(models.Model): account = models.ForeignKey(User) measure = models.ForeignKey(Measure) class Meta: verbose_name = _('Vote') verbose_name_plural = _('Votes') # Never vote twice. 
                unique_together = (('account', 'measure'),)

    def __unicode__(self):
        return u"{} in #{}-{}".format(self.account, self.measure.id, self.measure.name)


class PopularityVote(models.Model):
    """Represents the most common kind of vote: where each student gets a single vote."""

    # Nullable so the link back to the voter can be severed for secret
    # ballots (see Measure.destroy_user_associations).
    vote = models.ForeignKey(Vote, null=True)
    ballot = models.ForeignKey(Ballot)
    # Nullable/blank candidate — presumably this represents an abstention
    # (Ballot has a can_abstain flag); TODO confirm against the voting views.
    candidate = models.ForeignKey("Candidate", null=True, blank=True)

    class Meta:
        verbose_name = _('PopularityVote')
        verbose_name_plural = _('PopularityVotes')

    def __unicode__(self):
        votee = self.candidate
        return "{} ({}) for {}".format(self.vote, self.ballot, votee)


class PreferentialVote(models.Model):
    """A ranked vote: one row per (ballot, candidate), with the rank in `amount`.

    An amount of 1 is the voter's first choice; Ballot.get_winners seeds
    IRV rounds from the amount=1 rows and follows amount+1 on elimination.
    """

    # Nullable so the link back to the voter can be severed for secret
    # ballots (see Measure.destroy_user_associations).
    vote = models.ForeignKey(Vote, null=True)
    ballot = models.ForeignKey(Ballot)
    candidate = models.ForeignKey("Candidate")
    # Rank of this candidate on the voter's ballot; lower is better, 1 = first choice.
    amount = models.PositiveSmallIntegerField()

    def __unicode__(self):
        return u"{} ({}) ranked {} at {}".format(
            self.vote,
            self.ballot,
            self.candidate.cast(),
            self.amount
        )


class Candidate(models.Model):
    """An abstract candidate, be it a person or a law or funding"""
    ballot = models.ForeignKey(Ballot)
    description = models.TextField(null=True, blank=True)
    # Title is optional; NOTE(review): write-in handling presumably fills it
    # in later — confirm against the ballot submission view.
    title = models.CharField(max_length=200, blank=True, null=True)
    is_write_in = models.BooleanField(default=False)
return source_code def str_visible_len(s): """ :param str s: :return: len without escape chars :rtype: int """ import re # via: https://github.com/chalk/ansi-regex/blob/master/index.js s = re.sub("[\x1b\x9b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]", "", s) return len(s) def add_indent_lines(prefix, s): if not s: return prefix prefix_len = str_visible_len(prefix) lines = s.splitlines(True) return "".join([prefix + lines[0]] + [" " * prefix_len + l for l in lines[1:]]) def get_indent_prefix(s): return s[:len(s) - len(s.lstrip())] def get_same_indent_prefix(lines): if not lines: return "" prefix = get_indent_prefix(lines[0]) if not prefix: return "" if all([l.startswith(prefix) for l in lines]): return prefix return None def remove_indent_lines(s): if not s: return "" lines = s.splitlines(True) prefix = get_same_indent_prefix(lines) if prefix is None: # not in expected format. just lstrip all lines return "".join([l.lstrip() for l in lines]) return "".join([l[len(prefix):] for l in lines]) def replace_tab_indent(s, replace=" "): prefix = get_indent_prefix(s) return prefix.replace("\t", replace) + s[len(prefix):] def replace_tab_indents(s, replace=" "): lines = s.splitlines(True) return "".join([replace_tab_indent(l, replace) for l in lines]) def to_bool(s, fallback=None): """ :param str s: str to be converted to bool, e.g. 
"1", "0", "true", "false" :param T fallback: if s is not recognized as a bool :return: boolean value, or fallback :rtype: bool|T """ if not s: return fallback s = s.lower() if s in ["1", "true", "yes", "y"]: return True if s in ["0", "false", "no", "n"]: return False return fallback class Color: ColorIdxTable = {k: i for (i, k) in enumerate([ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white"])} @classmethod def get_global_color_enabled(cls): return to_bool(os.environ.get("CLICOLOR", ""), fallback=True) def __init__(self, enable=None): """ :param bool|None enable: """ if enable is None: enable = self.get_global_color_enabled() self.enable = enable def color(self, s, color=None, bold=False): """ :param str s: :param str|None color: e.g. "blue" :param bool bold: :return: s optionally wrapped with ansi escape codes :rtype: str """ if not self.enable: return s code_seq = [] if color: code_seq += [30 + self.ColorIdxTable[color]] # foreground color if bold: code_seq += [1] if not code_seq: return s start = "\x1b[%sm" % ";".join(map(str, code_seq)) end = "\x1b[0m" return start + s + end def __call__(self, *args, **kwargs): return self.color(*args, **kwargs) def py_syntax_highlight(self, s): if not self.enable: return s state = 0 spaces = " \t\n" ops = ".,;:+-*/%&!=|(){}[]^<>" i = 0 curtoken = "" color_args = {0: {}, len(s): {}} # type: dict[int,dict[str]] # i -> color kwargs def finish_identifier(): if curtoken in pykeywords: color_args[max([k for k in color_args.keys() if k < i])] = {"color": "blue"} while i < len(s): c = s[i] i += 1 if c == "\n": if state == 3: finish_identifier() color_args[i] = {}; state = 0 elif state == 0: if c in spaces: pass elif c in ops: color_args[i - 1] = {"color": "blue"}; color_args[i] = {} elif c == "#": color_args[i - 1] = {"color": "white"}; state = 6 elif c == '"': color_args[i - 1] = {"color": "cyan"}; state = 1 elif c == "'": color_args[i - 1] = {"color": "cyan"}; state = 2 else: curtoken = c color_args[i - 1] = {} 
state = 3 elif state == 1: # string via " if c == "\\": state = 4 elif c == "\"": color_args[i] = {} state = 0 elif state == 2: # string via ' if c == "\\": state = 5 elif c == "'": color_args[i] = {} state = 0 elif state == 3: # identifier if c in spaces + ops + "#\"'": finish_identifier() color_args[i] = {} state = 0 i -= 1 else: curtoken += c elif state == 4: # escape in " state = 1 elif state == 5: # escape in ' state = 2 elif state == 6: # comment pass if state == 3: finish_identifier() out = "" i = 0 while i < len(s): j = min([k for k in color_args.keys() if k > i]) out += self.color(s[i:j], **color_args[i]) i = j return out def is_at_exit(): """ Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down. :return: whether the Python interpreter is currently in the process of shutting down :rtype: bool """ if _threading_main_thread is not None: if not hasattr(threading, "main_thread"): return True if threading.main_thread() != _threading_main_thread: return True if not _threading_main_thread.is_alive(): return True return False def format_tb(tb=None, limit=None, allLocals=None, allGlobals=None, withTitle=False, with_color=None, with_vars=None): """ :param types.TracebackType|types.FrameType|StackSummary tb: traceback. if None, will use sys._getframe :param int|None limit: limit the traceback to this number of frames. by default, will look at sys.tracebacklimit :param dict[str]|None allLocals: if set, will update it with all locals from all frames :param dict[str]|None allGlobals: if set, will update it with all globals from all frames :param bool withTitle: :param bool|None with_color: output with ANSI escape codes for color :param bool with_vars: will print var content which are referenced in the source code line. by default enabled. 
:return: list of strings (line-based) :rtype: list[str] """ color = Color(enable=with_color) out = [] def output(s1, s2=None, **kwargs): if kwargs: s1 = color(s1, **kwargs) if s2 is not None: s1 = add_indent_lines(s1, s2) out.append(s1 + "\n") def format_filename(s): base = os.path.basename(s) return ( color('"' + s[:-len(base)], "cyan") + color(base, "cyan", bold=True) + color('"', "cyan")) def format_py_obj(obj): return color.py_syntax_highlight(pretty_print(obj)) if tb is None: try: tb = get_current_frame() assert tb except Exception: output(color("format_tb: tb is None and sys._getframe() failed", "red", bold=True)) return out def isstacksummary(_tb): return isinstance(_tb, StackSummary) isframe = inspect.isframe if withTitle: if isframe(tb) or isstacksummary(tb): output(color('Traceback (most recent call first):', "blue")) else: # expect traceback-object (or compatible) output(color('Traceback (most recent call last):', "blue")) if with_vars is None and is_at_exit(): # Better to not show __repr__ of some vars, as this might lead to crashes # when native extensions are involved. with_vars = False if withTitle: output("(Exclude vars because we are exiting.)") if with_vars is None: if any([f.f_code.co_name == "__del__" for f in iter_traceback()]): # __del__ is usually called via the Python garbage collector (GC). # This can happen and very random / non-deterministic places. # There are cases where it is not safe to access some of the vars on the stack # because they might be in a non-well-defined state, thus calling their __repr__ is not safe. # See e.g. 
this bug: # https://github.com/tensorflow/tensorflow/issues/22770 with_vars = False if withTitle: output("(Exclude vars because we are on a GC stack.)") if with_vars is None: with_vars = True try: if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 _tb = tb class NotFound(Exception): pass def _resolve_identifier(namespace, id): if id[0] not in namespace: raise NotFound() obj = namespace[id[0]] for part in id[1:]: obj = getattr(obj, part) return obj def _try_set(old, prefix, func): if old is not None: return old try: return add_indent_lines(prefix, func()) except NotFound: return old except Exception as e: return prefix + "!" + e.__class__.__name__ + ": " + str(e) while _tb is not None and (limit is None or n < limit): if isframe(_tb): f = _tb elif isstacksummary(_tb): if isinstance(_tb[0], ExtendedFrameSummary): f = _tb[0].tb_frame else: f = DummyFrame.from_frame_summary(_tb[0]) else: f = _tb.tb_frame if allLocals is not None: allLocals.update(f.f_locals) if allGlobals is not None: allGlobals.update(f.f_globals) if hasattr(_tb, "tb_lineno"): lineno = _tb.tb_lineno elif isstacksummary(_tb): lineno = _tb[0].lineno else: lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name output("".join([ ' ', color("File ", "blue", bold=True), format_filename(filename), ", ", color("line ", "blue"), color("%d" % lineno, "magenta"), ", ", color("in ", "blue"), name])) if not os.path.isfile(filename): altfn = fallback_findfile(filename) if altfn: output(color(" -- couldn't find file, trying this instead: ", "blue") + format_filename(altfn)) filename = altfn source_code = get_source_code(filename, lineno, f.f_globals) if source_code: source_code = remove_indent_lines(replace_tab_indents(source_code)).rstrip() output(" line: ", color.py_syntax_highlight(source_code), color="blue") if not with_vars: pass elif isinstance(f, DummyFrame) and not f.have_vars_available: pass else: output(color(' locals:', "blue")) alreadyPrintedLocals 
= set() for tokenstr in grep_full_py_identifiers(parse_py_statement(source_code)): splittedtoken = tuple(tokenstr.split(".")) for token in [splittedtoken[0:i] for i in range(1, len(splittedtoken) + 1)]: if token in alreadyPrintedLocals: continue tokenvalue = None tokenvalue =
class MaxBoxGen():

    # NOTE(review): defined without self/@staticmethod, and the module-level
    # helpers below call findMaxRect() as a bare name — a bare-name lookup
    # will not find a class attribute.  Verify how this is actually invoked.
    def findMaxRect(data):
        """Largest rectangle of cells != 1 in a 2-D array.
        http://stackoverflow.com/a/30418912/5008845

        Returns (area, [(r0, c0, r1, c1)]) for the best rectangle found;
        cells equal to 1 (``skip``) are treated as blocked.
        """

        nrows, ncols = data.shape
        # w[r][c] / h[r][c]: length of the run of free cells ending at
        # (r, c), horizontally and vertically respectively.
        w = np.zeros(dtype=int, shape=data.shape)
        h = np.zeros(dtype=int, shape=data.shape)
        skip = 1
        area_max = (0, [])
        for r in range(nrows):
            for c in range(ncols):
                if data[r][c] == skip:
                    continue
                if r == 0:
                    h[r][c] = 1
                else:
                    h[r][c] = h[r - 1][c] + 1
                if c == 0:
                    w[r][c] = 1
                else:
                    w[r][c] = w[r][c - 1] + 1
                minw = w[r][c]
                # For each candidate height ending at (r, c), the rectangle
                # width is limited by the narrowest row within that span.
                for dh in range(h[r][c]):
                    minw = min(minw, w[r - dh][c])
                    area = (dh + 1) * minw
                    if area > area_max[0]:
                        area_max = (area, [(r - dh, c - minw + 1, r, c)])
        return area_max


########################################################################
def residual(angle, data):
    # Cost function for the optimizer: rotate `data` by `angle` and return
    # the inverse of the largest free rectangle's area (smaller = better).
    # NOTE(review): int(angle) truncates fractional angles here, unlike
    # get_rectangle_coord below — confirm this is intended.
    nx, ny = data.shape
    M = cv2.getRotationMatrix2D(((nx - 1) / 2, (ny - 1) / 2), int(angle), 1)
    RotData = cv2.warpAffine(
        data, M, (nx, ny), flags=cv2.INTER_NEAREST, borderValue=1
    )
    rectangle = findMaxRect(RotData)
    return 1.0 / rectangle[0]


########################################################################
def residual_star(args):
    # Unpacking shim so multiprocessing.Pool.map can call residual(angle, data).
    return residual(*args)


########################################################################
def get_rectangle_coord(angle, data, flag_out=None):
    # Rotate `data` by `angle` (no truncation here) and return the best
    # rectangle's corner tuple plus the rotation matrix; when flag_out is
    # truthy, also return the rotated image.
    nx, ny = data.shape
    M = cv2.getRotationMatrix2D(((nx - 1) / 2, (ny - 1) / 2), angle, 1)
    RotData = cv2.warpAffine(
        data, M, (nx, ny), flags=cv2.INTER_NEAREST, borderValue=1
    )
    rectangle = findMaxRect(RotData)
    if flag_out:
        return rectangle[1][0], M, RotData
    else:
        return rectangle[1][0], M


########################################################################
def findRotMaxRect(
    data_in,
    flag_opt=False,
    flag_parallel=False,
    nbre_angle=10,
    flag_out=None,
    flag_enlarge_img=False,
    limit_image_size=300,
):
    """
    flag_opt     : True only nbre_angle are tested between 90 and 180
                        and a opt descent algo is run on the best fit
                   False 100 angle are tested from 90 to 180.
    flag_parallel: only valid when flag_opt=False.
the 100 angle are run on multithreading flag_out : angle and rectangle of the rotated image are output together with the rectangle of the original image flag_enlarge_img : the image used in the function is double of the size of the original to ensure all feature stay in when rotated limit_image_size : control the size numbre of pixel of the image use in the function. this speeds up the code but can give approximated results if the shape is not simple """ # time_s = datetime.datetime.now() # make the image square # ---------------- nx_in, ny_in = data_in.shape if nx_in != ny_in: n = max([nx_in, ny_in]) data_square = np.ones([n, n]) xshift = int((n - nx_in) / 2) yshift = int((n - ny_in) / 2) if yshift == 0: data_square[xshift: (xshift + nx_in), :] = data_in[:, :] else: data_square[:, yshift: yshift + ny_in] = data_in[:, :] else: xshift = 0 yshift = 0 data_square = data_in # apply scale factor if image bigger than limit_image_size # ---------------- if data_square.shape[0] > limit_image_size: data_small = cv2.resize( data_square, (limit_image_size, limit_image_size), interpolation=0 ) scale_factor = 1.0 * data_square.shape[0] / data_small.shape[0] else: data_small = data_square scale_factor = 1 # set the input data with an odd number of point in each dimension to make rotation easier # ---------------- nx, ny = data_small.shape nx_extra = -nx ny_extra = -ny if nx % 2 == 0: nx += 1 nx_extra = 1 if ny % 2 == 0: ny += 1 ny_extra = 1 data_odd = np.ones( [ data_small.shape[0] + max([0, nx_extra]), data_small.shape[1] + max([0, ny_extra]), ] ) data_odd[:-nx_extra, :-ny_extra] = data_small nx, ny = data_odd.shape nx_odd, ny_odd = data_odd.shape if flag_enlarge_img: data = ( np.zeros([2 * data_odd.shape[0] + 1, 2 * data_odd.shape[1] + 1]) + 1 ) nx, ny = data.shape data[ nx / 2 - nx_odd / 2: nx / 2 + nx_odd / 2, ny / 2 - ny_odd / 2: ny / 2 + ny_odd / 2, ] = data_odd else: data = np.copy(data_odd) nx, ny = data.shape # print((datetime.datetime.now()-time_s).total_seconds() if 
flag_opt: myranges_brute = [ (90.0, 180.0), ] coeff0 = np.array( [ 0.0, ] ) coeff1 = optimize.brute( residual, myranges_brute, args=(data,), Ns=nbre_angle, finish=None ) popt = optimize.fmin( residual, coeff1, args=(data,), xtol=5, ftol=1.0e-5, disp=False ) angle_selected = popt[0] else: rotation_angle = np.linspace(90, 180, 100 + 1)[:-1] args_here = [] for angle in rotation_angle: args_here.append([angle, data]) if flag_parallel: # set up a pool to run the parallel processing cpus = multiprocessing.cpu_count() pool = multiprocessing.Pool(processes=cpus) # then the map method of pool actually does the parallelisation results = pool.map(residual_star, args_here) pool.close() pool.join() else: results = [] for arg in args_here: results.append(residual_star(arg)) argmin = np.array(results).argmin() angle_selected = args_here[argmin][0] rectangle, M_rect_max, RotData = get_rectangle_coord( angle_selected, data, flag_out=True ) M_invert = cv2.invertAffineTransform(M_rect_max) rect_coord = [ rectangle[:2], [rectangle[0], rectangle[3]], rectangle[2:], [rectangle[2], rectangle[1]], ] rect_coord_ori = [] for coord in rect_coord: rect_coord_ori.append( np.dot(M_invert, [coord[0], (ny - 1) - coord[1], 1]) ) # transform to numpy coord of input image coord_out = [] for coord in rect_coord_ori: coord_out.append( [ scale_factor * round(coord[0] - (nx / 2 - nx_odd / 2), 0) - xshift, scale_factor * round((ny - 1) - coord[1] - (ny / 2 - ny_odd / 2), 0) - yshift, ] ) coord_out_rot = [] coord_out_rot_h = [] for coord in rect_coord: coord_out_rot.append( [ scale_factor * round(coord[0] - (nx / 2 - nx_odd / 2), 0) - xshift, scale_factor * round(coord[1] - (ny / 2 - ny_odd / 2), 0) - yshift, ] ) coord_out_rot_h.append( [ scale_factor * round(coord[0] - (nx / 2 - nx_odd / 2), 0), scale_factor * round(coord[1] - (ny / 2 - ny_odd / 2), 0), ] ) if flag_out is None: return coord_out elif flag_out == "rotation": return coord_out, angle_selected, coord_out_rot else: print("bad def in 
findRotMaxRect input. stop")
        pdb.set_trace()


######################################################
def factors(n):
    # All divisors of n: gather (i, n // i) pairs for every i up to sqrt(n),
    # flatten with reduce, and de-duplicate via the set (handles perfect squares).
    return set(
        reduce(
            list.__add__,
            ([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0),
        )
    )


# test scale poly
def scale_polygon(path, offset):
    """Push each vertex of ``path`` ``offset`` away from the centroid,
    independently per axis, and return the result.

    NOTE(review): ``path_temp = path`` does not copy — the input list is
    mutated in place and also returned; confirm callers expect that.
    """
    center = centroid_of_polygon(path)
    path_temp = path
    for i in path_temp:
        if i[0] > center[0]:
            i[0] += offset
        else:
            i[0] -= offset
        if i[1] > center[1]:
            i[1] += offset
        else:
            i[1] -= offset
    return path_temp


def area_of_polygon(x, y):
    """Calculates the signed area of an arbitrary polygon given its verticies
    http://stackoverflow.com/a/4682656/190597 (Joe Kington)
    http://softsurfer.com/Archive/algorithm_0101/algorithm_0101.htm#2D%20Polygons
    """
    # Shoelace formula; starting at -1 wraps the last vertex around to the first.
    area = 0.0
    for i in range(-1, len(x) - 1):
        area += x[i] * (y[i + 1] - y[i - 1])
    return area / 2.0


def centroid_of_polygon(points):
    """Centroid of a (non-self-intersecting) polygon.
    http://stackoverflow.com/a/14115494/190597 (mgamba)
    """
    area = area_of_polygon(*zip(*points))
    result_x = 0
    result_y = 0
    N = len(points)
    # Cycle lets the loop wrap from the last vertex back to the first.
    # NOTE(review): `IT` is presumably an itertools alias — confirm the import.
    points = IT.cycle(points)
    x1, y1 = next(points)
    for i in range(N):
        x0, y0 = x1, y1
        x1, y1 = next(points)
        cross = (x0 * y1) - (x1 * y0)
        result_x += (x0 + x1) * cross
        result_y += (y0 + y1) * cross
    result_x /= (area * 6.0)
    result_y /= (area * 6.0)
    return (result_x, result_y)


def perimiter(points):
    """
    returns the length of the perimiter of some shape defined by a list of points

    Also returns the shortest edge as a second value, so the return is
    actually the tuple (perimeter_length, min_edge_length).
    """
    distances = get_distances(points)
    width=min(distances)
    length = 0
    for distance in distances:
        length = length + distance
    return length, width


def get_distances(points):
    """
    convert a list of points into a list of distances

    Each entry is the edge length from points[i] to points[i + 1],
    with the final entry wrapping back to points[0].
    """
    i = 0
    distances = []
    for i in range(len(points)):
        point = points[i]
        if i + 1 < len(points):
            next_point = points[i + 1]
        else:
            next_point = points[0]
        x0 = point[0]
        y0 = point[1]
        x1 = next_point[0]
        y1 = next_point[1]
        point_distance = get_distance(x0, y0, x1, y1)
        distances.append(point_distance)
    return distances


def get_distance(x0, y0, x1, y1):
    """
    use pythagorean theorm to find distance between 2 points
""" a = x1 - x0 b = y1 - y0 c_2 = a * a + b * b return c_2 ** (.5) def random_coord(origin_coord, threshold): new_coord = origin_coord points = [] for row in origin_coord: x = row[0] y = row[1] points.append((float(x), float(y))) peri, width = perimiter(points) threshold *= peri if threshold >= width/2: threshold = math.floor(width/2) print(peri, width, threshold) #x1y1-top left new_coord[0][0]=random.uniform(origin_coord[0][0], origin_coord[0][0]+threshold) new_coord[0][1]=random.uniform(origin_coord[0][1]-threshold, origin_coord[0][1]) # x2y2-top right new_coord[1][0] = random.uniform(origin_coord[1][0] - threshold, origin_coord[1][0]) new_coord[1][1] = random.uniform(origin_coord[1][1] - threshold, origin_coord[1][1]) # x3y3-bottom right new_coord[2][0] = random.uniform(origin_coord[2][0] - threshold, origin_coord[2][0]) new_coord[2][1] = random.uniform(origin_coord[2][1], origin_coord[2][1] + threshold) # x4y4-bottom left new_coord[3][0] = random.uniform(origin_coord[3][0], origin_coord[3][0] + threshold) new_coord[3][1] = random.uniform(origin_coord[3][1], origin_coord[3][1]
# coding: utf-8 """ Scubawhere API Documentation This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API OpenAPI spec version: 1.0.0 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import import sys import os import re # python 2 and python 3 compatibility library from six import iteritems from ..configuration import Configuration from ..api_client import ApiClient class CustomerApi(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): config = Configuration() if api_client: self.api_client = api_client else: if not config.api_client: config.api_client = ApiClient() self.api_client = config.api_client def create_customer(self, email, firstname, lastname, **kwargs): """ Create a new customer This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_customer(email, firstname, lastname, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str email: (required) :param str firstname: (required) :param str lastname: (required) :param date birthday: :param int gender: :param str address_1: :param str address_2: :param str city: :param str county: :param str postcode: :param int country_id: :param str phone: :param date last_dive: :param int number_of_dives: :param str chest_size: :param str show_size: :param str height: :param list[int] certificates: :return: InlineResponse20029 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.create_customer_with_http_info(email, firstname, lastname, **kwargs) else: (data) = self.create_customer_with_http_info(email, firstname, lastname, **kwargs) return data def create_customer_with_http_info(self, email, firstname, lastname, **kwargs): """ Create a new customer This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.create_customer_with_http_info(email, firstname, lastname, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param str email: (required) :param str firstname: (required) :param str lastname: (required) :param date birthday: :param int gender: :param str address_1: :param str address_2: :param str city: :param str county: :param str postcode: :param int country_id: :param str phone: :param date last_dive: :param int number_of_dives: :param str chest_size: :param str show_size: :param str height: :param list[int] certificates: :return: InlineResponse20029 If the method is called asynchronously, returns the request thread. """ all_params = ['email', 'firstname', 'lastname', 'birthday', 'gender', 'address_1', 'address_2', 'city', 'county', 'postcode', 'country_id', 'phone', 'last_dive', 'number_of_dives', 'chest_size', 'show_size', 'height', 'certificates'] all_params.append('callback') all_params.append('_return_http_data_only') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_customer" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'email' is set if ('email' not in params) or (params['email'] is None): raise ValueError("Missing the required parameter `email` when calling `create_customer`") # verify the required parameter 'firstname' is set if ('firstname' not in params) or (params['firstname'] is None): raise ValueError("Missing the required parameter `firstname` when calling `create_customer`") # verify the required parameter 'lastname' is set if ('lastname' not in params) or (params['lastname'] is None): raise ValueError("Missing the required parameter `lastname` when calling `create_customer`") resource_path = '/customer/add'.replace('{format}', 'json') path_params = {} query_params = {} if 'email' in params: query_params['email'] = params['email'] if 'firstname' in params: query_params['firstname'] = params['firstname'] if 'lastname' in params: query_params['lastname'] = params['lastname'] if 'birthday' in 
def delete_customer(self, id, **kwargs):
    """
    Delete a customer by ID.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_customer(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :return: InlineResponse2003
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the HTTP envelope and
    # returns only the deserialized data.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        # Asynchronous path: the *_with_http_info call returns the
        # request thread; the result is delivered to the callback.
        return self.delete_customer_with_http_info(id, **kwargs)
    # Synchronous path: return the response data directly.
    # (The original generated code bound this through a pointlessly
    # parenthesized `(data) = ...` assignment.)
    return self.delete_customer_with_http_info(id, **kwargs)

def delete_customer_with_http_info(self, id, **kwargs):
    """
    Delete a customer by ID.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.delete_customer_with_http_info(id, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int id: (required)
    :return: InlineResponse2003
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: if an unexpected keyword argument is supplied.
    :raises ValueError: if the required `id` parameter is None.
    """
    all_params = ['id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Snapshot of the call arguments (self, id, kwargs); extra keyword
    # arguments are validated against the whitelist before being merged.
    params = locals()
    # Py3 fix: the generated code iterated with six's iteritems();
    # plain dict.items() is the equivalent Python 3 form.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_customer" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_customer`")

    resource_path = '/customer/delete'.replace('{format}', 'json')
    path_params = {}

    query_params = {}
    if 'id' in params:
        query_params['id'] = params['id']

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept` — dropped entirely if the client produces
    # no acceptable value.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting (none for this endpoint)
    auth_settings = []

    return self.api_client.call_api(resource_path, 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='InlineResponse2003',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'))
(optional) :param int id: (required) :param str email: :param str firstname: :param str lastname: :param date birthday: :param int gender: :param str address_1: :param str address_2: :param str city: :param str county: :param str postcode: :param int country_id: :param str phone: :param date last_dive: :param int number_of_dives: :param str chest_size: :param str show_size: :param str height: :param list[int] certificates: :return: InlineResponse20030 If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'):
-11.666598888071467, '特种兵': -12.359746068631413, '沮丧': -11.666598888071467, '赌输': -11.666598888071467, '移交': -10.567986599403358, '水产': -10.567986599403358, '勇': -12.359746068631413, '祖国': -9.5871573463916331, '女双': -11.666598888071467, '民乐团': -12.359746068631413, '导游': -7.7953978771635777, '自由行': -10.280304526951578, '具': -10.413835919576099, '行辕': -10.567986599403358, '关心': -12.359746068631413, '嘎': -11.666598888071467, '隋': -12.359746068631413, '女房东': -12.359746068631413, '化学品': -10.750308156197313, '灵秀': -12.359746068631413, '炒青': -11.261133779963304, '四方': -10.973451707511522, '更': -8.1112508265820544, '教谕': -12.359746068631413, '哈姆雷特': -12.359746068631413, '示爱': -12.359746068631413, '国债': -12.359746068631413, '犯下': -8.8940101658316877, '开幕式': -12.359746068631413, '现已': -10.162521491295195, '常设': -10.973451707511522, '名将': -8.3894541550792923, '大城市': -10.973451707511522, '身亡': -11.261133779963304, '盲童': -11.666598888071467, '格斗场': -12.359746068631413, '无疑': -10.280304526951578, '老街': -10.750308156197313, '上市': -8.3166948007968635, '雅图': -11.261133779963304, '慢慢': -11.666598888071467, '要求': -8.4679257705207878, '攻': -12.359746068631413, '比华利山': -12.359746068631413, '文': -9.5265327245751976, '节前': -10.973451707511522, '泽雅': -12.359746068631413, '残障': -12.359746068631413, '高水平': -10.413835919576099, '进口商': -11.261133779963304, '污染物': -10.567986599403358, '被扣': -10.567986599403358, '法界': -11.666598888071467, '得知': -10.750308156197313, '生态村': -12.359746068631413, '智能': -10.973451707511522, '战功': -12.359746068631413, '边境': -8.6220764503480449, '适应': -12.359746068631413, '经常': -10.057160975637368, '评选': -11.261133779963304, '拿': -10.057160975637368, '环境': -7.9902982161643923, '二手': -9.5871573463916331, '快': -10.567986599403358, '官方': -6.7837969654850978, '太极': -12.359746068631413, '传教士': -10.057160975637368, '东西': -9.720688739016154, '助学': -10.567986599403358, '时代': -8.3707620220671384, '斜塔': -12.359746068631413, '后花园': -12.359746068631413, '重估': -12.359746068631413, 
'本': -9.1016495306099312, '海螺': -12.359746068631413, '联队': -10.280304526951578, '厚厚的': -12.359746068631413, '挂职': -10.567986599403358, '灌溉': -12.359746068631413, '教改': -11.261133779963304, '查干湖': -12.359746068631413, '育儿': -12.359746068631413, '垦区': -12.359746068631413, '新生儿': -12.359746068631413, '建筑师': -11.666598888071467, '狠抓': -11.666598888071467, '版图': -9.3640137950774225, '搜查': -12.359746068631413, '调集': -11.666598888071467, '药材': -11.666598888071467, ')': -5.8675062336109418, '生源毕业生': -9.5265327245751976, '久留': -9.6516958675292042, '做完': -11.666598888071467, '复建': -10.973451707511522, '异地': -10.750308156197313, '专题': -12.359746068631413, '金光': -12.359746068631413, '金孔雀': -11.666598888071467, '卡巴斯基': -10.973451707511522, '明知': -11.666598888071467, '伐木': -12.359746068631413, '断流': -11.261133779963304, '育': -11.666598888071467, '蔡': -11.666598888071467, '户籍': -8.0830799496153585, '农业': -8.8333855440152522, '远': -10.973451707511522, '起草': -10.413835919576099, '两市': -8.5530835788610933, '现': -9.1408702437632119, '租赁': -12.359746068631413, '内阁总理': -11.666598888071467, '从': -7.3491107745351583, '金小丑': -11.666598888071467, '私人': -10.280304526951578, '小洋': -12.359746068631413, '存钱': -12.359746068631413, '祥': -10.973451707511522, '体育报': -11.666598888071467, '开拍': -12.359746068631413, '印象': -9.874839418843413, '滑雪者': -11.666598888071467, '威根': -12.359746068631413, '城市化': -11.666598888071467, '达扎寺': -9.720688739016154, '推举': -12.359746068631413, '岳麓山': -11.261133779963304, '华': -9.2687036152730968, '温泉': -10.567986599403358, '痛诉': -11.261133779963304, '重点': -7.9652969139589747, '宰恩': -12.359746068631413, '巡回赛': -12.359746068631413, '老姑娘': -12.359746068631413, '师生': -12.359746068631413, '有钱人': -11.261133779963304, '卡纳科纳': -11.666598888071467, '住房': -8.4279204359070885, '唯美': -12.359746068631413, '杂记': -12.359746068631413, '特派': -9.6516958675292042, '启幕': -11.666598888071467, '党': -9.1408702437632119, '全年': -8.4279204359070885, '普通人': -10.750308156197313, '光谷': 
-11.666598888071467, '圆': -12.359746068631413, '超高': -10.162521491295195, '苗庄': -12.359746068631413, '现有': -8.2488722044581024, '装点': -12.359746068631413, '管辖区': -12.359746068631413, '文殊坊': -11.666598888071467, '冰上': -10.973451707511522, '古街': -12.359746068631413, '结缘': -10.567986599403358, '羊腿': -12.359746068631413, '战机': -7.785035090128031, '柯伦': -11.666598888071467, '憋憋': -11.261133779963304, '年夜饭': -12.359746068631413, '临邛': -12.359746068631413, '基地': -8.3894541550792923, '搞起': -12.359746068631413, '威斯康星': -12.359746068631413, '地方性': -12.359746068631413, '古典': -10.413835919576099, '但': -10.973451707511522, '五德': -12.359746068631413, '兰花': -10.973451707511522, '漂洗': -11.666598888071467, '足球场': -11.261133779963304, '怀俄明': -12.359746068631413, '来自': -12.359746068631413, '熬': -12.359746068631413, '大伙房': -12.359746068631413, '纳粹党': -12.359746068631413, '游轮': -12.359746068631413, '便宜': -11.261133779963304, '短尾猴': -12.359746068631413, '追': -12.359746068631413, '先驱报': -11.261133779963304, '方兴未艾': -12.359746068631413, '终': -10.973451707511522, '官房长官': -9.3640137950774225, '周转房': -11.666598888071467, '亚特拉': -11.666598888071467, '商贸': -10.567986599403358, '危化品': -11.666598888071467, '共商': -12.359746068631413, '孝子': -10.973451707511522, '北部': -6.3808603037302918, '丽': -11.261133779963304, '塬': -12.359746068631413, '受害者': -11.261133779963304, '嘉': -10.413835919576099, '镇山': -12.359746068631413, '遍布': -11.261133779963304, '手术': -12.359746068631413, '报到': -10.973451707511522, '横穿': -12.359746068631413, '千代田区': -11.261133779963304, '不再': -9.5265327245751976, '城建': -9.1816922382834676, '不够': -12.359746068631413, '夜市': -9.874839418843413, '可移动': -11.666598888071467, '机': -10.750308156197313, '权威': -10.413835919576099, '打交道': -10.280304526951578, '人妻': -10.567986599403358, '水源': -10.413835919576099, '国王': -9.1016495306099312, '传到': -12.359746068631413, '该': -9.3152236309079903, '揩': -12.359746068631413, '新影联': -11.666598888071467, '乐迷': -10.973451707511522, '植被': 
-11.261133779963304, '东': -6.7875920364536491, '信托': -11.666598888071467, '原则上': -11.666598888071467, '长陵': -12.359746068631413, '宣恩': -12.359746068631413, '返乡': -10.750308156197313, '检': -8.334394377896265, '辉山': -11.666598888071467, '房间': -12.359746068631413, '后期': -11.666598888071467, '龙腾': -12.359746068631413, '網友': -12.359746068631413, '放映': -11.666598888071467, '进步': -10.973451707511522, '不可': -11.666598888071467, '夫妻': -11.261133779963304, '一日游': -8.5530835788610933, '固': -11.666598888071467, '石': -10.280304526951578, '焕然一新': -12.359746068631413, '钻开': -12.359746068631413, '淀粉': -10.973451707511522, '茂': -12.359746068631413, '影帝': -10.413835919576099, '藤隆': -12.359746068631413, '奓山': -9.5265327245751976, '巨富': -11.666598888071467, '夺宝': -12.359746068631413, '春芽': -12.359746068631413, '乘客': -9.9618507958330422, '样本': -12.359746068631413, '烧鸭': -12.359746068631413, '末年': -12.359746068631413, '秋': -12.359746068631413, '要玩': -12.359746068631413, '民主': -9.720688739016154, '高层次': -11.666598888071467, '围圈': -11.666598888071467, '傍': -12.359746068631413, '大幅让利': -11.666598888071467, '坠下': -11.666598888071467, '交行': -11.666598888071467, '礼宾': -12.359746068631413, '起初': -12.359746068631413, '迎新春': -11.261133779963304, '单个': -10.973451707511522, '差远': -11.666598888071467, '同仁堂': -12.359746068631413, '疯': -10.567986599403358, '详细': -11.261133779963304, '最高档': -12.359746068631413, '站': -7.4544712901929842, '地域': -9.3640137950774225, '自诩': -10.973451707511522, '系统化': -12.359746068631413, '道署': -12.359746068631413, '桥': -8.5755564347131532, '组建': -9.5265327245751976, '香烟': -11.666598888071467, '区位': -10.567986599403358, '寸步难行': -10.280304526951578, '不折不扣': -12.359746068631413, '男': -8.9585486869692588, '侨乡': -12.359746068631413, '凭吊': -10.750308156197313, '构想': -12.359746068631413, '军事法庭': -10.973451707511522, '转播': -12.359746068631413, '竹': -11.261133779963304, '淘汰': -10.973451707511522, '纯': -10.973451707511522, '周围': -10.280304526951578, '投产': -11.666598888071467, '停建': 
-10.567986599403358, '舆情': -12.359746068631413, '牌': -8.9924502386449401, '寸滩港': -12.359746068631413, '单反': -12.359746068631413, '姊妹': -12.359746068631413, '牛角': -12.359746068631413, '内衣': -11.666598888071467, '省政府': -12.359746068631413, '嫡系': -12.359746068631413, '合作者': -12.359746068631413, '文人': -9.3152236309079903, '板块构造': -12.359746068631413, '神经内科': -12.359746068631413, '卓越': -11.261133779963304, '擦背': -11.261133779963304, '发难': -11.666598888071467, '专柜': -10.162521491295195, '弗莱龙': -12.359746068631413, '坐落': -10.567986599403358, '独立': -8.8043980071419998, '沃尔玛': -10.567986599403358, '鹈鹕': -12.359746068631413, '洪沟': -12.359746068631413, '士人': -12.359746068631413, '乡村游': -10.413835919576099, '洋人': -12.359746068631413, '中心论': -12.359746068631413, '宅邸': -12.359746068631413, '优惠': -11.666598888071467, '鸿福': -12.359746068631413, '同龄人': -12.359746068631413, '黄牌': -11.666598888071467, '吸烟者': -11.666598888071467, '坐船': -10.973451707511522, '圆圈': -12.359746068631413, '大象': -11.261133779963304, '系列': -12.359746068631413, '古文明': -12.359746068631413, '五星街': -12.359746068631413, '富平': -7.9652969139589747, '定制': -9.874839418843413, '核力量': -12.359746068631413, '热线': -11.666598888071467, '回来': -8.9585486869692588, '一旦': -10.973451707511522, '东三省': -12.359746068631413, '漂': -11.666598888071467, '一侧': -9.6516958675292042, '公路桥': -11.261133779963304, '截然不同': -12.359746068631413, '卡斯县': -12.359746068631413, '一跃': -12.359746068631413, '相提并论': -12.359746068631413, '出入': -12.359746068631413, '一楼': -10.750308156197313, '盐步': -12.359746068631413, '展馆': -12.359746068631413, '贩毒': -11.261133779963304, '虫草': -12.359746068631413, '火山群': -12.359746068631413, '熟女': -12.359746068631413, '胜': -8.9924502386449401, '书记': -9.2242518527022632, '新龙': -12.359746068631413, '之外': -8.4679257705207878, '各族': -10.567986599403358, '国树': -12.359746068631413, '篮网': -11.261133779963304, '篇': -9.874839418843413, '驰名': -12.359746068631413, '超级计算机': -12.359746068631413, '有关': -8.0159406467777305, '贯穿': 
-12.359746068631413, '回答': -10.057160975637368, '暨': -11.261133779963304, '破局': -10.973451707511522, '有着': -8.3894541550792923, '水土': -11.666598888071467, '柳叶': -12.359746068631413, '德比战': -12.359746068631413, '派': -9.5871573463916331, '十佳': -9.874839418843413, '彩绘': -12.359746068631413, '新彩': -10.973451707511522, '狗仔': -11.261133779963304, '综合国力': -12.359746068631413, '门': -8.8333855440152522, '阵雪': -12.359746068631413, '叫': -11.261133779963304, '小': -8.2166113422398812, '海空': -10.750308156197313, '走到': -11.261133779963304, '遗体': -12.359746068631413, '婚礼': -8.3894541550792923, '慢活': -12.359746068631413, '查看': -11.666598888071467, '森林': -8.9585486869692588, '蟹黄汤包': -11.666598888071467, '气息': -10.750308156197313, '香醋': -12.359746068631413, '试点': -9.0275415584562104, '作品': -10.162521491295195, '讲究': -11.261133779963304, '山': -9.5871573463916331, '民政': -9.0639092026270838, '相媲美': -12.359746068631413, '资格赛': -12.359746068631413, '双向': -11.666598888071467, '官场': -9.2687036152730968, '京东方': -11.666598888071467, '现场会': -11.666598888071467, '四合院': -10.567986599403358, '能力': -10.750308156197313, '全': -9.6516958675292042, '动车段': -10.567986599403358, '园博园': -12.359746068631413, '提档': -12.359746068631413, '游览': -10.750308156197313, '学前教育': -11.261133779963304, '病逝': -9.2242518527022632, '类似': -10.280304526951578, '民盟': -11.261133779963304, '跑堂': -9.6516958675292042, '吴家营': -12.359746068631413, '饭店': -12.359746068631413, '专场': -12.359746068631413, '篮球': -9.0275415584562104, '有利': -10.567986599403358, '开播': -10.750308156197313, '大红门': -11.261133779963304, '限定': -11.261133779963304, '这段': -9.720688739016154, '正气': -12.359746068631413, '三貂角': -11.666598888071467, '瓜鲁雅': -11.261133779963304, '花园': -9.5871573463916331, '照样': -11.666598888071467, '特使': -10.973451707511522, '土门': -12.359746068631413, '藏胞': -11.666598888071467, '反政变': -12.359746068631413, '高安屯': -12.359746068631413, '武官': -10.973451707511522, '微软创始人比尔·盖茨': -11.666598888071467, '往往': -10.750308156197313, '筹备': 
-10.280304526951578, '大包子': -10.413835919576099, '向上': -12.359746068631413, '心中': -11.261133779963304, '新法': -10.973451707511522, '奥': -10.567986599403358, '紧急': -10.973451707511522, '双山': -12.359746068631413, '相会': -10.973451707511522, '新生代': -10.162521491295195, '莫宁顿': -12.359746068631413, '香炉': -11.666598888071467, '以内': -9.0275415584562104, '神话': -10.973451707511522, '追星': -10.280304526951578, '电量': -11.666598888071467, '头条': -12.359746068631413, '大区': -12.359746068631413, '定为': -10.973451707511522, '张姓': -11.261133779963304, '卡拉': -12.359746068631413, '游览车': -12.359746068631413, '国际象棋': -12.359746068631413, '装备': -9.6516958675292042, '深入人心': -12.359746068631413, '村组': -12.359746068631413, '古琴台': -12.359746068631413, '排': -8.8632385071649331, '外围': -10.413835919576099, '化学武器': -8.8043980071419998, '安特尼亚': -11.666598888071467, '省份': -11.666598888071467, '摆': -10.750308156197313, '降至': -10.973451707511522, '星官': -12.359746068631413, '涉': -9.3152236309079903, '孟': -12.359746068631413, '保信': -11.261133779963304, '大事记': -12.359746068631413, '泉港': -12.359746068631413, '全方位': -12.359746068631413, '学府路': -10.750308156197313, '副科级': -11.666598888071467, '彩': -10.973451707511522, '稀土': -10.280304526951578, '温馨': -11.261133779963304, '受理': -12.359746068631413, '海盗': -10.567986599403358, '战斗舰': -11.666598888071467, '落实': -9.4153070894649726, '范围内': -8.6961844225017675, '反潜': -10.973451707511522, '商管': -10.973451707511522, '主办': -10.162521491295195, '司门口': -12.359746068631413, '进口': -8.598545952937851, '阳春': -12.359746068631413, '产子': -11.261133779963304, '皇后区': -12.359746068631413, '核导弹': -9.2687036152730968, '新政': -12.359746068631413, '面板': -11.666598888071467, '窃取': -12.359746068631413, '一期': -9.5871573463916331, '积雪': -11.666598888071467, '奥林匹克': -12.359746068631413, '陪': -9.5265327245751976, '大十字': -12.359746068631413, '玉石': -11.666598888071467, '有点': -10.413835919576099, '血库': -10.973451707511522, '工笔': -12.359746068631413, '下雪天': -11.261133779963304, '运动': 
-10.973451707511522, '补贴款': -11.261133779963304, '名城': -9.5871573463916331, '试行': -10.162521491295195, '德马雷斯特皮德蒙特': -11.666598888071467, '常年': -10.750308156197313, '感谢': -12.359746068631413, '捧': -10.280304526951578, '阴郁': -12.359746068631413, '招录': -12.359746068631413, '竞争': -12.359746068631413, '样式': -12.359746068631413, '泰坦尼克': -11.666598888071467, '运行': -11.261133779963304, '警察': -7.4999336642697418, '排除': -11.261133779963304, '百步亭': -12.359746068631413, '妹': -10.973451707511522, '中长跑': -12.359746068631413, '规范': -12.359746068631413, '相似': -9.9618507958330422, '梅花': -12.359746068631413, '另有': -12.359746068631413, '失和': -11.261133779963304, '里': -7.7057857184738898, '创': -12.359746068631413, '泉城路': -9.9618507958330422, '压力': -10.280304526951578, '长': -7.6776148415071939, '科技城': -10.750308156197313, '立案': -11.666598888071467, '动员': -12.359746068631413, '氛围': -11.666598888071467, '修': -11.666598888071467, '动力煤': -10.280304526951578, '女皇': -12.359746068631413, '空港': -10.973451707511522, '领跑': -11.666598888071467, '学说': -12.359746068631413, '避风': -11.666598888071467, '蛋蛋': -12.359746068631413, '合计': -11.666598888071467, '岷江': -12.359746068631413, '议长': -12.359746068631413, '供职': -12.359746068631413, '月坛': -12.359746068631413, '贪污': -12.359746068631413, '部族': -10.750308156197313, '争议性': -11.666598888071467, '年终奖': -10.973451707511522, '虚拟': -12.359746068631413, '集中': -9.1016495306099312, '它': -11.261133779963304, '白马': -10.567986599403358, '恰逢': -12.359746068631413, '常驻': -9.1816922382834676, '拼凑': -12.359746068631413, '那里': -10.973451707511522, '舞者': -12.359746068631413, '女孩儿': -11.666598888071467, '原创力': -12.359746068631413, '训练': -8.9257588641462675, '岩美町': -12.359746068631413, '承受': -10.973451707511522, '借助': -11.666598888071467, '观音庙': -12.359746068631413, '非营利': -10.057160975637368, '转角': -11.666598888071467, '菜园坝': -10.973451707511522, '水貂': -12.359746068631413, '省市': -10.973451707511522, '和平': -7.9777194339575317, '企业主': -12.359746068631413, '籽种': 
-12.359746068631413, '御史': -12.359746068631413, '交巡警': -10.973451707511522, '挤得': -10.162521491295195, '始发': -8.5311046721423178, '翡翠台': -11.666598888071467, '金寨路': -10.973451707511522, '迟': -12.359746068631413, '曾经': -8.6708666145174771, '安然': -12.359746068631413, '田径': -9.720688739016154, '逐步': -9.874839418843413, '回访': -12.359746068631413, '竟会': -12.359746068631413, '复杂': -12.359746068631413, '阶段性': -10.280304526951578, '美丽': -8.8043980071419998, '坐骑': -12.359746068631413, '个别人': -9.7947967111698766, '巧妙': -12.359746068631413, '初等': -11.666598888071467, '孝丰': -12.359746068631413, '环境污染': -12.359746068631413, '娱乐': -8.3166948007968635, '察看': -10.973451707511522, '宇航': -11.666598888071467, '获利': -12.359746068631413, '大规模': -8.7221599089050272, '反复': -11.261133779963304, '副市长': -7.0918879095680856, '肉丸': -12.359746068631413, '贫困户': -11.666598888071467, '男友': -12.359746068631413, '艾玛乡': -12.359746068631413, '相遇': -12.359746068631413, '定向': -10.750308156197313, '跑': -9.4693743107352493, '山下': -11.261133779963304, '污染防治': -10.750308156197313, '话': -9.7947967111698766, '交通事故': -11.666598888071467, '干净': -10.567986599403358, '成熟': -11.666598888071467, '旧石器': -12.359746068631413, '南门': -10.973451707511522, '阿罗': -11.666598888071467, '接洽': -12.359746068631413, '足球界': -10.162521491295195, '投入': -9.2242518527022632, '愈演愈烈': -12.359746068631413, '婚庆': -10.567986599403358, '地震': -8.3524128833989426, '安全': -7.8599363983011488, '舞台': -10.567986599403358, '涉及': -9.6516958675292042, '屠杀': -9.5871573463916331, '绝好': -10.567986599403358, '上演': -9.1816922382834676, '家园网': -10.973451707511522, '每日': -9.2687036152730968, '逼入': -12.359746068631413, '职权': -12.359746068631413, '完婚': -9.2242518527022632, '直到': -10.973451707511522, '马科姆': -12.359746068631413, '何时': -10.413835919576099, '探险者': -11.666598888071467, '时': -6.3583311906702633, '坑道': -12.359746068631413, '考点': -10.973451707511522, '杨桃': -11.666598888071467, '绵长': -12.359746068631413, '八角': -11.666598888071467, '主席': 
-9.0275415584562104, '我军': -12.359746068631413, '优于': -12.359746068631413, '领导': -7.764626218496824, '珀纳尔巴舍': -11.666598888071467, '集体经济': -11.261133779963304, '零售业': -12.359746068631413, '外长': -7.2783417036469507, '腿部': -12.359746068631413, '书法': -10.280304526951578, '下降': -9.1408702437632119, '关停': -11.261133779963304, '长腿': -10.057160975637368, '部长': -11.666598888071467, '出家': -10.750308156197313, '排名': -8.2993030580849947, '琴童': -11.261133779963304, '台前': -11.261133779963304, '分校': -9.6516958675292042, '各': -6.4516631304624825, '挑选': -12.359746068631413, '茶叶': -9.5871573463916331, '并不': -8.7221599089050272, '出炉': -12.359746068631413, '答道': -12.359746068631413, '决心': -11.666598888071467, '魏国': -11.666598888071467, '空运': -10.973451707511522, '住着': -12.359746068631413, '环绕': -10.750308156197313, '检察官': -9.720688739016154, '王朝': -9.874839418843413, '停车楼': -12.359746068631413, '再次': -8.5311046721423178, '江淮': -12.359746068631413, '刘': -11.261133779963304, '尊严': -10.973451707511522, '通胀率': -12.359746068631413, '工程': -8.2008629852717423, '整形外科': -12.359746068631413, '一齐': -12.359746068631413, '备选': -12.359746068631413, '船': -7.9652969139589747, '玩儿': -12.359746068631413, '压阵': -12.359746068631413, '主会场': -10.973451707511522, '判断': -11.666598888071467, '效率': -12.359746068631413, '农牧区': -10.280304526951578, '防卫': -9.3152236309079903, '引进': -8.1550534492404481, '布鲁金斯': -11.666598888071467, '唯': -10.567986599403358, '散散心': -12.359746068631413, '迁安站': -12.359746068631413, '再现': -9.9618507958330422, '五环': -10.162521491295195, '画家': -8.4477230632032683, '东部': -6.9615833671136604, '飞奔': -12.359746068631413, '不能': -8.3894541550792923, '倍': -12.359746068631413, '早期': -10.280304526951578, '炼油': -12.359746068631413, '崩溃': -11.666598888071467, '飞行器': -11.261133779963304, '亲眼': -11.261133779963304, '有线电视': -12.359746068631413, '海燕': -12.359746068631413, '混': -11.666598888071467, '昆仑站': -12.359746068631413, '财务': -10.750308156197313, '科技型': -11.666598888071467, '自驾车': 
-12.359746068631413, '交涉': -12.359746068631413, '大邱': -9.720688739016154, '归还': -10.567986599403358, '配音': -12.359746068631413, '运回': -12.359746068631413, '最新': -8.0692866274830219, '征求': -12.359746068631413, '核爆': -12.359746068631413, '自强': -11.261133779963304, '打的': -10.973451707511522, '停战': -10.567986599403358, '当地': -6.5456155368063467, '台盟': -12.359746068631413, '总领事馆': -9.6516958675292042, '观察家': -10.162521491295195, '居民': -6.7002638528717924, '进出': -11.666598888071467, '邻近': -10.413835919576099, '内': -7.4619062686805018, '独揽': -11.261133779963304, '大洋': -11.666598888071467, '刑警': -10.750308156197313, '万达影城': -12.359746068631413, '认定': -12.359746068631413, '四绝': -12.359746068631413, '反超': -10.567986599403358, '隐私权': -12.359746068631413, '旅行者': -12.359746068631413, '龙虎': -12.359746068631413, '继承': -10.280304526951578, '音乐家': -11.261133779963304, '司机': -10.280304526951578, '山川': -12.359746068631413, '医生': -8.9924502386449401, '地质公园': -11.666598888071467, '经侦': -12.359746068631413, '边疆': -11.261133779963304, '牛奶河': -12.359746068631413, '小时': -12.359746068631413, '海运': -11.666598888071467, '标识': -12.359746068631413, '党派': -12.359746068631413, '北塔': -11.666598888071467, '白种人': -12.359746068631413, '依视路': -12.359746068631413, '写信': -12.359746068631413, '须经': -12.359746068631413, '违': -12.359746068631413, '中长期': -12.359746068631413, '习水': -11.666598888071467, '县镇': -11.666598888071467, '灵响': -11.666598888071467, '星星': -12.359746068631413, '加密': -12.359746068631413, '添乱': -12.359746068631413, '城山': -10.750308156197313, '柘': -12.359746068631413, '疫苗': -10.280304526951578, '岩': -11.261133779963304, '开张': -10.567986599403358, '人气': -8.1256395640341541, '制片人': -12.359746068631413, '却': -7.8711096988992733, '元首': -11.261133779963304, '伤': -12.359746068631413, '发音': -12.359746068631413, '彩灯': -12.359746068631413, '郑': -12.359746068631413, '签售': -12.359746068631413, '买不起': -10.280304526951578, '歌颂': -12.359746068631413, '留学生': -10.057160975637368, '联军': 
-10.750308156197313, '暂': -10.413835919576099, '网球赛': -10.973451707511522, '柯蒂斯': -12.359746068631413, '注册': -8.748828155987189, '阴转多云': -11.666598888071467, '大使们': -11.261133779963304, '\\': -12.359746068631413, '保钓': -12.359746068631413, '霸主': -10.973451707511522, '论': -10.567986599403358, '城北': -9.4153070894649726, '摒弃': -12.359746068631413, '邀请': -9.874839418843413, '凡': -12.359746068631413, '很早': -11.666598888071467, '糖醋排骨': -10.567986599403358, '左侧': -12.359746068631413, '恺': -10.413835919576099, '直营': -12.359746068631413, '二线': -12.359746068631413, '焦点': -12.359746068631413, '北南': -11.666598888071467, '拥有': -7.1557393815546178, '分获': -12.359746068631413, '壮烈': -12.359746068631413, '换花节': -12.359746068631413, '危房': -11.666598888071467, '拍照': -12.359746068631413, '班子': -10.567986599403358, '届时': -11.261133779963304, '桥东': -11.261133779963304, '关东店': -12.359746068631413, '特大型': -12.359746068631413, '据点': -11.666598888071467, '美名': -12.359746068631413, '贝尔曼': -12.359746068631413, '政权': -10.057160975637368, '炙手可热': -11.261133779963304, '碳': -12.359746068631413, '沐': -12.359746068631413, '大屯山': -11.261133779963304, '全兴': -12.359746068631413, '即使': -11.261133779963304, '上车': -10.567986599403358, '车站': -8.4279204359070885, '放在': -10.567986599403358, '有史以来': -9.2242518527022632, '小村': -12.359746068631413, '敞开': -12.359746068631413, '滞留': -10.973451707511522, '急需': -10.750308156197313, '诏安': -12.359746068631413, '东旭': -12.359746068631413, '先秦史': -12.359746068631413, '极为': -11.666598888071467, '汉文化': -11.666598888071467, '拘捕': -10.057160975637368, '造船': -10.750308156197313, '漫游': -11.666598888071467, '佳苑': -11.666598888071467, '北湖': -12.359746068631413, '安居工程': -12.359746068631413, '新四军': -10.750308156197313, '物色': -12.359746068631413, '诚信': -11.261133779963304, '银桥': -10.750308156197313, '靠拢': -10.750308156197313, '会考': -12.359746068631413, '上涨': -8.1700913266049877, '20:': -12.359746068631413, '河蟹': -11.666598888071467, '孙': -11.261133779963304, '开弓': 
-12.359746068631413, '法兰哥尼亚': -11.261133779963304, '仅存': -12.359746068631413, '无关': -11.666598888071467, '答应': -12.359746068631413, '新材料': -11.666598888071467, '提议': -9.4153070894649726, '考试': -12.359746068631413, '公益性': -10.750308156197313, '司法部长': -10.567986599403358, '电子': -9.7947967111698766, '名符其实': -12.359746068631413, '直达': -10.973451707511522, '夹': -10.413835919576099, '王后': -10.162521491295195, '贷款': -9.720688739016154, '天主': -11.666598888071467, '元素': -9.1016495306099312, '防总': -11.261133779963304, '卡瓦伊': -11.666598888071467, '晒佛节': -12.359746068631413, '年仅': -10.162521491295195, '感到': -9.4693743107352493, '叶姓': -12.359746068631413, '发射': -9.7947967111698766, '连体': -11.666598888071467, '平原': -9.9618507958330422, '简直': -11.261133779963304, '认识': -10.750308156197313, '就学': -10.750308156197313, '毛尖': -12.359746068631413, '手工': -11.666598888071467, '侨领': -10.973451707511522, '省级': -8.5311046721423178, '雪松': -12.359746068631413, '别号': -11.261133779963304, '入盟': -12.359746068631413, '全线': -10.973451707511522, '动物': -10.973451707511522, '菊花茶': -12.359746068631413, '珍稀': -10.567986599403358, '狗': -12.359746068631413, '股': -9.3152236309079903, '词汇': -11.666598888071467, '局长': -11.666598888071467, '涌现': -11.666598888071467, '玩起': -11.261133779963304, '血吸虫病': -12.359746068631413, '赌场': -12.359746068631413, '爽约': -12.359746068631413, '宁津镇': -11.666598888071467, '独裁者': -12.359746068631413, '穿过': -12.359746068631413, '通过': -7.7153551694900404, '审结': -11.261133779963304, '胡': -10.750308156197313, '极': -10.413835919576099, '起飞': -8.5095984669213554, '磁器口': -12.359746068631413, '人力': -12.359746068631413, '警备区': -9.6516958675292042, '谷物': -11.261133779963304, '烟台山': -12.359746068631413, '移': -11.666598888071467, '三季度': -12.359746068631413, '司法权': -12.359746068631413, '民俗专家': -12.359746068631413, '独特': -8.8333855440152522, '云景': -12.359746068631413, '其它': -10.413835919576099, '外经贸': -12.359746068631413, '教授': -10.413835919576099, '社会学家': -10.567986599403358, '乘警': 
-11.261133779963304, '天堂河': -10.973451707511522, '分裂': -9.874839418843413, '耗时': -11.666598888071467, '小雪': -12.359746068631413, '外环线': -12.359746068631413, '夏日': -12.359746068631413, '摩斯': -12.359746068631413, '敢为人先': -12.359746068631413, '国策': -11.261133779963304, '整日': -12.359746068631413, '即便': -12.359746068631413, '吏治': -11.666598888071467, '移居': -12.359746068631413, '典型': -9.720688739016154, '履新': -9.4693743107352493, '绝': -10.413835919576099, '枪击': -9.720688739016154, '舞动': -11.261133779963304, '梧桐树': -11.666598888071467, '绅士': -11.666598888071467, '|': -12.359746068631413, '安州市': -12.359746068631413, '粮油': -12.359746068631413, '鹿': -11.261133779963304, '文士': -12.359746068631413, '冰雪节': -10.162521491295195, '悦': -12.359746068631413, '进口贸易': -10.750308156197313, '时事': -11.261133779963304, '打败': -10.973451707511522, '甩掉': -10.057160975637368, '十陵': -12.359746068631413, '供认': -12.359746068631413, '单身': -10.280304526951578, '藏药': -10.973451707511522, '丰富': -10.280304526951578, '当': -7.754575882643322, '阻止': -12.359746068631413, '大权': -12.359746068631413, '金字塔': -11.666598888071467, '北方': -10.750308156197313, '简姓': -10.973451707511522, '就业': -9.1408702437632119, '地价': -11.666598888071467, '买楼': -12.359746068631413, '高峰': -11.666598888071467, '南麓': -10.413835919576099, '花灯': -10.973451707511522, '丰泽路': -12.359746068631413, '真实': -10.973451707511522, '墓地': -10.973451707511522, '经援': -11.666598888071467, '撤走': -12.359746068631413, '秀美': -11.666598888071467, '三岔口': -12.359746068631413, '国有': -10.057160975637368, '奇女子': -12.359746068631413, '便衣': -11.666598888071467, '撤侨': -10.973451707511522, '赏花': -11.666598888071467, '位居': -9.1408702437632119, '防': -8.6461740019271058, '志高': -12.359746068631413, '指数': -11.666598888071467, '班底': -11.261133779963304, '竹竿': -12.359746068631413, '骑楼': -10.567986599403358, '舆论界': -11.261133779963304, '赛道': -12.359746068631413, '微': -11.666598888071467, '扶手': -12.359746068631413, '富有': -11.666598888071467, '代理商': -11.261133779963304, 
'植保': -12.359746068631413, '商品': -8.9924502386449401, '披碱草': -11.666598888071467, '数字': -12.359746068631413, '荒园': -11.666598888071467, '往来': -11.261133779963304, '焦氏': -10.567986599403358, '导致': -12.359746068631413, '特务': -8.2008629852717423, '风灾': -12.359746068631413, '像': -10.057160975637368, '农林路': -12.359746068631413, '统': -11.666598888071467, '薪酬': -11.261133779963304, '灯展': -11.666598888071467, '茶产业': -10.750308156197313, '一连': -12.359746068631413, '前面': -10.280304526951578, '抵制': -12.359746068631413, '峡谷': -10.973451707511522, '税收': -11.261133779963304, '过半': -9.9618507958330422, '诱': -10.973451707511522, '桥梁专家': -12.359746068631413, '垃圾车': -12.359746068631413, '茶': -11.261133779963304, '古楼': -11.666598888071467, '鲍威尔': -12.359746068631413, '洽谈': -12.359746068631413, '意识': -12.359746068631413, '拆迁': -10.413835919576099, '联': -9.2687036152730968, '奶粉': -9.9618507958330422, '马克思·米罗': -11.261133779963304, '呀': -11.261133779963304, '影响': -9.9618507958330422, '丧失': -11.666598888071467, '供应量': -12.359746068631413, '拍电影': -10.162521491295195, '身患': -12.359746068631413, '连日': -10.413835919576099, '收视': -11.261133779963304, '行政区': -10.057160975637368, '结合': -8.9257588641462675, '衣': -11.261133779963304, '涨价': -12.359746068631413, '以西': -9.5871573463916331, '自杀': -10.973451707511522, '康泰': -9.1408702437632119, '间谍': -9.1816922382834676, '狗仔队': -11.261133779963304, '咸宁县': -12.359746068631413, '桥头': -10.750308156197313, '活鱼': -11.666598888071467, '末##末': -5.1728450482197816, '哥': -12.359746068631413, '文明史': -11.261133779963304, '标志': -12.359746068631413, '返': -9.3152236309079903, '宋': -11.261133779963304, '暗': -12.359746068631413, '领导层': -8.8333855440152522, '热炒': -11.261133779963304, '减肥法': -10.973451707511522, '宣战': -10.973451707511522, '十字口': -11.666598888071467, '车场': -12.359746068631413, '永田町': -10.162521491295195, '史料': -12.359746068631413, '殿': -11.666598888071467, '可望': -12.359746068631413, '滨湖': -10.973451707511522, '直面': -12.359746068631413, '羊绒': 
-11.261133779963304, '卫生': -8.2993030580849947, '赛场': -9.9618507958330422, '坚信': -10.973451707511522, '担保人': -12.359746068631413, '武打': -12.359746068631413, '拿到': -11.666598888071467, '一定会': -10.973451707511522, '坐镇': -12.359746068631413, '艺术界': -11.666598888071467, '村庄': -10.973451707511522, '成': -8.5755564347131532, '宁阳': -12.359746068631413, '访': -11.666598888071467, '内战': -9.9618507958330422, '下': -7.8824092541532069, '卑南乡': -10.973451707511522, '新闻': -8.2166113422398812, '被投诉': -12.359746068631413, '挥师': -12.359746068631413, '副县级': -12.359746068631413, '低热值': -12.359746068631413, '众议院': -10.567986599403358, '幸福': -9.7947967111698766, '数独': -12.359746068631413, '市场占有率': -11.666598888071467, '荞麦': -11.666598888071467, '入住': -10.750308156197313, '客游': -12.359746068631413, '团体': -9.3152236309079903, '仿': -11.666598888071467, '保姆': -12.359746068631413, '页岩气': -12.359746068631413, '从政': -12.359746068631413, '退市': -10.973451707511522, '女间谍': -11.666598888071467, '园': -9.5265327245751976, '黄金': -9.6516958675292042, '内陆': -9.5265327245751976, '植树造林': -12.359746068631413, '明城墙': -12.359746068631413, '账户': -10.280304526951578, '南磁极': -12.359746068631413, '营养学': -10.750308156197313, '决策层': -10.750308156197313, '兰贝思': -12.359746068631413, '本土': -7.086746510067667, '隅': -11.666598888071467, '风吹': -12.359746068631413, '地标': -10.280304526951578, '交叉路': -12.359746068631413, '东购': -12.359746068631413, '看板': -11.666598888071467, '艰辛': -12.359746068631413, '艺境': -12.359746068631413, '特首': -9.4153070894649726, '反动': -10.973451707511522, '独具': -12.359746068631413, '大会战': -12.359746068631413, '代表性': -10.162521491295195, '教育改革': -10.413835919576099, '常见': -10.280304526951578, '自': -7.0664412439069215, '荟萃': -12.359746068631413, '更好': -10.567986599403358, '干': -10.057160975637368, '仿造': -12.359746068631413, '发车': -9.874839418843413, '露天': -12.359746068631413, '咸阳县': -10.413835919576099, '在线': -7.785035090128031, '大海': -12.359746068631413, '基于': -10.750308156197313, '生物': 
-11.666598888071467, '路面': -10.973451707511522, '贝利': -12.359746068631413, '陆地港': -12.359746068631413, '燃气': -11.666598888071467, '修法': -12.359746068631413, '团圆饭': -12.359746068631413, '实时': -12.359746068631413, '演艺': -10.057160975637368, '岸上': -11.261133779963304, '接班人': -11.261133779963304, '直线': -12.359746068631413, '孤身一人': -11.666598888071467, '红旗街': -12.359746068631413, '现实': -10.567986599403358, '乘着': -12.359746068631413, '眼镜': -12.359746068631413, '民风': -12.359746068631413, '预订': -12.359746068631413, '着陆': -12.359746068631413, '男孩': -9.0275415584562104, '确实': -9.4153070894649726, '奇幻': -12.359746068631413, '家长': -9.9618507958330422, '进出境': -11.666598888071467, '千家万户': -12.359746068631413, '警用': -11.261133779963304, '出任': -10.973451707511522, '光线': -12.359746068631413, '彭德尔顿': -11.666598888071467, '主次': -10.973451707511522, '拉斯韦加斯': -9.9618507958330422, '中段': -9.720688739016154, '古韵': -12.359746068631413, '打零工': -10.413835919576099, '有偏见': -12.359746068631413, '爱美': -10.413835919576099, '峰会': -10.280304526951578, '观看': -10.413835919576099, '歌迷': -10.750308156197313, '接待工作': -12.359746068631413, '包机': -10.973451707511522, '其次': -10.750308156197313, '林区': -9.9618507958330422, '棉农': -11.666598888071467, '皇家': -10.973451707511522, '奇异': -12.359746068631413, ';': -7.4109861782532453, '投票': -12.359746068631413, '念书': -10.162521491295195, '新义州': -10.973451707511522, '桂': -10.162521491295195, '男歌手': -12.359746068631413, '给母亲': -12.359746068631413, '近乎': -12.359746068631413, '联系': -9.9618507958330422, '~': -12.359746068631413, '佐贺': -11.666598888071467, '直': -12.359746068631413, '见到': -9.720688739016154, '定都': -12.359746068631413, '杰拉德·R·福特级航空母舰': -11.666598888071467, '地': -7.0222079889300959, '交界处': -9.3152236309079903, '博士后': -11.666598888071467, '义军': -12.359746068631413, '太阳': -10.750308156197313, '上将': -12.359746068631413, '四顶山': -12.359746068631413, '休养生息': -12.359746068631413, '紧锣密鼓': -11.666598888071467, '须': -11.666598888071467, '两委': -10.750308156197313, 
'太多': -12.359746068631413, '面孔': -11.261133779963304, '农机': -11.261133779963304, '景教': -11.261133779963304, '艺术': -9.874839418843413, '珊': -12.359746068631413, '库伦': -11.261133779963304, '张某': -12.359746068631413, '鼎': -12.359746068631413, '福禄': -12.359746068631413, '板块': -10.567986599403358, '比特币': -10.973451707511522, '武器库': -9.7947967111698766, '入口': -10.973451707511522, '客机': -10.413835919576099, '重量级': -11.666598888071467, '一方面': -9.874839418843413, '家暴': -12.359746068631413, '為': -12.359746068631413, '进入': -8.185358798735777, '山沟': -10.973451707511522, '水价': -11.666598888071467, '、': -4.3917728889684788, '养殖': -11.666598888071467, '于今': -12.359746068631413, '母城': -12.359746068631413, '寄出': -12.359746068631413, '专业': -9.9618507958330422, '时空': -12.359746068631413, '何苦': -11.666598888071467, '赤道': -12.359746068631413, '新发地': -11.261133779963304, '刊物': -11.666598888071467, '禁火': -10.567986599403358, '失败': -12.359746068631413, '纪': -12.359746068631413, '携手':
<filename>tests/ut/python/ops/test_nn_ops_check.py<gh_stars>1-10 # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ test ops """ import functools import numpy as np from mindspore import ops from mindspore.ops import functional as F from mindspore.ops import operations as P from mindspore.ops import composite as C from mindspore.ops.operations import _grad_ops as G import mindspore.nn as nn from mindspore import Tensor from mindspore.common import dtype as mstype from mindspore.common.parameter import Parameter from ..ut_filter import non_graph_engine from mindspore.common.api import _executor from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward\ import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ import pipeline_for_compile_grad_ge_graph_for_case_by_case_config class Conv2DBackpropInputNet(nn.Cell): def __init__(self, net, x_shape): super(Conv2DBackpropInputNet, self).__init__() self.net = net self.x_shape = x_shape def construct(self, dout, w): return self.net(dout, w, self.x_shape) class TopKNet(nn.Cell): def __init__(self, net, k): super(TopKNet, self).__init__() self.net = net self.k = k def construct(self, 
x): return self.net(x, self.k) raise_set = [ # input is scalar ('Flatten0', { 'block': (P.Flatten(), {'exception': TypeError, 'error_keywords': ['Flatten']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # dim of input is zero ('Flatten1', { 'block': (P.Flatten(), {'exception': ValueError, 'error_keywords': ['Flatten']}), 'desc_inputs': [F.scalar_to_tensor(5.0)], 'skip': ['backward']}), # input is scalar ('Softmax0', { 'block': (P.Softmax(), {'exception': TypeError, 'error_keywords': ['Softmax']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # axis is empty tuple ('Softmax1', { 'block': (P.Softmax(axis=()), {'exception': ValueError, 'error_keywords': ['Softmax']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # axis value is not in range ('Softmax2', { 'block': (P.Softmax(axis=2), {'exception': ValueError, 'error_keywords': ['Softmax']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('LogSoftmax0', { 'block': (P.LogSoftmax(), {'exception': TypeError, 'error_keywords': ['LogSoftmax']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # axis value is not in range ('LogSoftmax1', { 'block': (P.LogSoftmax(axis=2), {'exception': ValueError, 'error_keywords': ['LogSoftmax']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('ReLU0', { 'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(Bool) ('ReLU1', { 'block': (P.ReLU(), {'exception': TypeError, 'error_keywords': ['ReLU']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], 'skip': ['backward']}), # input is scalar ('ReLU60', { 'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(int32) ('ReLU61', { 'block': (P.ReLU6(), {'exception': TypeError, 'error_keywords': ['ReLU6']}), 
'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], 'skip': ['backward']}), # input is scalar ('Elu0', { 'block': (P.Elu(), {'exception': TypeError, 'error_keywords': ['Elu']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(int32) ('Elu1', { 'block': (P.Elu(), {'exception': TypeError, 'error_keywords': ['Elu']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], 'skip': ['backward']}), # input is scalar ('Sigmoid0', { 'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(int32) ('Sigmoid1', { 'block': (P.Sigmoid(), {'exception': TypeError, 'error_keywords': ['Sigmoid']}), 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], 'skip': ['backward']}), # input is scalar ('Tanh0', { 'block': (P.Tanh(), {'exception': TypeError, 'error_keywords': ['Tanh']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is scalar ('BatchNorm0', { 'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [5.0, 5.0, 5.0, 5.0, 5.0], 'skip': ['backward']}), # is_training=False and mean=None ('BatchNorm1', { 'block': (P.BatchNorm(is_training=False), {'exception': TypeError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)), None, None], 'skip': ['backward']}), # is_training=True and mean=None ('BatchNorm2', { 'block': (P.BatchNorm(is_training=True), {'exception': TypeError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float16)), Tensor(np.ones([3]).astype(np.float32))], 'skip': ['backward']}), # scale and bias rank > 1 ('BatchNorm3', { 'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 
'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32))], 'skip': ['backward']}), # scale and bias shape not match ('BatchNorm4', { 'block': (P.BatchNorm(is_training=True), {'exception': ValueError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([7]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32))], 'skip': ['backward']}), # is_training=False, mean and variance shape not match ('BatchNorm5', { 'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # is_training=False, mean and scale shape not match ('BatchNorm6', { 'block': (P.BatchNorm(is_training=False), {'exception': ValueError, 'error_keywords': ['BatchNorm']}), 'desc_inputs': [Tensor(np.ones([5, 3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([3]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('Conv2D0', { 'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('Conv2D1', { 'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # input x and w type mismatch ('Conv2D2', { 
'block': (P.Conv2D(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))], 'skip': ['backward']}), # rank of x is not 4 ('Conv2D3', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], 'skip': ['backward']}), # rank of 2 is not 4 ('Conv2D4', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))], 'skip': ['backward']}), # x_shape[1] / group != w_shape[1] ('Conv2D5', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))], 'skip': ['backward']}), # out_channel != w_shape[0] ('Conv2D6', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], 'skip': ['backward']}), # kernel_size != w_shape[2:4] ('Conv2D7', { 'block': (P.Conv2D(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['Conv2D']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('DepthwiseConv2dNative0', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('DepthwiseConv2dNative1', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), 
Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # input x and w type mismatch ('DepthwiseConv2dNative2', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': TypeError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.float32)), Tensor(np.ones([5]).astype(np.float16))], 'skip': ['backward']}), # rank of x is not 4 ('DepthwiseConv2dNative3', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([1, 1]).astype(np.float32)), Tensor(np.ones([1,1,9,9]).astype(np.float32))], 'skip': ['backward']}), # rank of 2 is not 4 ('DepthwiseConv2dNative4', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,1,9]).astype(np.float32))], 'skip': ['backward']}), # x_shape[1] != w_shape[1] ('DepthwiseConv2dNative5', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([1,2,9,9]).astype(np.float32))], 'skip': ['backward']}), # kernel_size != w_shape[2:4] ('DepthwiseConv2dNative6', { 'block': (P.DepthwiseConv2dNative(2, (5, 5)), {'exception': ValueError, 'error_keywords': ['DepthwiseConv2dNative']}), 'desc_inputs': [Tensor(np.ones([1,1,9,9]).astype(np.float32)), Tensor(np.ones([2,1,5,6]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('MaxPoolWithArgmax0', { 'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # input is Tensor(bool) ('MaxPoolWithArgmax1', { 'block': (P.MaxPoolWithArgmax(), {'exception': TypeError, 'error_keywords': ['MaxPoolWithArgmax']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_))], 'skip': 
['backward']}), # rank of x is not 4 ('MaxPoolWithArgmax2', { 'block': (P.MaxPoolWithArgmax(), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}), 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], 'skip': ['backward']}), # kernel size is invalid(very large) ('MaxPoolWithArgmax3', { 'block': (P.MaxPoolWithArgmax(ksize=50), {'exception': ValueError, 'error_keywords': ['MaxPoolWithArgmax']}), 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('MaxPool0', { 'block': (P.MaxPool(), {'exception': TypeError, 'error_keywords': ['MaxPool']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # rank of x is not 4 ('MaxPool1', { 'block': (P.MaxPool(), {'exception': ValueError, 'error_keywords': ['MaxPool']}), 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], 'skip': ['backward']}), # rank of x is not 4 ('MaxPool2', { 'block': (P.MaxPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['MaxPool']}), 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('AvgPool0', { 'block': (P.AvgPool(), {'exception': TypeError, 'error_keywords': ['AvgPool']}), 'desc_inputs': [5.0], 'skip': ['backward']}), # rank of x is not 4 ('AvgPool1', { 'block': (P.AvgPool(), {'exception': ValueError, 'error_keywords': ['AvgPool']}), 'desc_inputs': [Tensor(np.ones([1,1,32]).astype(np.float32))], 'skip': ['backward']}), # rank of x is not 4 ('AvgPool2', { 'block': (P.AvgPool(ksize=50, strides=1), {'exception': ValueError, 'error_keywords': ['AvgPool']}), 'desc_inputs': [Tensor(np.ones([1,1,32,32]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('Conv2DBackpropInput0', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('Conv2DBackpropInput1', { 'block': 
(Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # types of doutput and w mismatch ('Conv2DBackpropInput2', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2,3)), {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # types x_size is not tuple ('Conv2DBackpropInput3', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), 2), {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # types x_size is not tuple(int,...) ('Conv2DBackpropInput4', { 'block': (Conv2DBackpropInputNet(P.Conv2DBackpropInput(2, (5, 5)), (2, 3.0)), {'exception': TypeError, 'error_keywords': ['Conv2DBackpropInput']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # input is scalar ('BiasAdd0', { 'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}), 'desc_inputs': [5.0, 5.0], 'skip': ['backward']}), # input is Tensor(bool) ('BiasAdd1', { 'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.bool_)), Tensor(np.ones([5]).astype(np.bool_))], 'skip': ['backward']}), # types of x and bias mismatch ('BiasAdd2', { 'block': (P.BiasAdd(), {'exception': TypeError, 'error_keywords': ['BiasAdd']}), 'desc_inputs': [Tensor(np.ones([5]).astype(np.int32)), Tensor(np.ones([5]).astype(np.float32))], 'skip': ['backward']}), # rank of x less than
the created resource. """ return pulumi.get(self, "self_link") @self_link.setter def self_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of instance tags to which this route applies. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "tags", value) class Route(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, dest_range: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, next_hop_gateway: Optional[pulumi.Input[str]] = None, next_hop_ilb: Optional[pulumi.Input[str]] = None, next_hop_instance: Optional[pulumi.Input[str]] = None, next_hop_instance_zone: Optional[pulumi.Input[str]] = None, next_hop_ip: Optional[pulumi.Input[str]] = None, next_hop_vpn_tunnel: Optional[pulumi.Input[str]] = None, priority: Optional[pulumi.Input[int]] = None, project: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, __props__=None): """ Represents a Route resource. A route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with virtual machines by tag, and the set of routes for a particular virtual machine is called its routing table. For each packet leaving a virtual machine, the system searches that virtual machine's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. 
If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the next_hop field of the winning route -- either to another virtual machine destination, a virtual machine gateway or a Compute Engine-operated gateway. Packets that do not match any route in the sending virtual machine's routing table will be dropped. A Route resource must have exactly one specification of either nextHopGateway, nextHopInstance, nextHopIp, nextHopVpnTunnel, or nextHopIlb. To get more information about Route, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routes) * How-to Guides * [Using Routes](https://cloud.google.com/vpc/docs/using-routes) ## Example Usage ### Route Basic ```python import pulumi import pulumi_gcp as gcp default_network = gcp.compute.Network("defaultNetwork") default_route = gcp.compute.Route("defaultRoute", dest_range="172.16.31.10/24", network=default_network.name, next_hop_ip="10.132.1.5", priority=100) ``` ### Route Ilb ```python import pulumi import pulumi_gcp as gcp default_network = gcp.compute.Network("defaultNetwork", auto_create_subnetworks=False) default_subnetwork = gcp.compute.Subnetwork("defaultSubnetwork", ip_cidr_range="10.0.1.0/24", region="us-central1", network=default_network.id) hc = gcp.compute.HealthCheck("hc", check_interval_sec=1, timeout_sec=1, tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs( port=80, )) backend = gcp.compute.RegionBackendService("backend", region="us-central1", health_checks=[hc.id]) default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule", region="us-central1", load_balancing_scheme="INTERNAL", backend_service=backend.id, all_ports=True, network=default_network.name, subnetwork=default_subnetwork.name) route_ilb = gcp.compute.Route("route-ilb", dest_range="0.0.0.0/0", network=default_network.name, next_hop_ilb=default_forwarding_rule.id, priority=2000) 
``` ### Route Ilb Vip ```python import pulumi import pulumi_gcp as gcp producer_network = gcp.compute.Network("producerNetwork", auto_create_subnetworks=False, opts=pulumi.ResourceOptions(provider=google_beta)) producer_subnetwork = gcp.compute.Subnetwork("producerSubnetwork", ip_cidr_range="10.0.1.0/24", region="us-central1", network=producer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) consumer_network = gcp.compute.Network("consumerNetwork", auto_create_subnetworks=False, opts=pulumi.ResourceOptions(provider=google_beta)) consumer_subnetwork = gcp.compute.Subnetwork("consumerSubnetwork", ip_cidr_range="10.0.2.0/24", region="us-central1", network=consumer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) peering1 = gcp.compute.NetworkPeering("peering1", network=consumer_network.id, peer_network=producer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) peering2 = gcp.compute.NetworkPeering("peering2", network=producer_network.id, peer_network=consumer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) hc = gcp.compute.HealthCheck("hc", check_interval_sec=1, timeout_sec=1, tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs( port=80, ), opts=pulumi.ResourceOptions(provider=google_beta)) backend = gcp.compute.RegionBackendService("backend", region="us-central1", health_checks=[hc.id], opts=pulumi.ResourceOptions(provider=google_beta)) default = gcp.compute.ForwardingRule("default", region="us-central1", load_balancing_scheme="INTERNAL", backend_service=backend.id, all_ports=True, network=producer_network.name, subnetwork=producer_subnetwork.name, opts=pulumi.ResourceOptions(provider=google_beta)) route_ilb = gcp.compute.Route("route-ilb", dest_range="0.0.0.0/0", network=consumer_network.name, next_hop_ilb=default.ip_address, priority=2000, tags=[ "tag1", "tag2", ], opts=pulumi.ResourceOptions(provider=google_beta, depends_on=[ peering1, peering2, ])) ``` ## Import Route can be imported using any of 
these accepted formats ```sh $ pulumi import gcp:compute/route:Route default projects/{{project}}/global/routes/{{name}} ``` ```sh $ pulumi import gcp:compute/route:Route default {{project}}/{{name}} ``` ```sh $ pulumi import gcp:compute/route:Route default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. Provide this property when you create the resource. :param pulumi.Input[str] dest_range: The destination range of outgoing packets that this route applies to. Only IPv4 is supported. :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] network: The network that this route applies to. :param pulumi.Input[str] next_hop_gateway: URL to a gateway that should handle matching packets. Currently, you can only specify the internet gateway, using a full or partial valid URL: * `https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway` * `projects/project/global/gateways/default-internet-gateway` * `global/gateways/default-internet-gateway` * The string `default-internet-gateway`. :param pulumi.Input[str] next_hop_ilb: The IP address or URL to a forwarding rule of type loadBalancingScheme=INTERNAL that should handle matching packets. With the GA provider you can only specify the forwarding rule as a partial or full URL. 
For example, the following are all valid values: * 10.128.0.56 * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule * regions/region/forwardingRules/forwardingRule When the beta provider, you can also specify the IP address of a forwarding rule from the same VPC or any peered VPC. Note that this can only be used when the destinationRange is a public (non-RFC 1918) IP CIDR range. :param pulumi.Input[str] next_hop_instance: URL to an instance that should handle matching packets. You can specify this as a full or partial URL. For example: * `https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance` * `projects/project/zones/zone/instances/instance` * `zones/zone/instances/instance` * Just the instance name, with the zone in `next_hop_instance_zone`. :param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is specified) The zone of the instance specified in `next_hop_instance`. Omit if `next_hop_instance` is specified as a URL. :param pulumi.Input[str] next_hop_ip: Network IP address of an instance that should handle matching packets. :param pulumi.Input[str] next_hop_vpn_tunnel: URL to a VpnTunnel that should handle matching packets. :param pulumi.Input[int] priority: The priority of this route. Priority is used to break ties in cases where there is more than one matching route of equal prefix length. In the case of two routes with equal prefix length, the one with the lowest-numbered priority value wins. Default value is 1000. Valid range is 0 through 65535. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A list of instance tags to which this route applies. """ ... @overload def __init__(__self__, resource_name: str, args: RouteArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Represents a Route resource. 
A route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with virtual machines by tag, and the set of routes for a particular virtual machine is called its routing table. For each packet leaving a virtual machine, the system searches that virtual machine's routing table for a single best matching route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching routes. The packet is then forwarded as specified by the next_hop field of the winning route -- either to another virtual machine destination, a virtual machine gateway or a Compute Engine-operated gateway. Packets that do not match any route in the sending virtual machine's routing table will be dropped. A Route resource must have exactly one specification of either nextHopGateway, nextHopInstance, nextHopIp, nextHopVpnTunnel, or nextHopIlb. 
To get more information about Route, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routes) * How-to Guides * [Using Routes](https://cloud.google.com/vpc/docs/using-routes) ## Example Usage ### Route Basic ```python import pulumi import pulumi_gcp as gcp default_network = gcp.compute.Network("defaultNetwork") default_route = gcp.compute.Route("defaultRoute", dest_range="172.16.31.10/24", network=default_network.name, next_hop_ip="10.132.1.5", priority=100) ``` ### Route Ilb ```python import pulumi import pulumi_gcp as gcp default_network = gcp.compute.Network("defaultNetwork", auto_create_subnetworks=False) default_subnetwork = gcp.compute.Subnetwork("defaultSubnetwork", ip_cidr_range="10.0.1.0/24", region="us-central1", network=default_network.id) hc = gcp.compute.HealthCheck("hc", check_interval_sec=1, timeout_sec=1, tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs( port=80, )) backend = gcp.compute.RegionBackendService("backend", region="us-central1", health_checks=[hc.id]) default_forwarding_rule = gcp.compute.ForwardingRule("defaultForwardingRule", region="us-central1", load_balancing_scheme="INTERNAL", backend_service=backend.id, all_ports=True, network=default_network.name, subnetwork=default_subnetwork.name) route_ilb = gcp.compute.Route("route-ilb", dest_range="0.0.0.0/0", network=default_network.name, next_hop_ilb=default_forwarding_rule.id, priority=2000) ``` ### Route Ilb Vip ```python import pulumi import pulumi_gcp as gcp producer_network = gcp.compute.Network("producerNetwork", auto_create_subnetworks=False, opts=pulumi.ResourceOptions(provider=google_beta)) producer_subnetwork = gcp.compute.Subnetwork("producerSubnetwork", ip_cidr_range="10.0.1.0/24", region="us-central1", network=producer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) consumer_network = gcp.compute.Network("consumerNetwork", auto_create_subnetworks=False, opts=pulumi.ResourceOptions(provider=google_beta)) 
consumer_subnetwork = gcp.compute.Subnetwork("consumerSubnetwork", ip_cidr_range="10.0.2.0/24", region="us-central1", network=consumer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) peering1 = gcp.compute.NetworkPeering("peering1", network=consumer_network.id, peer_network=producer_network.id, opts=pulumi.ResourceOptions(provider=google_beta)) peering2
<gh_stars>100-1000 # Copyright 2015 Ufora Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pyfora import pyfora.Exceptions as Exceptions import numpy.random.mtrand as mtrand import time class APackedClass: def __init__(self,x,y): self.x = x self.y = y class BuiltinTestCases(object): def test_range_builtin_simple(self): def f(x): return range(x) self.equivalentEvaluationTest(f, 10) def test_range_builtin_overloads(self): def f(start, stop, incr=1): return range(start, stop, incr) self.equivalentEvaluationTest(f, 1, 10) self.equivalentEvaluationTest(f, 10, 5) self.equivalentEvaluationTest(f, 5, 10) self.equivalentEvaluationTest(f, 10, 1, 2) self.equivalentEvaluationTest(f, 10, 5, 5) self.equivalentEvaluationTest(f, 10, 10, 10) def test_ord_chr_builtins(self): def f(): chars = [chr(val) for val in range(40, 125)] vals = [ord(val) for val in chars] return (chars, vals) self.equivalentEvaluationTest(f) def test_builtins_max_1(self): def f(arg): return max(arg) x = [1,2,0,4,4,5,9] self.equivalentEvaluationTest(f, x) def test_builtins_max_2(self): def f(): return max(xrange(5)) self.equivalentEvaluationTest(f) def test_builtins_max_3(self): def f(): return max(1,2) self.equivalentEvaluationTest(f) def test_builtins_min_1(self): def f(arg): return min(arg) x = [1,2,0,4,4,5,9] self.equivalentEvaluationTest(f, x) def test_issue_200(self): def f(): if False: return eval return 0 self.equivalentEvaluationTest(f) def test_builtins_min_2(self): def f(): return min(xrange(5)) 
self.equivalentEvaluationTest(f) def test_builtins_min_3(self): def f(): return min(1,2) self.equivalentEvaluationTest(f) def test_builtins_abs(self): def f(x): return abs(x) for x in range(-10, 10): self.equivalentEvaluationTest(f, x) self.equivalentEvaluationTest(f, True) self.equivalentEvaluationTest(f, False) with self.assertRaises(pyfora.ComputationError): self.evaluateWithExecutor(f, []) with self.assertRaises(pyfora.ComputationError): self.evaluateWithExecutor(f, ["test"]) with self.assertRaises(pyfora.ComputationError): self.evaluateWithExecutor(f, "test") def test_builtins_all(self): def f(x): return all(x) self.equivalentEvaluationTest(f, []) self.equivalentEvaluationTest(f, [True]) self.equivalentEvaluationTest(f, [False]) self.equivalentEvaluationTest(f, [True, True]) self.equivalentEvaluationTest(f, [True, False]) self.equivalentEvaluationTest(f, [False, True]) self.equivalentEvaluationTest(f, [False, False]) def test_builtins_any(self): def f(x): return any(x) self.equivalentEvaluationTest(f, []) self.equivalentEvaluationTest(f, [True]) self.equivalentEvaluationTest(f, [False]) self.equivalentEvaluationTest(f, [True, True]) self.equivalentEvaluationTest(f, [True, False]) self.equivalentEvaluationTest(f, [False, True]) self.equivalentEvaluationTest(f, [False, False]) def test_builtins_zip_not_implemented(self): def f(x): return zip(x) with self.assertRaises(pyfora.Exceptions.ComputationError) as ctx: self.equivalentEvaluationTest(f, []) exception = ctx.exception self.assertIsInstance(exception.remoteException, pyfora.Exceptions.UnconvertibleValueError) self.assertEqual(exception.remoteException.message, "Pyfora didn't know how to convert zip") def test_reversed_builtins(self): def f(): a = [1, 2, 3, 4, 5, 6] b = reversed(a) toReturn = [] for v in b: toReturn = toReturn + [v] return toReturn self.equivalentEvaluationTest(f) def test_reduce_builtin(self): def mul(x,y): return x*y def sub(x,y): return x-y self.equivalentEvaluationTest(lambda: reduce(mul, 
[1,2,3,4,5])) self.equivalentEvaluationTest(lambda: reduce(mul, [1,2,3,4,5], 0)) def nonparallel(x): for v in x: yield v self.equivalentEvaluationTest(lambda: reduce(sub, nonparallel([1.0,2.0,3.0,4.0,5.0]))) self.equivalentEvaluationTest(lambda: reduce(sub, nonparallel([1.0,2.0,3.0,4.0,5.0]), 10)) def test_map_builtin(self): def addOne(x): return x + 1 self.equivalentEvaluationTest(lambda: map(None, [1,2,3])) self.equivalentEvaluationTest(lambda: map(addOne, [1,2,3])) self.equivalentEvaluationTest(lambda: map(addOne, (x for x in [1,2,3]))) def test_supported_builtin_member(self): import math def f(x): return x + math.pi self.equivalentEvaluationTest(f, 2) def test_enumerate(self): def f(x): return [_ for _ in x] self.equivalentEvaluationTest(f, [1,2,3]) self.equivalentEvaluationTest(f, "asdf") def test_sorted_1(self): xs = [5, 2, 3, 1, 4] def f(): return sorted(xs) self.equivalentEvaluationTest(f) def test_sorted_2(self): def f(): return sorted(1) try: self.evaluateWithExecutor(f) self.assertTrue(False) except pyfora.ComputationError as e: self.assertIsInstance(e.remoteException, TypeError) def test_sorted_3(self): xs = (5, 2, 3, 1, 4) def f(): return sorted(xs) self.equivalentEvaluationTest(f) def test_sorted_4(self): xs = { 1: 2, 3: 4, 5: 6, 7: 8, 9: 10 } def f(): return sorted(xs) self.equivalentEvaluationTest(f) def test_sorted_large_1(self): def f(): rng = mtrand.RandomState(seed=250015) x = rng.uniform(size=1000) res = sorted(x) return all( [res[ix] <= res[ix + 1] for ix in xrange(len(res) - 1)] ) self.equivalentEvaluationTest(f) def test_sorted_large_2(self): def f(): rng = mtrand.RandomState(seed=250015) x = rng.uniform(size=1000000) res = sorted(x) return all( [res[ix] <= res[ix + 1] for ix in xrange(len(res) - 1)] ) self.equivalentEvaluationTest(f) def test_python_if_int(self): def f(): if 1: return True else: return False self.equivalentEvaluationTest(f) def test_python_if_int_2(self): def f2(): if 0: return True else: return False 
self.equivalentEvaluationTest(f2) def test_python_and_or(self): def f(): return ( 0 or 1, 1 or 2, 1 or 0, 0 or False, 1 or 2 or 3, 0 or 1, 0 or 1 or 2, 1 and 2, 0 and 1, 1 and 0, 0 and False, 1 and 2 and 3, 0 and 1 and 2, 1 and 2 and 0, 1 and 0 and 2, 0 and False and 2 ) self.equivalentEvaluationTest(f) def test_primitive_type_comparisons(self): def f(): toReturn = [] toCompare = [True, False, 0, 1, 2, 0.0, 1.0, 2.0, -1, -1.1, "test", []] l = len(toCompare) for idx1 in range(l): for idx2 in range(l): a = toCompare[idx1] b = toCompare[idx2] toReturn = toReturn + [a < b] toReturn = toReturn + [a > b] toReturn = toReturn + [a <= b] toReturn = toReturn + [a >= b] return toReturn self.equivalentEvaluationTest(f) def test_len_0(self): def f(x): return len(x) self.equivalentEvaluationTest(f, "asdf") def test_TrueFalseNone(self): def f(): return (True, False, None) self.equivalentEvaluationTest(f) def test_returns_len(self): def f(): return len res = self.evaluateWithExecutor(f) self.assertIs(res, f()) def test_returns_str(self): def f(): return str res = self.evaluateWithExecutor(f) self.assertIs(str, res) def test_pass_returns_None(self): with self.create_executor() as executor: def f(): pass self.assertIs(self.evaluateWithExecutor(f), None) def test_issubclass(self): test = self.equivalentEvaluationTestThatHandlesExceptions types = [float, int, bool, object, Exception] for t1 in types: for t2 in types: test(issubclass, t1, t2) test(issubclass, t1, (t2,)) def test_isinstance_1(self): test = self.equivalentEvaluationTestThatHandlesExceptions for inst in [10, 10.0, True]: for typ in [float, object, int, bool]: test(lambda: isinstance(inst, typ)) test(lambda: issubclass(type(inst), typ)) def test_isinstance_2(self): class IsInstanceClass: pass def f(): c = IsInstanceClass() return c.__class__ is IsInstanceClass and \ not isinstance(c, list) self.equivalentEvaluationTest(f) def test_isinstance_3(self): class IsinstanceClassTest: pass def f(): x = IsinstanceClassTest() return 
x.__class__ is IsinstanceClassTest and isinstance(x, IsinstanceClassTest) self.equivalentEvaluationTest(f) def test_sum_isPrime(self): def isPrime(p): x = 2 while x*x <= p: if p%x == 0: return 0 x = x + 1 return x self.equivalentEvaluationTest(lambda: sum(isPrime(x) for x in xrange(1000000))) def test_in_expr(self): x = [0,1,2,3] def f(arg): return arg in x for arg in range(-len(x), len(x)): self.equivalentEvaluationTest(f, arg) def test_notin_expr(self): def f(x): return x not in [2,3] for x in [2]: self.equivalentEvaluationTest(f, x) def test_len_1(self): class ThingWithLen: def __init__(self, len): self.len = len def __len__(self): return self.len def f(x): return len(ThingWithLen(x)) self.equivalentEvaluationTest(f, 2) def test_len_2(self): def f(): return len([1,2,3]) self.equivalentEvaluationTest(f) def test_len_3(self): def f(): return len("asdf") self.equivalentEvaluationTest(f) def test_str_1(self): class ThingWithStr: def __init__(self, str): self.str = str def __str__(self): return self.str def f(x): return str(ThingWithStr(x)) self.equivalentEvaluationTest(f, "2") def test_str_2(self): def f(x): return str(x) self.equivalentEvaluationTest(f, 42) self.equivalentEvaluationTest(f, "foo") self.equivalentEvaluationTest(f, None) def test_implicitReturnNone_1(self): def f(): x = 2 self.equivalentEvaluationTest(f) def test_implicitReturnNone_2(self): def f(x): x self.equivalentEvaluationTest(f, 2) def test_implicitReturnNone_3(self): def f(x): if x > 0: return else: return 1 self.equivalentEvaluationTest(f, 1) self.equivalentEvaluationTest(f, -1) def test_whileLoop(self): def whileLoop(x): y = 0 while x < 100: y = y + x x = x + 1 return y for ix in range(4): self.equivalentEvaluationTest(whileLoop, ix) def test_variableAssignment(self): def variableAssignment(x): y = x + 1 return x+y for ix in range(3): self.equivalentEvaluationTest(variableAssignment, ix) def test_argumentAssignment(self): def argumentAssignment(x): x = x + 1 return x 
self.equivalentEvaluationTest(argumentAssignment, 100) def test_basicAddition(self): def basicAddition(x): return x + 1 self.equivalentEvaluationTest(basicAddition, 4) def test_pass(self): def passStatement(): def f(): pass x = f() return x self.equivalentEvaluationTest(passStatement) def test_inStatement_2(self): def inStatement(): x = [0,1,2,3] return 0 in x self.equivalentEvaluationTest(inStatement) def test_continue_in_while(self): def f(): x = 0 y = 0 while x < 100: x = x + 1 if x % 2: continue y = y + x self.equivalentEvaluationTest(f) def test_continue_in_for(self): def f(): x = 0 y = 0 for x in xrange(100): if x % 2: continue y = y + x self.equivalentEvaluationTest(f) def test_print_is_noop(self): def f(): print "hello world" return 10 self.assertEqual(self.evaluateWithExecutor(f), 10) def test_import_sys(self): def f(): import sys try: self.evaluateWithExecutor(f) self.assertTrue(False) except Exceptions.ComputationError as e: self.assertIsInstance(e.message, str) self.assertIn( "Pyfora can't convert this code", e.message ) def test_for_loop_values_carry_over(self): with self.create_executor() as executor: def f(): y = 0 for x in [1, 2, 3, 4]: y = y + x return (y, x) self.equivalentEvaluationTest(f) def test_is_returns_true(self): self.equivalentEvaluationTest(lambda x: x is 10, 10) self.equivalentEvaluationTest(lambda x: x is 10, 11) def test_assert_1(self): def f(): assert True self.equivalentEvaluationTest(f) def test_assert_2(self): def f(): try: assert False, "omg" except AssertionError as e: return e.message self.equivalentEvaluationTest(f) def test_assert_3(self): def f(): try: assert False except AssertionError as e: return e.message self.equivalentEvaluationTest(f) def test_assert_4(self): def f():
import os,sys,string import re import types import glob if sys.version[:3]<='2.1': from distutils import util util_get_platform = util.get_platform util.get_platform = lambda : util_get_platform().replace(' ','_') def cyg2win32(path): if sys.platform=='cygwin' and path.startswith('/cygdrive'): path = path[10] + ':' + os.path.normcase(path[11:]) return path # Hooks for colored terminal output. # See also http://www.livinglogic.de/Python/ansistyle def terminal_has_colors(): if sys.platform=='cygwin' and not os.environ.has_key('USE_COLOR'): # Avoid importing curses that causes illegal operation # with a message: # PYTHON2 caused an invalid page fault in # module CYGNURSES7.DLL as 015f:18bbfc28 # Details: Python 2.3.3 [GCC 3.3.1 (cygming special)] # ssh to Win32 machine from debian # curses.version is 2.2 # CYGWIN_98-4.10, release 1.5.7(0.109/3/2)) return 0 if hasattr(sys.stdout,'isatty') and sys.stdout.isatty(): try: import curses curses.setupterm() if (curses.tigetnum("colors") >= 0 and curses.tigetnum("pairs") >= 0 and ((curses.tigetstr("setf") is not None and curses.tigetstr("setb") is not None) or (curses.tigetstr("setaf") is not None and curses.tigetstr("setab") is not None) or curses.tigetstr("scp") is not None)): return 1 except Exception,msg: pass return 0 if terminal_has_colors(): def red_text(s): return '\x1b[31m%s\x1b[0m'%s def green_text(s): return '\x1b[32m%s\x1b[0m'%s def yellow_text(s): return '\x1b[33m%s\x1b[0m'%s def blue_text(s): return '\x1b[34m%s\x1b[0m'%s def cyan_text(s): return '\x1b[35m%s\x1b[0m'%s else: def red_text(s): return s def green_text(s): return s def yellow_text(s): return s def cyan_text(s): return s def blue_text(s): return s class PostponedException: """Postpone exception until an attempt is made to use a resource.""" #Example usage: # try: import foo # except ImportError: foo = PostponedException() __all__ = [] def __init__(self): self._info = sys.exc_info()[:2] self.__doc__ = '%s: %s' % tuple(self._info) def 
__getattr__(self,name): raise self._info[0],self._info[1] def get_path(mod_name,parent_path=None): """ This function makes sure installation is done from the correct directory no matter if it is installed from the command line or from another package or run_setup function. """ if mod_name == '__main__': d = os.path.abspath('.') elif mod_name == '__builtin__': #builtin if/then added by Pearu for use in core.run_setup. d = os.path.dirname(os.path.abspath(sys.argv[0])) else: mod = __import__(mod_name) file = mod.__file__ d = os.path.dirname(os.path.abspath(file)) if parent_path is not None: pd = os.path.abspath(parent_path) if pd==d[:len(pd)]: d = d[len(pd)+1:] return d or '.' def add_local_to_path(mod_name): local_path = get_path(mod_name) sys.path.insert(0,local_path) def add_grandparent_to_path(mod_name): local_path = get_path(mod_name) gp_dir = os.path.split(local_path)[0] sys.path.insert(0,gp_dir) def restore_path(): del sys.path[0] def append_package_dir_to_path(package_name): """ Search for a directory with package_name and append it to PYTHONPATH The local directory is searched first and then the parent directory. """ # first see if it is in the current path # then try parent. If it isn't found, fail silently # and let the import error occur. # not an easy way to clean up after this... import os,sys if os.path.exists(package_name): sys.path.append(package_name) elif os.path.exists(os.path.join('..',package_name)): sys.path.append(os.path.join('..',package_name)) def get_package_config(package_name): """ grab the configuration info from the setup_xxx.py file in a package directory. The package directory is searched from the current directory, so setting the path to the setup.py file directory of the file calling this is usually needed to get search the path correct. 
""" append_package_dir_to_path(package_name) mod = __import__('setup_'+package_name) config = mod.configuration() return config def package_config(primary,dependencies=[]): """ Create a configuration dictionary ready for setup.py from a list of primary and dependent package names. Each package listed must have a directory with the same name in the current or parent working directory. Further, it should have a setup_xxx.py module within that directory that has a configuration() function in it. """ config = [] config.extend([get_package_config(x) for x in primary]) config.extend([get_package_config(x) for x in dependencies]) config_dict = merge_config_dicts(config) return config_dict list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs', 'libraries', 'fortran_libraries', 'headers', 'scripts'] dict_keys = ['package_dir'] def default_config_dict(name = None, parent_name = None, local_path=None): """ Return a configuration dictionary for usage in configuration() function defined in file setup_<name>.py. """ d={} for key in list_keys: d[key] = [] for key in dict_keys: d[key] = {} full_name = dot_join(parent_name,name) if full_name: # XXX: The following assumes that default_config_dict is called # only from setup_<name>.configuration(). # Todo: implement check for this assumption. 
if local_path is None: frame = get_frame(1) caller_name = eval('__name__',frame.f_globals,frame.f_locals) local_path = get_path(caller_name) test_path = os.path.join(local_path,'tests') if 0 and name and parent_name is None: # Useful for local builds d['version'] = get_version(path=local_path) if os.path.exists(os.path.join(local_path,'__init__.py')): d['packages'].append(full_name) d['package_dir'][full_name] = local_path if os.path.exists(test_path): d['packages'].append(dot_join(full_name,'tests')) d['package_dir'][dot_join(full_name,'tests')] = test_path d['name'] = full_name if 0 and not parent_name: # Include scipy_distutils to local distributions for p in ['.','..']: dir_name = os.path.abspath(os.path.join(local_path, p,'scipy_distutils')) if os.path.exists(dir_name): d['packages'].append('scipy_distutils') d['packages'].append('scipy_distutils.command') d['package_dir']['scipy_distutils'] = dir_name break return d def get_frame(level=0): try: return sys._getframe(level+1) except AttributeError: frame = sys.exc_info()[2].tb_frame for i in range(level+1): frame = frame.f_back return frame def merge_config_dicts(config_list): result = default_config_dict() for d in config_list: if not d: continue name = d.get('name',None) if name is not None: result['name'] = name break for d in config_list: if not d: continue for key in list_keys: result[key].extend(d.get(key,[])) for key in dict_keys: result[key].update(d.get(key,{})) return result def dict_append(d,**kws): for k,v in kws.items(): if d.has_key(k): d[k].extend(v) else: d[k] = v def dot_join(*args): return string.join(filter(None,args),'.') def fortran_library_item(lib_name, sources, **attrs ): #obsolete feature """ Helper function for creating fortran_libraries items. 
""" build_info = {'sources':sources} known_attrs = ['module_files','module_dirs', 'libraries','library_dirs'] for key,value in attrs.items(): if key not in known_attrs: raise TypeError,\ "fortran_library_item() got an unexpected keyword "\ "argument '%s'" % key build_info[key] = value return (lib_name,build_info) def get_environ_include_dirs(): #obsolete feature includes = [] if os.environ.has_key('PYTHONINCLUDE'): includes = os.environ['PYTHONINCLUDE'].split(os.pathsep) return includes def get_build_temp(): from distutils.util import get_platform plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) return os.path.join('build','temp'+plat_specifier) def get_build_platlib(): from distutils.util import get_platform plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) return os.path.join('build','lib'+plat_specifier) class SourceGenerator: #obsolete feature """ SourceGenerator func - creates target, arguments are (target,sources)+args sources - target source files args - extra arguments to func If func is None then target must exist and it is touched whenever sources are newer. 
""" def __init__(self,func,target,sources=[],*args): if not os.path.isabs(target) and func is not None: g = sys._getframe(1).f_globals fn = g.get('__file__',g.get('__name__')) if fn=='__main__': fn = sys.argv[0] caller_dir = os.path.abspath(os.path.dirname(fn)) prefix = os.path.commonprefix([caller_dir,os.getcwd()]) target_dir = caller_dir[len(prefix)+1:] target = os.path.join(get_build_temp(),target_dir,target) self.func = func self.target = target self.sources = sources self.args = args def __str__(self): return str(self.target) def generate(self): from distutils import dep_util,dir_util if dep_util.newer_group(self.sources,self.target): print 'Running generate',self.target dir_util.mkpath(os.path.dirname(self.target),verbose=1) if self.func is None: # Touch target os.utime(self.target,None) else: self.func(self.target,self.sources,*self.args) assert os.path.exists(self.target),`self.target` return self.target def __call__(self, extension, src_dir): return self.generate() class SourceFilter: #obsolete feature """ SourceFilter func - implements criteria to filter sources sources - source files args - extra arguments to func """ def __init__(self,func,sources,*args): self.func = func self.sources = sources self.args = args def filter(self): return self.func(self.sources,*self.args) def __call__(self, extension, src_dir): return self.filter() ## #XXX need support for .C that is also C++ cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)',re.I).match def get_f90_modules(source): """ Return a list of Fortran f90 module names that given source file defines. 
""" if not f90_ext_match(source): return [] modules = [] f = open(source,'r') f_readlines = getattr(f,'xreadlines',f.readlines) for line in f_readlines(): m = f90_module_name_match(line) if m: name = m.group('name') modules.append(name) # break # XXX can we assume that there is one module per file? f.close() return modules def all_strings(lst): """ Return True if all items in lst are string objects. """ for item in lst: if type(item) is not types.StringType: return 0 return 1 def has_f_sources(sources): """ Return True if sources contains Fortran files """ for source in sources: if fortran_ext_match(source): return 1 return 0 def has_cxx_sources(sources): """ Return True if sources contains C++ files """ for source in sources: if cxx_ext_match(source): return 1 return 0 def filter_sources(sources): """ Return four lists of filenames containing C, C++, Fortran, and Fortran 90 module sources, respectively. """ c_sources = [] cxx_sources = [] f_sources = [] fmodule_sources = [] for source in sources: if fortran_ext_match(source): modules = get_f90_modules(source) if modules: fmodule_sources.append(source) else: f_sources.append(source) elif cxx_ext_match(source): cxx_sources.append(source) else: c_sources.append(source) return c_sources, cxx_sources, f_sources, fmodule_sources def compiler_to_string(compiler): props = [] mx = 0 keys = compiler.executables.keys() for key in ['version','libraries','library_dirs', 'object_switch','compile_switch', 'include_dirs','define','undef','rpath','link_objects']: if key not in keys: keys.append(key) for key in keys: if hasattr(compiler,key): v = getattr(compiler, key) mx = max(mx,len(key)) props.append((key,`v`)) lines = [] format = '%-' +`mx+1`+ 's = %s' for prop in props: lines.append(format % prop) return '\n'.join(lines) def _get_dirs_with_init((packages,path), dirname, names): """Internal: used by get_subpackages.""" for bad in ['.svn','build']: if bad in names:
<gh_stars>10-100 """Automated various tests for the blender nif scripts.""" # ***** BEGIN LICENSE BLOCK ***** # # BSD License # # Copyright (c) 2005-2011, NIF File Format Library and Tools # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the NIF File Format Library and Tools project may not be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # ***** END LICENSE BLOCK ***** from __future__ import with_statement from contextlib import closing from itertools import izip import Blender from nif_test import TestSuite from pyffi.formats.nif import NifFormat # some tests to import and export nif files class VariaTestSuite(TestSuite): def isTwoSided(self, b_mesh): return b_mesh.data.show_double_sided def hasStencil(self, nif_geom): return any(isinstance(prop, NifFormat.NiStencilProperty) for prop in nif_geom.properties) def run(self): self.test_bounding_box() self.test_bounding_box_bsbound() self.test_stencil() self.test_alpha() self.test_name_ends_with_null() self.test_unsupported_root() self.test_packed_textures() self.test_fo3_texture_slots() self.test_fo3_emit() self.test_fo3_emit2() self.test_uv_controller() self.test_mw_nifxnifkf() self.test_anim_buffer_out_of_range() self.test_ob_animsequencename() def test_bounding_box(self): """Bounding box test.""" # import nif_import = self.test( filename='test/nif/bounding_box.nif') b_bbox = Blender.Object.Get("Bounding Box") # test stuff assert(b_bbox.display_bounds_type == 'BOX') # export nif_export = self.test( filename='test/nif/_bounding_box.nif', config=dict(game = 'MORROWIND'), selection = ['Bounding Box']) # test stuff... 
bbox = nif_export.root_blocks[0].children[0] assert(bbox.has_bounding_box) def test_bounding_box_bsbound(self): """Oblivion bounding box (BSBound) test.""" def check_bsbound(root_blocks): bsbound = root_blocks[0].extra_data_list[0] assert(isinstance(bsbound, NifFormat.BSBound)) assert(bsbound.name == "BBX") assert(bsbound.next_extra_data is None) # using assert_equal because we compare floats self.assert_equal(bsbound.center.x, 0.0) self.assert_equal(bsbound.center.y, 0.0) self.assert_equal(bsbound.center.z, 66.2201843262) self.assert_equal(bsbound.dimensions.x, 23.0976696014) self.assert_equal(bsbound.dimensions.y, 17.6446208954) self.assert_equal(bsbound.dimensions.z, 66.2201843262) # import with closing(open('test/nif/bounding_box_bsbound.nif')) as stream: self.info("Reading test/nif/bounding_box_bsbound.nif") nif = NifFormat.Data() nif.read(stream) check_bsbound(nif.roots) nif_import = self.test( filename='test/nif/bounding_box_bsbound.nif') b_bbox = Blender.Object.Get("BSBound") # test stuff assert(b_bbox.display_bounds_type == 'BOX') # export nif_export = self.test( filename='test/nif/_bounding_box_bsbound.nif', config=dict(game = 'OBLIVION'), selection = ['BSBound']) # test stuff... 
with closing(open('test/nif/_bounding_box_bsbound.nif')) as stream: self.info("Reading test/nif/_bounding_box_bsbound.nif") nif = NifFormat.Data() nif.read(stream) check_bsbound(nif.roots) def test_stencil(self): # stencil test self.test( filename = 'test/nif/stenciltest.nif') assert(self.isTwoSided(Blender.Object.Get("Stencil"))) assert(not self.isTwoSided(Blender.Object.Get("NoStencil"))) nif_export = self.test( filename = 'test/nif/_stenciltest.nif', config = dict(game = 'OBLIVION'), selection = ['NoStencil', 'Stencil']) nif_stencil = nif_export.root_blocks[0].find( block_type = NifFormat.NiGeometry, block_name = "Stencil") nif_nostencil = nif_export.root_blocks[0].find( block_type = NifFormat.NiGeometry, block_name = "NoStencil") assert(self.hasStencil(nif_stencil)) assert(not self.hasStencil(nif_nostencil)) def test_alpha(self): # alpha property test self.test( filename = 'test/nif/alphatest.nif') alpha_obj = Blender.Object.Get("Alpha") # check Z transparency assert(alpha_obj.data.materials[0].mode & Blender.Material.Modes.ZTRANSP) # check that transparency was exported alpha_obj_alpha = alpha_obj.data.materials[0].alpha assert(alpha_obj_alpha < 0.99) nif_export = self.test( filename = 'test/nif/_alphatest.nif', config = dict(game = 'OBLIVION'), selection = ['Alpha']) nif_alpha = nif_export.root_blocks[0].find( block_type = NifFormat.NiGeometry, block_name = "Alpha") nif_alpha_mat = nif_alpha.find( block_type = NifFormat.NiMaterialProperty) nif_alpha_alpha = nif_alpha.find( block_type = NifFormat.NiAlphaProperty) assert(nif_alpha_alpha.flags == 0x12ED) assert(nif_alpha_mat.alpha == alpha_obj_alpha) def test_name_ends_with_null(self): # name ends with null test self.test( filename = 'test/nif/name_ends_with_null.nif') obj = Blender.Object.Get("nullatend") # exists: null removed def test_unsupported_root(self): # unsupported root block (just check that it does not raise an # exception) self.test( filename='test/nif/unsupported_root.nif', next_layer=True) def 
test_packed_textures(self): """Check that textures: * raise an error if they have no filename * if they are packed, the filename is used and they are not packed in the nif. """ # create a mesh self.info("creating mesh") mesh_data = Blender.Mesh.Primitives.Cube() mesh_obj = self.context.scene.objects.new(mesh_data, "packed_tex_test") # add a texture self.info("creating material and texture") mat = bpy.data.materials.new("packed_tex_mat") tex = bpy.ops.texture.new() tex.name = "packed_tex_tex" tex.setType("Image") # do not set an image for now... export must fail mat.setTexture(0, tex, 'UV', FIXME.use_map_color_diffuse) mesh_data.materials += [mat] mesh_data.addUVLayer("packed_tex_uv") try: nif_export = self.test( filename='test/nif/_packedtexturestest1.nif', config=dict(game = 'FALLOUT_3'), selection=['packed_tex_test'], next_layer=False) except NifExportError, e: if str(e).startswith("image type texture has no file loaded"): pass else: raise ValueError( f"no texture loaded but wrong exception raised: " f"{e}") raise ValueError( "no texture loaded but no exception raised") # now set the image image = Blender.Image.New("test/nif/stub.tga", 1, 1, 24) # stub image tex.setImage(image) # this should work nif_export = self.test( filename='test/nif/_packedtexturestest2.nif', config=dict(game = 'FALLOUT_3'), selection=['packed_tex_test'], next_layer=False) # now pack the image image.pack() # this should work too - although with a warning nif_export = self.test( filename='test/nif/_packedtexturestest3.nif', config=dict(game = 'FALLOUT_3'), selection=['packed_tex_test'], next_layer=True) def test_fo3_texture_slots(self): self.test( filename = 'test/nif/fo3_textureslots.nif') # check textures (this example has all supported slots) obj = Blender.Object.Get("FO3TextureSlots") mat = obj.data.materials[0] mtex_diff = None mtex_norm = False mtex_glow = False for mtex in mat.textures: # skip empty ones if mtex is None: continue # check that mapping input is UV 
assert(mtex.texture_coords == 'UV') # check mapping output if mtex.use_map_color_diffuse: if mtex_diff: raise ValueError("more than one diffuse texture!") mtex_diff = mtex if mtex.use_map_normal: if mtex_norm: raise ValueError("more than one normal texture!") mtex_norm = mtex if mtex.use_map_emit: if mtex_glow: raise ValueError("more than one glow texture!") mtex_glow = mtex if not mtex_diff: raise ValueError("missing diffuse texture!") if not mtex_norm: raise ValueError("missing normal texture!") if not mtex_glow: raise ValueError("missing glow texture!") # test export too nif_export = self.test( filename='test/nif/_fo3_textureslots.nif', config=dict(game='FALLOUT_3'), selection=['FO3TextureSlots'], next_layer=True) # check presence of the slots nif_textureset = nif_export.root_blocks[0].find( block_type = NifFormat.BSShaderTextureSet) assert(nif_textureset.num_textures == 6) assert(nif_textureset.textures[0] == "stub.dds") assert(nif_textureset.textures[1] == "stub_n.dds") assert(nif_textureset.textures[2] == "stub_g.dds") assert(nif_textureset.textures[3] == "") assert(nif_textureset.textures[4] == "") assert(nif_textureset.textures[5] == "") def test_mw_nifxnifkf(self): """Test the nif xnif kf export option.""" def check_ctrl_flags(root): # test the kfctrl flags to be active + clamp for ctrl in root.get_global_iterator(): if not isinstance(ctrl, NifFormat.NiTimeController): continue if ctrl.flags != 12: raise ValueError("bad value for controller flags") # import a nif with animation dance = self.test( filename = 'test/nif/mw/dance.nif') check_ctrl_flags(dance.root_blocks[0]) # export as nif + xnif + kf self.test( filename='test/nif/mw/_testnifxnifkf.nif', config=dict(game='MORROWIND', animation='ALL_NIF_XNIF_XKF'), selection=['Dance'], next_layer=True) # check that these files are present, and check some of their properties with closing(open('test/nif/mw/_testnifxnifkf.nif')) as stream: self.info("Reading test/nif/mw/_testnifxnifkf.nif") nif = NifFormat.Data() 
nif.read(stream) with closing(open('test/nif/mw/x_testnifxnifkf.nif')) as stream: self.info("Reading test/nif/mw/x_testnifxnifkf.nif") xnif = NifFormat.Data() xnif.read(stream) with closing(open('test/nif/mw/x_testnifxnifkf.kf')) as stream: self.info("Reading test/nif/mw/x_testnifxnifkf.kf") xkf = NifFormat.Data() xkf.read(stream) # check root blocks assert(len(nif.roots) == 1) assert(len(xnif.roots) == 1) assert(len(xkf.roots) == 1) assert(isinstance(nif.roots[0], NifFormat.NiNode)) assert(isinstance(xnif.roots[0], NifFormat.NiNode)) assert(isinstance(xkf.roots[0], NifFormat.NiSequenceStreamHelper)) # compare text keys nif_textkeys = nif.roots[0].extra_data xkf_textkeys = xkf.roots[0].extra_data assert(isinstance(nif_textkeys, NifFormat.NiTextKeyExtraData)) assert(isinstance(xkf_textkeys, NifFormat.NiTextKeyExtraData)) #assert(nif_textkeys == xkf_textkeys) # ... up to extra data chain # check that xkf has no target set in keyframe controller ctrl = xkf.roots[0].controller while ctrl: if ctrl.target is not None: raise ValueError( "NiKeyframeController target should be None in xkf") ctrl = ctrl.next_controller # check controller flags check_ctrl_flags(xkf.roots[0]) def test_fo3_emit(self): def check_emit(nif): nif_mat = nif.root_blocks[0].find( block_type = NifFormat.NiMaterialProperty) self.assert_equal(nif_mat.emissive_color.r, 0.123) self.assert_equal(nif_mat.emissive_color.g, 0.456) self.assert_equal(nif_mat.emissive_color.b, 0.789) self.assert_equal(nif_mat.emit_multi, 3.82) # loading the test nif # (this nif has emit color 1,0,1 and emitmulti 3) # stencil test nif = self.test(filename='test/nif/fo3/test_emit.nif') # double check that the nif itself has the claimed values check_emit(nif) # check imported values obj = Blender.Object.Get("TestEmit") self.assert_equal(obj.data.materials[0].rgbCol[0], 0.123) self.assert_equal(obj.data.materials[0].rgbCol[1], 0.456) self.assert_equal(obj.data.materials[0].rgbCol[2], 0.789) 
self.assert_equal(obj.data.materials[0].emit, 0.382) # emitmulti divided by 10 # write the file nif = self.test( filename='test/nif/fo3/_test_emit.nif', config=dict(game = 'FALLOUT_3'), selection=['TestEmit'], next_layer=True) # check that the correct values were exported check_emit(nif) def test_fo3_emit2(self): """Check that emissive and multi are preserved also when they are zero and one. """ def check_emit2(nif): nif_mat = nif.root_blocks[0].find( block_type = NifFormat.NiMaterialProperty) self.assert_equal(nif_mat.emissive_color.r, 0.0) self.assert_equal(nif_mat.emissive_color.g, 0.0) self.assert_equal(nif_mat.emissive_color.b, 0.0) self.assert_equal(nif_mat.emit_multi, 1.0) # loading the test nif # (this nif has emit color 1,0,1 and emitmulti 3) # stencil test nif = self.test(filename='test/nif/fo3/test_emit2.nif') # double check that the nif itself has
"""Sets up a a basic test DB for external (non-test suite) testing. Adds Users, BTSs, Numbers, Subscribers, UsageEvents and SystemEvents. -- all this should then be visible in a local dashboard. Make sure you've first run fabric's init_dev to properly setup the db and its migrations. Then you can login with the test username and pw as defined below. Usage: python manage.py setup_test_db To reset the local test db: python manage.py flush python manage.py migrate endagaweb python manage.py migrate authtoken Or to really start afresh: vagrant ssh db sudo -u postgres psql drop database endagaweb_dev; create database endagaweb_dev; exit vagrant ssh web python manage.py migrate Copyright (c) 2016-present, Facebook, Inc. All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. An additional grant of patent rights can be found in the PATENTS file in the same directory. """ import datetime import json import random import sys import uuid from django.core.management.base import BaseCommand from django.db import transaction from django.db.utils import DataError from django.utils import timezone import pytz from endagaweb import stats_app from endagaweb.models import BTS from endagaweb.models import ClientRelease from endagaweb.models import Destination from endagaweb.models import Network from endagaweb.models import Number from endagaweb.models import Subscriber from endagaweb.models import SystemEvent from endagaweb.models import TimeseriesStat from endagaweb.models import Transaction from endagaweb.models import UsageEvent from endagaweb.models import User from rest_framework.authtoken.models import Token from django.db.models.signals import post_save from django.db import IntegrityError from django.contrib.auth.models import User, Permission, ContentType from guardian.shortcuts import assign_perm, get_perms, remove_perm, \ get_users_with_perms from endagaweb.models import UserProfile 
class Command(BaseCommand):
    """A custom management command.

    As per the docs:
    docs.djangoproject.com/en/1.7/howto/custom-management-commands/
    """

    # Shown by `python manage.py help setup_test_db`.
    help = 'sets up a test db -- see management/commands for more'

    def handle(self, *args, **options):
        """Entry point: seed client releases, create one admin user with
        towers/subscribers/activity, and one bare user with no activity."""
        # first create some client releases
        for channel in ['beta', 'stable']:
            cr = ClientRelease(date=datetime.datetime(year=1984, month=6,
                                                      day=24, hour=21,
                                                      tzinfo=pytz.utc),
                               version='0.0.1', channel=channel)
            cr.save()
        # Add two users with a lot of activity, towers and subs. Note that the
        # first sub, unlike the second one, has no 'endaga_version.'
        self.create_admin("myadmin", "myadmin", 2, "number.telecom.permanent",
                          "63917555", '0.3.26')
        # Add one user with no such activity.
        username = 'newuser'
        sys.stdout.write('creating user "%s"..\n' % username)
        # NOTE(review): "<EMAIL>" and '<PASSWORD>' below look like redaction
        # placeholders -- "<EMAIL>" % username raises TypeError at runtime
        # ("not all arguments converted"); presumably the original was a
        # "%s@..." format string and a literal password. Confirm and restore.
        user = User(username=username, email="<EMAIL>" % username)
        user.set_password('<PASSWORD>')
        user.save()

    def create_admin(self, username, password, usernum, kind, prefix,
                     endaga_version):
        """Create a superuser/staff account plus its network, role users and
        towers (method body continues past this chunk)."""
        # Create a user.
        sys.stdout.write('Creating admin user: %s %s %s..\n' % (
            username, password, usernum))
        # NOTE(review): same "<EMAIL>" redaction placeholder as in handle().
        user = User(username=username, email="<EMAIL>" % username)
        user.set_password(password)
        user.is_superuser = True
        user.is_staff = True
        user.save()
        # Get user profile and add some credit.
        sys.stdout.write('setting user profile..\n')
        # Every User gets a UserProfile via the post_save hook; fetch it so
        # we can rename its auto-created network.
        user_profile = UserProfile.objects.get(user=user)
        user_profile.save()
        network = user_profile.network
        network.name = "%s Network Test " % username.upper()
        network.save()
        # Create role users
        self.create_role_users(network)
        self.create_towers(network, usernum, kind, prefix, endaga_version)

    def create_role_users(self, network):
        """Create one user per role (netadmin/analyst/loader/partner) on the
        given network and assign that role's network permissions."""
        sys.stdout.write('Creating role users for : %s ..\n' % (
            network.name))
        # Username -> human-readable role name.
        role_usernames = {
            'netadmin': 'Network Admin',
            'analyst': 'Business Analyst',
            'loader': 'Loader',
            'partner': 'Partner'
        }
        # Permission codenames granted to each role. 'Network Admin' is
        # deliberately absent from roles_and_permissions below, so it falls
        # through to the else branch and receives every permission on the
        # 'network' content type.
        business_analyst = (
            'view_activity', 'view_bts', 'view_denomination', 'view_graph',
            'view_network', 'view_notification', 'view_report',
            'view_subscriber',
        )
        loader = (
            'view_activity', 'view_bts', 'view_denomination', 'view_graph',
            'view_network', 'view_notification', 'view_report',
            'view_subscriber', 'adjust_credit', 'send_sms',
            'edit_subscriber',
        )
        partner = (
            'view_activity', 'view_bts', 'view_denomination', 'view_graph',
            'view_network', 'view_notification', 'view_report',
            'view_subscriber', 'send_sms', 'edit_subscriber',
        )
        roles_and_permissions = {
            'Business Analyst': business_analyst,
            'Loader': loader,
            'Partner': partner
        }
        # Create a user for each role.
        for key, value in role_usernames.iteritems():  # NOTE: Python 2 only
            username = key
            # NOTE(review): '<PASSWORD>' and '<EMAIL>' are redaction
            # placeholders (the email line is also missing its opening
            # quote) -- as written these two statements are not valid
            # Python; restore the original literals before running.
            password = <PASSWORD>
            user_role = value
            email = <EMAIL>" % (
                username, network.name.replace(" ", "").lower())
            sys.stdout.write('Creating role : %s..\n' % (email))
            # Disconnect the profile-creation hook: the profile is created
            # explicitly below so its network/role can be set.
            post_save.disconnect(UserProfile.new_user_hook, sender=User)
            try:
                with transaction.atomic():
                    user = User(username=username)
                    if user_role == 'Network Admin':
                        user.is_staff = True
                    else:
                        user.is_staff = user.is_superuser = False
                    user.email = email
                    user.set_password(password)
                    user.save()
                    # creates Token that BTSs on the network use to
                    # authenticate
                    Token.objects.create(user=user)
                    sys.stdout.write('Setting user profile..\n')
                    user_profile = UserProfile.objects.create(user=user)
                    user_network = Network.objects.get(id=network.id)
                    # Assign permissions for the given role; unknown roles
                    # (Network Admin) get all 'network' permissions.
                    content_type = ContentType.objects.filter(
                        app_label='endagaweb', model='network').values_list(
                        'id', flat=True)[0]
                    permission = Permission.objects.filter(
                        content_type=content_type)
                    role_permission = []
                    if user_role in roles_and_permissions.keys():
                        role_permission = permission.filter(
                            codename__in=roles_and_permissions[user_role])
                    else:
                        for i in permission:
                            role_permission.append(i)
                    for i in role_permission:
                        assign_perm(i.codename, user, user_network)
                    # Set last network as default network for User
                    user_profile.network = user_network
                    user_profile.role = user_role
                    user_profile.save()
            except IntegrityError:
                # User already exists: reconnect the hook and report, but
                # keep going with the remaining roles.
                post_save.connect(UserProfile.new_user_hook, sender=User)
                sys.stdout.write('Unable to create a user role!')
            # Always restore the hook for subsequent User saves.
            post_save.connect(UserProfile.new_user_hook, sender=User)

    def create_towers(self, network, usernum, kind, prefix, endaga_version):
        """Create towers, stats, events and subscribers for the network
        (method body continues past this chunk)."""
        # Add some towers.
        towers_to_add = random.randint(4, 7)
        added_towers = []
        print 'Adding %s towers..'
% towers_to_add for index in range(towers_to_add): nickname = None if random.random() < 0.5: nickname = 'Test Tower %s' % index bts = BTS(uuid=str(uuid.uuid4()), nickname=nickname, secret='mhm', inbound_url='http://localhost:8090', network=network) added_towers.append(bts) # Set the last_active time and uptime randomly. random_seconds = random.randint(0, 24 * 60 * 60) random_date = (timezone.now() - datetime.timedelta(seconds=random_seconds)) bts.last_active = random_date bts.uptime = random.randint(24 * 60 * 60, 100 * 24 * 60 * 60) bts.status = random.choice(['no-data', 'active', 'inactive']) bts.save() # Set the metapackage version. This has to be done after initially # creating the BTS or the post-create hook will override. if endaga_version is not None: endaga_version = bts.sortable_version(endaga_version) versions = { 'endaga_version': endaga_version, 'freeswitch_version': None, 'gsm_version': None, 'python_endaga_core_version': None, 'python_gsm_version': None, } bts.package_versions = json.dumps(versions) bts.save() # Add some TimeseriesStats for each tower. stats_to_add = random.randint(100, 1000) print 'Adding %s TimeseriesStats..' % stats_to_add for _ in range(stats_to_add): date = ( timezone.now() - datetime.timedelta( seconds=random.randint( 0, 7 * 24 * 60 * 60))) key = random.choice(stats_app.views.TIMESERIES_STAT_KEYS) if key in ('noise_rssi_db', 'noise_ms_rssi_target_db'): value = random.randint(-75, -20) elif 'percent' in key: value = random.randint(0, 100) elif 'bytes' in key: value = random.randint(0, 10000) else: value = random.randint(0, 10) stat = TimeseriesStat(key=key, value=value, date=date, bts=bts, network=network) stat.save() # Add some SystemEvents for each tower (either small or large # number) number_of_events = [0, 1, 2, 5, 18, 135, 264] events_to_add = random.choice(number_of_events) print 'adding %s SystemEvents..' % events_to_add for _ in range(events_to_add): # Actual events should be in order. 
But we should support # out-of-order events just in case date = ( timezone.now() - datetime.timedelta( seconds=random.randint( 0, 7 * 24 * 60 * 60))) event = SystemEvent(date=date, bts=bts, type=random.choice(['bts up', 'bts down'])) event.save() # Make at least one BTS active recently. bts.last_active = timezone.now() bts.status = 'active' bts.save() # Make one BTS in the no-data state. bts = BTS(uuid=str(uuid.uuid4()), nickname='No-data tower', secret='z', inbound_url='http://localhost:5555', network=network, package_versions=json.dumps(versions)) bts.save() # Add some subscribers. sys.stdout.write("adding subscribers and numbers..\n") added_subscribers = [] for index in range(random.randint(3, 20)): imsi = "IMSI%d999900000000%s" % (usernum, index) if random.random() < 0.5: name = "test name %s" % index else: name = '' balance = random.randint(40000000, 60000000) state = "active" bts = BTS.objects.filter( network=network).order_by('?').first() subscriber = Subscriber( network=network, imsi=imsi, name=name, balance=balance, state=state, bts=bts, last_camped=bts.last_active) # , role=role) subscriber.save() added_subscribers.append(subscriber) # And attach some numbers. for _ in range(random.randint(1, 5)): msisdn = int(prefix + str(random.randint(1000, 9999))) number = Number( number=msisdn, state="inuse", network=network, kind=kind, subscriber=subscriber) number.save() # Add one last subscriber so we have at least one sub with no activity. 
imsi = "IMSI%d8888000000000" % usernum name = 'test name (no activity)' subscriber = Subscriber(network=network, imsi=imsi, bts=bts, name=name, balance=1000, state='active') subscriber.save() imsi = "IMSI%d1888000000000" % usernum name = 'test name (no activity)' subscriber2 = Subscriber(network=network, imsi=imsi, bts=bts, name=name, balance=1000, state='first_expired') subscriber2.save() imsi = "IMSI%d8848000000000" % usernum name = 'test name (no activity)' subscriber3 = Subscriber(network=network, imsi=imsi, bts=bts, name=name, balance=1000, state='expired') subscriber3.save() imsi = "IMSI%d8828000000000" % usernum name = 'test name (no activity)' subscriber4 = Subscriber(network=network, imsi=imsi, bts=bts, name=name, balance=1000, state='blocked') subscriber4.save() # Add some UsageEvents attached to random subscribers. events_to_add = random.randint(100, 2000) sys.stdout.write("adding %s usage events..\n" % events_to_add) all_destinations = list(Destination.objects.all()) with transaction.atomic(): for _ in range(events_to_add): random_sub = random.choice(added_subscribers) time_delta = datetime.timedelta( minutes=random.randint(0, 60000)) date = (timezone.now() - time_delta) kinds = [ ('outside_sms', 10000), ('incoming_sms', 2000), ('local_sms', 4000), ('local_recv_sms', 1000), ('free_sms', 0), ('error_sms', 0), ('outside_call', 8000), ('incoming_call', 3000), ('local_call', 2000), ('local_recv_call', 1000), ('free_call', 0), ('error_call', 0), ('gprs', 5000), ('transfer', 2000), ('add-money', 43333), ('Provisioned', 1000), ('deactivate_number', 4000), ] (kind, tariff) = random.choice(kinds) to_number, billsec, up_bytes, call_duration = 4 * [None] from_number, down_bytes, timespan, change = 4 * [None] if 'call' in kind: billsec = random.randint(0, 120) change = tariff * billsec call_duration = billsec + random.randint(0, 10) to_number = str(random.randint(1234567890, 9876543210)) from_number = str(random.randint(1234567890, 9876543210)) reason = '%s sec 
call to %s (%s)' % (billsec, to_number, kind) elif 'sms' in kind: change
#!/usr/bin/env python2 # Copyright (c) 2016 The Zcash developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.authproxy import JSONRPCException from test_framework.util import assert_equal, initialize_chain_clean, \ start_nodes, connect_nodes_bi, stop_node import sys import time import timeit from decimal import Decimal class WalletProtectCoinbaseTest (BitcoinTestFramework): def setup_chain(self): print("Initializing test directory "+self.options.tmpdir) initialize_chain_clean(self.options.tmpdir, 4) # Start nodes with -regtestprotectcoinbase to set fCoinbaseMustBeProtected to true. def setup_network(self, split=False): self.nodes = start_nodes(4, self.options.tmpdir, extra_args=[['-regtestprotectcoinbase', '-debug=zrpcunsafe']] * 4 ) connect_nodes_bi(self.nodes,0,1) connect_nodes_bi(self.nodes,1,2) connect_nodes_bi(self.nodes,0,2) connect_nodes_bi(self.nodes,0,3) self.is_network_split=False self.sync_all() # Returns txid if operation was a success or None def wait_and_assert_operationid_status(self, myopid, in_status='success', in_errormsg=None): print('waiting for async operation {}'.format(myopid)) opids = [] opids.append(myopid) timeout = 300 status = None errormsg = None txid = None for x in xrange(1, timeout): results = self.nodes[0].z_getoperationresult(opids) if len(results)==0: time.sleep(1) else: status = results[0]["status"] if status == "failed": errormsg = results[0]['error']['message'] elif status == "success": txid = results[0]['result']['txid'] break print('...returned status: {}'.format(status)) assert_equal(in_status, status) if errormsg is not None: assert(in_errormsg is not None) assert_equal(in_errormsg in errormsg, True) print('...returned error: {}'.format(errormsg)) return txid def run_test (self): print "Mining blocks..." 
self.nodes[0].generate(4) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 40) assert_equal(walletinfo['balance'], 0) self.sync_all() self.nodes[1].generate(101) self.sync_all() assert_equal(self.nodes[0].getbalance(), 40) assert_equal(self.nodes[1].getbalance(), 10) assert_equal(self.nodes[2].getbalance(), 0) assert_equal(self.nodes[3].getbalance(), 0) # Send will fail because we are enforcing the consensus rule that # coinbase utxos can only be sent to a zaddr. errorString = "" try: self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Coinbase funds can only be sent to a zaddr" in errorString, True) # Prepare to send taddr->zaddr mytaddr = self.nodes[0].getnewaddress() myzaddr = self.nodes[0].z_getnewaddress() # Node 3 will test that watch only address utxos are not selected self.nodes[3].importaddress(mytaddr) recipients= [{"address":myzaddr, "amount": Decimal('1')}] myopid = self.nodes[3].z_sendmany(mytaddr, recipients) errorString="" status = None opids = [myopid] timeout = 10 for x in xrange(1, timeout): results = self.nodes[3].z_getoperationresult(opids) if len(results)==0: time.sleep(1) else: status = results[0]["status"] errorString = results[0]["error"]["message"] break assert_equal("failed", status) assert_equal("no UTXOs found for taddr from address" in errorString, True) stop_node(self.nodes[3], 3) self.nodes.pop() # This send will fail because our wallet does not allow any change when protecting a coinbase utxo, # as it's currently not possible to specify a change address in z_sendmany. 
recipients = [] recipients.append({"address":myzaddr, "amount":Decimal('1.23456789')}) errorString = "" myopid = self.nodes[0].z_sendmany(mytaddr, recipients) opids = [] opids.append(myopid) timeout = 10 status = None for x in xrange(1, timeout): results = self.nodes[0].z_getoperationresult(opids) if len(results)==0: time.sleep(1) else: status = results[0]["status"] errorString = results[0]["error"]["message"] # Test that the returned status object contains a params field with the operation's input parameters assert_equal(results[0]["method"], "z_sendmany") params =results[0]["params"] assert_equal(params["fee"], Decimal('0.0001')) # default assert_equal(params["minconf"], Decimal('1')) # default assert_equal(params["fromaddress"], mytaddr) assert_equal(params["amounts"][0]["address"], myzaddr) assert_equal(params["amounts"][0]["amount"], Decimal('1.23456789')) break assert_equal("failed", status) assert_equal("wallet does not allow any change" in errorString, True) # This send will succeed. We send two coinbase utxos totalling 20.0 less a fee of 0.00010000, with no change. 
recipients = [] recipients.append({"address":myzaddr, "amount": Decimal('20.0') - Decimal('0.0001')}) myopid = self.nodes[0].z_sendmany(mytaddr, recipients) mytxid = self.wait_and_assert_operationid_status(myopid) self.sync_all() self.nodes[1].generate(1) self.sync_all() # Verify that debug=zrpcunsafe logs params, and that full txid is associated with opid logpath = self.options.tmpdir+"/node0/regtest/debug.log" logcounter = 0 with open(logpath, "r") as myfile: logdata = myfile.readlines() for logline in logdata: if myopid + ": z_sendmany initialized" in logline and mytaddr in logline and myzaddr in logline: assert_equal(logcounter, 0) # verify order of log messages logcounter = logcounter + 1 if myopid + ": z_sendmany finished" in logline and mytxid in logline: assert_equal(logcounter, 1) logcounter = logcounter + 1 assert_equal(logcounter, 2) # check balances (the z_sendmany consumes 3 coinbase utxos) resp = self.nodes[0].z_gettotalbalance() assert_equal(Decimal(resp["transparent"]), Decimal('20.0')) assert_equal(Decimal(resp["private"]), Decimal('19.9999')) assert_equal(Decimal(resp["total"]), Decimal('39.9999')) # A custom fee of 0 is okay. Here the node will send the note value back to itself. 
recipients = [] recipients.append({"address":myzaddr, "amount": Decimal('19.9999')}) myopid = self.nodes[0].z_sendmany(myzaddr, recipients, 1, Decimal('0.0')) mytxid = self.wait_and_assert_operationid_status(myopid) self.sync_all() self.nodes[1].generate(1) self.sync_all() resp = self.nodes[0].z_gettotalbalance() assert_equal(Decimal(resp["transparent"]), Decimal('20.0')) assert_equal(Decimal(resp["private"]), Decimal('19.9999')) assert_equal(Decimal(resp["total"]), Decimal('39.9999')) # convert note to transparent funds recipients = [] recipients.append({"address":mytaddr, "amount":Decimal('10.0')}) myopid = self.nodes[0].z_sendmany(myzaddr, recipients) mytxid = self.wait_and_assert_operationid_status(myopid) assert(mytxid is not None) self.sync_all() # check that priority of the tx sending from a zaddr is not 0 mempool = self.nodes[0].getrawmempool(True) assert(Decimal(mempool[mytxid]['startingpriority']) >= Decimal('1000000000000')) self.nodes[1].generate(1) self.sync_all() # check balances resp = self.nodes[0].z_gettotalbalance() assert_equal(Decimal(resp["transparent"]), Decimal('30.0')) assert_equal(Decimal(resp["private"]), Decimal('9.9998')) assert_equal(Decimal(resp["total"]), Decimal('39.9998')) # z_sendmany will return an error if there is transparent change output considered dust. # UTXO selection in z_sendmany sorts in ascending order, so smallest utxos are consumed first. # At this point in time, unspent notes all have a value of 10.0 and standard z_sendmany fee is 0.0001. 
recipients = [] amount = Decimal('10.0') - Decimal('0.00010000') - Decimal('0.00000001') # this leaves change at 1 zatoshi less than dust threshold recipients.append({"address":self.nodes[0].getnewaddress(), "amount":amount }) myopid = self.nodes[0].z_sendmany(mytaddr, recipients) self.wait_and_assert_operationid_status(myopid, "failed", "Insufficient transparent funds, have 10.00, need 0.00000053 more to avoid creating invalid change output 0.00000001 (dust threshold is 0.00000054)") # Send will fail because send amount is too big, even when including coinbase utxos errorString = "" try: self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 99999) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Insufficient funds" in errorString, True) # z_sendmany will fail because of insufficient funds recipients = [] recipients.append({"address":self.nodes[1].getnewaddress(), "amount":Decimal('10000.0')}) myopid = self.nodes[0].z_sendmany(mytaddr, recipients) self.wait_and_assert_operationid_status(myopid, "failed", "Insufficient transparent funds, have 10.00, need 10000.0001") myopid = self.nodes[0].z_sendmany(myzaddr, recipients) self.wait_and_assert_operationid_status(myopid, "failed", "Insufficient protected funds, have 9.9998, need 10000.0001") # Send will fail because of insufficient funds unless sender uses coinbase utxos try: self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 21) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Insufficient funds, coinbase funds can only be spent after they have been sent to a zaddr" in errorString, True) # Verify that mempools accept tx with joinsplits which have at least the default z_sendmany fee. # If this test passes, it confirms that issue #1851 has been resolved, where sending from # a zaddr to 1385 taddr recipients fails because the default fee was considered too low # given the tx size, resulting in mempool rejection. 
errorString = '' recipients = [] num_t_recipients = 2500 amount_per_recipient = Decimal('0.00000546') # dust threshold # Note that regtest chainparams does not require standard tx, so setting the amount to be # less than the dust threshold, e.g. 0.00000001 will not result in mempool rejection. start_time = timeit.default_timer() for i in xrange(0,num_t_recipients): newtaddr = self.nodes[2].getnewaddress() recipients.append({"address":newtaddr, "amount":amount_per_recipient}) elapsed = timeit.default_timer() - start_time print("...invoked getnewaddress() {} times in {} seconds".format(num_t_recipients, elapsed)) # Issue #2263 Workaround START # HTTP connection to node 0 may fall into a state, during the few minutes it takes to process # loop above to create new addresses, that when z_sendmany is called with a large amount of # rpc data in recipients, the connection fails with a 'broken pipe' error. Making a RPC call # to node 0 before calling z_sendmany appears to fix this issue, perhaps putting the HTTP # connection into a good state to handle a large amount of data in recipients. 
self.nodes[0].getinfo() # Issue #2263 Workaround END myopid = self.nodes[0].z_sendmany(myzaddr, recipients) try: self.wait_and_assert_operationid_status(myopid) except JSONRPCException as e: print("JSONRPC error: "+e.error['message']) assert(False) except Exception as e: print("Unexpected exception caught during testing: "+str(sys.exc_info()[0])) assert(False) self.sync_all() self.nodes[1].generate(1) self.sync_all() # check balance node2balance = amount_per_recipient * num_t_recipients assert_equal(self.nodes[2].getbalance(), node2balance) # Send will fail because fee is negative try: self.nodes[0].z_sendmany(myzaddr, recipients, 1, -1) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Amount out of range" in errorString, True) # Send will fail because fee is larger than MAX_MONEY try: self.nodes[0].z_sendmany(myzaddr, recipients, 1, Decimal('21000000.00000001')) except JSONRPCException,e: errorString = e.error['message'] assert_equal("Amount out of range" in errorString, True) # Send will fail because fee is larger than sum of outputs try: self.nodes[0].z_sendmany(myzaddr, recipients, 1, (amount_per_recipient * num_t_recipients) + Decimal('0.00000001')) except JSONRPCException,e: errorString = e.error['message'] assert_equal("is greater than the sum of outputs" in errorString, True) # Send will succeed because the balance of non-coinbase utxos is 10.0 try: self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 9) except JSONRPCException: assert(False) self.sync_all() self.nodes[1].generate(1) self.sync_all() # check balance node2balance = node2balance + 9 assert_equal(self.nodes[2].getbalance(), node2balance) # Check that chained joinsplits in a single tx are created successfully. recipients = [] num_recipients = 3 amount_per_recipient =
EIyp, GKt, GA, kxs, kys, x_C, y_C, theta_p, x_S, y_S, theta_s) # Note theta_p/s in rad lM.append(M) lK.append(K) vx_G.append(x_G); vy_G.append(y_G) vx_S.append(x_S); vy_S.append(y_S) vx_C.append(x_C); vy_C.append(y_C) # --- Writing BeamDyn blade file span=hwc['r_[m]'].values s_bar=span/span[-1] print('Writing BeamDyn blade file:',BDBldFileOut) write_beamdyn_sections(BDBldFileOut,s_bar,lK,lM,Mu,Label=Label) # --- db #M=np.column_stack((zref, x_off, y_off)) #np.savetxt(BDBldFileOut.replace('.dat','offsets.txt'), M, delimiter=',',header='z_[m], xoff_[m], yoff_[m]') # --- Writing BeamDyn main file based on template file if BDMainTemplate is not None and BDMainFileOut is not None: BD=FASTInputFile(BDMainTemplate) #print(BD.keys()) BD.data[1]['value']=Label BD['MemberGeom'] = np.column_stack((x_O,y_O,z_O,twist)) BD['kp_total'] = len(x_O) BD['BldFile'] = '"'+os.path.basename(BDBldFileOut)+'"' BD.data[BD.getID('kp_total')+1]['value']= '1 {}'.format(len(x_O)) print('Writing BeamDyn file:',BDMainFileOut) BD.write(BDMainFileOut) # --- if bPlot: import matplotlib.pyplot as plt colrs=plt.rcParams['axes.prop_cycle'].by_key()['color'] EdgStiff= np.array([K[3,3] for K in lK]) FlpStiff= np.array([K[4,4] for K in lK]) EIxp = hwc['E_[N/m^2]']*hwc['I_y_[m^4]'].values # Should be [N.m^2] EIyp = hwc['E_[N/m^2]']*hwc['I_x_[m^4]'].values # fig=plt.figure() fig,axes = plt.subplots(4, 2, sharex=True, figsize=(12.4,09.)) # (6.4,4.8) fig.subplots_adjust(left=0.07, right=0.99, top=0.98, bottom=0.07, hspace=0.25, wspace=0.15) for ax in axes.ravel(): ax.tick_params(direction='in') # --- Plot mean line from hawc2 and beamdyn x_O_h2 = c2def_old['y_[m]'].values # kp_xr y_O_h2 = -c2def_old['x_[m]'].values # kp_yr z_O_h2 = c2def_old['z_[m]'].values # kp_zr twist = -c2def_old['twist_[deg]'].values # initial_twist [deg] # fig,axes = plt.subplots(2, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8) # ax=axes[0,0] axes[0,0].text(0.5, 1.01, 'Mean line x', horizontalalignment='center', 
verticalalignment='bottom', transform = axes[0,0].transAxes) axes[0,1].text(0.5, 1.01, 'Mean line y', horizontalalignment='center', verticalalignment='bottom', transform = axes[0,1].transAxes) axes[0,0].plot(z_O, x_O , '-' , label = 'BD smooth)') axes[0,0].plot(z_O, x_O_h2, '--', label = 'H2 c2def', ms=3, color='k') axes[0,0].plot(z_O, x_off_g, ':', label = r'"$\Delta$" to c2def', color=colrs[6]) axes[0,1].plot(z_O, y_O , '-' , label = 'BD y (smooth)') axes[0,1].plot(z_O, y_O_h2, '--' , label = 'H2 "y"', ms=3, color='k') axes[0,1].plot(z_O, y_off_g , ':', label = 'y_off', color=colrs[6]) if 'Relative_thickness_[%]' and 'Chord_[m]' in c2def.columns.values: c = c2def['Chord_[m]'] t = c2def['Relative_thickness_[%]'] *c/100 axes[0,0].plot(z_O, x_O_h2+c/2*np.sin(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) axes[0,0].plot(z_O, x_O_h2-c/2*np.sin(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) axes[0,1].plot(z_O, y_O_h2+c/2*np.cos(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) axes[0,1].plot(z_O, y_O_h2-c/2*np.cos(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) # Chord_[m] Relative_thickness_[%] # axes[0,0].set_xlabel('z [m]') # axes[0,1].set_xlabel('z [m]') axes[0,0].set_ylabel('x [m]') axes[0,1].set_ylabel('y [m]') axes[0,0].legend(loc='upper right', fontsize=8) # --- Plot COG, Shear Center vx_G=np.asarray(vx_G); vy_G=np.asarray(vy_G) vx_S=np.asarray(vx_S); vy_S=np.asarray(vy_S) vx_C=np.asarray(vx_C); vy_C=np.asarray(vy_C) # fig,axes = plt.subplots(2, 1, sharex=True, figsize=(6.4,4.8)) # (6.4,4.8) # fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20) axes[1,0].text(0.5, 1.01, 'Abs. position, x', horizontalalignment='center', verticalalignment='bottom', transform = axes[1,0].transAxes) axes[1,1].text(0.5, 1.01, 'Abs. 
position, y', horizontalalignment='center', verticalalignment='bottom', transform = axes[1,1].transAxes) axes[1,0].plot(z_O , x_O , '-' , label = 'BD meanline') axes[1,0].plot(z_O , x_O + vx_G , 'd' , ms=6, color=None, markeredgecolor=colrs[1], markerfacecolor="None", label = 'G (COG)') axes[1,0].plot(z_O , x_O + vx_S , 's' , ms=6, color=None, markeredgecolor=colrs[2], markerfacecolor="None", label = 'S (shear center)') axes[1,0].plot(z_O , x_O + vx_C , 'o' , ms=6, color=None, markeredgecolor=colrs[3], markerfacecolor="None", label = 'C (elastic center)') axes[1,0].plot(hwc['r_[m]'].values, x_O_h2 + hwc['y_cg_[m]'].values, 'd' , ms=1, color=colrs[1] , label='HAWC2') axes[1,0].plot(hwc['r_[m]'].values, x_O_h2 + hwc['y_sh_[m]'].values, 's' , ms=1, color=colrs[2] ) axes[1,0].plot(hwc['r_[m]'].values, x_O_h2 + hwc['y_e_[m]' ].values , 'o' , ms=1, color=colrs[3] ) axes[1,1].plot(z_O, y_O , '-' , label = 'BD y (smooth)') axes[1,1].plot(z_O , y_O + vy_G , 'd' , ms=6, color=None, markeredgecolor=colrs[1], markerfacecolor="None", label = 'G (COG)') axes[1,1].plot(z_O , y_O + vy_S , 's' , ms=6, color=None, markeredgecolor=colrs[2], markerfacecolor="None", label = 'S (shear center)') axes[1,1].plot(z_O , y_O + vy_C , 'o' , ms=6, color=None, markeredgecolor=colrs[3], markerfacecolor="None", label = 'C (elastic center)') axes[1,1].plot(hwc['r_[m]'].values, y_O_h2 - hwc['x_cg_[m]'].values, 'd' , ms=1, color=colrs[1] ) axes[1,1].plot(hwc['r_[m]'].values, y_O_h2 - hwc['x_sh_[m]'].values, 's' , ms=1, color=colrs[2] ) axes[1,1].plot(hwc['r_[m]'].values, y_O_h2 - hwc['x_e_[m]'].values, 'o' , ms=1, color=colrs[3] ) # axes[1,0].set_xlabel('z [m]') # axes[1,1].set_xlabel('z [m]') axes[1,0].set_ylabel('x [m]') axes[1,1].set_ylabel('y [m]') axes[1,0].legend(loc='upper right', fontsize=8) if 'Relative_thickness_[%]' and 'Chord_[m]' in c2def.columns.values: c = c2def['Chord_[m]'].values t = c2def['Relative_thickness_[%]'].values *c/100 axes[1,0].plot(z_O, x_O_h2+c/2*np.sin(twist*np.pi/180), 
'-', color=[0.5,0.5,0.5] ) axes[1,0].plot(z_O, x_O_h2-c/2*np.sin(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) axes[1,1].plot(z_O, y_O_h2+c/2*np.cos(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) axes[1,1].plot(z_O, y_O_h2-c/2*np.cos(twist*np.pi/180), '-', color=[0.5,0.5,0.5] ) # # --- Positions rel to mean line axes[2,0].text(0.5, 1.01, r'Pos. wrt. meanline, x', horizontalalignment='center', verticalalignment='bottom', transform = axes[2,0].transAxes) axes[2,1].text(0.5, 1.01, r'Pos. wrt. meanline, y', horizontalalignment='center', verticalalignment='bottom', transform = axes[2,1].transAxes) axes[2,0].plot(z_O , x_O-x_O , '-' , label = 'BD meanline') axes[2,0].plot(z_O , vx_G , 'd' , ms=6, color=None, markeredgecolor=colrs[1], markerfacecolor="None", label = 'G (COG)') axes[2,0].plot(z_O , vx_S , 's' , ms=6, color=None, markeredgecolor=colrs[2], markerfacecolor="None", label = 'S (shear center)') axes[2,0].plot(z_O , vx_C , 'o' , ms=6, color=None, markeredgecolor=colrs[3], markerfacecolor="None", label = 'C (elastic center)') axes[2,0].plot(z_O , x_off_g , ':', label = r'"$\Delta$" to c2def', color=colrs[6]) # axes[1,0].plot(hwc['r_[m]'], x_O_h2 + hwc['y_cg_[m]'], 'o' , ms=1, color=colrs[1] , label='HAWC2') # axes[1,0].plot(hwc['r_[m]'], x_O_h2 + hwc['y_sh_[m]'], 'o' , ms=1, color=colrs[2] ) # axes[1,0].plot(hwc['r_[m]'], x_O_h2 + hwc['y_e_[m]'] , 'd' , ms=1, color=colrs[3] ) axes[2,1].plot(z_O , y_O -y_O , '-' , label = 'BD meanline') axes[2,1].plot(z_O , vy_G , 'd' , ms=6, color=None, markeredgecolor=colrs[1], markerfacecolor="None", label = 'G (COG)') axes[2,1].plot(z_O , vy_S , 's' , ms=6, color=None, markeredgecolor=colrs[2], markerfacecolor="None", label = 'S (shear center)') axes[2,1].plot(z_O , vy_C , 'o' , ms=6, color=None, markeredgecolor=colrs[3], markerfacecolor="None", label = 'C (elastic center)') axes[2,1].plot(z_O , y_off_g , ':', label = r'"$\Delta$" to c2def', color=colrs[6]) # axes[1,1].plot(hwc['r_[m]'], y_O_h2 - hwc['x_cg_[m]'], 'o' , ms=1, 
color=colrs[1] ) # axes[1,1].plot(hwc['r_[m]'], y_O_h2 - hwc['x_sh_[m]'], 's' , ms=1, color=colrs[2] ) # axes[1,1].plot(hwc['r_[m]'], y_O_h2 - hwc['x_e_[m]'] , 'd' , ms=1, color=colrs[3] ) axes[2,0].set_xlabel('z [m]') axes[2,1].set_xlabel('z [m]') axes[2,0].set_ylabel(r'$\Delta x$ [m]') axes[2,1].set_ylabel(r'$\Delta y$ [m]') axes[2,0].legend(loc='upper right', fontsize=8) # # --- Plot Stiffness # ax=fig.add_subplot(111) ax=axes[3,0] ax.text(0.5, 1.01, 'Stiffnesses', horizontalalignment='center', verticalalignment='bottom', transform = ax.transAxes) ax.plot(z_O,EdgStiff,'-' , color=colrs[0], label='Edge Stiffness (K_44)') ax.plot(z_O,EIxp.values ,'--', color=colrs[0], label='EIx "edge" at elastic center') ax.plot(z_O,FlpStiff,'-' , color=colrs[1], label='Flap Stiffness (K_55)') ax.plot(z_O,EIyp.values ,'--', color=colrs[1], label='EIy "flap" at elastic center') ax.set_xlabel('z [m]') ax.set_ylabel('Stiffness [Nm^2]') ax.legend(fontsize=8) return fig #fig.savefig(BDMainFileOut.replace('.dat','.png')) #plt.show() # --------------------------------------------------------------------------------} # --- # --------------------------------------------------------------------------------{ def beamDynToHawc2(BD_mainfile, BD_bladefile, H2_htcfile=None, H2_stfile=None, bodyname=None, A=None, E=None, G=None, theta_p_in=None, FPM=False): """ FPM: fully populated matrix, if True, use the FPM format of hawc2 """ # --- Read BeamDyn files if isinstance(BD_mainfile, str): BD_mainfile = weio.read(BD_mainfile) if isinstance(BD_bladefile, str): BD_bladefile = weio.read(BD_bladefile) bdLine = BD_mainfile.toDataFrame() bd = BD_bladefile.toDataFrame() # --- Extract relevant info prop = bd['BeamProperties'] kp_x = bdLine['kp_xr_[m]'].values kp_y = bdLine['kp_yr_[m]'].values kp_z = bdLine['kp_zr_[m]'].values twist = bdLine['initial_twist_[deg]'].values*np.pi/180 # BeamDyn convention r_bar = prop['Span'].values K = np.zeros((6,6),dtype='object') M = np.zeros((6,6),dtype='object') for i in 
np.arange(6): for j in np.arange(6): K[i,j]=prop['K{}{}'.format(i+1,j+1)].values M[i,j]=prop['M{}{}'.format(i+1,j+1)].values # Map 6x6 data to "beam" data # NOTE: theta_* are in [rad] EA, EIx, EIy, kxsGA, kysGA, GKt, x_C, y_C, x_S, y_S, theta_p, theta_s = K66toProps(K, theta_p_in) m, Ixi, Iyi, Ip, x_G, y_G, theta_i = M66toProps(M) # print('kxGA {:e}'.format(np.mean(kxsGA))) # print('kyGA {:e}'.format(np.mean(kysGA))) # print('EA {:e}'.format(np.mean(EA))) # print('EIx {:e}'.format(np.mean(EIx))) # print('EIy {:e}'.format(np.mean(EIy))) # print('GKt {:e}'.format(np.mean(GKt))) # print('xC ',np.mean(x_C)) # print('yC ',np.mean(y_C)) # print('xS ',np.mean(x_S)) # print('yS ',np.mean(y_S)) # print('thetap',np.mean(theta_p)) # print('thetas',np.mean(theta_s)) # print('m ',np.mean(m)) # print('Ixi ',np.mean(Ixi)) # print('Iyi ',np.mean(Iyi)) # print('Ip ',np.mean(Ip)) # print('x_G ',np.mean(x_G)) # print('y_G ',np.mean(y_G)) # print('thetai',np.mean(theta_i)) # Convert to Hawc2 system if FPM: dfMeanLine , dfStructure = beamDyn2Hawc2FPM_raw(r_bar, kp_x, kp_y, kp_z, twist, # BeamDyn convention, twist around -z [in rad] m, Ixi, Iyi, x_G, y_G, theta_i, # theta_i/p around z (in rad) x_C, y_C, theta_p, K) else: dfMeanLine , dfStructure = beamDyn2Hawc2_raw(r_bar, kp_x, kp_y, kp_z, twist, m, Ixi, Iyi, x_G, y_G, theta_i, EA, EIx, EIy, GKt, kxsGA, kysGA, x_C, y_C, theta_p, x_S, y_S, theta_s, A=A, E=E, G=G) # --- Rewrite st file if H2_stfile is not None: with open(H2_stfile, 'w') as f: f.write('%i ; number of sets, Nset\n' % 1) f.write('-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n') f.write('#%i ; set number\n' % 1) if FPM: 
cols=['r','m_[kg/m]','x_cg_[m]','y_cg_[m]','ri_x_[m]','ri_y_[m]','pitch_[deg]','x_e_[m]','y_e_[m]','K11','K12','K13','K14','K15','K16','K22','K23','K24','K25','K26','K33','K34','K35','K36','K44','K45','K46','K55','K56','K66'] else: cols=['r_[m]','m_[kg/m]','x_cg_[m]','y_cg_[m]','ri_x_[m]','ri_y_[m]', 'x_sh_[m]','y_sh_[m]','E_[N/m^2]','G_[N/m^2]','I_x_[m^4]','I_y_[m^4]','I_p_[m^4]','k_x_[-]','k_y_[-]','A_[m^2]','pitch_[deg]','x_e_[m]','y_e_[m]'] f.write('\t'.join(['{:20s}'.format(s) for s in cols])+'\n') f.write('$%i %i\n' % (1, dfStructure.shape[0])) f.write('\n'.join('\t'.join('%19.13e' %x for x in y) for y in dfStructure.values)) # --- Rewrite htc file if H2_htcfile is not None: def readToMarker(lines, marker, i, nMax=None, noException=False): l_sel=[] if nMax is None: nMax=len(lines) while i<nMax: line=lines[i] if line.replace(' ','').lower().find(marker)>=0: break l_sel.append(line.strip()) i+=1 if line.strip().replace(' ','').lower().find(marker)<0: if noException: return None, None, None else: raise Exception('Marker not found '+ marker) return l_sel, line, i with open(H2_htcfile, 'r') as f: lines_in = f.readlines() lines_out = [] bodyNotFound=True iBodyEnd=0 nBodies=0 while bodyNotFound and nBodies<10: _, line, iBodyStart = readToMarker(lines_in, 'beginmain_body',iBodyEnd) _, line, iBodyEnd = readToMarker(lines_in, 'endmain_body', iBodyStart) _, line, iBody = readToMarker(lines_in, 'name'+bodyname, iBodyStart, iBodyEnd, True) nBodies+=1 if line is None: iBody=-1 else: #print('Body {} found between lines {} and {} '.format(bodyname, iBodyStart+1, iBodyEnd+1)) bodyNotFound=False if nBodies>=10: raise Exception('Body {} not found in file'.format(bodyname)) _, line,
# NLP written by GAMS Convert at 04/21/18 13:53:12
#
# Equation counts
# Total E G L N X C B
# 1384 979 0 405 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 1246 1246 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 32255 30319 1936 0
#
# Reformulation has removed 1 variable and 1 equation
#
# NOTE(review): the original auto-generated file began with a stray
# "<gh_stars>1-10" scraper artifact, which is a SyntaxError and made the
# module unimportable; it has been removed.

from pyomo.environ import *

model = m = ConcreteModel()

# Variables x2..x107 of the converted NLP.  Every one is continuous
# (within=Reals, matching the "cont" count in the header), lower-bounded by 0
# and initialized to 0; only the upper bound varies.  Attribute assignment
# via setattr is identical to the original `m.xN = Var(...)` statements and
# preserves the declaration order, which Pyomo uses for component ordering.

# x2..x87: fractional variables bounded in [0, 1].
for _i in range(2, 88):
    setattr(m, 'x%d' % _i, Var(within=Reals, bounds=(0, 1), initialize=0))

# x88..x107: per-variable upper bounds, given as run-length triples
# (first_index, last_index, upper_bound) in declaration order.
for _lo, _hi, _ub in (
        (88, 92, 95), (93, 93, 20), (94, 95, 95), (96, 97, 69),
        (98, 98, 34), (99, 102, 69), (103, 103, 102), (104, 104, 34),
        (105, 105, 102), (106, 106, 20), (107, 107, 102)):
    for _i in range(_lo, _hi + 1):
        setattr(m, 'x%d' % _i, Var(within=Reals, bounds=(0, _ub), initialize=0))

# Keep the module namespace as clean as the original generated file.
del _i, _lo, _hi, _ub
# Variables x108..x327 (continuation of the GAMS-converted model).  All are
# continuous, lower-bounded by 0 and initialized to 0; only the upper bound
# differs between variables, so the 220 literal `m.xN = Var(...)` statements
# are emitted from a compact run-length table.  Attribute assignment through
# setattr is exactly equivalent to `m.xN = ...`, and iterating the runs in
# ascending index order reproduces the original declaration order.
for _first, _last, _upper in (
        (108, 133, 102), (134, 134, 20), (135, 139, 102), (140, 141, 50),
        (142, 142, 34), (143, 143, 20), (144, 150, 50), (151, 151, 34),
        (152, 152, 50), (153, 153, 20), (154, 168, 50), (169, 169, 34),
        (170, 176, 50), (177, 177, 34), (178, 181, 50), (182, 186, 24),
        (187, 187, 20), (188, 192, 24), (193, 193, 20), (194, 234, 24),
        (235, 235, 20), (236, 240, 24), (241, 242, 80), (243, 243, 34),
        (244, 244, 20), (245, 250, 80), (251, 252, 69), (253, 253, 34),
        (254, 257, 69), (258, 259, 124), (260, 260, 110), (261, 264, 124),
        (265, 265, 55), (266, 266, 34), (267, 272, 55), (273, 273, 170),
        (274, 274, 34), (275, 275, 105), (276, 276, 170), (277, 277, 20),
        (278, 278, 131), (279, 279, 126), (280, 280, 135), (281, 281, 170),
        (282, 282, 150), (283, 283, 139), (284, 284, 34), (285, 285, 139),
        (286, 286, 126), (287, 287, 135), (288, 288, 139), (289, 291, 111),
        (292, 292, 105), (293, 293, 111), (294, 294, 110), (295, 295, 20),
        (296, 300, 111), (301, 305, 95), (306, 306, 20), (307, 308, 95),
        (309, 310, 80), (311, 311, 34), (312, 312, 20), (313, 318, 80),
        (319, 320, 69), (321, 321, 34), (322, 325, 69), (326, 327, 179)):
    for _idx in range(_first, _last + 1):
        setattr(m, 'x%d' % _idx,
                Var(within=Reals, bounds=(0, _upper), initialize=0))

# Remove the loop helpers so the module namespace matches the original file.
del _first, _last, _upper, _idx
m.x328 = Var(within=Reals,bounds=(0,176),initialize=0) m.x329 = Var(within=Reals,bounds=(0,105),initialize=0) m.x330 = Var(within=Reals,bounds=(0,177),initialize=0) m.x331 = Var(within=Reals,bounds=(0,131),initialize=0) m.x332 = Var(within=Reals,bounds=(0,136),initialize=0) m.x333 = Var(within=Reals,bounds=(0,135),initialize=0) m.x334 = Var(within=Reals,bounds=(0,150),initialize=0) m.x335 = Var(within=Reals,bounds=(0,55),initialize=0) m.x336 = Var(within=Reals,bounds=(0,34),initialize=0) m.x337 = Var(within=Reals,bounds=(0,55),initialize=0) m.x338 = Var(within=Reals,bounds=(0,55),initialize=0) m.x339 = Var(within=Reals,bounds=(0,55),initialize=0) m.x340 = Var(within=Reals,bounds=(0,55),initialize=0) m.x341 = Var(within=Reals,bounds=(0,55),initialize=0) m.x342 = Var(within=Reals,bounds=(0,55),initialize=0) m.x343 = Var(within=Reals,bounds=(0,91),initialize=0) m.x344 = Var(within=Reals,bounds=(0,91),initialize=0) m.x345 = Var(within=Reals,bounds=(0,91),initialize=0) m.x346 = Var(within=Reals,bounds=(0,91),initialize=0) m.x347 = Var(within=Reals,bounds=(0,91),initialize=0) m.x348 = Var(within=Reals,bounds=(0,20),initialize=0) m.x349 = Var(within=Reals,bounds=(0,91),initialize=0) m.x350 = Var(within=Reals,bounds=(0,91),initialize=0) m.x351 = Var(within=Reals,bounds=(0,91),initialize=0) m.x352 = Var(within=Reals,bounds=(0,91),initialize=0) m.x353 = Var(within=Reals,bounds=(0,91),initialize=0) m.x354 = Var(within=Reals,bounds=(0,91),initialize=0) m.x355 = Var(within=Reals,bounds=(0,91),initialize=0) m.x356 = Var(within=Reals,bounds=(0,91),initialize=0) m.x357 = Var(within=Reals,bounds=(0,91),initialize=0) m.x358 = Var(within=Reals,bounds=(0,91),initialize=0) m.x359 = Var(within=Reals,bounds=(0,91),initialize=0) m.x360 = Var(within=Reals,bounds=(0,91),initialize=0) m.x361 = Var(within=Reals,bounds=(0,91),initialize=0) m.x362 = Var(within=Reals,bounds=(0,91),initialize=0) m.x363 = Var(within=Reals,bounds=(0,91),initialize=0) m.x364 = 
Var(within=Reals,bounds=(0,91),initialize=0) m.x365 = Var(within=Reals,bounds=(0,91),initialize=0) m.x366 = Var(within=Reals,bounds=(0,91),initialize=0) m.x367 = Var(within=Reals,bounds=(0,55),initialize=0) m.x368 = Var(within=Reals,bounds=(0,34),initialize=0) m.x369 = Var(within=Reals,bounds=(0,55),initialize=0) m.x370 = Var(within=Reals,bounds=(0,55),initialize=0) m.x371 = Var(within=Reals,bounds=(0,55),initialize=0) m.x372 = Var(within=Reals,bounds=(0,55),initialize=0) m.x373 = Var(within=Reals,bounds=(0,55),initialize=0) m.x374 = Var(within=Reals,bounds=(0,55),initialize=0) m.x375 = Var(within=Reals,bounds=(0,91),initialize=0) m.x376 = Var(within=Reals,bounds=(0,34),initialize=0) m.x377 = Var(within=Reals,bounds=(0,91),initialize=0) m.x378 = Var(within=Reals,bounds=(0,91),initialize=0) m.x379 = Var(within=Reals,bounds=(0,20),initialize=0) m.x380 = Var(within=Reals,bounds=(0,91),initialize=0) m.x381 = Var(within=Reals,bounds=(0,91),initialize=0) m.x382 = Var(within=Reals,bounds=(0,91),initialize=0) m.x383 = Var(within=Reals,bounds=(0,91),initialize=0) m.x384 = Var(within=Reals,bounds=(0,91),initialize=0) m.x385 = Var(within=Reals,bounds=(0,91),initialize=0) m.x386 = Var(within=Reals,bounds=(0,34),initialize=0) m.x387 = Var(within=Reals,bounds=(0,91),initialize=0) m.x388 = Var(within=Reals,bounds=(0,91),initialize=0) m.x389 = Var(within=Reals,bounds=(0,91),initialize=0) m.x390 = Var(within=Reals,bounds=(0,91),initialize=0) m.x391 = Var(within=Reals,bounds=(0,91),initialize=0) m.x392 = Var(within=Reals,bounds=(0,91),initialize=0) m.x393 = Var(within=Reals,bounds=(0,91),initialize=0) m.x394 = Var(within=Reals,bounds=(0,91),initialize=0) m.x395 = Var(within=Reals,bounds=(0,91),initialize=0) m.x396 = Var(within=Reals,bounds=(0,91),initialize=0) m.x397 = Var(within=Reals,bounds=(0,20),initialize=0) m.x398 = Var(within=Reals,bounds=(0,91),initialize=0) m.x399 = Var(within=Reals,bounds=(0,91),initialize=0) m.x400 = Var(within=Reals,bounds=(0,91),initialize=0) m.x401 = 
Var(within=Reals,bounds=(0,91),initialize=0) m.x402 = Var(within=Reals,bounds=(0,91),initialize=0) m.x403 = Var(within=Reals,bounds=(0,64),initialize=0) m.x404 = Var(within=Reals,bounds=(0,64),initialize=0) m.x405 = Var(within=Reals,bounds=(0,64),initialize=0) m.x406 = Var(within=Reals,bounds=(0,64),initialize=0) m.x407 = Var(within=Reals,bounds=(0,64),initialize=0) m.x408 = Var(within=Reals,bounds=(0,20),initialize=0) m.x409 = Var(within=Reals,bounds=(0,64),initialize=0) m.x410 = Var(within=Reals,bounds=(0,64),initialize=0) m.x411 = Var(within=Reals,bounds=(0,64),initialize=0) m.x412 = Var(within=Reals,bounds=(0,64),initialize=0) m.x413 = Var(within=Reals,bounds=(0,34),initialize=0) m.x414 = Var(within=Reals,bounds=(0,20),initialize=0) m.x415 = Var(within=Reals,bounds=(0,64),initialize=0) m.x416 = Var(within=Reals,bounds=(0,64),initialize=0) m.x417 = Var(within=Reals,bounds=(0,64),initialize=0) m.x418 = Var(within=Reals,bounds=(0,64),initialize=0) m.x419 = Var(within=Reals,bounds=(0,64),initialize=0) m.x420 = Var(within=Reals,bounds=(0,64),initialize=0) m.x421 = Var(within=Reals,bounds=(0,64),initialize=0) m.x422 = Var(within=Reals,bounds=(0,64),initialize=0) m.x423 = Var(within=Reals,bounds=(0,34),initialize=0) m.x424 = Var(within=Reals,bounds=(0,64),initialize=0) m.x425 = Var(within=Reals,bounds=(0,64),initialize=0) m.x426 = Var(within=Reals,bounds=(0,64),initialize=0) m.x427 = Var(within=Reals,bounds=(0,64),initialize=0) m.x428 = Var(within=Reals,bounds=(0,64),initialize=0) m.x429 = Var(within=Reals,bounds=(0,64),initialize=0) m.x430 = Var(within=Reals,bounds=(0,64),initialize=0) m.x431 = Var(within=Reals,bounds=(0,64),initialize=0) m.x432 = Var(within=Reals,bounds=(0,64),initialize=0) m.x433 = Var(within=Reals,bounds=(0,64),initialize=0) m.x434 = Var(within=Reals,bounds=(0,64),initialize=0) m.x435 = Var(within=Reals,bounds=(0,64),initialize=0) m.x436 = Var(within=Reals,bounds=(0,34),initialize=0) m.x437 = Var(within=Reals,bounds=(0,64),initialize=0) m.x438 = 
Var(within=Reals,bounds=(0,64),initialize=0) m.x439 = Var(within=Reals,bounds=(0,20),initialize=0) m.x440 = Var(within=Reals,bounds=(0,64),initialize=0) m.x441 = Var(within=Reals,bounds=(0,64),initialize=0) m.x442 = Var(within=Reals,bounds=(0,64),initialize=0) m.x443 = Var(within=Reals,bounds=(0,64),initialize=0) m.x444 = Var(within=Reals,bounds=(0,64),initialize=0) m.x445 = Var(within=Reals,bounds=(0,64),initialize=0) m.x446 = Var(within=Reals,bounds=(0,64),initialize=0) m.x447 = Var(within=Reals,bounds=(0,64),initialize=0) m.x448 = Var(within=Reals,bounds=(0,64),initialize=0) m.x449 = Var(within=Reals,bounds=(0,64),initialize=0) m.x450 = Var(within=Reals,bounds=(0,64),initialize=0) m.x451 = Var(within=Reals,bounds=(0,20),initialize=0) m.x452 = Var(within=Reals,bounds=(0,64),initialize=0) m.x453 = Var(within=Reals,bounds=(0,64),initialize=0) m.x454 = Var(within=Reals,bounds=(0,64),initialize=0) m.x455 = Var(within=Reals,bounds=(0,64),initialize=0) m.x456 = Var(within=Reals,bounds=(0,64),initialize=0) m.x457 = Var(within=Reals,bounds=(0,33),initialize=0) m.x458 = Var(within=Reals,bounds=(0,33),initialize=0) m.x459 = Var(within=Reals,bounds=(0,33),initialize=0) m.x460 = Var(within=Reals,bounds=(0,33),initialize=0) m.x461 = Var(within=Reals,bounds=(0,33),initialize=0) m.x462 = Var(within=Reals,bounds=(0,20),initialize=0) m.x463 = Var(within=Reals,bounds=(0,33),initialize=0) m.x464 = Var(within=Reals,bounds=(0,33),initialize=0) m.x465 = Var(within=Reals,bounds=(0,33),initialize=0) m.x466 = Var(within=Reals,bounds=(0,33),initialize=0) m.x467 = Var(within=Reals,bounds=(0,33),initialize=0) m.x468 = Var(within=Reals,bounds=(0,20),initialize=0) m.x469 = Var(within=Reals,bounds=(0,33),initialize=0) m.x470 = Var(within=Reals,bounds=(0,33),initialize=0) m.x471 = Var(within=Reals,bounds=(0,33),initialize=0) m.x472 = Var(within=Reals,bounds=(0,33),initialize=0) m.x473 = Var(within=Reals,bounds=(0,33),initialize=0) m.x474 = Var(within=Reals,bounds=(0,33),initialize=0) m.x475 = 
Var(within=Reals,bounds=(0,33),initialize=0) m.x476 = Var(within=Reals,bounds=(0,33),initialize=0) m.x477 = Var(within=Reals,bounds=(0,33),initialize=0) m.x478 = Var(within=Reals,bounds=(0,33),initialize=0) m.x479 = Var(within=Reals,bounds=(0,33),initialize=0) m.x480 = Var(within=Reals,bounds=(0,33),initialize=0) m.x481 = Var(within=Reals,bounds=(0,33),initialize=0) m.x482 = Var(within=Reals,bounds=(0,33),initialize=0) m.x483 = Var(within=Reals,bounds=(0,33),initialize=0) m.x484 = Var(within=Reals,bounds=(0,33),initialize=0) m.x485 = Var(within=Reals,bounds=(0,20),initialize=0) m.x486 = Var(within=Reals,bounds=(0,33),initialize=0) m.x487 = Var(within=Reals,bounds=(0,33),initialize=0) m.x488 = Var(within=Reals,bounds=(0,33),initialize=0) m.x489 = Var(within=Reals,bounds=(0,33),initialize=0) m.x490 = Var(within=Reals,bounds=(0,33),initialize=0) m.x491 = Var(within=Reals,bounds=(0,33),initialize=0) m.x492 = Var(within=Reals,bounds=(0,33),initialize=0) m.x493 = Var(within=Reals,bounds=(0,33),initialize=0) m.x494 = Var(within=Reals,bounds=(0,33),initialize=0) m.x495 = Var(within=Reals,bounds=(0,33),initialize=0) m.x496 = Var(within=Reals,bounds=(0,33),initialize=0) m.x497 = Var(within=Reals,bounds=(0,33),initialize=0) m.x498 = Var(within=Reals,bounds=(0,33),initialize=0) m.x499 = Var(within=Reals,bounds=(0,33),initialize=0) m.x500 = Var(within=Reals,bounds=(0,33),initialize=0) m.x501 = Var(within=Reals,bounds=(0,33),initialize=0) m.x502 = Var(within=Reals,bounds=(0,33),initialize=0) m.x503 = Var(within=Reals,bounds=(0,33),initialize=0) m.x504 = Var(within=Reals,bounds=(0,33),initialize=0) m.x505 = Var(within=Reals,bounds=(0,33),initialize=0) m.x506 = Var(within=Reals,bounds=(0,33),initialize=0) m.x507 = Var(within=Reals,bounds=(0,33),initialize=0) m.x508 = Var(within=Reals,bounds=(0,33),initialize=0) m.x509 = Var(within=Reals,bounds=(0,33),initialize=0) m.x510 = Var(within=Reals,bounds=(0,33),initialize=0) m.x511 = Var(within=Reals,bounds=(0,33),initialize=0) m.x512 = 
Var(within=Reals,bounds=(0,20),initialize=0) m.x513 = Var(within=Reals,bounds=(0,33),initialize=0) m.x514 = Var(within=Reals,bounds=(0,33),initialize=0) m.x515 = Var(within=Reals,bounds=(0,33),initialize=0) m.x516 = Var(within=Reals,bounds=(0,33),initialize=0) m.x517 = Var(within=Reals,bounds=(0,33),initialize=0) m.x518 = Var(within=Reals,bounds=(0,33),initialize=0) m.x519 = Var(within=Reals,bounds=(0,33),initialize=0) m.x520 = Var(within=Reals,bounds=(0,33),initialize=0) m.x521 = Var(within=Reals,bounds=(0,33),initialize=0) m.x522 = Var(within=Reals,bounds=(0,33),initialize=0) m.x523 = Var(within=Reals,bounds=(0,33),initialize=0) m.x524 = Var(within=Reals,bounds=(0,20),initialize=0) m.x525 = Var(within=Reals,bounds=(0,33),initialize=0) m.x526 = Var(within=Reals,bounds=(0,33),initialize=0) m.x527 = Var(within=Reals,bounds=(0,33),initialize=0) m.x528 = Var(within=Reals,bounds=(0,33),initialize=0) m.x529 = Var(within=Reals,bounds=(0,33),initialize=0) m.x530 = Var(within=Reals,bounds=(0,80),initialize=0) m.x531 = Var(within=Reals,bounds=(0,80),initialize=0) m.x532 = Var(within=Reals,bounds=(0,34),initialize=0) m.x533 = Var(within=Reals,bounds=(0,20),initialize=0) m.x534 = Var(within=Reals,bounds=(0,80),initialize=0) m.x535 = Var(within=Reals,bounds=(0,80),initialize=0) m.x536 = Var(within=Reals,bounds=(0,80),initialize=0) m.x537 = Var(within=Reals,bounds=(0,80),initialize=0) m.x538 = Var(within=Reals,bounds=(0,80),initialize=0) m.x539 = Var(within=Reals,bounds=(0,80),initialize=0) m.x540 = Var(within=Reals,bounds=(0,69),initialize=0) m.x541 = Var(within=Reals,bounds=(0,69),initialize=0) m.x542 = Var(within=Reals,bounds=(0,34),initialize=0) m.x543 = Var(within=Reals,bounds=(0,69),initialize=0) m.x544 = Var(within=Reals,bounds=(0,69),initialize=0) m.x545 = Var(within=Reals,bounds=(0,69),initialize=0) m.x546 = Var(within=Reals,bounds=(0,69),initialize=0) m.x547 = Var(within=Reals,bounds=(0,124),initialize=0) m.x548 = Var(within=Reals,bounds=(0,124),initialize=0) m.x549 = 
Var(within=Reals,bounds=(0,110),initialize=0) m.x550 = Var(within=Reals,bounds=(0,124),initialize=0) m.x551 = Var(within=Reals,bounds=(0,124),initialize=0) m.x552 = Var(within=Reals,bounds=(0,124),initialize=0) m.x553 = Var(within=Reals,bounds=(0,124),initialize=0) m.x554 = Var(within=Reals,bounds=(0,179),initialize=0) m.x555 = Var(within=Reals,bounds=(0,179),initialize=0) m.x556 = Var(within=Reals,bounds=(0,176),initialize=0) m.x557 = Var(within=Reals,bounds=(0,105),initialize=0) m.x558 = Var(within=Reals,bounds=(0,177),initialize=0) m.x559 = Var(within=Reals,bounds=(0,131),initialize=0) m.x560 = Var(within=Reals,bounds=(0,136),initialize=0) m.x561 = Var(within=Reals,bounds=(0,135),initialize=0) m.x562 = Var(within=Reals,bounds=(0,150),initialize=0) m.x563 = Var(within=Reals,bounds=(0,170),initialize=0) m.x564 = Var(within=Reals,bounds=(0,34),initialize=0) m.x565 = Var(within=Reals,bounds=(0,105),initialize=0) m.x566 = Var(within=Reals,bounds=(0,170),initialize=0) m.x567 = Var(within=Reals,bounds=(0,20),initialize=0) m.x568 = Var(within=Reals,bounds=(0,131),initialize=0) m.x569 = Var(within=Reals,bounds=(0,126),initialize=0) m.x570 = Var(within=Reals,bounds=(0,135),initialize=0) m.x571 = Var(within=Reals,bounds=(0,170),initialize=0) m.x572 = Var(within=Reals,bounds=(0,150),initialize=0) m.x573 = Var(within=Reals,bounds=(0,139),initialize=0) m.x574 = Var(within=Reals,bounds=(0,34),initialize=0) m.x575 = Var(within=Reals,bounds=(0,139),initialize=0) m.x576 = Var(within=Reals,bounds=(0,126),initialize=0) m.x577 = Var(within=Reals,bounds=(0,135),initialize=0) m.x578 = Var(within=Reals,bounds=(0,139),initialize=0) m.x579 = Var(within=Reals,bounds=(0,111),initialize=0) m.x580 = Var(within=Reals,bounds=(0,111),initialize=0) m.x581 = Var(within=Reals,bounds=(0,111),initialize=0) m.x582 = Var(within=Reals,bounds=(0,105),initialize=0) m.x583 = Var(within=Reals,bounds=(0,111),initialize=0) m.x584 = Var(within=Reals,bounds=(0,110),initialize=0) m.x585 = 
Var(within=Reals,bounds=(0,20),initialize=0) m.x586 = Var(within=Reals,bounds=(0,111),initialize=0) m.x587 = Var(within=Reals,bounds=(0,111),initialize=0) m.x588 = Var(within=Reals,bounds=(0,111),initialize=0) m.x589 = Var(within=Reals,bounds=(0,111),initialize=0) m.x590 = Var(within=Reals,bounds=(0,111),initialize=0) m.x591 = Var(within=Reals,bounds=(0,80),initialize=0) m.x592 = Var(within=Reals,bounds=(0,80),initialize=0) m.x593 = Var(within=Reals,bounds=(0,34),initialize=0) m.x594 = Var(within=Reals,bounds=(0,20),initialize=0) m.x595 = Var(within=Reals,bounds=(0,80),initialize=0) m.x596 = Var(within=Reals,bounds=(0,80),initialize=0) m.x597
# build-html.py
# NOTE(review): reconstructed from a whitespace-collapsed corpus dump; original
# line breaks were destroyed. This is Python 2 code (`import StringIO`,
# `basestring` below) -- porting to Python 3 is out of scope for this fix.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import os
import zipfile
import os.path
import json
import StringIO
import re
from pprint import pprint
import shutil

# Required keys of every service's vulnbox-store.json.
# FIX: the original listed 'created' twice; the duplicate only caused the same
# membership check to run twice, but it is removed for correctness.
expected_vulnbox_store_json_fields = [
    'id', 'game', 'name', 'version', 'created', 'author', 'issues',
    'checker-install-linux-packages', 'checker-install-run-commands',
    'keywords', 'contacts', 'service-using-ports', 'skip'
]

# Build the output tree for the generated static site.
if not os.path.isdir("./html"):
    os.mkdir("./html")
if not os.path.isdir("./html/css"):
    os.mkdir("./html/css")
if not os.path.isdir("./html/images"):
    os.mkdir("./html/images")
if not os.path.isdir("./html/teams"):
    os.mkdir("./html/teams")

# shutil.copyfile('./src/index.html', './html/index.html')
shutil.copyfile('./src/favicon.ico', './html/favicon.ico')
shutil.copyfile('./src/css/index.css', './html/css/index.css')
shutil.copyfile('./src/images/logo.png', './html/images/logo.png')
shutil.copyfile('./vbs.py', './html/vbs.py')

# Collect the per-service and per-team directories, sorted for stable output.
d = './services'
services_path = [os.path.join(d, o) for o in os.listdir(d)
                 if os.path.isdir(os.path.join(d, o))]
services_path.sort()
d = './teams'
teams_path = [os.path.join(d, o) for o in os.listdir(d)
              if os.path.isdir(os.path.join(d, o))]
teams_path.sort()


class bcolors:
    """ANSI escape codes for colored terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


# NOTE: the original declared `global bcolors` inside each helper; that was a
# no-op (the name is only read, never assigned) and has been dropped.
def print_success(msg):
    """Print a green OK-prefixed message."""
    print(bcolors.OKGREEN + "OK: " + msg + bcolors.ENDC)


def print_header(msg):
    """Print a bold section header."""
    print(bcolors.BOLD + msg + bcolors.ENDC)


def log_warn(msg):
    """Print a yellow warning message."""
    print(bcolors.WARNING + "Warning: " + msg + bcolors.ENDC)


def log_err(msg):
    """Print a red error message."""
    print(bcolors.FAIL + "ERROR: " + msg + bcolors.ENDC)


def sortById(inputStr):
    """Sort key: first element of the given sequence."""
    return inputStr[0]


def downloadFromGithub(path, github_link):
    """Sync `path` with the master branch of `github_link`.

    Compares the local vulnbox-store.json version against the one on
    raw.githubusercontent.com; on mismatch downloads and unpacks the repo's
    master.zip into `path`, flattening the `<repo>-master/` top directory.
    Returns False on network/format errors, True otherwise.
    """
    current_version = ''
    if os.path.isfile(path + "/vulnbox-store.json"):
        with open(path + "/vulnbox-store.json") as f:
            _json = json.load(f)
            if 'version' in _json:
                current_version = _json['version']
    print("\tCompare versions from " + github_link + " and local version")
    github_link_content = github_link.replace("//github.com/",
                                              "//raw.githubusercontent.com/")
    if github_link_content[len(github_link_content) - 1] != '/':
        github_link_content = github_link_content + '/'
    github_link_content = github_link_content + 'master/vulnbox-store.json'
    r = requests.get(github_link_content)
    if r.status_code != 200:
        return False
    if 'version' not in r.json():
        log_err("Field 'version' not found in " + github_link_content)
        return False
    if r.json()['version'] != current_version:
        print("\tVersions do not match, try downloading...")
        github_link_zip = github_link
        if github_link_zip[len(github_link_zip) - 1] != '/':
            github_link_zip = github_link_zip + '/'
        github_link_zip = github_link_zip + 'archive/master.zip'
        r = requests.get(github_link_zip, stream=True)
        z = zipfile.ZipFile(StringIO.StringIO(r.content))  # Python 2 only
        fileslist = z.namelist()
        z.extractall(path)
        parent_dir = ''
        for fn in fileslist:
            # Strip the leading "<repo>-master/" component from every entry.
            fn2 = fn.split("/")
            parent_dir = fn2.pop(0)
            fn2 = "/".join(fn2)
            print(fn2)
            if os.path.isdir(path + '/' + fn):
                if not os.path.isdir(path + '/' + fn2):
                    os.mkdir(path + '/' + fn2)
            elif os.path.isfile(path + '/' + fn):
                if fn2 == 'github-download.json':
                    log_warn('Skipped file from repository "github-download.json"')
                    continue
                shutil.copyfile(path + '/' + fn, path + '/' + fn2)
        shutil.rmtree(path + '/' + parent_dir)
    else:
        print_header("\tVersions match. Do nothing.")
    return True


def prepareArchive(id, path):
    """Zip the service's checker/, service/ and vulnbox-store.json into
    html/<id>.zip. Returns False if vulnbox-store.json is missing."""
    vulnbox_store_json_path = path + '/vulnbox-store.json'
    if not os.path.isfile(vulnbox_store_json_path):
        log_err(vulnbox_store_json_path + ' - not found')
        return False
    zip_file_name = 'html/' + id + '.zip'
    if os.path.isfile('html/' + id + '.zip'):
        os.remove(zip_file_name)
    zipf = zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED)
    # write checker directory
    path_checker = path + '/checker'
    for root, dirs, files in os.walk(path_checker):
        root2 = root[len(path):]  # archive-relative path
        for file in files:
            zipf.write(os.path.join(root, file), os.path.join(root2, file))
    # write service directory
    path_service = path + '/service'
    for root, dirs, files in os.walk(path_service):
        root2 = root[len(path):]
        for file in files:
            zipf.write(os.path.join(root, file), os.path.join(root2, file))
    zipf.write(path + '/vulnbox-store.json', 'vulnbox-store.json')
    zipf.close()
    return True


def checkServiceConf(id, path, vulnbox_store_json):
    """Validate <path>/checker/service.conf against the expected
    `services.<id>.*` keys and against vulnbox-store.json. Returns bool."""
    service_conf_path = path + '/checker/service.conf'
    if not os.path.isfile(service_conf_path):
        log_err(service_conf_path + ' - not found')
        return False
    with open(service_conf_path) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    prefix = 'services.' + id + '.'
    expected_fields = [
        prefix + 'enabled',
        prefix + 'name',
        prefix + 'script_path',
        prefix + 'script_wait_in_sec',
        prefix + 'time_sleep_between_run_scripts_in_sec',
    ]
    service_conf = {}
    bErr = False
    for lin in content:
        if lin.find('=') != -1:
            param_name = lin[0:lin.find('=')]
            param_value = lin[lin.find('=') + 1:]
            param_name = param_name.strip()
            param_value = param_value.strip()
            if param_name not in expected_fields:
                log_err(service_conf_path + " - extra param_name '" + param_name + "'")
                bErr = True
            else:
                service_conf[param_name] = param_value
    for name in expected_fields:
        if name not in service_conf:
            # FIX: the original printed `param_name` (stale loop variable from
            # the parsing loop above; NameError if the file had no '=' lines).
            log_err(service_conf_path + " - missing param '" + name + "'")
            bErr = True
    if bErr:
        return False
    if service_conf[prefix + "name"] != vulnbox_store_json["name"]:
        log_err(service_conf_path + " - don't match name with defined in vulnbox-store.json")
    checker_script_path = path + "/checker/" + service_conf[prefix + "script_path"]
    if not os.path.isfile(checker_script_path):
        log_err(checker_script_path + ' - not found file defined in ' + service_conf_path)
        return False
    return True


def checkPackageService(id, path):
    """Validate the full on-disk layout and metadata of one service package.
    Returns True only if vulnbox-store.json, service/, checker/ and
    service.conf all pass their checks."""
    vulnbox_store_json_path = path + '/vulnbox-store.json'
    if not os.path.isfile(vulnbox_store_json_path):
        log_err(vulnbox_store_json_path + ' - not found')
        return False
    with open(vulnbox_store_json_path) as f:
        vulnbox_store_json = json.load(f)
    bErr = False
    for field in vulnbox_store_json:
        if field not in expected_vulnbox_store_json_fields:
            log_err(vulnbox_store_json_path + " - extra field '" + field + "'")
            bErr = True
    for field in expected_vulnbox_store_json_fields:
        if field not in vulnbox_store_json:
            log_err(vulnbox_store_json_path + " - not found field '" + field + "'")
            bErr = True
    if bErr:
        return False
    if not isinstance(vulnbox_store_json['service-using-ports'], list):
        log_err(vulnbox_store_json_path + " - field 'service-using-ports' expected list")
        return False
    if not isinstance(vulnbox_store_json['checker-install-linux-packages'], list):
        log_err(vulnbox_store_json_path + " - field 'checker-install-linux-packages' expected list")
        return False
    if not isinstance(vulnbox_store_json['checker-install-run-commands'], list):
        log_err(vulnbox_store_json_path + " - field 'checker-install-run-commands' expected list")
        return False
    if vulnbox_store_json['id'] != id:
        log_err(vulnbox_store_json_path + ' has id = ' + vulnbox_store_json['id'] + ' but expected ' + id)
        return False
    if not os.path.isdir(path + '/service'):
        log_err(path + '/service' + " - directory not found")
        return False
    if not os.path.isfile(path + '/service/Dockerfile'):
        log_err(path + '/service/Dockerfile' + " - file missing")
        return False
    if not os.path.isfile(path + '/service/docker-compose.yml'):
        log_err(path + '/service/docker-compose.yml' + " - file missing")
        return False
    if not os.path.isfile(path + '/service/README.md'):
        log_err(path + '/service/README.md' + " - file missing")
        return False
    if not os.path.isdir(path + '/checker'):
        log_err(path + '/checker' + " - directory not found")
        return False
    if not os.path.isfile(path + '/checker/service.conf'):
        log_err(path + '/checker/service.conf' + " - file missing")
        return False
    if not checkServiceConf(id, path, vulnbox_store_json):
        log_err(path + '/checker/service.conf - invalid params')
        return False
    return True


# ---- Scan teams/ : validate each team's info.json and copy its logo. ----
teams_names = []
teams_json = []
for s in teams_path:
    filename = s.split("/")
    filename = filename[len(filename) - 1]
    filename = filename.lower()
    if filename in teams_names:
        log_err(filename + ' - duplicates')
        continue
    teams_names.append(filename)
    pattern = re.compile("^([a-z0-9_]+)$")
    if not pattern.match(filename):
        log_err('teams/' + filename + ' - allow contains a lower english characters, numbers and _')
        continue
    print_header("\n* teams/" + filename + " scanning...")
    info_json_path = s + '/info.json'
    if not os.path.isfile(info_json_path):
        log_err(info_json_path + ' - not found')
        continue
    with open(info_json_path) as f:
        info_json = json.load(f)
    if 'id' not in info_json:
        log_err(info_json_path + " - not found field 'id'")
        continue
    if info_json['id'] != filename:
        log_err(info_json_path + " - 'id' must contains " + filename)
        continue
    if 'name' not in info_json:
        log_err(info_json_path + " - not found field 'name'")
        continue
    if 'logo' not in info_json:
        log_err(info_json_path + " - not found field 'logo'")
        continue
    logo_img = info_json['logo']
    if logo_img.split(".")[0] != filename:
        log_err(info_json_path + " - base file name of logo must be " + filename)
        continue
    logo_img = s + '/' + info_json['logo']
    if not os.path.isfile(logo_img):
        log_err(info_json_path + ' - not found file ' + logo_img)
        continue
    shutil.copyfile(logo_img, './html/teams/' + info_json['logo'])
    teams_json.append(info_json)
    print_success("Done")

with open('html/teams.json', 'w') as outfile:
    json.dump(teams_json, outfile, ensure_ascii=False, indent=4)

# ---- Scan services/ : optionally sync from GitHub, validate, archive. ----
services_names = []
services_json = []
for s in services_path:
    filename = s.split("/")
    filename = filename[len(filename) - 1]
    filename = filename.lower()
    if filename in services_names:
        log_err(filename + ' - duplicates')
        continue
    services_names.append(filename)
    pattern = re.compile("^([a-z0-9_]+)$")
    if not pattern.match(filename):
        log_err('services/' + filename + ' - allow contains a lower english characters, numbers and _')
        continue
    zip_file_name = 'html/' + filename + '.zip'
    if os.path.isfile(zip_file_name):
        os.remove(zip_file_name)
    print_header("\n* services/" + filename + " scanning...")

    # Try downloads form github (original used bare strings as comments).
    github_download_json_path = s + '/github-download.json'
    if os.path.isfile(github_download_json_path):
        with open(github_download_json_path) as f:
            github_download_json = json.load(f)
        if "github" not in github_download_json:
            log_err(github_download_json_path + " - not found field 'github'")
            continue
        github_link = github_download_json['github']
        if downloadFromGithub(s, github_link) == False:
            log_err(github_download_json_path + " could not download from github")
            continue

    # check the struct of folder
    if not checkPackageService(filename, s):
        log_err('./services/' + filename + ' - invalid')
        continue
    vulnbox_store_json_path = s + '/vulnbox-store.json'
    if not prepareArchive(filename, s):
        log_err('./services/' + filename + ' - could not prepare zip archive')
        continue
    with open(vulnbox_store_json_path) as f:
        vulnbox_store_json = json.load(f)
    services_json.append(vulnbox_store_json)
    print_success("Done")

with open('html/services.json', 'w') as outfile:
    json.dump(services_json, outfile, ensure_ascii=False, indent=4)

# ---- Render index.html from the <template-service> block. ----
with open('src/index.html') as f:
    index_html = f.readlines()
index_html = "".join(index_html)
start_i = index_html.find("<template-service>") + len("<template-service>")
end_i = index_html.find("</template-service>")
template_service = index_html[start_i:end_i]
_prev = index_html[:start_i]
_next = index_html[end_i:]
rows = []
for srvc in services_json:
    row = template_service
    for name in expected_vulnbox_store_json_fields:
        if isinstance(srvc[name], basestring):  # Python 2 only
            # NOTE(review): the provided source is truncated mid-statement at
            # this replace() call; the '}' suffix and srvc[name] value are the
            # only completion consistent with the isinstance guard -- confirm
            # against the original repository.
            row = row.replace('{' + name + '}', srvc[name])
<reponame>rajkubp020/helloword # # # lmpsdata.py # # For reading, writing and manipulating lammps data files # For calculation of certain properities using lammps data files # For creating VMD input text files using lammps data files # All x,y,z calculations assume the information includes image flags class Lmpsdata: def __init__(self,file,atomtype): """initiates lammps data structures""" self.atomtype=atomtype self.keywords=[] self.atoms=[] self.angles=[] self.bonds=[] self.dihedrals=[] self.dipoles=[] self.impropers=[] self.masses=[] self.shapes=[] self.velocities=[] self.anglecoef=[] self.bondcoef=[] self.dihedralcoef=[] self.impropercoef=[] self.paircoef=[] self.read(file) def read(self,file): """Reads in lammps data file Skips the first line of the file (Comment line) First reads the header portion of the file (sectflag=1) blank lines are skipped header keywords delineate an assignment if no header keyword is found, body portion begins Second reads the body portion of the file (sectflag=2) first line of a section has only a keyword next line is skipped remaining lines contain values a blank line signifies the end of that section if no value is listed on a line than a keyword must be used File is read until the end The keywords read in are stored in self.keywords""" if file=='': print 'no file is given. 
Will have to build keywords and class structures manually' return sectflag=1 f=open(file,'r') f.readline() for line in f: row=line.split() if sectflag==1: if len(row)==0: #skip line the line is blank pass elif len(row)==1: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row elif len(row)==2: if row[1]=='atoms': self.atomnum=row[0] self.keywords.append('atoms') elif row[1]=='bonds': self.bondnum=row[0] self.keywords.append('bonds') elif row[1]=='angles': self.anglenum=row[0] self.keywords.append('angles') elif row[1]=='dihedrals': self.dihedralnum=row[0] self.keywords.append('dihedrals') elif row[1]=='impropers': self.impropernum=row[0] self.keywords.append('impropers') else: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row elif len(row)==3: if row[1]=='atom' and row[2]=='types': self.atomtypenum=row[0] self.keywords.append('atom types') elif row[1]=='bond' and row[2]=='types': self.bondtypenum=row[0] self.keywords.append('bond types') elif row[1]=='angle' and row[2]=='types': self.angletypenum=row[0] self.keywords.append('angle types') elif row[1]=='dihedral' and row[2]=='types': self.dihedraltypenum=row[0] self.keywords.append('dihedral types') elif row[1]=='improper' and row[2]=='types': self.impropertypenum=row[0] self.keywords.append('improper types') else: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row elif len(row)==4: if row[2]=='xlo' and row[3]=='xhi': self.xdimension=[row[0], row[1]] self.keywords.append('xlo xhi') elif row[2]=='ylo' and row[3]=='yhi': self.ydimension=[row[0], row[1]] self.keywords.append('ylo yhi') elif row[2]=='zlo' and row[3]=='zhi': self.zdimension=[row[0], row[1]] self.keywords.append('zlo zhi') else: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked 
in body portion keyword=row elif len(row)==5: if row[1]=='extra' and row[2]=='bond' and row[3]=='per' and row[4]=='atom': self.extrabonds=row[0] self.keywords.append('extra bond per atom') else: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row elif len(row)==6: if row[3]=='xy' and row[4]=='xz' and row[5]=='yz': self.tilt=[row[0], row[1], row[2]] self.keywords.append('xy xz yz') else: # Set Sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row else: # set sectflag to 2, assume line is a keyword sectflag=2 checkkey=1 #ensures keyword will be checked in body portion keyword=row elif sectflag==2: if checkkey==1: if len(keyword)==1: if keyword[0]=='Atoms' or keyword[0]=='Velocities' or keyword[0]=='Masses' or\ keyword[0]=='Shapes' or keyword[0]=='Dipoles' or keyword[0]=='Bonds' or\ keyword[0]=='Angles' or keyword[0]=='Dihedrals' or keyword[0]=='Impropers': bodyflag=1 blanknum=0 self.keywords.append(keyword[0]) checkkey=0 else: bodyflag=0 checkkey=0 elif len(keyword)==2: if row[1]=='Coeffs' and (row[0]=='Pair' or row[0]=='Bond' or row[0]=='Angle' or\ row[0]=='Dihedral' or row[0]=='Improper'):#class 2 force field keywords not included bodyflag=1 blanknum=0 self.keywords.append('{0} {1}'.format(keyword[0],keyword[1])) checkkey=0 else: bodyflag=0 checkkey=0 else: #egnore line and set bodyflag=0 bodyflag=0 checkkey=0 if bodyflag==0: #bodyflag 0 means no body keyword has been found if len(row)==1: if row[0]=='Atoms' or row[0]=='Velocities' or row[0]=='Masses' or\ row[0]=='Shapes' or row[0]=='Dipoles' or row[0]=='Bonds' or\ row[0]=='Angles' or row[0]=='Dihedrals' or row[0]=='Impropers': bodyflag=1 blanknum=0 keyword=row self.keywords.append(keyword[0]) else: #egnore line pass elif len(row)==2: if row[1]=='Coeffs' and (row[0]=='Pair' or row[0]=='Bond' or row[0]=='Angle' or\ row[0]=='Dihedral' or row[0]=='Improper'):#class 2 force 
field keywords not included bodyflag=1 blanknum=0 keyword=row self.keywords.append('{0} {1}'.format(keyword[0],keyword[1])) else: #egnore line pass else: #egnore line pass elif bodyflag==1: #currently assumes 1 or more blank lines are between body data keywords if len(row)==0: blanknum+=1 if blanknum>1: bodyflag=0 elif len(keyword)==1: if keyword[0]=='Atoms': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.atoms.append(row) elif keyword[0]=='Velocities': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.velocities.append(row) elif keyword[0]=='Masses': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.masses.append(row) elif keyword[0]=='Shapes': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.shapes.append(row) elif keyword[0]=='Dipoles': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.dipoles.append(row) elif keyword[0]=='Bonds': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.bonds.append(row) elif keyword[0]=='Angles': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.angles.append(row) elif keyword[0]=='Dihedrals': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.dihedrals.append(row) elif keyword[0]=='Impropers': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.impropers.append(row) else: #egnore line and change bodyflag to 0 bodyflag=0 elif len(keyword)==2: if keyword[0]=='Pair' and keyword[1]=='Coeffs': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.paircoef.append(row) elif keyword[0]=='Bond' and keyword[1]=='Coeffs': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.bondcoef.append(row) elif keyword[0]=='Angle' and keyword[1]=='Coeffs': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.anglecoef.append(row) elif keyword[0]=='Dihedral' and keyword[1]=='Coeffs': try: int(row[0]) except ValueError: keyword=row 
checkkey=1 else: self.dihedralcoef.append(row) elif keyword[0]=='Improper' and keyword[1]=='Coeffs': try: int(row[0]) except ValueError: keyword=row checkkey=1 else: self.impropercoef.append(row) else: #egnore line and change bodyflag to 0 bodyflag=0 else: #egnore line and change bodyflag to 0 bodyflag=0 f.close() def write(self,file,modflag): """Write lammps data files using the lammps keywords and lammpsdata structures writes first line of the file as a Comment line if no modifications to any of the lammpsdata structures (modflag=0) Use Keywords to write lammpsdata structures directly if modifications to any of the lammpsdata structures (modflag=1) Key Lammpsdata structures like atom numbers, coeficient numbers need to be modified to match the other modified lammpsdata structures This section will still use the keywords to write lammpsdata structures. For all modflags, the code will write data to the file until all of the keyword's data structures have been finished writing. The keywords are stored in self.keywords""" f=open(file,'w') f.write('polymer data file\n') for row in self.keywords: if row=='atoms': if modflag==0: f.write('{0} {1}\n'.format(self.atomnum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.atoms),row)) elif row=='bonds': if modflag==0: f.write('{0} {1}\n'.format(self.bondnum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.bonds),row)) elif row=='angles': if modflag==0: f.write('{0} {1}\n'.format(self.anglenum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.angles),row)) elif row=='dihedrals': if modflag==0: f.write('{0} {1}\n'.format(self.dihedralnum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.dihedrals),row)) elif row=='impropers': if modflag==0: f.write('{0} {1}\n'.format(self.impropernum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.impropers),row)) elif row=='atom types': if modflag==0: f.write('{0} {1}\n'.format(self.atomtypenum,row)) elif modflag==1: f.write('{0} 
{1}\n'.format(len(self.masses),row)) elif row=='bond types': if modflag==0: f.write('{0} {1}\n'.format(self.bondtypenum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.bondcoef),row)) elif row=='angle types': if modflag==0: f.write('{0} {1}\n'.format(self.angletypenum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.anglecoef),row)) elif row=='dihedral types': if modflag==0: f.write('{0} {1}\n'.format(self.dihedraltypenum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.dihedralcoef),row)) elif row=='improper types': if modflag==0: f.write('{0} {1}\n'.format(self.impropertypenum,row)) elif modflag==1: f.write('{0} {1}\n'.format(len(self.impropercoef),row)) elif row=='xlo xhi': f.write('{0} {1} {2}\n'.format(self.xdimension[0],self.xdimension[1],row)) elif row=='ylo yhi': f.write('{0} {1} {2}\n'.format(self.ydimension[0],self.ydimension[1],row)) elif row=='zlo zhi': f.write('{0} {1} {2}\n'.format(self.zdimension[0],self.zdimension[1],row)) elif row=='extra bond per atom': f.write('{0} {1}\n'.format(self.extrabonds,row)) elif row=='xy xz yz': f.write('{0} {1} {2} {3}\n'.format(self.tilt[0],self.tilt[1],self.tilt[2],row)) elif row=='Atoms': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.atoms: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Velocities': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.velocities: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be 
added between the end of body data and a new keyword elif row=='Masses': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.masses: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Shapes': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.shapes: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Dipoles': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.dipoles: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Bonds': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.bonds: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Angles': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in 
self.angles: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Dihedrals': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.dihedrals: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween elif row=='Impropers': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.impropers: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body data with space imbetween f.write('\n') #allows space to be added between the end of body data and a new keyword elif row=='Pair Coeffs': f.write('\n{0}'.format(row)) #new line between header and body portion or two body keywords f.write('\n') #new line between body keyword and body data for line in self.paircoef: f.write('\n') #creates a new line for adding body data for item in line: f.write(' {0}'.format(item)) #adds in each peice of body
<filename>blaze/compute/tests/test_sql_compute.py<gh_stars>0 from __future__ import absolute_import, division, print_function import pytest sa = pytest.importorskip('sqlalchemy') import itertools import sqlite3 from distutils.version import LooseVersion import datashape from odo import into, discover import numpy as np import pandas as pd import pandas.util.testing as tm from pandas import DataFrame from toolz import unique from odo import odo, drop, resource from blaze import data as bz_data from blaze.compatibility import xfail from blaze.compute.sql import compute, select, lower_column, compute_up from blaze.expr import ( by, coalesce, cos, count, datetime as bz_datetime, exp, floor, greatest, join, least, mean, merge, nunique, sin, sum, summary, symbol, transform, ) from blaze.utils import tmpfile, example, normalize, literal_compile def computefull(t, s): return select(compute(t, s, return_type='native')) names = ('tbl%d' % i for i in itertools.count()) @pytest.fixture(scope='module') def city_data(): # make the engine engine = sa.create_engine('sqlite:///:memory:') metadata = sa.MetaData(engine) # name table name = sa.Table('name', metadata, sa.Column('id', sa.Integer), sa.Column('name', sa.String), ) name.create() # city table city = sa.Table('city', metadata, sa.Column('id', sa.Integer), sa.Column('city', sa.String), sa.Column('country', sa.String), ) city.create() s = symbol('s', discover(engine)) return {'engine': engine, 'metadata': metadata, 'name': name, 'city': city, 's': s} t = symbol('t', 'var * {name: string, amount: int, id: int}') t_str_cat = symbol('t', """var * {name: string, amount: int, id: int, comment: string, product: string}""") nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}') metadata = sa.MetaData() s = sa.Table('accounts', metadata, sa.Column('name', sa.String), sa.Column('amount', sa.Integer), sa.Column('id', sa.Integer, primary_key=True)) s_str_cat = sa.Table('accounts2', metadata, sa.Column('name', sa.String), 
sa.Column('amount', sa.Integer), sa.Column('id', sa.Integer, primary_key=True), sa.Column('comment', sa.String), sa.Column('product', sa.String)) tdate = symbol('t', """var * { name: string, amount: int, id: int, occurred_on: datetime }""") ns = sa.Table('nullaccounts', metadata, sa.Column('name', sa.String, nullable=True), sa.Column('amount', sa.REAL), sa.Column('id', sa.Integer, primary_key=True), ) sdate = sa.Table('accdate', metadata, sa.Column('name', sa.String), sa.Column('amount', sa.Integer), sa.Column('id', sa.Integer, primary_key=True), sa.Column('occurred_on', sa.DateTime)) tbig = symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}') sbig = sa.Table('accountsbig', metadata, sa.Column('name', sa.String), sa.Column('sex', sa.String), sa.Column('amount', sa.Integer), sa.Column('id', sa.Integer, primary_key=True)) def test_table(): result = str(computefull(t, s)) expected = """ SELECT accounts.name, accounts.amount, accounts.id FROM accounts """.strip() assert normalize(result) == normalize(expected) def test_projection(): print(compute(t[['name', 'amount']], s, return_type='native')) assert str(compute(t[['name', 'amount']], s, return_type='native')) == \ str(sa.select([s.c.name, s.c.amount])) def test_eq(): assert str(compute(t['amount'] == 100, s, post_compute=False, return_type='native')) == \ str(s.c.amount == 100) def test_eq_unicode(): assert str(compute(t['name'] == u'Alice', s, post_compute=False, return_type='native')) == \ str(s.c.name == u'Alice') def test_selection(): assert str(compute(t[t['amount'] == 0], s, return_type='native')) == \ str(sa.select([s]).where(s.c.amount == 0)) assert str(compute(t[t['amount'] > 150], s, return_type='native')) == \ str(sa.select([s]).where(s.c.amount > 150)) def test_arithmetic(): assert str(compute(t['amount'] + t['id'], s, return_type='native')) == \ str(sa.select([s.c.amount + s.c.id])) assert str(compute(t['amount'] + t['id'], s, post_compute=False, return_type='native')) == \ 
str(s.c.amount + s.c.id) assert str(compute(t['amount'] * t['id'], s, post_compute=False, return_type='native')) == \ str(s.c.amount * s.c.id) assert str(compute(t['amount'] * 2, s, post_compute=False, return_type='native')) == \ str(s.c.amount * 2) assert str(compute(2 * t['amount'], s, post_compute=False, return_type='native')) == \ str(2 * s.c.amount) assert (str(compute(~(t['amount'] > 10), s, post_compute=False, return_type='native')) == "accounts.amount <= :amount_1") assert str(compute(t['amount'] + t['id'] * 2, s, return_type='native')) == \ str(sa.select([s.c.amount + s.c.id * 2])) def test_join(): metadata = sa.MetaData() lhs = sa.Table('amounts', metadata, sa.Column('name', sa.String), sa.Column('amount', sa.Integer)) rhs = sa.Table('ids', metadata, sa.Column('name', sa.String), sa.Column('id', sa.Integer)) expected = lhs.join(rhs, lhs.c.name == rhs.c.name) expected = select(list(unique(expected.columns, key=lambda c: c.name))).select_from(expected) L = symbol('L', 'var * {name: string, amount: int}') R = symbol('R', 'var * {name: string, id: int}') joined = join(L, R, 'name') result = compute(joined, {L: lhs, R: rhs}, return_type='native') assert normalize(str(result)) == normalize(""" SELECT amounts.name, amounts.amount, ids.id FROM amounts JOIN ids ON amounts.name = ids.name""") assert str(select(result)) == str(select(expected)) # Schemas match assert list(result.c.keys()) == list(joined.fields) # test sort on join result = compute(joined.sort('amount'), {L: lhs, R: rhs}, return_type='native') assert normalize(str(result)) == normalize(""" select anon_1.name, anon_1.amount, anon_1.id from (select amounts.name as name, amounts.amount as amount, ids.id as id from amounts join ids on amounts.name = ids.name) as anon_1 order by anon_1.amount asc""") def test_clean_complex_join(): metadata = sa.MetaData() lhs = sa.Table('amounts', metadata, sa.Column('name', sa.String), sa.Column('amount', sa.Integer)) rhs = sa.Table('ids', metadata, sa.Column('name', 
sa.String), sa.Column('id', sa.Integer)) L = symbol('L', 'var * {name: string, amount: int}') R = symbol('R', 'var * {name: string, id: int}') joined = join(L[L.amount > 0], R, 'name') result = compute(joined, {L: lhs, R: rhs}, return_type='native') expected1 = """ SELECT amounts.name, amounts.amount, ids.id FROM amounts JOIN ids ON amounts.name = ids.name WHERE amounts.amount > :amount_1""" expected2 = """ SELECT alias.name, alias.amount, ids.id FROM (SELECT amounts.name AS name, amounts.amount AS amount FROM amounts WHERE amounts.amount > :amount_1) AS alias JOIN ids ON alias.name = ids.name""" assert (normalize(str(result)) == normalize(expected1) or normalize(str(result)) == normalize(expected2)) def test_multi_column_join(): metadata = sa.MetaData() lhs = sa.Table('aaa', metadata, sa.Column('x', sa.Integer), sa.Column('y', sa.Integer), sa.Column('z', sa.Integer)) rhs = sa.Table('bbb', metadata, sa.Column('w', sa.Integer), sa.Column('x', sa.Integer), sa.Column('y', sa.Integer)) L = symbol('L', 'var * {x: int, y: int, z: int}') R = symbol('R', 'var * {w: int, x: int, y: int}') joined = join(L, R, ['x', 'y']) expected = lhs.join(rhs, (lhs.c.x == rhs.c.x) & (lhs.c.y == rhs.c.y)) expected = select(list(unique(expected.columns, key=lambda c: c.name))).select_from(expected) result = compute(joined, {L: lhs, R: rhs}, return_type='native') assert str(result) == str(expected) assert str(select(result)) == str(select(expected)) # Schemas match print(result.c.keys()) print(joined.fields) assert list(result.c.keys()) == list(joined.fields) def test_unary_op(): assert str(compute(exp(t['amount']), s, post_compute=False, return_type='native')) == \ str(sa.func.exp(s.c.amount)) assert str(compute(-t['amount'], s, post_compute=False, return_type='native')) == \ str(-s.c.amount) @pytest.mark.parametrize('unbiased', [True, False]) def test_std(unbiased): assert str(compute(t.amount.std(unbiased=unbiased), s, post_compute=False, return_type='native')) == \ str(getattr(sa.func, 
'stddev_%s' % ('samp' if unbiased else 'pop'))(s.c.amount)) @pytest.mark.parametrize('unbiased', [True, False]) def test_var(unbiased): assert str(compute(t.amount.var(unbiased=unbiased), s, post_compute=False, return_type='native')) == \ str(getattr(sa.func, 'var_%s' % ('samp' if unbiased else 'pop'))(s.c.amount)) def test_reductions(): assert str(compute(sum(t['amount']), s, post_compute=False, return_type='native')) == \ str(sa.sql.functions.sum(s.c.amount)) assert str(compute(mean(t['amount']), s, post_compute=False, return_type='native')) == \ str(sa.sql.func.avg(s.c.amount)) assert str(compute(count(t['amount']), s, post_compute=False, return_type='native')) == \ str(sa.sql.func.count(s.c.amount)) assert 'amount_sum' == compute( sum(t['amount']), s, post_compute=False, return_type='native').name def test_reduction_with_invalid_axis_argument(): with pytest.raises(ValueError): compute(t.amount.mean(axis=1)) with pytest.raises(ValueError): compute(t.count(axis=1)) with pytest.raises(ValueError): compute(t[['amount', 'id']].count(axis=1)) def test_nelements(): rhs = str(compute(t.count(), s, return_type='native')) assert str(compute(t.nelements(), s, return_type='native')) == rhs assert str(compute(t.nelements(axis=None), s, return_type='native')) == rhs assert str(compute(t.nelements(axis=0), s, return_type='native')) == rhs assert str(compute(t.nelements(axis=(0,)), s, return_type='native')) == rhs @pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for" " Record datashapes") def test_nelements_axis_1(): assert compute(t.nelements(axis=1), s, return_type='native') == len(s.columns) def test_count_on_table(): result = compute(t.count(), s, return_type='native') assert normalize(str(result)) == normalize(""" SELECT count(accounts.id) as count_1 FROM accounts""") result = compute(t[t.amount > 0].count(), s, return_type='native') assert ( normalize(str(result)) == normalize(""" SELECT count(accounts.id) as t_count FROM accounts WHERE 
accounts.amount > :amount_1""") or normalize(str(result)) == normalize(""" SELECT count(alias.id) as t_count FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id FROM accounts WHERE accounts.amount > :amount_1) as alias_2""")) def test_distinct(): result = str(compute(t['amount'].distinct(), s, post_compute=False, return_type='native')) assert 'distinct' in result.lower() assert 'amount' in result.lower() print(result) assert result == str(sa.distinct(s.c.amount)) def test_distinct_multiple_columns(): assert normalize(str(compute(t.distinct(), s, return_type='native'))) == normalize(""" SELECT DISTINCT accounts.name, accounts.amount, accounts.id FROM accounts""") def test_nunique(): result = str(computefull(nunique(t['amount']), s)) print(result) assert 'distinct' in result.lower() assert 'count' in result.lower() assert 'amount' in result.lower() def test_nunique_table(): result = normalize(str(computefull(t.nunique(), s))) expected = normalize("""SELECT count(alias.id) AS tbl_row_count FROM (SELECT DISTINCT accounts.name AS name, accounts.amount AS amount, accounts.id AS id FROM accounts) as alias""") assert result == expected @xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions") def test_binary_reductions(): assert str(compute(any(t['amount'] > 150), s, return_type='native')) == \ str(sa.sql.functions.any(s.c.amount > 150)) def test_by(): expr = by(t['name'], total=t['amount'].sum()) result = compute(expr, s, return_type='native') expected = sa.select([s.c.name, sa.sql.functions.sum(s.c.amount).label('total')] ).group_by(s.c.name) assert str(result) == str(expected) def test_by_head(): t2 = t.head(100) expr = by(t2['name'], total=t2['amount'].sum()) result = compute(expr, s, return_type='native') expected = """ SELECT accounts.name, sum(accounts.amount) as total FROM accounts GROUP by accounts.name LIMIT :param_1""" assert normalize(str(result)) == normalize(str(expected)) def test_by_two(): expr = 
by(tbig[['name', 'sex']], total=tbig['amount'].sum()) result = compute(expr, sbig, return_type='native') expected = (sa.select([sbig.c.name, sbig.c.sex, sa.sql.functions.sum(sbig.c.amount).label('total')]) .group_by(sbig.c.name, sbig.c.sex)) assert str(result) == str(expected) def test_by_three(): result = compute(by(tbig[['name', 'sex']], total=(tbig['id'] + tbig['amount']).sum()), sbig, return_type='native') assert normalize(str(result)) == normalize(""" SELECT accountsbig.name, accountsbig.sex, sum(accountsbig.id + accountsbig.amount) AS total FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex """) def test_by_summary_clean(): expr = by(t.name, min=t.amount.min(), max=t.amount.max()) result = compute(expr, s, return_type='native') expected = """ SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min FROM accounts GROUP BY accounts.name """ assert normalize(str(result)) == normalize(expected) def test_by_summary_single_column(): expr = by(t.name, n=t.name.count(), biggest=t.name.max()) result = compute(expr, s, return_type='native') expected = """ SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n FROM accounts GROUP BY accounts.name """ assert normalize(str(result)) == normalize(expected) def test_join_projection(): metadata = sa.MetaData() lhs = sa.Table('amounts', metadata, sa.Column('name', sa.String), sa.Column('amount', sa.Integer)) rhs = sa.Table('ids', metadata, sa.Column('name', sa.String), sa.Column('id', sa.Integer)) L = symbol('L', 'var * {name: string, amount:
decomp.similarity_compress( E, max_bond, method=method, renorm=renorm) # absorb them into the tensors to compress this bond bond, = bix ta.gate_(Cr, bond) tb.gate_(Cl.T, bond) if absorb != 'both': tensor_canonize_bond(ta, tb, absorb=absorb) def _compress_between_local_fit( self, tid1, tid2, max_bond, cutoff=0.0, absorb='both', method='als', select_local_distance=1, select_local_opts=None, include=None, exclude=None, **fit_opts ): if cutoff != 0.0: import warnings warnings.warn("Non-zero cutoff ignored by local fit compress.") select_local_opts = ensure_dict(select_local_opts) tn_loc_target = self._select_local_tids( (tid1, tid2), max_distance=select_local_distance, virtual=False, include=include, exclude=exclude, **select_local_opts) tn_loc_compress = tn_loc_target.copy() tn_loc_compress._compress_between_tids( tid1, tid2, max_bond=max_bond, cutoff=0.0) tn_loc_opt = tn_loc_compress.fit_( tn_loc_target, method=method, **fit_opts) for tid, t in tn_loc_opt.tensor_map.items(): self.tensor_map[tid].modify(data=t.data) if absorb != 'both': self._canonize_between_tids(tid1, tid2, absorb=absorb) def _compress_between_tids( self, tid1, tid2, max_bond=None, cutoff=1e-10, absorb='both', canonize_distance=None, canonize_opts=None, canonize_after_distance=None, canonize_after_opts=None, mode='basic', equalize_norms=False, squeeze=True, callback=None, **compress_opts ): ta = self.tensor_map[tid1] tb = self.tensor_map[tid2] tensor_fuse_squeeze(ta, tb, squeeze=squeeze) lix, bix, rix = group_inds(ta, tb) if len(bix) == 0: return if (max_bond is not None) and (cutoff == 0.0): lsize = prod(map(self.ind_size, lix)) rsize = prod(map(self.ind_size, rix)) if (lsize <= max_bond) or (rsize <= max_bond): # special case - fixing any orthonormal basis for the left or # right tensor (whichever has smallest outer dimensions) will # produce the required compression without any SVD compress_absorb = 'right' if lsize <= rsize else 'left' tensor_canonize_bond(ta, tb, absorb=compress_absorb) if 
absorb != compress_absorb: tensor_canonize_bond(ta, tb, absorb=absorb) if equalize_norms: self.strip_exponent(tid1, equalize_norms) self.strip_exponent(tid2, equalize_norms) return if canonize_distance: # gauge around pair by absorbing QR factors along bonds canonize_opts = ensure_dict(canonize_opts) canonize_opts.setdefault('equalize_norms', equalize_norms) self._canonize_around_tids( (tid1, tid2), max_distance=canonize_distance, **canonize_opts) compress_opts['max_bond'] = max_bond compress_opts['cutoff'] = cutoff compress_opts['absorb'] = absorb if mode == 'basic': tensor_compress_bond(ta, tb, **compress_opts) elif mode == 'full-bond': self._compress_between_full_bond_tids(tid1, tid2, **compress_opts) elif mode == 'local-fit': self._compress_between_local_fit(tid1, tid2, **compress_opts) else: # assume callable mode(self, tid1, tid2, **compress_opts) if equalize_norms: self.strip_exponent(tid1, equalize_norms) self.strip_exponent(tid2, equalize_norms) if canonize_after_distance: # 'undo' the inwards canonization canonize_after_opts = ensure_dict(canonize_after_opts) self._gauge_local_tids( tids=(tid1, tid2), max_distance=canonize_after_distance, **canonize_after_opts ) if callback is not None: callback(self, (tid1, tid2)) def compress_between( self, tags1, tags2, max_bond=None, cutoff=1e-10, absorb='both', canonize_distance=0, canonize_opts=None, equalize_norms=False, **compress_opts, ): r"""Compress the bond between the two single tensors in this network specified by ``tags1`` and ``tags2`` using :func:`~quimb.tensor.tensor_core.tensor_compress_bond`:: | | | | | | | | ==●====●====●====●== ==●====●====●====●== /| /| /| /| /| /| /| /| | | | | | | | | ==●====1====2====●== ==> ==●====L----R====●== /| /| /| /| /| /| /| /| | | | | | | | | ==●====●====●====●== ==●====●====●====●== /| /| /| /| /| /| /| /| This is an inplace operation. The compression is unlikely to be optimal with respect to the frobenius norm, unless the TN is already canonicalized at the two tensors. 
The ``absorb`` kwarg can be specified to yield an isometry on either the left or right resulting tensors. Parameters ---------- tags1 : Tags uniquely identifying the first ('left') tensor. tags2 : str or sequence of str Tags uniquely identifying the second ('right') tensor. max_bond : int or None, optional The maxmimum bond dimension. cutoff : float, optional The singular value cutoff to use. canonize_distance : int, optional How far to locally canonize around the target tensors first. canonize_opts : None or dict, optional Other options for the local canonization. equalize_norms : bool or float, optional If set, rescale the norms of all tensors modified to this value, stripping the rescaling factor into the ``exponent`` attribute. compress_opts Supplied to :func:`~quimb.tensor.tensor_core.tensor_compress_bond`. See Also -------- canonize_between """ tid1, = self._get_tids_from_tags(tags1, which='all') tid2, = self._get_tids_from_tags(tags2, which='all') self._compress_between_tids( tid1, tid2, max_bond=max_bond, cutoff=cutoff, absorb=absorb, canonize_distance=canonize_distance, canonize_opts=canonize_opts, equalize_norms=equalize_norms, **compress_opts) def compress_all(self, inplace=False, **compress_opts): """Inplace compress all bonds in this network. 
""" tn = self if inplace else self.copy() tn.fuse_multibonds_() for ix in tuple(tn.ind_map): try: tid1, tid2 = tn.ind_map[ix] except (ValueError, KeyError): # not a bond, or index already compressed away continue tn._compress_between_tids(tid1, tid2, **compress_opts) return tn compress_all_ = functools.partialmethod(compress_all, inplace=True) def _canonize_between_tids( self, tid1, tid2, absorb='right', equalize_norms=False, **canonize_opts, ): Tl = self.tensor_map[tid1] Tr = self.tensor_map[tid2] tensor_canonize_bond(Tl, Tr, absorb=absorb, **canonize_opts) if equalize_norms: self.strip_exponent(tid1, equalize_norms) self.strip_exponent(tid2, equalize_norms) def canonize_between(self, tags1, tags2, absorb='right', **canonize_opts): r"""'Canonize' the bond between the two single tensors in this network specified by ``tags1`` and ``tags2`` using ``tensor_canonize_bond``:: | | | | | | | | --●----●----●----●-- --●----●----●----●-- /| /| /| /| /| /| /| /| | | | | | | | | --●----1----2----●-- ==> --●---->~~~~R----●-- /| /| /| /| /| /| /| /| | | | | | | | | --●----●----●----●-- --●----●----●----●-- /| /| /| /| /| /| /| /| This is an inplace operation. This can only be used to put a TN into truly canonical form if the geometry is a tree, such as an MPS. Parameters ---------- tags1 : Tags uniquely identifying the first ('left') tensor, which will become an isometry. tags2 : str or sequence of str Tags uniquely identifying the second ('right') tensor. absorb : {'left', 'both', 'right'}, optional Which side of the bond to absorb the non-isometric operator. canonize_opts Supplied to :func:`~quimb.tensor.tensor_core.tensor_canonize_bond`. 
See Also -------- compress_between """ tid1, = self._get_tids_from_tags(tags1, which='all') tid2, = self._get_tids_from_tags(tags2, which='all') self._canonize_between_tids(tid1, tid2, absorb=absorb, **canonize_opts) def reduce_inds_onto_bond(self, inda, indb, tags=None, drop_tags=False): """Use QR factorization to 'pull' the indices ``inda`` and ``indb`` off of their respective tensors and onto the bond between them. This is an inplace operation. """ tida, = self._get_tids_from_inds(inda) tidb, = self._get_tids_from_inds(indb) ta, tb = self._tids_get(tida, tidb) bix = bonds(ta, tb) if ta.ndim > 3: self._split_tensor_tid( tida, left_inds=None, right_inds=[inda, *bix], method='qr') # get new location of ind tida, = self._get_tids_from_inds(inda) else: drop_tags = False if tb.ndim > 3: self._split_tensor_tid( tidb, left_inds=None, right_inds=[indb, *bix], method='qr') # get new location of ind tidb, = self._get_tids_from_inds(indb) else: drop_tags = False # contract the reduced factors and get the tensor self._contract_between_tids(tida, tidb) tab, = self._inds_get(inda, indb) # modify with the desired tags tags = tags_to_oset(tags) if drop_tags: tab.modify(tags=tags) else: tab.modify(tags=tab.tags | tags) def _get_neighbor_tids(self, tids): """Get the tids of tensors connected to the tensor at ``tid``. """ tids = tags_to_oset(tids) neighbors = oset_union( self.ind_map[ind] for tid in tids for ind in self.tensor_map[tid].inds ) # discard rather than remove to account for scalar ``tid`` tensor neighbors -= tids return neighbors def subgraphs(self, virtual=False): """Split this tensor network into disconneceted subgraphs. Parameters ---------- virtual : bool, optional Whether the tensor networks should view the original tensors or not - by default take copies. 
Returns ------- list[TensorNetwork] """ groups = [] tids = oset(self.tensor_map) # check all nodes while tids: # get a remaining node tid0 = tids.popright() queue = [tid0] group = oset(queue) while queue: # expand it until no neighbors tid = queue.pop() for tid_n in self._get_neighbor_tids(tid): if tid_n in group: continue else: group.add(tid_n) queue.append(tid_n) # remove current subgraph and continue tids -= group groups.append(group) return [self._select_tids(group, virtual=virtual) for group in groups] def get_tree_span( self, tids, min_distance=0, max_distance=None, include=None, exclude=None, ndim_sort='max', distance_sort='min', sorter=None, weight_bonds=True, inwards=True, ): """Generate a tree on the tensor network graph, fanning out from the tensors identified by ``tids``, up to a maximum of ``max_distance`` away. The tree can be visualized with :meth:`~quimb.tensor.tensor_core.TensorNetwork.draw_tree_span`. Parameters ---------- tids : sequence of str The nodes that define the region to span out of. min_distance : int, optional Don't add edges to the tree until this far from the region. For example, ``1`` will not include the last merges from neighboring tensors in the region defined by ``tids``. max_distance : None or int, optional Terminate branches once they reach this far away. If ``None`` there is no limit, include : sequence of str, optional If specified, only ``tids`` specified here can be part of the tree. exclude : sequence of str, optional If specified, ``tids`` specified here cannot be part of the tree. ndim_sort : {'min', 'max', 'none'}, optional When expanding the tree, how to choose what nodes to expand to next, once connectivity to the current surface has been taken into account. distance_sort : {'min', 'max', 'none'}, optional When expanding the tree, how to choose what nodes
<reponame>SanPen/GridCal<filename>src/GridCal/Gui/Main/MainWindow.py # -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'MainWindow.ui' ## ## Created by: Qt User Interface Compiler version 5.15.2 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide2.QtCore import * from PySide2.QtGui import * from PySide2.QtWidgets import * from .icons_rc import * class Ui_mainWindow(object): def setupUi(self, mainWindow): if not mainWindow.objectName(): mainWindow.setObjectName(u"mainWindow") mainWindow.resize(1210, 764) mainWindow.setBaseSize(QSize(0, 0)) icon = QIcon() icon.addFile(u":/Program icon/GridCal_icon.svg", QSize(), QIcon.Normal, QIcon.Off) mainWindow.setWindowIcon(icon) mainWindow.setAutoFillBackground(False) mainWindow.setIconSize(QSize(48, 48)) mainWindow.setToolButtonStyle(Qt.ToolButtonIconOnly) mainWindow.setDocumentMode(False) mainWindow.setTabShape(QTabWidget.Rounded) mainWindow.setDockNestingEnabled(False) mainWindow.setUnifiedTitleAndToolBarOnMac(False) self.actionOpen_file = QAction(mainWindow) self.actionOpen_file.setObjectName(u"actionOpen_file") icon1 = QIcon() icon1.addFile(u":/Icons/icons/loadc.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOpen_file.setIcon(icon1) self.actionSave = QAction(mainWindow) self.actionSave.setObjectName(u"actionSave") icon2 = QIcon() icon2.addFile(u":/Icons/icons/savec.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionSave.setIcon(icon2) self.actionExport = QAction(mainWindow) self.actionExport.setObjectName(u"actionExport") icon3 = QIcon() icon3.addFile(u":/Icons/icons/save.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionExport.setIcon(icon3) self.actionNew_project = QAction(mainWindow) self.actionNew_project.setObjectName(u"actionNew_project") icon4 = QIcon() icon4.addFile(u":/Icons/icons/new2c.svg", 
QSize(), QIcon.Normal, QIcon.Off) self.actionNew_project.setIcon(icon4) self.actionPower_flow = QAction(mainWindow) self.actionPower_flow.setObjectName(u"actionPower_flow") icon5 = QIcon() icon5.addFile(u":/Icons/icons/pf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionPower_flow.setIcon(icon5) self.actionPower_Flow_Time_series = QAction(mainWindow) self.actionPower_Flow_Time_series.setObjectName(u"actionPower_Flow_Time_series") icon6 = QIcon() icon6.addFile(u":/Icons/icons/pf_ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionPower_Flow_Time_series.setIcon(icon6) self.actionBigger_nodes = QAction(mainWindow) self.actionBigger_nodes.setObjectName(u"actionBigger_nodes") icon7 = QIcon() icon7.addFile(u":/Icons/icons/plus (gray).svg", QSize(), QIcon.Normal, QIcon.Off) self.actionBigger_nodes.setIcon(icon7) self.actionSmaller_nodes = QAction(mainWindow) self.actionSmaller_nodes.setObjectName(u"actionSmaller_nodes") icon8 = QIcon() icon8.addFile(u":/Icons/icons/minus (gray).svg", QSize(), QIcon.Normal, QIcon.Off) self.actionSmaller_nodes.setIcon(icon8) self.actionPower_flow_Stochastic = QAction(mainWindow) self.actionPower_flow_Stochastic.setObjectName(u"actionPower_flow_Stochastic") icon9 = QIcon() icon9.addFile(u":/Icons/icons/stochastic_pf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionPower_flow_Stochastic.setIcon(icon9) self.actionVoltage_stability = QAction(mainWindow) self.actionVoltage_stability.setObjectName(u"actionVoltage_stability") icon10 = QIcon() icon10.addFile(u":/Icons/icons/continuation_power_flow.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionVoltage_stability.setIcon(icon10) self.actionAbout = QAction(mainWindow) self.actionAbout.setObjectName(u"actionAbout") self.actionAbout.setIcon(icon) self.actionCenter_view = QAction(mainWindow) self.actionCenter_view.setObjectName(u"actionCenter_view") icon11 = QIcon() icon11.addFile(u":/Icons/icons/resize.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionCenter_view.setIcon(icon11) 
self.actionShort_Circuit = QAction(mainWindow) self.actionShort_Circuit.setObjectName(u"actionShort_Circuit") icon12 = QIcon() icon12.addFile(u":/Icons/icons/short_circuit.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionShort_Circuit.setIcon(icon12) self.actionAutoatic_layout = QAction(mainWindow) self.actionAutoatic_layout.setObjectName(u"actionAutoatic_layout") icon13 = QIcon() icon13.addFile(u":/Icons/icons/automatic_layout.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionAutoatic_layout.setIcon(icon13) self.actionBlackout_cascade = QAction(mainWindow) self.actionBlackout_cascade.setObjectName(u"actionBlackout_cascade") self.actionBlackout_cascade.setCheckable(True) icon14 = QIcon() icon14.addFile(u":/Icons/icons/blackout.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionBlackout_cascade.setIcon(icon14) self.actionOPF = QAction(mainWindow) self.actionOPF.setObjectName(u"actionOPF") icon15 = QIcon() icon15.addFile(u":/Icons/icons/dcopf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOPF.setIcon(icon15) self.actionOPF_time_series = QAction(mainWindow) self.actionOPF_time_series.setObjectName(u"actionOPF_time_series") icon16 = QIcon() icon16.addFile(u":/Icons/icons/dcopf_ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOPF_time_series.setIcon(icon16) self.actionDetect_transformers = QAction(mainWindow) self.actionDetect_transformers.setObjectName(u"actionDetect_transformers") icon17 = QIcon() icon17.addFile(u":/Icons/icons/detect_tr.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionDetect_transformers.setIcon(icon17) self.actionAuto_rate_branches = QAction(mainWindow) self.actionAuto_rate_branches.setObjectName(u"actionAuto_rate_branches") icon18 = QIcon() icon18.addFile(u":/Icons/icons/rate_br.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionAuto_rate_branches.setIcon(icon18) self.actionExport_all_the_device_s_profiles = QAction(mainWindow) self.actionExport_all_the_device_s_profiles.setObjectName(u"actionExport_all_the_device_s_profiles") 
self.actionExport_all_the_device_s_profiles.setIcon(icon3) self.actionGrid_Reduction = QAction(mainWindow) self.actionGrid_Reduction.setObjectName(u"actionGrid_Reduction") icon19 = QIcon() icon19.addFile(u":/Icons/icons/grid_reduction.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionGrid_Reduction.setIcon(icon19) self.actionStorage_location_suggestion = QAction(mainWindow) self.actionStorage_location_suggestion.setObjectName(u"actionStorage_location_suggestion") self.actionStorage_location_suggestion.setCheckable(True) icon20 = QIcon() icon20.addFile(u":/Icons/icons/storage_loc.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionStorage_location_suggestion.setIcon(icon20) self.actionLaunch_data_analysis_tool = QAction(mainWindow) self.actionLaunch_data_analysis_tool.setObjectName(u"actionLaunch_data_analysis_tool") icon21 = QIcon() icon21.addFile(u":/Icons/icons/bars.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionLaunch_data_analysis_tool.setIcon(icon21) self.actionOnline_documentation = QAction(mainWindow) self.actionOnline_documentation.setObjectName(u"actionOnline_documentation") icon22 = QIcon() icon22.addFile(u":/Icons/icons/new.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOnline_documentation.setIcon(icon22) self.actionExport_all_results = QAction(mainWindow) self.actionExport_all_results.setObjectName(u"actionExport_all_results") icon23 = QIcon() icon23.addFile(u":/Icons/icons/export_pickle.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionExport_all_results.setIcon(icon23) self.actionSave_as = QAction(mainWindow) self.actionSave_as.setObjectName(u"actionSave_as") self.actionSave_as.setIcon(icon3) self.actionDelete_selected = QAction(mainWindow) self.actionDelete_selected.setObjectName(u"actionDelete_selected") icon24 = QIcon() icon24.addFile(u":/Icons/icons/delete3.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionDelete_selected.setIcon(icon24) self.actionLinearAnalysis = QAction(mainWindow) 
self.actionLinearAnalysis.setObjectName(u"actionLinearAnalysis") icon25 = QIcon() icon25.addFile(u":/Icons/icons/ptdf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionLinearAnalysis.setIcon(icon25) self.actionReset_console = QAction(mainWindow) self.actionReset_console.setObjectName(u"actionReset_console") icon26 = QIcon() icon26.addFile(u":/Icons/icons/undo.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionReset_console.setIcon(icon26) self.actionOpf_to_Power_flow = QAction(mainWindow) self.actionOpf_to_Power_flow.setObjectName(u"actionOpf_to_Power_flow") self.actionOpf_to_Power_flow.setCheckable(True) icon27 = QIcon() icon27.addFile(u":/Icons/icons/dcopf2ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOpf_to_Power_flow.setIcon(icon27) self.actionTry_to_fix_buses_location = QAction(mainWindow) self.actionTry_to_fix_buses_location.setObjectName(u"actionTry_to_fix_buses_location") icon28 = QIcon() icon28.addFile(u":/Icons/icons/move_bus.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionTry_to_fix_buses_location.setIcon(icon28) self.actionSet_OPF_generation_to_profiles = QAction(mainWindow) self.actionSet_OPF_generation_to_profiles.setObjectName(u"actionSet_OPF_generation_to_profiles") self.actionSet_OPF_generation_to_profiles.setIcon(icon27) self.actionPTDF_time_series = QAction(mainWindow) self.actionPTDF_time_series.setObjectName(u"actionPTDF_time_series") icon29 = QIcon() icon29.addFile(u":/Icons/icons/ptdf_ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionPTDF_time_series.setIcon(icon29) self.actionShow_color_controls = QAction(mainWindow) self.actionShow_color_controls.setObjectName(u"actionShow_color_controls") self.actionShow_color_controls.setCheckable(True) icon30 = QIcon() icon30.addFile(u":/Icons/icons/map.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionShow_color_controls.setIcon(icon30) self.actionAdd_circuit = QAction(mainWindow) self.actionAdd_circuit.setObjectName(u"actionAdd_circuit") icon31 = QIcon() 
icon31.addFile(u":/Icons/icons/load_add.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionAdd_circuit.setIcon(icon31) self.actionSync = QAction(mainWindow) self.actionSync.setObjectName(u"actionSync") self.actionSync.setCheckable(True) icon32 = QIcon() icon32.addFile(u":/Icons/icons/sync.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionSync.setIcon(icon32) self.actionDrawSchematic = QAction(mainWindow) self.actionDrawSchematic.setObjectName(u"actionDrawSchematic") icon33 = QIcon() icon33.addFile(u":/Icons/icons/grid_icon.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionDrawSchematic.setIcon(icon33) self.actionSigma_analysis = QAction(mainWindow) self.actionSigma_analysis.setObjectName(u"actionSigma_analysis") icon34 = QIcon() icon34.addFile(u":/Icons/icons/sigma.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionSigma_analysis.setIcon(icon34) self.actionClear_stuff_running_right_now = QAction(mainWindow) self.actionClear_stuff_running_right_now.setObjectName(u"actionClear_stuff_running_right_now") icon35 = QIcon() icon35.addFile(u":/Icons/icons/clear_runs.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionClear_stuff_running_right_now.setIcon(icon35) self.actionAdd_default_catalogue = QAction(mainWindow) self.actionAdd_default_catalogue.setObjectName(u"actionAdd_default_catalogue") icon36 = QIcon() icon36.addFile(u":/Icons/icons/CatalogueAdd.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionAdd_default_catalogue.setIcon(icon36) self.actionFind_node_groups = QAction(mainWindow) self.actionFind_node_groups.setObjectName(u"actionFind_node_groups") self.actionFind_node_groups.setCheckable(True) icon37 = QIcon() icon37.addFile(u":/Icons/icons/color_grid2.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionFind_node_groups.setIcon(icon37) self.actiongrid_Generator = QAction(mainWindow) self.actiongrid_Generator.setObjectName(u"actiongrid_Generator") self.actiongrid_Generator.setIcon(icon13) self.actionLicense = QAction(mainWindow) 
self.actionLicense.setObjectName(u"actionLicense") icon38 = QIcon() icon38.addFile(u":/Icons/icons/new2.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionLicense.setIcon(icon38) self.actionImportPlexosNodeLoad = QAction(mainWindow) self.actionImportPlexosNodeLoad.setObjectName(u"actionImportPlexosNodeLoad") icon39 = QIcon() icon39.addFile(u":/Icons/icons/load.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionImportPlexosNodeLoad.setIcon(icon39) self.actionImportPlexosGeneratorGeneration = QAction(mainWindow) self.actionImportPlexosGeneratorGeneration.setObjectName(u"actionImportPlexosGeneratorGeneration") self.actionImportPlexosGeneratorGeneration.setIcon(icon39) self.actionOTDF_time_series = QAction(mainWindow) self.actionOTDF_time_series.setObjectName(u"actionOTDF_time_series") icon40 = QIcon() icon40.addFile(u":/Icons/icons/otdf_ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOTDF_time_series.setIcon(icon40) self.actionImportPlexosBranchRates = QAction(mainWindow) self.actionImportPlexosBranchRates.setObjectName(u"actionImportPlexosBranchRates") self.actionImportPlexosBranchRates.setIcon(icon39) self.actionClustering_time_series = QAction(mainWindow) self.actionClustering_time_series.setObjectName(u"actionClustering_time_series") icon41 = QIcon() icon41.addFile(u":/Icons/icons/pf_ts_cluster.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionClustering_time_series.setIcon(icon41) self.actionSetSelectedBusArea = QAction(mainWindow) self.actionSetSelectedBusArea.setObjectName(u"actionSetSelectedBusArea") self.actionSetSelectedBusZone = QAction(mainWindow) self.actionSetSelectedBusZone.setObjectName(u"actionSetSelectedBusZone") self.actionSetSelectedBusCountry = QAction(mainWindow) self.actionSetSelectedBusCountry.setObjectName(u"actionSetSelectedBusCountry") self.actionImport_bus_coordinates = QAction(mainWindow) self.actionImport_bus_coordinates.setObjectName(u"actionImport_bus_coordinates") self.actionImport_bus_coordinates.setIcon(icon31) self.actionATC 
= QAction(mainWindow) self.actionATC.setObjectName(u"actionATC") icon42 = QIcon() icon42.addFile(u":/Icons/icons/atc.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionATC.setIcon(icon42) self.actionATC_Time_Series = QAction(mainWindow) self.actionATC_Time_Series.setObjectName(u"actionATC_Time_Series") icon43 = QIcon() icon43.addFile(u":/Icons/icons/atc_ts.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionATC_Time_Series.setIcon(icon43) self.actionContingency_analysis = QAction(mainWindow) self.actionContingency_analysis.setObjectName(u"actionContingency_analysis") icon44 = QIcon() icon44.addFile(u":/Icons/icons/otdf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionContingency_analysis.setIcon(icon44) self.actionApply_new_rates = QAction(mainWindow) self.actionApply_new_rates.setObjectName(u"actionApply_new_rates") icon45 = QIcon() icon45.addFile(u":/Icons/icons/data.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionApply_new_rates.setIcon(icon45) self.actionOptimal_Net_Transfer_Capacity = QAction(mainWindow) self.actionOptimal_Net_Transfer_Capacity.setObjectName(u"actionOptimal_Net_Transfer_Capacity") icon46 = QIcon() icon46.addFile(u":/Icons/icons/ntc_opf.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionOptimal_Net_Transfer_Capacity.setIcon(icon46) self.actionSet_schematic_positions_from_GPS_coordinates = QAction(mainWindow) self.actionSet_schematic_positions_from_GPS_coordinates.setObjectName(u"actionSet_schematic_positions_from_GPS_coordinates") self.actionSet_schematic_positions_from_GPS_coordinates.setIcon(icon33) self.actionInputs_analysis = QAction(mainWindow) self.actionInputs_analysis.setObjectName(u"actionInputs_analysis") icon47 = QIcon() icon47.addFile(u":/Icons/icons/inputs_analysis.svg", QSize(), QIcon.Normal, QIcon.Off) self.actionInputs_analysis.setIcon(icon47) self.centralwidget = QWidget(mainWindow) self.centralwidget.setObjectName(u"centralwidget") self.verticalLayout = QVBoxLayout(self.centralwidget) 
self.verticalLayout.setObjectName(u"verticalLayout") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.tabWidget = QTabWidget(self.centralwidget) self.tabWidget.setObjectName(u"tabWidget") self.GridTab = QWidget() self.GridTab.setObjectName(u"GridTab") self.verticalLayout_9 = QVBoxLayout(self.GridTab) self.verticalLayout_9.setSpacing(0) self.verticalLayout_9.setObjectName(u"verticalLayout_9") self.verticalLayout_9.setContentsMargins(0, 0, 0, 0) self.tabWidget_3 = QTabWidget(self.GridTab) self.tabWidget_3.setObjectName(u"tabWidget_3") self.tabWidget_3.setTabPosition(QTabWidget.South) self.tabWidget_3.setTabShape(QTabWidget.Rounded) self.tabWidget_3.setElideMode(Qt.ElideNone) self.tabWidget_3.setDocumentMode(True) self.GridSectionTab = QWidget() self.GridSectionTab.setObjectName(u"GridSectionTab") self.horizontalLayout_5 = QHBoxLayout(self.GridSectionTab) self.horizontalLayout_5.setObjectName(u"horizontalLayout_5") self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0) self.cascade_grid_splitter = QSplitter(self.GridSectionTab) self.cascade_grid_splitter.setObjectName(u"cascade_grid_splitter") self.cascade_grid_splitter.setOrientation(Qt.Horizontal) self.cascade_menu = QFrame(self.cascade_grid_splitter) self.cascade_menu.setObjectName(u"cascade_menu") self.cascade_menu.setFrameShape(QFrame.NoFrame) self.cascade_menu.setFrameShadow(QFrame.Raised) self.verticalLayout_3 = QVBoxLayout(self.cascade_menu) self.verticalLayout_3.setObjectName(u"verticalLayout_3") self.verticalLayout_3.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2 = QVBoxLayout() self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.frame_10 = QFrame(self.cascade_menu) self.frame_10.setObjectName(u"frame_10") self.frame_10.setFrameShape(QFrame.NoFrame) self.frame_10.setFrameShadow(QFrame.Raised) self.verticalLayout_5 = QVBoxLayout(self.frame_10) self.verticalLayout_5.setObjectName(u"verticalLayout_5") self.frame_11 = QFrame(self.frame_10) self.frame_11.setObjectName(u"frame_11") 
self.frame_11.setFrameShape(QFrame.NoFrame) self.frame_11.setFrameShadow(QFrame.Raised) self.horizontalLayout_3 = QHBoxLayout(self.frame_11) self.horizontalLayout_3.setObjectName(u"horizontalLayout_3") self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0) self.run_cascade_pushButton = QPushButton(self.frame_11) self.run_cascade_pushButton.setObjectName(u"run_cascade_pushButton") icon48 = QIcon() icon48.addFile(u":/Icons/icons/run_cascade.svg", QSize(), QIcon.Normal, QIcon.Off) self.run_cascade_pushButton.setIcon(icon48) self.horizontalLayout_3.addWidget(self.run_cascade_pushButton) self.run_cascade_step_pushButton = QPushButton(self.frame_11) self.run_cascade_step_pushButton.setObjectName(u"run_cascade_step_pushButton") icon49 = QIcon() icon49.addFile(u":/Icons/icons/run_cascade_step.svg", QSize(), QIcon.Normal, QIcon.Off) self.run_cascade_step_pushButton.setIcon(icon49) self.horizontalLayout_3.addWidget(self.run_cascade_step_pushButton) self.copy_cascade_step_pushButton = QPushButton(self.frame_11) self.copy_cascade_step_pushButton.setObjectName(u"copy_cascade_step_pushButton") icon50 = QIcon() icon50.addFile(u":/Icons/icons/copy.svg", QSize(), QIcon.Normal, QIcon.Off) self.copy_cascade_step_pushButton.setIcon(icon50) self.horizontalLayout_3.addWidget(self.copy_cascade_step_pushButton) self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum) self.horizontalLayout_3.addItem(self.horizontalSpacer_2) self.clear_cascade_pushButton = QPushButton(self.frame_11) self.clear_cascade_pushButton.setObjectName(u"clear_cascade_pushButton") icon51 = QIcon() icon51.addFile(u":/Icons/icons/delete.svg", QSize(), QIcon.Normal, QIcon.Off) self.clear_cascade_pushButton.setIcon(icon51) self.horizontalLayout_3.addWidget(self.clear_cascade_pushButton) self.verticalLayout_5.addWidget(self.frame_11) self.label_27 = QLabel(self.frame_10) self.label_27.setObjectName(u"label_27") self.verticalLayout_5.addWidget(self.label_27) self.cascade_tableView = 
QTableView(self.frame_10) self.cascade_tableView.setObjectName(u"cascade_tableView") self.verticalLayout_5.addWidget(self.cascade_tableView) self.verticalLayout_2.addWidget(self.frame_10) self.verticalLayout_3.addLayout(self.verticalLayout_2) self.cascade_grid_splitter.addWidget(self.cascade_menu) self.frame_6 = QFrame(self.cascade_grid_splitter) self.frame_6.setObjectName(u"frame_6") self.frame_6.setFrameShape(QFrame.NoFrame) self.frame_6.setFrameShadow(QFrame.Raised) self.verticalLayout_34 = QVBoxLayout(self.frame_6) self.verticalLayout_34.setObjectName(u"verticalLayout_34") self.grid_colouring_frame = QFrame(self.frame_6) self.grid_colouring_frame.setObjectName(u"grid_colouring_frame") self.grid_colouring_frame.setMaximumSize(QSize(16777215, 34)) self.grid_colouring_frame.setFrameShape(QFrame.NoFrame) self.grid_colouring_frame.setFrameShadow(QFrame.Raised) self.gridLayout_20 = QGridLayout(self.grid_colouring_frame) self.gridLayout_20.setObjectName(u"gridLayout_20") self.gridLayout_20.setContentsMargins(1, 1, 1, 1) self.simulation_results_step_comboBox = QComboBox(self.grid_colouring_frame) self.simulation_results_step_comboBox.setObjectName(u"simulation_results_step_comboBox") self.simulation_results_step_comboBox.setMinimumSize(QSize(256, 0)) self.gridLayout_20.addWidget(self.simulation_results_step_comboBox, 1, 6, 1, 1) self.horizontalSpacer_19 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum) self.gridLayout_20.addItem(self.horizontalSpacer_19, 1, 0, 1, 1) self.view_next_simulation_step_pushButton = QPushButton(self.grid_colouring_frame) self.view_next_simulation_step_pushButton.setObjectName(u"view_next_simulation_step_pushButton") icon52 = QIcon() icon52.addFile(u":/Icons/icons/next.svg", QSize(), QIcon.Normal, QIcon.Off) self.view_next_simulation_step_pushButton.setIcon(icon52) self.gridLayout_20.addWidget(self.view_next_simulation_step_pushButton, 1, 7, 1, 1) self.available_results_to_color_comboBox = QComboBox(self.grid_colouring_frame) 
self.available_results_to_color_comboBox.setObjectName(u"available_results_to_color_comboBox") self.available_results_to_color_comboBox.setMinimumSize(QSize(164, 0)) self.gridLayout_20.addWidget(self.available_results_to_color_comboBox, 1, 2, 1, 1) self.view_previous_simulation_step_pushButton = QPushButton(self.grid_colouring_frame) self.view_previous_simulation_step_pushButton.setObjectName(u"view_previous_simulation_step_pushButton") icon53 = QIcon() icon53.addFile(u":/Icons/icons/prev.svg", QSize(), QIcon.Normal, QIcon.Off) self.view_previous_simulation_step_pushButton.setIcon(icon53) self.gridLayout_20.addWidget(self.view_previous_simulation_step_pushButton, 1, 5, 1, 1) self.colour_results_pushButton = QPushButton(self.grid_colouring_frame) self.colour_results_pushButton.setObjectName(u"colour_results_pushButton") icon54 = QIcon() icon54.addFile(u":/Icons/icons/color_grid.svg", QSize(), QIcon.Normal, QIcon.Off) self.colour_results_pushButton.setIcon(icon54) self.gridLayout_20.addWidget(self.colour_results_pushButton, 1, 4, 1, 1) self.show_map_pushButton = QPushButton(self.grid_colouring_frame) self.show_map_pushButton.setObjectName(u"show_map_pushButton") icon55 = QIcon() icon55.addFile(u":/Icons/icons/show_color_controls.svg", QSize(), QIcon.Normal, QIcon.Off) self.show_map_pushButton.setIcon(icon55) self.gridLayout_20.addWidget(self.show_map_pushButton, 1, 1, 1, 1) self.verticalLayout_34.addWidget(self.grid_colouring_frame) self.frame_33 = QFrame(self.frame_6) self.frame_33.setObjectName(u"frame_33") self.frame_33.setFrameShape(QFrame.NoFrame) self.frame_33.setFrameShadow(QFrame.Raised) self.verticalLayout_30 = QVBoxLayout(self.frame_33) self.verticalLayout_30.setObjectName(u"verticalLayout_30") self.verticalLayout_30.setContentsMargins(0, 0, 0, 0) self.schematic_layout = QHBoxLayout() self.schematic_layout.setObjectName(u"schematic_layout") self.verticalLayout_30.addLayout(self.schematic_layout) self.verticalLayout_34.addWidget(self.frame_33) 
self.cascade_grid_splitter.addWidget(self.frame_6) self.horizontalLayout_5.addWidget(self.cascade_grid_splitter) icon56 = QIcon() icon56.addFile(u":/Icons/icons/schematic.svg", QSize(), QIcon.Normal, QIcon.Off) self.tabWidget_3.addTab(self.GridSectionTab, icon56, "") self.DataTab = QWidget() self.DataTab.setObjectName(u"DataTab") self.verticalLayout_8 = QVBoxLayout(self.DataTab) self.verticalLayout_8.setObjectName(u"verticalLayout_8") self.verticalLayout_8.setContentsMargins(0, 0, 0, 0) self.dataStructuresSplitter = QSplitter(self.DataTab) self.dataStructuresSplitter.setObjectName(u"dataStructuresSplitter") self.dataStructuresSplitter.setOrientation(Qt.Horizontal) self.frame_26 = QFrame(self.dataStructuresSplitter) self.frame_26.setObjectName(u"frame_26") self.frame_26.setFrameShape(QFrame.NoFrame) self.frame_26.setFrameShadow(QFrame.Raised) self.verticalLayout_27 = QVBoxLayout(self.frame_26) self.verticalLayout_27.setObjectName(u"verticalLayout_27") self.verticalLayout_27.setContentsMargins(-1, -1, 0, -1) self.frame_55 = QFrame(self.frame_26) self.frame_55.setObjectName(u"frame_55") self.frame_55.setFrameShape(QFrame.NoFrame) self.frame_55.setFrameShadow(QFrame.Raised) self.horizontalLayout_29 = QHBoxLayout(self.frame_55) self.horizontalLayout_29.setObjectName(u"horizontalLayout_29") self.horizontalLayout_29.setContentsMargins(0, 4, 0, 5) self.label_3 = QLabel(self.frame_55) self.label_3.setObjectName(u"label_3") self.horizontalLayout_29.addWidget(self.label_3) self.verticalLayout_27.addWidget(self.frame_55) self.dataStructuresListView = QListView(self.frame_26) self.dataStructuresListView.setObjectName(u"dataStructuresListView") self.verticalLayout_27.addWidget(self.dataStructuresListView) self.frame_17 = QFrame(self.frame_26) self.frame_17.setObjectName(u"frame_17") self.frame_17.setFrameShape(QFrame.NoFrame) self.frame_17.setFrameShadow(QFrame.Raised) self.horizontalLayout_26 = QHBoxLayout(self.frame_17) 
self.horizontalLayout_26.setObjectName(u"horizontalLayout_26") self.horizontalLayout_26.setContentsMargins(0, 0, 0, 0) self.analyze_objects_pushButton = QPushButton(self.frame_17) self.analyze_objects_pushButton.setObjectName(u"analyze_objects_pushButton") self.analyze_objects_pushButton.setIcon(icon21) self.horizontalLayout_26.addWidget(self.analyze_objects_pushButton) self.horizontalSpacer_14 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum) self.horizontalLayout_26.addItem(self.horizontalSpacer_14) self.verticalLayout_27.addWidget(self.frame_17) self.dataStructuresSplitter.addWidget(self.frame_26) self.templatesSplitter = QSplitter(self.dataStructuresSplitter) self.templatesSplitter.setObjectName(u"templatesSplitter") sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.MinimumExpanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.templatesSplitter.sizePolicy().hasHeightForWidth()) self.templatesSplitter.setSizePolicy(sizePolicy) self.templatesSplitter.setOrientation(Qt.Horizontal) self.frame_38 = QFrame(self.templatesSplitter) self.frame_38.setObjectName(u"frame_38") self.frame_38.setFrameShape(QFrame.NoFrame) self.frame_38.setFrameShadow(QFrame.Raised) self.verticalLayout_26 = QVBoxLayout(self.frame_38) self.verticalLayout_26.setObjectName(u"verticalLayout_26") self.verticalLayout_26.setContentsMargins(0, -1, 0, -1) self.frame_54 = QFrame(self.frame_38) self.frame_54.setObjectName(u"frame_54") self.frame_54.setMinimumSize(QSize(0, 25)) self.frame_54.setFrameShape(QFrame.NoFrame) self.frame_54.setFrameShadow(QFrame.Raised) self.horizontalLayout_28 = QHBoxLayout(self.frame_54) self.horizontalLayout_28.setObjectName(u"horizontalLayout_28") self.horizontalLayout_28.setContentsMargins(0, 0, 0, 0) self.property_comboBox = QComboBox(self.frame_54) self.property_comboBox.setObjectName(u"property_comboBox") self.property_comboBox.setMinimumSize(QSize(120, 0)) 
self.horizontalLayout_28.addWidget(self.property_comboBox) self.smart_search_lineEdit = QLineEdit(self.frame_54) self.smart_search_lineEdit.setObjectName(u"smart_search_lineEdit") self.smart_search_lineEdit.setMinimumSize(QSize(120, 0)) self.horizontalLayout_28.addWidget(self.smart_search_lineEdit) self.filter_pushButton = QPushButton(self.frame_54) self.filter_pushButton.setObjectName(u"filter_pushButton") icon57 = QIcon() icon57.addFile(u":/Icons/icons/magnifying_glass.svg", QSize(), QIcon.Normal, QIcon.Off) self.filter_pushButton.setIcon(icon57) self.horizontalLayout_28.addWidget(self.filter_pushButton) self.highlight_selection_buses_pushButton = QPushButton(self.frame_54) self.highlight_selection_buses_pushButton.setObjectName(u"highlight_selection_buses_pushButton") icon58 = QIcon() icon58.addFile(u":/Icons/icons/highlight.svg", QSize(), QIcon.Normal, QIcon.Off) self.highlight_selection_buses_pushButton.setIcon(icon58) self.horizontalLayout_28.addWidget(self.highlight_selection_buses_pushButton) self.busViewerButton = QPushButton(self.frame_54) self.busViewerButton.setObjectName(u"busViewerButton") self.busViewerButton.setIcon(icon33) self.horizontalLayout_28.addWidget(self.busViewerButton) self.horizontalSpacer_6 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum) self.horizontalLayout_28.addItem(self.horizontalSpacer_6) self.processTemplatesPushButton = QPushButton(self.frame_54) self.processTemplatesPushButton.setObjectName(u"processTemplatesPushButton") icon59 = QIcon() icon59.addFile(u":/Icons/icons/calculator.svg", QSize(), QIcon.Normal, QIcon.Off) self.processTemplatesPushButton.setIcon(icon59) self.horizontalLayout_28.addWidget(self.processTemplatesPushButton) self.viewTemplatesButton = QPushButton(self.frame_54) self.viewTemplatesButton.setObjectName(u"viewTemplatesButton") icon60 = QIcon() icon60.addFile(u":/Icons/icons/Catalogue.svg", QSize(), QIcon.Normal, QIcon.Off) self.viewTemplatesButton.setIcon(icon60) 
self.horizontalLayout_28.addWidget(self.viewTemplatesButton) self.verticalLayout_26.addWidget(self.frame_54) self.dataStructureTableView = QTableView(self.frame_38) self.dataStructureTableView.setObjectName(u"dataStructureTableView") self.verticalLayout_26.addWidget(self.dataStructureTableView) self.frame_9 = QFrame(self.frame_38) self.frame_9.setObjectName(u"frame_9") self.frame_9.setFrameShape(QFrame.NoFrame) self.frame_9.setFrameShadow(QFrame.Raised) self.horizontalLayout_25 = QHBoxLayout(self.frame_9) self.horizontalLayout_25.setObjectName(u"horizontalLayout_25") self.horizontalLayout_25.setContentsMargins(0, 0, 0, 0) self.add_object_pushButton = QPushButton(self.frame_9) self.add_object_pushButton.setObjectName(u"add_object_pushButton") icon61 = QIcon() icon61.addFile(u":/Icons/icons/plus.svg", QSize(), QIcon.Normal, QIcon.Off) self.add_object_pushButton.setIcon(icon61) self.horizontalLayout_25.addWidget(self.add_object_pushButton) self.delete_selected_objects_pushButton = QPushButton(self.frame_9) self.delete_selected_objects_pushButton.setObjectName(u"delete_selected_objects_pushButton") self.delete_selected_objects_pushButton.setIcon(icon51) self.horizontalLayout_25.addWidget(self.delete_selected_objects_pushButton) self.delete_and_reduce_pushButton
from django.test import TestCase
from django.urls import reverse
from mercury.models import EventCodeAccess, GFConfig
from ag_data.models import AGEvent, AGVenue, AGSensor, AGSensorType
from mercury.grafanaAPI.grafana_api import Grafana
import os
import datetime

# grafana host with basic auth
HOST = "http://admin:admin@localhost:3000"


class TestGFConfig(TestCase):
    """Integration tests for the GFConfig views.

    Exercises creating, updating and deleting Grafana config objects and the
    dashboard-management endpoints against a live Grafana instance reachable
    at HOST (basic-auth admin:admin). Each test logs in with the shared event
    code first, and setUp/tearDown keep the Grafana instance clean.
    """

    TESTCODE = "testcode"

    # Fixture data for a sensor with two float-valued wind-gust fields.
    test_sensor_name = "Wind Sensor"
    test_sensor_type = "Dual wind"
    test_sensor_format = {
        "left_gust": {"unit": "km/h", "format": "float"},
        "right_gust": {"unit": "km/h", "format": "float"},
    }
    test_sensor_graph_type = "graph"

    test_event_data = {
        "name": "Sunny Day Test Drive",
        "date": datetime.datetime(2020, 2, 2, 20, 21, 22),
        # NOTE: original used a backslash line-continuation inside the
        # literal, which embedded source indentation into the string; implicit
        # concatenation yields the intended single-space text.
        "description": "A very progressive test run at "
        "Sunnyside Daycare's Butterfly Room.",
        "location": "New York, NY",
    }

    test_venue_data = {
        "name": "Venue 1",
        "description": "foo",
        "latitude": 100,
        "longitude": 200,
    }

    def create_venue_and_event(self, event_name):
        """Create and save an AGVenue plus an AGEvent named *event_name*.

        Returns the saved AGEvent, linked to the freshly created venue.
        """
        venue = AGVenue.objects.create(
            name=self.test_venue_data["name"],
            description=self.test_venue_data["description"],
            latitude=self.test_venue_data["latitude"],
            longitude=self.test_venue_data["longitude"],
        )
        venue.save()
        event = AGEvent.objects.create(
            name=event_name,
            date=self.test_event_data["date"],
            description=self.test_event_data["description"],
            venue_uuid=venue,
        )
        event.save()
        return event

    def setUp(self):
        """Log in, create the current GFConfig, and reset Grafana state."""
        # api keys with admin and viewer level permissions
        self.ADMIN = Grafana.create_api_key(HOST, "admin", "Admin")
        self.VIEWER = Grafana.create_api_key(HOST, "viewer", "Viewer")

        # Named URL patterns used throughout the tests.
        self.login_url = "mercury:EventAccess"
        self.sensor_url = "mercury:sensor"
        self.event_url = "mercury:events"
        self.config_url = "mercury:gfconfig"
        self.config_update_url = "mercury:gfconfig_update"
        self.config_delete_url = "mercury:gfconfig_delete"
        self.config_update_dashboard_url = "mercury:gfconfig_update_dashboard"
        self.config_reset_dashboard_url = "mercury:gfconfig_reset_dashboard"
        self.config_delete_dashboard_url = "mercury:gfconfig_delete_dashboard"
        self.config_add_dashboard_url = "mercury:gfconfig_create_dashboard"

        test_code = EventCodeAccess(event_code=self.TESTCODE, enabled=True)
        test_code.save()
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        # FIX: the token value was a broken "<PASSWORD>" redaction placeholder
        # (a syntax error); every other GFConfig in this file uses self.ADMIN.
        self.gfconfig = GFConfig.objects.create(
            gf_name="Test", gf_host=HOST, gf_token=self.ADMIN, gf_current=True
        )
        self.gfconfig.save()

        # Create fresh grafana object
        self.grafana = Grafana(self.gfconfig)

        # Create random name to be used for event and datasource
        self.event_name = self.grafana.generate_random_string(10)
        self.datasource_name = self.grafana.generate_random_string(10)

        # Clear existing dashboard and datasource
        self.grafana.delete_all_dashboards()
        self.grafana.delete_all_datasources()

    def tearDown(self):
        """Wipe all dashboards/datasources created by the test."""
        # Create fresh grafana instance (in case test invalidated any
        # tokens, etc.)
        self.grafana = Grafana(self.gfconfig)
        # Clear all of the created dashboards
        self.grafana.delete_all_dashboards()
        self.grafana.delete_all_datasources()

    def _get_with_event_code(self, url, event_code):
        """Authenticate the test client with *event_code*, then GET *url*.

        Returns a (response, session) tuple for callers that need either.
        """
        self.client.get(reverse(self.login_url))
        self.client.post(reverse(self.login_url), data={"eventcode": event_code})
        response = self.client.get(reverse(url))
        session = self.client.session
        return response, session

    def test_config_view_get_success(self):
        """The config list view renders for a logged-in client."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)
        response = self.client.get(reverse(self.config_url))
        self.assertEqual(200, response.status_code)

    def test_config_view_get_existing_dashboard_displayed(self):
        """Configure page lists the event's dashboard and its sensor."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        venue = AGVenue.objects.create(
            name=self.test_venue_data["name"],
            description=self.test_venue_data["description"],
            latitude=self.test_venue_data["latitude"],
            longitude=self.test_venue_data["longitude"],
        )
        venue.save()

        sensor_type = AGSensorType.objects.create(
            name=self.test_sensor_type,
            processing_formula=0,
            format=self.test_sensor_format,
            graph_type=self.test_sensor_graph_type,
        )
        sensor_type.save()
        sensor = AGSensor.objects.create(
            name=self.test_sensor_name, type_id=sensor_type
        )
        sensor.save()

        # Send a request to create an event (should trigger the creation of a
        # grafana dashboard of the same name)
        self.client.post(
            reverse(self.event_url),
            data={
                "submit-event": "",
                "name": self.event_name,
                "date": self.test_event_data["date"],
                "description": self.test_event_data["description"],
                "venue_uuid": venue.uuid,
            },
        )

        response = self.client.get(
            "/gfconfig/configure/{}".format(self.gfconfig.id)
        )
        self.assertContains(response, self.event_name)
        self.assertContains(response, sensor.name)

    def test_config_post_success(self):
        """POSTing a valid config creates a GFConfig row and redirects."""
        # Delete GFConfig used for the tests (will interfere otherwise)
        self.gfconfig.delete()
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)
        response = self.client.post(
            reverse(self.config_url),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": self.ADMIN,
                "gf_username": "admin",
                "gf_password": "<PASSWORD>",
            },
        )
        self.assertEqual(302, response.status_code)

        # Restore GFConfig instance used for the tests
        self.gfconfig = GFConfig.objects.create(
            gf_name="Test", gf_host=HOST, gf_token=self.ADMIN, gf_current=True
        )
        self.gfconfig.save()

        gfconfig = GFConfig.objects.filter(gf_name="Test Grafana Instance")
        self.assertTrue(gfconfig.count() > 0)
        self.assertTrue(gfconfig[0].gf_name == "Test Grafana Instance")
        self.assertTrue(gfconfig[0].gf_host == HOST)
        self.assertTrue(gfconfig[0].gf_token == self.ADMIN)

    def test_config_post_fail_invalid_api_key(self):
        """A bogus API token is rejected: no GFConfig is created."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)
        response = self.client.post(
            reverse(self.config_url),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": "abcde",
            },
        )
        self.assertEqual(302, response.status_code)
        gfconfig = GFConfig.objects.filter(gf_name="Test Grafana Instance")
        self.assertTrue(gfconfig.count() == 0)

    def test_config_post_fail_insufficient_permissions(self):
        """A viewer-level token is rejected: no GFConfig is created."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)
        response = self.client.post(
            reverse(self.config_url),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": self.VIEWER,
                "gf_username": "admin",
                "gf_password": "<PASSWORD>",
            },
        )
        self.assertEqual(302, response.status_code)
        gfconfig = GFConfig.objects.filter(gf_name="Test Grafana Instance")
        self.assertTrue(gfconfig.count() == 0)

    def test_delete_config(self):
        """The delete view removes an existing GFConfig."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        GFConfig.objects.all().delete()
        gfconfig = GFConfig.objects.create(
            gf_name="Test Grafana Instance", gf_host=HOST, gf_token=self.ADMIN
        )
        gfconfig.save()
        gfconfig = GFConfig.objects.filter(
            gf_name="Test Grafana Instance"
        ).first()

        # NOTE: original wrapped reverse() in a single-argument
        # os.path.join(), which is a no-op.
        self.client.post(
            reverse(self.config_delete_url, kwargs={"gf_id": gfconfig.id}),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": self.ADMIN,
                "gf_username": "admin",
                "gf_password": "<PASSWORD>",
            },
        )

        gfconfig = GFConfig.objects.filter(gf_name="Test Grafana Instance")
        self.assertTrue(gfconfig.count() == 0)

    # test that GFConfig.gf_current can be set to True using the update view
    def test_update_config(self):
        """The update view flips gf_current to True."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        GFConfig.objects.all().delete()
        gfconfig = GFConfig.objects.create(
            gf_name="Test Grafana Instance",
            gf_host=HOST,
            gf_token=self.ADMIN,
            gf_current=False,
        )
        gfconfig.save()

        self.client.post(
            reverse(self.config_update_url, kwargs={"gf_id": gfconfig.id})
        )

        gfconfig = GFConfig.objects.all().first()
        self.assertEqual(gfconfig.gf_current, True)

    def test_config_post_event_exists_dashboard_created(self):
        """Adding a config creates dashboards for pre-existing events."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        self.create_venue_and_event(self.event_name)

        # Delete GFConfig used for the test (will interfere otherwise)
        self.gfconfig.delete()

        response = self.client.post(
            reverse(self.config_url),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": self.ADMIN,
                "gf_username": "admin",
                "gf_password": "<PASSWORD>",
            },
        )
        self.assertEqual(302, response.status_code)

        # Restore GFConfig instance used for the tests
        self.gfconfig = GFConfig.objects.create(
            gf_name="Test", gf_host=HOST, gf_token=self.ADMIN, gf_current=True
        )
        self.gfconfig.save()

        # check that dashboard was created with same name as event
        dashboard = self.grafana.get_dashboard_by_name(self.event_name)
        self.assertTrue(dashboard)
        self.assertEqual(dashboard["dashboard"]["title"], self.event_name)

    def test_config_post_event_exists_dashboard_created_with_sensor(self):
        """Adding a config creates a dashboard with a panel per sensor."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        # Create a sensor type and sensor
        sensor_type = AGSensorType.objects.create(
            name=self.test_sensor_type,
            processing_formula=0,
            format=self.test_sensor_format,
            graph_type=self.test_sensor_graph_type,
        )
        sensor_type.save()
        sensor = AGSensor.objects.create(
            name=self.test_sensor_name, type_id=sensor_type
        )
        sensor.save()

        self.create_venue_and_event(self.event_name)

        # Delete GFConfig used for the test (will interfere otherwise)
        self.gfconfig.delete()

        response = self.client.post(
            reverse(self.config_url),
            data={
                "submit": "",
                "gf_name": "Test Grafana Instance",
                "gf_host": HOST,
                "gf_token": self.ADMIN,
                "gf_username": "admin",
                "gf_password": "<PASSWORD>",
            },
        )
        self.assertEqual(302, response.status_code)

        # Restore GFConfig instance used for the tests
        self.gfconfig = GFConfig.objects.create(
            gf_name="Test", gf_host=HOST, gf_token=self.ADMIN, gf_current=True
        )
        self.gfconfig.save()

        # check that dashboard was created with expected panel
        dashboard = self.grafana.get_dashboard_by_name(self.event_name)
        self.assertTrue(dashboard)
        self.assertEqual(dashboard["dashboard"]["title"], self.event_name)

        # panels should have been created
        # querying like this because the returned dashboard object may have no
        # panels attribute, so trying to retrieve dashboard["panels"] could
        # throw a key error
        panels = dashboard["dashboard"].get("panels", None)
        self.assertTrue(panels)
        self.assertTrue(len(panels) == 1)
        panel = panels[0]
        self.assertEqual(panel["title"], self.test_sensor_name)

    def test_update_dashboard_panels_remove_all_single_gfconfig(self):
        """Posting an empty sensor list removes every panel."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        # Create an event
        event = self.create_venue_and_event(self.event_name)

        # Create a dashboard
        self.grafana.create_dashboard(self.event_name)

        # Add a panel to the dashboard
        # Create a sensor type and sensor
        sensor_type = AGSensorType.objects.create(
            name=self.test_sensor_type,
            processing_formula=0,
            format=self.test_sensor_format,
            graph_type=self.test_sensor_graph_type,
        )
        sensor_type.save()
        sensor = AGSensor.objects.create(
            name=self.test_sensor_name, type_id=sensor_type
        )
        sensor.save()

        # Add a sensor panel
        self.grafana.add_panel(sensor, event)

        # Update dashboard with empty list of sensors
        self.client.post(
            reverse(
                self.config_update_dashboard_url,
                kwargs={"gf_id": self.gfconfig.id},
            ),
            data={"dashboard_name": self.event_name, "sensors": []},
        )

        # Query dashboard
        dashboard = self.grafana.get_dashboard_by_name(self.event_name)
        self.assertTrue(dashboard)

        # Retrieve current panels
        try:
            panels = dashboard["dashboard"]["panels"]
        except KeyError:
            panels = []

        # Confirm panels were deleted
        self.assertEqual(panels, [])

    def test_update_dashboard_panels_keep_all_panels_single_gfconfig(self):
        """Posting every sensor id keeps all panels in place."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        self.create_venue_and_event(self.event_name)

        # create a dashboard
        self.grafana.create_dashboard(self.event_name)

        # add a panel to the dashboard
        # Create a sensor type and sensor
        sensor_type = AGSensorType.objects.create(
            name=self.test_sensor_type,
            processing_formula=0,
            format=self.test_sensor_format,
            graph_type=self.test_sensor_graph_type,
        )
        sensor_type.save()
        sensor = AGSensor.objects.create(
            name=self.test_sensor_name, type_id=sensor_type
        )
        sensor.save()

        sensors = AGSensor.objects.all()
        sensor_ids = []
        for sensor in sensors:
            sensor_ids.append(sensor.uuid)

        self.client.post(
            reverse(
                self.config_update_dashboard_url,
                kwargs={"gf_id": self.gfconfig.id},
            ),
            data={"dashboard_name": self.event_name, "sensors": sensor_ids},
        )

        dashboard = self.grafana.get_dashboard_by_name(self.event_name)
        self.assertTrue(dashboard)

        # Retrieve current panels
        try:
            panels = dashboard["dashboard"]["panels"]
        except KeyError:
            panels = []

        self.assertEqual(len(panels), 1)

    def test_update_dashboard_panels_keep_subset_of_panels_single_gfconfig(self):
        """Posting a subset of sensor ids keeps only those panels."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        self.create_venue_and_event(self.event_name)

        # create a dashboard
        self.grafana.create_dashboard(self.event_name)

        # add a panel to the dashboard
        # Create a sensor type and sensor
        sensor_type = AGSensorType.objects.create(
            name=self.test_sensor_type,
            processing_formula=0,
            format=self.test_sensor_format,
            graph_type=self.test_sensor_graph_type,
        )
        sensor_type.save()

        # Create 5 sensors
        # FIX: original appended the literal letter "i" instead of the loop
        # index, giving all 5 sensors the same name.
        for i in range(5):
            sensor = AGSensor.objects.create(
                name=self.test_sensor_name + str(i), type_id=sensor_type
            )
            sensor.save()

        # Retrieve sensor ids for the first 2 sensors
        sensor_ids = []
        sensors = AGSensor.objects.all()
        for i in range(2):
            sensor_ids.append(sensors[i].uuid)

        # Post to update the dashboard with 2 sensor panels
        self.client.post(
            reverse(
                self.config_update_dashboard_url,
                kwargs={"gf_id": self.gfconfig.id},
            ),
            data={"dashboard_name": self.event_name, "sensors": sensor_ids},
        )

        dashboard = self.grafana.get_dashboard_by_name(self.event_name)
        self.assertTrue(dashboard)

        # Retrieve current panels
        try:
            panels = dashboard["dashboard"]["panels"]
        except KeyError:
            panels = []

        self.assertEqual(len(panels), 2)
        for i in range(2):
            self.assertEqual(panels[i]["title"], sensors[i].name)

    def test_reset_dashboard_panels_single_gfconfig(self):
        """Reset restores all panels after an update removed a subset."""
        # Login
        self._get_with_event_code(self.sensor_url, self.TESTCODE)

        # update dashboard with a subset of panels, then restore all panels by
        # using reset
        self.create_venue_and_event(self.event_name)
        # create a dashboard
self.grafana.create_dashboard(self.event_name) # add a panel to the dashboard # Create a sensor type and sensor sensor_type = AGSensorType.objects.create( name=self.test_sensor_type, processing_formula=0, format=self.test_sensor_format, graph_type=self.test_sensor_graph_type, ) sensor_type.save() # Create 5 sensors for i in range(5): sensor = AGSensor.objects.create( name=self.test_sensor_name + "i", type_id=sensor_type ) sensor.save() # Retrieve sensor ids for the first 2 sensors sensor_ids_partial = [] sensors = AGSensor.objects.all() for i in range(2): sensor_ids_partial.append(sensors[i].uuid) # Post to update the dashboard with 2 sensor panels self.client.post( reverse( self.config_update_dashboard_url, kwargs={"gf_id": self.gfconfig.id} ), data={"dashboard_name": self.event_name, "sensors": sensor_ids_partial}, ) dashboard = self.grafana.get_dashboard_by_name(self.event_name) self.assertTrue(dashboard) # Retrieve current panels try: panels = dashboard["dashboard"]["panels"] except KeyError: panels = [] self.assertEquals(len(panels), 2) sensor_ids = [] sensors = AGSensor.objects.all() for sensor in sensors: sensor_ids.append(sensor.uuid) # Post to reset the dashboard with all sensor panels self.client.post( reverse( self.config_reset_dashboard_url, kwargs={"gf_id": self.gfconfig.id} ), data={"dashboard_name": self.event_name}, ) dashboard = self.grafana.get_dashboard_by_name(self.event_name) self.assertTrue(dashboard) # Retrieve current panels try:
0.000 -2 os-c3-os-c3 1 0.970 180.000 1 c3-ss-ss-c3 1 3.150 0.000 -2 c226,p2 GA AUE=0.4785 RMSE=0.5249 TorType=2 c3-ss-ss-c3 1 0.890 0.000 3 c226,p2 GA AUE=0.4785 RMSE=0.5249 o -c -c3-hc 1 0.830 0.000 -1 CH3COO,CH3COOH GA AUE=0.0144 RMSE=0.0193 TorType=2 o -c -c3-hc 1 0.040 180.000 3 ho-oh-c3-c3 1 0.000 0.000 3 m19 SS AUE=0.1539 RMSE=0.2110 TorType=2 oh-c3-c3-oh 1 0.900 0.000 -3 p5,ch2oh2 GA AUE=0.9894 RMSE=1.1930 TorType=2 oh-c3-c3-oh 1 1.130 0.000 2 p5,ch2oh2 GA AUE=0.9894 RMSE=1.1930 TorType=2 os-c3-c3-os 1 0.000 0.000 -3 p28,suger5ring,suger6ring,coccoc GA AUE=1.1750 RMSE=1.6708 TorType=2 os-c3-c3-os 1 0.000 180.000 -2 os-c3-c3-os 1 0.170 180.000 1 c1-c1-c3-c1 1 0.000 0.000 2 t5 SS AUE=0.0048 RMSE=0.0058 TorType=3 c2-c2-c3-c2 1 0.112 0.000 2 t4 SS AUE=0.5917 RMSE=0.7276 TorType=3 c2-ce-ca-ca 1 0.505 180.000 2 add6f SS AUE=0.2273 RMSE=0.3302 TorType=3 c2-ce-ce-c3 1 0.500 180.000 2 set1_2 SS AUE=0.6541 RMSE=0.8643 TorType=3 c2-cf-cd-cc 1 0.500 180.000 2 add6d SS AUE=0.3708 RMSE=0.4956 TorType=3 c2-n2-c3-n2 1 1.570 180.000 -2 t14 GA AUE=1.3428 RMSE=1.6221 TorType=3 c2-n2-c3-n2 1 2.730 180.000 1 t14 GA AUE=1.3428 RMSE=1.6221 TorType=3 c2-n2-na-cd 1 1.575 180.000 2 c99 SS AUE=0.2455 RMSE=0.3271 TorType=3 c2-n2-n -c 1 2.790 180.000 1 c80 SS AUE=2.1704 RMSE=2.7351 TorType=3 c2-n2-nh-c2 1 1.200 0.000 2 set3_6 SS AUE=1.7161 RMSE=2.4147 TorType=3 c2-ne-ca-ca 1 0.495 0.000 3 c63 SS AUE=1.1301 RMSE=1.4142 TorType=3 c2-ne-ce-c2 1 0.170 180.000 2 c26 SS AUE=0.7462 RMSE=0.9083 TorType=3 c2-ne-ce-c3 1 0.820 0.000 2 set1_6 SS AUE=0.2966 RMSE=0.4200 TorType=3 c2-nh-c2-c2 1 0.980 180.000 2 set3_2 SS AUE=0.5762 RMSE=0.7682 TorType=3 c2-nh-c2-c3 1 3.140 180.000 2 set3_26 SS AUE=0.5612 RMSE=0.7360 TorType=3 c2-nh-c3-h1 1 0.400 0.000 3 set3_3 SS AUE=0.2455 RMSE=0.3092 TorType=3 c2-nh-ca-ca 1 0.550 180.000 2 set3_4 SS AUE=0.8992 RMSE=1.3720 TorType=3 c2-nh-nh-c2 1 2.930 0.000 3 set3_24 SS AUE=2.3906 RMSE=3.0117 TorType=3 c2-p2-c3-p2 1 2.070 180.000 1 t18 SS AUE=0.4761 RMSE=0.6635 
TorType=3 c2-p2-n4-hn 1 0.000 180.000 3 c133 SS AUE=0.2623 RMSE=0.3265 TorType=3 c2-p2-na-cc 1 1.830 180.000 2 c146 SS AUE=0.3236 RMSE=0.3673 TorType=3 c2-p2-nh-c2 1 1.330 180.000 2 set3_14 SS AUE=0.4660 RMSE=0.7730 TorType=3 c2-p2-nh-c3 1 2.400 180.000 2 c119 SS AUE=1.0662 RMSE=1.4725 TorType=3 c2-p2-nh-ca 1 1.880 180.000 1 c158 SS AUE=1.5854 RMSE=1.8810 TorType=3 c2-pe-ca-ca 1 1.065 180.000 2 c71 SS AUE=0.2838 RMSE=0.3291 TorType=3 c2-pe-ce-c2 1 0.825 180.000 2 c34 SS AUE=0.3082 RMSE=0.3467 TorType=3 c2-pe-ce-c3 1 3.640 180.000 1 set1_14 SS AUE=0.2869 RMSE=0.3329 TorType=3 c2-pe-ne-c2 1 0.290 0.000 1 c104 SS AUE=0.4118 RMSE=0.5379 TorType=3 c2-pe-pe-c2 1 0.680 180.000 2 c196 SS AUE=0.2486 RMSE=0.3241 TorType=3 c3-c2-nh-ca 1 1.160 180.000 -2 set1_10 GA AUE=0.3625 RMSE=0.5970 TorType=3 c3-c2-nh-ca 1 1.880 0.000 1 set1_10 GA AUE=0.3625 RMSE=0.5970 TorType=3 c3-c3-c3-hc 1 0.080 0.000 3 t2 SS AUE=0.2507 RMSE=0.3027 TorType=3 c3-c3-cc-ca 1 0.082 0.000 3 p3 SS AUE=0.4586 RMSE=0.5633 TorType=3 c3-c3-n -c 1 0.650 180.000 -4 sialic2 GA AUE=1.1541 RMSE=1.2847 TorType=3 c3-c3-n -c 1 0.030 180.000 -3 sialic2 GA AUE=1.1541 RMSE=1.2847 TorType=3 c3-c3-n -c 1 2.260 0.000 1 sialic2 GA AUE=1.1541 RMSE=1.2847 TorType=3 c3-c -c3-c3 1 0.332 180.000 2 p10 SS AUE=0.3226 RMSE=0.4401 TorType=3 c3-c -ce-c3 1 4.110 0.000 2 set3_25 SS AUE=0.6933 RMSE=1.1187 TorType=3 c3-ce-ce-c3 1 0.500 180.000 2 set3_22 SS AUE=1.0809 RMSE=1.3455 TorType=3 c3-n2-c2-c3 1 10.370 180.000 2 c7 SS AUE=1.1629 RMSE=1.3902 TorType=3 c3-n3-n3-c3 1 2.310 0.000 2 c112 SS AUE=0.8815 RMSE=1.0390 TorType=3 c3-n3-nh-c2 1 1.355 0.000 2 set3_7 SS AUE=1.4104 RMSE=1.6750 TorType=3 c3-n4-ca-ca 1 1.495 0.000 2 c65 SS AUE=0.2872 RMSE=0.3575 TorType=3 c3-n4-n4-c3 1 0.244 0.000 3 c127 SS AUE=0.6207 RMSE=0.7993 TorType=3 c3-nh-c2-c2 1 0.950 180.000 -2 c27 GA AUE=0.7690 RMSE=1.0440 TorType=3 c3-nh-c2-c2 1 1.120 180.000 3 c27 GA AUE=0.7690 RMSE=1.0440 TorType=3 c3-nh-c2-c3 1 2.495 180.000 2 set1_7 SS AUE=0.8853 RMSE=1.2321 TorType=3 
c3-os-c2-c2 1 2.520 180.000 -2 c33 GA AUE=0.9155 RMSE=1.0796 TorType=3 c3-os-c2-c2 1 2.000 180.000 1 c33 GA AUE=0.9155 RMSE=1.0796 TorType=3 c3-os-c2-c3 1 4.790 180.000 2 set1_13 SS AUE=0.9973 RMSE=1.5097 TorType=3 c3-os-c3-h1 1 0.337 0.000 3 c52 SS AUE=0.2706 RMSE=0.3300 TorType=3 c3-os-ca-ca 1 1.610 180.000 2 c70 SS AUE=0.3151 RMSE=0.3580 TorType=3 c3-os-n2-c2 1 2.200 180.000 -2 c103 SS AUE=1.2430 RMSE=1.4817 TorType=3 c3-os-n2-c2 1 0.900 180.000 3 c103 SS AUE=1.2430 RMSE=1.4817 TorType=3 c3-os-n3-c3 1 0.840 0.000 2 c118 SS AUE=0.7374 RMSE=0.9683 TorType=3 c3-os-n4-c3 1 0.620 180.000 3 c132 SS AUE=0.8090 RMSE=0.9444 TorType=3 c3-os-na-cc 1 0.190 0.000 2 c145 SS AUE=0.2720 RMSE=0.3305 TorType=3 c3-os-n -c 1 0.420 0.000 2 c87 SS AUE=0.3019 RMSE=0.3567 TorType=3 c3-os-nh-c2 1 1.150 0.000 1 set3_13 SS AUE=0.9655 RMSE=1.1845 TorType=3 c3-os-nh-ca 1 0.500 0.000 1 c157 SS AUE=0.8647 RMSE=1.0585 TorType=3 c3-os-no-o 1 2.515 180.000 2 c168 SS AUE=0.3706 RMSE=0.4248 TorType=3 c3-os-oh-ho 1 1.010 0.000 2 c178 SS AUE=0.2810 RMSE=0.3796 TorType=3 c3-os-os-c3 1 0.380 0.000 1 c187 SS AUE=0.4838 RMSE=0.6593 TorType=3 c3-os-p2-c2 1 2.940 180.000 -2 c188 GA AUE=0.3661 RMSE=0.4565 TorType=3 c3-os-p2-c2 1 1.850 180.000 1 c188 GA AUE=0.3661 RMSE=0.4565 TorType=3 c3-p3-c2-c2 1 0.297 0.000 2 c35 SS AUE=1.0902 RMSE=1.4763 TorType=3 c3-p3-c2-c3 1 0.950 180.000 2 set1_15 SS AUE=0.4182 RMSE=0.4905 TorType=3 c3-p3-ca-ca 1 0.177 180.000 2 c72 SS AUE=0.2797 RMSE=0.3319 TorType=3 c3-p3-n2-c2 1 5.000 180.000 2 c105 SS AUE=0.8649 RMSE=1.0889 TorType=3 c3-p3-n3-c3 1 2.850 0.000 2 c120 SS AUE=0.8776 RMSE=1.2067 TorType=3 c3-p3-n4-c3 1 0.067 0.000 3 c134 SS AUE=0.1760 RMSE=0.2433 TorType=3 c3-p3-na-cc 1 1.025 0.000 2 c147 SS AUE=0.2741 RMSE=0.3331 TorType=3 c3-p3-n -c 1 1.830 0.000 2 c89 SS AUE=0.9690 RMSE=1.3708 TorType=3 c3-p3-nh-c2 1 1.850 0.000 2 set3_15 SS AUE=0.8611 RMSE=0.9832 TorType=3 c3-p3-no-o 1 1.400 180.000 2 c170 SS AUE=0.5082 RMSE=0.5728 TorType=3 c3-p3-oh-ho 1 0.240 180.000 3 c180 
SS AUE=0.9983 RMSE=1.2838 TorType=3 c3-p3-p2-c2 1 0.200 0.000 1 c197 SS AUE=0.5014 RMSE=0.7016 TorType=3 c3-p3-p3-c3 1 0.375 0.000 3 c204 SS AUE=0.8032 RMSE=0.9405 TorType=3 c3-p4-n3-c3 1 1.778 180.000 2 c121 SS AUE=1.1246 RMSE=1.4091 TorType=3 c3-p4-n4-hn 1 0.005 0.000 3 c135 SS AUE=0.2627 RMSE=0.3254 TorType=3 c3-p4-na-cc 1 1.000 0.000 -3 c148 GA AUE=0.9954 RMSE=1.1119 TorType=3 c3-p4-na-cc 1 0.640 180.000 2 c148 GA AUE=0.9954 RMSE=1.1119 TorType=3 c3-p4-nh-c2 1 0.900 0.000 1 set3_16 SS AUE=1.0315 RMSE=1.1976 TorType=3 c3-p4-nh-ca 1 0.000 180.000 -3 c160 GA AUE=1.0676 RMSE=1.4622 TorType=3 c3-p4-nh-ca 1 0.840 180.000 2 c160 GA AUE=1.0676 RMSE=1.4622 TorType=3 c3-p4-os-c3 1 0.600 180.000 2 c190 SS AUE=0.5663 RMSE=0.6640 TorType=3 c3-p4-p3-c3 1 1.400 0.000 1 c205 SS AUE=0.7593 RMSE=0.9141 TorType=3 c3-px-ca-ca 1 0.432 180.000 2 c73 SS AUE=0.4755 RMSE=0.6108 TorType=3 c3-px-c -c3 1 0.000 0.000 -2 c16 GA AUE=1.0361 RMSE=1.3175 TorType=3 c3-px-c -c3 1 0.580 180.000 1 c16 GA AUE=1.0361 RMSE=1.3175 TorType=3 c3-px-ce-c2 1 1.130 0.000 2 c36 SS AUE=1.2444 RMSE=1.6024 TorType=3 c3-px-ce-c3 1 0.810 180.000 2 set1_16 SS AUE=0.9969 RMSE=1.2788 TorType=3 c3-px-ne-c2 1 0.610 0.000 -3 c106 GA AUE=1.6606 RMSE=2.1207 TorType=3 c3-px-ne-c2 1 1.440 0.000 1 c106 GA AUE=1.6606 RMSE=2.1207 TorType=3 c3-px-pe-c2 1 1.565 0.000 2 c198 SS AUE=1.0967 RMSE=1.2917 TorType=3 c3-s4-c3-h1 1 0.117 0.000 3 c59 SS AUE=0.2210 RMSE=0.2792 TorType=3 c3-s4-n3-c3 1 3.100 0.000 2 c125 SS AUE=1.3654 RMSE=1.8896 TorType=3 c3-s4-n4-c3 1 0.200 0.000 3 c139 SS AUE=0.7713 RMSE=0.9400 TorType=3 c3-s4-na-cc 1 0.550 0.000 2 c152 SS AUE=0.5159 RMSE=0.7408 TorType=3 c3-s4-nh-c2 1 0.235 180.000 -2 set3_20 GA AUE=1.5742 RMSE=1.9736 TorType=3 c3-s4-nh-c2 1 0.500 0.000 -3 set3_20 GA AUE=1.5742 RMSE=1.9736 TorType=3 c3-s4-nh-c2 1 1.302 0.000 1 set3_20 GA AUE=1.5742 RMSE=1.9736 TorType=3 c3-s4-no-o 1 1.130 180.000 2 c175 SS AUE=0.7753 RMSE=0.8760 TorType=3 c3-s4-oh-ho 1 0.000 180.000 1 c185 SS AUE=1.7272 RMSE=2.1061 
TorType=3 c3-s4-os-c3 1 1.310 180.000 1 c194 SS AUE=0.9618 RMSE=1.1506 TorType=3 c3-s4-p3-c3 1 2.220 0.000 2 c209 SS AUE=1.9189 RMSE=2.5861 TorType=3 c3-s4-sh-hs 1 0.000 0.000 -3 c224 GA AUE=1.1511 RMSE=1.3863 TorType=3 c3-s4-sh-hs 1 0.560 180.000 2 c224 GA AUE=1.1511 RMSE=1.3863 TorType=3 c3-s4-ss-c3 1 0.050 0.000 3 c227 SS AUE=0.7707 RMSE=0.9378 TorType=3 c3-s6-c3-h1 1 0.089 0.000 3 c60 SS AUE=0.0648 RMSE=0.0808 TorType=3 c3-s6-n3-c3 1 3.610 0.000 2 c126 SS AUE=1.8933 RMSE=2.6424 TorType=3 c3-s6-n4-c3 1 1.470 0.000 1 c140 SS AUE=0.2994 RMSE=0.3260 TorType=3 c3-s6-na-cc 1 3.938 180.000 2 c153 SS AUE=0.8118 RMSE=1.0393 TorType=3 c3-s6-n -c 1 0.768 180.000 2 c95 SS AUE=0.4645 RMSE=0.6488 TorType=3 c3-s6-nh-c2 1 0.667 0.000 2 set3_21 SS AUE=1.6191 RMSE=2.2150 TorType=3 c3-s6-no-o 1 0.348 0.000 2 c176 SS AUE=0.2701 RMSE=0.3306 TorType=3 c3-s6-oh-ho 1 11.690 180.000 1 c186 SS AUE=0.6401 RMSE=0.8081 TorType=3 c3-s6-os-c3 1 0.533 180.000 2 c195 SS AUE=0.9691 RMSE=1.1571 TorType=3 c3-s6-p3-c3 1 0.183 0.000 3 c210 SS AUE=0.5556 RMSE=0.6476 TorType=3 c3-s6-sh-hs 1 4.317 180.000 2 c225 SS AUE=1.0170 RMSE=1.0970 TorType=3 c3-s6-ss-c3 1 2.400 180.000 2 c228 SS AUE=0.8201 RMSE=1.0146 TorType=3 c3-ss-c2-c3 1 2.025 180.000 2 set1_19 SS AUE=0.5269 RMSE=0.6098 TorType=3 c3-ss-c3-c3 1 0.167 0.000 3 p9 SS AUE=0.4614 RMSE=0.5750 TorType=3 c3-ss-c3-h1 1 0.220 0.000 3 c58 SS AUE=0.2551 RMSE=0.3303 TorType=3 c3-ss-ca-ca 1 0.750 180.000 2 c76 SS AUE=0.2509 RMSE=0.3297 TorType=3 c3-ss-n2-c2 1 1.350 180.000 -2 c109 GA AUE=0.6324 RMSE=0.7825 TorType=3 c3-ss-n2-c2 1 1.380 180.000 1 c109 GA AUE=0.6324 RMSE=0.7825 TorType=3 c3-ss-n3-c3 1 2.680 0.000 2 c124 SS AUE=1.0072 RMSE=1.2488 TorType=3 c3-ss-n4-c3 1 0.390 0.000 3 c138 SS AUE=0.3868 RMSE=0.4909 TorType=3 c3-ss-n -c 1 0.500 0.000 2 c93 SS AUE=0.5560 RMSE=0.7560 TorType=3 c3-ss-nh-c2 1 1.100 0.000 2 set3_19 SS AUE=0.9372 RMSE=1.1240 TorType=3 c3-ss-no-o 1 2.295 180.000 2 c174
        should be C-contiguous. False otherwise.
        @rtype: Mat
        @returns: A copy that is contiguous (c or f) and has begin 0.
        """
        if self.use_opencl:
            result = self._map('', c_contiguous=c_contiguous)
        else:
            result = Mat(self.computer,
                         self.NP.copy(order='C' if c_contiguous else 'F'))
        return result

    def empty_copy(self):
        """Return an empty (uninitialized) Mat with self's shape/dtype/order."""
        if self.use_opencl:
            result = Mat(None, None)
            result._init_cl_empty(self.computer, self.shape, self.dtype,
                                  self.c_contiguous)
        else:
            result = Mat(None, None)
            order = 'C' if self.c_contiguous else 'F'
            ndarr = np.empty(self.shape, dtype=self.dtype, order=order)
            result._init_np_from_np(self.computer, ndarr)
        return result

    def astype(self, dtype):
        """Cast Mat to the given dtype.

        @type dtype: numpy dtype
        @param dtype: The dtype of the resulting array.
        @rtype: Mat
        @returns: A newly created array (even if the dtype doesn't change).
        """
        if self.use_opencl:
            result = Mat(self.computer, self.shape, dtype=dtype,
                         c_contiguous=self.c_contiguous)
            # The map functions are faster than fill.
            # NOTE(review): this tests `self.contiguous`, while the rest of the
            # class uses `self.c_contiguous` — presumably `contiguous` means
            # "contiguous in either order"; confirm the attribute exists.
            if self.contiguous:
                self.computer._cl_map_1d('', self, result)
            else:
                self.computer._cl_map('', self, result)
        else:
            result = Mat(self.computer, self.NP.astype(dtype))
        return result

    def fill(self, other):
        """
        Fill the Mat with a scalar or with corresponding values from other Mat.

        When using numpy data and a scalar it will call numpy.ndarray.fill.
        Otherwise this function is more general than the numpy.ndarray.fill
        function, because it allows the argument to be non-scalar.

        @type other: Mat/scalar
        @param other: A scalar or a Mat of the same shape.
        """
        if self.use_opencl:
            self._iop('=', other)
        else:
            if isinstance(other, Mat):
                self.NP[:] = other.NP[:]
            else:
                self.NP.fill(other)

    ##### Access Methods #####

    def count_nonzero(self):
        """Return the number of non-zero elements in the Mat."""
        # TODO implement this in kernel, improve speed here.
        return (self != 0).astype(np.uint64).sum()

    # def getl(self, index):
    #     """Get elements using a logical index of the same size."""
    #     if index.shape != self.shape:
    #         raise ValueError('Index shape mismatch %s != %s',
    #                          index.shape, self.shape)

    # def setl(self, index, value):
    #     """Set elements using a logical index of the same size"""
    #     if index.shape != self.shape:
    #         raise ValueError('Index shape mismatch %s != %s',
    #                          index.shape, self.shape)
    #     if value.shape != self.shape:
    #         raise ValueError('Value shape mismatch %s != %s',
    #                          value.shape, self.shape)

    # def getl2(self, index0, index1):
    #     """Get elements using two logical indexes."""

    # def setl2(self, index0, index1, value):
    #     """Set elements using two logical indexes."""

    def li(self, index, value=None):
        """
        Logical index operations get/set with Mat index.

        @type index: Mat
        @param index: A Mat containing info on which values to get/set.
            index[i,j] != 0 indicates self[i,j] should be used.
            Each index dimension must equal 1 or match self.
            At least one index dimension must match self.
        @param value: If not None will set instead of get.
        @returns: None if logical index is empty and value is None.
            Otherwise a new Mat if value is None, otherwise self.
            If 1 not in index.shape, will return a Mat with shape1==1
        @raises ValueError: If index.shape does not conform to self.shape.
            If value.size does not conform to number of values
            referred to by index.
        """
        # Validate index shape: each dimension must be 1 or match self's.
        if index.shape0 != self.shape0:
            incompatible = index.shape0 != 1 or index.shape1 != self.shape1
        elif index.shape1 != self.shape1 and index.shape0 != 1:
            incompatible = True
        else:
            incompatible = False
        if incompatible:
            raise ValueError('index shape %s is incompatible with %s'
                             % (index.shape, self.shape))
        if self.use_opencl:
            if value is None:
                # Get operation
                nnz = index.count_nonzero()
                if nnz == 0:
                    return None
                # Result shape depends on which index axis is broadcast.
                if index.shape0 == 1:
                    rshape = (self.shape0, nnz)
                elif index.shape1 == 1:
                    rshape = (nnz, self.shape1)
                else:
                    rshape = (nnz, 1)
                result = Mat(self.computer, rshape, dtype=self.dtype,
                             c_contiguous=self.c_contiguous)
            else:
                # Set operation
                result = self
            self.computer._cl_li(result, self, index, value)
        else:
            index_np = index.NP
            # NOTE(review): np.bool is the deprecated alias removed in
            # NumPy 1.24 — this branch raises AttributeError on modern NumPy;
            # should be np.bool_ (or plain bool).
            if index_np.dtype != np.bool:
                index_np = index_np.astype(np.bool)
            if index.shape == self.shape:
                index_arg = index.NP
            elif index.shape0 == self.shape0:
                index_arg = (index.NP.squeeze(), slice(None))
            else:
                index_arg = (slice(None), index.NP.squeeze())
            if value is None:
                result_np = self.NP[index_arg]
                if result_np.size == 0:
                    return None
                # Keep results 2-D (column vector) for Mat's 2-D invariant.
                if result_np.ndim == 1:
                    result_np = result_np[:, np.newaxis]
                result = Mat(self.computer, result_np)
            else:
                if isinstance(value, Mat):
                    value = value.NP
                self.NP[index_arg] = value
                result = self
        return result

    def _slice_helper(self, slice_arg, axis_size, axis):
        """Checks and converts slice to appropriate begin,step,end."""
        begin = slice_arg.start
        if begin is None:
            begin = 0
        elif begin < 0:
            raise IndexError('Negative indices are not allowed (yet).')
        end = slice_arg.stop
        if end is None:
            end = axis_size
        elif end > axis_size:
            raise IndexError('index %s is out of bounds for axis'
                             '%d with size %s' % (end, axis, axis_size))
        if end < begin:
            raise IndexError('Non-increasing slice %s:%s not supported (yet).'
                             % (begin, end))
        step = slice_arg.step
        if step is None:
            step = 1
        elif step <= 0:
            raise IndexError('Non-positive steps are not allowed (yet).')
        new_axis_size = end-begin
        # NOTE(review): numpy allows step > span (yields one element); this
        # implementation rejects it instead — confirm that is intentional.
        if step > new_axis_size:
            raise IndexError('Slice step size is too large %s:%s:%s '
                             % (begin, end, step))
        # Ceiling-divide the span by step to get the sliced axis length.
        if step != 1:
            new_axis_size += 1
        new_axis_size //= step
        return (_size_t(begin), _size_t(step),
                _size_t(end), _size_t(new_axis_size))

    def _getitem_by_slice(self, slice0, slice1):
        """
        Obtain a submatrix of self (assumes use_opencl).

        If possible the submatrix will reuse the data.
        Negative values in slices are not allowed.

        @type slice0: slice
        @param slice0: Defines the rows to choose from self.
        @type slice1: slice
        @param slice1: Defines the columns to choose from self.
        """
        begin0, step0, end0, shape0 = \
            self._slice_helper(slice0, self.shape0, 0)
        begin1, step1, end1, shape1 = \
            self._slice_helper(slice1, self.shape1, 1)
        # Compute the new view's offset and strides into the shared buffer —
        # no data is copied, only the view metadata changes.
        stride0 = self.ptr_stride0
        stride1 = self.ptr_stride1
        begin = self.begin + begin0*stride0 + begin1*stride1
        stride0 *= step0
        stride1 *= step1
        result = Mat(None, None)
        result._init_cl_empty(self.computer, (shape0, shape1),
                              dtype=self.dtype,
                              c_contiguous=self.c_contiguous,
                              init_buffer=False)
        result._set_buffer(self.buffer)
        result._set_begin(begin)
        result._set_ptr_strides(stride0, stride1)
        return result

    def __getitem__(self, key):
        """
        self[key] for restricted set of slice/int tuples.

        Also see Mat.li (logical indexing).
        For arbitrary keys you can also use self.computer.M(self.NP[key]),
        as long as the key works with numpy.ndarray.__getitem__.

        @type key: (slice/int, slice/int)
        @param key: Not all slices are allowed, will raise exception.
        @raises TypeError: If key is not a tuple of (slice, slice)
        @returns: For opencl, guaranteed to return a Mat that uses the
            same data. For numpy this will most likely also be the case,
            but depends on the numpy implementation.
        """
        if not isinstance(key, tuple) or len(key) != 2:
            raise TypeError('Expected key to be 2-tuple.')
        for k in key:
            if not (isinstance(k, slice) or isinstance(k, int)):
                raise TypeError('Expected slice or int.')
        k0, k1 = key
        if self.use_opencl:
            # Normalize int indices into single-element slices.
            if isinstance(k0, int):
                # NOTE(review): for negative k0 this computes shape0 - k0 =
                # shape0 + |k0|, which is PAST the end; numpy semantics need
                # shape0 + k0. Same issue for k1 below. Looks like a bug.
                if k0 < 0:
                    k0 = self.shape0 - k0
                k0 = slice(k0, k0 + 1, 1)
            if isinstance(k1, int):
                if k1 < 0:
                    k1 = self.shape1 - k1
                k1 = slice(k1, k1 + 1, 1)
            result = self._getitem_by_slice(k0, k1)
        else:
            result = Mat(self.computer, self.NP[key])
        return result

    # def __setitem__(self, key, value):
    #     """self[key]=value
    #     @type value: Mat, scalar
    #     @param value: anything that can be passed to fill."""
    #     if isinstance(key, Mat):
    #         key = key.NP
    #     altered_arr = self.NP
    #     altered_arr[key] = value
    #     self.fill(self.computer.M(altered_arr))
    #     if self.use_opencl:
    #         submat = self.__getitem__(key)
    #         success = False
    #         if submat.buffer is self.buffer:
    #             try:
    #                 submat.fill(value)
    #                 success = True
    #             except Exception:
    #                 pass
    #         if not success:
    #             warnings.warn('__setitem__ has not implemented support '
    #                           'for this type of key (yet). '
    #                           'You may want to use slices and/or '
    #                           'a Mat value instead.',
    #                           InefficiencyWarning)
    #             if isinstance(value, Mat):
    #                 value = value.NP
    #             new_np = self.NP
    #             if isinstance(key, Mat):
    #                 key = key.NP
    #             new_np[key] = value
    #             self.fill(Mat(self.computer, new_np))
    #     else:
    #         if isinstance(key, Mat):
    #             key = key.NP
    #         if isinstance(value, Mat):
    #             value = value.NP
    #         self._ndarray[key] = value

    # def __contains__(self, item):
    #     """Whether item is in self"""
    #     return NotImplemented

    ##### Object Methods #####

    def __bool__(self):
        """Needed to allow scalar==self and scalar!=self."""
        # TODO change this once you know where it fails.
        raise MissingOperationError(
            '__nonzero__ is disabled because it is called innapriopriately '
            'for some scalar-matrix operators. '
            'This may be failing for that reason OR OTHERWISE. '
            'NOTE: The operation c <op> M is not available for <op>= %s. '
            'You have to use M <op> c instead.'
            % OPS_NON_REVERSABLE)
        # NOTE(review): unreachable — the raise above always fires; also
        # __bool__ must return a bool, so `return self` would be invalid.
        return self

    def __str__(self):
        """Returns self.NP.__str__()"""
        return self.NP.__str__()

    def __repr__(self):
        """Returns self.NP.__repr__()"""
        return self.NP.__repr__()

    ##### Arithmetic Methods #####

    def __pos__(self):
        """+self"""
        if self.use_opencl:
            result = self._map('+')
        else:
            result = Mat(self.computer, +self.NP)
        return result

    def __neg__(self):
        """-self"""
        # NOTE(review): method truncated at the chunk boundary.
        if self.use_opencl:
Repo # and SharedRepo r = get_repo(r_id) if not r: continue r.repo_id = r.id r.repo_name = r.name r.repo_desc = r.desc cmmts = get_commits(r_id, 0, 1) last_commit = cmmts[0] if cmmts else None r.last_modified = last_commit.ctime if last_commit else 0 r._dict['share_type'] = 'group' r.user = seafile_api.get_repo_owner(r_id) r.user_perm = check_permission(r_id, username) shared_repos.append(r) if not CLOUD_MODE: shared_repos += seafile_api.get_inner_pub_repo_list() return HttpResponse(json.dumps(shared_repos, cls=SearpcObjEncoder), status=200, content_type=json_content_type) class SharedFileView(APIView): # Anyone should be able to access a Shared File assuming they have the token throttle_classes = (UserRateThrottle, ) def get(self, request, token, format=None): assert token is not None # Checked by URLconf try: fileshare = FileShare.objects.get(token=token) except FileShare.DoesNotExist: return api_error(status.HTTP_404_NOT_FOUND, "Token not found") repo_id = fileshare.repo_id repo = get_repo(repo_id) if not repo: return api_error(status.HTTP_404_NOT_FOUND, "Library not found") path = fileshare.path.rstrip('/') # Normalize file path file_name = os.path.basename(path) file_id = None try: file_id = seafile_api.get_file_id_by_path(repo_id, path) except SearpcError as e: logger.error(e) return api_error(HTTP_520_OPERATION_FAILED, "Failed to get file id by path.") if not file_id: return api_error(status.HTTP_404_NOT_FOUND, "File not found") # Increase file shared link view_cnt, this operation should be atomic fileshare.view_cnt = F('view_cnt') + 1 fileshare.save() op = request.GET.get('op', 'download') return get_repo_file(request, repo_id, file_id, file_name, op) class SharedFileDetailView(APIView): throttle_classes = (UserRateThrottle, ) def get(self, request, token, format=None): assert token is not None # Checked by URLconf try: fileshare = FileShare.objects.get(token=token) except FileShare.DoesNotExist: return api_error(status.HTTP_404_NOT_FOUND, "Token not 
found") if fileshare.is_encrypted(): password = request.GET.get('password', '') if not password: return api_error(status.HTTP_403_FORBIDDEN, "Password is required") if not check_password(password, fileshare.password): return api_error(status.HTTP_403_FORBIDDEN, "Invalid Password") repo_id = fileshare.repo_id repo = get_repo(repo_id) if not repo: return api_error(status.HTTP_404_NOT_FOUND, "Library not found") path = fileshare.path.rstrip('/') # Normalize file path file_name = os.path.basename(path) file_id = None try: file_id = seafile_api.get_file_id_by_path(repo_id, path) commits = get_file_revisions_after_renamed(repo_id, path) c = commits[0] except SearpcError as e: return api_error(HTTP_520_OPERATION_FAILED, "Failed to get file id by path.") if not file_id: return api_error(status.HTTP_404_NOT_FOUND, "File not found") entry = {} try: entry["size"] = get_file_size(repo.store_id, repo.version, file_id) except Exception as e: logger.error(e) entry["size"] = 0 entry["type"] = "file" entry["name"] = file_name entry["id"] = file_id entry["mtime"] = c.ctime entry["repo_id"] = repo_id entry["path"] = path return HttpResponse(json.dumps(entry), status=200, content_type=json_content_type) class FileShareEncoder(json.JSONEncoder): def default(self, obj): if not isinstance(obj, FileShare): return None return {'username':obj.username, 'repo_id':obj.repo_id, 'path':obj.path, 'token':obj.token, 'ctime':obj.ctime, 'view_cnt':obj.view_cnt, 's_type':obj.s_type} class SharedLinksView(APIView): authentication_classes = (TokenAuthentication, SessionAuthentication ) permission_classes = (IsAuthenticated,) throttle_classes = (UserRateThrottle, ) def get(self, request, format=None): username = request.user.username fileshares = FileShare.objects.filter(username=username) p_fileshares = [] # personal file share for fs in fileshares: if is_personal_repo(fs.repo_id): # only list files in personal repos r = seafile_api.get_repo(fs.repo_id) if not r: fs.delete() continue if fs.s_type == 
'f': if seafile_api.get_file_id_by_path(r.id, fs.path) is None: fs.delete() continue fs.filename = os.path.basename(fs.path) fs.shared_link = gen_file_share_link(fs.token) else: if seafile_api.get_dir_id_by_path(r.id, fs.path) is None: fs.delete() continue fs.filename = os.path.basename(fs.path.rstrip('/')) fs.shared_link = gen_dir_share_link(fs.token) fs.repo = r p_fileshares.append(fs) return HttpResponse(json.dumps({"fileshares": p_fileshares}, cls=FileShareEncoder), status=200, content_type=json_content_type) def delete(self, request, format=None): token = request.GET.get('t', None) if not token: return api_error(status.HTTP_400_BAD_REQUEST, 'Token is missing') username = request.user.username share = FileShare.objects.filter(token=token).filter(username=username) or \ UploadLinkShare.objects.filter(token=token).filter(username=username) if not share: return api_error(status.HTTP_400_BAD_REQUEST, 'Invalid token') share.delete() return HttpResponse(json.dumps({}), status=200, content_type=json_content_type) class SharedDirView(APIView): throttle_classes = (UserRateThrottle, ) def get(self, request, token, format=None): """List dirents in dir download shared link """ fileshare = FileShare.objects.get_valid_dir_link_by_token(token) if not fileshare: return api_error(status.HTTP_400_BAD_REQUEST, "Invalid token") repo_id = fileshare.repo_id repo = get_repo(repo_id) if not repo: return api_error(status.HTTP_400_BAD_REQUEST, "Invalid token") if fileshare.is_encrypted(): password = request.GET.get('password', '') if not password: return api_error(status.HTTP_403_FORBIDDEN, "Password is required") if not check_password(password, fileshare.password): return api_error(status.HTTP_403_FORBIDDEN, "Invalid Password") req_path = request.GET.get('p', '/') if req_path[-1] != '/': req_path += '/' if req_path == '/': real_path = fileshare.path else: real_path = posixpath.join(fileshare.path, req_path.lstrip('/')) if real_path[-1] != '/': # Normalize dir path real_path += '/' 
dir_id = seafile_api.get_dir_id_by_path(repo_id, real_path) if not dir_id: return api_error(status.HTTP_400_BAD_REQUEST, "Invalid path") username = fileshare.username try: dirs = seafserv_threaded_rpc.list_dir_with_perm(repo_id, real_path, dir_id, username, -1, -1) dirs = dirs if dirs else [] except SearpcError as e: logger.error(e) return api_error(HTTP_520_OPERATION_FAILED, "Failed to list dir.") dir_list, file_list = [], [] for dirent in dirs: dtype = "file" entry = {} if stat.S_ISDIR(dirent.mode): dtype = "dir" else: if repo.version == 0: entry["size"] = get_file_size(repo.store_id, repo.version, dirent.obj_id) else: entry["size"] = dirent.size entry["type"] = dtype entry["name"] = dirent.obj_name entry["id"] = dirent.obj_id entry["mtime"] = dirent.mtime if dtype == 'dir': dir_list.append(entry) else: file_list.append(entry) dir_list.sort(key=lambda x: x['name'].lower()) file_list.sort(key=lambda x: x['name'].lower()) dentrys = dir_list + file_list content_type = 'application/json; charset=utf-8' return HttpResponse(json.dumps(dentrys), status=200, content_type=content_type) class DefaultRepoView(APIView): """ Get user's default library. 
""" authentication_classes = (TokenAuthentication, SessionAuthentication) permission_classes = (IsAuthenticated, ) throttle_classes = (UserRateThrottle, ) def get(self, request, format=None): username = request.user.username repo_id = UserOptions.objects.get_default_repo(username) if repo_id is None or (get_repo(repo_id) is None): json = { 'exists': False, } return Response(json) else: return self.default_repo_info(repo_id) def default_repo_info(self, repo_id): repo_json = { 'exists': False, } if repo_id is not None: repo_json['exists'] = True repo_json['repo_id'] = repo_id return Response(repo_json) def post(self, request): if not request.user.permissions.can_add_repo(): return api_error(status.HTTP_403_FORBIDDEN, 'You do not have permission to create library.') username = request.user.username repo_id = UserOptions.objects.get_default_repo(username) if repo_id and (get_repo(repo_id) is not None): return self.default_repo_info(repo_id) repo_id = create_default_library(request) return self.default_repo_info(repo_id) class SharedRepo(APIView): """ Support uniform interface for shared libraries. """ authentication_classes = (TokenAuthentication, SessionAuthentication ) permission_classes = (IsAuthenticated, ) throttle_classes = (UserRateThrottle, ) def delete(self, request, repo_id, format=None): """ Unshare a library. Repo owner and system admin can perform this operation. 
        """
        repo = get_repo(repo_id)
        if not repo:
            return api_error(status.HTTP_404_NOT_FOUND, 'Library not found.')

        username = request.user.username
        # Resolve the owner through the org-aware API when in an org context.
        if is_org_context(request):
            repo_owner = seafile_api.get_org_repo_owner(repo_id)
        else:
            repo_owner = seafile_api.get_repo_owner(repo_id)

        # Only staff accounts or the library owner may unshare it.
        if not request.user.is_staff and not username == repo_owner:
            return api_error(status.HTTP_403_FORBIDDEN,
                             'You do not have permission to unshare library.')

        share_type = request.GET.get('share_type', '')
        if not share_type:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'Share type is required.')

        if share_type == 'personal':
            # Revoke a direct user-to-user share.
            user = request.GET.get('user', '')
            if not user:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'User is required.')
            if not is_valid_username(user):
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'User is not valid')
            remove_share(repo_id, username, user)
        elif share_type == 'group':
            # Revoke a group share; group_id must parse as an integer.
            group_id = request.GET.get('group_id', '')
            if not group_id:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Group ID is required.')
            try:
                group_id = int(group_id)
            except ValueError:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Group ID is not valid.')
            seafile_api.unset_group_repo(repo_id, int(group_id), username)
        elif share_type == 'public':
            # Remove from the inner-pub (org-wide or instance-wide) list.
            if is_org_context(request):
                org_id = request.user.org.org_id
                seaserv.seafserv_threaded_rpc.unset_org_inner_pub_repo(org_id,
                                                                       repo_id)
            else:
                seafile_api.remove_inner_pub_repo(repo_id)
        else:
            return api_error(status.HTTP_400_BAD_REQUEST,
                             'Share type can only be personal or group or public.')

        return Response('success', status=status.HTTP_200_OK)

    def put(self, request, repo_id, format=None):
        """ Share a repo to users/groups/public.
        """
        # argument check
        share_type = request.GET.get('share_type')
        permission = request.GET.get('permission')
        if permission not in get_available_repo_perms():
            error_msg = 'permission invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
        if share_type not in ('personal', 'group', 'public'):
            error_msg = 'share_type invalid.'
            return api_error(status.HTTP_400_BAD_REQUEST, error_msg)

        # recourse check
        repo = seafile_api.get_repo(repo_id)
        if not repo:
            error_msg = 'Library %s not found.' % repo_id
            return api_error(status.HTTP_404_NOT_FOUND, error_msg)

        # permission check: only the owner may share.
        username = request.user.username
        repo_owner = get_repo_owner(request, repo_id)
        if username != repo_owner:
            error_msg = 'Permission denied.'
            return api_error(status.HTTP_403_FORBIDDEN, error_msg)

        if share_type == 'personal':
            # Accept either 'user' or 'users'; both are comma-separated lists.
            user = request.GET.get('user')
            users = request.GET.get('users')
            if not user and not users:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'User or users (comma separated are mandatory) are not provided')
            usernames = []
            if user:
                usernames += user.split(",")
            if users:
                usernames += users.split(",")

            # Attempt each share; collect failures so everything can be
            # rolled back if any recipient is rejected.
            shared_users = []
            invalid_users = []
            notexistent_users = []
            notsharable_errors = []
            for u in usernames:
                if not u:
                    continue
                if not is_valid_username(u):
                    invalid_users.append(u)
                    continue
                if not is_registered_user(u):
                    notexistent_users.append(u)
                    continue
                try:
                    seafile_api.share_repo(repo_id, username, u, permission)
                    shared_users.append(u)
                except SearpcError as e:
                    logger.error(e)
                    notsharable_errors.append(e)
                try:
                    send_perm_audit_msg('add-repo-perm', username, u,
                                        repo_id, '/', permission)
                except Exception as e:
                    # Audit logging is best-effort; never fail the share
                    # because of it.
                    logger.error(e)

            if invalid_users or notexistent_users or notsharable_errors:
                # removing already created share
                for s_user in shared_users:
                    try:
                        remove_share(repo_id, username, s_user)
                    except SearpcError as e:
                        # ignoring this error, go to next unsharing
                        continue

            if invalid_users:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Some users are not valid, sharing rolled back')
            if notexistent_users:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Some users are not existent, sharing rolled back')
            if notsharable_errors:
                # show the first sharing error
                return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR,
                                 'Internal error occurs, sharing rolled back')

        if share_type == 'group':
            group_id = request.GET.get('group_id')
            if not group_id:
                error_msg = 'group_id invalid.'
                return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
            try:
                group_id = int(group_id)
            except ValueError:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'Group ID must be integer.')
            group = get_group(group_id)
            if not group:
                return api_error(status.HTTP_400_BAD_REQUEST, 'Group
"bUXu": -595, " paX": -596, "taiX": -597, "gzaX": -598, "draX": -599, "mX": -600, "lIXil": -601, " IXl": -602, "IXaz": -603, " biX": -604, "Xro": -605, "Xland ": -606, "aXun": -607, "aXuc": -608, "aXle": -609, "guX": -610, "Xai": -611, "Xui": -612, "leXen ": 613, " naXa": -614, "bliXd": 615, "Xmati": -616, " IXn": -617, "maX ": -618, "oXi": -619, " briX": -620, "Xlia": -621, "OXet": -622, "luiX": -623, "efiX": 624, "OpeX": 625, " yiX": 626, "Xmo": -627, " OXer ": -628, "oXmala": -629, "gulaX": -630, "koXr": -631, "Xass": -632, "Xas ": -633, "mUX": -634, "ieX": -635, "oXn": -636, " naXi": -637, " reX": -638, " zaX": -639, "Xue": -640, "uXi": -641, "m aX ": -642, "Xanis": -643, "maXn": -644, "Xdi": 645, "seXe": 646, "uXan": -647, " saXan ": -648, "aXil ": -649, " diXi": -650, "Xgi": -651, "taX ": -652, "goX": -653, "CX": -654, "Xie ": -655, "Xs ": -656, "Xah": -657, "SX": -658, "bliXl": 659, " CaXa": 660, "aXac": 661, "iXf": 662, "moXr": -663, "voX": -664, "oXre": -665, "toXa": -666, "Xat": -668, "doXr": 669, "iXel": -670, " moXo": 671, "iXil ": -672, "aXip ": -673, "Xua": -674, "iXem": -675, "Xay ": -676, "reXe ": 677, "oX ": -678, "Xide": -679, "yun eX": 680, " aXit ": -681, "dar tuXla ": -682, "aXne": -683, "auX": -684, "oXa ": 685, "aXg": -686, "rkeX": 687, " tUX": -688, "noXr": -689, " iXr": 690, " saXo": 691, "iyoX": -692, "gX": -693, "eaXa": -694, "ouX": -695, "eoXr": -696, "aXul": -697, " eXr": 698, "Xic": -699, "neXe ": 700, "iXen": -701, "CeXe": 702, "deX": 703, "aXre": -704, "uXe": -705, "sleX": 706, "aXra": -707, "Xano": -708, "oXe": -709, "bliX ": 710, " baXa": -711, "CiX": 712, "aXazi": -713, "IXu": -714, "teXe": 715, "yeXe": 716, " vuX": -717, "meXe ": 718, "niXd": 719, "eXm": 720, " oXun": -721, " praX": -722, "Xio": -723, "eceX": 724, "oXu": 725, "aXe": -726, "oXlu": 727, "Xanda": -728, "araX": -729, "uXay": -730, "Xne": 731, "Xh": -732, "haX": -733, "beXe": 734, "eXl": 735, "Xo": -736, "loX": -737, " liX": -738, "zX": -739, "vX": -740, "IX": 741, 
"yX": -742, "oXram": -743, "iXe": 744, "eXer": 745, " buXun": -746, "UX": 747, "OX": 750, "lX": -751, "oX": 752, "uX": 753, "aX": 754, "Xi": 756, "X": -757}, u'o': {"uz kXr": -1, "ni kXt": 2, "dir gX": 3, "Il dXn": 4, "an Xng": 5, "lum kX": -6, "niS kX": -7, "Giz gX": 8, " te gX": 9, "rka kX": -10, "jIn Xn": 11, "ki rXp": 12, "Xnerek": 13, "ezik X": -14, "dIn CX": -15, "ni Xng": 16, "klI gX": 17, "ran CX": -18, "ok Xn ": 19, "I Xrt ": 20, "m gXcu": -21, " efe X": 22, "OSe dX": 23, "Ge dXn": 24, "ik dXk": -25, "bu sXn": -26, "ma sXk": -27, "e Xlup": -28, " Xlusm": -29, "Xyune ": 30, " la gX": 31, "an rXm": -32, "ka yXn": 33, "tacI X": -34, "nuGu X": -35, "Xnusma": -36, "nde rX": -37, "bin Xg": 38, "zle gX": 39, "dXnsa": -40, "Xktel": -41, "Xnalm": 42, "kXley": 43, "lXwen": 44, "gakgX": -45, "sXnus": 46, "ektX ": 47, "uyanX": -48, "0 Xz ": -49, "rI Xo": 50, " Xnut": 51, " us X": 52, "jon X": -53, " lXm ": 54, "Xykoy": 55, "l bXb": -56, "kXsog": 57, " tXko": 58, "bOksX": 59, "a sX ": 60, "e Xt ": 61, "76 yX": 62, "e Xmn": 63, "CXpul": -64, " kXrv": 65, "d gX ": -66, " dXb ": 67, "Xtana": 68, "I Xes": 69, "lerXn": 70, "he Xp": -71, "Xrtuc": -72, " Xdi": 73, "i gyX": -74, "i lXw": 75, "gXdek": 76, "hektX": -77, "Xlumy": 78, "th jX": 79, "pt yX": 80, "Xkse ": 81, "i i X": 82, " setX": 83, "arelX": 84, "GI Xb": -85, "Xpece": 86, "r m X": -87, "Xglel": 88, "Xrfun": 89, "b Xde": -90, "e I X": -91, "Xgdur": 92, "rXdov": 93, "Xseri": -94, "Xunen": 95, "Xksen": -96, "ortrX": 97, " Xrce": 98, "Sii X": 99, "mXren": 100, " kXtl": 101, "fritX": 102, "yl X ": 103, "dirX ": 104, "lemX ": 105, " rXba": 106, "b dXl": -107, "k sX ": 108, "k bX ": 109, "guadX": 110, "Xrdev": 111, "en aX": 112, "rXney": -113, "dr Xv": 114, "n gaX": 115, "orn X": 116, "Xrdio": -117, "ou gX": -118, "Ol gX": 119, "i bX ": 120, "ny bX": -121, "11 kX": -122, "n dXt": 123, "ga gX": 124, "nCo X": 125, "CXkes": -126, "Xsler": 127, "Xyuym": 128, "Xnlus": -129, "Xders": 130, "Xryo ": 131, "Xnkin": -132, "os gX": 
133, "ilo X": -134, "5 yXk": 135, "zu kX": -136, " sXv ": 137, "m Xr ": 138, "ukkXy": 139, "s Xlv": 140, " me X": -141, "bXles": -142, "CUn X": 143, " hXrg": 144, " UrX": 145, "4 kXy": 146, " Xve ": 147, " d gX": 148, "Xnkay": 149, "Xnbaz": -150, "mis X": -151, "kXreb": 152, "dXkse": 153, " tetX": 154, " b dX": -155, "7 yXk": 156, " 1 Xz": 157, "morkX": 158, "romXr": 159, "Xleme": 160, "m y X": 161, "Xtsun": 162, "shi X": -163, "2 kXy": 164, "C rXm": -165, "li Xe": 166, "lu Xb": -167, "go gX": 168, "bi lX": -169, " lXp ": 170, "tXral": -171, "gerlX": 172, "inagX": -173, "i n X": -174, "cal X": -175, "s Xzi": -176, "nC Xd": -177, "ry kX": -178, "fe gX": 179, "dI mX": -180, " dXle": 181, "kUp X": -182, "ka Xg": 183, "Or SX": 184, "ad yX": 185, "Xvern": -186, " bXnl": 187, "dr gX": 188, "Xluye": 189, "ketXr": 190, "r gX ": -191, "st gX": 192, "tu dX": -193, "foklX": 194, "Xvell": -195, "oza X": 196, "Xzale": -197, "tXrki": 198, "rk gX": 199, "o dX ": 200, "Xrsen": 201, " CXsu": 202, "e yXe": 203, "c bXl": 204, " dXl ": 205, "tXron": -206, "Xrtla": -207, "ku kX": -208, "8 kXy": 209, "ndXrt": 210, "ov Xn": 211, "S Xna": -212, "rXlov": 213, "dyatX": 214, "Xmuyo": 215, "gI kX": -216, "Xzar ": -217, "Xlcen": 218, "u Xst": 219, "Xyser": -220, "Xlulu": -221, "na lX": -222, "Xvula": -223, "d Xlm": -224, "Um gX": 225, " Xrun": 226, "z rXm": -227, "ngXl ": 228, "CerdX": 229, " Xnyu": 230, "r Xme": 231, " tXb": -232, "bXluk": 233, "Xsym ": 234, "Xzbek": 235, "m Xtu": -236, "Xtusu": 237, "n lXk": -238, "gXrsu": 239, "13 kX": -240, "Xnely": -241, "Xlmun": 242, "gXrta": -243, "ha Xb": -244, "gXnul": 245, "Xrebi": 246, "Xyuna": -247, "Xzenl": 248, "n Xve": -249, "dXkuz": -250, "Xrcul": -251, "rXc": -252, " terX": 253, "Xzgur": 254, "Xzerk": 255, " Xrm": -256, "a Xne": 257, " Xkta": -258, "Xkenl": 259, "Xnlar": -260, "Xsunu": -261, "dXviz": 262, "gXbl": -263, "n iX": 264, "bXcu": 265, "Xooy": 266, "Xnm ": 267, "Xpem": 268, "CdXk": 269, "uerX": -270, " Xsu": 271, " Xsr": 272, "Xgge": 
273, "Xyre": 274, "elXz": 275, "Xzia": -276, "Xrrk": 277, "objX": 278, "ho X": 279, "kyXd": 280, " Xen": 281, "lIdX": 282, "rkXp": 283, "eanX": -284, "Xk1 ": 285, "pXku": 286, "gXlm": 287, "Xnex": -288, "gXmo": -289, "dXlk": 290, "tXve": 291, "Xccu": 292, " jXl": 293, "InXz": 294, "bXhl": 295, "Xgrt": 296, "kXkk": -297, "Xnnb": 298, "ykXy": 299, "nbXn": 300, "bXnb": 301, "ntXz": 302, "Xzie": -303, "taXv": 304, " G X": 305, "Xlea": -306, "segX": -307, "uc X": -308, "rIXn": 309, "nh X": 310, "Xrso": -311, "esiX": 312, " O X": 313, "Xkuv": 314, "lldX": -315, "Xnss": -316, "sXro": -317, "dXny": -318, "kyXn": 319, "SXru": 320, " Xkd": 321, "Xzni": -322, "fosX": 323, "yorX": 324, "irXr": 325, " IgX": -326, "Xrki": -327, "Xnra": -328, " Xnr": 329, "Xyok": 330, "zXme": 331, " pnX": 332, "Xnea": -333, "rOlX": 334, "Xtov": -335, "Xvma": -336, "kXye": 337, "Xbeg": 338, "Xyti": -339, "gXvi": -340, "barX": -341, "Xpti": -342, "Xneb": 343, "Xryu": -344, "9 Xc": -345, "Xket":
inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerPowerConsumption"] = 0 for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]): if unit_configure["AirHeatExchangerPowerConsumption"] != None: inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerPowerConsumption"] += \ unit_configure["AirHeatExchangerPowerConsumption"] * unit_configure["Number"] # 全熱交換器の風量 [m3/h] inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerAirVolume"] = 0 for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]): if (unit_configure["AirHeatExchangeRatioCooling"] != None) and (unit_configure["AirHeatExchangeRatioHeating"] != None): inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerAirVolume"] += \ unit_configure["FanAirVolume"] * unit_configure["Number"] ##---------------------------------------------------------------------------------- ## 冷暖同時供給の有無の判定 ##---------------------------------------------------------------------------------- for ahu_name in inputdata["AirHandlingSystem"]: inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply"] = "無" inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply_cooling"] = "無" inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply_heating"] = "無" for pump_name in inputdata["SecondaryPumpSystem"]: inputdata["SecondaryPumpSystem"][ pump_name ]["isSimultaneousSupply"] = "無" for ref_name in inputdata["HeatsourceSystem"]: inputdata["HeatsourceSystem"][ ref_name ]["isSimultaneousSupply"] = "無" for room_zone_name in inputdata["AirConditioningZone"]: if inputdata["AirConditioningZone"][room_zone_name]["isSimultaneousSupply"] == "有": # 空調機群 inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["isSimultaneousSupply_cooling"] = "有" inputdata["AirHandlingSystem"][ 
inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"]]["isSimultaneousSupply_cooling"] = "有" inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["isSimultaneousSupply_heating"] = "有" inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"]]["isSimultaneousSupply_heating"] = "有" # 熱源群 id_ref_c1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["HeatSource_cooling"] id_ref_c2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] ]["HeatSource_cooling"] id_ref_h1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["HeatSource_heating"] id_ref_h2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"] ]["HeatSource_heating"] inputdata["HeatsourceSystem"][ id_ref_c1 ]["isSimultaneousSupply"] = "有" inputdata["HeatsourceSystem"][ id_ref_c2 ]["isSimultaneousSupply"] = "有" inputdata["HeatsourceSystem"][ id_ref_h1 ]["isSimultaneousSupply"] = "有" inputdata["HeatsourceSystem"][ id_ref_h2 ]["isSimultaneousSupply"] = "有" # 二次ポンプ群 id_pump_c1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["Pump_cooling"] id_pump_c2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] ]["Pump_cooling"] id_pump_h1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["Pump_heating"] id_pump_h2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"] ]["Pump_heating"] inputdata["SecondaryPumpSystem"][ id_pump_c1 ]["isSimultaneousSupply"] = "有" inputdata["SecondaryPumpSystem"][ id_pump_c2 ]["isSimultaneousSupply"] = "有" 
inputdata["SecondaryPumpSystem"][ id_pump_h1 ]["isSimultaneousSupply"] = "有" inputdata["SecondaryPumpSystem"][ id_pump_h2 ]["isSimultaneousSupply"] = "有" elif inputdata["AirConditioningZone"][room_zone_name]["isSimultaneousSupply"] == "有(室負荷)": # 空調機群 inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["isSimultaneousSupply_cooling"] = "有" inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["isSimultaneousSupply_heating"] = "有" # 熱源群 id_ref_c1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["HeatSource_cooling"] id_ref_h1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["HeatSource_heating"] inputdata["HeatsourceSystem"][ id_ref_c1 ]["isSimultaneousSupply"] = "有" inputdata["HeatsourceSystem"][ id_ref_h1 ]["isSimultaneousSupply"] = "有" # 二次ポンプ群 id_pump_c1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] ]["Pump_cooling"] id_pump_h1 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_insideLoad"] ]["Pump_heating"] inputdata["SecondaryPumpSystem"][ id_pump_c1 ]["isSimultaneousSupply"] = "有" inputdata["SecondaryPumpSystem"][ id_pump_h1 ]["isSimultaneousSupply"] = "有" elif inputdata["AirConditioningZone"][room_zone_name]["isSimultaneousSupply"] == "有(外気負荷)": # 空調機群 inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"]]["isSimultaneousSupply_cooling"] = "有" inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"]]["isSimultaneousSupply_heating"] = "有" # 熱源群 id_ref_c2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] ]["HeatSource_cooling"] id_ref_h2 = 
inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"] ]["HeatSource_heating"] inputdata["HeatsourceSystem"][ id_ref_c2 ]["isSimultaneousSupply"] = "有" inputdata["HeatsourceSystem"][ id_ref_h2 ]["isSimultaneousSupply"] = "有" # 二次ポンプ群 id_pump_c2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] ]["Pump_cooling"] id_pump_h2 = inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"] ]["Pump_heating"] inputdata["SecondaryPumpSystem"][ id_pump_c2 ]["isSimultaneousSupply"] = "有" inputdata["SecondaryPumpSystem"][ id_pump_h2 ]["isSimultaneousSupply"] = "有" # 両方とも冷暖同時なら、その空調機群は冷暖同時運転可能とする。 for ahu_name in inputdata["AirHandlingSystem"]: if (inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply_cooling"] == "有") and \ (inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply_heating"] == "有"): inputdata["AirHandlingSystem"][ ahu_name ]["isSimultaneousSupply"] = "有" ##---------------------------------------------------------------------------------- ## 空調機群が処理する日積算室負荷(解説書 2.5.1) ##---------------------------------------------------------------------------------- for room_zone_name in inputdata["AirConditioningZone"]: # 室内負荷処理用空調機群の名称 ahu_name = inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] # 当該空調機群が熱を供給する時刻別室負荷を積算する。 resultJson["AHU"][ahu_name]["Qroom_hourly"] += resultJson["Qroom"][room_zone_name]["Qroom_hourly"] ##---------------------------------------------------------------------------------- ## 空調機群の運転時間(解説書 2.5.2) ##---------------------------------------------------------------------------------- for room_zone_name in inputdata["AirConditioningZone"]: # 室内負荷処理用空調機群の名称 ahu_name = inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_insideLoad"] # 室の空調有無 roomScheduleRoom(365×24)を加算 resultJson["AHU"][ ahu_name ]["schedule"] += 
roomScheduleRoom[room_zone_name] for room_zone_name in inputdata["AirConditioningZone"]: # 外気負荷処理用空調機群の名称 ahu_name = inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] # 室の空調有無 roomScheduleRoom(365×24)を加算 resultJson["AHU"][ ahu_name ]["schedule"] += roomScheduleRoom[room_zone_name] # 各空調機群の運転時間 for ahu_name in inputdata["AirHandlingSystem"]: # 運転スケジュールの和が「1以上(どこか一部屋は動いている)」であれば、空調機は稼働しているとする。 resultJson["AHU"][ahu_name]["schedule"][ resultJson["AHU"][ahu_name]["schedule"] > 1 ] = 1 # 時刻別の外気エンタルピー resultJson["AHU"][ahu_name]["Hoa_hourly"] = Hoa_hourly ##---------------------------------------------------------------------------------- ## 外気負荷[kW]の算出(解説書 2.5.3) ##---------------------------------------------------------------------------------- # 外気導入量 [m3/h] for ahu_name in inputdata["AirHandlingSystem"]: inputdata["AirHandlingSystem"][ ahu_name ]["outdoorAirVolume_cooling"] = 0 inputdata["AirHandlingSystem"][ ahu_name ]["outdoorAirVolume_heating"] = 0 for room_zone_name in inputdata["AirConditioningZone"]: # 各室の外気導入量 [m3/h] if "room_usage_condition" in inputdata["SpecialInputData"]: # SP-9シートで任意の入力がされている場合 inputdata["AirConditioningZone"][room_zone_name]["outdoorAirVolume"] = \ bc.get_roomOutdoorAirVolume( inputdata["AirConditioningZone"][room_zone_name]["buildingType"], inputdata["AirConditioningZone"][room_zone_name]["roomType"], inputdata["SpecialInputData"]["room_usage_condition"] ) * inputdata["AirConditioningZone"][room_zone_name]["zoneArea"] else: inputdata["AirConditioningZone"][room_zone_name]["outdoorAirVolume"] = \ bc.get_roomOutdoorAirVolume( inputdata["AirConditioningZone"][room_zone_name]["buildingType"], inputdata["AirConditioningZone"][room_zone_name]["roomType"] ) * inputdata["AirConditioningZone"][room_zone_name]["zoneArea"] # 冷房期間における外気風量 [m3/h] inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_cooling_outdoorLoad"] ]["outdoorAirVolume_cooling"] += \ 
inputdata["AirConditioningZone"][room_zone_name]["outdoorAirVolume"] # 暖房期間における外気風量 [m3/h] inputdata["AirHandlingSystem"][ inputdata["AirConditioningZone"][room_zone_name]["AHU_heating_outdoorLoad"] ]["outdoorAirVolume_heating"] += \ inputdata["AirConditioningZone"][room_zone_name]["outdoorAirVolume"] # 全熱交換効率の補正 for ahu_name in inputdata["AirHandlingSystem"]: # 冷房運転時の補正 if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] != None: ahuaexeff = inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"]/100 aexCeff = 1 - ((1/0.85)-1) * (1-ahuaexeff)/ahuaexeff aexCtol = 0.95 aexCbal = 0.67 inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] = \ ahuaexeff * aexCeff * aexCtol * aexCbal # 暖房運転時の補正 if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] != None: ahuaexeff = inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"]/100 aexCeff = 1 - ((1/0.85)-1) * (1-ahuaexeff)/ahuaexeff aexCtol = 0.95 aexCbal = 0.67 inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] = \ ahuaexeff * aexCeff * aexCtol * aexCbal # 外気負荷[kW] for ahu_name in inputdata["AirHandlingSystem"]: for dd in range(0,365): for hh in range(0,24): if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0: # 空調機が稼働する場合 # 運転モードによって場合分け if ac_mode[dd] == "暖房": # 外気導入量 [m3/h] ahuVoa = inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_heating"] # 全熱交換風量 [m3/h] ahuaexV = inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerAirVolume"] # 全熱交換風量(0以上、外気導入量以下とする) if ahuaexV > ahuVoa: ahuaexV = ahuVoa elif ahuaexV <= 0: ahuaexV = 0 # 外気負荷の算出 if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] == None: # 全熱交換器がない場合 resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_heating"] *1.293/3600 else: # 全熱交換器がある場合 if 
(resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] > resultJson["schedule"]["room_enthalpy"][dd][hh]) and (inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerControl"] == "有"): # バイパス有の場合はそのまま外気導入する。 resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_heating"] *1.293/3600 else: # 全熱交換器による外気負荷削減を見込む。 resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * \ (inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_heating"] - \ ahuaexV * inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioHeating"] ) *1.293/3600 elif (ac_mode[dd] == "中間") or (ac_mode[dd] == "冷房"): ahuVoa = inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] ahuaexV = inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerAirVolume"] # 全熱交換風量(0以上、外気導入量以下とする) if ahuaexV > ahuVoa: ahuaexV = ahuVoa elif ahuaexV <= 0: ahuaexV = 0 # 外気負荷の算出 if inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] == None: # 全熱交換器がない場合 resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] *1.293/3600 else: # 全熱交換器がある場合 if (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] < resultJson["schedule"]["room_enthalpy"][dd][hh]) and (inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangerControl"] == "有"): # バイパス有の場合はそのまま外気導入する。 resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] *1.293/3600 else: # 全熱交換器がある場合 # 全熱交換器による外気負荷削減を見込む。 
resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] = \ (resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh] - resultJson["schedule"]["room_enthalpy"][dd][hh]) * \ (inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] - \ ahuaexV * inputdata["AirHandlingSystem"][ahu_name]["AirHeatExchangeRatioCooling"] ) *1.293/3600 ##---------------------------------------------------------------------------------- ## 外気冷房制御による負荷削減量(解説書 2.5.4) ##---------------------------------------------------------------------------------- for ahu_name in inputdata["AirHandlingSystem"]: for dd in range(0,365): for hh in range(0,24): if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0: # 空調機が稼働する場合 # 外気冷房効果の推定 if (inputdata["AirHandlingSystem"][ahu_name]["isEconomizer"] == "有") and (resultJson["AHU"][ahu_name]["Qroom_hourly"][dd][hh]>0): # 外気冷房があり、室負荷が冷房要求であれば # 外気冷房運転時の外気風量 [kg/s] resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] = \ resultJson["AHU"][ahu_name]["Qroom_hourly"][dd][hh] / \ ((resultJson["schedule"]["room_enthalpy"][dd][hh]-resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh]) * (3600/1000)) # 上限・下限 if resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] < inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] *1.293/3600: # 下限(外気取入量) [m3/h]→[kg/s] resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] = inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] *1.293/3600 elif resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] > inputdata["AirHandlingSystem"][ahu_name]["EconomizerMaxAirVolume"] *1.293/3600: # 上限(給気風量) [m3/h]→[kg/s] resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] = inputdata["AirHandlingSystem"][ahu_name]["EconomizerMaxAirVolume"] *1.293/3600 # 追加すべき外気量(外気冷房用の追加分のみ)[kg/s] resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] = \ resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] - inputdata["AirHandlingSystem"][ahu_name]["outdoorAirVolume_cooling"] *1.293/3600 # 
外気冷房による負荷削減効果 [MJ/day] if (inputdata["AirHandlingSystem"][ahu_name]["isEconomizer"] == "有"): # 外気冷房があれば if resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] > 0: # 外冷時風量>0であれば resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh] = \ resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"][dd][hh] * (resultJson["schedule"]["room_enthalpy"][dd][hh]-resultJson["AHU"][ahu_name]["Hoa_hourly"][dd][hh])*3600/1000 ##---------------------------------------------------------------------------------- ## 日積算空調負荷 Qahu_c, Qahu_h の算出(解説書 2.5.5) ##---------------------------------------------------------------------------------- for ahu_name in inputdata["AirHandlingSystem"]: for dd in range(0,365): for hh in range(0,24): if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0: # 空調機が稼働する場合 if (inputdata["AirHandlingSystem"][ahu_name]["isOutdoorAirCut"] == "無"): # 外気カットがない場合 resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = \ resultJson["AHU"][ahu_name]["Qroom_hourly"][dd][hh] + resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] * 3600/1000 else: if hh != 0 and resultJson["AHU"][ahu_name]["schedule"][dd][hh-1] == 0: # 起動時(前の時刻が停止状態)であれば resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = resultJson["AHU"][ahu_name]["Qroom_hourly"][dd][hh] else: resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = \ resultJson["AHU"][ahu_name]["Qroom_hourly"][dd][hh] + resultJson["AHU"][ahu_name]["Qoa_hourly"][dd][hh] * 3600/1000 print('空調負荷計算完了') if DEBUG: # pragma: no cover for ahu_name in inputdata["AirHandlingSystem"]: # 外気負荷のグラフ化 mf.hourlyplot( resultJson["AHU"][ahu_name]["Qoa_hourly"] , "外気負荷: "+ahu_name, "b", "時刻別外気負荷") # 外気冷房効果のグラフ化 mf.hourlyplot( resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"] , "外気冷房による削減熱量: "+ahu_name, "b", "時刻別外気冷房効果") mf.hourlyplot( resultJson["AHU"][ahu_name]["Economizer"]["AHUVovc"] , "外気冷房時の風量: "+ahu_name, "b", "時刻別外気冷房時風量") # 空調負荷のグラフ化 mf.hourlyplot( resultJson["AHU"][ahu_name]["Qahu_hourly"] , "空調負荷: "+ahu_name, "b", "時刻別空調負荷") 
##---------------------------------------------------------------------------------- ## 任意評定用 空調負荷( SP-10 ) ##---------------------------------------------------------------------------------- if "SpecialInputData" in inputdata: if "Qahu" in inputdata["SpecialInputData"]: for ahu_name in inputdata["SpecialInputData"]["Qahu"]: # SP-10シートに入力された空調機群毎に処理 if ahu_name in resultJson["AHU"]: # SP-10シートに入力された室が空調機群として存在していれば for dd in range(0,365): for hh in range(0,24): # 空調負荷[kW] → [MJ/h] resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] = inputdata["SpecialInputData"]["Qahu"][ahu_name][dd][hh] * 3600 / 1000 # 外気冷房は強制的に0とする(既に見込まれているものとする) resultJson["AHU"][ahu_name]["Economizer"]["Qahu_oac"][dd][hh] = 0 ##---------------------------------------------------------------------------------- ## 空調機群の負荷率(解説書 2.5.6) ##---------------------------------------------------------------------------------- for ahu_name in inputdata["AirHandlingSystem"]: for dd in range(0,365): for hh in range(0,24): if resultJson["AHU"][ahu_name]["schedule"][dd][hh] > 0: # 空調機が稼働する場合 # 冷暖同時運転が「有」である場合(季節に依らず、冷却コイル負荷も加熱コイル負荷も処理する) if inputdata["AirHandlingSystem"][ahu_name]["isSimultaneousSupply"] == "有": if resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] >= 0: # 冷房負荷の場合 # 冷房時の負荷率 [-] resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = \ resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh]*1000/3600 / inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityCooling"] else: # 暖房時の負荷率 [-] resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = \ (-1) * resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh]*1000/3600 / inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityHeating"] # 冷暖同時供給が「無」である場合(季節により、冷却コイル負荷か加熱コイル負荷のどちらか一方を処理する) elif inputdata["AirHandlingSystem"][ahu_name]["isSimultaneousSupply"] == "無": # 冷房期、中間期の場合 if (ac_mode[dd] == "冷房" or ac_mode[dd] == "中間"): if resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] > 0: # 冷房負荷の場合 resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = \ 
resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh]*1000/3600 / inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityCooling"] else: resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = 0.01 # 暖房期の場合 elif ac_mode[dd] == "暖房": if resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh] < 0: # 暖房負荷の場合 resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = \ (-1) * resultJson["AHU"][ahu_name]["Qahu_hourly"][dd][hh]*1000/3600 / inputdata["AirHandlingSystem"][ahu_name]["RatedCapacityHeating"] else: resultJson["AHU"][ahu_name]["load_ratio"][dd][hh] = 0.01 if DEBUG: # pragma: no cover for ahu_name in inputdata["AirHandlingSystem"]: # 空調負荷率のグラフ化 mf.hourlyplot( resultJson["AHU"][ahu_name]["load_ratio"] , "空調負荷率: "+ahu_name, "b", "時刻別負荷率") ##---------------------------------------------------------------------------------- ## 風量制御方式によって定まる係数(解説書 2.5.7) ##---------------------------------------------------------------------------------- def ahu_control_performance_curve(load_ratio, a4, a3, a2, a1, a0, Vmin): """ 空調機群の制御によるエネルギー削減効果(負荷率の関数) """ if load_ratio <= 0: saving_factor = 0 else: if load_ratio > 1: saving_factor = 1.2 elif load_ratio == 0: saving_factor = 0 elif load_ratio < Vmin: saving_factor = a4 * (Vmin)**4 + a3 * (Vmin)**3 + a2 * (Vmin)**2 + a1 * (Vmin)**1 + a0 else: saving_factor = a4 * (load_ratio)**4 + a3 * (load_ratio)**3 + a2 * (load_ratio)**2 + a1 * (load_ratio)**1 + a0 return saving_factor for ahu_name in inputdata["AirHandlingSystem"]: for unit_id, unit_configure in enumerate(inputdata["AirHandlingSystem"][ahu_name]["AirHandlingUnit"]): # 係数の取得 if unit_configure["FanControlType"] in FLOWCONTROL.keys(): a4 = FLOWCONTROL[ unit_configure["FanControlType"] ]["a4"] a3 =
#!/usr/bin/env python3 # # Automated tests for running proxy simulations. # See `--help` for more information. # #/* # * Licensed to the EPYSYS SCIENCE (EpiSci) under one or more # * contributor license agreements. # * The EPYSYS SCIENCE (EpiSci) licenses this file to You under # * the Episys Science (EpiSci) Public License (Version 1.1) (the "License"); you may not use this file # * except in compliance with the License. # * You may obtain a copy of the License at # * # * https://github.com/EpiSci/oai-lte-5g-multi-ue-proxy/blob/master/LICENSE # * # * Unless required by applicable law or agreed to in writing, software # * distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. # *------------------------------------------------------------------------------- # * For more information about EPYSYS SCIENCE (EpiSci): # * <EMAIL> # */ import os import sys import argparse import logging import time import re import glob import bz2 import subprocess from subprocess import Popen from typing import Optional, Dict, List, Generator, Union WORKSPACE_DIR = os.path.abspath(os.path.dirname(sys.argv[0])) # ---------------------------------------------------------------------------- # Command line argument parsing parser = argparse.ArgumentParser(description=""" Automated tests for running proxy simulations """) parser.add_argument('--num-ues', '-u', metavar='N', type=int, default=1, help=""" The number of UEs to launch (default: %(default)s) """) parser.add_argument('--duration', '-d', metavar='SECONDS', type=int, default=30, help=""" How long to run the test before stopping to examine the logs """) parser.add_argument('--log-dir', '-l', default='.', help=""" Where to store log files """) parser.add_argument('--no-run', '-n', action='store_true', help=""" Don't run the scenario, only 
examine the logs in the --log-dir directory from a previous run of the scenario """) parser.add_argument('--mode', default='lte', choices='lte nr nsa'.split(), help=""" The kind of simulation scenario to run (default: %(default)s) """) parser.add_argument('--nfapi-trace-level', '-N', choices='none error warn note info debug'.split(), help=""" Set the NFAPI trace level """) parser.add_argument('--debug', action='store_true', help=""" Enable debug logging (for this script only) """) OPTS = parser.parse_args() del parser logging.basicConfig(level=logging.DEBUG if OPTS.debug else logging.INFO, format='>>> %(name)s: %(levelname)s: %(message)s') LOGGER = logging.getLogger(os.path.basename(sys.argv[0])) RUN_OAI = os.path.join(WORKSPACE_DIR, 'run-oai') if OPTS.nfapi_trace_level: os.environ['NFAPI_TRACE_LEVEL'] = OPTS.nfapi_trace_level # ---------------------------------------------------------------------------- def redirect_output(cmd: str, filename: str) -> str: cmd += ' >{} 2>&1'.format(filename) return cmd def compress(from_name: str, to_name: Optional[str]=None, remove_original: bool=False) -> None: """ Compress the file `from_name` and store it as `to_name`. `to_name` defaults to `from_name` with `.bz2` appended. If `remove_original` is True, removes `from_name` when the compress finishes. 
""" if to_name is None: to_name = from_name if not to_name.endswith('.bz2'): to_name += '.bz2' LOGGER.info('Compress %s to %s', from_name, to_name) with bz2.open(to_name, 'w') as outh, \ open(from_name, 'rb') as inh: while True: data = inh.read(10240) if not data: break outh.write(data) if remove_original: LOGGER.debug('Remove %s', from_name) os.remove(from_name) class CompressJobs: """ Allow multiple invocations of `compress` to run in parallel """ def __init__(self) -> None: self.kids: List[int] = [] def compress(self, from_name: str, to_name: Optional[str]=None, remove_original: bool=False) -> None: if not os.path.exists(from_name): # It's not necessarily an error if the log file does not exist. # For example, if nfapi_trace never gets invoked (e.g., because # NFAPI_TRACE_LEVEL is set to none), then the log file nfapi.log # will not get created. LOGGER.warning('No file: %s', from_name) return kid = os.fork() if kid != 0: self.kids.append(kid) else: LOGGER.debug('in pid %d compress %s...', os.getpid(), from_name) compress(from_name, to_name, remove_original) LOGGER.debug('in pid %d compress %s...done', os.getpid(), from_name) sys.exit() def wait(self) -> None: LOGGER.debug('wait %s...', self.kids) failed = [] for kid in self.kids: LOGGER.debug('waitpid %d', kid) _pid, status = os.waitpid(kid, 0) if status != 0: failed.append(kid) if failed: raise Exception('compression failed: %s', failed) LOGGER.debug('wait...done') class NodeIdGenerator: id: int = 0 def __call__(self): self.id += 1 return self.id class Scenario: """ Represents a proxy scenario """ def __init__(self) -> None: self.enb_hostname: Optional[str] = None self.enb_node_id: Optional[int] = None self.ue_hostname: Dict[int, str] = {} self.ue_node_id: Dict[int, int] = {} self.gnb_hostname: Optional[str] = None self.gnb_node_id: Optional[int] = None self.nrue_hostname: Dict[int, str] = {} self.nrue_node_id: Dict[int, int] = {} # Setup our data structures according to the command-line options node_ids = 
NodeIdGenerator() if OPTS.mode == 'nsa': # Non-standalone mode: eNB, gNB, UEs and NRUEs self.enb_hostname = 'eNB' self.enb_node_id = node_ids() for i in range(OPTS.num_ues): ue_num = i + 1 node_id = node_ids() self.ue_hostname[ue_num] = f'UE{ue_num}' self.ue_node_id[ue_num] = node_id self.nrue_hostname[ue_num] = f'NRUE{ue_num}' self.nrue_node_id[ue_num] = node_id self.gnb_hostname = 'gNB' self.gnb_node_id = node_ids() if OPTS.mode == 'nr': # NR mode: gNB and NRUEs, no eNB, no UEs self.gnb_hostname = 'gNB' self.gnb_node_id = node_ids() for i in range(OPTS.num_ues): ue_num = i + 1 self.nrue_hostname[ue_num] = f'NRUE{ue_num}' self.nrue_node_id[ue_num] = node_ids() if OPTS.mode == 'lte': # LTE mode: eNB and UEs, no gNB, no NRUEs self.enb_hostname = 'eNB' self.enb_node_id = node_ids() for i in range(OPTS.num_ues): ue_num = i + 1 self.ue_hostname[ue_num] = f'UE{ue_num}' self.ue_node_id[ue_num] = node_ids() def launch_enb(self) -> Popen: log_name = '{}/eNB.log'.format(OPTS.log_dir) LOGGER.info('Launch eNB: %s', log_name) cmd = 'NODE_NUMBER=1 {RUN_OAI} enb' \ .format(RUN_OAI=RUN_OAI) if OPTS.mode == 'nsa': cmd += ' --nsa' proc = Popen(redirect_output(cmd, log_name), shell=True) # TODO: Sleep time needed so eNB and UEs don't start at the exact same # time When nodes start at the same time, occasionally eNB will only # recognize one UE I think this bug has been fixed -- the random # number generator initializer issue time.sleep(1) return proc def launch_proxy(self) -> Popen: log_name = '{}/nfapi.log'.format(OPTS.log_dir) LOGGER.info('Launch Proxy: %s', log_name) if OPTS.mode == 'nr': num_proxy_ues = len(self.nrue_hostname) else: num_proxy_ues = len(self.ue_hostname) cmd = 'exec sudo -E {WORKSPACE_DIR}/build/proxy {NUM_UES} {SOFTMODEM_MODE}' \ .format(WORKSPACE_DIR=WORKSPACE_DIR, NUM_UES=num_proxy_ues, \ SOFTMODEM_MODE=f'--{OPTS.mode}') proc = Popen(redirect_output(cmd, log_name), shell=True) time.sleep(2) return proc def launch_ue(self) -> Dict[int, Popen]: procs = {} for 
num, hostname in self.ue_hostname.items(): log_name = '{}/{}.log'.format(OPTS.log_dir, hostname) LOGGER.info('Launch UE%d: %s', num, log_name) cmd = 'NODE_NUMBER={NODE_ID} {RUN_OAI} ue' \ .format(NODE_ID=self.ue_node_id[num], RUN_OAI=RUN_OAI) if OPTS.mode == 'nsa': cmd += ' --nsa' procs[num] = Popen(redirect_output(cmd, log_name), shell=True) # TODO: Sleep time needed so eNB and UEs don't start at the exact # same time When nodes start at the same time, occasionally eNB # will only recognize one UE time.sleep(1) return procs def launch_gnb(self) -> Popen: log_name = '{}/gNB.log'.format(OPTS.log_dir) LOGGER.info('Launch gNB: %s', log_name) cmd = 'NODE_NUMBER=0 {RUN_OAI} gnb' \ .format(RUN_OAI=RUN_OAI) if OPTS.mode == 'nsa': cmd += ' --nsa' if OPTS.mode == 'nr': cmd += ' --sa' proc = Popen(redirect_output(cmd, log_name), shell=True) # TODO: Sleep time needed so eNB and UEs don't start at the exact same # time When nodes start at the same time, occasionally eNB will only # recognize one UE I think this bug has been fixed -- the random # number generator initializer issue time.sleep(1) return proc def launch_nrue(self) -> Dict[int, Popen]: procs = {} for num, hostname in self.nrue_hostname.items(): log_name = '{}/{}.log'.format(OPTS.log_dir, hostname) LOGGER.info('Launch nrUE%d: %s', num, log_name) cmd = 'NODE_NUMBER={NODE_ID} {RUN_OAI} nrue' \ .format(NODE_ID=self.nrue_node_id[num], RUN_OAI=RUN_OAI) if OPTS.mode == 'nsa': cmd += ' --nsa' if OPTS.mode == 'nr': cmd += ' --sa' procs[num] = Popen(redirect_output(cmd, log_name), shell=True) # TODO: Sleep time needed so eNB and NRUEs don't start at the # exact same time When nodes start at the same time, occasionally # eNB will only recognize one NRUE time.sleep(3) return procs def run(self) -> bool: """ Run the simulation. 
Return True if the test passes """ enb_proc: Optional[Popen] = None proxy_proc: Optional[Popen] = None ue_proc: Dict[int, Popen] = {} gnb_proc: Optional[Popen] = None nrue_proc: Dict[int, Popen] = {} # ------------------------------------------------------------------------------------ # Launch the softmodem processes if self.enb_hostname: enb_proc = self.launch_enb() if self.gnb_hostname: gnb_proc = self.launch_gnb() proxy_proc = self.launch_proxy() if self.nrue_hostname: nrue_proc = self.launch_nrue() if self.ue_hostname: ue_proc = self.launch_ue() # ------------------------------------------------------------------------------------ # Let the simulation run for a while time.sleep(OPTS.duration) # ------------------------------------------------------------------------------------ # Analyze the log files to see if the test run passed passed = True if enb_proc: # See if the eNB crashed status = enb_proc.poll() if status is None: LOGGER.info('eNB process is still running, which is good') else: passed = False LOGGER.critical('eNB process ended early: %r', status) if proxy_proc: # See if the proxy crashed status = proxy_proc.poll() if status is None: LOGGER.info('proxy process is still running, which is good') else: passed = False LOGGER.critical('proxy process ended early: %r', status) if ue_proc: # See if the UE processes crashed for ue_number in self.ue_hostname: status = ue_proc[ue_number].poll() if status is None: LOGGER.info('UE%d process is still running, which is good', ue_number) else: passed = False LOGGER.critical('UE%d process ended early:
''') # XX /r load with index and no offset. ldWithIndex = TailRecipe( 'ldWithIndex', LoadComplex, base_size=2, ins=(GPR, GPR), outs=(GPR), instp=IsEqual(LoadComplex.offset, 0), clobbers_flags=False, compute_size="size_plus_maybe_offset_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); if needs_offset(in_reg0) { modrm_sib_disp8(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); sink.put1(0); } else { modrm_sib(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); } ''') # XX /r float load with no offset. fld = TailRecipe( 'fld', Load, base_size=1, ins=(GPR), outs=(FPR), instp=IsEqual(Load.offset, 0), clobbers_flags=False, compute_size="size_plus_maybe_offset_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex2(in_reg0, out_reg0), sink); if needs_offset(in_reg0) { modrm_disp8(in_reg0, out_reg0, sink); sink.put1(0); } else { modrm_rm(in_reg0, out_reg0, sink); } ''') # XX /r float load with index and no offset. fldWithIndex = TailRecipe( 'fldWithIndex', LoadComplex, base_size=2, ins=(GPR, GPR), outs=(FPR), instp=IsEqual(LoadComplex.offset, 0), clobbers_flags=False, compute_size="size_plus_maybe_offset_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); if needs_offset(in_reg0) { modrm_sib_disp8(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); sink.put1(0); } else { modrm_sib(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); } ''') # XX /r load with 8-bit offset. 
ldDisp8 = TailRecipe( 'ldDisp8', Load, base_size=2, ins=(GPR), outs=(GPR), instp=IsSignedInt(Load.offset, 8), clobbers_flags=False, compute_size="size_plus_maybe_sib_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex2(in_reg0, out_reg0), sink); if needs_sib_byte(in_reg0) { modrm_sib_disp8(out_reg0, sink); sib_noindex(in_reg0, sink); } else { modrm_disp8(in_reg0, out_reg0, sink); } let offset: i32 = offset.into(); sink.put1(offset as u8); ''') # XX /r load with index and 8-bit offset. ldWithIndexDisp8 = TailRecipe( 'ldWithIndexDisp8', LoadComplex, base_size=3, ins=(GPR, GPR), outs=(GPR), instp=IsSignedInt(LoadComplex.offset, 8), clobbers_flags=False, emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); modrm_sib_disp8(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); let offset: i32 = offset.into(); sink.put1(offset as u8); ''') # XX /r float load with 8-bit offset. fldDisp8 = TailRecipe( 'fldDisp8', Load, base_size=2, ins=(GPR), outs=(FPR), instp=IsSignedInt(Load.offset, 8), clobbers_flags=False, compute_size="size_plus_maybe_sib_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex2(in_reg0, out_reg0), sink); if needs_sib_byte(in_reg0) { modrm_sib_disp8(out_reg0, sink); sib_noindex(in_reg0, sink); } else { modrm_disp8(in_reg0, out_reg0, sink); } let offset: i32 = offset.into(); sink.put1(offset as u8); ''') # XX /r float load with 8-bit offset. 
fldWithIndexDisp8 = TailRecipe( 'fldWithIndexDisp8', LoadComplex, base_size=3, ins=(GPR, GPR), outs=(FPR), instp=IsSignedInt(LoadComplex.offset, 8), clobbers_flags=False, emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); modrm_sib_disp8(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); let offset: i32 = offset.into(); sink.put1(offset as u8); ''') # XX /r load with 32-bit offset. ldDisp32 = TailRecipe( 'ldDisp32', Load, base_size=5, ins=(GPR), outs=(GPR), instp=IsSignedInt(Load.offset, 32), clobbers_flags=False, compute_size='size_plus_maybe_sib_for_in_reg_0', emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex2(in_reg0, out_reg0), sink); if needs_sib_byte(in_reg0) { modrm_sib_disp32(out_reg0, sink); sib_noindex(in_reg0, sink); } else { modrm_disp32(in_reg0, out_reg0, sink); } let offset: i32 = offset.into(); sink.put4(offset as u32); ''') # XX /r load with index and 32-bit offset. ldWithIndexDisp32 = TailRecipe( 'ldWithIndexDisp32', LoadComplex, base_size=6, ins=(GPR, GPR), outs=(GPR), instp=IsSignedInt(LoadComplex.offset, 32), clobbers_flags=False, emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); modrm_sib_disp32(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); let offset: i32 = offset.into(); sink.put4(offset as u32); ''') # XX /r float load with 32-bit offset. 
fldDisp32 = TailRecipe( 'fldDisp32', Load, base_size=5, ins=(GPR), outs=(FPR), instp=IsSignedInt(Load.offset, 32), clobbers_flags=False, compute_size="size_plus_maybe_sib_for_in_reg_0", emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex2(in_reg0, out_reg0), sink); if needs_sib_byte(in_reg0) { modrm_sib_disp32(out_reg0, sink); sib_noindex(in_reg0, sink); } else { modrm_disp32(in_reg0, out_reg0, sink); } let offset: i32 = offset.into(); sink.put4(offset as u32); ''') # XX /r float load with index and 32-bit offset. fldWithIndexDisp32 = TailRecipe( 'fldWithIndexDisp32', LoadComplex, base_size=6, ins=(GPR, GPR), outs=(FPR), instp=IsSignedInt(LoadComplex.offset, 32), clobbers_flags=False, emit=''' if !flags.notrap() { sink.trap(TrapCode::HeapOutOfBounds, func.srclocs[inst]); } PUT_OP(bits, rex3(in_reg0, out_reg0, in_reg1), sink); modrm_sib_disp32(out_reg0, sink); sib(0, in_reg1, in_reg0, sink); let offset: i32 = offset.into(); sink.put4(offset as u32); ''') # Unary fill with SIB and 32-bit displacement. fillSib32 = TailRecipe( 'fillSib32', Unary, base_size=6, ins=StackGPR32, outs=GPR, clobbers_flags=False, emit=''' let base = stk_base(in_stk0.base); PUT_OP(bits, rex2(base, out_reg0), sink); modrm_sib_disp32(out_reg0, sink); sib_noindex(base, sink); sink.put4(in_stk0.offset as u32); ''') # Like fillSib32, but targeting an FPR rather than a GPR. ffillSib32 = TailRecipe( 'ffillSib32', Unary, base_size=6, ins=StackFPR32, outs=FPR, clobbers_flags=False, emit=''' let base = stk_base(in_stk0.base); PUT_OP(bits, rex2(base, out_reg0), sink); modrm_sib_disp32(out_reg0, sink); sib_noindex(base, sink); sink.put4(in_stk0.offset as u32); ''') # Regfill with RSP-relative 32-bit displacement. 
regfill32 = TailRecipe( 'regfill32', RegFill, base_size=6, ins=StackGPR32, outs=(), clobbers_flags=False, emit=''' let src = StackRef::sp(src, &func.stack_slots); let base = stk_base(src.base); PUT_OP(bits, rex2(base, dst), sink); modrm_sib_disp32(dst, sink); sib_noindex(base, sink); sink.put4(src.offset as u32); ''') # Like regfill32, but targeting an FPR rather than a GPR. fregfill32 = TailRecipe( 'fregfill32', RegFill, base_size=6, ins=StackFPR32, outs=(), clobbers_flags=False, emit=''' let src = StackRef::sp(src, &func.stack_slots); let base = stk_base(src.base); PUT_OP(bits, rex2(base, dst), sink); modrm_sib_disp32(dst, sink); sib_noindex(base, sink); sink.put4(src.offset as u32); ''') # # Call/return # call_id = TailRecipe( 'call_id', Call, base_size=4, ins=(), outs=(), emit=''' sink.trap(TrapCode::StackOverflow, func.srclocs[inst]); PUT_OP(bits, BASE_REX, sink); // The addend adjusts for the difference between the end of the // instruction and the beginning of the immediate field. 
sink.reloc_external(Reloc::X86CallPCRel4, &func.dfg.ext_funcs[func_ref].name, -4); sink.put4(0); ''') call_plt_id = TailRecipe( 'call_plt_id', Call, base_size=4, ins=(), outs=(), emit=''' sink.trap(TrapCode::StackOverflow, func.srclocs[inst]); PUT_OP(bits, BASE_REX, sink); sink.reloc_external(Reloc::X86CallPLTRel4, &func.dfg.ext_funcs[func_ref].name, -4); sink.put4(0); ''') call_r = TailRecipe( 'call_r', CallIndirect, base_size=1, ins=GPR, outs=(), emit=''' sink.trap(TrapCode::StackOverflow, func.srclocs[inst]); PUT_OP(bits, rex1(in_reg0), sink); modrm_r_bits(in_reg0, bits, sink); ''') ret = TailRecipe( 'ret', MultiAry, base_size=0, ins=(), outs=(), emit=''' PUT_OP(bits, BASE_REX, sink); ''') # # Branches # jmpb = TailRecipe( 'jmpb', Jump, base_size=1, ins=(), outs=(), branch_range=8, clobbers_flags=False, emit=''' PUT_OP(bits, BASE_REX, sink); disp1(destination, func, sink); ''') jmpd = TailRecipe( 'jmpd', Jump, base_size=4, ins=(), outs=(), branch_range=32, clobbers_flags=False, emit=''' PUT_OP(bits, BASE_REX, sink); disp4(destination, func, sink); ''') brib = TailRecipe( 'brib', BranchInt, base_size=1, ins=FLAG.rflags, outs=(), branch_range=8, clobbers_flags=False, emit=''' PUT_OP(bits | icc2opc(cond), BASE_REX, sink); disp1(destination, func, sink); ''') brid = TailRecipe( 'brid', BranchInt, base_size=4, ins=FLAG.rflags, outs=(), branch_range=32, clobbers_flags=False, emit=''' PUT_OP(bits | icc2opc(cond), BASE_REX, sink); disp4(destination, func, sink); ''') brfb = TailRecipe( 'brfb', BranchFloat, base_size=1, ins=FLAG.rflags, outs=(), branch_range=8, clobbers_flags=False, instp=floatccs(BranchFloat), emit=''' PUT_OP(bits | fcc2opc(cond), BASE_REX, sink); disp1(destination, func, sink); ''') brfd = TailRecipe( 'brfd', BranchFloat, base_size=4, ins=FLAG.rflags, outs=(), branch_range=32, clobbers_flags=False, instp=floatccs(BranchFloat), emit=''' PUT_OP(bits | fcc2opc(cond), BASE_REX, sink); disp4(destination, func, sink); ''') indirect_jmp = TailRecipe( 
'indirect_jmp', IndirectJump, base_size=1, ins=GPR, outs=(), clobbers_flags=False, emit=''' PUT_OP(bits, rex1(in_reg0), sink); modrm_r_bits(in_reg0, bits, sink); ''') jt_entry = TailRecipe( 'jt_entry', BranchTableEntry, base_size=2, ins=(GPR, GPR), outs=(GPR), clobbers_flags=False, instp=valid_scale(BranchTableEntry), compute_size="size_plus_maybe_offset_for_in_reg_1", emit=''' PUT_OP(bits, rex3(in_reg1, out_reg0, in_reg0), sink); if needs_offset(in_reg1) { modrm_sib_disp8(out_reg0, sink); sib(imm.trailing_zeros() as u8, in_reg0, in_reg1, sink); sink.put1(0); } else { modrm_sib(out_reg0, sink); sib(imm.trailing_zeros() as u8, in_reg0, in_reg1, sink); } ''') jt_base = TailRecipe( 'jt_base', BranchTableBase, base_size=5, ins=(), outs=(GPR), clobbers_flags=False, emit=''' PUT_OP(bits, rex2(0, out_reg0), sink); modrm_riprel(out_reg0, sink); // No reloc is needed here as the jump table is emitted directly after // the function body. jt_disp4(table, func, sink); ''') # # Test flags and set a register. # # These setCC instructions only set the low 8 bits, and they can only write # ABCD registers without a REX prefix. # # Other instruction encodings accepting `b1` inputs have the same constraints # and only look at the low 8 bits of the input register. 
# seti = TailRecipe( 'seti', IntCond, base_size=1, ins=FLAG.rflags, outs=GPR, requires_prefix=True, clobbers_flags=False, emit=''' PUT_OP(bits | icc2opc(cond), rex1(out_reg0), sink); modrm_r_bits(out_reg0, bits, sink); ''') seti_abcd = TailRecipe( 'seti_abcd', IntCond, base_size=1, ins=FLAG.rflags, outs=ABCD, when_prefixed=seti, clobbers_flags=False, emit=''' PUT_OP(bits | icc2opc(cond), rex1(out_reg0), sink); modrm_r_bits(out_reg0, bits, sink); ''') setf = TailRecipe( 'setf', FloatCond, base_size=1, ins=FLAG.rflags, outs=GPR, requires_prefix=True, clobbers_flags=False, emit=''' PUT_OP(bits | fcc2opc(cond), rex1(out_reg0), sink); modrm_r_bits(out_reg0, bits, sink); ''') setf_abcd = TailRecipe( 'setf_abcd', FloatCond, base_size=1, ins=FLAG.rflags, outs=ABCD, when_prefixed=setf, clobbers_flags=False, emit=''' PUT_OP(bits | fcc2opc(cond), rex1(out_reg0), sink); modrm_r_bits(out_reg0, bits, sink); ''') # # Conditional move (a.k.a integer select) # (maybe-REX.W) 0F 4x modrm(r,r) # 1 byte, modrm(r,r), is after the opcode # cmov = TailRecipe( 'cmov', IntSelect, base_size=1, ins=(FLAG.rflags, GPR, GPR), outs=2, requires_prefix=False, clobbers_flags=False, emit=''' PUT_OP(bits | icc2opc(cond), rex2(in_reg1, in_reg2), sink); modrm_rr(in_reg1, in_reg2, sink); ''') # # Bit scan forwards and reverse # bsf_and_bsr = TailRecipe( 'bsf_and_bsr', Unary, base_size=1, ins=GPR, outs=(GPR, FLAG.rflags), requires_prefix=False, clobbers_flags=True, emit=''' PUT_OP(bits, rex2(in_reg0, out_reg0), sink); modrm_rr(in_reg0, out_reg0, sink); ''') # # Compare and set flags. # # XX /r, MR form. Compare two GPR registers and set flags. rcmp = TailRecipe( 'rcmp', Binary, base_size=1, ins=(GPR, GPR), outs=FLAG.rflags, emit=''' PUT_OP(bits, rex2(in_reg0, in_reg1), sink); modrm_rr(in_reg0, in_reg1, sink); ''') # XX /r, RM form. Compare two FPR registers and set flags. 
fcmp = TailRecipe( 'fcmp', Binary, base_size=1, ins=(FPR, FPR), outs=FLAG.rflags, emit=''' PUT_OP(bits, rex2(in_reg1, in_reg0), sink); modrm_rr(in_reg1, in_reg0, sink); ''') # XX /n, MI form with imm8. rcmp_ib = TailRecipe( 'rcmp_ib', BinaryImm, base_size=2, ins=GPR, outs=FLAG.rflags, instp=IsSignedInt(BinaryImm.imm, 8), emit=''' PUT_OP(bits, rex1(in_reg0), sink); modrm_r_bits(in_reg0, bits, sink); let imm: i64 = imm.into(); sink.put1(imm as u8); ''') # XX /n, MI form with imm32. rcmp_id = TailRecipe( 'rcmp_id', BinaryImm, base_size=5, ins=GPR, outs=FLAG.rflags, instp=IsSignedInt(BinaryImm.imm, 32), emit=''' PUT_OP(bits, rex1(in_reg0), sink); modrm_r_bits(in_reg0, bits, sink); let imm:
<gh_stars>0 #!/usr/bin/env python3 """Test config parsing""" import logging import re import shutil import tempfile import os import unittest from faucet import config_parser as cp LOGNAME = '/dev/null' class TestConfig(unittest.TestCase): # pytype: disable=module-attr """Test config parsing raises correct exception.""" tmpdir = None def setUp(self): logging.disable(logging.CRITICAL) self.tmpdir = tempfile.mkdtemp() def tearDown(self): logging.disable(logging.NOTSET) shutil.rmtree(self.tmpdir) def conf_file_name(self): """Return path to test config file in test directory.""" return os.path.join(self.tmpdir, 'faucet.yaml') def create_config_file(self, config): """Returns file path to file containing the config parameter.""" conf_file_name = self.conf_file_name() with open(conf_file_name, 'wb') as conf_file: if isinstance(config, bytes): conf_file.write(config) else: conf_file.write(config.encode('utf-8')) return conf_file_name def run_function_with_config(self, config, function, before_function=None): """Return False with error if provided function raises InvalidConfigError.""" # TODO: Check acls_in work now acl_in is deprecated if isinstance(config, str) and 'acl_in' in config and not 'acls_in': config = re.sub('(acl_in: )(.*)', 'acls_in: [\\2]', config) conf_file = self.create_config_file(config) if before_function: before_function() try: function(conf_file, LOGNAME) except cp.InvalidConfigError as err: return (False, err) return (True, None) def check_config_failure(self, config, function, before_function=None): """Ensure config parsing reported as failed.""" config_success, config_err = self.run_function_with_config( config, function, before_function) self.assertEqual(config_success, False, config_err) def check_config_success(self, config, function, before_function=None): """Ensure config parsing reported succeeded.""" config_success, config_err = self.run_function_with_config( config, function, before_function) self.assertEqual(config_success, True, 
config_err) def test_dupe_vid(self): """Test that VLANs cannot have same VID.""" config = """ vlans: office: vid: 100 guest: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office 2: native_vlan: guest """ self.check_config_failure(config, cp.dp_parser) def test_unhashable_key(self): config = """ vlans: ? office: vid: 100 guest: vid: 200 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office 2: native_vlan: office 3: native_vlan: guest 4: native_vlan: office 5: tagged_vlans: [office] sw2: dp_id: 0x2 interfaces: 1: native_vlan: office 2: native_vlan: guest 24: tagged_vlans: [office, guest] """ self.check_config_failure(config, cp.dp_parser) def test_config_contains_only_int(self): """Test that config is invalid when only an int""" config = """5""" self.check_config_failure(config, cp.dp_parser) def test_config_contains_only_float(self): """Test that config is invalid when only a float""" config = """5.5""" self.check_config_failure(config, cp.dp_parser) def test_config_contains_only_str(self): """Test config is invalid when only a string""" config = """aaaa""" self.check_config_failure(config, cp.dp_parser) def test_config_only_boolean(self): """Test config is invalid when only a boolean""" config = """False""" self.check_config_failure(config, cp.dp_parser) def test_config_only_datetime(self): """Test that config is invalid when only a datetime object""" config = """1967-07-31""" self.check_config_failure(config, cp.dp_parser) def test_config_contains_only_dash(self): """Test that config is invalid when only only a -""" config = """-""" self.check_config_failure(config, cp.dp_parser) def test_config_contains_only_array(self): """Test that config is invalid when only only [2, 2]""" config = """[2, 2]""" self.check_config_failure(config, cp.dp_parser) def test_config_only_empty_array(self): """Test that config is invalid when only only []""" config = """[]""" self.check_config_failure(config, cp.dp_parser) def test_unconfigured_acl(self): """Test that 
config is invalid when there are unconfigured acls""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 1: acl_in: access-port-protect tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_unconfigured_vlan_acl(self): """Test that config is invalid when only there are unconfigured acls""" config = """ vlans: office: vid: 100 acl_in: office-vlan-protect dps: sw1: dp_id: 0x1 interfaces: 1: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_routes_are_empty(self): """Test that config is invalid when vlan routes are empty""" config = """ vlans: office: vid: 100 routes: - route: ip_dst: ip_gw: dps: sw1: dp_id: 0x1 interfaces: 5: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_routes_not_strings(self): """Test config is invalid when vlan routes are not strings""" config = """ vlans: office: vid: 100 routes: - route: ip_dst: 5.5 ip_gw: [] dps: sw1: dp_id: 0x1 interfaces: 5: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_vips_not_strings(self): """Test that config is invalid when faucet_vips does not contain strings""" config = """ vlans: office: vid: 100 faucet_vips: [False, 5.5, []] dps: sw1: dp_id: 0x1 interfaces: 5: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_faucet_invalid_vips(self): """Test that config is rejected if faucet_vips does not contain valid ip addresses""" config = """ vlans: office: vid: 100 faucet_vips: ['aaaaa', '', '123421342'] dps: sw1: dp_id: 0x1 interfaces: 5: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_vlans_is_empty(self): """Test that config is rejected when vlans is empty""" config = """ vlans: dps: sw1: dp_id: 0x1 hardware: "Open vSwitch" interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_config_dps_is_empty(self): """Test that 
config is rejected when dps is empty""" config = """ vlans: office: vid: 100 dps: """ self.check_config_failure(config, cp.dp_parser) def test_including_invalid_files(self): """Test that config is rejected when including invalid files""" config = """ include: [-, False, 1967-06-07, 5.5, [5], {'5': 5}, testing] vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 5: tagged_vlans: [office] """ self.check_config_failure(config, cp.dp_parser) def test_config_vlans_on_stack(self): """Test that config is rejected vlans on a stack interface.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 hardware: "Open vSwitch" stack: priority: 1 interfaces: 1: native_vlan: office stack: dp: sw2 port: 1 2: native_vlan: office sw2: dp_id: 0x2 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw1 port: 1 2: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_config_stack(self): """Test valid stacking config.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 hardware: "Open vSwitch" stack: priority: 1 interfaces: 1: stack: dp: sw2 port: 1 2: native_vlan: office sw2: dp_id: 0x2 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw1 port: 1 2: native_vlan: office """ self.check_config_success(config, cp.dp_parser) def test_config_stack_and_non_stack(self): """Test stack and non-stacking config.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 hardware: "Open vSwitch" stack: priority: 1 interfaces: 1: stack: dp: sw2 port: 1 2: native_vlan: office sw2: dp_id: 0x2 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw1 port: 1 2: native_vlan: office sw3: dp_id: 0x3 hardware: "Open vSwitch" interfaces: 1: native_vlan: office 2: native_vlan: office """ self.check_config_success(config, cp.dp_parser) def test_config_stack_islands(self): """Test that stack islands don't exist.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 hardware: "Open vSwitch" stack: priority: 1 interfaces: 1: stack: dp: sw2 port: 1 2: 
native_vlan: office sw2: dp_id: 0x2 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw1 port: 1 2: native_vlan: office sw3: dp_id: 0x3 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw4 port: 1 2: native_vlan: office sw4: dp_id: 0x4 hardware: "Open vSwitch" interfaces: 1: stack: dp: sw3 port: 1 2: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_port_number(self): """Test port number is valid.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: testing: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_override_port(self): """Test override port is valid.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: testing: number: 1 native_vlan: office override_output_port: output_port output_port: number: 2 output_only: True """ self.check_config_success(config, cp.dp_parser) def test_one_port_dp(self): """Test port number is valid.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: testing: number: 1 native_vlan: office """ self.check_config_success(config, cp.dp_parser) def test_dp_id_too_big(self): """Test DP ID is valid.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0xfffffffffffffffffffffffffffffffff interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_invalid_vid(self): """Test VID is valid.""" config = """ vlans: office: vid: 10000 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_routers_empty(self): """Test with empty router config.""" config = """ routers: router-1: vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_valid_mac(self): """Test with valid MAC.""" config = """ vlans: office: vid: 100 faucet_mac: '11:22:33:44:55:66' dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_success(config, 
cp.dp_parser) def test_invalid_mac(self): """Test with invalid MAC.""" config = """ vlans: office: vid: 100 faucet_mac: '11:22:33:44:55:66:77:88' dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_empty_mac(self): """Test with empty MAC.""" config = """ vlans: office: vid: 100 faucet_mac: '' dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_empty_vid(self): """Test empty VID.""" config = """ vlans: office: vid: dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office """ self.check_config_failure(config, cp.dp_parser) def test_empty_interfaces(self): """Test empty interfaces.""" config = """ vlans: office: vid: dps: sw1: dp_id: 0x1 """ self.check_config_failure(config, cp.dp_parser) def test_invalid_interfaces(self): """Test invalid interfaces.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: {'5': 5} """ self.check_config_failure(config, cp.dp_parser) def test_unresolved_mirror_ports(self): """Test invalid mirror port name.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office acl_in: mirror_all acls: mirror_all: - rule: actions: mirror: UNRESOLVED allow: 1 """ self.check_config_failure(config, cp.dp_parser) def test_resolved_mirror_port(self): """Test can use name reference to mirrored port.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: mirrored_port: number: 1 native_vlan: office 2: mirror: mirrored_port """ self.check_config_success(config, cp.dp_parser) def test_vlans_on_mirror_ports(self): """Test invalid VLANs configured on a mirror port.""" config = """ vlans: office: vid: 100 dps: sw1: dp_id: 0x1 interfaces: 1: native_vlan: office 2: native_vlan: office mirror: 1 """ self.check_config_failure(config, cp.dp_parser) def test_unresolved_output_ports(self): """Test invalid output port name.""" config = """ vlans: office: vid: 100 dps:
= ExternalCircuit(top, solverE, total_rho, collector_voltage, cathode_area * 1e4, plate, debug=False) plate.voltage = circuit # plate.voltage = collector_voltage if install_grid: if grid_scraper == "regular": fastscraper_flag = 0 collectlpdata_flag = 1 elif grid_scraper == "fast": fastscraper_flag = 1 collectlpdata_flag = 0 else: raise Exception("Illegal grid_scraper value") installconductor(accel_grid) installconductor(source, dfill=largepos) installconductor(plate, dfill=largepos) scraper = ParticleScraper([accel_grid], lfastscraper=fastscraper_flag, lcollectlpdata=collectlpdata_flag, lsaveintercept=0) scraper = ParticleScraper([source, plate], lfastscraper=0, lcollectlpdata=True, lsaveintercept=True) scraper_dictionary = {'grid': 1, 'source': 2, 'collector': 3} else: installconductor(source, dfill=largepos) installconductor(plate, dfill=largepos) scraper = ParticleScraper([source, plate], lcollectlpdata=True, lsaveintercept=True) scraper_dictionary = {'source': 1, 'collector': 2} ############# # DIAGNOSTICS ############# # Particle/Field diagnostic options if particle_diagnostic_switch: particleperiod = 250 # TEMP particle_diagnostic_0 = ParticleDiagnostic(period=particleperiod, top=top, w3d=w3d, species={species.name: species for species in listofallspecies}, # if species.name == 'measurement'}, # TEMP comm_world=comm_world, lparallel_output=False, write_dir=diagDir[:-5]) installafterstep(particle_diagnostic_0.write) if field_diagnostic_switch: fieldperiod = 200 efield_diagnostic_0 = FieldDiagnostic.ElectrostaticFields(solver=solverE, top=top, w3d=w3d, comm_world=comm_world, period=fieldperiod) installafterstep(efield_diagnostic_0.write) # Set externally derived parameters for efficiency calculation efficiency.tec_parameters['A_em'][0] = cathode_area * 1e4 # cm**2 if install_grid: efficiency.tec_parameters['occlusion'][0] = efficiency.calculate_occlusion(**efficiency.tec_parameters) else: efficiency.tec_parameters['occlusion'][0] = 0.0 
########################## # SOLVER SETTINGS/GENERATE ########################## # prevent gist from starting upon setup top.lprntpara = false top.lpsplots = false top.verbosity = -1 # Reduce solver verbosity solverE.mgverbose = -1 # further reduce output upon stepping - prevents websocket timeouts in Jupyter notebook init_iters = 20000 regular_iters = 200 init_tol = 1e-6 regular_tol = 1e-6 # Time Step # Determine an appropriate time step based upon estimated final velocity if install_grid: vz_accel = sqrt(2. * abs(V_grid) * np.abs(background_beam.charge) / background_beam.mass) else: vz_accel = sqrt(2. * abs(collector_voltage) * np.abs(background_beam.charge) / background_beam.mass) vzfinal = vz_accel + beam_beta * c dt = dz / vzfinal top.dt = dt solverE.mgmaxiters = init_iters solverE.mgtol = init_tol package("w3d") generate() solverE.mgtol = regular_tol solverE.mgmaxiters = regular_iters print("weights (background) (measurement): {}, {}".format(background_beam.sw, measurement_beam.sw)) # Use rnpinject to set number of macroparticles emitted background_beam.rnpinject = PTCL_PER_STEP measurement_beam.rnpinject = 0 # measurement beam is off at start ################## # CONTROL SEQUENCE ################## # Run until steady state is achieved (flat current profile at collector) (measurement species turned on) # Record data for effiency calculation # Switch off measurement species and wait for simulation to clear (background species is switched on) early_abort = 0 # If true will flag output data to notify startup_time = 4 * gap_distance / vz_accel # ~4 crossing times to approach steady-state with external circuit crossing_measurements = 10 # Number of crossing times to record for steps_per_crossing = int(gap_distance / vz_accel / dt) ss_check_interval = int(steps_per_crossing / 2.) 
ss_max_checks = 8 # Maximum number of of times to run steady-state check procedure before aborting times = [] # Write out timing of cycle steps to file clock = 0 # clock tracks the current, total simulation-runtime # Run initial block of steps record_time(stept, times, startup_time) clock += times[-1] stop_initialization = top.it # for diag file print("Completed Initialization on Step {}\nInitialization run time: {}".format(top.it, times[-1])) # Start checking for Steady State Operation tol = 0.01 ss_flag = 0 check_count = 0 # Track number of times steady-state check performed while ss_flag != 1 and check_count < ss_max_checks: if (max_wall_time - clock) < times[-1]: early_abort = 1 break record_time(step, times, ss_check_interval*4) clock += times[-1] tstart = (top.it - ss_check_interval) * top.dt _, current1 = plate.get_current_history(js=None, l_lost=1, l_emit=0, l_image=0, tmin=tstart, tmax=None, nt=1) current = np.sum(current1) if np.abs(current) < 0.5 * efficiency.tec_parameters['occlusion'][0] * beam_current: # If too little current is getting through run another check cycle check_count += 1 print("Completed check {}, insufficient current, running again for {} steps".format(check_count, ss_check_interval)) continue ss_flag = 1 # print np.abs(current), 0.5 * efficiency.tec_parameters['occlusion'][0] * beam_current # try: # # If steady_state check initialized no need to do it again # steady_state # except NameError: # # If this is the first pass with sufficient current then initialize the check # if check_count == 0: # # If the initial period was long enough to get current on collector then use that # steady_state = SteadyState(top, plate, steps_per_crossing) # else: # # If we had to run several steady state checks with no current then just use the period with current # steady_state = SteadyState(top, plate, ss_check_interval) # # ss_flag = steady_state(steps_per_crossing) check_count += 1 stop_ss_check = top.it # For diag file # If there was a failure to 
reach steady state after specified number of checks then pass directly end if check_count == ss_max_checks: early_abort = -1 crossing_measurements = 0 # Start Steady State Operation print(" Steady State Reached.\nStarting efficiency " "recording for {} crossing times.\nThis will be {} steps".format(crossing_measurements, steps_per_crossing * crossing_measurements)) # particle_diagnostic_0.period = steps_per_crossing #TEMP commented out # Switch to measurement beam species measurement_beam.rnpinject = PTCL_PER_STEP background_beam.rnpinject = 0 # Install Zcrossing Diagnostic ZCross = ZCrossingParticles(zz=grid_height * gap_distance / 200., laccumulate=1) emitter_flux = [] crossing_wall_time = times[-1] * steps_per_crossing / ss_check_interval # Estimate wall time for one crossing print('crossing_wall_time estimate: {}, for {} steps'.format(crossing_wall_time, steps_per_crossing)) print('wind-down loop time estimate: {}, for {} steps'.format(crossing_wall_time * steps_per_crossing / ss_check_interval, ss_check_interval)) for sint in range(crossing_measurements): # Kill the loop and proceed to writeout if we don't have time to complete the loop if (max_wall_time - clock) < crossing_wall_time: early_abort = 2 break record_time(step, times, steps_per_crossing) clock += times[-1] # Re-evaluate time for next loop crossing_wall_time = times[-1] # Record velocities of emitted particles for later KE calculation velocity_array = np.array([ZCross.getvx(js=measurement_beam.js), ZCross.getvy(js=measurement_beam.js), ZCross.getvz(js=measurement_beam.js)]).transpose() # velocity_array = velocity_array[velocity_array[:, 2] >= 0.] # Filter particles moving to emitter emitter_flux.append(velocity_array) ZCross.clear() # Clear ZcrossingParticles memory print("Measurement: {} of {} intervals completed. 
Interval run time: {} s".format(sint + 1, crossing_measurements, times[-1])) stop_eff_calc = top.it # For diag file # Run wind-down until measurement particles have cleared measurement_beam.rnpinject = 0 background_beam.rnpinject = PTCL_PER_STEP initial_population = measurement_beam.npsim[0] measurement_tol = 0.03 # if particle_diagnostic_switch: # particle_diagnostic_0.period = ss_check_interval while measurement_beam.npsim[0] > measurement_tol * initial_population: # Kill the loop and proceed to writeout if we don't have time to complete the loop if (max_wall_time - clock) < crossing_wall_time * ss_check_interval / steps_per_crossing : early_abort = 3 break record_time(step, times, ss_check_interval) clock += times[-1] # Record velocities of emitted particles for later KE calculation # Check is required here as measurement_beam particles will not always be passing through if ZCross.getvx(js=measurement_beam.js).shape[0] > 0: velocity_array = np.array([ZCross.getvx(js=measurement_beam.js), ZCross.getvy(js=measurement_beam.js), ZCross.getvz(js=measurement_beam.js)]).transpose() print "Backwards particles: {}".format(np.where(velocity_array[:, 2] < 0.)[0].shape[0]) # velocity_array = velocity_array[velocity_array[:, 2] >= 0.] 
# Filter particles moving to emitter emitter_flux.append(velocity_array) ZCross.clear() # Clear ZcrossingParticles memory print(" Wind-down: Taking {} steps, On Step: {}, {} Particles Left".format(ss_check_interval, top.it, measurement_beam.npsim[0])) stop_winddown = top.it # For diag file ###################### # CALCULATE EFFICIENCY ###################### try: emitter_flux = np.vstack(emitter_flux) except ValueError: # If this triggered then measurement emission never took place # Run took too long probably and abort took place emitter_flux = np.array([[0., 0., 0.]]) # Find integrated charge on each conductor surface_charge = analyze_scraped_particles(top, measurement_beam, solverE) measured_charge = {} for key in surface_charge: # We can abuse the fact that js=0 for background species to filter it from the sum measured_charge[key] = np.sum(surface_charge[key][:, 1] * surface_charge[key][:, 3]) # Set derived parameters from simulation efficiency.tec_parameters['run_time'][0] = crossing_measurements * steps_per_crossing * dt if crossing_measurements == 0: # Set to large value to force all powers and currents to zero efficiency.tec_parameters['run_time'][0] = 1e20 # Find total number of measurement particles that were emitted total_macroparticles = measurement_beam.npsim[0] + np.sum([measured_charge[key] for key in surface_charge]) efficiency.tec_parameters['J_em'][0] = e * (total_macroparticles - measured_charge[scraper_dictionary['source']]) \ * measurement_beam.sw / \ efficiency.tec_parameters['run_time'][0] / efficiency.tec_parameters['A_em'][0] # If grid isn't being used then J_grid will not be in scraper dict try: efficiency.tec_parameters['J_grid'][0] = e * measured_charge[scraper_dictionary['grid']] * measurement_beam.sw / \ efficiency.tec_parameters['run_time'][0] / \ (efficiency.tec_parameters['occlusion'][0] * efficiency.tec_parameters['A_em'][0]) except KeyError: efficiency.tec_parameters['J_grid'][0] = 0.0 efficiency.tec_parameters['J_ec'][0] = e * 
measured_charge[scraper_dictionary['collector']] * measurement_beam.sw / \ efficiency.tec_parameters['run_time'][0] / efficiency.tec_parameters['A_em'][0] efficiency.tec_parameters['P_em'][0] = efficiency.calculate_power_flux(emitter_flux, measurement_beam.sw, efficiency.tec_parameters['phi_em'][0], **efficiency.tec_parameters) # Efficiency calculation print("Efficiency") efficiency_result = efficiency.calculate_efficiency(**efficiency.tec_parameters) print("Overall Efficiency: {}".format(efficiency_result['eta'])) print("Total steps: {}".format(top.it)) ###################### # FINAL RUN STATISTICS ###################### if comm_world.rank ==
<gh_stars>10-100 #!/usr/bin/python # Copyright 2017 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Plotting code.""" from functools import partial import sys from common import decimal_fmt import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd MAX_SEPARATE = 5 CUT_VALUE_HEAD_SECS = 0.01 CUT_VALUE_TAIL_SECS = 0.1 NUM_BINS = 50 NUM_LARGEST_BINS = 10 MIN_SEPARATION = 0.05 MIN_FLOW_GOODPUT = 5000000 NUM_MEAN_MARKERS = 10 # per-experiment, per-direction color/marker DIR_CONN_COLOR_D = { 'delta1': { 'fwd': ['darkblue', 'x'], 'rev': ['darkgreen', 'x'], }, 'delta2': { 'fwd': ['darkblue', 'x'], 'rev': ['darkgreen', 'x'], }, 'delta3': { 'fwd': ['blue', 'x'], 'rev': ['green', 'x'], }, 'delta4': { 'fwd': ['blue', 'x'], 'rev': ['green', 'x'], }, } # per-connection color/marker IP_CONN_COLOR_D = { 0: ['g', 'o'], 1: ['r', 'v'], 2: ['c', '^'], 3: ['m', '>'], 4: ['y', '<'], 5: ['k', 'o'], 6: ['g', 'v'], 7: ['r', '^'], 8: ['c', '>'], 9: ['m', '<'], 'remaining': ['b', 'x'], } class Plotter(object): """Class that processes analyzed files and plots them.""" def __init__(self, infile, outfile, analysis_type, plot_format, plot_title, src_reverse, debug): self._infile = infile self._outfile = outfile self._analysis_type = analysis_type self._plot_format = plot_format self._plot_title = plot_title self._src_reverse = src_reverse self._debug = debug milli = 1e-3 self._format_milli 
= ticker.FuncFormatter( lambda y, pos: '{0:g}'.format(y / milli)) kilo = 1e+3 self._format_kilo = ticker.FuncFormatter( lambda y, pos: '{0:g}'.format(y / kilo)) mega = 1e+6 self._format_mega = ticker.FuncFormatter( lambda y, pos: '{0:g}'.format(y / mega)) cent = 100 self._format_percent = ticker.FuncFormatter( lambda y, pos: '{0:g}'.format(y * cent)) def run(self): """Plot a result file obtained from the pcap analysis.""" df = self.read_input() if self._analysis_type == 'flow': self.flow_process_data(df) elif self._analysis_type == 'packet': self.packet_process_data(df) def read_input(self): """Read an input file into a pandas dataframe.""" # prepare the input fd # we cannot use controlled execution (`with open(...) as f:`) as we want # to support sys.stdin too. f = (open(self._infile, 'r') if self._infile != sys.stdin else sys.stdin) try: if self._analysis_type == 'flow': df = self.flow_read_input(f) elif self._analysis_type == 'packet': df = self.packet_read_input(f) finally: if self._infile != sys.stdin: f.close() return df def flow_read_input(self, f): """Read input file into a pandas dataframe (flow type).""" lst = [] i = 0 for line in f: try: (connhash, first_ts, last_ts, ip_proto, tcp_seq_syn_sport, tcp_seq_syn_dport, ip_total_pkt, ip_total_bytes, pps, ip_bitrate, tcp_bytes, tcp_goodput_bytes, tcp_goodput_bitrate, delta1_small_mean, delta1_small_median, delta1_large_mean, delta1_large_median) = line.split() except ValueError: sys.stderr.write('discarding line = "%s"\n' % line) continue if line[0] == '#': # this is a comment continue if self._debug > 0: sys.stderr.write('%s\n' % line) if pps == '-' or ip_bitrate == '-': continue lst += [[i, connhash, float(first_ts), float(last_ts), int(ip_proto), tcp_seq_syn_sport, tcp_seq_syn_dport, int(ip_total_pkt), int(ip_total_bytes), float(pps), float(ip_bitrate), int(tcp_bytes), int(tcp_goodput_bytes), float(tcp_goodput_bitrate), float(delta1_small_mean), float(delta1_small_median), float(delta1_large_mean), 
float(delta1_large_median)]] i += 1 df = pd.DataFrame(lst, columns=['order', 'connhash', 'first_ts', 'last_ts', 'ip_proto', 'tcp_seq_syn_sport', 'tcp_seq_syn_dport', 'ip_total_pkt', 'ip_total_bytes', 'pps', 'ip_bitrate', 'tcp_bytes', 'tcp_goodput_bytes', 'tcp_goodput_bitrate', 'delta1_small_mean', 'delta1_small_median', 'delta1_large_mean', 'delta1_large_median']) return df def packet_read_input(self, f): """Read input file into a pandas dataframe (packet type).""" lst = [] i = 0 for line in f: try: t, timestamp, src, dst, delta, traffic = line.split() except ValueError: sys.stderr.write('discarding line = "%s"\n' % line) continue if line[0] == '#': # this is a comment continue if self._debug > 0: sys.stderr.write('%s\n' % line) lst += [[i, t, float(timestamp), src, dst, float(delta), traffic]] i += 1 df = pd.DataFrame(lst, columns=['order', 'type', 'timestamp', 'src', 'dst', 'delta', 'traffic']) return df def flow_process_data(self, df): """Process a pandas dataframe (flow mode).""" # create the matplotlib figure fig = plt.figure(figsize=(9, 7)) # ax_pps = fig.add_subplot(5, 1, 1) ax_tcp_rate = fig.add_subplot(4, 1, 1) ax_delta1 = fig.add_subplot(4, 1, 2) ax_tcp_total = fig.add_subplot(4, 1, 3) ax_tcp_extra_bytes = fig.add_subplot(4, 1, 4) # ax_ip_rate = fig.add_subplot(4, 1, 4) # shift x axis time_shift = float(df[:1].first_ts) format_shift = ticker.FuncFormatter( lambda x, pos: '{0:g}'.format(x - time_shift)) for ax in (ax_tcp_rate, ax_delta1, ax_tcp_total, ax_tcp_extra_bytes): ax.xaxis.set_major_formatter(format_shift) xticks = ax.get_xticks() + (time_shift - int(time_shift)) ax.set_xticks(xticks) xmin = float(df.first_ts[:1]) xmax = float(df.first_ts[-1:]) if xmin < xmax: extra_space = (xmax - xmin) * .05 / 2 ax.set_xlim(xmin - extra_space, xmax + extra_space) # scale y axis # ax_pps.yaxis.set_major_formatter(self._format_kilo) ax_tcp_rate.yaxis.set_major_formatter(self._format_mega) ax_tcp_total.yaxis.set_major_formatter(self._format_mega) 
ax_tcp_extra_bytes.yaxis.set_major_formatter(self._format_percent) ax_delta1.yaxis.set_major_formatter(self._format_milli) # ax_ip_rate.yaxis.set_major_formatter(self._format_mega) # ax_delta1.plot(df.first_ts, df.delta1_large_mean, # linestyle='', marker='v', # color='g', markersize=3) # select tcp flows only df_tcp = df[(df.ip_proto == 6)] label, color, marker = 'tcp', 'b', 'x' # ax_pps.plot(df_tcp.first_ts, df_tcp.ip_total_pkt, # label=label, linestyle='', marker=marker, # color=color, markersize=3) # plot TCP flow goodput ax_tcp_rate.plot(df_tcp.first_ts, df_tcp.tcp_goodput_bitrate, label=label, linestyle='', marker=marker, color=color, markersize=3) tcp_goodput_quantile_01 = df_tcp.tcp_goodput_bitrate.quantile(q=0.01) ax_tcp_rate.axhline(y=tcp_goodput_quantile_01, color='g', ls='dotted', lw=0.5) tcp_goodput_quantile_50 = df_tcp.tcp_goodput_bitrate.quantile(q=0.50) ax_tcp_rate.axhline(y=tcp_goodput_quantile_50, color='g', ls='dashed', lw=0.5) tcp_goodput_quantile_99 = df_tcp.tcp_goodput_bitrate.quantile(q=0.99) ax_tcp_rate.axhline(y=tcp_goodput_quantile_99, color='g', ls='dotted', lw=0.5) # zoom on around the median ax_tcp_rate.set_ylim([0, 10 * tcp_goodput_quantile_50]) # add a label with the median ax_tcp_rate.text(time_shift, tcp_goodput_quantile_50, decimal_fmt(tcp_goodput_quantile_50, 'bps'), fontsize='x-small') # plot flow media delta1 ax_delta1.plot(df_tcp.first_ts, df_tcp.delta1_large_median, linestyle='', marker='x', color='b', markersize=3) delta1_quantile_01 = df_tcp.delta1_large_median.quantile(q=0.01) ax_delta1.axhline(y=delta1_quantile_01, color='g', ls='dotted', lw=0.5) delta1_quantile_50 = df_tcp.delta1_large_median.quantile(q=0.50) ax_delta1.axhline(y=delta1_quantile_50, color='g', ls='dashed', lw=0.5) delta1_quantile_99 = df_tcp.delta1_large_median.quantile(q=0.99) ax_delta1.axhline(y=delta1_quantile_99, color='g', ls='dotted', lw=0.5) # zoom on around the median ax_delta1.set_ylim([0, 10 * delta1_quantile_50]) # add a label with the median 
ax_delta1.text(time_shift, delta1_quantile_50, '%s' % decimal_fmt(delta1_quantile_50, 'sec'), fontsize='x-small') # plot flow goodput (absolute) ax_tcp_total.plot(df_tcp.first_ts, df_tcp.tcp_goodput_bytes, label=label, linestyle='', marker=marker, color=color, markersize=3) tcp_bytes_quantile_50 = df_tcp.tcp_goodput_bytes.quantile(q=0.50) ax_tcp_total.axhline(y=tcp_bytes_quantile_50, color='g', ls='dashed', lw=0.5) tcp_extra_percent = ((df_tcp.tcp_bytes - df_tcp.tcp_goodput_bytes) / df_tcp.tcp_goodput_bytes) ax_tcp_extra_bytes.plot(df_tcp.first_ts, tcp_extra_percent, label=label, linestyle='', marker=marker, color=color, markersize=3) ax_tcp_extra_bytes.axhline(y=0, color='k', ls='solid', lw=0.5) ax_tcp_extra_bytes.axhline(y=tcp_extra_percent.mean(), color='g', ls='dashed', lw=0.5) # ax_ip_rate.plot(df_tcp.first_ts, df_tcp.ip_bitrate, # label=label, linestyle='', marker=marker, # color=color, markersize=3) total_line = 'total { flows: %s pkt: %s ip_bytes: %s }' % ( decimal_fmt(len(df_tcp), ''), decimal_fmt(sum(df_tcp['ip_total_pkt']), 'pkt'), decimal_fmt(sum(df_tcp['ip_total_bytes']), 'B')) tcp_flows_over_threshold = len( df_tcp[(df_tcp.tcp_goodput_bitrate > MIN_FLOW_GOODPUT)]) total_line += '\ntcp_goodput { median: %s percent_over_%s: %f } ' % ( decimal_fmt(tcp_goodput_quantile_50, 'bps'), decimal_fmt(MIN_FLOW_GOODPUT, 'bps'), 100.0 * tcp_flows_over_threshold / len(df_tcp)) total_line += '\ndelta1 { median: %s } ' % ( decimal_fmt(delta1_quantile_50, 'sec')) ax_tcp_extra_bytes.set_xlabel('Flow Start (sec) -- ' + total_line, fontsize='small') # ax_pps.set_ylabel('Flow Throughput (Kpps)') ax_tcp_rate.set_ylabel('Flow Goodput\n(Mbps)') ax_delta1.set_ylabel('Flow Median\ndelta1 (msec)') ax_tcp_total.set_ylabel('Flow Goodput\n(MB)') ax_tcp_extra_bytes.set_ylabel('Flow Extra\nTCP Bytes (%)') # ax_ip_rate.set_ylabel('Flow IP Throughput (Mbps)') # ax_tcp_total.legend() ax_tcp_rate.set_title(self._plot_title) plt.savefig(self._outfile, format=self._plot_format) def 
packet_process_data(self, df): """Process a pandas dataframe (packet mode).""" # create the matplotlib figure fig = plt.figure(figsize=(9, 7)) fig.subplots_adjust(hspace=.4) fig.canvas.set_window_title('packet_process_data') outer_grid = gridspec.GridSpec(2, 2) layout = [ ((0, 0), 'delta1', 'time', '-'), ((0, 1), 'delta2', 'time', '-'), # ((1, 0), 'delta3', 'time', '-'), ((1, 0), 'delta4', 'distro', 'data'), ((1, 1), 'delta4', 'distro', 'ack'), ] # split the data depending on the direction def match_direction(reverse, x): addr = x['src'] if ':' in addr: addr, _ = addr.split(':', 1) if not reverse or not addr.startswith(reverse): return 'fwd' else: return 'rev' bound_match_direction = partial(match_direction, self._src_reverse) df['dir'] = df.apply(bound_match_direction, axis=1) ax = {} subplot_spec = {} for (position, delta, graph, traffic) in layout: ax[delta] = {} subplot_spec[delta] = {} # get the data frame to analyze here data = {} for direction in ('fwd', 'rev'): data[direction] = df[(df.dir == direction) & (df.type == delta)] if delta == 'delta4': # remove the heads of the trains (hystart_ack_delta in tcp_cubic.c) data[direction] = data[direction][(data[direction].delta < 0.002)] # print the data frames # ax[delta][graph] = fig.add_subplot(4, 2, position) subplot_spec[delta][graph] = outer_grid[position[0], position[1]] ax[delta][graph] = plt.subplot(subplot_spec[delta][graph]) if graph == 'time': # print the time series ax[delta][graph] = self.add_timeseries_graph( delta, ax[delta][graph], subplot_spec[delta][graph], data) elif graph == 'distro': # print the distribution self.add_distribution_graph(delta, ax[delta][graph], data, traffic) # main title plt.suptitle(self._plot_title, fontsize='x-small') # synchronize the y axes for delta1 and delta2 ymin_l = [] ymax_l = [] for delta in ('delta1', 'delta2'): for vax in ax[delta]['time']: ymin, ymax = vax.get_ylim() ymin_l.append(ymin) ymax_l.append(ymax) ymin = min(ymin_l) ymax = max(ymax_l) for delta in 
('delta1', 'delta2'): for vax in ax[delta]['time']: vax.set_ylim(ymin, ymax) # add the legend ax['delta1']['time'][1].legend(prop={'size': 'xx-small'}) plt.savefig(self._outfile, format=self._plot_format) def add_timeseries_graph(self, delta, _, subplot_spec, data): """Print the time series.""" total_line = '%s' % delta time_shift = {} # ensure there is at
import cgi import logging import os import sys import tempfile import threading from http import HTTPStatus from http.server import __version__ as http_version, HTTPServer, SimpleHTTPRequestHandler # noqa # Use from..import for importlib as it needs to bootstrap some stuff before "util" etc works, which import doesn't do from importlib import import_module, util as importlib_util from time import sleep from typing import AnyStr, Callable, Optional from urllib.parse import parse_qs, ParseResult, urlparse logger = logging.getLogger(__name__) def action_identifier(verb: str, url: str) -> str: return verb.upper() + ' ' + url class Action: def __init__(self, handler: Callable, origin: str = 'unknown', **kwargs): self.handler = handler self.origin = origin self._kwargs = kwargs def nextcall(self, **kwargs) -> 'Action': self._kwargs = kwargs return self def __call__(self, *args, **kwargs): final_kwargs = dict(**self._kwargs) final_kwargs.update(kwargs) return self.handler(*args, **final_kwargs) class ActionRequestHandlerMeta(type): """ Meta class with "class level" properties for the request handler below. Since every request results in a new instance, this is a way to provide properties for easy of use """ def __init__(cls, *args, **kwargs): cls._action_sources = [] cls._actions = {} cls._fallback = False cls._exceptions = None cls._multi_threaded = False super().__init__(*args, **kwargs) @property def action_sources(cls): """ Action sources define modules where to look for matching functions. A source can be either an 'str', in which case we assume its a dot-notated (base) module, or a dict that contains a 'path' key (in which we assume you're specifying modules by a disk path. :return: """ return cls._action_sources @property def actions(cls): """ If you want to predefine actions, it can be done via this property. 
It's a dict that contains the URIs as key (without the QUERY part) and the matching function handler to call :return: """ return cls._actions @action_sources.setter def action_sources(cls, sources: list): cls._action_sources = sources @actions.setter def actions(cls, actions: dict): cls._actions = actions def add_action(cls, verb: str, url: str, handler: Callable, origin: str = 'direct'): cls._actions[action_identifier(verb, url)] = Action(handler, origin) def remove_action(cls, verb: str, url: str, ): del cls._actions[action_identifier(verb, url)] def enable_fallback(cls, status: bool = True): """ Fallback means that when a multi-level url is encountered, the resulting action can be linked "higher up" if no explicit match is found. For example: /test/test2 would normally map to function test_test2() (ignoring the prefix) but with fallback enabled, test() would also match if no specific match is found). /test/test2/test3 would also match test() in this situation. Can be handy but is potentially very dangerous if you're unaware so off by default. 
:param status: :return: """ cls._fallback = status def exception_handler(cls, handler: Callable): cls._exceptions = handler class ActionRequestHandler(SimpleHTTPRequestHandler, metaclass=ActionRequestHandlerMeta): _action_modules = None server_version = 'SimpleActionHTTP/' + http_version def __init__(self, *args, **kwargs): self._file_path = None self._mime_type = None self._parsed_url = None self._response_sent = False super().__init__(*args, **kwargs) def do_GET(self): self._dispatch() def do_POST(self): form = {} files = {} if self.headers['content-type'].endswith('/json'): # Support json posts as well, fake cgi fields import json content_len = int(self.headers.get('content-length', 0)) content = json.loads(self.rfile.read(content_len)) for k, v in content.items(): form.setdefault(k, []).append(cgi.MiniFieldStorage(k, v)) else: fields = cgi.FieldStorage(self.rfile, self.headers, environ={'REQUEST_METHOD': 'POST'}) for f in fields.list: target = files if f.filename else form # Since HTTP allows for the same field to be present multiple times, add as list target.setdefault(f.name, []).append(f) self._dispatch(form, files) def do_HEAD(self): self._dispatch() def do_PUT(self): self._dispatch() def send_json(self, content=None, path=None): """ Shortcut function to send a json file back to the client :param path: Physical path on the disk if this is a pre-existing file :param content: File content to use if not a file (str, dict, bytes) """ if not isinstance(content, bytes): import json content = json.dumps(content) + '\n' if isinstance(content, str): content = content.encode() self.send_file(path, content, 'application/json') def send_file(self, path=None, content: bytes = None, type=None): if content: if not type: raise RuntimeError('Cannot determine mime type based on content, please specify') (fd, path) = tempfile.mkstemp() os.write(fd, content) # Temp-store the path & mime type so we can "dummy return" them self._file_path = path self._mime_type = type # Try to 
send the header already, this results in a file if successful f = self.send_head() if f: try: # Copy the file to the client self.copyfile(f, self.wfile) finally: f.close() if content: os.close(fd) os.unlink(path) def translate_path(self, path): """ Override the base function and return the same path, since we're already working with those (we only use the send-file functionality, not the entire file hosting) :param path: :return: """ return self._file_path def guess_type(self, path): """ Override that supports pre-specified type :param path: :return: """ return self._mime_type if self._mime_type else super().guess_type(path) def reply(self, http_code: int, response_message: str = None, content: AnyStr = None, content_type: AnyStr = None): self.send_response(http_code, response_message) body = None if content: body = content.encode('UTF-8', 'replace') # If we have content, make sure to add the correct headers if content_type: self.send_header("Content-Type", content_type) self.send_header('Content-Length', str(len(content))) self.end_headers() if body: self.wfile.write(body) def success(self, response_message: str = None, content=None, content_type=None): """ Shortcut function to report success """ self.reply(HTTPStatus.OK, response_message, content, content_type) def handle_expect_100(self): (action, _) = self._find_action(self.path) if action.origin == 'error': self.send_404() return False return super().handle_expect_100() def _dispatch(self, form=None, files=None): action = self._find_action(self.path) if action and action.origin == 'error': logger.debug("Calling error handler for [%s %s]" % (self.command, self.parsed_url.path)) if action: parameters = { "url": self.parsed_url } if self.parsed_url.query: parameters['query'] = parse_qs(self.parsed_url.query) if form: parameters['form'] = form if files: parameters['files'] = files def worker(): try: action(self, **parameters) except Exception as e: if self._exceptions: arguments = {'request': self, 'exc_info': 
sys.exc_info(), 'original_handler': action} arguments.update(parameters) self._exceptions(**arguments) if self._multi_threaded: threading.Thread(target=worker).run() else: worker() else: logger.error("No action for [%s %s]" % (self.command, self.parsed_url.path)) self.send_404() def send_response(self, code, message=None): """Limit response sending to once""" if not self._response_sent: self._response_sent = True super().send_response(code, message) def send_404(self): self.send_error(HTTPStatus.NOT_FOUND) @property def parsed_url(self) -> ParseResult: if not self._parsed_url: self._parsed_url = urlparse(self.path) return self._parsed_url def _find_action(self, path: str) -> Optional[Action]: action_name = self._request_identifier() prefixes = [self.command.lower(), 'any', ''] # load the "top level" action modules self._load_action_modules() if action_name in self._actions: # We already found this return self._actions[action_name] for identifier in self._action_modules: parts = list(filter(None, urlparse(path).path.split('/'))) (module, parts) = self._find_module(identifier, parts) # If we still have parts left, that will be (part of) the function name if module: if parts: remainder = [] while parts: # Function name is a combo of the remaining parts function = '_'.join(parts) # With optional <COMMAND>, "any" or no prefix for prefix in prefixes: try_function = '_'.join([prefix, function]).lstrip('_') if hasattr(module, try_function): return self._save_action(action_name, getattr(module, try_function), 'direct' if not remainder else 'fallback') if not self._fallback: # No falling back to broader modules supported, abort break # Still here and we appear to have fallback enabled, pop the least significant part and try # again for a broader action function remainder.insert(0, parts.pop(-1)) else: method = self._find_catchall(module, self.command) if method: # Since this is a dedicated module, we still consider it a direct origin return self._save_action(action_name, 
method, 'direct') # Still here, look through all of the modules again and find the first catch-all function that is either the # VERB/COMMAND or "any" for _, module in self._action_modules.items(): module = module[0] method = self._find_catchall(module, self.command) if method: return self._save_action(action_name, method, 'catchall') # Do we have 404 actions? for prefix in [self.command, 'ANY']: name = ' '.join([prefix, '404']).strip() if name in self._actions: return Action(self._actions[name], 'error') # nothing found, use default functionality return None def _request_identifier(self) -> str: return action_identifier(self.command, self.parsed_url.path) def _save_action(self, action_name, method, origin): logger.debug( 'Action "%s" was mapped to "%s.%s" [%s]' % (action_name, method.__module__, method.__name__, origin)) self._actions[action_name] = Action(method, origin, original_url=self.parsed_url) return self._actions[action_name] def _find_module(self, identifier: str, parts: list): """ Given the module identified by "identifier", try to enumerate into its submodules as deep as possible using the parts list and return the found module and the remaining parts. :param identifier: :param parts: :return: """ parts = list(parts) (module, origin) = self._action_modules[identifier] if origin == 'module': (module, loaded_parts) = self._load_sub_module(module, parts) if isinstance(loaded_parts, str): loaded_parts = loaded_parts.split('.') parts = parts[len(loaded_parts) if loaded_parts else 0:] elif origin == 'path': # See how "low" we can go directory-wise first while parts: part = parts.pop(0) try_dir = os.path.join(base, part)
<reponame>steven-rr/AE8843<gh_stars>0 import random import matplotlib.pyplot as plt import numpy as np from matplotlib import animation from scipy import integrate from scipy import linalg as LA import cvxpy as cp # generate seed for random generator random.seed(239) class RLdMPC: # State indices xidx, yidx, thetaidx = 0, 1, 2 vidx, omegaidx = 0, 1 xmax, ymax, thetamax = 10, 10, 10 # Define parameter weights qv = np.sqrt(1) qomega = np.sqrt(1) # Define time t0, tf = 0, 20 num_pts = int(1/.01) time = np.linspace(t0, tf, num_pts+1) dT = time[1]-time[0] time_elapsed = 0 mean, sq_sigma = 0, 1 yhat_idx, u_idx, phi_idx, z_idx, r_idx = 0, 1, 2, 3, 4 def __init__(self, vehicle, C_est, N, R=1): self.horizon = N self.vehicle = vehicle self.num_states = vehicle.state.size self.num_controls = vehicle.B.shape[1] self.R = R self.noise = np.random.normal( self.mean, self.sq_sigma, size=(self.num_states*self.num_pts, 1)) self.C = self.vehicle.output_matrix() self.C_est = C_est self.CCov = 1000*np.eye(self.num_states) self.time_elapsed = 0 self.y = [np.matmul(self.C, self.vehicle.state) + self.noise[0]] self.phi = [self.vehicle.state] self.P = [self.CCov] self.params = [self.C_est] self.controls = [np.zeros((self.num_controls, 1))] self.Kstar = np.zeros((self.num_states, self.num_states)) # Initialize animation self.plot = Plotter(self.vehicle.state[self.xidx], self.vehicle.state[self.yidx], self.dT) def update_time(self): self.time_elapsed += 1 def get_time_elapsed(self): return self.time_elapsed def make_vector(self, item): """ :param item: item to repeat :return: return list of repeated items """ return np.array([item]*self.horizon) def compute_state_with_opt_mu(self): """ This method propagates the state of the system by the horizon :return: Null """ # Initialization # Step 1 obj = 0 prior = 0 #self.vehicle.arrived() for t in range(self.num_pts+1): # minimize A = self.vehicle.system_matrix() b = self.vehicle.control_matrix() phit = self.vehicle.state yt = self.y[t] print(yt) Q 
= self.CCov rt = np.linalg.pinv(Q) utk = [] try: self.Kstar = LA.solve_continuous_are(A, b, Q, self.R) except ValueError or np.linalg.LinAlgError: print('Using previous P') for idx in range(self.horizon): utk.append(self.propagate(phit.T[0], A, b, Q, self.Kstar)) phit = np.matmul(A*self.dT + np.eye(3), phit) \ + utk[idx] yt = np.matmul(self.C_est, phit) rt += np.matmul(phit, phit.T) Q = np.linalg.pinv(rt) #utk = self.opt(t, A, b) print('OPTIMAL') control = np.array(utk[0], dtype=float) print(control) # Step 3 of RLdMPC self.vehicle.update(control) new_phi = self.vehicle.state new_output = np.matmul(self.C, new_phi) + self.get_noise(t) new_deviation = np.subtract(new_output, np.matmul(self.C_est, new_phi)) G = np.matmul(np.matmul(self.CCov, new_phi), np.linalg.pinv( np.matmul(self.C_est, self.C_est.T) + self.quadratic_cost(new_phi, self.CCov))) self.CCov -= np.matmul(np.eye(self.num_states) - np.matmul(G, new_phi.T), self.CCov) self.C_est += np.matmul(G, new_deviation.T).T # Store new values self.controls.append(control) self.phi.append(new_phi) self.y.append(new_output) self.P.append(self.CCov) self.params.append(self.C_est) print("----------------------------------") print("Next initial state: \n") print(self.vehicle.state[self.xidx], "\n", self.vehicle.state[self.yidx]) print("----------------------------------") obj += new_output*new_output + self.R*control*control + np.matmul(np.matmul(new_phi.T, self.CCov), new_phi) if np.linalg.norm(new_phi) <= 1e-3: break self.update_time() return obj def compute_state_with_rldmpc(self): def compute_state_with_opt_mu(self): """ This method propagates the state of the system by the horizon :return: Null """ # Initialization # Step 1 obj = 0 prior = 0 # self.vehicle.arrived() for t in range(self.num_pts + 1): # minimize A = self.vehicle.system_matrix() b = self.vehicle.control_matrix() phit = self.vehicle.state yt = self.y[t] print(yt) Q = self.CCov rt = np.linalg.pinv(Q) try: self.Kstar = LA.solve_continuous_are(A, b, Q, 
self.R) except ValueError or np.linalg.LinAlgError: print('Using previous P') utk = self.opt(t, A, b) print('OPTIMAL') control = np.array([[utk[0]]], dtype=float) print(control) # Step 3 of RLdMPC self.vehicle.update(control) new_phi = self.vehicle.state new_output = np.matmul(self.C, new_phi) + self.get_noise(t) new_deviation = np.subtract(new_output, np.matmul(self.C_est, new_phi)) G = np.matmul(np.matmul(self.CCov, new_phi), np.linalg.pinv( np.matmul(self.C_est, self.C_est.T) + self.quadratic_cost(new_phi, self.CCov))) self.CCov -= np.matmul(np.eye(self.num_states) - np.matmul(G, new_phi.T), self.CCov) self.C_est += np.matmul(G, new_deviation.T).T # Store new values self.controls.append(control) self.phi.append(new_phi) self.y.append(new_output) self.P.append(self.CCov) self.params.append(self.C_est) print("----------------------------------") print("Next initial state: \n") print(self.vehicle.state[self.xidx], "\n", self.vehicle.state[self.yidx]) print("----------------------------------") self.update_time() def opt(self, t, A, b): obj = 0 print('MINIMIZING') phit = self.vehicle.state. \ reshape(self.num_states, ) yt = self.y[t]. 
\ reshape(self.num_controls, ) rt = np.linalg.pinv(self.CCov).flatten() zt = np.matmul(self.CCov, phit) # Form and solve control problem phitk = cp.Variable((self.num_states, self.horizon + 1)) utk = cp.Variable((self.num_controls, self.horizon)) ytk = cp.Variable((self.num_states, self.horizon)) ztk = cp.Variable((self.num_states, self.horizon) ) rbartk = cp.Variable((self.num_states ** 2, self.horizon), nonneg=True) Rtk = cp.Variable((self.num_states, self.num_states)) # Parameters Amat = cp.Parameter((self.num_states, self.num_states)) bmat = cp.Parameter((self.num_states, self.num_controls)) Rmat = cp.Parameter() Cparam = cp.Parameter((self.num_controls, self.num_states)) Kstar = cp.Parameter((self.num_states, self.num_states), PSD=True) Eye = cp.Parameter((self.num_states, self.num_states), PSD=True) # Initialize params Amat.value = A bmat.value = b Rmat.value = self.R Cparam.value = self.C_est Kstar.value = self.Kstar Eye.value = np.eye(self.num_states) constr = [] stateshape = self.vehicle.state.shape controlshape = (self.num_controls, 1) rshape = self.CCov.shape rflatshape = (self.num_states ** 2,) zero = np.zeros(stateshape).reshape(self.num_states, ) input = np.zeros(controlshape) alpha = 1 for idx in range(self.horizon): print('---------------') print(idx) print('---------------') # Define objective function # Stage Cost obj += cp.multiply(self.R, cp.sum_squares(utk[:, idx])) \ + cp.quad_form(phitk[:, idx], Eye)\ #+ cp.sum_squares(ytk[:, idx]) \ # + cp.sum(phitk[:, idx].T @ ztk[:, idx]) # + phiz print(obj) constr += [ # Lower Input Bounds cp.norm(-utk[self.vidx, idx], 'inf') <= -self.vehicle.v_min, # Upper Input Bounds cp.norm(utk[self.vidx, idx], 'inf') <= self.vehicle.v_max, # Lower Input Bounds #cp.norm(-ytk[self.xidx, idx], 'inf') <= -self.xmax, # Upper Input Bounds # cp.norm(ytk[self.xidx, idx], 'inf') <= self.xmax, ] constr += [ # Simulate next value phitk[:, idx + 1] == Amat @ phitk[:, idx] + bmat @ utk[:, idx], #ytk[:, idx] == Cparam @ phitk[:, 
idx], ] # terminal cost obj += cp.quad_form(phitk[:, self.horizon], Kstar) constr += [ phitk[:, 0] == phit, #ytk[:, 0] == yt, # rbartk[:, 0] == rt, # ztk[:, 0] == zt #utk[:, idx] == ut ] ocp = cp.Problem(cp.Minimize(obj), constr) ocp.solve(solver=cp.CVXOPT, verbose=True) return utk.value[0] def get_noise(self, t): idx = 2*t + self.num_controls return self.noise[idx:idx + self.num_controls] def objective(self, utk, t, A, b, P, Kstar): return self.calculate_finite_horizon(t, utk, A,b,P,Kstar) def calculate_finite_horizon(self, t, ut, A, b, P, Kstar): phi = self.vehicle.state utk = ut.reshape(2, 1) print('state') print(phi) r = np.linalg.inv(P) # Calculate finite horizon obj = 0 for idx in range(self.horizon): print("Horizon: ", idx, "for time: ", t) if idx == 0: phi = np.matmul(A, phi) \ + np.matmul(self.vehicle.control_matrix(phi[self.thetaidx]), utk ) y = self.y[t] z = np.matmul(self.P[t], phi) u = utk r = r else: control = self.propagate( phi, A, b, np.linalg.inv(r), self.Kstar ) # update per horizon # phi phi = np.matmul(A, phi) \ + np.matmul(self.vehicle.control_matrix(phi[self.thetaidx]), control) # u u = control # y y = np.matmul(self.C_est, phi) # R r += np.matmul(phi.T, phi) ptk = np.linalg.inv(r.reshape(self.CCov.shape)) z = np.matmul(ptk, phi) print(phi) print(y) print(u) print(z) obj += self.cost(phi, y, u, z, self.R) obj += self.quadratic_cost(phi, Kstar) return obj def cost(self, phi, y, u, z, R): y_squared = np.matmul(y.T,y) u_squared = self.quadratic_cost(u, R) phiz = np.matmul(phi.T, z) cost = np.sum([y_squared, u_squared, phiz ]) print(cost) return cost def propagate(self, vector, A, b, Q, Kstar): """ :param vector: the current deviation of the goal state :param idx: the current horizon at which the dynamics are estimated :return: the next state of the system and an update to control matrix """ # add noise to the output try: self.Kstar = LA.solve_continuous_are(A, b, Q, self.R) except ValueError or np.linalg.LinAlgError: print('Using previous P') 
btk = np.matmul(b.T, Kstar) K = np.matmul( np.linalg.inv( np.matmul( btk, b ) + self.R ), np.matmul( btk, A ) ) delta_pts = int(1/10) # Project the solver span = np.linspace( self.get_time_elapsed(), self.get_time_elapsed() + self.horizon, ) # Drive deviation to zero by solving xdot = (A-BK)x # Output the noisy signal sol = integrate.solve_ivp( fun=self.sys_func, t_span=[span[0], span[-1]], y0=vector, args=(A, b, K), method='LSODA', t_eval=span ) # Optimal Trajectory and Control which are used for the Certainty Equivalence strategy optimal_trajectory = sol.y optimal_control = np.matmul(-K, optimal_trajectory) # Only take the first action and limit the entries v_clipped = np.clip(optimal_control[0, [0]], self.vehicle.v_min, self.vehicle.v_max) # Apply control to current state control = np.array([v_clipped], dtype=float) return control def sys_func(self, t, x, a, b, k): """ :param t: Current time at which the dynamics are evaluated :param x: Current state of the system :param a: the system matrix :param b: the control matrix :param k: the gain matrix :param param_est: output matrix :return: output estimation for each time, t, evaluated with sys dynamics """ return np.matmul(a - b.dot(k), x.reshape(self.num_states, 1)).T def output_func(self, t, x, a, b, k, C): """ :param t: Current time at which the dynamics are evaluated :param x: Current state of the system :param a: the system matrix :param b: the control matrix :param k: the gain matrix :param param_est: output matrix :return: output
<reponame>Wappsto/python-wappsto-iot import datetime import itertools import json import socket import threading import time import traceback import uuid from typing import Dict from typing import List from typing import Optional from typing import Tuple from typing import Union from pydantic import BaseModel from pydantic import Field from utils import pkg_smithing import rich class ErrorException(Exception): def __init__(self, code, msg, data): self.code = code self.msg = msg self.data = data class ObjectModel(BaseModel): type: str name: Optional[str] extra_info: dict = Field(default_factory=dict) children: List[uuid.UUID] = Field(default_factory=list) parent: Optional[uuid.UUID] = None uuid: Optional[uuid.UUID] class UrlObject(BaseModel): type: str parent: Optional[uuid.UUID] uuid: Optional[uuid.UUID] class Parameters(BaseModel): left: str op: Optional[str] right: Optional[str] def pairwise(iterable): """ Pair the iterables, in groups of 2. s -> (s0, s1), (s2, s3), (s4, s5), ... Taking advantage of the iter structure and zip's use of iters. 
Args: iterable: The list to be paired """ a = iter(iterable) return itertools.zip_longest(a, a) class SimuServer(object): param_op_list: List[str] = [ '==', '>=', '<=', '!=', '=', '<', '>' ] def __init__( self, network_uuid: uuid.UUID, name: str, description: Optional[str] = None ): self.network_uuid: uuid.UUID = network_uuid self.network_name: str = name self.objects: Dict[uuid.UUID, ObjectModel] = {} self.objects[self.network_uuid] = ObjectModel( type='network', uuid=self.network_uuid, # children=[], name=name, extra_info={'description': description} if description else {} ) self.failed_data: List[Tuple[bool, str]] = [] self.killed = threading.Event() self.data_in: List[bytes] = [] self.data_to_be_send: list[bytes] = [] def get_network_obj(self) -> ObjectModel: return self.objects[self.network_uuid] def get_obj(self, name: str) -> Optional[ObjectModel]: for obj in self.objects.values(): if obj.name == name: return obj return None def add_object( self, this_uuid: uuid.UUID, this_type: str, parent_uuid: Optional[uuid.UUID], this_name: Optional[str], children: Optional[List] = None, extra_info: Optional[dict] = None ): if parent_uuid not in self.objects and this_type != 'network': raise ValueError("Parent need to exist!") self.objects[this_uuid] = ObjectModel( parent=parent_uuid, type=this_type, uuid=this_uuid, name=this_name, children=children if children else [], extra_info=extra_info if extra_info else {} ) if this_type != 'network': if not parent_uuid: raise ValueError("Parent UUID are needed!") self.objects[parent_uuid].children.append(this_uuid) def _add_object_from_dict( self, parent_uuid: uuid.UUID, self_type: str, data: dict ) -> uuid.UUID: this_uuid: uuid.UUID = uuid.UUID(data['meta']['id']) this_type = data['meta'].get('type') if self_type != "state" else "state" children: List = [] this_name: Optional[str] = None if this_type: msg = f"ADD: Conflict in type: {this_type} == {self_type}, for: {this_uuid}" self.add_check(this_type == self_type, msg) if 
self_type == 'value': this_name = data.pop('name') children = data.pop('state') if 'state' in data.keys() else [] elif self_type == 'device': this_name = data.pop('name') children = data.pop('value') if 'value' in data.keys() else [] elif self_type == 'network': this_name = data.pop('name') children = data.pop('device') if 'device' in data.keys() else [] elif self_type == 'state': if 'timestamp' in data.keys(): data['timestamp'] = pkg_smithing.str_to_datetime( timestamp=data['timestamp'] ) if 'meta' in data.keys(): data.pop('meta') self.add_object( this_uuid=this_uuid, this_type=this_type, parent_uuid=parent_uuid, this_name=this_name, children=children, extra_info=data ) return this_uuid def _update_object_from_dict( self, this_uuid: uuid.UUID, self_type: str, data: dict ) -> uuid.UUID: this_type = data['meta'].get('type') if self_type != "state" else "state" children: List = [] this_name: Optional[str] = None if this_type: msg = f"UPDATE: Conflict in type: {this_type} == {self_type}, for: {this_uuid}" self.add_check(this_type == self_type, msg) if self_type == 'value': this_name = data.pop('name') children = data.pop('state') if 'state' in data.keys() else [] elif self_type == 'device': this_name = data.pop('name') children = data.pop('value') if 'value' in data.keys() else [] elif self_type == 'network': this_name = data.pop('name') children = data.pop('device') if 'device' in data.keys() else [] elif self_type == 'state': if 'timestamp' in data.keys(): # ERROR: Deepcopy? Else it changes the data. 
data['timestamp'] = pkg_smithing.str_to_datetime( timestamp=data['timestamp'] ) if 'meta' in data.keys(): data.pop('meta') old_data = self.objects[this_uuid] old_data.extra_info.update(data) self.add_object( this_uuid=this_uuid, this_type=this_type, parent_uuid=old_data.parent, this_name=this_name if this_name else old_data.name, children=children if children else old_data.children, extra_info=old_data.extra_info ) return this_uuid def add_check(self, result, text): self.failed_data.append((result, text)) def fail_check(self): for test, text in self.failed_data: assert test, text def get_socket( self, mock_rw_socket, mock_ssl_socket ): self.killed.clear() mock_ssl_socket.return_value.close.side_effect = lambda: self.killed.set() def socket_simu(*args, **kwargs) -> bytes: timeout = 0 if mock_rw_socket.return_value.settimeout.call_args: t_data, _ = mock_rw_socket.return_value.settimeout.call_args timeout = t_data[0] + time.perf_counter() while not self.killed.is_set(): if timeout <= time.perf_counter(): break if not mock_ssl_socket.return_value.sendall.call_args: if self.data_to_be_send: t_data = self.data_to_be_send.pop() return t_data time.sleep(0.01) continue temp_data, _ = mock_ssl_socket.return_value.sendall.call_args send_data = temp_data[0] mock_ssl_socket.return_value.sendall.call_args = None data = None try: self.data_in.append(send_data) data = self.rpc_handle(send_data) except Exception as error: self.add_check( False, f"send_data={send_data}\n{error}\n{traceback.format_exc()}" ) raise error else: if data != b'': return data time.sleep(0.1) raise socket.timeout(timeout) mock_ssl_socket.return_value.recv.side_effect = socket_simu def send_data( self, data: Union[dict, list], pkg_method: str, pkg_url: str, pkg_id: Optional[str] = None, ) -> None: if pkg_id is None: pkg_id = pkg_smithing.random_string() pkg_data = json.dumps( pkg_smithing.rpc_pkg_request( pkg_method=pkg_method, pkg_id=pkg_id, pkg_url=pkg_url, pkg_data=data, ) ).encode() 
self.data_to_be_send.append(pkg_data) # TODO: Add to wait for reply list/function. def send_control( self, obj_uuid: uuid.UUID, data: Union[str, int, float], timestamp: datetime.datetime ) -> None: pkg_id = f"Server_PUT_{pkg_smithing.random_string()}" pkg_data = pkg_smithing.state_pkg( obj_uuid=obj_uuid, # type="Control", data=str(data), timestamp=timestamp ) self.send_data( data=pkg_data, pkg_method="PUT", pkg_id=pkg_id, pkg_url=f"/state/{obj_uuid}", ) self._update_object_from_dict( this_uuid=obj_uuid, self_type="state", data=pkg_data ) def send_delete(self, obj_uuid, obj_type: str): pkg_id = f"Server_DELETE_{pkg_smithing.random_string()}" self.send_data( data={}, pkg_method="DELETE", pkg_id=pkg_id, pkg_url=f"/{obj_type}/{obj_uuid}", ) def _params_parser(self, params) -> List[Parameters]: param_list: List[Parameters] = [] for parameters in params.split('&'): for op in self.param_op_list: if op in parameters: temp = parameters.split(op) param_list.append( Parameters( left=temp[0], op=op, right=temp[-1], ) ) break else: param_list.append( Parameters( left=parameters, ) ) return param_list def _objectmodel_parser(self, path) -> UrlObject: # NOTE: Tested with: # /network/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx/device/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx/value/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx/state/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx # /network/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx/device # /device/xxxxxxxx-xxxx-4xxx-axxx-xxxxxxxxxxxx # /network/ parent_uuid: Optional[uuid.UUID] = None object_type: Optional[str] = None object_uuid: Optional[uuid.UUID] = None for obj_type, obj_uuid in pairwise(path.split('/')[1:]): parent_uuid = object_uuid object_type = obj_type object_uuid = obj_uuid if obj_uuid else None return UrlObject( type=object_type, uuid=object_uuid, parent=parent_uuid, ) def _url_parser(self, url) -> Tuple[UrlObject, List[Parameters]]: params = url.split('?') param_list: List[Parameters] = self._params_parser(params[-1]) if len(params) > 1 else [] obj: 
        # NOTE(review): tail of a method whose `def` line lies outside this
        # chunk (appears to finish a URL parser returning a
        # (UrlObject, [Parameters]) pair). `obj`/`param_list` are defined in
        # the unseen part above; left byte-identical.
        UrlObject = self._objectmodel_parser(url.split('?')[0])

        return (
            obj,
            param_list
        )

    def rpc_handle(self, data: bytes) -> bytes:
        """Dispatch a raw JSON-RPC payload to the matching HTTP-verb handler.

        ``data`` is a UTF-8 encoded JSON-RPC package: either a single request
        object or a batch (list) of them.  Returns the JSON-encoded reply as
        bytes, or ``b''`` when there is nothing to answer.
        """
        return_value: list = []
        p_data = json.loads(data.decode())
        # Normalise: treat a single request as a batch of one.
        if not isinstance(p_data, list):
            p_data = [p_data]
        for j_data in p_data:
            pkg_id = j_data['id']
            error = j_data.get('error')
            if error:
                self.add_check(False, error)
                return_value.append(
                    pkg_smithing.rpc_pkg_result(
                        pkg_id=pkg_id,
                        pkg_data=True
                    )
                )
            # Replies (result/error members) terminate processing of the
            # whole payload immediately.
            if 'result' in j_data:
                # TODO: Handle Success & Failed package.
                return b''
            if 'error' in j_data:
                self.add_check(False, j_data['error']['message'])
                return b''
            pkg_method = j_data['method']
            the_url = j_data['params']['url']
            the_data = j_data['params'].get('data')
            # 'fast' asks the handler to skip generating a full reply body.
            fast_send = j_data['params'].get('meta', {}).get('fast', False)
            # identifier = j_data['params']['meta']['identifier']
            url_obj: Tuple[UrlObject, List[Parameters]] = self._url_parser(the_url)
            try:
                if pkg_method.upper() == 'GET':
                    r_data = self.get_handle(
                        data=the_data,
                        fast_send=fast_send,
                        url_obj=url_obj
                    )
                elif pkg_method.upper() == 'POST':
                    r_data = self.post_handle(
                        data=the_data,
                        fast_send=fast_send,
                        url_obj=url_obj
                    )
                elif pkg_method.upper() == 'PUT':
                    r_data = self.put_handle(
                        data=the_data,
                        fast_send=fast_send,
                        url_obj=url_obj
                    )
                elif pkg_method.upper() == 'DELETE':
                    r_data = self.delete_handle(
                        data=the_data,
                        fast_send=fast_send,
                        url_obj=url_obj
                    )
                elif pkg_method.upper() == 'HEAD':
                    r_data = self.head_handle(
                        data=the_data,
                        fast_send=fast_send,
                        url_obj=url_obj
                    )
                else:
                    self.add_check(False, f"Unknown method: {pkg_method}")
                    # JSON-RPC 2.0 code -32601 == "Method not found".
                    # NOTE(review): this returns the error package directly
                    # instead of appending to return_value like the other
                    # error paths — confirm intentional.
                    return pkg_smithing.error_pkg(
                        pkg_id=pkg_id,
                        data=j_data,
                        code=-32601,
                        msg="Unhandled Method",
                    )
            except ErrorException as err:
                self.add_check(False, f"Could not parse data: {data!r}")
                return_value.append(
                    pkg_smithing.error_pkg(
                        pkg_id=pkg_id,
                        code=err.code,
                        msg=err.msg,
                        data=err.data
                    )
                )
            # NOTE(review): in the collapsed original this append appears to
            # run even after the except branch above, where r_data was never
            # assigned — looks like a missing `continue`; confirm against the
            # original file's real indentation before changing.
            return_value.append(
                pkg_smithing.rpc_pkg_result(
                    pkg_id=pkg_id,
                    pkg_data=r_data
                )
            )
        if not return_value:
            return b''
        # A batch answer is serialised as a JSON list; a single answer is
        # unwrapped before encoding.
        return json.dumps(
            return_value if len(return_value) > 1 else
            return_value[-1]
        ).encode()

    def get_handle(
        self,
        data: dict,
        url_obj: Tuple[UrlObject, List[Parameters]],
        fast_send=False
    ) -> Union[dict, bool]:
        """Serve a GET: either a parameterised search or a single-object fetch.

        Raises ErrorException (code -32602) when the addressed UUID is
        unknown.  ``fast_send`` is accepted for signature symmetry with the
        other handlers but unused here.
        """
        the_uuid = url_obj[0].uuid
        the_type = url_obj[0].type
        # Query parameters or a missing UUID mean "search", not "fetch".
        if url_obj[1] or not the_uuid:
            return self._search_obj(data=data, url_obj=url_obj)
        # The network object is addressed by the session's own network UUID.
        if the_type == "network":
            the_uuid = self.network_uuid
        if the_uuid not in self.objects.keys():
            self.add_check(False, f"GET: {the_uuid} not found.")
            raise ErrorException(
                code=-32602,
                msg="UUID not found!",
                data=str(the_uuid)
            )
        return self._obj_generate(obj_uuid=the_uuid)

    def post_handle(
        self,
        data: dict,
        url_obj: Tuple[UrlObject, List[Parameters]],
        fast_send=False
    ) -> Union[dict, bool]:
        """Serve a POST: create (or, for 'network', update) an object.

        Returns True when ``fast_send`` is set, otherwise the generated
        representation of the new object.  Raises ErrorException (-32602)
        when the parent UUID is unknown.
        """
        parent_uuid = url_obj[0].parent
        the_type = url_obj[0].type
        the_uuid = uuid.UUID(data['meta']['id'])
        if the_type == "network":
            # The network itself is a singleton: POST behaves as update.
            new_unit_uuid = self._update_object_from_dict(
                this_uuid=the_uuid,
                self_type=the_type,
                data=data,
            )
        elif parent_uuid not in self.objects.keys():
            self.add_check(False, f"POST: {parent_uuid}/{the_type} not found.")
            raise ErrorException(
                code=-32602,
                msg="UUID not found!",
                data=str(parent_uuid)
            )
        elif parent_uuid is not None:
            new_unit_uuid = self._add_object_from_dict(
                parent_uuid=parent_uuid,
                self_type=the_type,
                data=data,
            )
        # NOTE(review): if parent_uuid is None and the type is not "network",
        # no branch assigns new_unit_uuid and the final return raises
        # NameError — confirm whether that case can occur.
        if fast_send:
            return True
        return self._obj_generate(obj_uuid=new_unit_uuid)

    def put_handle(
        self,
        data: dict,
        url_obj: Tuple[UrlObject, List[Parameters]],
        fast_send=False
    ) -> Union[dict, bool]:
        """Serve a PUT: update an existing object addressed by UUID.

        Raises ErrorException (-32602) for unknown/missing UUIDs.
        """
        the_uuid = url_obj[0].uuid
        if the_uuid not in self.objects.keys() or not the_uuid:
            # NOTE(review): message says "GET:" in all handlers — likely a
            # copy/paste artefact; left unchanged (runtime string).
            self.add_check(False, f"GET: {the_uuid} not found.")
            raise ErrorException(
                code=-32602,
                msg="UUID not found!",
                data=str(the_uuid)
            )
        self._update_object_from_dict(
            this_uuid=the_uuid,
            self_type=url_obj[0].type,
            data=data
        )
        if fast_send:
            return True
        return self._obj_generate(obj_uuid=the_uuid)

    def delete_handle(
        self,
        data: dict,
        url_obj: Tuple[UrlObject, List[Parameters]],
        fast_send=False
    ) -> Union[dict, bool]:
        """Serve a DELETE: remove the addressed object.

        NOTE(review): this method is truncated at the chunk boundary — the
        tail after ``if fast_send:`` is not visible here.
        """
        the_uuid = url_obj[0].uuid
        if the_uuid not in self.objects.keys() or not the_uuid:
            self.add_check(False, f"GET: {the_uuid} not found.")
            raise ErrorException(
                code=-32602,
                msg="UUID not found!",
                data=str(the_uuid)
            )
        # Snapshot the representation before deleting so it can be returned.
        the_obj = self._obj_generate(obj_uuid=the_uuid)
        del self.objects[the_uuid]
        if fast_send:
<filename>CHEBYSHEV/TVB_Method/cheb_class.py
import numpy as np
import itertools
from numpy.polynomial import chebyshev as cheb

# NOTE(review): this triple-quoted block sits AFTER the imports, so it is a
# plain expression statement, not the module docstring. Left in place.
"""
Module for defining the class of Chebyshev polynomials, as well as various
related classes and methods, including:

Classes:
-------
Polynomial: Superclass for MultiPower and MultiCheb. Contains methods and
    attributes applicable to both subclasses
MultiCheb: Chebyshev polynomials in arbitrary dimension.
Term: Terms are just tuples of exponents with the degrevlex ordering

Methods:
--------
match_poly_dimensions(polys): Matches the dimensions of a list of polynomials.
mon_combos_highest(mon, numLeft): Find all the monomials of a given degree and
    returns them. Works recursively.
mon_combos(mon, numLeft): Finds all the monomials _up to_ a given degree and
    returns them. Works recursively.
sort_polys_by_degree(polys): Sorts the polynomials by their degree.
get_var_list(dim): Return a list of tuples corresponding to the variables
    [x_1, x_2, ..., x_n]. The tuple for x_1 is (1,0,0,...,0), and for x_i
    the 1 is in the ith slot.
slice_bottom(arr): Gets the nd slices needed to slice an array into the
    bottom corner of another. There is probably a better (vectorized) way
    to do this.
slice_top(arr): Construct a list of slices needed to put an array into the
    upper left corner of another. There is probably a better way to do this.
match_size(a,b): Reshape two coefficient ndarrays to have the same shape.
makePolyCoeffMatrix(inputString): Take a string input of a polynomial and
    return the coefficient matrix for it. Useful for making things of high
    degree or dimension so you don't have to make it by hand.
"""


class Polynomial(object):
    '''
    Superclass for MultiPower and MultiCheb. Contains methods and attributes
    that are applicable to both subclasses.

    Attributes
    ----------
    coeff
        The coefficient matrix represented in the object.
    dim
        The number of dimensions of the coefficient matrix
    order
        Ordering type given as a string
    shape
        The shape of the coefficient matrix
    lead_term
        The polynomial term with the largest total degree
    degree
        The total degree of the lead_term
    lead_coeff
        The coeff of the lead_term

    Parameters
    ----------
    coeff : ndarray
        Coefficients of the polynomial
    order : string
    lead_term : Tuple
        Default is None. Accepts tuple or tuple-like inputs
    clean_zeros : bool
        Default is True. If True, all extra rows, columns, etc of all zeroes
        are removed from matrix of coefficients.

    Methods
    -------
    clean_coeff
        Removes extra rows, columns, etc of zeroes from end of matrix of
        coefficients
    match_size
        Matches the shape of two matrices.
    monomialList
        Creates a list of monomials that make up the polynomial in degrevlex
        order.
    monSort
        Calls monomial list.
    update_lead_term
        Finds the lead_term of a polynomial
    __call__
        Evaluates a polynomial at a certain point.
    __eq__
        Checks if two polynomials are equal.
    __ne__
        Checks if two polynomials are not equal.
    '''

    def __init__(self, coeff, order='degrevlex', lead_term=None, clean_zeros=True):
        '''
        order : string
            Term order to use for the polynomial. degrevlex is default.
            Currently no other order is implemented.
        '''
        if isinstance(coeff, np.ndarray):
            self.coeff = coeff
        elif isinstance(coeff, str):
            # String input is parsed into a coefficient tensor by the
            # module-level helper (defined outside this chunk).
            self.coeff = makePolyCoeffMatrix(coeff)
        else:
            raise ValueError('coeff must be an np.array or a string!')
        if clean_zeros:
            self.clean_coeff()
        self.dim = self.coeff.ndim
        self.order = order
        # Jacobian cache; populated lazily elsewhere.
        self.jac = None
        self.shape = self.coeff.shape
        if lead_term is None:
            self.update_lead_term()
        else:
            self.lead_term = tuple(lead_term)
            self.degree = sum(self.lead_term)
            self.lead_coeff = self.coeff[self.lead_term]

    def clean_coeff(self):
        """
        Remove 0s on the outside of the coeff matrix. Acts in place.

        For each axis, repeatedly strips the LAST hyperplane while it is
        all-zero (axes of length 1 are left alone so the array keeps its
        dimensionality).
        """
        for axis in range(self.coeff.ndim):
            change = True
            while change:
                change = False
                if self.coeff.shape[axis] == 1:
                    continue
                axisCount = 0
                slices = list()
                # Build an index selecting the final hyperplane along `axis`
                # and everything along the other axes.
                for i in self.coeff.shape:
                    if axisCount == axis:
                        s = slice(i-1, i)
                    else:
                        s = slice(0, i)
                    slices.append(s)
                    axisCount += 1
                # NOTE(review): indexing with a *list* of slices is
                # deprecated in modern NumPy (needs tuple(slices)) — confirm
                # the NumPy version this targets.
                if np.sum(abs(self.coeff[slices])) == 0:
                    self.coeff = np.delete(self.coeff, -1, axis=axis)
                    change = True

    def update_lead_term(self):
        """
        Update the lead term of the polynomial.

        Chooses the maximum non-zero Term (degrevlex via Term.__lt__,
        defined outside this chunk); a zero polynomial gets lead_term None,
        lead_coeff 0 and degree -1.
        """
        non_zeros = list()
        for i in zip(*np.where(self.coeff != 0)):
            non_zeros.append(Term(i))
        if len(non_zeros) != 0:
            self.lead_term = max(non_zeros).val
            self.degree = sum(self.lead_term)
            self.lead_coeff = self.coeff[self.lead_term]
        else:
            self.lead_term = None
            self.lead_coeff = 0
            self.degree = -1

    def __call__(self, point):
        '''
        Evaluate the polynomial at 'point'. This method is overridden by the
        MultiPower and MultiCheb classes, so this definition only checks if
        the polynomial can be evaluated at the given point.

        Parameters
        ----------
        point : array-like
            the point at which to evaluate the polynomial

        Returns
        -------
        __call__ : complex
            value of the polynomial at the given point
        '''
        if len(point) != len(self.coeff.shape):
            raise ValueError('Cannot evaluate polynomial in {} variables at point {}'\
                .format(self.dim, point))

    def grad(self, point):
        '''
        Evaluates the gradient of the polynomial at 'point'. This method is
        overridden by the MultiPower and MultiCheb classes, so this
        definition only checks if the polynomial can be evaluated at the
        given point.

        Parameters
        ----------
        point : array-like
            the point at which to evaluate the polynomial

        Returns
        -------
        grad : ndarray
            Gradient of the polynomial at the given point.
        '''
        if len(point) != len(self.coeff.shape):
            raise ValueError('Cannot evaluate polynomial in {} variables at point {}'\
                .format(self.dim, point))

    def __eq__(self, other):
        '''
        Check if coeff matrices of 'self' and 'other' are the same.
        '''
        if self.shape != other.shape:
            return False
        # Approximate (tolerance-based) equality, not exact comparison.
        return np.allclose(self.coeff, other.coeff)

    def __ne__(self, other):
        '''
        Check if coeff matrices of 'self' and 'other' are not the same.
        '''
        return not (self == other)


###############################################################################
#### MULTI_CHEB ###############################################################
class MultiCheb(Polynomial):
    """
    A Chebyshev polynomial.

    Attributes
    ----------
    coeff: ndarray
        A tensor of coefficients whose i_1,...,i_{dim} entry corresponds to
        the coefficient of the term T_{i_1}(x_1)...T_{i_{dim}}(x_{dim})
    dim:
        The number of variables, dimension of polynomial.
    order: string
        Term order
    shape: tuple of ints
        The shape of the coefficient array.
    lead_term:
        The term with the largest total degree.
    degree: int
        The total degree of lead_term.
    lead_coeff
        The coefficient of the lead_term.
    terms : int
        Highest term of single-variable polynomials. The polynomial has
        degree at most terms+1 in each variable.

    Parameters
    ----------
    coeff : list(terms**dim) or np.array ([terms,] * dim)
        coefficents in given ordering.
    order : string
        Term order for Groebner calculations. Default = 'degrevlex'
    lead_term : list
        The index of the current leading coefficent. If None, this is
        computed at initialization.
    clean_zeros: boolean
        If True, strip off any rows or columns of zeros on the outside of
        the coefficient array.

    Methods
    -------
    __add__
        Add two MultiCheb polynomials.
    __sub__
        Subtract two MultiCheb polynomials.
    mon_mult
        Multiply a MultiCheb monomial by a MultiCheb polynomial.
    __call__
        Evaluate a MultiCheb polynomial at a point.
    """

    def __init__(self, coeff, order='degrevlex', lead_term=None, clean_zeros=True):
        super(MultiCheb, self).__init__(coeff, order, lead_term, clean_zeros)

    def __add__(self, other):
        '''
        Addition of two MultiCheb polynomials.

        Parameters
        ----------
        other : MultiCheb

        Returns
        -------
        MultiCheb
            The sum of the coeff of self and coeff of other.
        '''
        if self.shape != other.shape:
            new_self, new_other = match_size(self.coeff, other.coeff)
        else:
            new_self, new_other = self.coeff, other.coeff
        return MultiCheb(new_self + new_other, clean_zeros=False)

    def __sub__(self, other):
        '''
        Subtraction of two MultiCheb polynomials.

        Parameters
        ----------
        other : MultiCheb

        Returns
        -------
        MultiCheb
            The coeff values are the result of self.coeff - other.coeff.
        '''
        if self.shape != other.shape:
            new_self, new_other = match_size(self.coeff, other.coeff)
        else:
            new_self, new_other = self.coeff, other.coeff
        return MultiCheb((new_self - (new_other)), clean_zeros=False)

    # NOTE(review): declared without `self` — presumably decorated or called
    # as an unbound helper elsewhere; confirm against the full file. The
    # body is truncated at the chunk boundary.
    def _fold_in_i_dir(coeff_array, dim, fdim, size_in_fdim, fold_idx):
        """Find coeffs corresponding to T_|m-n| (referred to as 'folding' in
        some of this documentation) when multiplying a monomial times a
        Chebyshev polynomial.

        Multiplying the monomial T_m(x_i) times T_n(x_i) gives

        (T_{m+n}(x_i) + T_{|n-m|}(x_i))/2

        So multipying T_m(x_i) times polynomial P with coefficients in
        coeff_array results in a new coefficient array sol that has
        coeff_array in the bottom right corner plus a 'folded' copy of
        coeff_array in locations corresponding to |n-m|.
        This method returns the folded part (not dividing by 2)

        Parameters
        ----------
        coeff_array : ndarray
            coefficients of the polynomial.
        dim : int
            The number of dimensions in coeff_array.
        fdim : int
            The dimension being folded ('i' in the explanation above)
        size_in_fdim : int
            The size of the solution matrix in the dimension being folded.
        fold_idx : int
            The index to fold around ('m' in the explanation above)

        Returns
        -------
        sol : ndarray

        """
        # Folding around index 0 is the identity (|n-0| == n).
        if fold_idx == 0:
            return coeff_array
<reponame>fredstro/sage
r"""
Listing Sage packages

This module can be used to see which Sage packages are installed
and which packages are available for installation.

For more information about creating Sage packages, see the
"Packaging Third-Party Code" section of the Sage Developer's Guide.

Actually installing the packages should be done via the command
line, using the following commands:

- ``sage -i PACKAGE_NAME`` -- install the given package

- ``sage -f PACKAGE_NAME`` -- re-install the given package, even if it
  was already installed

Packages available
------------------

**Standard packages:**

{STANDARD_PACKAGES}

**Optional packages:**

{OPTIONAL_PACKAGES}

**Experimental packages:**

{EXPERIMENTAL_PACKAGES}

Functions
---------
"""
#*****************************************************************************
#       This program is free software: you can redistribute it and/or modify
#       it under the terms of the GNU General Public License as published by
#       the Free Software Foundation, either version 2 of the License, or
#       (at your option) any later version.
#                  http://www.gnu.org/licenses/
#*****************************************************************************

# NOTE(review): this module is Python 2 only (`from string import join`,
# `print x` doctest syntax below).

def _list_to_table(list_of_packages):
    r"""
    Helper function returning a ReST table from a list of strings.

    The entries are sorted vertically.

    INPUT:

    - ``list_of_packages`` -- a list

    EXAMPLE::

        sage: print sage.misc.package._list_to_table([str(x) for x in range(10)])
        .. csv-table::
            :class: contentstable
            :widths: 20, 20, 20, 20, 20
            :delim: |
        <BLANKLINE>
            ``0`` | ``2`` | ``4`` | ``6`` | ``8``
            ``1`` | ``3`` | ``5`` | ``7`` | ``9``
        <BLANKLINE>

    Check that the local list of packages matches the online list. Standard
    packages::

        sage: from sage.misc.package import _STANDARD_PACKAGES, standard_packages
        sage: a,b = standard_packages() # optional internet
        sage: set(a+b).symmetric_difference(_STANDARD_PACKAGES) # optional internet
        set()

    Optional packages::

        sage: from sage.misc.package import _OPTIONAL_PACKAGES, optional_packages
        sage: a,b = optional_packages() # optional internet
        sage: set(a+b).symmetric_difference(_OPTIONAL_PACKAGES) # optional internet
        set()

    Experimental packages::

        sage: from sage.misc.package import _EXPERIMENTAL_PACKAGES, experimental_packages
        sage: a,b = experimental_packages() # optional internet
        sage: set(a+b).symmetric_difference(_EXPERIMENTAL_PACKAGES) # optional internet
        set()
    """
    from string import join
    s = (".. csv-table::\n"
         "    :class: contentstable\n"
         "    :widths: 20, 20, 20, 20, 20\n"
         "    :delim: |\n\n")

    # Lay the names out column-major: `height` rows of `width` columns.
    length = len(list_of_packages)
    width = 5
    height = (length+width-1)//width

    # NOTE(review): sorted(...) followed by .sort() is redundant; kept as-is.
    list_of_packages = sorted(["``"+p+"``" if p else p
                               for p in list_of_packages])
    list_of_packages.sort()
    # Pad so every stride-slice below yields a full row.
    list_of_packages.extend(['']*width)

    for l in range(height):
        s += "    "+join(list_of_packages[l::height][:width], ' | ')+"\n"

    return s

# Snapshot lists used to cross-check the online package listings (see the
# doctests in _list_to_table above).
_STANDARD_PACKAGES = ['atlas', 'backports_ssl_match_hostname', 'boehm_gc',
                      'boost_cropped', 'bzip2', 'cddlib', 'cephes', 'certifi',
                      'cliquer', 'combinatorial_designs',
                      'conway_polynomials', 'cvxopt', 'cython', 'dateutil',
                      'docutils', 'ecl', 'eclib', 'ecm', 'elliptic_curves',
                      'fflas_ffpack', 'flint', 'flintqs', 'freetype', 'gap',
                      'gd', 'gdmodule', 'genus2reduction', 'gf2x', 'gfan',
                      'git', 'givaro', 'glpk', 'graphs', 'gsl', 'iconv',
                      'iml', 'ipython', 'jinja2', 'jmol', 'jsonschema',
                      'lcalc', 'libfplll', 'libgap', 'libgd', 'libpng',
                      'linbox', 'lrcalc', 'm4ri', 'm4rie', 'markupsafe',
                      'mathjax', 'matplotlib', 'maxima', 'mercurial',
                      'mistune', 'mpc', 'mpfi', 'mpfr', 'mpmath', 'ncurses',
                      'networkx', 'ntl', 'numpy', 'palp', 'pari',
                      'pari_galdata', 'pari_seadata_small', 'patch',
                      'pexpect', 'pil', 'pillow', 'pip', 'pkgconf',
                      'pkgconfig', 'planarity', 'polybori', 'polytopes_db',
                      'ppl', 'pycrypto', 'pygments', 'pynac', 'pyparsing',
                      'python', 'pyzmq', 'r', 'ratpoints', 'readline',
                      'rpy2', 'rubiks', 'rw', 'sage', 'sage_root',
                      'sage_scripts', 'sagenb', 'sagetex', 'scipy',
                      'setuptools', 'singular', 'six', 'sphinx',
                      'sqlalchemy', 'sqlite', 'symmetrica', 'sympow',
                      'sympy', 'tachyon', 'tornado', 'zeromq', 'zlib',
                      'zn_poly']

_OPTIONAL_PACKAGES = ['PyQt_x11', 'TOPCOM', 'arb', 'beautifulsoup',
                      'benzene', 'biopython', 'bliss', 'brian', 'buckygen',
                      'cbc', 'ccache', 'chomp', 'cluster_seed', 'coxeter3',
                      'cryptominisat', 'cunningham_tables', 'd3js',
                      'database_cremona_ellcurve', 'database_gap',
                      'database_jones_numfield', 'database_kohel',
                      'database_odlyzko_zeta', 'database_pari',
                      'database_stein_watkins',
                      'database_stein_watkins_mini',
                      'database_symbolic_data', 'dot2tex', 'extra_docs',
                      'gambit', 'gap_packages', 'gcc', 'gdb', 'giac',
                      'giacpy', 'ginv', 'git_trac', 'gmp', 'gnuplotpy',
                      'guppy', 'java3d', 'kash3', 'knoboo', 'libogg',
                      'libtheora', 'lidia', 'lie', 'lrslib', 'mcqd',
                      'modular_decomposition', 'mpi4py', 'mpir', 'nauty',
                      'normaliz', 'nose', 'nzmath', 'openmpi', 'openssl',
                      'ore_algebra', 'p_group_cohomology', 'phc', 'plantri',
                      'pybtex', 'python2', 'python3', 'pyx', 'qhull',
                      'sage_mode', 'scons', 'sip', 'termcap', 'threejs',
                      'tides', 'topcom', 'trac']

_EXPERIMENTAL_PACKAGES = ['4ti2', 'PyQt4', 'PyVTK', 'QScintilla2',
                          'asymptote', 'autotools', 'bison', 'cadabra',
                          'clapack', 'clisp', 'cmake', 'compilerwrapper',
                          'csdp', 'dvipng', 'ets', 'fes', 'flex', 'fricas',
                          'gnofract4d', 'gnuplot', 'graphviz', 'latte_int',
                          'libcprops', 'libjpeg', 'libsigsegv', 'macaulay2',
                          'mayavi', 'meataxe', 'modglue', 'mpich2',
                          'numarray', 'numeric', 'openopt', 'pcre',
                          'phcpack', 'polymake', 'processing', 'pygame',
                          'pygsl', 'pygtk', 'pynifti', 'pyqt', 'pyrexembed',
                          'qasm', 'qepcad', 'quantlib', 'quantlib_swig',
                          'reallib3_linux', 'sandpile', 'scitools++',
                          'semigroupe', 'simpqs', 'sip', 'soya', 'soya_cvs',
                          'superlu', 'surf', 'valgrind', 'vtk_meta',
                          'wxPython', 'yafray', 'yassl']

# Render the three package tables into the module docstring placeholders.
__doc__ = __doc__.format(STANDARD_PACKAGES=_list_to_table(_STANDARD_PACKAGES),
                         OPTIONAL_PACKAGES=_list_to_table(_OPTIONAL_PACKAGES),
                         EXPERIMENTAL_PACKAGES=_list_to_table(_EXPERIMENTAL_PACKAGES))

import os

__installed_packages = None

def install_package(package=None, force=None):
    """
    This function is obsolete. Run ``sage -i PKGNAME`` from a shell
    to install a package. Use the function :func:`installed_packages`
    to list all installed packages.

    TESTS::

        sage: install_package()
        doctest:...: DeprecationWarning: use installed_packages() to list all installed packages
        See http://trac.sagemath.org/16759 for details.
        [...'arb...'python...]
        sage: install_package("autotools")
        Traceback (most recent call last):
        ...
        NotImplementedError: installing Sage packages using 'install_package()' is obsolete.
        Run 'sage -i autotools' from a shell prompt instead
    """
    if package is not None:
        # deprecation(16759, ...)
        raise NotImplementedError("installing Sage packages using 'install_package()' is obsolete.\nRun 'sage -i {}' from a shell prompt instead".format(package))

    from sage.misc.superseded import deprecation
    deprecation(16759, "use installed_packages() to list all installed packages")
    return installed_packages()

def installed_packages():
    """
    Return a list of all installed packages, with version numbers.

    EXAMPLES::

        sage: installed_packages()
        [...'arb...'python...]

    .. seealso::

        :func:`standard_packages`, :func:`optional_packages`, :func:`experimental_packages`
    """
    from sage.env import SAGE_SPKG_INST
    # Each installed spkg leaves a marker file 'name-version' in this dir.
    return sorted(os.listdir(SAGE_SPKG_INST))

def is_package_installed(package):
    """
    Return true if ``package`` is installed.

    EXAMPLES::

        sage: is_package_installed('pari')
        True

    Giving just the beginning of the package name is not good enough::

        sage: is_package_installed('matplotli')
        False

    Otherwise, installing "pillow" will cause this function to think
    that "pil" is installed, for example.
    """
    # Exact match on the name part of 'name-version' marker files.
    return any(p.split('-')[0] == package for p in installed_packages())

def package_versions(package_type, local=False):
    r"""
    Return version information for each Sage package.

    INPUT:

    - ``package_type`` (string) -- one of `"standard"`, `"optional"` or
      `"experimental"`

    - ``local`` (boolean) -- only query local data (no internet needed)

    For packages of the given type, return a dictionary whose entries
    are of the form ``'package': (installed, latest)``, where
    ``installed`` is the installed version (or ``None`` if not installed)
    and ``latest`` is the latest available version. If the package has a
    directory in ``SAGE_ROOT/build/pkgs/``, then ``latest`` is determined
    by the file ``package-version.txt`` in that directory.

    If ``local`` is False, then Sage's servers are queried for package
    information.

    EXAMPLES::

        sage: std = package_versions('standard', local=True)
        sage: 'gap' in std
        True
        sage: std['zn_poly']
        ('0.9.p11', '0.9.p11')
    """
    if package_type not in ['standard','optional','experimental']:
        raise ValueError("'package_type' must be one of 'standard','optional','experimental'.")

    cmd = 'sage-list-packages {} --dump'.format(package_type)
    if local:
        cmd += " --local"
    # Drop the trailing empty string produced by the final newline.
    X = os.popen(cmd).read().split('\n')[:-1]

    versions = {}
    for line in X:
        # Each dump line is: name latest_version installed_version
        line = line.split(' ')
        installed = line[2]
        if installed == 'not_installed':
            installed = None
        versions[line[0]] = (installed, line[1])
    return versions

def _package_lists_from_sage_output(package_type, local=False):
    r"""
    Helper function for :func:`standard_packages`, :func:`optional_packages`
    and :func:`experimental_packages`.

    INPUT:

    - ``package_type`` (string) -- one of `"standard"`, `"optional"` or
      `"experimental"`

    - ``local`` (boolean) -- only query local data (no internet needed)

    OUTPUT:

    The function returns a pair of lists ``(installed,not_installed)``
    with the corresponding packages' name, sorted alphabetically. If
    ``local`` is ``False``, then the list of all packages is downloaded
    from the server; otherwise, the list is extracted from the packages
    in ``SAGE_ROOT/build/pkgs/``

    EXAMPLE::

        sage: from sage.misc.package import standard_packages
        sage: installed, not_installed = standard_packages()  # optional internet

    Local check that all standard packages are installed::

        sage: from sage.misc.package import _package_lists_from_sage_output
        sage: installed, not_installed = _package_lists_from_sage_output('standard', local=True)
        sage: 'pari' in installed
        True
    """
    installed = []
    not_installed = []
    # Partition by whether an installed version was reported (None == absent).
    versions = package_versions(package_type, local)
    for p in versions:
        if versions[p][0] is None:
            not_installed.append(p)
        else:
            installed.append(p)

    return sorted(installed), sorted(not_installed)

def standard_packages():
    """
    Return two lists. The first contains the installed and the second
    contains the not-installed standard packages that are available
    from the Sage repository. You must have an internet connection.

    OUTPUT:

    - installed standard packages (as a list)

    - NOT installed standard packages (as a list)

    Run ``sage -i package_name`` from a shell to install a given
    package or ``sage -f package_name`` to re-install it.

    EXAMPLE::

        sage: from sage.misc.package import standard_packages
        sage: installed, not_installed = standard_packages() # optional internet
        sage: installed[0], installed[-1]                    # optional internet
        ('atlas', 'zn_poly')
        sage: 'mercurial' in not_installed                   # optional internet
        True
    """
    return _package_lists_from_sage_output('standard')

def optional_packages():
    """
    Return two lists. The first contains the installed and the second
    contains the not-installed optional packages that are available
    from the Sage repository. You must have an internet connection.

    OUTPUT:

    - installed optional packages (as a list)

    - NOT installed optional packages (as a list)

    Run ``sage -i package_name`` from a shell to install a given
    package or ``sage -f package_name`` to re-install it.

    EXAMPLE::

        sage: from sage.misc.package import optional_packages
        sage: installed, not_installed = optional_packages() # optional internet
        sage: min(installed+not_installed)                   # optional internet
        'PyQt_x11'
        sage: max(installed+not_installed)                   # optional internet
        'trac'
    """
    return _package_lists_from_sage_output('optional')

def experimental_packages():
    """
    Return two lists. The first contains the installed and the second
    contains the not-installed experimental packages that are available
    from the