code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def add_deploy(state, deploy_func, *args, **kwargs):
    """Prepare & add a deploy to pyinfra.state by executing it on all hosts.

    Args:
        state: the deploy state whose inventory hosts the deploy runs on.
        deploy_func: the deploy function to execute once per host.
        *args: positional arguments forwarded to ``deploy_func``.
        **kwargs: keyword arguments forwarded to ``deploy_func``; the caller's
            frame info is injected under the ``frameinfo`` key.
    """
    # Capture the caller's frame once, then run the deploy on every host.
    kwargs['frameinfo'] = get_caller_frameinfo()
    for target_host in state.inventory:
        deploy_func(state, target_host, *args, **kwargs)
Prepare & add a deploy to pyinfra.state by executing it on all hosts. Args: state (``pyinfra.api.State`` obj): the deploy state to add the operation deploy_func (function): the operation function from one of the modules, i.e. ``server.user`` args/kwargs: passed to the operation function
codesearchnet
def to_geotiff(arr, path='./output.tif', proj=None, spec=None, bands=None, **kwargs):
    """Write out a geotiff file of the image.

    Args:
        arr: image array with RDA metadata (gbdxtools image object).
        path (str): output path for the geotiff, default ``./output.tif``.
        proj (str): EPSG string of projection to assign to the output.
        spec (str): if ``'rgb'``, write a color-balanced 8-bit RGB tif.
        bands (list): bands to export; with ``spec='rgb'`` defaults to the
            image's RGB bands.
        **kwargs: may carry ``chunk_size``, ``transform`` and ``tiled``.

    Returns:
        str: path the geotiff was written to.
    """
    assert has_rasterio, "To create geotiff images please install rasterio"
    # Prefer tile sizes from the RDA metadata; fall back to chunk_size kwarg.
    try:
        img_md = arr.rda.metadata["image"]
        x_size = img_md["tileXSize"]
        y_size = img_md["tileYSize"]
    except (AttributeError, KeyError):
        x_size = kwargs.get("chunk_size", 256)
        y_size = kwargs.get("chunk_size", 256)
    # Geotransform: explicit kwarg wins, else the array's affine if present.
    try:
        tfm = kwargs['transform'] if 'transform' in kwargs else arr.affine
    except:  # noqa: E722 -- original deliberately swallows any failure here
        tfm = None
    dtype = arr.dtype.name if arr.dtype.name != 'int8' else 'uint8'
    if spec is not None and spec.lower() == 'rgb':
        if bands is None:
            bands = arr._rgb_bands
        # Apply a histogram DRA unless the image already has one.
        if not arr.options.get('dra'):
            from gbdxtools.rda.interface import RDA
            rda = RDA()
            dra = rda.HistogramDRA(arr)
            arr = dra.aoi(bbox=arr.bounds)
        # NOTE(review): nesting reconstructed from a flattened source; the
        # cast is assumed to apply for every RGB export -- confirm upstream.
        arr = arr[bands, ...].astype(np.uint8)
        dtype = 'uint8'
    else:
        if bands is not None:
            arr = arr[bands, ...]
    meta = {
        'width': arr.shape[2],
        'height': arr.shape[1],
        'count': arr.shape[0],
        'dtype': dtype,
        'driver': 'GTiff',
        'transform': tfm
    }
    if proj is not None:
        meta["crs"] = {'init': proj}
    if "tiled" in kwargs and kwargs["tiled"]:
        meta.update(blockxsize=x_size, blockysize=y_size, tiled="yes")
    # Stream the dask array into the raster without materializing it at once.
    with rasterio.open(path, "w", **meta) as dst:
        writer = rio_writer(dst)
        result = store(arr, writer, compute=False)
        result.compute(scheduler=threaded_get)
    return path
Write out a geotiff file of the image Args: path (str): path to write the geotiff file to, default is ./output.tif proj (str): EPSG string of projection to reproject to spec (str): if set to 'rgb', write out color-balanced 8-bit RGB tif bands (list): list of bands to export. If spec='rgb' will default to RGB bands Returns: str: path the geotiff was written to
juraj-google-style
def AddTask(self, target, args=(), name='Unnamed task', blocking=True, inline=True):
    """Adds a task to be processed later.

    Args:
        target: a callable to be processed by one of the workers.
        args: a tuple of arguments to ``target``.
        name: name of this task, used to identify tasks in the log.
        blocking: if True, block until the task is queued; otherwise raise
            ``queue.Full`` when the pool is saturated.
        inline: if set, process the task inline when the queue is full.
            This implies no blocking (``blocking`` is forced to False).

    Raises:
        ThreadPoolNotStartedError: if the pool was not started yet.
        Full: if the pool is full and cannot accept new jobs.
    """
    if (not self.started):
        raise ThreadPoolNotStartedError(self.name)
    # A pool with no threads degenerates to synchronous execution.
    if (self.max_threads == 0):
        target(*args)
        return
    if inline:
        blocking = False
    with self.lock:
        while True:
            # Opportunistically grow the pool before trying to enqueue.
            if (len(self) < self.max_threads):
                try:
                    self._AddWorker()
                except (RuntimeError, threading.ThreadError) as e:
                    logging.error('Threadpool exception: Could not spawn worker threads: %s', e)
            try:
                self._queue.put((target, args, name, time.time()), block=False)
                return
            except queue.Full:
                # Queue full: try to add one more worker and retry the put.
                if (len(self) < self.max_threads):
                    try:
                        self._AddWorker()
                        continue
                    except (RuntimeError, threading.ThreadError) as e:
                        logging.error('Threadpool exception: Could not spawn worker threads: %s', e)
                if inline:
                    # Fall through to run the task on the calling thread.
                    break
                elif blocking:
                    # Blocking mode: keep retrying with a short timeout so the
                    # loop can re-check pool capacity periodically.
                    try:
                        self._queue.put((target, args, name, time.time()), block=True, timeout=1)
                        return
                    except queue.Full:
                        continue
                else:
                    raise Full()
    # Inline fallback: execute on the caller's thread, outside the lock.
    if inline:
        target(*args)
Adds a task to be processed later. Args: target: A callable which should be processed by one of the workers. args: A tuple of arguments to target. name: The name of this task. Used to identify tasks in the log. blocking: If True we block until the task is finished, otherwise we raise queue.Full inline: If set, process the task inline when the queue is full. This implies no blocking. Specifying inline helps if the worker tasks are blocked because it still ensures some progress is made. However, this can generally block the calling thread even after the threadpool is available again and therefore decrease efficiency. Raises: ThreadPoolNotStartedError: if the pool was not started yet. queue.Full: if the pool is full and can not accept new jobs.
codesearchnet
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
    """Calculate perplexity from model losses.

    Args:
        losses: list of numpy arrays of model losses.

    Returns:
        float: perplexity, ``exp(mean(losses))``.
    """
    mean_loss = np.mean(losses)
    return float(np.exp(mean_loss))
Calculates perplexity by loss Args: losses: list of numpy arrays of model losses Returns: perplexity : float
codesearchnet
def partial_declaration_path(decl):
    """Return the list of parent declaration names (without defaulted template
    arguments) for ``decl``, top-most parent first.

    The result is memoized on each declaration's ``cache`` attribute, and the
    walk stops early when a parent already has a cached path.

    Args:
        decl (declaration_t): declaration for which the partial declaration
            path should be calculated.

    Returns:
        list[str]: names from the top parent down to ``decl`` itself.
    """
    if not decl:
        return []
    if not decl.cache.partial_declaration_path:
        # Walk upward collecting names until the root or a cached ancestor.
        result = [decl.partial_name]
        parent = decl.parent
        while parent:
            if parent.cache.partial_declaration_path:
                # Ancestor already cached: splice its path onto ours.
                result.reverse()
                decl.cache.partial_declaration_path \
                    = parent.cache.partial_declaration_path + result
                return decl.cache.partial_declaration_path
            else:
                result.append(parent.partial_name)
                parent = parent.parent
        # Reached the root: names were collected bottom-up, so reverse.
        result.reverse()
        decl.cache.partial_declaration_path = result
        return result
    return decl.cache.partial_declaration_path
Returns a list of parent declarations names without template arguments that have default value. Args: decl (declaration_t): declaration for which the partial declaration path should be calculated. Returns: list[(str | basestring)]: list of names, where first item is the top parent name and last item the inputted declaration name.
juraj-google-style
def __init__(self, bundle_context_manager: execution.BundleContextManager, progress_frequency: Optional[float]=None, cache_token_generator=FnApiRunner.get_cache_token_generator(), split_managers=()) -> None:
    """Set up a bundle manager.

    Args:
        bundle_context_manager: execution context for this bundle.
        progress_frequency: how often to request progress updates, presumably
            in seconds -- TODO confirm units against callers.
        cache_token_generator: generator of cache tokens.
            NOTE(review): the default is evaluated once at definition time and
            therefore shared by all instances that omit the argument.
        split_managers: split managers consulted while processing the bundle.
    """
    self.bundle_context_manager: execution.BundleContextManager = bundle_context_manager
    self._progress_frequency = progress_frequency
    # Worker handler is assigned later, during bundle execution.
    self._worker_handler: Optional[WorkerHandler] = None
    self._cache_token_generator = cache_token_generator
    self.split_managers = split_managers
Set up a bundle manager. Args: bundle_context_manager: execution context for the bundle. progress_frequency: how often to request progress updates.
github-repos
def _HasId(self, schedule, entity_id):
    """Check if the schedule has an entity with the given id.

    Args:
        schedule: the transitfeed.Schedule instance to look in.
        entity_id: the id of the entity.

    Returns:
        True if the schedule has an entity with the id, False otherwise.
    """
    # _GetById raises KeyError when the entity is absent; translate that
    # into a boolean answer.
    try:
        self._GetById(schedule, entity_id)
    except KeyError:
        return False
    return True
Check if the schedule has an entity with the given id. Args: schedule: The transitfeed.Schedule instance to look in. entity_id: The id of the entity. Returns: True if the schedule has an entity with the id or False if not.
juraj-google-style
def analyze_directory(self, directory: Path, identifier: Union[str, None]=None, ignore_files: Union[list[str], None]=None, n_identifier: Union[str, list[str], None]=None, only_modules: bool=True):
    """Run the doctests of files in ``directory`` that match ``identifier``.

    Args:
        directory (`Path`): directory containing the files.
        identifier (`str`): only parse files whose name contains this.
        ignore_files (`List[str]`): files to skip. NOTE(review): when a list is
            passed, it is mutated in place ('__init__.py' is appended).
        n_identifier (`str` or `List[str]`): do NOT parse files containing
            this/these identifiers.
        only_modules (`bool`): whether to only analyze importable modules.
    """
    files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
    if identifier is not None:
        files = [file for file in files if identifier in file]
    if n_identifier is not None:
        # Exclusion filter: a single string or a list of strings.
        if isinstance(n_identifier, list):
            for n_ in n_identifier:
                files = [file for file in files if n_ not in file]
        else:
            files = [file for file in files if n_identifier not in file]
    ignore_files = ignore_files or []
    ignore_files.append('__init__.py')
    files = [file for file in files if file not in ignore_files]
    for file in files:
        print('Testing', file)
        if only_modules:
            # Resolve the file name to a transformers submodule and run its
            # doctest suite; non-modules are only logged, not failed.
            module_identifier = file.split('.')[0]
            try:
                module_identifier = getattr(transformers, module_identifier)
                suite = doctest.DocTestSuite(module_identifier)
                result = unittest.TextTestRunner().run(suite)
                self.assertIs(len(result.failures), 0)
            except AttributeError:
                logger.info(f'{module_identifier} is not a module.')
        else:
            # Run the file directly as a doctest text file.
            result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
            self.assertIs(result.failed, 0)
Runs through the specific directory, looking for the files identified with `identifier`. Executes the doctests in those files Args: directory (`Path`): Directory containing the files identifier (`str`): Will parse files containing this ignore_files (`List[str]`): List of files to skip n_identifier (`str` or `List[str]`): Will not parse files containing this/these identifiers. only_modules (`bool`): Whether to only analyze modules
github-repos
def verify_profile_name(msg, cfg):
    """Verify the profile name exists in the config.json file.

    Args:
        msg: (Message class) an instance of a message class.
        cfg: (jsonconfig.Config) config instance.

    Raises:
        UnknownProfileError: the message's profile is not in the config data.
    """
    # Guard-clause form: return silently when the profile is known.
    if msg.profile in cfg.data:
        return
    raise UnknownProfileError(msg.profile)
Verifies the profile name exists in the config.json file. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
juraj-google-style
def launch(self, image, command, **kwargs):
    """Create a job on this engine.

    Args:
        image (str): name of the docker image to launch.
        command (str or PythonCall): shell command or python call to run.
        **kwargs: forwarded to the job constructor.

    Returns:
        PythonJob when ``command`` is a PythonCall, otherwise Job.
    """
    # Dispatch on the command type to pick the right job class.
    job_cls = PythonJob if isinstance(command, PythonCall) else Job
    return job_cls(self, image, command, **kwargs)
Create a job on this engine Args: image (str): name of the docker image to launch command (str): shell command to run
codesearchnet
def diagonal_gaussian_posterior_builder(getter, name, shape=None, *args, **kwargs):
    """A pre-canned builder for diagonal gaussian posterior distributions.

    Given a true ``getter`` function and arguments forwarded from
    ``tf.get_variable``, return a distribution object for a diagonal posterior
    over a variable of the requisite shape.

    Args:
        getter: the ``getter`` passed to a ``custom_getter``.
        name: the ``name`` argument passed to ``tf.get_variable``.
        shape: the ``shape`` argument passed to ``tf.get_variable``.
        *args: positional arguments passed to ``tf.get_variable``.
        **kwargs: keyword arguments passed to ``tf.get_variable``.

    Returns:
        An instance of ``tfp.distributions.Normal`` representing the posterior
        distribution over the variable in question.
    """
    # Derive the shapes of loc/scale parameters from the variable shape.
    parameter_shapes = tfp.distributions.Normal.param_static_shapes(shape)
    loc_var = getter((name + '/posterior_loc'), *args, shape=parameter_shapes['loc'], **kwargs)
    scale_var = getter((name + '/posterior_scale'), *args, shape=parameter_shapes['scale'], **kwargs)
    # softplus keeps the scale strictly positive regardless of the raw value.
    posterior = tfp.distributions.Normal(loc=loc_var, scale=tf.nn.softplus(scale_var), name='{}_posterior_dist'.format(name))
    return posterior
A pre-canned builder for diagonal gaussian posterior distributions. Given a true `getter` function and arguments forwarded from `tf.get_variable`, return a distribution object for a diagonal posterior over a variable of the requisite shape. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. shape: The `shape` argument passed to `tf.get_variable`. *args: See positional arguments passed to `tf.get_variable`. **kwargs: See keyword arguments passed to `tf.get_variable`. Returns: An instance of `tfp.distributions.Normal` representing the posterior distribution over the variable in question.
codesearchnet
def replace_keywords(self, sentence):
    """Replace every keyword found in ``sentence`` with its clean name.

    Walks the sentence character by character through the keyword trie,
    always preferring the longest matching keyword sequence.

    Args:
        sentence (str): line of text where keywords will be replaced.

    Returns:
        str: line of text with keywords replaced.
    """
    if (not sentence):
        return sentence
    new_sentence = []
    # Keep the original casing for output; match case-insensitively if asked.
    orig_sentence = sentence
    if (not self.case_sensitive):
        sentence = sentence.lower()
    current_word = ''
    current_dict = self.keyword_trie_dict
    current_white_space = ''
    sequence_end_pos = 0
    idx = 0
    sentence_len = len(sentence)
    while (idx < sentence_len):
        char = sentence[idx]
        current_word += orig_sentence[idx]
        if (char not in self.non_word_boundaries):
            # Word boundary hit: a keyword may end here.
            current_white_space = char
            if ((self._keyword in current_dict) or (char in current_dict)):
                sequence_found = None
                longest_sequence_found = None
                is_longer_seq_found = False
                if (self._keyword in current_dict):
                    sequence_found = current_dict[self._keyword]
                    longest_sequence_found = current_dict[self._keyword]
                    sequence_end_pos = idx
                if (char in current_dict):
                    # Look ahead for a longer keyword continuing past this
                    # boundary (greedy longest-match).
                    current_dict_continued = current_dict[char]
                    current_word_continued = current_word
                    idy = (idx + 1)
                    while (idy < sentence_len):
                        inner_char = sentence[idy]
                        current_word_continued += orig_sentence[idy]
                        if ((inner_char not in self.non_word_boundaries) and (self._keyword in current_dict_continued)):
                            current_white_space = inner_char
                            longest_sequence_found = current_dict_continued[self._keyword]
                            sequence_end_pos = idy
                            is_longer_seq_found = True
                        if (inner_char in current_dict_continued):
                            current_dict_continued = current_dict_continued[inner_char]
                        else:
                            break
                        idy += 1
                    else:
                        # Ran off the end of the sentence mid-lookahead.
                        if (self._keyword in current_dict_continued):
                            current_white_space = ''
                            longest_sequence_found = current_dict_continued[self._keyword]
                            sequence_end_pos = idy
                            is_longer_seq_found = True
                    if is_longer_seq_found:
                        idx = sequence_end_pos
                        current_word = current_word_continued
                current_dict = self.keyword_trie_dict
                if longest_sequence_found:
                    # Emit the replacement plus the trailing boundary char.
                    new_sentence.append((longest_sequence_found + current_white_space))
                    current_word = ''
                    current_white_space = ''
                else:
                    new_sentence.append(current_word)
                    current_word = ''
                    current_white_space = ''
            else:
                # No keyword ends here; flush the accumulated word verbatim.
                current_dict = self.keyword_trie_dict
                new_sentence.append(current_word)
                current_word = ''
                current_white_space = ''
        elif (char in current_dict):
            # Still inside a potential keyword; descend in the trie.
            current_dict = current_dict[char]
        else:
            # Mismatch mid-word: skip ahead to the next word boundary and
            # emit the whole (unmatched) word unchanged.
            current_dict = self.keyword_trie_dict
            idy = (idx + 1)
            while (idy < sentence_len):
                char = sentence[idy]
                current_word += orig_sentence[idy]
                if (char not in self.non_word_boundaries):
                    break
                idy += 1
            idx = idy
            new_sentence.append(current_word)
            current_word = ''
            current_white_space = ''
        if ((idx + 1) >= sentence_len):
            # Sentence ends: a keyword may terminate exactly at the end.
            if (self._keyword in current_dict):
                sequence_found = current_dict[self._keyword]
                new_sentence.append(sequence_found)
            else:
                new_sentence.append(current_word)
        idx += 1
    return ''.join(new_sentence)
Searches in the string for all keywords present in corpus. Keywords present are replaced by the clean name and a new string is returned. Args: sentence (str): Line of text where we will replace keywords Returns: new_sentence (str): Line of text with replaced keywords Examples: >>> from flashtext import KeywordProcessor >>> keyword_processor = KeywordProcessor() >>> keyword_processor.add_keyword('Big Apple', 'New York') >>> keyword_processor.add_keyword('Bay Area') >>> new_sentence = keyword_processor.replace_keywords('I love Big Apple and bay area.') >>> new_sentence >>> 'I love New York and Bay Area.'
codesearchnet
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
    """Read an optical flow map.

    Args:
        flow_or_path (ndarray or str): a flow map or filepath.
        quantize (bool): whether to read a quantized pair; if True, remaining
            args are passed to ``dequantize_flow``.
        concat_axis (int): axis along which dx and dy are concatenated, 0 or 1.
            Ignored when ``quantize`` is False.

    Returns:
        ndarray: optical flow represented as an (h, w, 2) float32 array.
    """
    # Already an array: validate its shape and pass it through.
    if isinstance(flow_or_path, np.ndarray):
        if ((flow_or_path.ndim != 3) or (flow_or_path.shape[(- 1)] != 2)):
            raise ValueError('Invalid flow with shape {}'.format(flow_or_path.shape))
        return flow_or_path
    elif (not is_str(flow_or_path)):
        raise TypeError('"flow_or_path" must be a filename or numpy array, not {}'.format(type(flow_or_path)))
    if (not quantize):
        # .flo format: 'PIEH' magic, int32 width/height, then float32 data.
        with open(flow_or_path, 'rb') as f:
            try:
                header = f.read(4).decode('utf-8')
            except Exception:
                raise IOError('Invalid flow file: {}'.format(flow_or_path))
            else:
                if (header != 'PIEH'):
                    raise IOError('Invalid flow file: {}, header does not contain PIEH'.format(flow_or_path))
            w = np.fromfile(f, np.int32, 1).squeeze()
            h = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, ((w * h) * 2)).reshape((h, w, 2))
    else:
        # Quantized flow: dx/dy stored concatenated in a single-channel image.
        assert (concat_axis in [0, 1])
        cat_flow = imread(flow_or_path, flag='unchanged')
        if (cat_flow.ndim != 2):
            raise IOError('{} is not a valid quantized flow file, its dimension is {}.'.format(flow_or_path, cat_flow.ndim))
        assert ((cat_flow.shape[concat_axis] % 2) == 0)
        (dx, dy) = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)
    return flow.astype(np.float32)
Read an optical flow map. Args: flow_or_path (ndarray or str): A flow map or filepath. quantize (bool): whether to read quantized pair, if set to True, remaining args will be passed to :func:`dequantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False. Returns: ndarray: Optical flow represented as a (h, w, 2) numpy array
codesearchnet
def qhull_cmd(cmd, options, points):
    """Generalized helper to run a qhull-based command.

    Args:
        cmd: command to perform (qconvex, qdelaunay or qvoronoi).
        options: option string for the qhull command (up to two options
            separated by spaces).
        points: sequence of points fed to the qhull command.

    Returns:
        Output as a list of stripped strings.
    """
    # qhull input format: dimension, point count, then one point per line.
    dim = len(points[0])
    input_lines = [str(dim), str(len(points))]
    input_lines.extend(' '.join(map(repr, row)) for row in points)
    raw_output = getattr(hull, cmd)(options, '\n'.join(input_lines))
    return [line.strip() for line in raw_output.strip().split('\n')]
Generalized helper method to perform a qhull based command. Args: cmd: Command to perform. Supported commands are qconvex, qdelaunay and qvoronoi. options: Options to be provided for qhull command. See specific methods for info on supported options. Up to two options separated by spaces are supported. points: Sequence of points as input to qhull command. Returns: Output as a list of strings. E.g., ['4', '0 2', '1 0', '2 3 ', '3 1']
codesearchnet
def from_file_msg(cls, fp):
    """Init a new object from an Outlook message file
    (MIME type application/vnd.ms-outlook).

    Args:
        fp (string): file path of the raw Outlook email.

    Returns:
        Instance of MailParser.
    """
    log.debug("Parsing email from file Outlook")
    # Convert the proprietary .msg file to a standard RFC822 message first.
    converted_path, _ = msgconvert(fp)
    return cls.from_file(converted_path, True)
Init a new object from an Outlook message file, MIME type: application/vnd.ms-outlook Args: fp (string): file path of raw Outlook email Returns: Instance of MailParser
juraj-google-style
def from_dense(tensor, name=None):
    """Converts a dense tensor into a sparse tensor.

    Only elements not equal to zero are present in the result; the resulting
    ``SparseTensor`` has the same dtype and shape as the input.

    Args:
        tensor: a dense ``Tensor`` to be converted to a ``SparseTensor``.
        name: optional name for the op.

    Returns:
        The ``SparseTensor``.
    """
    with ops.name_scope(name, 'dense_to_sparse'):
        tensor = ops.convert_to_tensor(tensor)
        # Indices of all non-zero entries, in row-major order.
        indices = array_ops.where_v2(math_ops.not_equal(tensor, array_ops.zeros_like(tensor)))
        values = array_ops.gather_nd(tensor, indices)
        # SparseTensor requires an int64 dense shape.
        shape = array_ops.shape(tensor, out_type=dtypes.int64)
        return sparse_tensor.SparseTensor(indices, values, shape)
Converts a dense tensor into a sparse tensor. Only elements not equal to zero will be present in the result. The resulting `SparseTensor` has the same dtype and shape as the input. >>> sp = tf.sparse.from_dense([0, 0, 3, 0, 1]) >>> sp.shape.as_list() [5] >>> sp.values.numpy() array([3, 1], dtype=int32) >>> sp.indices.numpy() array([[2], [4]]) Args: tensor: A dense `Tensor` to be converted to a `SparseTensor`. name: Optional name for the op. Returns: The `SparseTensor`.
github-repos
def download_apcor(self, uri):
    """Download aperture-correction (apcor) data.

    Reads the file from the local working directory when a copy exists,
    otherwise fetches it from VOSpace storage.

    Args:
        uri: the URI of the apcor data file.

    Returns:
        ApcorData parsed from the file contents.
    """
    local_file = os.path.basename(uri)
    if os.access(local_file, os.F_OK):
        fobj = open(local_file)
    else:
        fobj = storage.vofile(uri, view='data')
    try:
        fobj.seek(0)
        # Renamed from `str`, which shadowed the builtin in the original.
        apcor_str = fobj.read()
    finally:
        # Ensure the handle is released even if the read fails
        # (the original leaked it on exception).
        fobj.close()
    return ApcorData.from_string(apcor_str)
Downloads apcor data. Args: uri: The URI of the apcor data file. Returns: apcor: ossos.downloads.core.ApcorData
juraj-google-style
def load(self, txt_fst_filename):
    """Load a transducer from the OpenFST text file format.

    The format is: arc lines ``src dest ilabel olabel [weight]`` and final
    state lines ``state [weight]``; lines may occur in any order except that
    the initial state must be first.

    Args:
        txt_fst_filename (string): the name of the file.

    Returns:
        None
    """
    with open(txt_fst_filename, 'r') as txt_fst:
        for line in txt_fst:
            line = line.strip()
            splitted_line = line.split()
            if len(splitted_line) == 1:
                # Single token: a final-state line.
                self[int(splitted_line[0])].final = True
            else:
                # Arc line: src, dest, hex-encoded label.
                # NOTE(review): str.decode('hex') is Python-2-only; under
                # Python 3 this raises AttributeError -- confirm the target
                # runtime (bytes.fromhex would be the py3 equivalent).
                self.add_arc(int(splitted_line[0]), int(
                    splitted_line[1]), splitted_line[2].decode('hex'))
Save the transducer in the text file format of OpenFST. The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_filename (string): The name of the file Returns: None
juraj-google-style
def intersects(self, other):
    """Return True if this envelope intersects another.

    Arguments:
        other -- Envelope or tuple of (minX, minY, maxX, maxY); non-envelope
        inputs are coerced via Envelope(other).
    """
    # Lazy checks preserve the original short-circuit order: a later
    # attribute is only touched when every earlier check passed.
    checks = (
        lambda: self.min_x <= other.max_x,
        lambda: self.max_x >= other.min_x,
        lambda: self.min_y <= other.max_y,
        lambda: self.max_y >= other.min_y,
    )
    try:
        return all(check() for check in checks)
    except AttributeError:
        # `other` is not envelope-like; coerce and retry.
        return self.intersects(Envelope(other))
Returns true if this envelope intersects another. Arguments: other -- Envelope or tuple of (minX, minY, maxX, maxY)
juraj-google-style
def __delitem__(self, anchor_id):
    """Remove an anchor from storage.

    Args:
        anchor_id: the ID of the anchor to remove.

    Raises:
        KeyError: there is no anchor with that ID (any OSError from the
            filesystem removal is translated into KeyError).
    """
    try:
        self._anchor_path(anchor_id).unlink()
    except OSError:
        raise KeyError('No anchor with id {}'.format(anchor_id))
Remove an anchor from storage. Args: anchor_id: The ID of the anchor to remove. Raises: KeyError: There is no anchor with that ID.
juraj-google-style
def get_vm(access_token, subscription_id, resource_group, vm_name):
    """Get virtual machine details.

    Args:
        access_token (str): a valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vm_name (str): name of the virtual machine.

    Returns:
        HTTP response with a JSON body of VM properties.
    """
    # Build the ARM REST endpoint for this VM resource.
    endpoint = (
        '{rm}/subscriptions/{sub}/resourceGroups/{rg}'
        '/providers/Microsoft.Compute/virtualMachines/{vm}'
        '?api-version={api}'
    ).format(rm=get_rm_endpoint(), sub=subscription_id,
             rg=resource_group, vm=vm_name, api=COMP_API)
    return do_get(endpoint, access_token)
Get virtual machine details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vm_name (str): Name of the virtual machine. Returns: HTTP response. JSON body of VM properties.
codesearchnet
async def update_notifications(self, on_match_open: bool=None, on_tournament_end: bool=None):
    """Update participants' notifications for this tournament.

    |methcoro|

    Args:
        on_match_open: email registered Challonge participants when matches
            open up for them.
        on_tournament_end: email registered Challonge participants the results
            when this tournament ends.

    Raises:
        APIException
    """
    # Collect only the notification flags that were explicitly provided.
    params = {
        key: value
        for key, value in (
            ('notify_users_when_matches_open', on_match_open),
            ('notify_users_when_the_tournament_ends', on_tournament_end),
        )
        if value is not None
    }
    assert_or_raise((len(params) > 0), ValueError, 'At least one of the notifications must be given')
    (await self.update(**params))
Update participants' notifications for this tournament |methcoro| Args: on_match_open: Email registered Challonge participants when matches open up for them on_tournament_end: Email registered Challonge participants the results when this tournament ends Raises: APIException
codesearchnet
def _compile_output_step(outputs):
    """Construct the final ConstructResult block defining the query's output.

    Args:
        outputs: dict, output name (string) -> output data dict, specifying
            the location from where to get the data, and whether the data is
            optional (and therefore may be missing); missing optional data is
            replaced with 'null'.

    Returns:
        A ConstructResult basic block that constructs appropriate outputs
        for the query.
    """
    if (not outputs):
        raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least one field with the @output directive.')
    output_fields = {}
    for (output_name, output_context) in six.iteritems(outputs):
        location = output_context['location']
        optional = output_context['optional']
        graphql_type = output_context['type']
        expression = None
        existence_check = None
        if isinstance(location, FoldScopeLocation):
            # Folded outputs cannot be optional; that combination is rejected
            # earlier in compilation, hence the AssertionError here.
            if optional:
                raise AssertionError(u'Unreachable state reached, optional in fold: {}'.format(output_context))
            if (location.field == COUNT_META_FIELD_NAME):
                expression = expressions.FoldCountContextField(location)
            else:
                expression = expressions.FoldedContextField(location, graphql_type)
        else:
            expression = expressions.OutputContextField(location, graphql_type)
            if optional:
                # Optional outputs need a runtime existence check on the vertex.
                existence_check = expressions.ContextFieldExistence(location.at_vertex())
        if existence_check:
            # Emit null when the optional vertex did not exist.
            expression = expressions.TernaryConditional(existence_check, expression, expressions.NullLiteral)
        output_fields[output_name] = expression
    return blocks.ConstructResult(output_fields)
Construct the final ConstructResult basic block that defines the output format of the query. Args: outputs: dict, output name (string) -> output data dict, specifying the location from where to get the data, and whether the data is optional (and therefore may be missing); missing optional data is replaced with 'null' Returns: a ConstructResult basic block that constructs appropriate outputs for the query
codesearchnet
def add_task(self, tile_address, coroutine):
    """Add a background task, owned by a tile, into the event loop.

    Tasks registered against a tile address are cancelled when that tile is
    reset; pass ``None`` for ``tile_address`` to keep a task independent of
    any tile.

    Args:
        tile_address (int): the address of the tile running the task, or None.
        coroutine (coroutine): coroutine to add to the event loop.
    """
    # Hand off to the loop thread; _add_task runs inside the event loop.
    schedule = self._loop.call_soon_threadsafe
    schedule(self._add_task, tile_address, coroutine)
Add a task into the event loop. This is the main entry point for registering background tasks that are associated with a tile. The tasks are added to the EmulationLoop and the tile they are a part of is recorded. When the tile is reset, all of its background tasks are canceled as part of the reset process. If you have a task that should not be associated with any tile, you may pass `None` for tile_address and the task will not be cancelled when any tile is reset. Args: tile_address (int): The address of the tile running the task. coroutine (coroutine): A coroutine that will be added to the event loop.
codesearchnet
def check_function_argument_count(func, input_arity, infeed_queue):
    """Validate the number of input arguments to an XLA function.

    Args:
        func: the Python function that will be called to generate the body of
            an XLA computation graph.
        input_arity: the number of explicit arguments supplied by the caller.
        infeed_queue: if not None, the infeed queue that will supply
            additional arguments to the function.

    Returns:
        None if the function can be called with the supplied number of
        arguments, or an error string if it cannot.
    """
    def format_error(complaint, quantity):
        # e.g. "exactly 1 argument" / "at least 2 arguments".
        return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's')
    num_args_supplied = input_arity
    if infeed_queue is not None:
        # Infeed tuple elements count as additional supplied arguments.
        num_args_supplied += infeed_queue.number_of_tuple_elements
    arg_spec = tf_inspect.getargspec(func)
    num_func_args = len(arg_spec.args)
    if arg_spec.defaults is None:
        num_func_defaults = 0
    else:
        num_func_defaults = len(arg_spec.defaults)
    min_func_args = num_func_args - num_func_defaults
    if num_args_supplied < min_func_args:
        # Too few arguments.
        if num_func_defaults == 0 and arg_spec.varargs is None:
            return format_error('exactly', num_func_args)
        else:
            return format_error('at least', min_func_args)
    if arg_spec.varargs is None and num_args_supplied > num_func_args:
        # Too many arguments (and no *args to absorb the excess).
        if num_func_defaults == 0:
            return format_error('exactly', num_func_args)
        else:
            return format_error('at most', num_func_args)
    return None
Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot.
github-repos
def split_line(what, indent='', cols=79):
    """Split a line on the closest space, or break the last word with '-'.

    Args:
        what (str): text to split one line of.
        indent (str): prepended to the split line and counted in the columns.
        cols (int): maximum length of the split line.

    Returns:
        tuple(str, str): rest of the text and split line, in that order.

    Raises:
        ValueError: when the indent is longer than cols, or cols is too small.
    """
    if len(indent) > cols:
        raise ValueError("The indent can't be longer than cols.")
    if cols < 2:
        raise ValueError(
            "The cols can't be smaller than 2 (a char plus a possible '-')"
        )
    text = indent + what.lstrip()
    # Everything fits on one line: nothing remains to split.
    if len(text) <= cols:
        return '', text.rstrip()
    # Prefer breaking at the right-most space inside the column budget.
    space_at = text[:cols].rfind(' ')
    if space_at > len(indent):
        rest, line = text[space_at:], text[:space_at]
    elif text[cols] == ' ':
        # The break falls exactly on a space.
        rest, line = text[cols:], text[:cols]
    else:
        # No usable space: hyphenate the last word.
        rest, line = text[cols - 1:], text[:cols - 1] + '-'
    return rest.lstrip(), line.rstrip()
Split a line on the closest space, or break the last word with '-'. Args: what(str): text to spli one line of. indent(str): will prepend this indent to the split line, taking it into account in the column count. cols(int): maximum length of the split line. Returns: tuple(str, str): rest of the text and split line in that order. Raises: ValueError: when the indent is greater than the indent, or the cols param is too small
juraj-google-style
def purity(state):
    """Calculate the purity Tr[rho^2] of a quantum state.

    Args:
        state (ndarray): a quantum state (vector or density matrix).

    Returns:
        float: purity.
    """
    rho = np.array(state)
    # A 1-D input is a state vector, which is always pure.
    if rho.ndim == 1:
        return 1.0
    return np.real((rho @ rho).trace())
Calculate the purity of a quantum state. Args: state (ndarray): a quantum state Returns: float: purity.
juraj-google-style
def _UpdateUsers(self, update_users):
    """Provision and update Linux user accounts based on account metadata.

    Args:
        update_users: dict, authorized users mapped to their public SSH keys.
    """
    for user, ssh_keys in update_users.items():
        # Skip empty names and users that previously failed provisioning.
        if not user or user in self.invalid_users:
            continue
        configured = self.user_ssh_keys.get(user, [])
        # Nothing to do when the key set is already up to date.
        if set(ssh_keys) == set(configured):
            continue
        if self.utils.UpdateUser(user, ssh_keys):
            # Record a copy so later caller mutations don't leak in.
            self.user_ssh_keys[user] = ssh_keys[:]
        else:
            self.invalid_users.add(user)
Provision and update Linux user accounts based on account metadata. Args: update_users: dict, authorized users mapped to their public SSH keys.
codesearchnet
def fit_to_structure(self, structure, symprec=0.1):
    """Return a tensor invariant under the structure's symmetry operations.

    Args:
        structure (Structure): structure from which to generate symmetry
            operations.
        symprec (float): symmetry tolerance for the SpacegroupAnalyzer used
            to generate the symmetry operations.
    """
    analyzer = SpacegroupAnalyzer(structure, symprec)
    ops = analyzer.get_symmetry_operations(cartesian=True)
    # Symmetrize by averaging the tensor over all symmetry operations.
    transformed = [self.transform(op) for op in ops]
    return sum(transformed) / len(ops)
Returns a tensor that is invariant with respect to symmetry operations corresponding to a structure Args: structure (Structure): structure from which to generate symmetry operations symprec (float): symmetry tolerance for the Spacegroup Analyzer used to generate the symmetry operations
juraj-google-style
def __init__(self, group, provider, checker, code, messages):
    """Initialization method.

    Args:
        group (AnalysisGroup): parent group.
        provider (Provider): parent Provider.
        checker (Checker): parent Checker.
        code (int): constant from the Checker class.
        messages (str): messages string.
    """
    self.group = group
    self.provider = provider
    self.checker = checker
    self.code = code
    self.messages = messages
Initialization method. Args: group (AnalysisGroup): parent group. provider (Provider): parent Provider. checker (Checker): parent Checker. code (int): constant from Checker class. messages (str): messages string.
juraj-google-style
def get_file_list(wildcard):
    """Search for files to be concatenated.

    Args:
        wildcard: glob pattern (``~`` is expanded to the user's home).

    Returns:
        list of matching file paths.
    """
    # Expand '~' before globbing so home-relative patterns work.
    pattern = os.path.expanduser(wildcard)
    return glob.glob(pattern)
Search for files to be concatenated. Currently very basic, but could expand to be more sophisticated. Args: wildcard (regular expression string) Returns: files (list of full file paths)
codesearchnet
def read(keypath, configfile=None):
    """Read a value from the configuration file.

    Args:
        keypath: str, key (possibly a hierarchical path such as
            "section1.subsection.key1") whose value is desired.
        configfile: str, path to the config file; None uses the
            application's default config file.

    Returns:
        Value from the configuration file.
    """
    # Cache one AppConfig per config file path (None = the default file).
    try:
        appconfig = _configs[configfile]
    except KeyError:
        appconfig = AppConfig(configfile=configfile)
        _configs[configfile] = appconfig
    return appconfig.read(keypath)
Reads a value from the configuration file. Args: keypath: str Specifies the key for which the value is desired. It can be a hierarchical path. Example: "section1.subsection.key1" configfile: str Path to the config file to read. Defaults to None, in which case the application's default config file is used. Returns: value from configuration file
juraj-google-style
def _get_client_by_id(self, client_id):
    """Get a GRR client and make sure valid approvals exist.

    Args:
        client_id: GRR client ID.

    Returns:
        GRR API Client object.
    """
    client = self.grr_api.Client(client_id)
    print('Checking for client approval')
    # ListFlows requires approval; the wrapper retries until one is granted.
    self._check_approval_wrapper(client, client.ListFlows)
    print('{0:s}: Client approval is valid'.format(client_id))
    # Re-fetch to return the fully populated client object.
    return client.Get()
Get GRR client dictionary and make sure valid approvals exist. Args: client_id: GRR client ID. Returns: GRR API Client object
juraj-google-style
def util_pattern_space(time_series, lag, dim):
    """Create a set of delay-embedded sequences with given lag and dimension.

    Args:
        time_series: vector or string of the sample data.
        lag: lag between the beginnings of successive sequences.
        dim: dimension (number of samples per pattern).

    Returns:
        2D array of shape (len(time_series) - lag*(dim-1), dim).

    Raises:
        ValueError: if the requested lag/dim exceed the series length, or
            lag < 1. (ValueError subclasses the generic Exception raised by
            earlier versions, so existing handlers still match.)
    """
    n = len(time_series)
    if lag * dim > n:
        raise ValueError('Result matrix exceeded size limit, try to change lag or dim.')
    if lag < 1:
        raise ValueError('Lag should be greater or equal to 1.')
    rows = n - lag * (dim - 1)
    pattern_space = np.empty((rows, dim))
    for i in range(rows):
        for j in range(dim):
            # Row i holds samples i, i+lag, i+2*lag, ... (dim of them).
            pattern_space[i][j] = time_series[i + j * lag]
    return pattern_space
Create a set of sequences with given lag and dimension Args: time_series: Vector or string of the sample data lag: Lag between beginning of sequences dim: Dimension (number of patterns) Returns: 2D array of vectors
codesearchnet
def get_resource(self, feature_column, name):
    """Return an already created resource (tables, variables, trackables...).

    Abstract: subclasses must override this.

    Args:
        feature_column: a `FeatureColumn` object this resource corresponds to.
        name: name of the resource.
    """
    del feature_column, name  # Unused in this abstract base.
    raise NotImplementedError('StateManager.get_resource')
Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A `FeatureColumn` object this variable corresponds to. name: Name of the resource.
github-repos
def create_bulk(self, resource, timeout=-1):
    """Create bulk Ethernet networks.

    Args:
        resource (dict): specifications to create in bulk.
        timeout: timeout in seconds; waits for task completion by default.
            The timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        list: list of created Ethernet Networks.
    """
    bulk_uri = self.URI + '/bulk'
    # Merge in the bulk-create default values before submitting.
    defaults = self._get_default_values(self.BULK_DEFAULT_VALUES)
    payload = self._helper.update_resource_fields(resource, defaults)
    self._helper.create(payload, uri=bulk_uri, timeout=timeout)
    # The create call returns no body; fetch the resulting networks by range.
    return self.get_range(resource['namePrefix'], resource['vlanIdRange'])
Creates bulk Ethernet networks. Args: resource (dict): Specifications to create in bulk. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: list: List of created Ethernet Networks.
juraj-google-style
def make(cls, name: str, ctx: 'context.Context', module: str, pyval_name: str | None=None) -> 'PyTDFunction':
    """Create a PyTDFunction.

    Args:
        name: the function name.
        ctx: the abstract context.
        module: the module that the function is in.
        pyval_name: optionally, the name of the pytd.Function object to look
            up, if it is different from the function name.

    Returns:
        A new PyTDFunction.
    """
    pyval = ctx.loader.lookup_pytd(module, pyval_name or name)
    # Resolve aliases that point directly at a function.
    if isinstance(pyval, pytd.Alias) and isinstance(pyval.type, pytd.Function):
        pyval = pyval.type
    # Normalize the name to its fully qualified form.
    pyval = pyval.Replace(name=f'{module}.{name}')
    f = ctx.convert.constant_to_value(pyval, {}, ctx.root_node)
    self = cls(name, f.signatures, pyval.kind, pyval.decorators, ctx)
    self.module = module
    return self
Create a PyTDFunction. Args: name: The function name. ctx: The abstract context. module: The module that the function is in. pyval_name: Optionally, the name of the pytd.Function object to look up, if it is different from the function name. Returns: A new PyTDFunction.
github-repos
def _OpenFile(self, path):
    """Open a Windows Registry file.

    Args:
        path (str): path of the Windows Registry file.

    Returns:
        WinRegistryFile: Windows Registry file, or None if no file reader
        is available.
    """
    reader = self._registry_file_reader
    if not reader:
        return None
    return reader.Open(path, ascii_codepage=self._ascii_codepage)
Opens a Windows Registry file. Args: path (str): path of the Windows Registry file. Returns: WinRegistryFile: Windows Registry file or None if not available.
codesearchnet
def annotate(self, sent):
    """Annotate a sequence of words with entity tags.

    Args:
        sent: sequence of strings/words.

    Returns:
        zip of (word, predicted tag) pairs.
    """
    words = []
    predicted_tags = []
    for word, feature_vector in self.sent2examples(sent):
        probabilities = self.predictor(feature_vector)
        # argsort ascending: the last index is the highest-probability tag.
        best_tag_id = probabilities.argsort()[-1]
        words.append(word)
        predicted_tags.append(self.ID_TAG[best_tag_id])
    return zip(words, predicted_tags)
Annotate a squence of words with entity tags. Args: sent: sequence of strings/words.
codesearchnet
def flush(cls, *args):
    """Remove all keys of this namespace.

    Without args, clears all keys starting with cls.PREFIX; with args,
    clears keys starting with the key built from cls.PREFIX + args.

    Args:
        *args: arbitrary number of arguments.

    Returns:
        List of removed keys.
    """
    # Match everything under the computed prefix.
    prefix = cls._make_key(args) if args else cls.PREFIX
    return _remove_keys([], [prefix + '*'])
Removes all keys of this namespace Without args, clears all keys starting with cls.PREFIX if called with args, clears keys starting with given cls.PREFIX + args Args: *args: Arbitrary number of arguments. Returns: List of removed keys.
codesearchnet
def save_data_files(vr, bs, prefix=None, directory=None):
    """Write the band structure data files to disk.

    Args:
        vr (`Vasprun`): pymatgen `Vasprun` object.
        bs (`BandStructureSymmLine`): calculated band structure.
        prefix (`str`, optional): prefix for the data file.
        directory (`str`, optional): directory in which to save the data.

    Returns:
        The filename of the written data file.
    """
    filename = '{}_band.dat'.format(prefix) if prefix else 'band.dat'
    directory = directory if directory else '.'
    filename = os.path.join(directory, filename)
    # Energies are shifted so zero is the Fermi level (metal) or VBM.
    if bs.is_metal():
        zero = vr.efermi
    else:
        zero = bs.get_vbm()['energy']
    with open(filename, 'w') as f:
        # NOTE(review): the original header-writing line was corrupted in
        # extraction; a minimal comment header is reconstructed here --
        # confirm against the upstream source.
        header = '# {} {}\n'.format('distance', 'energy')
        f.write(header)
        for band in bs.bands[Spin.up]:
            for d, e in zip(bs.distance, band):
                f.write('{:.8f} {:.8f}\n'.format(d, e - zero))
            # Blank line separates bands for plotting tools.
            f.write('\n')
        if bs.is_spin_polarized:
            for band in bs.bands[Spin.down]:
                for d, e in zip(bs.distance, band):
                    f.write('{:.8f} {:.8f}\n'.format(d, e - zero))
                f.write('\n')
    return filename
Write the band structure data files to disk. Args: vs (`Vasprun`): Pymatgen `Vasprun` object. bs (`BandStructureSymmLine`): Calculated band structure. prefix (`str`, optional): Prefix for data file. directory (`str`, optional): Directory in which to save the data. Returns: The filename of the written data file.
juraj-google-style
def get_variants(self, chromosome=None, start=None, end=None):
    """Return all variants in the database, sorted by start position.

    If no region is specified all variants are returned.

    Args:
        chromosome(str)
        start(int)
        end(int)

    Returns:
        variants(Iterable(Variant))
    """
    query = {}
    if chromosome:
        query['chrom'] = chromosome
    if start:
        # Overlap query: variant must start before `end` and end after `start`.
        query.update({'start': {'$lte': end}, 'end': {'$gte': start}})
    LOG.info("Find all variants {}".format(query))
    return self.db.variant.find(query).sort([('start', ASCENDING)])
Return all variants in the database If no region is specified all variants will be returned. Args: chromosome(str) start(int) end(int) Returns: variants(Iterable(Variant))
juraj-google-style
def is_empty(self): for family in self.iter_package_families(): for pkg in self.iter_packages(family): return False return True
Determine if the repository contains any packages. Returns: True if there are no packages, False if there are at least one.
codesearchnet
def start(self, **kwargs): if not self.is_running(): self.websock_url = self.chrome.start(**kwargs) self.websock = websocket.WebSocketApp(self.websock_url) self.websock_thread = WebsockReceiverThread( self.websock, name='WebsockThread:%s' % self.chrome.port) self.websock_thread.start() self._wait_for(lambda: self.websock_thread.is_open, timeout=30) self.send_to_chrome(method='Network.enable') self.send_to_chrome(method='Page.enable') self.send_to_chrome(method='Console.enable') self.send_to_chrome(method='Runtime.enable') self.send_to_chrome(method='ServiceWorker.enable') self.send_to_chrome(method='ServiceWorker.setForceUpdateOnPageLoad') self.send_to_chrome( method='Network.setBlockedURLs', params={'urls': ['*google-analytics.com/analytics.js', '*google-analytics.com/ga.js']})
Starts chrome if it's not running. Args: **kwargs: arguments for self.chrome.start(...)
juraj-google-style
def persons_significant_control(self, num, statements=False, **kwargs): baseuri = (self._BASE_URI + 'company/{}/persons-with-significant-control'.format(num)) if (statements is True): baseuri += '-statements' res = self.session.get(baseuri, params=kwargs) self.handle_http_error(res) return res
Search for a list of persons with significant control. Searches for persons of significant control based on company number for a specified company. Specify statements=True to only search for officers with statements. Args: num (str, int): Company number to search on. statements (Optional[bool]): Search only for persons with statements. Default is False. kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
codesearchnet
def GetRawDevice(path): path = CanonicalPathToLocalPath(path) try: path = win32file.GetLongPathName(path) except pywintypes.error: pass try: mount_point = win32file.GetVolumePathName(path) except pywintypes.error as details: logging.info('path not found. %s', details) raise IOError(('No mountpoint for path: %s' % path)) if (not path.startswith(mount_point)): stripped_mp = mount_point.rstrip('\\') if (not path.startswith(stripped_mp)): raise IOError(('path %s is not mounted under %s' % (path, mount_point))) corrected_path = LocalPathToCanonicalPath(path[len(mount_point):]) corrected_path = utils.NormalizePath(corrected_path) volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip('\\') volume = LocalPathToCanonicalPath(volume) result = rdf_paths.PathSpec(path=volume, pathtype=rdf_paths.PathSpec.PathType.OS, mount_point=mount_point.rstrip('\\')) return (result, corrected_path)
Resolves the raw device that contains the path. Args: path: A path to examine. Returns: A pathspec to read the raw device as well as the modified path to read within the raw device. This is usually the path without the mount point. Raises: IOError: if the path does not exist or some unexpected behaviour occurs.
codesearchnet
async def set_headline(self, name, level, message): if (name not in self.services): raise ArgumentError('Unknown service name', short_name=name) self.services[name]['state'].set_headline(level, message) headline = self.services[name]['state'].headline.to_dict() (await self._notify_update(name, 'new_headline', headline))
Set the sticky headline for a service. Args: name (string): The short name of the service to query level (int): The level of the message (info, warning, error) message (string): The message contents
codesearchnet
def compute_shader(self, source) -> 'ComputeShader': res = ComputeShader.__new__(ComputeShader) res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source) members = {} for item in ls1: obj = Uniform.__new__(Uniform) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls2: obj = UniformBlock.__new__(UniformBlock) obj.mglo, obj._index, obj._size, obj._name = item members[obj.name] = obj res._members = members res.ctx = self res.extra = None return res
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information. While it can do rendering, it is generally used for tasks not directly related to drawing. Args: source (str): The source of the compute shader. Returns: :py:class:`ComputeShader` object
juraj-google-style
def _RunAction(self, rule, client_id): actions_count = 0 try: if self._CheckIfHuntTaskWasAssigned(client_id, rule.hunt_id): logging.info( "Foreman: ignoring hunt %s on client %s: was started " "here before", client_id, rule.hunt_id) else: logging.info("Foreman: Starting hunt %s on client %s.", rule.hunt_id, client_id) if rule.hunt_name: flow_cls = registry.AFF4FlowRegistry.FlowClassByName(rule.hunt_name) hunt_urn = rdfvalue.RDFURN("aff4:/hunts/%s" % rule.hunt_id) flow_cls.StartClients(hunt_urn, [client_id]) else: hunt.StartHuntFlowOnClient(client_id, rule.hunt_id) actions_count += 1 except Exception as e: logging.exception("Failure running foreman action on client %s: %s", rule.hunt_id, e) return actions_count
Run all the actions specified in the rule. Args: rule: Rule which actions are to be executed. client_id: Id of a client where rule's actions are to be executed. Returns: Number of actions started.
juraj-google-style
def dependency_of_fetches(fetches, op): try: from tensorflow.python.client.session import _FetchHandler as FetchHandler handler = FetchHandler(op.graph, fetches, {}) targets = tuple(handler.fetches() + handler.targets()) except ImportError: if isinstance(fetches, list): targets = tuple(fetches) elif isinstance(fetches, dict): raise ValueError("Don't know how to parse dictionary to fetch list! " "This is a bug of tensorpack.") else: targets = (fetches, ) return dependency_of_targets(targets, op)
Check that op is in the subgraph induced by the dependencies of fetches. fetches may have more general structure. Args: fetches: An argument to `sess.run`. Nested structure will affect performance. op (tf.Operation or tf.Tensor): Returns: bool: True if any of `fetches` depend on `op`.
juraj-google-style
def label_matrix_to_one_hot(L, k=None): n, m = L.shape if k is None: k = L.max() L_onehot = torch.zeros(n, m, k + 1) for i, row in enumerate(L): for j, k in enumerate(row): if k > 0: L_onehot[i, j, k - 1] = 1 return L_onehot
Converts a 2D [n,m] label matrix into an [n,m,k] one hot 3D tensor Note that in the returned 3D matrix, abstain votes continue to be represented by 0s, not 1s. Args: L: a [n,m] label matrix with categorical labels (0 = abstain) k: the number of classes that could appear in L if None, k is inferred as the max element in L
juraj-google-style
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode: if external_id is None: graph: SpotifyArtistGraph = self._graph items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name) for item in items: if item.name == name: external_id = item.external_id break return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id)
Returns a new `SpotifyArtistNode` instance with the given index and name. Arguments: index (int): The index of the node to create. name (str): The name of the node to create. external_id (Optional[str]): The external ID of the node.
juraj-google-style
def GetZipInfoByPathSpec(self, path_spec): location = getattr(path_spec, 'location', None) if location is None: raise errors.PathSpecError('Path specification missing location.') if not location.startswith(self.LOCATION_ROOT): raise errors.PathSpecError('Invalid location in path specification.') if len(location) > 1: return self._zip_file.getinfo(location[1:]) return None
Retrieves the ZIP info for a path specification. Args: path_spec (PathSpec): a path specification. Returns: zipfile.ZipInfo: a ZIP info object or None if not available. Raises: PathSpecError: if the path specification is incorrect.
juraj-google-style
def crscode_to_string(codetype, code, format): link = 'http: result = urllib2.urlopen(link).read() if not isinstance(result, str): result = result.decode() return result
Lookup crscode on spatialreference.org and return in specified format. Arguments: - *codetype*: "epsg", "esri", or "sr-org". - *code*: The code. - *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others... Returns: - Crs string in the specified format.
juraj-google-style
def micros_to_timestamp(micros, timestamp): seconds = long((micros / _MICROS_PER_SECOND)) micro_remainder = (micros % _MICROS_PER_SECOND) timestamp.seconds = seconds timestamp.nanos = (micro_remainder * _NANOS_PER_MICRO)
Convert microseconds from utc epoch to google.protobuf.timestamp. Args: micros: a long, number of microseconds since utc epoch. timestamp: a google.protobuf.timestamp.Timestamp to populate.
codesearchnet
def activate_backup_image(reset=False): dn = "sys/rack-unit-1/mgmt/fw-boot-def/bootunit-combined" r = "no" if reset is True: r = "yes" inconfig = .format(r) ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) return ret
Activates the firmware backup image. CLI Example: Args: reset(bool): Reset the CIMC device on activate. .. code-block:: bash salt '*' cimc.activate_backup_image salt '*' cimc.activate_backup_image reset=True
juraj-google-style
def update(self, grads): grads = nest.flatten(grads) if distribute_lib.has_strategy() and distribute_lib.in_cross_replica_context(): distribution = distribute_lib.get_strategy() is_finite_per_replica = distribution.extended.call_for_each_replica(_is_all_finite, args=(grads,)) is_finite = distribution.experimental_local_results(is_finite_per_replica)[0] else: is_finite = _is_all_finite(grads) def update_if_finite_grads(): def incr_loss_scale(): new_loss_scale = self.current_loss_scale * self.multiplier return control_flow_ops.group(_assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0)) return cond.cond(self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1))) def update_if_not_finite_grads(): new_loss_scale = math_ops.maximum(self.current_loss_scale / self.multiplier, 1) return control_flow_ops.group(self.counter.assign(0), self.current_loss_scale.assign(new_loss_scale)) update_op = cond.cond(is_finite, update_if_finite_grads, update_if_not_finite_grads) should_apply_gradients = is_finite return (update_op, should_apply_gradients)
Updates the value of the loss scale. Args: grads: A nested structure of unscaled gradients, each which is an all-reduced gradient of the loss with respect to a weight. Returns: update_op: In eager mode, None. In graph mode, an op to update the loss scale. should_apply_gradients: Either a bool or a scalar boolean tensor. If False, the caller should skip applying `grads` to the variables this step.
github-repos
def decode(self, ids): _, tmp_file_path = tempfile.mkstemp() wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids)) return tmp_file_path
Transform a sequence of float32 into a waveform. Args: ids: list of integers to be converted. Returns: Path to the temporary file where the waveform was saved. Raises: ValueError: if the ids are not of the appropriate size.
juraj-google-style
def removeTags(dom): try: string_type = basestring except NameError: string_type = str element_stack = None if (type(dom) in [list, tuple]): element_stack = dom elif isinstance(dom, HTMLElement): element_stack = (dom.childs if dom.isTag() else [dom]) elif isinstance(dom, string_type): element_stack = parseString(dom).childs else: element_stack = dom output = '' while element_stack: el = element_stack.pop(0) if (not (el.isTag() or el.isComment() or (not el.getTagName()))): output += el.__str__() if el.childs: element_stack = (el.childs + element_stack) return output
Remove all tags from `dom` and obtain plaintext representation. Args: dom (str, obj, array): str, HTMLElement instance or array of elements. Returns: str: Plain string without tags.
codesearchnet
def find(self, title): if title not in self._titles: raise KeyError(title) return self._titles[title][0]
Return the first worksheet with the given title. Args: title(str): title/name of the worksheet to return Returns: WorkSheet: contained worksheet object Raises: KeyError: if the spreadsheet has no no worksheet with the given ``title``
juraj-google-style
def get_processid(config): pidfile = config.get('daemon', 'pidfile', fallback=None) if pidfile is None: raise ValueError("Configuration doesn't have pidfile option!") try: with open(pidfile, 'r') as _file: pid = _file.read().rstrip() try: pid = int(pid) except ValueError: raise ValueError("stale pid file with invalid data:{}" .format(pid)) else: if pid in [-1, 1]: raise ValueError("invalid PID ({})".format(pid)) else: return pid except OSError as exc: if exc.errno == 2: print("CRITICAL: anycast-healthchecker could be down as pid file " "{} doesn't exist".format(pidfile)) sys.exit(2) else: raise ValueError("error while reading pid file:{}".format(exc))
Return process id of anycast-healthchecker. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. Returns: The process id found in the pid file Raises: ValueError in the following cases - pidfile option is missing from the configuration - pid is either -1 or 1 - stale pidfile, either with no data or invalid data - failure to read pidfile
juraj-google-style
def _read_file(file_name): with open(file_name) as config_file: data = json.load(config_file) return data
Read the file content and load it as JSON. Arguments: file_name (:py:class:`str`): The filename. Returns: :py:class:`dict`: The loaded JSON data. Raises: :py:class:`FileNotFoundError`: If the file is not found.
juraj-google-style
def _get_first_approximation(self): equalities = set(chain((implication.extract_equalities() for _, _, implication in self._iter_implications()))).union(self.ground_truth.extract_equalities()) var_assignments = {} value_assignments = {} for var in self.variables: var_assignments[var] = {var} value_assignments[var] = self._get_nonfalse_values(var) for var, value in equalities: if value in self.variables: other_var = value value_assignments[var] |= value_assignments[other_var] for var_assignment in var_assignments[other_var]: var_assignments[var].add(var_assignment) var_assignments[var_assignment] = var_assignments[var] value_assignments[var_assignment] = value_assignments[var] else: value_assignments[var].add(value) return value_assignments
Get all (variable, value) combinations to consider. This gets the (variable, value) combinations that the solver needs to consider based on the equalities that appear in the implications. E.g., with the following implication: t1 = v1 => t1 = t2 | t3 = v2 the combinations to consider are (t1, v1) because t1 = v1 appears, (t2, v1) because t1 = t2 and t1 = v1 appear, and (t3, v2) because t3 = v2 appears. Returns: A dictionary D mapping strings (variables) to sets of strings (values). For two variables t1 and t2, if t1 = t2 is a possible assignment (by first approximation), then D[t1] and D[t2] point to the same memory location.
github-repos
def __getattr__(self, attr): if not self._protocol: raise usb_exceptions.HandleClosedError() val = getattr(self._protocol, attr) if callable(val): def _retry_wrapper(*args, **kwargs): result = _retry_usb_function(self._num_retries, val, *args, **kwargs) _LOG.debug('LIBUSB FASTBOOT: %s(*%s, **%s) -> %s', attr, args, kwargs, result) return result return _retry_wrapper return val
Fallthrough to underlying FastbootProtocol handler. Args: attr: Attribute to get. Returns: Either the attribute from the device or a retrying function-wrapper if attr is a method on the device.
juraj-google-style
def _get_tensors_for_gradient(x): if not isinstance(x, composite_tensor.CompositeTensor): return x if not isinstance(x, CompositeTensorGradientProtocol): raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source or gradient target.') composite_gradient = x.__composite_gradient__ gradient_components = composite_gradient.get_gradient_components(x) if gradient_components is x: return x return nest.map_structure(_get_tensors_for_gradient, gradient_components)
Returns the Tensors in `x` that should be differentiated. Args: x: A `Tensor` or `CompositeTensor`. Returns: A `Tensor` or a nested structure of `Tensor`.
github-repos
def add_output(self, name, value): self.template.add_output(Output(name, Value=value))
Simple helper for adding outputs. Args: name (str): The name of the output to create. value (str): The value to put in the output.
juraj-google-style
def _parse_hparams(hparams): prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] ret = [] for prefix in prefixes: ret_dict = {} for key in hparams.values(): if prefix in key: par_name = key[len(prefix):] ret_dict[par_name] = hparams.get(key) ret.append(ret_dict) return ret
Split hparams, based on key prefixes. Args: hparams: hyperparameters Returns: Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer.
juraj-google-style
def release(self, subnets): if (isinstance(subnets, str) or isinstance(subnets, IPNetwork)): subnets = [subnets] subnets_iter = ((str(subnet) if isinstance(subnet, IPNetwork) else subnet) for subnet in subnets) try: with self._create_lock(): for subnet in subnets_iter: self._release(self.create_lease_object_from_subnet(subnet)) except (utils.TimerException, IOError): raise LagoSubnetLeaseLockException(self.path)
Free the lease of the given subnets Args: subnets (list of str or netaddr.IPAddress): dotted ipv4 subnet in CIDR notation (for example ```192.168.200.0/24```) or IPAddress object. Raises: LagoSubnetLeaseException: If subnet is a str and can't be parsed LagoSubnetLeaseLockException: If the lock to self.path can't be acquired.
codesearchnet
def read_from_tfrecord(file_pattern: str, coder: Optional[coders.BytesCoder]=coders.BytesCoder(), compression_type: str='AUTO', validate: Optional[bool]=True): return ReadFromTFRecord(file_pattern=file_pattern, compression_type=getattr(CompressionTypes, compression_type), validate=validate) | beam.Map(lambda s: beam.Row(record=s))
Reads data from TFRecord. Args: file_pattern (str): A file glob pattern to read TFRecords from. coder (coders.BytesCoder): Coder used to decode each record. compression_type (CompressionTypes): Used to handle compressed input files. Default value is CompressionTypes.AUTO, in which case the file_path's extension will be used to detect the compression. validate (bool): Boolean flag to verify that the files exist during the pipeline creation time.
github-repos
def parse_genetic_models(models_info, case_id): genetic_models = [] if models_info: for family_info in models_info.split(','): splitted_info = family_info.split(':') if (splitted_info[0] == case_id): genetic_models = splitted_info[1].split('|') return genetic_models
Parse the genetic models entry of a vcf Args: models_info(str): The raw vcf information case_id(str) Returns: genetic_models(list)
codesearchnet
def get_average_voltage(self, min_voltage=None, max_voltage=None): pairs_in_range = self._select_in_voltage_range(min_voltage, max_voltage) if len(pairs_in_range) == 0: return 0 total_cap_in_range = sum([p.mAh for p in pairs_in_range]) total_edens_in_range = sum([p.mAh * p.voltage for p in pairs_in_range]) return total_edens_in_range / total_cap_in_range
Average voltage for path satisfying between a min and max voltage. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage allowable for a given step. Returns: Average voltage in V across the insertion path (a subset of the path can be chosen by the optional arguments)
juraj-google-style
def authorization_code_pkce(self, client_id, code_verifier, code, redirect_uri, grant_type='authorization_code'): return self.post('https:
Authorization code pkce grant This is the OAuth 2.0 grant that mobile apps utilize in order to access an API. Use this endpoint to exchange an Authorization Code for a Token. Args: grant_type (str): Denotes the flow you're using. For authorization code pkce use authorization_code client_id (str): your application's client Id code_verifier (str): Cryptographically random key that was used to generate the code_challenge passed to /authorize. code (str): The Authorization Code received from the /authorize Calls redirect_uri (str, optional): This is required only if it was set at the GET /authorize endpoint. The values must match Returns: access_token, id_token
codesearchnet
def get_policies_from_aws(client, scope='Local'): done = False marker = None policies = [] while (not done): if marker: response = client.list_policies(Marker=marker, Scope=scope) else: response = client.list_policies(Scope=scope) policies += response['Policies'] if response['IsTruncated']: marker = response['Marker'] else: done = True return policies
Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the policies for the specified scope Args: client (:obj:`boto3.session.Session`): A boto3 Session object scope (`str`): The policy scope to use. Default: Local Returns: :obj:`list` of `dict`
codesearchnet
def events_from_file(filepath): records = list(tf_record.tf_record_iterator(filepath)) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result
Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.compat.v1.Event protos in the event file.
github-repos
def _TopKGrad(op: ops.Operation, grad, _): in_shape = array_ops.shape(op.inputs[0]) ind_shape = array_ops.shape(op.outputs[1]) ind_lastdim = array_ops.gather(math_ops.cast(ind_shape, dtypes.int64), array_ops.size(ind_shape) - 1) ind_2d = array_ops.reshape(op.outputs[1], array_ops_stack.stack([-1, ind_lastdim])) in_lastdim = array_ops.gather(math_ops.cast(in_shape, dtypes.int64), array_ops.size(in_shape) - 1) outerdim = array_ops.shape(ind_2d)[0] ind = array_ops.reshape(ind_2d + math_ops.cast(array_ops.expand_dims(math_ops.range(0, math_ops.cast(outerdim, dtypes.int64) * in_lastdim, in_lastdim), -1), dtypes.int32), [-1]) return [array_ops.reshape(array_ops.scatter_nd(array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]), [math_ops.reduce_prod(in_shape)]), in_shape), array_ops.zeros([], dtype=dtypes.int32)]
Return the gradients for TopK. Args: op: The TopKOp for which we need to generate gradients. grad: Tensor. The gradients passed to the TopKOp. Returns: A list of two tensors, the first being the gradient w.r.t to the input and TopK, and the second being the gradient w.r.t. to the indices (all zero).
github-repos
def fmt_addr_raw(addr, reverse=True): addr = addr.replace(':', '') raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)] if reverse: raw_addr.reverse() if sys.version_info[0] == 2: return str(bytearray(raw_addr)) return bytearray(raw_addr)
Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence. Args: addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format. reverse (bool): True if the byte ordering should be reversed in the output. Returns: A bytearray containing the converted address.
juraj-google-style
def read_dftbp(filename): infile = open(filename, 'r') lines = infile.readlines() for ss in lines: if ss.strip().startswith(' lines.remove(ss) natoms = int(lines[0].split()[0]) symbols = lines[1].split() if (lines[0].split()[1].lower() == 'f'): is_scaled = True scale_pos = 1 scale_latvecs = dftbpToBohr else: is_scaled = False scale_pos = dftbpToBohr scale_latvecs = dftbpToBohr positions = [] expaned_symbols = [] for ii in range(2, natoms+2): lsplit = lines[ii].split() expaned_symbols.append(symbols[int(lsplit[1]) - 1]) positions.append([float(ss)*scale_pos for ss in lsplit[2:5]]) origin = [float(ss) for ss in lines[natoms+2].split()] cell = [] for ii in range(natoms+3, natoms+6): lsplit = lines[ii].split() cell.append([float(ss)*scale_latvecs for ss in lsplit[:3]]) cell = np.array(cell) if is_scaled: atoms = Atoms(symbols=expaned_symbols, cell=cell, scaled_positions=positions) else: atoms = Atoms(symbols=expaned_symbols, cell=cell, positions=positions) return atoms
Reads DFTB+ structure files in gen format. Args: filename: name of the gen-file to be read Returns: atoms: an object of the phonopy.Atoms class, representing the structure found in filename
juraj-google-style
def migrate_database(adapter): all_variants = adapter.get_variants() nr_variants = all_variants.count() nr_updated = 0 with progressbar(all_variants, label="Updating variants", length=nr_variants) as bar: for variant in bar: if 'chrom' in variant: continue nr_updated += 1 splitted_id = variant['_id'].split('_') chrom = splitted_id[0] start = int(splitted_id[1]) ref = splitted_id[2] alt = splitted_id[3] end = start + (max(len(ref), len(alt)) - 1) adapter.db.variant.find_one_and_update( {'_id': variant['_id']}, { '$set': { 'chrom': chrom, 'start': start, 'end': end } } ) return nr_updated
Migrate an old loqusdb instance to 1.0 Args: adapter Returns: nr_updated(int): Number of variants that where updated
juraj-google-style
def str_internal(self, is_recursive=False): printable_name = self.__class__.__name__ if hasattr(self, 'step_name'): printable_name += ' %s' % self.name_context.logging_name() if is_recursive: return '<%s>' % printable_name if self.spec is None: printable_fields = [] else: printable_fields = operation_specs.worker_printable_fields(self.spec) if not is_recursive and getattr(self, 'receivers', []): printable_fields.append('receivers=[%s]' % ', '.join([str(receiver) for receiver in self.receivers])) return '<%s %s>' % (printable_name, ', '.join(printable_fields))
Internal helper for __str__ that supports recursion. When recursing on receivers, keep the output short. Args: is_recursive: whether to omit some details, particularly receivers. Returns: Compact string representing this object.
github-repos
def create_handler(Model, name=None, **kwds): async def action_handler(service, action_type, payload, props, notify=True, **kwds): if (action_type == get_crud_action('create', (name or Model))): try: message_props = {} if ('correlation_id' in props): message_props['correlation_id'] = props['correlation_id'] for requirement in Model.required_fields(): field_name = requirement.name if ((not (field_name in payload)) and (field_name != 'id')): raise ValueError(('Required field not found in payload: %s' % field_name)) new_model = Model(**payload) new_model.save() if notify: (await service.event_broker.send(payload=ModelSerializer().serialize(new_model), action_type=change_action_status(action_type, success_status()), **message_props)) except Exception as err: if notify: (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props)) else: raise err return action_handler
This factory returns an action handler that creates a new instance of the specified model when a create action is recieved, assuming the action follows nautilus convetions. Args: Model (nautilus.BaseModel): The model to create when the action received. Returns: function(action_type, payload): The action handler for this model
codesearchnet
async def iter(self, url: Union[(str, methods)], data: Optional[MutableMapping]=None, headers: Optional[MutableMapping]=None, *, limit: int=200, iterkey: Optional[str]=None, itermode: Optional[str]=None, minimum_time: Optional[int]=None, as_json: Optional[bool]=None) -> AsyncIterator[dict]: itervalue = None if (not data): data = {} last_request_time = None while True: current_time = time.time() if (minimum_time and last_request_time and ((last_request_time + minimum_time) > current_time)): (await self.sleep(((last_request_time + minimum_time) - current_time))) (data, iterkey, itermode) = sansio.prepare_iter_request(url, data, iterkey=iterkey, itermode=itermode, limit=limit, itervalue=itervalue) last_request_time = time.time() response_data = (await self.query(url, data, headers, as_json)) itervalue = sansio.decode_iter_request(response_data) for item in response_data[iterkey]: (yield item) if (not itervalue): break
Iterate over a slack API method supporting pagination When using :class:`slack.methods` the request is made `as_json` if available Args: url: :class:`slack.methods` or url string data: JSON encodable MutableMapping headers: limit: Maximum number of results to return per call. iterkey: Key in response data to iterate over (required for url string). itermode: Iteration mode (required for url string) (one of `cursor`, `page` or `timeline`) minimum_time: Minimum elapsed time (in seconds) between two calls to the Slack API (default to 0). If not reached the client will sleep for the remaining time. as_json: Post JSON to the slack API Returns: Async iterator over `response_data[key]`
codesearchnet
def get_object_metadata(self, request): file_ = self.get_file(request.bucket, request.object) return file_.get_metadata()
Retrieves an object's metadata. Args: request: (GetRequest) input message Returns: (Item) The response message.
github-repos
def should_stop_early(self) -> bool: if not self._trial.measurements: return False return self._should_stop_early_fn(self._trial)
Tells whether current trial should be stopped early. In `pg.sample`, an optional `EarlyStoppingPolicy` can be provided, which is useful for terminating trials which are progressive evaluated. Progressive evaluation on examples can be achieved by calling `feedback.add_measurement` multiple times at different steps. In-between these steps, users can call this method to determine if current trial is considered less competitive by the early stopping policy, and thus can be abandoned. In that case, users should call `feedback.skip()` to abandon current trial without feeding back the reward to the search algorithm. Returns: If current trial can be stopped early.
github-repos
def _load_partition_graphs(self, client_partition_graphs, validate): self._debug_graphs = {} self._node_devices = {} partition_graphs_and_device_names = [] for device_name in self._device_names: partition_graph = None if device_name in self._dump_graph_file_paths: partition_graph = _load_graph_def_from_event_file(self._dump_graph_file_paths[device_name]) else: logging.warn('Failed to load partition graphs for device %s from disk. As a fallback, the client graphs will be used. This may cause mismatches in device names.' % device_name) partition_graph = self._find_partition_graph(client_partition_graphs, device_name) if partition_graph: partition_graphs_and_device_names.append((partition_graph, device_name)) for partition_graph, maybe_device_name in partition_graphs_and_device_names: debug_graph = debug_graphs.DebugGraph(partition_graph, device_name=maybe_device_name) self._debug_graphs[debug_graph.device_name] = debug_graph self._collect_node_devices(debug_graph) if validate and debug_graph.device_name in self._dump_tensor_data: self._validate_dump_with_graphs(debug_graph.device_name)
Load and process partition graphs. Load the graphs; parse the input and control input structure; obtain the device and op type of each node; remove the Copy and debug ops inserted by the debugger. The gathered information can be used to validate the tensor dumps. Args: client_partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime, from the Python client. These partition graphs are used only if partition graphs cannot be loaded from the dump directory on the file system. validate: (`bool`) Whether the dump files are to be validated against the partition graphs. Raises: ValueError: If the partition GraphDef of one or more devices fail to be loaded.
github-repos
def quantize(self, input_grid):
    """Quantize a grid into discrete steps based on the object's parameters.

    Values are shifted by ``self.min_thresh``, scaled by
    ``self.data_increment``, clipped to ``self.max_bin`` at the top, and
    marked with a -1 sentinel below threshold.

    Args:
        input_grid: 2-d array of values.

    Returns:
        Tuple of (dict mapping each quantized value to a list of (row, col)
        pixel locations, quantized 2-d array of data).
    """
    # One (initially empty) location list per quantization bin.
    pixels = {bin_index: [] for bin_index in range(self.max_bin + 1)}
    data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment
    # Below-threshold cells get the -1 sentinel; above-range cells cap at max_bin.
    data[data < 0] = -1
    data[data > self.max_bin] = self.max_bin
    rows, cols = np.where(data >= 0)
    for row, col in zip(rows, cols):
        pixels[data[row, col]].append((row, col))
    return pixels, data
Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary mapping each quantized value to its pixel locations, and quantized 2-d array of data
juraj-google-style
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected, data_format, dtype, use_gpu, op_name):
    """Verifies the output values of the convolution function.

    Builds the op via `self._SetupValuesForDevice`, evaluates it in a cached
    session, and checks the flattened result against `expected`.

    Args:
        tensor_in_sizes: Input tensor dimensions
            [batch, input_x, input_y, input_z, input_depth].
        filter_in_sizes: Filter tensor dimensions
            [kernel_x, kernel_y, kernel_z, input_depth, output_depth].
        stride: [x_stride, y_stride, z_stride].
        padding: Padding type.
        expected: Value that the output of the computation should match.
        data_format: Format of the data tensors.
        dtype: Data type for inputs and outputs.
        use_gpu: True if the operations should be run on GPU.
        op_name: Name of the op to be tested.
    """
    # Skip rather than fail when a GPU run is requested on a CPU-only host.
    if use_gpu and (not test.is_gpu_available(cuda_only=True)):
        self.skipTest('GPU not available')
    results = []
    result = self._SetupValuesForDevice(tensor_in_sizes, filter_in_sizes, stride, padding, data_format, dtype, use_gpu=use_gpu, op_name=op_name)
    results.append(result)
    with self.cached_session() as sess:
        values = self.evaluate(results)
        for value in values:
            tf_logging.debug('expected = %s', expected)
            tf_logging.debug('actual = %s', value)
            # Tolerance is chosen per-dtype by assertAllCloseAccordingToType.
            self.assertAllCloseAccordingToType(expected, value.flatten())
Verifies the output values of the convolution function. Args: tensor_in_sizes: Input tensor dimensions [batch, input_x, input_y, input_z, input_depth]. filter_in_sizes: Filter tensor dimensions [kernel_x, kernel_y, kernel_z, input_depth, output_depth]. stride: [x_stride, y_stride, z_stride] padding: Padding type. expected: Value that the output of computation should match data_format: Format of the data tensors. dtype: Data type for inputs and outputs. use_gpu: True if the operations should be run on GPU op_name: Name of the op to be tested Returns: None
github-repos
def alias_inplace_update(x, i, v):
    """Applies an inplace update on input x at index i with value v. Aliases x.

    If i is None, x and v must be the same shape. Computes x = v.
    If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] = v.
    Otherwise, x and v must have the same rank. Computes x[i, :] = v.

    Args:
        x: A Tensor.
        i: None, a scalar or a vector.
        v: A Tensor.

    Returns:
        Returns x.
    """
    # Delegate to the shared helper with the raw update kernel.
    update_kernel = gen_array_ops.inplace_update
    return _inplace_helper(x, i, v, update_kernel)
Applies an inplace update on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x = v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] = v; Otherwise, x and v must have the same rank. Computes x[i, :] = v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns x.
github-repos
def parse_location(location):
    """Parse latitude and longitude from a string location.

    Accepts decimal-degree pairs (optionally suffixed with N/S/E/W) separated
    by ';', ',' or ' ', or four-chunk degree/minute/second forms.

    Args:
        location (str): Text to parse.

    Returns:
        tuple of float: Latitude and longitude of location, or None if no
        supported format matched.
    """

    def split_dms(text, hemisphere):
        """Convert a degrees/minutes/seconds string to decimal degrees.

        Args:
            text (str): Text to split.

        Returns:
            float: Decimal degrees.
        """
        groups = []
        current = []
        for char in text:
            if char.isdigit():
                current.append(char)
            else:
                # Any non-digit terminates the current numeric run.
                groups.append(current)
                current = []
        degrees, minutes, seconds = (float(''.join(group)) for group in groups)
        if hemisphere in 'SW':
            # Southern/western hemispheres are negative.
            degrees, minutes, seconds = (-degrees, -minutes, -seconds)
        return to_dd(degrees, minutes, seconds)

    def parse_dd(chunk, positive, negative):
        """Parse one decimal-degrees chunk with an optional hemisphere suffix."""
        if chunk.endswith(positive):
            return float(chunk[:-1])
        if chunk.endswith(negative):
            return -float(chunk[:-1])
        return float(chunk)

    # Try each candidate separator in turn; first one that yields a known
    # chunk count wins.
    for separator in ';, ':
        chunks = location.split(separator)
        if len(chunks) == 2:
            latitude = parse_dd(chunks[0], 'N', 'S')
            longitude = parse_dd(chunks[1], 'E', 'W')
            return (latitude, longitude)
        elif len(chunks) == 4:
            if chunks[0].endswith(('s', '"')):
                latitude = split_dms(chunks[0], chunks[1])
            else:
                latitude = float(chunks[0])
                if chunks[1] == 'S':
                    latitude = -latitude
            if chunks[2].endswith(('s', '"')):
                longitude = split_dms(chunks[2], chunks[3])
            else:
                longitude = float(chunks[2])
                if chunks[3] == 'W':
                    longitude = -longitude
            return (latitude, longitude)
Parse latitude and longitude from string location. Args: location (str): String to parse Returns: tuple of float: Latitude and longitude of location
codesearchnet
def _validate_testbed_name(name):
    """Validates the name of a test bed.

    Since test bed names are used as part of the test run id, they may only
    contain characters valid in filenames.

    Args:
        name: The test bed's name specified in config file.

    Raises:
        MoblyConfigError: The name is empty or contains a disallowed char.
    """
    if not name:
        raise MoblyConfigError("Test bed names can't be empty.")
    # Scan character by character so the error names the first offender.
    for char in str(name):
        if char in utils.valid_filename_chars:
            continue
        raise MoblyConfigError('Char "%s" is not allowed in test bed names.' % char)
Validates the name of a test bed. Since test bed names are used as part of the test run id, it needs to meet certain requirements. Args: name: The test bed's name specified in config file. Raises: MoblyConfigError: The name does not meet any criteria.
github-repos
def write(self, session, directory, name, replaceParamFile=None, **kwargs):
    """Write from database back to file.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session
            object bound to PostGIS enabled database.
        directory (str): Directory where the file will be written.
        name (str): The name of the file that will be created (including the
            file extension is optional).
        replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional):
            ReplaceParamFile instance. Use this if the file you are writing
            contains replacement parameters.
    """
    # Split an explicit extension off the name, if the caller supplied one.
    name_split = name.split('.')
    name = name_split[0]
    extension = ''
    if len(name_split) >= 2:
        extension = name_split[-1]
    # Subclass hook to massage the base name. This is best-effort: any
    # failure falls back to the unprocessed name. Catch Exception rather
    # than a bare except so KeyboardInterrupt/SystemExit still propagate.
    try:
        name = self._namePreprocessor(name)
    except Exception:
        pass
    # Default to the class's canonical extension when none was given.
    if extension == '':
        filename = '{0}.{1}'.format(name, self.fileExtension)
    else:
        filename = '{0}.{1}'.format(name, extension)
    filePath = os.path.join(directory, filename)
    with io_open(filePath, 'w') as openFile:
        self._write(session=session, openFile=openFile, replaceParamFile=replaceParamFile, **kwargs)
Write from database back to file. Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. directory (str): Directory where the file will be written. name (str): The name of the file that will be created (including the file extension is optional). replaceParamFile (:class:`gsshapy.orm.ReplaceParamFile`, optional): ReplaceParamFile instance. Use this if the file you are writing contains replacement parameters.
codesearchnet
def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
    """Returns a rotated copy of `image`.

    The returned copy is rotated `angle` degrees counter clockwise around
    its centre (or `center` if given), after converting non-PIL inputs to
    `PIL.Image.Image`.

    Args:
        image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
            The image to rotate. If `np.ndarray` or `torch.Tensor`, will be
            converted to `PIL.Image.Image` before rotating.

    Returns:
        image: A rotated `PIL.Image.Image`.
    """
    if resample is None:
        resample = PIL.Image.NEAREST
    self._ensure_format_supported(image)
    # Arrays/tensors must become PIL images before PIL's rotate can run.
    if isinstance(image, PIL.Image.Image):
        pil_image = image
    else:
        pil_image = self.to_pil_image(image)
    return pil_image.rotate(angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor)
Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees counter clockwise around its centre. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before rotating. Returns: image: A rotated `PIL.Image.Image`.
github-repos
def true_num_genes(model, custom_spont_id=None):
    """Return the number of genes in a model ignoring spontaneously labeled genes.

    Args:
        model (Model):
        custom_spont_id (str): Optional custom spontaneous ID if it does not
            match the regular expression ``[Ss](_|)0001``

    Returns:
        int: Number of genes excluding spontaneous genes
    """
    return sum(1 for gene in model.genes if not is_spontaneous(gene, custom_id=custom_spont_id))
Return the number of genes in a model ignoring spontaneously labeled genes. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of genes excluding spontaneous genes
juraj-google-style
def create_analyzer_ui(debug_dump, tensor_filters=None, ui_type='readline', on_ui_exit=None, config=None):
    """Create an instance of ReadlineUI based on a DebugDumpDir object.

    Args:
        debug_dump: (debug_data.DebugDumpDir) The debug dump to use.
        tensor_filters: (dict) A dict mapping tensor filter name (str) to
            tensor filter (Callable).
        ui_type: (str) requested UI type, only "readline" is supported.
        on_ui_exit: (`Callable`) the callback to be called when the UI exits.
        config: A `cli_config.CLIConfig` object.

    Returns:
        (base_ui.BaseUI) A BaseUI subtype object with a set of standard
        analyzer commands and tab-completions registered.
    """
    if config is None:
        config = cli_config.CLIConfig()
    analyzer = DebugAnalyzer(debug_dump, config=config)
    if tensor_filters:
        for filter_name, tensor_filter in tensor_filters.items():
            analyzer.add_tensor_filter(filter_name, tensor_filter)
    cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit, config=config)
    # (command name, handler, short alias) triples for the standard commands.
    command_specs = [
        ('list_tensors', analyzer.list_tensors, 'lt'),
        ('node_info', analyzer.node_info, 'ni'),
        ('list_inputs', analyzer.list_inputs, 'li'),
        ('list_outputs', analyzer.list_outputs, 'lo'),
        ('print_tensor', analyzer.print_tensor, 'pt'),
        ('print_source', analyzer.print_source, 'ps'),
        ('list_source', analyzer.list_source, 'ls'),
        ('eval', analyzer.evaluate_expression, 'ev'),
    ]
    for command, handler, alias in command_specs:
        cli.register_command_handler(command, handler, analyzer.get_help(command), prefix_aliases=[alias])
    # Tab completion over every dumped tensor, as "node_name:output_slot".
    dumped_tensor_names = ['%s:%d' % (datum.node_name, datum.output_slot) for datum in debug_dump.dumped_tensor_data]
    cli.register_tab_comp_context(['print_tensor', 'pt'], dumped_tensor_names)
    return cli
Create an instance of ReadlineUI based on a DebugDumpDir object. Args: debug_dump: (debug_data.DebugDumpDir) The debug dump to use. tensor_filters: (dict) A dict mapping tensor filter name (str) to tensor filter (Callable). ui_type: (str) requested UI type, only "readline" is supported. on_ui_exit: (`Callable`) the callback to be called when the UI exits. config: A `cli_config.CLIConfig` object. Returns: (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer commands and tab-completions registered.
github-repos
def gremove(pattern):
    """Remove all files found by glob.glob(pattern).

    Stops at the first failed removal, exactly like a short-circuiting loop.

    Args:
        pattern (str): Pattern of files to remove.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    return all(remove(match) for match in glob.glob(pattern))
Remove all files found by glob.glob(pattern). Args: pattern (str): Pattern of files to remove Returns: bool: True if the operation is successful, False otherwise.
juraj-google-style
def monitoring_helper(service_addr, duration_ms, monitoring_level, num_queries):
    """Helper function to print monitoring results.

    Collects and prints `num_queries` monitoring samples from the TPU
    profiler service.

    Args:
        service_addr: Address of the TPU profiler service.
        duration_ms: Duration of one monitoring sample in milliseconds.
        monitoring_level: An integer between 1 and 2. Level 2 is more verbose
            than level 1 and shows more metrics.
        num_queries: Number of monitoring samples to collect.
    """
    # Reject out-of-range verbosity levels up front.
    if monitoring_level <= 0 or monitoring_level > 2:
        sys.exit('Please choose a monitoring level between 1 and 2.')
    for sample_index in range(num_queries):
        result = profiler_client.monitor(service_addr, duration_ms, monitoring_level)
        print('Cloud TPU Monitoring Results (Sample ', sample_index, '):\n\n', result)
Helper function to print monitoring results. Helper function to print monitoring results for num_queries times. Args: service_addr: Address of the TPU profiler service. duration_ms: Duration of one monitoring sample in milliseconds. monitoring_level: An integer between 1 and 2. Level 2 is more verbose than level 1 and shows more metrics. num_queries: Number of monitoring samples to collect.
github-repos
def attach_template(self, _template, _key, **unbound_var_values):
    """Attaches the template to this such that _key=this layer.

    Note: names were chosen to avoid conflicts with any likely
    unbound_var keys.

    Args:
        _template: The template to construct.
        _key: The key that this layer should replace.
        **unbound_var_values: The values for the unbound_vars.

    Returns:
        A new layer with operation applied.

    Raises:
        ValueError: If _key is specified twice or there is a problem computing
            the template.
    """
    if _key in unbound_var_values:
        raise ValueError('%s specified twice.' % _key)
    # Bind this layer as the value for _key before constructing the template.
    unbound_var_values[_key] = self
    layer = _template.as_layer()
    return layer.construct(**unbound_var_values)
Attaches the template to this such that _key=this layer. Note: names were chosen to avoid conflicts with any likely unbound_var keys. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template.
codesearchnet
def needs_keras_history(tensors, ignore_call_context=False):
    """Check if any Tensors need to be wrapped in TensorFlowOpLayers.

    This will never return True inside a sublayer, because sublayers do not
    need to create Keras History. Otherwise, returns True if one or more of
    `tensors` originates from a `keras.Input` and does not have
    `_keras_history` set.

    Args:
        tensors: An arbitrary nested structure of Tensors.
        ignore_call_context: Whether to ignore the check of if currently
            outside of a `call` context.

    Returns:
        Bool, whether at least one Tensor needs to be wrapped.
    """
    flat_tensors = nest.flatten(tensors)
    # Sublayers never create Keras history.
    if call_context().in_call and not ignore_call_context:
        return False
    # Only worth checking further if at least one tensor lacks history.
    if any(getattr(t, '_keras_history', None) is None for t in flat_tensors):
        return uses_keras_history(tensors)
    return False
Check if any Tensors need to be wrapped in TensorFlowOpLayers. This will never return True inside a sublayer, because sublayers do not need to create Keras History. Otherwise, this returns True if one or more of `tensors` originates from a `keras.Input` and does not have `_keras_history` set. Args: tensors: An arbitrary nested structure of Tensors. ignore_call_context: Whether to ignore the check of if currently outside of a `call` context. This is `True` when creating KerasHistory inside `Node`, where we always know that Tensors are being used with the Functional API. Returns: Bool, whether at least one Tensor needs to be wrapped.
github-repos
def recipe_sdf_to_bigquery(config, auth_write, partner_id, file_types, filter_type, filter_ids, dataset, version, table_suffix, time_partitioned_table, create_single_day_table):
    """Download SDF reports into a BigQuery table.

    Runs the `dataset` task to ensure the target BigQuery dataset exists,
    then the `sdf` task to fetch and load the SDF files.

    Args:
        config: StarThinker configuration object.
        auth_write (authentication): Credentials used for writing data.
        partner_id (integer): The partner id to download SDF files for.
        file_types (string_list): The sdf file types.
        filter_type (choice): The filter type for the filter ids.
        filter_ids (integer_list): Comma separated list of filter ids for the
            request.
        dataset (string): Dataset to be written to in BigQuery.
        version (choice): The sdf version to be returned.
        table_suffix (string): Optional suffix string to put at the end of the
            table name (must contain alphanumeric or underscores).
        time_partitioned_table (boolean): Whether the end table is time
            partitioned.
        create_single_day_table (boolean): Whether to create a separate table
            for each day.
    """
    # NOTE(review): the parameter `dataset` shadows the module-level `dataset`
    # task callable being invoked on the next line; if `dataset` is a plain
    # string as documented, this call raises TypeError. Confirm the import
    # aliasing at the top of the file.
    dataset(config, {'auth': auth_write, 'dataset': dataset})
    sdf(config, {'auth': 'user', 'version': version, 'partner_id': partner_id, 'file_types': file_types, 'filter_type': filter_type, 'read': {'filter_ids': {'single_cell': True, 'values': filter_ids}}, 'time_partitioned_table': time_partitioned_table, 'create_single_day_table': create_single_day_table, 'dataset': dataset, 'table_suffix': table_suffix})
Download SDF reports into a BigQuery table. Args: auth_write (authentication) - Credentials used for writing data. partner_id (integer) - The partner id to download SDF files for. file_types (string_list) - The sdf file types. filter_type (choice) - The filter type for the filter ids. filter_ids (integer_list) - Comma separated list of filter ids for the request. dataset (string) - Dataset to be written to in BigQuery. version (choice) - The sdf version to be returned. table_suffix (string) - Optional: Suffix string to put at the end of the table name (Must contain alphanumeric or underscores) time_partitioned_table (boolean) - Is the end table a time partitioned table? create_single_day_table (boolean) - Would you like a separate table for each day? This will result in an extra table each day and the end table with the most up to date SDF.
github-repos
def reflection(normal, origin=(0, 0, 0)):
    """Returns reflection symmetry operation.

    Args:
        normal (3x1 array): Vector of the normal to the plane of reflection.
        origin (3x1 array): A point through which the mirror plane passes.

    Returns:
        SymmOp for the reflection about the plane
    """
    # Normalize the plane normal.
    n = np.array(normal, dtype=float) / np.linalg.norm(normal)
    u, v, w = n
    # Affine translation moving `origin` to the coordinate origin.
    # (Fixed: `translation[(0:3, 3)]` was a syntax error; a slice cannot
    # appear inside a parenthesized tuple.)
    translation = np.eye(4)
    translation[0:3, 3] = -np.array(origin)
    # Householder reflection matrix for a plane through the origin.
    xx = 1 - 2 * u ** 2
    yy = 1 - 2 * v ** 2
    zz = 1 - 2 * w ** 2
    xy = -2 * u * v
    xz = -2 * u * w
    yz = -2 * v * w
    mirror_mat = [[xx, xy, xz, 0], [xy, yy, yz, 0], [xz, yz, zz, 0], [0, 0, 0, 1]]
    if np.linalg.norm(origin) > 1e-06:
        # Conjugate by the translation so the plane passes through `origin`.
        mirror_mat = np.dot(np.linalg.inv(translation), np.dot(mirror_mat, translation))
    return SymmOp(mirror_mat)
Returns reflection symmetry operation. Args: normal (3x1 array): Vector of the normal to the plane of reflection. origin (3x1 array): A point through which the mirror plane passes. Returns: SymmOp for the reflection about the plane
codesearchnet
def area_frac_vs_chempot_plot(self, ref_delu, chempot_range, delu_dict=None, delu_default=0, increments=10, no_clean=False, no_doped=False):
    """1D plot of each facet's Wulff area fraction vs chemical potential.

    Args:
        ref_delu (sympy Symbol): The free variable chempot with the format
            Symbol("delu_el") where el is the name of the element.
        chempot_range (list): Min/max range of chemical potential to plot along.
        delu_dict (dict): Dictionary of the chemical potentials to be set as
            constant, keyed by sympy Symbol("delu_el") objects.
        delu_default (float): Default value for all unset chemical potentials.
        increments (int): Number of sampled chemical potential points.
        no_clean (bool): Passed through to `wulff_from_chempot`.
        no_doped (bool): Passed through to `wulff_from_chempot`.

    Returns:
        (Pylab): Plot of area fraction on the Wulff shape for each facet vs
        chemical potential.
    """
    delu_dict = (delu_dict if delu_dict else {})
    chempot_range = sorted(chempot_range)
    # Sample `increments` chempot values across the requested range.
    all_chempots = np.linspace(min(chempot_range), max(chempot_range), increments)
    hkl_area_dict = {}
    for hkl in self.all_slab_entries.keys():
        hkl_area_dict[hkl] = []
    # Build one Wulff shape per sampled chempot and record each facet's
    # area fraction.
    for u in all_chempots:
        delu_dict[ref_delu] = u
        wulffshape = self.wulff_from_chempot(delu_dict=delu_dict, no_clean=no_clean, no_doped=no_doped, delu_default=delu_default)
        for hkl in wulffshape.area_fraction_dict.keys():
            hkl_area_dict[hkl].append(wulffshape.area_fraction_dict[hkl])
    plt = pretty_plot(width=8, height=7)
    axes = plt.gca()
    for hkl in self.all_slab_entries.keys():
        # Color is keyed by the facet's clean entry.
        clean_entry = list(self.all_slab_entries[hkl].keys())[0]
        # Skip facets that never appear on the Wulff shape in this range.
        if all([(a == 0) for a in hkl_area_dict[hkl]]):
            continue
        else:
            plt.plot(all_chempots, hkl_area_dict[hkl], '--', color=self.color_dict[clean_entry], label=str(hkl))
    plt.ylabel('Fractional area $A^{Wulff}_{hkl}/A^{Wulff}$')
    self.chempot_plot_addons(plt, chempot_range, str(ref_delu).split('_')[1], axes, rect=[(- 0.0), 0, 0.95, 1], pad=5, ylim=[0, 1])
    return plt
1D plot. Plots the change in the area contribution of each facet as a function of chemical potential. Args: ref_delu (sympy Symbol): The free variable chempot with the format: Symbol("delu_el") where el is the name of the element. chempot_range (list): Min/max range of chemical potential to plot along delu_dict (Dict): Dictionary of the chemical potentials to be set as constant. Note the key should be a sympy Symbol object of the format: Symbol("delu_el") where el is the name of the element. delu_default (float): Default value for all unset chemical potentials increments (int): Number of data points between min/max or point of intersection. Defaults to 10 points. Returns: (Pylab): Plot of area frac on the Wulff shape for each facet vs chemical potential.
codesearchnet