code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def jwt_is_expired(self, access_token=None, leeway=0):
    """Check whether a JWT access token has expired.

    Args:
        access_token (str): Token whose ``exp`` claim is decoded and
            checked. When ``None``, the cached ``self.jwt_exp`` is used.
        leeway (float): Seconds of tolerance for local clock skew.
            Defaults to 0.

    Returns:
        bool: ``True`` if expired, otherwise ``False``.
    """
    exp = self.jwt_exp if access_token is None else self._decode_exp(access_token)
    # Expired when the expiration instant lies before "now" adjusted by
    # the allowed clock-skew leeway.
    return exp < time() - leeway
Validate JWT access token expiration. Args: access_token (str): Access token to validate. Defaults to ``None``. leeway (float): Time in seconds to adjust for local clock skew. Defaults to 0. Returns: bool: ``True`` if expired, otherwise ``False``.
codesearchnet
def _DropCommonSuffixes(filename): for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and (len(filename) > len(suffix)) and (filename[((- len(suffix)) - 1)] in ('-', '_'))): return filename[:((- len(suffix)) - 1)] return os....
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Ar...
codesearchnet
def __edit_distance_alt(self, words):
    """Compute every string that is one edit away from each given word.

    Args:
        words (list): Words for which to generate edit-distance-1 candidates.

    Returns:
        list: All strings at edit distance one from the lowercased words.
    """
    lowered = [word.lower() for word in words]
    candidates = []
    for word in lowered:
        candidates.extend(self.edit_distance_1(word))
    return candidates
Compute all strings that are 1 edit away from all the words using only the letters in the corpus Args: words (list): The words for which to calculate the edit distance Returns: list: The strings that are edit distance one from the provided words
juraj-google-style
def fit(self, x, y): train = np.vstack((np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for idx, row in x.iterrows()]), np.array([self.featurize_row(row.iloc[1], ...
Train the model. Args: x_tr (pd.DataFrame): CEPC format dataframe containing the pairs y_tr (pd.DataFrame or np.ndarray): labels associated to the pairs
juraj-google-style
def compilable_sources(self, sourcedir, absolute=False, recursive=True, excludes=[]): filepaths = [] for (root, dirs, files) in os.walk(sourcedir): dirs.sort() files.sort() for item in files: relative_dir = os.path.relpath(root, sourcedir) if (relative_dir == '.')...
Find all scss sources that should be compiled, aka all sources that are not "partials" Sass sources. Args: sourcedir (str): Directory path to scan. Keyword Arguments: absolute (bool): Returned paths will be absolute using ``sourcedir`` argument (if True), else return relative paths. recursive (bool): Switch to enabl...
codesearchnet
def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT):
    """Build a Python package representation of the given pyschema classes.

    One module is created per namespace, in a package hierarchy that mirrors
    the namespace hierarchy.

    Args:
        classes: Collection of pyschema classes to build the package from.
        target_folder: Root folder of the generated package.
        parent_package: Prepended on all generated import paths.
        indent: Indentation string used in the generated sources.
    """
    builder = PackageBuilder(target_folder, parent_package, indent)
    builder.from_classes_with_refs(classes)
This function can be used to build a python package representation of pyschema classes. One module is created per namespace in a package matching the namespace hierarchy. Args: classes: A collection of classes to build the package from target_folder: Root folder of the package parent_package: Prepended on all import s...
codesearchnet
def get_equivalent_atoms(self, tolerance=0.3):
    """Return sets of symmetry-equivalent atoms.

    Args:
        tolerance (float): Tolerance used when generating the full set of
            symmetry operations.

    Returns:
        dict: Result of the point-group analyzer's equivalent-atom lookup,
        after being post-processed in place by ``_convert_eq``.
    """
    analyzer = self._get_point_group_analyzer(tolerance=tolerance)
    equivalences = analyzer.get_equivalent_atoms()
    self._convert_eq(equivalences)
    return equivalences
Returns sets of equivalent atoms with symmetry operations Args: tolerance (float): Tolerance to generate the full set of symmetry operations. Returns: dict: The returned dictionary has two possible keys: ``eq_sets``: A dictionary of indices mapping to sets of indices, each key maps to indices of all equivalent atoms...
codesearchnet
def create_threads(self, sess, coord=None, daemon=False, start=False): with self._lock: try: if self._runs_per_session[sess] > 0: return [] except KeyError: pass self._runs_per_session[sess] = len(self._enqueue_ops) self._exceptions_raised = []...
Create threads to run the enqueue ops for the given session. This method requires a session in which the graph was launched. It creates a list of threads, optionally starting them. There is one thread for each op passed in `enqueue_ops`. The `coord` argument is an optional coordinator that the threads will use to t...
github-repos
def rand_ascii_str(length):
    """Generate a random string of ASCII letters and digits.

    Args:
        length: The number of characters in the string.

    Returns:
        The random string generated.
    """
    return ''.join(random.choice(ascii_letters_and_digits) for _ in range(length))
Generates a random string of specified length, composed of ascii letters and digits. Args: length: The number of characters in the string. Returns: The random string generated.
codesearchnet
def __init__(self, wrapped_list):
    """Construct a new list wrapper.

    Args:
        wrapped_list: The initial value of the data structure. A shallow copy
            may be maintained for error checking. `wrapped_list` itself should
            not be modified directly after constructing the `ListWrapper`, and
            if changes are detected the `ListWrapper` will throw an exception
            on save.
    """
    # Flags recording whether a non-append mutation or an external
    # modification has been observed.
    self._non_append_mutation_value = False
    self._external_modification_value = False
    super().__init__(wrapped_list)
    # Snapshot of the wrapped storage, used to detect external changes.
    self._last_wrapped_list_snapshot = list(self._storage)
Construct a new list wrapper. Args: wrapped_list: The initial value of the data structure. A shallow copy may be maintained for error checking. `wrapped_list` itself should not be modified directly after constructing the `ListWrapper`, and if changes are detected the `ListWrapper` will throw an exception on save.
github-repos
async def stop_tasks(self, address):
    """Clear all tasks pertaining to a tile.

    Cancels every running task attached to the given tile and waits for the
    cancellations to complete before returning.

    Args:
        address (int): The address of the tile we should stop.
    """
    tasks = self._tasks.get(address, [])
    for task in tasks:
        task.cancel()
    # BUGFIX: the gather must be awaited; otherwise this coroutine returns
    # before the cancelled tasks have actually stopped, contradicting the
    # documented contract. return_exceptions=True swallows the resulting
    # CancelledError (and any other exception) from each task.
    await asyncio.gather(*tasks, return_exceptions=True)
    self._tasks[address] = []
Clear all tasks pertaining to a tile. This coroutine will synchronously cancel all running tasks that were attached to the given tile and wait for them to stop before returning. Args: address (int): The address of the tile we should stop.
juraj-google-style
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): vision_data = {} if image_sizes is not None: images_kwargs = LlavaProcessorKwargs._defaults.get('images_kwargs', {}) images_kwargs.update(kwargs) crop_size = images_kwargs.get('crop_size', None) or self.image_processor.cro...
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`List[List[int]]`, *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input moda...
github-repos
def __init__(self, name=None):
    """Create a IdentityReader.

    Args:
        name: A name for the operation (optional).
    """
    # Underlying reader op; identity readers support serialization.
    rr = gen_io_ops.identity_reader_v2(name=name)
    super(IdentityReader, self).__init__(rr, supports_serialize=True)
Create a IdentityReader. Args: name: A name for the operation (optional).
github-repos
def to_frame(self, **kwargs):
    r"""Return a pandas DataFrame loaded from the worksheet data.

    Args:
        \**kwargs: passed to the export writer (e.g. ``header``,
            ``index_col`` as for ``pandas.read_csv()``).

    Returns:
        pandas.DataFrame: new ``DataFrame`` instance, named after the
        worksheet title.
    """
    frame = export.write_dataframe(self._values, **kwargs)
    frame.name = self.title
    return frame
r"""Return a pandas DataFrame loaded from the worksheet data. Args: \**kwargs: passed to ``pandas.read_csv()`` (e.g. ``header``, ``index_col``) Returns: pandas.DataFrame: new ``DataFrame`` instance
codesearchnet
def to_pandas(self): df = self.data.to_pandas(is_transposed=self._is_transposed) if df.empty: if (len(self.columns) != 0): df = pandas.DataFrame(columns=self.columns).astype(self.dtypes) else: df = pandas.DataFrame(columns=self.columns, index=self.index) else: ...
Converts Modin DataFrame to Pandas DataFrame. Returns: Pandas DataFrame of the DataManager.
codesearchnet
def write_info_file(tensorboard_info):
    """Write TensorBoardInfo to the current process's info file.

    This should be called by `main` once the server is ready. When the
    server shuts down, `remove_info_file` should be called.

    Args:
        tensorboard_info: A valid `TensorBoardInfo` object.

    Raises:
        ValueError: If any field on `info` is not of the correct type.
    """
    contents = '%s\n' % _info_to_string(tensorboard_info)
    with open(_get_info_file_path(), 'w') as info_file:
        info_file.write(contents)
Write TensorBoardInfo to the current process's info file. This should be called by `main` once the server is ready. When the server shuts down, `remove_info_file` should be called. Args: tensorboard_info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type.
codesearchnet
def __init__(self, dump):
    """Constructor of ExpressionEvaluator.

    Args:
        dump: an instance of `DebugDumpDir`.
    """
    self._dump = dump
    # Cache of previously retrieved tensor values, to avoid repeated lookups.
    self._cached_tensor_values = {}
Constructor of ExpressionEvaluator. Args: dump: an instance of `DebugDumpDir`.
github-repos
def RunJob(self, job): if (not job.leased_until): raise LockError('CronJob must be leased for Run() to be called.') if (job.leased_until < rdfvalue.RDFDatetime.Now()): raise LockError(('CronJob lease expired for %s.' % job.cron_job_id)) logging.info('Starting cron job: %s', job.cron_job_id) ...
Does the actual work of the Cron, if the job is due to run. Args: job: The cronjob rdfvalue that should be run. Must be leased. Returns: A boolean indicating if this cron job was started or not. False may be returned when the threadpool is already full. Raises: LockError: if the object is not locked. ValueError: If ...
codesearchnet
def sync_trial_info(self, job_path, expr_dir_name): expr_name = expr_dir_name[(- 8):] expr_path = os.path.join(job_path, expr_dir_name) if (expr_name not in self._monitored_trials): self._create_trial_info(expr_path) self._monitored_trials.add(expr_name) else: self._update_trial_...
Load information of the trial from the given experiment directory. Create or update the trial information, together with the trial meta file. Args: job_path(str) expr_dir_name(str)
codesearchnet
def connect(self, timeout=600): if self.socket: raise TensorForceError("Already connected to {}:{}. Only one connection allowed at a time. " + "Close first by calling `close`!".format(self.host, self.port)) self.socket = socket.socket(sock...
Starts the server tcp connection on the given host:port. Args: timeout (int): The time (in seconds) for which we will attempt a connection to the remote (every 5sec). After that (or if timeout is None or 0), an error is raised.
juraj-google-style
def get_source_event_declaration(self, event):
    """Return the source mapping where the event is declared.

    Args:
        event (str): event name

    Returns:
        (dict): sourceMapping of the first event with a matching name.
    """
    matches = (candidate.source_mapping
               for candidate in self.events
               if candidate.name == event)
    # No default: StopIteration propagates if the event is not declared.
    return next(matches)
Return the source mapping where the event is declared Args: event (str): event name Returns: (dict): sourceMapping
codesearchnet
def drag_and_drop(self, source_selector, destination_selector, **kwargs): self.info_log(('Drag and drop: source (%s); destination (%s)' % (source_selector, destination_selector))) use_javascript_dnd = kwargs.get('use_javascript_dnd', 'proxy_driver:use_javascript_dnd') source_el = self.find(source_selector) ...
Drag and drop Args: source_selector: (str) destination_selector: (str) Kwargs: use_javascript_dnd: bool; default: config proxy_driver:use_javascript_dnd
codesearchnet
def multiplicative_jitter(x, epsilon=0.01):
    """Multiply values by a random number between 1-epsilon and 1+epsilon.

    Makes models more resilient to rounding errors introduced by bfloat16.
    This seems particularly important for logits.

    Args:
        x: a mtf.Tensor
        epsilon: a floating point value

    Returns:
        a mtf.Tensor with the same type and shape as x.
    """
    if epsilon == 0:
        return x
    jitter = mtf.random_uniform(
        x.mesh,
        x.shape,
        minval=1.0 - epsilon,
        maxval=1.0 + epsilon,
        dtype=x.dtype)
    return x * jitter
Multiply values by a random number between 1-epsilon and 1+epsilon. Makes models more resilient to rounding errors introduced by bfloat16. This seems particularly important for logits. Args: x: a mtf.Tensor epsilon: a floating point value Returns: a mtf.Tensor with the same type and shape as x.
codesearchnet
def delete_by_file(self, file_obj):
    """Remove file from the storage.

    File is identified by opened `file_obj`, from which the hashes / path
    are computed.

    Args:
        file_obj (file): Opened file-like object, which is used to compute
            hashes.

    Raises:
        IOError: If the `file_obj` is not in storage.
    """
    BalancedDiscStorage._check_interface(file_obj)
    return self.delete_by_hash(self._get_hash(file_obj))
Remove file from the storage. File is identified by opened `file_obj`, from which the hashes / path are computed. Args: file_obj (file): Opened file-like object, which is used to compute hashes. Raises: IOError: If the `file_obj` is not in storage.
juraj-google-style
def pull(self, arm_id, success, failure):
    """Pull an arm, recording the observed successes and failures.

    Args:
        arm_id: Arms master id.
        success: The number of success.
        failure: The number of failure.
    """
    arm_distribution = self.__beta_dist_dict[arm_id]
    arm_distribution.observe(success, failure)
Pull arms. Args: arm_id: Arms master id. success: The number of success. failure: The number of failure.
juraj-google-style
def consume_input(self, mystr, stack=[], state=1, curchar=0, depth=0): mystrsplit = mystr.split(' ') if self.s[state].type == 1: stack.append(self.s[state].sym) if len(self.s[state].trans) > 0: state = self.s[state].trans[0] if self.parse(...
Consumes an input and validates if it is accepted Args: mystr (str): the input string to be consumed stack (list): the stack of symbols state (int): the current state of the PDA curchar (int): the index of the consumed character depth (int): the depth of the function call in the stack Returns: bool: A value indicating whether the input is accepted
juraj-google-style
def __init__(self, log_dir, testbed_name):
    """Constructor for TestRunner.

    Args:
        log_dir: string, root folder where to write logs.
        testbed_name: string, name of the testbed to run tests on.
    """
    self._log_dir = log_dir
    self._testbed_name = testbed_name
    # Aggregated results across every test run executed by this runner.
    self.results = records.TestResult()
    self._test_run_infos = []
    self._test_run_metadata = TestRunner._TestRunMetaData(log_dir, testbed_name)
Constructor for TestRunner. Args: log_dir: string, root folder where to write logs testbed_name: string, name of the testbed to run tests on
github-repos
def parse_fields_whois(self, response): try: temp = response.split('|') ret = {'asn_registry': temp[4].strip(' \n')} if (ret['asn_registry'] not in self.rir_whois.keys()): raise ASNRegistryError('ASN registry {0} is not known.'.format(ret['asn_registry'])) ret['asn'] = te...
The function for parsing ASN fields from a whois response. Args: response (:obj:`str`): The response from the ASN whois server. Returns: dict: The ASN lookup results :: { 'asn' (str) - The Autonomous System Number 'asn_date' (str) - The ASN Allocation date 'asn_registry' (str) - The assigned ASN registry 'asn_cidr'...
codesearchnet
def events_filter(self, topics: List[str]=None, from_block: BlockSpecification=None, to_block: BlockSpecification=None) -> StatelessFilter:
    """Install a new filter for an array of topics emitted by the contract.

    Args:
        topics: A list of event ids to filter for. Can also be None, in
            which case all events are queried.
        from_block: The block number at which to start looking for events.
        to_block: The block number at which to stop looking for events.

    Return:
        Filter: the installed StatelessFilter.
    """
    return self.client.new_filter(
        self.address,
        topics=topics,
        from_block=from_block,
        to_block=to_block,
    )
Install a new filter for an array of topics emitted by the contract. Args: topics: A list of event ids to filter for. Can also be None, in which case all events are queried. from_block: The block number at which to start looking for events. to_block: The block number at which to stop looking for events. Return: Filter...
codesearchnet
def get_propagator(name):
    """Retrieve a named propagator.

    Args:
        name (str): Name of the desired propagator.

    Return:
        Propagator class.

    Raises:
        UnknownPropagatorError: If ``name`` matches no known propagator.
    """
    from .sgp4 import Sgp4
    from .sgp4beta import Sgp4Beta
    # Candidate classes: the locally imported propagators plus every
    # module-level name (module-level entries take precedence).
    candidates = locals().copy()
    candidates.update(globals())
    if name not in candidates:
        raise UnknownPropagatorError(name)
    return candidates[name]
Retrieve a named propagator Args: name (str): Name of the desired propagator Return: Propagator class
juraj-google-style
def refl(scatterer, h_pol=True):
    """Reflectivity (with number concentration N=1) for the current setup.

    Args:
        scatterer: a Scatterer instance.
        h_pol: If True (default), use horizontal polarization. If False,
            use vertical polarization.

    Returns:
        The reflectivity.

    NOTE: To compute reflectivity in dBZ, give the particle diameter and
    wavelength in [mm].
    """
    prefactor = scatterer.wavelength**4 / (np.pi**5 * scatterer.Kw_sqr)
    return prefactor * radar_xsect(scatterer, h_pol)
Reflectivity (with number concentration N=1) for the current setup. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The reflectivity. NOTE: To compute reflectivity in dBZ, give the particle diameter and wavelength in [mm], th...
juraj-google-style
def make_trace_api(client):
    """Create an instance of the gapic Trace API.

    Args:
        client (~google.cloud.trace.client.Client): The client that holds
            configuration details.

    Returns:
        A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the
        proper configurations.
    """
    gapic_client = trace_service_client.TraceServiceClient(
        credentials=client._credentials,
        client_info=_CLIENT_INFO,
    )
    return _TraceAPI(gapic_client, client)
Create an instance of the gapic Trace API. Args: client (~google.cloud.trace.client.Client): The client that holds configuration details. Returns: A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the proper configurations.
codesearchnet
def empty(shape, dtype=None, **kwargs):
    """Create an array of given shape and type, without initializing entries.

    Args:
        shape (sequence of ints): 2D shape of the array.
        dtype (data-type, optional): Desired data-type for the array.
        kwargs (optional): Other arguments of the array (*coords, attrs,
            and name).

    Returns:
        array (decode.array): Decode array without initialized entries.
    """
    return dc.array(np.empty(shape, dtype), **kwargs)
Create an array of given shape and type, without initializing entries. Args: shape (sequence of ints): 2D shape of the array. dtype (data-type, optional): Desired data-type for the array. kwargs (optional): Other arguments of the array (*coords, attrs, and name). Returns: array (decode.array): Decode array without in...
juraj-google-style
def timestamp(method='iso8601'): if (method == 'iso8601'): tz_hour = (time.timezone utc_offset = (str(tz_hour) if (tz_hour < 0) else ('+' + str(tz_hour))) stamp = (time.strftime('%Y-%m-%dT%H%M%S') + utc_offset) return stamp else: raise ValueError('only iso8601 is accepte...
make an iso8601 timestamp Args: method (str): type of timestamp Example: >>> stamp = timestamp() >>> print('stamp = {!r}'.format(stamp)) stamp = ...-...-...T...
codesearchnet
def reindex(self):
    """Re-index this coarse graining to use squeezed indices.

    The output grouping is translated to use indices ``0..n``, where ``n``
    is the number of micro indices in the coarse-graining. Re-indexing does
    not affect the state grouping, which is already index-independent.

    Returns:
        CoarseGrain: A new |CoarseGrain| object with squeezed indices.
    """
    # Map each micro index to its squeezed counterpart.
    index_map = dict(zip(self.micro_indices, reindex(self.micro_indices)))
    squeezed_partition = tuple(
        tuple(index_map[i] for i in group) for group in self.partition
    )
    return CoarseGrain(squeezed_partition, self.grouping)
Re-index this coarse graining to use squeezed indices. The output grouping is translated to use indices ``0..n``, where ``n`` is the number of micro indices in the coarse-graining. Re-indexing does not effect the state grouping, which is already index-independent. Returns: CoarseGrain: A new |CoarseGrain| object, ind...
codesearchnet
def GetSubkeyByName(self, name):
    """Retrieves a subkey by name.

    Args:
        name (str): name of the subkey.

    Returns:
        WinRegistryKey: Windows Registry subkey or None if not found.
    """
    # Lazily load the key from the registry when it is not cached yet.
    if not self._registry_key and self._registry:
        self._GetKeyFromRegistry()
    # Subkeys are stored upper-cased for case-insensitive lookup.
    subkey = self._subkeys.get(name.upper())
    return subkey
Retrieves a subkey by name. Args: name (str): name of the subkey. Returns: WinRegistryKey: Windows Registry subkey or None if not found.
juraj-google-style
def select_sites( self, site_labels ): if type( site_labels ) in ( list, set ): selected_sites = [ s for s in self.sites if s.label in site_labels ] elif type( site_labels ) is str: selected_sites = [ s for s in self.sites if s.label is site_labels ] else: ...
Selects sites in the lattice with specified labels. Args: site_labels (List(Str)|Set(Str)|Str): Labels of sites to select. This can be a List [ 'A', 'B' ], a Set ( 'A', 'B' ), or a String 'A'. Returns: (List(Site)): List of sites with labels given by `site_labels`.
juraj-google-style
def get_holodeck_path(): if (('HOLODECKPATH' in os.environ) and (os.environ['HOLODECKPATH'] != '')): return os.environ['HOLODECKPATH'] if (os.name == 'posix'): return os.path.expanduser('~/.local/share/holodeck') elif (os.name == 'nt'): return os.path.expanduser('~\\AppData\\Local\\h...
Gets the path of the holodeck environment Returns: (str): path to the current holodeck environment
codesearchnet
def compress_dir(path, compression="gz"):
    """Recursively compresses all files in a directory.

    Note that this compresses all files singly, i.e., it does not create a
    tar archive. For that, just use Python tarfile class.

    Args:
        path (str): Path to parent directory.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to gz.
    """
    for parent, _, filenames in os.walk(path):
        for filename in filenames:
            compress_file(os.path.join(parent, filename), compression=compression)
Recursively compresses all files in a directory. Note that this compresses all files singly, i.e., it does not create a tar archive. For that, just use Python tarfile class. Args: path (str): Path to parent directory. compression (str): A compression mode. Valid options are "gz" or "bz2". Defaults to gz.
juraj-google-style
def authenticate_direct_bind(self, username, password): bind_user = '{rdn}={username},{user_search_dn}'.format(rdn=self.config.get('LDAP_USER_RDN_ATTR'), username=username, user_search_dn=self.full_user_search_dn) connection = self._make_connection(bind_user=bind_user, bind_password=password) response = Aut...
Performs a direct bind. We can do this since the RDN is the same as the login attribute. Hence we just string together a dn to find this user with. Args: username (str): Username of the user to bind (the field specified as LDAP_BIND_RDN_ATTR) password (str): User's password to bind with. Returns: AuthenticationRespon...
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: if token_ids_1 is not None: raise ValueError('You should not supply a second sequence if the provided sequence of ids is alr...
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of ids of the first sequence. token_ids_1 (`List[int]`, *optional*): List of ids of the sec...
github-repos
def GetRequestXML(self, method, *args):
    """Get the raw SOAP XML for a request.

    Temporarily switches the suds client into no-send mode so the request
    envelope is built but never transmitted.

    Args:
        method: The method name.
        *args: A list of arguments to be passed to the method.

    Returns:
        An element containing the raw XML that would be sent as the request.
    """
    self.suds_client.set_options(nosend=True)
    envelope = getattr(self, method)(*args).envelope
    self.suds_client.set_options(nosend=False)
    return lxml.etree.fromstring(envelope)
Get the raw SOAP XML for a request. Args: method: The method name. *args: A list of arguments to be passed to the method. Returns: An element containing the raw XML that would be sent as the request.
juraj-google-style
def download_write_file(self, metadata, out_dir=None): fileName = metadata['name'] path = os.path.join(out_dir or wandb_dir(), fileName) if self.file_current(fileName, metadata['md5']): return path, None size, response = self.download_file(metadata['url']) ...
Download a file from a run and write it to wandb/ Args: metadata (obj): The metadata object for the file to download. Comes from Api.download_urls(). Returns: A tuple of the file's local path and the streaming response. The streaming response is None if the file already existed and was up to date.
juraj-google-style
def Webhook(self, request, global_params=None):
    """ReceiveTriggerWebhook [Experimental]: handle a webhook request
    targeted at a specific trigger.

    Args:
        request: (CloudbuildProjectsTriggersWebhookRequest) input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (ReceiveTriggerWebhookResponse) The response message.
    """
    method_config = self.GetMethodConfig('Webhook')
    return self._RunMethod(method_config, request, global_params=global_params)
ReceiveTriggerWebhook [Experimental] is called when the API receives a webhook request targeted at a specific trigger. Args: request: (CloudbuildProjectsTriggersWebhookRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ReceiveTriggerWebhookResponse) The response m...
github-repos
def set_document_type(loader_cls: Type, type_: Type) -> None:
    """Set the type corresponding to the whole document.

    Args:
        loader_cls: The loader class to set the document type for.
        type_: The type the loader should process the document into.
    """
    loader_cls.document_type = type_
    # Make sure the class registry exists on the loader before use.
    if not hasattr(loader_cls, '_registered_classes'):
        loader_cls._registered_classes = {}
Set the type corresponding to the whole document. Args: loader_cls: The loader class to set the document type for. type_: The type to loader should process the document into.
codesearchnet
def validate_document(self, definition): initial_document = {} try: initial_document = Loader.load(definition) except RuntimeError as exception: self.logger.error(str(exception)) sys.exit(1) document = Validator().validate(initial_document) if (document is None): self...
Validate given pipeline document. The method is trying to load, parse and validate the spline document. The validator verifies the Python structure B{not} the file format. Args: definition (str): path and filename of a yaml file containing a valid spline definition. Returns: dict: loaded and validated spline documen...
codesearchnet
def get_class(class_key): if (class_key not in CLASSES): for basecls in (MediaMetadata, MediaCollection): if class_key.startswith(basecls.__name__): class_name = ('MS' + class_key.replace(basecls.__name__, '')) if (sys.version_info[0] == 2): cl...
Form a music service data structure class from the class key Args: class_key (str): A concatenation of the base class (e.g. MediaMetadata) and the class name Returns: class: Subclass of MusicServiceItem
codesearchnet
def path(self, goal): if (goal == self.name): return [self] if (goal not in self.routes): raise ValueError("Unknown '{0}'".format(goal)) obj = self path = [obj] while True: obj = obj.routes[goal].direction path.append(obj) if (obj.name == goal): br...
Get the shortest way between two nodes of the graph Args: goal (str): Name of the targeted node Return: list of Node
codesearchnet
def notify(self, notices): issues_html = get_template('unattached_ebs_volume.html') issues_text = get_template('unattached_ebs_volume.txt') for recipient, issues in list(notices.items()): if issues: message_html = issues_html.render(issues=issues) ...
Send notifications to the users via. the provided methods Args: notices (:obj:`dict` of `str`: `dict`): List of the notifications to send Returns: `None`
juraj-google-style
def _convert_to_compatible_tensor(value, target, error_prefix): try: tensor = tf_v1.convert_to_tensor_or_indexed_slices(value, target.dtype) except TypeError as e: raise TypeError("%s: %s" % (error_prefix, e)) if _is_sparse(tensor) != _is_sparse(target): if _is_sparse(tensor): raise TypeError...
Converts `value` into a tensor that can be feed into `tensor_info`. Args: value: A value to convert into Tensor or SparseTensor. target: An object returned by `parse_tensor_info_map`. error_prefix: A string to prefix on raised TypeErrors. Raises: TypeError: If it fails to convert. Returns: A Tensor or SparseTensor c...
juraj-google-style
def _setup(self, delete=True): if delete: self.clear() with nn.context_scope(self.ctx): outputs = self.func( *(self.inputs_f + self.func_args), **self.func_kwargs) if not hasattr(outputs, '__iter__'): self.outputs = [outputs] ...
Create a function instance and execute setup. Args: delete (bool): Delete buffered variables.
juraj-google-style
def run(self, input_dir, output_file_path): logging.info('Running defense %s', self.submission_id) tmp_run_dir = self.temp_copy_extracted_submission() output_dir = os.path.dirname(output_file_path) output_filename = os.path.basename(output_file_path) cmd = ['--network=none', '-m=24g', '--cpus=3.75',...
Runs defense inside Docker. Args: input_dir: directory with input (adversarial images). output_file_path: path of the output file. Returns: how long it took to run submission in seconds
codesearchnet
def save_index(self, filename): data = {} for f in self.files.values(): entities = {v.entity.id: v.value for k, v in f.tags.items()} data[f.path] = {'domains': f.domains, 'entities': entities} with open(filename, 'w') as outfile: json.dump(data, outfi...
Save the current Layout's index to a .json file. Args: filename (str): Filename to write to. Note: At the moment, this won't serialize directory-specific config files. This means reconstructed indexes will only work properly in cases where there aren't multiple layout specs within a project.
juraj-google-style
def _convert_int(self, value):
    """Convert a value into an integer.

    Args:
        value: String representation of a field from the Bulkdozer feed.

    Returns:
        int: The integer representation of ``value`` if it can be converted,
        otherwise None.
    """
    try:
        return int(value)
    # BUGFIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # int() only raises TypeError (non-stringlike) or ValueError (bad text).
    except (TypeError, ValueError):
        return None
Converts a value into a integer. Args: value: String representation of a field from the Bulkdozer feed. Returns: If possible to convert value into an integer, returns the integer representation, otherwise None.
github-repos
def _maybe_expand_labels(labels, predictions): with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope: labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels) if isinstance(labels, sparse_tensor.SparseTensor): return cond.cond(math_ops.equal(array_ops.rank(p...
If necessary, expand `labels` along last dimension to match `predictions`. Args: labels: `Tensor` or `SparseTensor` with shape [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies num_labels=1, in which case the result is an expanded `labels` with shape [D1, ... DN, 1]. predictions: `Tensor` with shape [D1, .....
github-repos
def export(self, remote_function):
    """Export a remote function.

    Args:
        remote_function: the RemoteFunction object.
    """
    if self._worker.mode is None:
        # The worker is not connected yet; queue the export for later.
        self._functions_to_export.append(remote_function)
        return
    if self._worker.mode != ray.worker.SCRIPT_MODE:
        # Only the driver (script mode) performs the export.
        return
    self._do_export(remote_function)
Export a remote function. Args: remote_function: the RemoteFunction object.
codesearchnet
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations and a summary
    that measures the sparsity of activations.

    Args:
        x: Tensor

    Returns:
        nothing
    """
    # Strip the tower prefix so summaries from all towers share one name.
    name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.summary.histogram(name + '/activations', x)
    tf.summary.scalar(name + '/sparsity', tf.nn.zero_fraction(x))
Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measures the sparsity of activations. Args: x: Tensor Returns: nothing
juraj-google-style
def _find_dependencies(self, dataset_key, **dfilter): try: node = self.getitem(dataset_key) LOG.trace("Found exact dataset already loaded: {}".format(node.name)) return node, set() except KeyError: LOG.trace("Exact dataset {}...
Find the dependencies for *dataset_key*. Args: dataset_key (str, float, DatasetID): Dataset identifier to locate and find any additional dependencies for. **dfilter (dict): Additional filter parameters. See `satpy.readers.get_key` for more details.
juraj-google-style
def __parameter_descriptor(self, subfield_list): descriptor = {} final_subfield = subfield_list[(- 1)] if all((subfield.required for subfield in subfield_list)): descriptor['required'] = True descriptor['type'] = self.__field_to_parameter_type(final_subfield) default = self.__parameter_defau...
Creates descriptor for a parameter using the subfields that define it. Each parameter is defined by a list of fields, with all but the last being a message field and the final being a simple (non-message) field. Many of the fields in the descriptor are determined solely by the simple field at the end, though some (su...
codesearchnet
def find_files(paths, file_predicate): file_list = [] for path in paths: p = abs_path(path) for (dirPath, _, fileList) in os.walk(p): for fname in fileList: (name, ext) = os.path.splitext(fname) if file_predicate(name, ext): file_li...
Locate files whose names and extensions match the given predicate in the specified directories. Args: paths: A list of directory paths where to find the files. file_predicate: A function that returns True if the file name and extension are desired. Returns: A list of files that match the predicate.
codesearchnet
def remove(self, path, dir_fd=None):
    """Remove the FakeFile object at the specified file path.

    Args:
        path: Path to file to be removed.
        dir_fd: If not `None`, the file descriptor of a directory, with
            `path` being relative to this directory. New in Python 3.3.

    Raises:
        OSError: if path points to a directory, if path does not exist, or
            if removal failed.
    """
    resolved = self._path_with_dir_fd(path, self.remove, dir_fd)
    self.filesystem.remove(resolved)
Remove the FakeFile object at the specified file path. Args: path: Path to file to be removed. dir_fd: If not `None`, the file descriptor of a directory, with `path` being relative to this directory. New in Python 3.3. Raises: OSError: if path points to a directory. OSError: if path does not exist. OSError: if remova...
juraj-google-style
def add_operator(self, operator): if (not isinstance(operator, Operator)): raise FiqlObjectException(('%s is not a valid element type' % operator.__class__)) if (not self._working_fragment.operator): self._working_fragment.operator = operator elif (operator > self._working_fragment.operator)...
Add an ``Operator`` to the ``Expression``. The ``Operator`` may result in a new ``Expression`` if an ``Operator`` already exists and is of a different precedence. There are three possibilities when adding an ``Operator`` to an ``Expression`` depending on whether or not an ``Operator`` already exists: - No ``Operator...
codesearchnet
def on_graph_def(self, graph_def, device_name, wall_time):
    """Implementation of the GraphDef-carrying Event proto callback.

    Args:
        graph_def: A GraphDef proto. N.B.: this GraphDef comes from the core
            runtime of a debugged Session::Run() call, after graph partition,
            so it may differ from the GraphDef available to the general
            TensorBoard.
        device_name: Name of the device the GraphDef belongs to.
        wall_time: Event timestamp; unused here.
    """
    del wall_time  # Unused.
    self._graph_defs[device_name] = graph_def
    # NOTE(review): when graph defs do not arrive first, the graph appears
    # to be registered immediately and a token consumed from the incoming
    # channel — confirm the channel hand-off semantics with the caller.
    if not self._graph_defs_arrive_first:
        self._add_graph_def(device_name, graph_def)
        self._incoming_channel.get()
Implementation of the GraphDef-carrying Event proto callback. Args: graph_def: A GraphDef proto. N.B.: The GraphDef is from the core runtime of a debugged Session::Run() call, after graph partition. Therefore it may differ from the GraphDef available to the general TensorBoard. For example, the GraphDef in general Ten...
codesearchnet
def get_signature(self, base_commit=None): if base_commit is None: base_commit = 'HEAD' self.run('add', '-A', self.path) sha = self.run('rev-parse', '--verify', base_commit).strip() diff = self.run('diff', sha).strip() if len(diff) == 0: try: ...
Get the signature of the current state of the repository TODO right now `get_signature` is an effectful process in that it adds all untracked file to staging. This is the only way to get accruate diff on new files. This is ok because we only use it on a disposable copy of the repo. Args: base_commit - the base commit...
juraj-google-style
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False): try: fd = aff4.FACTORY.Open(aff4_urn, token=token) filepath = os.path.join(target_dir, fd.urn.Path()[1:]) if isinstance(fd, standard.VFSDirectory): try: os.makedirs(filepath) excep...
Copy an AFF4 object that supports a read interface to local filesystem. Args: aff4_urn: URN of thing to copy. target_dir: Directory to copy the file to. token: Auth token. overwrite: If True overwrite the file if it exists. Returns: If aff4_urn points to a file, returns path to the downloaded file. Otherwise returns ...
codesearchnet
def noise_op(latents, hparams):
    """Adds isotropic gaussian-noise to each latent.

    Args:
        latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
        hparams: HParams.

    Returns:
        latents: latents with isotropic gaussian noise appended.
    """
    # Noise is applied only during training and when a nonzero noise level
    # is configured; otherwise the latents pass through unchanged.
    if hparams.latent_noise == 0 or hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return latents
    shape = common_layers.shape_list(latents)
    noise = tf.random_normal(shape, stddev=hparams.latent_noise)
    return latents + noise
Adds isotropic gaussian-noise to each latent. Args: latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC). hparams: HParams. Returns: latents: latents with isotropic gaussian noise appended.
codesearchnet
def S2_surface(self, sizes, bounds, presets, covers, use_torch=False, num_samples=10): args = self.inputs Si = self.sobol_analysis(num_samples, {'num_vars': len(args), 'names': args, 'bounds': [bounds[arg] for arg in args]}, covers) S2 = Si['S2'] (s2_max, v1, v2) = get_max_s2_sensitivity(S2) x_var =...
Calculates the sensitivity surface of a GrFN for the two variables with the highest S2 index. Args: num_samples: Number of samples for sensitivity analysis. sizes: Tuple of (number of x inputs, number of y inputs). bounds: Set of bounds for GrFN inputs. presets: Set of standard values for GrFN inputs. Returns: Tuple:...
codesearchnet
def ParseFileObject(self, parser_mediator, file_object): if not self._encoding: self._encoding = parser_mediator.codepage try: if not self._HasExpectedLineLength(file_object): display_name = parser_mediator.GetDisplayName() raise errors.UnableToParseFile(( ...
Parses a DSV text file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def _try_guard_against_uninitialized_dependencies(name, initial_value): if not isinstance(initial_value, tensor_lib.Tensor): raise TypeError('initial_value needs to be a Tensor: %s' % initial_value) if _has_cycle(initial_value.op, state={}): return initial_value return _safe_initial_value_fr...
Attempt to guard against dependencies on uninitialized variables. Replace references to variables in `initial_value` with references to the variable's initialized values. The initialized values are essentially conditional TensorFlow graphs that return a variable's value if it is initialized or its `initial_value` if i...
github-repos
def deprecated(replacement=None, message=None): def wrap(old): def wrapped(*args, **kwargs): msg = "%s is deprecated" % old.__name__ if replacement is not None: if isinstance(replacement, property): r = replacement.fget elif i...
Decorator to mark classes or functions as deprecated, with a possible replacement. Args: replacement (callable): A replacement class or method. message (str): A warning message to be displayed. Returns: Original function, but with a warning to use the updated class.
juraj-google-style
def create(cls, extension_name=None, extension_tag=None, extension_type=None): extension_name = ExtensionName(extension_name) extension_tag = ExtensionTag(extension_tag) extension_type = ExtensionType(extension_type) return ExtensionInformation(extension_name=extension_name, extension_tag=extension_tag,...
Construct an ExtensionInformation object from provided extension values. Args: extension_name (str): The name of the extension. Optional, defaults to None. extension_tag (int): The tag number of the extension. Optional, defaults to None. extension_type (int): The type index of the extension. Optional, defaults to None...
codesearchnet
def _match_against_protocol(self, left, other_type, subst, view): if isinstance(left.cls, abstract.AMBIGUOUS_OR_EMPTY): return subst elif left.cls.is_dynamic: return self._subst_with_type_parameters_from(subst, other_type) elif other_type.full_name == 'typing.Sequence' and any((cls.full_name...
Checks whether a type is compatible with a protocol. Args: left: An instance of a type. other_type: A protocol. subst: The current type parameter assignment. view: The current mapping of Variable to Value. Returns: A new type parameter assignment if the matching succeeded, None otherwise.
github-repos
def get_graph_element_name(elem):
    """Obtain the name or string representation of a graph element.

    Certain graph elements, such as `SparseTensor`s, do not have a
    "name" attribute; for those, fall back to the __str__ representation.

    Args:
        elem: The graph element in question.

    Returns:
        The element's ``name`` if present, otherwise ``str(elem)``.
    """
    try:
        return elem.name
    except AttributeError:
        return str(elem)
Obtain the name or string representation of a graph element. If the graph element has the attribute "name", return name. Otherwise, return a __str__ representation of the graph element. Certain graph elements, such as `SparseTensor`s, do not have the attribute "name". Args: elem: The graph element in question. Retur...
github-repos
def reset(self, ms=0, halt=True):
    """Resets the target via the J-Link DLL.

    Args:
        self (JLink): the ``JLink`` instance
        ms (int): Amount of milliseconds to delay after reset (default: 0)
        halt (bool): if the CPU should halt after reset (default: True)

    Returns:
        The (non-negative) result code of the DLL reset call.

    Raises:
        JLinkException: if the DLL reports a negative result code.
    """
    dll = self._dll
    dll.JLINKARM_SetResetDelay(ms)
    status = dll.JLINKARM_Reset()
    if status < 0:
        raise errors.JLinkException(status)
    # Resume execution only when the caller asked not to halt.
    if not halt:
        dll.JLINKARM_Go()
    return status
Resets the target. This method resets the target, and by default toggles the RESET and TRST pins. Args: self (JLink): the ``JLink`` instance ms (int): Amount of milliseconds to delay after reset (default: 0) halt (bool): if the CPU should halt after reset (default: True) Returns: The result code returned by the DLL reset call.
codesearchnet
def dot(self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True): if (kernel is None): kernel = self.kernel if (t is not None): t = np.atleast_1d(t) if (check_sorted and np.any((np.diff(t) < 0.0))): raise ValueError('the input coordinates must be sorted') ...
Dot the covariance matrix into a vector or matrix Compute ``K.y`` where ``K`` is the covariance matrix of the GP without the white noise or ``yerr`` values on the diagonal. Args: y (array[n] or array[n, nrhs]): The vector or matrix ``y`` described above. kernel (Optional[terms.Term]): A different kernel can optionall...
codesearchnet
def ParseContainersTable( self, parser_mediator, database=None, table=None, **unused_kwargs): if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort...
Parses the Containers table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing.
juraj-google-style
def unpack_byte(self, offset):
    """Return a little-endian unsigned byte from the relative offset.

    Arguments:
    - `offset`: The relative offset from the start of the block.

    Throws:
    - `OverrunBufferException`: when the read falls outside the buffer.
    """
    absolute = self._offset + offset
    try:
        (value,) = struct.unpack_from("<B", self._buf, absolute)
    except struct.error:
        raise OverrunBufferException(absolute, len(self._buf))
    return value
Returns a little-endian unsigned byte from the relative offset. Arguments: - `offset`: The relative offset from the start of the block. Throws: - `OverrunBufferException`
juraj-google-style
def consolidate(self, args):
    """Consolidate the provided arguments.

    Arguments that match a known option are type-converted via the
    option's ``convert``. Options with a non-None default that are
    absent from ``args`` are filled in with the converted default.

    Args:
        args (dict): A dictionary of the provided arguments.

    Returns:
        dict: A dictionary of consolidated arguments.
    """
    consolidated = dict(args)
    for option in self:
        name = option.name
        if name in consolidated:
            consolidated[name] = option.convert(consolidated[name])
        elif option.default is not None:
            consolidated[name] = option.convert(option.default)
    return consolidated
Consolidate the provided arguments. If the provided arguments have matching options, this performs a type conversion. For any option that has a default value and is not present in the provided arguments, the default value is added. Args: args (dict): A dictionary of the provided arguments. Returns: dict: A dictionar...
codesearchnet
def prange(N=1, dim=1):
    """Create a range of polynomials where the exponent varies.

    Args:
        N (int): Number of polynomials in the array.
        dim (int): The dimension the polynomial should span.

    Returns:
        (Poly): A polynomial array of length N containing simple
            polynomials with increasing exponent.
    """
    indices = numpy.arange(N, dtype=int)
    exponent = numpy.zeros(dim, dtype=int)
    coefficients = {}
    for power in range(N):
        # Only the last dimension's exponent varies.
        exponent[-1] = power
        coefficients[tuple(exponent)] = 1 * (indices == power)
    return Poly(coefficients, dim, (N,), int)
Constructor to create a range of polynomials where the exponent vary. Args: N (int): Number of polynomials in the array. dim (int): The dimension the polynomial should span. Returns: (Poly): A polynomial array of length N containing simple polynomials with increasing exponent. Examples: >>> print(prange(4)) [1, q0, ...
codesearchnet
def resize_tensor_input(self, input_index, tensor_size, strict=False):
    """Resizes an input tensor.

    Args:
        input_index: Tensor index of input to set. This value can be
            gotten from the 'index' field in get_input_details.
        tensor_size: The tensor_shape to resize the input to.
        strict: Only unknown dimensions can be resized when `strict`
            is True. Unknown dimensions are indicated as `-1`.
    """
    self._ensure_safe()
    # The underlying interpreter expects an int32 numpy shape.
    new_shape = np.array(tensor_size, dtype=np.int32)
    self._interpreter.ResizeInputTensor(input_index, new_shape, strict)
Resizes an input tensor. Args: input_index: Tensor index of input to set. This value can be gotten from the 'index' field in get_input_details. tensor_size: The tensor_shape to resize the input to. strict: Only unknown dimensions can be resized when `strict` is True. Unknown dimensions are indicated as `-1` in the `sh...
github-repos
def WriteGraphSeries(graph_series, label, token=None): if data_store.RelationalDBEnabled(): data_store.REL_DB.WriteClientGraphSeries(graph_series, label) if _ShouldUseLegacyDatastore(): aff4_attr = _GetAFF4AttributeForReportType(graph_series.report_type)() if isinstance(aff4_attr, rdf_st...
Writes graph series for a particular client label to the DB. Args: graph_series: A series of rdf_stats.Graphs containing aggregated data for a particular report-type. label: Client label by which data in the graph_series was aggregated. token: ACL token to use for writing to the legacy (non-relational) datastore. Rai...
codesearchnet
def unpack_inputs(func): original_signature = inspect.signature(func) @functools.wraps(func) def run_call_with_unpacked_inputs(self, *args, **kwargs): kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)} fn_args_and_kwargs = {key: val for...
Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input (common case in Keras). Args: func (`callable`): The callable function of the TensorFlow ...
github-repos
def _url_format(self, service): base_service_url = '{base}{service}'.format( base=self.urlbase, service=service ) return base_service_url
Generate URL from urlbase and service. Args: service (str): The endpoint service to use, i.e. gradebook Returns: str: URL to where the request should be made
juraj-google-style
def create_from_binary(cls, binary_view): (attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, content_len, content_offset, indexed_flag) = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:(name_offset + (2 * name_len))].tobytes().decode('u...
Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: AttributeHeader: New object using the binary stream...
codesearchnet
def __init__(self, tcex, name, to, from_addr, subject, body, header, owner=None, **kwargs): super(Email, self).__init__(tcex, 'emails', name, owner, **kwargs) self.api_entity = 'email' self._data['to'] = to or kwargs.get('to') self._data['from'] = from_addr or kwargs.get('from_a...
Initialize Class Properties. Args: name (str): The name for this Group. subject (str): The subject for this Email. header (str): The header for this Email. body (str): The body for this Email. date_added (str, kwargs): The date timestamp the Indicator was created. from_addr (str, kwargs): The **from** address for this...
juraj-google-style
def register(self, task_json=None, json_filename=None): if not task_json and not json_filename: raise Exception("Both task json and filename can't be none.") if task_json and json_filename: raise Exception("Both task json and filename can't be provided.") if js...
Registers a new GBDX task. Args: task_json (dict): Dictionary representing task definition. json_filename (str): A full path of a file with json representing the task definition. Only one out of task_json and json_filename should be provided. Returns: Response (str).
juraj-google-style
def AtMaximumDepth(self, search_depth):
    """Determines if the find specification is at maximum depth.

    Args:
        search_depth (int): number of key path segments to compare.

    Returns:
        bool: True if at maximum depth, False if not.
    """
    if self._key_path_segments is None:
        # No key path means there is no depth limit to reach.
        return False
    return search_depth >= self._number_of_key_path_segments
Determines if the find specification is at maximum depth. Args: search_depth (int): number of key path segments to compare. Returns: bool: True if at maximum depth, False if not.
juraj-google-style
def binomial_coefficient(n, k): if ((not isinstance(k, int)) or (not isinstance(n, int))): raise TypeError('Expecting positive integers') if (k > n): raise ValueError('k must be lower or equal than n') if ((k < 0) or (n < 0)): raise ValueError('Expecting positive integers') retur...
Calculate the binomial coefficient indexed by n and k. Args: n (int): positive integer k (int): positive integer Returns: The binomial coefficient indexed by n and k Raises: TypeError: If either n or k is not an integer ValueError: If either n or k is negative, or if k is strictly greater than n
codesearchnet
async def get_jsone_context_and_template(chain, parent_link, decision_link, tasks_for): if (tasks_for == 'action'): (jsone_context, tmpl) = (await get_action_context_and_template(chain, parent_link, decision_link)) else: tmpl = (await get_in_tree_template(decision_link)) jsone_context = ...
Get the appropriate json-e context and template for any parent task. Args: chain (ChainOfTrust): the chain of trust. parent_link (LinkOfTrust): the parent link to test. decision_link (LinkOfTrust): the parent link's decision task link. tasks_for (str): the reason the parent link was created (cron, hg-push, action) Re...
codesearchnet
def topdown(cls):
    """Get all topdown `Relationship` instances.

    Returns:
        tuple: every distinct registered instance whose ``direction``
        is ``'topdown'``, in first-seen order.
    """
    candidates = (
        rel for rel in cls._instances.values()
        if rel.direction == 'topdown'
    )
    return tuple(unique_everseen(candidates))
Get all topdown `Relationship` instances. Returns: :obj:`generator` Example: >>> from pronto import Relationship >>> for r in Relationship.topdown(): ... print(r) Relationship('can_be') Relationship('has_part')
codesearchnet
def target_code_to_name(code):
    """Convert an int target code to a target name.

    Since TARGET_CODES is a 1:1 mapping, perform a reverse lookup to
    get the more readable name.

    Args:
        code: Value from TARGET_CODES.

    Returns:
        String target name corresponding to the given code.

    Raises:
        KeyError: if ``code`` is not a value in TARGET_CODES.
    """
    for name, target_code in TARGET_CODES.items():
        if target_code == code:
            return name
    raise KeyError(code)
Converts an int target code to a target name Since self.TARGET_CODES is a 1:1 mapping, perform a reverse lookup to get the more readable name. Args: code: Value from self.TARGET_CODES Returns: String target name corresponding to the given code.
codesearchnet
def cluster_info(cpu, cfg): cpus = cpu.cpu_count pods_per_core = cfg.doc.find('pods-per-core') pods_per_core_int = (int(pods_per_core.value) if pods_per_core else PODS_PER_CORE) cfg_max_pods = cfg.doc.find('max-pods') cfg_max_pods_int = (int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS) cal...
Collects fact for each host Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ``cpu_count``, ``pods_per_core_int``...
codesearchnet
def decode_from_file(estimator, vocabulary, model_type, batch_size, sequence_length, checkpoint_path='', input_filename=gin.REQUIRED, output_filename=gin.REQUIRED, eos_id=1): with tf.gfile.Open(input_filename) as f: text = f.read() records = text.split('\n') inputs = [record.strip() for record in re...
Decode from a text file. Args: estimator: a TPUEstimator vocabulary: a mtf.transformer.vocabulary.Vocabulary model_type: a string batch_size: an integer sequence_length: an integer (maximum decode length) checkpoint_path: an optional string input_filename: a string output_filename: a string eos_id: EOS id
codesearchnet
def multi_frontier_two_objective_reward(example):
    """Reward for the trivial search space.

    The reward (i.e. fitness) is a 2-element list; the goal of the
    search is to find the pareto frontier.

    Args:
        example: a materialized value.

    Returns:
        A 2-element list.
    """
    scaled = int(example * 10)
    # The middle band [3, 7) is rewarded on a 10x-magnified scale.
    if 3 <= scaled < 7:
        return [scaled * 10, 100 - scaled * 10]
    return [scaled, 10 - scaled]
Reward for the trivial search space. The reward (i.e. fitness) is a 2-element list. The goal of the search, therefore, is to find the pareto frontier in multi_frontier_two_objective_pareto function. Args: example: a materialized value. Returns: A 2-element list.
github-repos
def add_chunk(self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index=None) -> None: if self._parent_splitter is not None: self._parent_splitter.add_chunk(chunk, self._fields_in_parent + field_tags, index) else: assert self._chunks is not None assert self._chunked_...
Adds a new chunk and updates the ChunkedMessage proto. Args: chunk: Proto message or bytes. field_tags: Field information about the placement of the chunked data within self._proto. index: Optional index at which to insert the chunk. The chunk ordering is important for merging.
github-repos
def acquire(self, constructor_fn: Callable[[], Any], tag: Any=None) -> Any: with self._lock: if self._ref is None or self._ref() is None or self._tag != tag: result = constructor_fn() if result is None: return None self._ref = weakref.ref(result) ...
Acquire a reference to the object this shared control block manages. Args: constructor_fn: function that initialises / constructs the object if not present in the cache. This function should take no arguments. It should return an initialised object, or None if the object could not be initialised / constructed. tag: an...
github-repos
def activate_async(fn, _engine): @coroutine @functools.wraps(fn) def wrapper(*args, **kw): _engine.activate() try: if iscoroutinefunction(fn): (yield from fn(*args, **kw)) else: fn(*args, **kw) finally: _engine.disa...
Async version of the activate decorator. Arguments: fn (function): function to be wrapped by the decorator. _engine (Engine): pook engine instance Returns: function: decorator wrapper function.
codesearchnet
def create_game(self, map_name): map_inst = maps.get(map_name) map_data = map_inst.data(self._run_config) if (map_name not in self._saved_maps): for controller in self._controllers: controller.save_map(map_inst.path, map_data) self._saved_maps.add(map_name) create = sc_pb.Req...
Create a game for the agents to join. Args: map_name: The map to use.
codesearchnet
def from_primitive(cls, primitive: message.Message, context: Context) -> 'PrimitiveWrapper':
    """Instantiates a new PrimitiveWrapper wrapping primitive.

    Args:
        primitive: The FHIR primitive message to wrap and validate.
        context: Related primitive information to use for
            printing/parsing a wrapped primitive.

    Returns:
        An instance of PrimitiveWrapper.
    """
    wrapper = cls(primitive, context)
    # Validation happens eagerly so callers receive a checked wrapper.
    wrapper.validate_wrapped()
    return wrapper
Instantiates a new version of PrimitiveWrapper wrapping primitive. Args: primitive: The FHIR primitive message to wrap and validate. context: Related primitive information to use for printing/parsing a wrapped primitive. Returns: An instance of PrimitiveWrapper.
github-repos
def apply_sync(fn: StreamFn, content: Iterable[_T]) -> list[_T]:
    """Applies a part function synchronously.

    Args:
        fn: the part function to apply to the content.
        content: a collection of inputs/parts on which to apply the
            function.

    Returns:
        the content, with the function `fn` applied to each input/part.
    """
    async def _drive():
        # Run inside the library context so fn sees the right settings.
        async with context.context():
            async_content = streams.stream_content(content)
            return await streams.gather_stream(fn(async_content))

    return asyncio.run(_drive())
Applies a part function synchronously. Args: fn: the part function to apply to the content. content: a collection of inputs/parts on which to apply the function. Returns: the content, with the function `fn` applied to each input/part.
github-repos