code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, name: YangIdentifier, ns: Optional[YangIdentifier]):
    """Initialize the class instance.

    Args:
        name: Member's local name.
        ns: Member's namespace, or ``None`` if it has none.
    """
    self.namespace = ns
    self.name = name
Initialize the class instance. Args: name: Member's local name. ns: Member's namespace.
juraj-google-style
def execute_command(self, command):
    """Execute raw commands on the device over SSH (no vdom context).

    Opens a fresh SSH channel, runs ``command`` and returns the output
    split into lines. Not recommended unless you are 100% sure of what
    you are doing.

    Args:
        command (str): Command(s) to execute.

    Returns:
        list of str: Output lines (the trailing prompt line is dropped).

    Raises:
        exceptions.CommandExecutionException: If anything is written to
            stderr or the device reports "Command fail".
    """
    logger.debug('Executing commands:\n %s' % command)
    err_msg = 'Something happened when executing some commands on device'
    chan = self.ssh.get_transport().open_session()
    chan.settimeout(5)
    chan.exec_command(command)
    error_chan = chan.makefile_stderr()
    output_chan = chan.makefile()
    # Build strings with join instead of quadratic += concatenation.
    error = ''.join(self._read_wrapper(e) for e in error_chan.read())
    output = ''.join(self._read_wrapper(o) for o in output_chan.read())
    if error:
        msg = '%s %s:\n%s\n%s' % (
            err_msg, self.ssh.get_host_keys().keys()[0], command, error)
        logger.error(msg)
        raise exceptions.CommandExecutionException(msg)
    if 'Command fail' in output:
        msg = '%s %s:\n%s\n%s' % (
            err_msg, self.ssh.get_host_keys().keys()[0], command, output)
        logger.error(msg)
        raise exceptions.CommandExecutionException(msg)
    lines = output.splitlines()
    # NOTE(review): the split marker was corrupted in the original source
    # (`line.split(' ...` truncated). Splitting on '\r' to strip echoed
    # carriage-return prefixes -- TODO confirm against real device output.
    cleaned = []
    for line in lines:
        parts = line.split('\r')
        cleaned.append(parts[1] if len(parts) > 1 else parts[0])
    # Drop the trailing prompt line.
    return cleaned[:-1]
This method will execute the commands on the device as if you were just connected to it (it will not enter into any vdom). This method is not recommended unless you are 100% sure of what you are doing. Args: * **command** (str) -- Command to execute. Returns: A list of strings containing the output. Raises: exceptions.CommandExecutionException -- If it detects any problem with the command.
juraj-google-style
def scan_manifest(self, manifest):
    """Build the dict of StoredResources described by ``manifest``.

    Adds runtime support files, a generated ``__main__.py`` and any
    missing ``__init__.py`` files on top of the manifest entries.

    Args:
        manifest: Mapping of stored path -> local path (``None`` means
            an empty file).

    Returns:
        A dict of store_filename to StoredResource.

    Raises:
        error.Error: If the manifest itself contains a ``__main__.py``.
    """
    # Top-level package directories named by any manifest path.
    top_roots = {p.split('/', 1)[0] for p in manifest if '/' in p}
    import_roots = list(self.import_roots) + sorted(top_roots)

    stored_resources = {}
    for support_file in _runtime_support_files:
        resource = fetch_support_file(support_file, self.timestamp_tuple)
        stored_resources[resource.zipinfo.filename] = resource

    for stored_path, local_path in manifest.items():
        if local_path is None:
            stored_resources[stored_path] = stored_resource.EmptyFile(
                stored_path, self.timestamp_tuple)
        else:
            stored_resources[stored_path] = stored_resource.StoredFile(
                stored_path, self.timestamp_tuple, local_path)

    if '__main__.py' in stored_resources:
        raise error.Error(
            'Configuration error for [%s]: Manifest file included a file '
            'named __main__.py, which is not allowed' % self.manifest_filename)
    stored_resources['__main__.py'] = self.generate_main(
        self.main_filename, self.generate_boilerplate(import_roots))

    for stored_filename in _runtime_init_files:
        if stored_filename in stored_resources:
            logging.debug('Skipping __init__.py already present [%s]',
                          stored_filename)
            continue
        stored_resources[stored_filename] = stored_resource.EmptyFile(
            stored_filename, self.timestamp_tuple)
    return stored_resources
Return a dict of StoredResources based on an input manifest. Returns: A dict of store_filename to StoredResource
github-repos
def predict(self, a, b):
    """Compute the symmetrized mutual-information test statistic.

    Args:
        a (array-like): Variable 1.
        b (array-like): Variable 2.

    Returns:
        float: Mean of the two directional mutual-information estimates.
    """
    x = np.array(a).reshape((-1, 1))
    y = np.array(b).reshape((-1, 1))
    forward = mutual_info_regression(x, y.reshape((-1,)))
    backward = mutual_info_regression(y, x.reshape((-1,)))
    return (forward + backward) / 2
Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 Returns: float: test statistic
juraj-google-style
def disease_terms(self, hgnc_id=None):
    """Return disease terms, optionally restricted to one gene.

    Args:
        hgnc_id (int): If given, only return terms that overlap this gene.

    Returns:
        list(dict): All matching disease terms.
    """
    query = {}
    if hgnc_id:
        LOG.debug('Fetching all diseases for gene %s', hgnc_id)
        query['genes'] = hgnc_id
    else:
        LOG.info('Fetching all disease terms')
    return list(self.disease_term_collection.find(query))
Return all disease terms that overlap a gene If no gene is given, return all disease terms Args: hgnc_id(int) Returns: iterable(dict): A list with all disease terms that match
codesearchnet
def readUserSession(datafile):
    """Read the next user-session record from ``datafile``.

    Sessions with fewer than 2 or more than 500 page clicks are skipped.

    Args:
        datafile: Open file whose cursor is at the start of a record.

    Returns:
        list: Page categories in click order; [] at end of file.
    """
    for record in datafile:
        pages = record.split()
        if not 2 <= len(pages) <= 500:
            continue
        return [PAGE_CATEGORIES[int(p) - 1] for p in pages]
    return []
Reads the user session record from the file's cursor position Args: datafile: Data file whose cursor points at the beginning of the record Returns: list of pages in the order clicked by the user
juraj-google-style
def delete_vmss(access_token, subscription_id, resource_group, vmss_name):
    """Delete a virtual machine scale set.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vmss_name (str): Name of the virtual machine scale set.

    Returns:
        HTTP response.
    """
    endpoint = (
        get_rm_endpoint() +
        '/subscriptions/' + subscription_id +
        '/resourceGroups/' + resource_group +
        '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name +
        '?api-version=' + COMP_API
    )
    return do_delete(endpoint, access_token)
Delete a virtual machine scale set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response.
juraj-google-style
def pprint(sequence, keys=None):
    """Print ``sequence`` as an ASCII table on stdout.

    Args:
        sequence (list or tuple): One dictionary per row.
        keys (list): Optional list of keys to order columns as well as
            to filter for them.
    """
    if not sequence:
        return
    columns = calculate_columns(sequence)
    row_format = calculate_row_format(columns, keys)
    header = row_format % {key: key.title() for key in columns}
    separator = row_format % {key: '-' * columns[key] for key in columns}
    print(separator)
    print(header)
    print(separator)
    for row in sequence:
        print(row_format % row)
    print(separator)
Print sequence as ascii table to stdout. Args: sequence (list or tuple): a sequence with a dictionary each entry. keys (list): optional list of keys to order columns as well as to filter for them.
juraj-google-style
async def append_entries(self, destination=None):
    """AppendEntries RPC -- replicate log entries / heartbeat.

    Sends one AppendEntries message to ``destination``, or to every node
    in the cluster when ``destination`` is None.

    Args:
        destination: Destination node id, or None to broadcast.
    """
    # One explicit destination, or the whole cluster for a broadcast round.
    destination_list = [destination] if destination else self.state.cluster
    for destination in destination_list:
        data = {
            'type': 'append_entries',
            'term': self.storage.term,
            'leader_id': self.id,
            'commit_index': self.log.commit_index,
            'request_id': self.request_id
        }
        next_index = self.log.next_index[destination]
        prev_index = next_index - 1
        # Send at most one entry per RPC; an empty list acts as a heartbeat.
        if self.log.last_log_index >= next_index:
            data['entries'] = [self.log[next_index]]
        else:
            data['entries'] = []
        data.update({
            'prev_log_index': prev_index,
            # Term 0 when there is no preceding entry (empty log / index 0).
            'prev_log_term': self.log[prev_index]['term'] if self.log and prev_index else 0
        })
        # Fire-and-forget; responses are handled elsewhere.
        asyncio.ensure_future(self.state.send(data, destination), loop=self.loop)
AppendEntries RPC — replicate log entries / heartbeat Args: destination — destination id Request params: term — leader’s term leader_id — so follower can redirect clients prev_log_index — index of log entry immediately preceding new ones prev_log_term — term of prev_log_index entry commit_index — leader’s commit_index entries[] — log entries to store (empty for heartbeat)
juraj-google-style
def _get_pdf_filenames_at(source_directory):
    """Find all PDF files in ``source_directory`` (non-recursive).

    Args:
        source_directory (str): The source directory.

    Returns:
        list(str): Filepaths to all PDF files in the directory.

    Raises:
        ValueError: If ``source_directory`` is not a directory.
    """
    if not os.path.isdir(source_directory):
        raise ValueError('%s is not a directory!' % source_directory)
    return [
        os.path.join(source_directory, entry)
        for entry in os.listdir(source_directory)
        if entry.endswith(PDF_EXTENSION)
    ]
Find all PDF files in the specified directory. Args: source_directory (str): The source directory. Returns: list(str): Filepaths to all PDF files in the specified directory. Raises: ValueError
codesearchnet
def unify_basis(self, keys=None, basis=None):
    """Give every curve (or those named in ``keys``) the same basis.

    If no basis is provided, welly tries to get one via ``survey_basis()``.

    Args:
        keys (list): Keys of the data items to unify; default: all curves.
        basis (ndarray): Regularly sampled depths to resample onto.

    Returns:
        None. Works in place.

    Raises:
        WellError: If no basis was given and none could be retrieved.
    """
    if keys is None:
        keys = [k for k, v in self.data.items() if isinstance(v, Curve)]
    else:
        keys = utils.flatten_list(keys)
    if basis is None:
        basis = self.survey_basis(keys=keys)
    if basis is None:
        m = "No basis was provided and welly could not retrieve common basis."
        raise WellError(m)
    # The original looped with a redundant `if keys and (k not in keys)`
    # guard (always False when iterating over keys) -- removed.
    for k in keys:
        # Best effort: skip items that cannot be resampled, but don't
        # swallow KeyboardInterrupt/SystemExit with a bare except.
        try:
            self.data[k] = self.data[k].to_basis(basis)
        except Exception:
            continue
    return
Give everything, or everything in the list of keys, the same basis. If you don't provide a basis, welly will try to get one using ``survey_basis()``. Args: basis (ndarray): A basis: the regularly sampled depths at which you want the samples. keys (list): List of strings: the keys of the data items to unify, if not all of them. Returns: None. Works in place.
juraj-google-style
def django_cache_function(timeout: int = 5 * 60, cache_key: str = '', debug_cache: bool = False):
    """Decorator to add caching to a function in Django (default cache).

    Args:
        timeout: Timeout in seconds; use None for "never expire", as 0
            means "do not cache".
        cache_key: Optional fixed cache key (if falsy, one is derived
            from the call signature).
        debug_cache: Show hits/misses in the log.
    """
    # functools.wraps preserves the wrapped function's metadata
    # (__name__, __doc__), which the original decorator clobbered.
    from functools import wraps

    cache_key = cache_key or None

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if cache_key:
                # Fixed key: no need to store/compare a call signature.
                call_sig = ''
                _cache_key = cache_key
                check_stored_call_sig = False
            else:
                call_sig = get_call_signature(fn, args, kwargs)
                _cache_key = make_cache_key(call_sig)
                check_stored_call_sig = True
            if debug_cache:
                log.critical("Checking cache for key: " + _cache_key)
            cache_result_tuple = cache.get(_cache_key)
            if cache_result_tuple is None:
                if debug_cache:
                    log.debug("Cache miss")
            else:
                if debug_cache:
                    log.debug("Cache hit")
                cached_call_sig, func_result = cache_result_tuple
                # Guard against hash collisions on derived keys.
                if (not check_stored_call_sig) or cached_call_sig == call_sig:
                    return func_result
                log.warning(
                    "... Cache hit was due to hash collision; cached_call_sig "
                    "{} != call_sig {}".format(
                        repr(cached_call_sig), repr(call_sig)))
            func_result = fn(*args, **kwargs)
            cache_result_tuple = (call_sig, func_result)
            cache.set(key=_cache_key, value=cache_result_tuple,
                      timeout=timeout)
            return func_result
        return wrapper
    return decorator
Decorator to add caching to a function in Django. Uses the Django default cache. Args: timeout: timeout in seconds; use None for "never expire", as 0 means "do not cache". cache_key: optional cache key to use (if falsy, we'll invent one) debug_cache: show hits/misses?
juraj-google-style
def list_bucket(self, bucket):
    """Paginate through the bucket's files and write them to the response.

    Production apps should set page_size to a practical value.

    Args:
        bucket: bucket path prefix.
    """
    self.response.write('Listbucket result:\n')
    page_size = 1
    stats = gcs.listbucket(bucket + '/foo', max_keys=page_size)
    while True:
        count = 0
        for stat in stats:
            count += 1
            self.response.write(repr(stat))
            self.response.write('\n')
        # A short (or empty) page means there is nothing left to list.
        if count != page_size or count == 0:
            break
        # Note: `stat` deliberately leaks out of the for-loop above --
        # the last item seen becomes the marker for the next page.
        stats = gcs.listbucket(bucket + '/foo', max_keys=page_size, marker=stat.filename)
Create several files and paginate through them. Production apps should set page_size to a practical value. Args: bucket: bucket.
juraj-google-style
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the
    output probability distribution).

    Args:
        temperature (`float`): The value used to module the logits
            distribution.
    """

    def __init__(self, temperature: float):
        # Temperature must be a strictly positive float.
        if not isinstance(temperature, float) or not temperature > 0:
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Dividing by T flattens (T > 1) or sharpens (T < 1) the logits.
        return scores / self.temperature
[`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution). Args: temperature (`float`): The value used to module the logits distribution.
github-repos
def signature(array):
    """Return the header-signature prefix of ``array``.

    Args:
        array: bytearray to extract the header signature from.

    Returns:
        The first ``_NUM_SIGNATURE_BYTES`` (262) bytes of the content,
        or the whole array if it is shorter.
    """
    return array[:min(len(array), _NUM_SIGNATURE_BYTES)]
Returns the first 262 bytes of the given bytearray as part of the file header signature. Args: array: bytearray to extract the header signature. Returns: First 262 bytes of the file content as bytearray type.
juraj-google-style
def link_contentkey_authorization_policy(access_token, ckap_id, options_id,
                                         ams_redirected_rest_endpoint):
    """Link Media Service Content Key Authorization Policy.

    Args:
        access_token (str): A valid Azure authentication token.
        ckap_id (str): A Content Key Authorization Policy ID.
        options_id (str): A Content Key Authorization Policy Options ID.
        ams_redirected_rest_endpoint (str): A Media Service Redirected
            Endpoint.

    Returns:
        HTTP response. JSON body.
    """
    full_path = ("/ContentKeyAuthorizationPolicies('" + ckap_id +
                 "')/$links/Options")
    full_path_encoded = urllib.parse.quote(full_path, safe='')
    endpoint = ams_rest_endpoint + full_path_encoded
    uri = (ams_redirected_rest_endpoint +
           "ContentKeyAuthorizationPolicyOptions('" + options_id + "')")
    body = '{"uri": "' + uri + '"}'
    return do_ams_post(endpoint, full_path_encoded, body, access_token,
                       "json_only", "1.0;NetFx")
Link Media Service Content Key Authorization Policy. Args: access_token (str): A valid Azure authentication token. ckap_id (str): A Media Service Asset Content Key Authorization Policy ID. options_id (str): A Media Service Content Key Authorization Policy Options . ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint. Returns: HTTP response. JSON body.
juraj-google-style
def request(path):
    """Send a GET request accepting JSON and return the deserialized body.

    Args:
        path (str): The URI to be requested.

    Returns:
        Deserialized JSON Python object.

    Raises:
        requests.exceptions.HTTPError: HTTP error from the server.
        requests.exceptions.InvalidURL: An invalid URL was requested.
        Exception: Any other error (logged with a traceback).
    """
    try:
        resp = requests.get(path, headers={'Accept': 'application/json'})
        resp.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        return resp.json()
Send a request to a given URL accepting JSON format and return a \ deserialized Python object. Args: path (str): The URI to be requested. Returns: response: Deserialized JSON Python object. Raises: HTTPError: the HTTP error returned by the requested server. InvalidURL: an invalid URL has been requested. Exception: generic exception.
juraj-google-style
def get_cmd_handler(self, cmd):
    """Return a handler for ``cmd``.

    The handler and the command share the same name, with dashes mapped
    to underscores.

    Args:
        cmd (str): The name of the command.

    Returns:
        callable: Handler for ``cmd``.

    Raises:
        lago.build.BuildException: If no handler exists for ``cmd``.
    """
    handler = getattr(self, cmd.replace('-', '_'), None)
    if not handler:
        raise BuildException(
            'Command {} is not supported as a build command'.format(cmd)
        )
    return handler
Return an handler for cmd. The handler and the command should have the same name. See class description for more info about handlers. Args: cmd (str): The name of the command Returns: callable: which handles cmd Raises: lago.build.BuildException: If an handler for cmd doesn't exist
codesearchnet
def send(self, **kwargs):
    """Create and send a single request; return the matching response.

    For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing

    Args:
        **kwargs: A single kwarg with the name and value to fill in to
            Request.

    Returns:
        The Response field corresponding to your request.
    """
    assert len(kwargs) == 1, 'Must make a single request.'
    (field_name,) = kwargs.keys()
    res = self.send_req(sc_pb.Request(**kwargs))
    return getattr(res, field_name)
Create and send a specific request, and return the response. For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing Args: **kwargs: A single kwarg with the name and value to fill in to Request. Returns: The Response corresponding to your request.
codesearchnet
def cashflows(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> Tuple[types.DateTensor, types.FloatTensor]:
    """Returns the payment dates and cashflow amounts of the fixed leg.

    Args:
        market: An instance of `ProcessedMarketData`.
        name: Python str. The name to give to the ops created by this
            function. Default value: `None` which maps to 'cashflows'.

    Returns:
        A tuple (payment_dates, cashflows) of two `Tensor`s of shape
        `batch_shape + [num_cashflows]`.
    """
    name = name or self._name + '_cashflows'
    with tf.name_scope(name):
        valuation_date = dateslib.convert_to_date_tensor(market.date)
        # 1.0 for payments on/after the valuation date, 0.0 otherwise.
        is_future = tf.cast(self._payment_dates >= valuation_date,
                            dtype=self._dtype)
        notional = tf.expand_dims(self._notional, axis=-1)
        amounts = notional * (is_future * self._daycount_fractions *
                              self._coupon_rate)
        return (self._payment_dates, amounts)
Returns cashflows for the fixed leg. Args: market: An instance of `ProcessedMarketData`. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'cashflows'. Returns: A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and containing the dates and the corresponding cashflows price for each stream based on the input market data.
github-repos
def validate_request_success(response_text, request_url, status_code, expected_status_code):
    """Validate that a request was successful.

    Args:
        response_text (str): The response body of the request.
        request_url (str): The URL the request was made at.
        status_code (int): The status code of the response.
        expected_status_code (int): The expected status code.

    Raises:
        :class:`saltant.exceptions.BadHttpRequestError`: The HTTP
            request failed.
    """
    # Explicit comparison instead of `assert`, which is stripped when
    # Python runs with optimizations (-O). Also fixes the "reponse" typo
    # in the error message.
    if status_code != expected_status_code:
        msg = ('Request to {url} failed with status {status_code}:\n'
               'The response from the request was as follows:\n\n'
               '{content}').format(url=request_url,
                                   status_code=status_code,
                                   content=response_text)
        raise BadHttpRequestError(msg)
Validates that a request was successful. Args: response_text (str): The response body of the request. request_url (str): The URL the request was made at. status_code (int): The status code of the response. expected_status_code (int): The expected status code of the response. Raises: :class:`saltant.exceptions.BadHttpRequestError`: The HTTP request failed.
codesearchnet
def get_naive(dt):
    """Return a naive datetime equivalent of ``dt``.

    datetime_tz objects can't just have tzinfo replaced with None; they
    must be converted via ``asdatetime()``.

    Args:
        dt: datetime object (naive or aware).

    Returns:
        datetime object without any timezone information.
    """
    if not dt.tzinfo:
        return dt
    # datetime_tz exposes asdatetime(); plain datetimes do not.
    if hasattr(dt, 'asdatetime'):
        return dt.asdatetime()
    return dt.replace(tzinfo=None)
Gets a naive datetime from a datetime. datetime_tz objects can't just have tzinfo replaced with None, you need to call asdatetime. Args: dt: datetime object. Returns: datetime object without any timezone information.
codesearchnet
def run_interactive_command(command, env=None, **kwargs):
    """Run a command interactively, reusing the current stdin/stdout/stderr.

    Args:
        command (list of str): Args of the command to execute, including
            the command itself as command[0], e.g. ``['ls', '-l']``.
        env (dict of str:str): If set, used as env for the subprocess.
        **kwargs: Passed through to the underlying
            :ref:subprocess.Popen call.

    Returns:
        lago.utils.CommandStatus: Result of the interactive execution.
    """
    return _run_command(
        command=command,
        out_pipe=sys.stdout,
        err_pipe=sys.stderr,
        stdin=sys.stdin,
        env=env,
        **kwargs
    )
Runs a command interactively, reusing the current stdin, stdout and stderr Args: command(list of str): args of the command to execute, including the command itself as command[0] as `['ls', '-l']` env(dict of str:str): If set, will use the given dict as env for the subprocess **kwargs: Any other keyword args passed will be passed to the :ref:subprocess.Popen call Returns: lago.utils.CommandStatus: result of the interactive execution
codesearchnet
def load_template(self, name):
    """Load (and cache) the named template from our templating system.

    Args:
        name: The name of the template to load.

    Returns:
        On success, a StatikTemplate object that can be used to render
        content.
    """
    if name in self.cached_templates:
        logger.debug("Using cached template: %s", name)
        return self.cached_templates[name]
    logger.debug("Attempting to find template by name: %s", name)
    name_with_ext, provider_name, base_path = self.find_template_details(name)
    full_path = (None if base_path is None
                 else os.path.join(base_path, name_with_ext))
    # Wrap provider loading so template errors carry our error context.
    template = template_exception_handler(
        lambda: self.get_provider(provider_name).load_template(
            name_with_ext, full_path=full_path),
        self.error_context,
        filename=full_path
    )
    self.cached_templates[name] = template
    return template
Attempts to load the relevant template from our templating system/environment. Args: name: The name of the template to load. Return: On success, a StatikTemplate object that can be used to render content.
juraj-google-style
def _slice_single_param(param, param_event_ndims, slices, dist_batch_shape):
    """Slices a single parameter of a distribution.

    Args:
        param: A `Tensor`, the original parameter to slice.
        param_event_ndims: `int` event parameterization rank for this
            parameter.
        slices: A `tuple` of normalized slices.
        dist_batch_shape: The distribution's batch shape `Tensor`.

    Returns:
        new_param: A `Tensor`, batch-sliced according to slices.
    """
    param_shape = tf.shape(input=param)
    # Left-pad the parameter's shape with 1s so its rank matches
    # batch rank + event rank; broadcasting dims become size-1 dims.
    insert_ones = tf.ones([((tf.size(input=dist_batch_shape) + param_event_ndims) - tf.rank(param))], dtype=param_shape.dtype)
    new_param_shape = tf.concat([insert_ones, param_shape], axis=0)
    full_batch_param = tf.reshape(param, new_param_shape)
    param_slices = []
    # Track the current dim in the (padded) param and in the batch shape.
    param_dim_idx = 0
    batch_dim_idx = 0
    for slc in slices:
        if (slc is tf.newaxis):
            # newaxis consumes no existing dimension.
            param_slices.append(slc)
            continue
        if (slc is Ellipsis):
            # A negative batch_dim_idx means we already saw an Ellipsis.
            if (batch_dim_idx < 0):
                raise ValueError('Found multiple `...` in slices {}'.format(slices))
            param_slices.append(slc)
            # After `...`, index dims from the right (negative indices).
            num_remaining_non_newaxis_slices = sum([(s is not tf.newaxis) for s in slices[(slices.index(Ellipsis) + 1):]])
            batch_dim_idx = (- num_remaining_non_newaxis_slices)
            param_dim_idx = (batch_dim_idx - param_event_ndims)
            continue
        param_dim_size = new_param_shape[param_dim_idx]
        batch_dim_size = dist_batch_shape[batch_dim_idx]
        # If this param dim broadcasts against the batch dim, slicing it
        # must keep the (size-1) dim rather than really slicing it.
        is_broadcast = (batch_dim_size > param_dim_size)
        if isinstance(slc, slice):
            (start, stop, step) = (slc.start, slc.stop, slc.step)
            if (start is not None):
                start = tf.where(is_broadcast, 0, start)
            if (stop is not None):
                stop = tf.where(is_broadcast, 1, stop)
            if (step is not None):
                step = tf.where(is_broadcast, 1, step)
            param_slices.append(slice(start, stop, step))
        else:
            # Scalar index: take element 0 on broadcast dims.
            param_slices.append(tf.where(is_broadcast, 0, slc))
        param_dim_idx += 1
        batch_dim_idx += 1
    # Event dims are never sliced: append full slices for them.
    param_slices.extend(([ALL_SLICE] * param_event_ndims))
    return full_batch_param.__getitem__(param_slices)
Slices a single parameter of a distribution. Args: param: A `Tensor`, the original parameter to slice. param_event_ndims: `int` event parameterization rank for this parameter. slices: A `tuple` of normalized slices. dist_batch_shape: The distribution's batch shape `Tensor`. Returns: new_param: A `Tensor`, batch-sliced according to slices.
codesearchnet
def verify_password(self, password, password_hash):
    """Verify plaintext ``password`` against ``password_hash``.

    Args:
        password (str): Plaintext password that the user types in.
        password_hash (str): Password hash generated by a previous call
            to ``hash_password()``. Passing a User object here is
            deprecated (see below).

    Returns:
        bool: True when ``password`` matches ``password_hash``,
        False otherwise.
    """
    # Backwards compatibility: accept a User object and fall back to its
    # .password attribute, printing a deprecation notice.
    if isinstance(password_hash, self.user_manager.db_manager.UserClass):
        print(
            'Deprecation warning: verify_password(password, user) has been changed'\
            ' to: verify_password(password, password_hash). The user param will be deprecated.'\
            ' Please change your call with verify_password(password, user) into'\
            ' a call with verify_password(password, user.password)'
            ' as soon as possible.')
        password_hash = password_hash.password
    # Delegate the actual check to the configured passlib CryptContext.
    return self.password_crypt_context.verify(password, password_hash)
Verify plaintext ``password`` against ``hashed password``. Args: password(str): Plaintext password that the user types in. password_hash(str): Password hash generated by a previous call to ``hash_password()``. Returns: | True when ``password`` matches ``password_hash``. | False otherwise. Example: :: if verify_password('mypassword', user.password): login_user(user)
juraj-google-style
def get_cluster(self, label):
    """Return a connection to the mongo cluster labelled ``label``.

    Args:
        label (string): The label of a cluster.

    Returns:
        A connection to the cluster labelled with ``label``.

    Raises:
        AttributeError: There is no cluster with the given label in the
            config.
    """
    for entry in self._clusters:
        if entry['label'] == label:
            return self._get_connection(entry)
    raise AttributeError('No such cluster %s.' % label)
Returns a connection to a mongo-clusters. Args: label (string): the label of a cluster. Returns: A connection to the cluster labeld with label. Raises: AttributeError: there is no cluster with the given label in the config
juraj-google-style
def send_event(self, event_type, category=None, dimensions=None, properties=None, timestamp=None):
    """Send an event to SignalFx.

    Args:
        event_type (string): The event type (name of the event time
            series).
        category (string): The category of the event.
        dimensions (dict): A map of event dimensions.
        properties (dict): A map of extra properties on that event.
        timestamp (float): Timestamp when the event has occurred.

    Raises:
        ValueError: If ``category`` is not a supported category.
    """
    if category and category not in SUPPORTED_EVENT_CATEGORIES:
        # Fixed: the original concatenated 'supported' and 'types'
        # without a separating space ("supportedtypes").
        raise ValueError('Event category is not one of the supported '
                         'types: {' +
                         ', '.join(SUPPORTED_EVENT_CATEGORIES) + '}')
    data = {
        'eventType': event_type,
        'category': category,
        'dimensions': dimensions or {},
        'properties': properties or {},
        'timestamp': int(timestamp) if timestamp else None,
    }
    _logger.debug('Sending event to SignalFx: %s', data)
    self._add_extra_dimensions(data)
    return self._send_event(
        event_data=data,
        url='{0}/{1}'.format(self._endpoint,
                             self._INGEST_ENDPOINT_EVENT_SUFFIX),
        session=self._session)
Send an event to SignalFx. Args: event_type (string): the event type (name of the event time series). category (string): the category of the event. dimensions (dict): a map of event dimensions. properties (dict): a map of extra properties on that event. timestamp (float): timestamp when the event has occurred
juraj-google-style
def get(self, rid, data_callback=None, raise_on_error=True):
    """Get cached data from the data store, refreshing it via
    ``data_callback`` when missing or expired.

    Args:
        rid (str): The record identifier.
        data_callback (callable): A method that will return fresh data
            for ``rid`` when the cache entry is missing or expired.
        raise_on_error (bool): If True and not r.ok the underlying
            update call will raise a RuntimeError.

    Returns:
        object: The cached (or freshly fetched) data, or None.
    """
    cached_data = None
    # Never raise on the read; a miss falls through to the callback.
    ds_data = self.ds.get(rid, raise_on_error=False)
    if ds_data is not None:
        expired = True
        if ds_data.get('found') is True:
            # NOTE(review): this compares self.ttl directly against the
            # stored 'cache-date' value, which reads like a timestamp --
            # confirm the intended expiry semantics.
            if self.ttl < int(ds_data.get('_source', {}).get('cache-date', 0)):
                cached_data = ds_data.get('_source', {}).get('cache-data')
                expired = False
                self.tcex.log.debug('Using cached data for ({}).'.format(rid))
            else:
                self.tcex.log.debug('Cached data is expired for ({}).'.format(rid))
        if expired or ds_data.get('found') is False:
            # Refresh from the callback and write the result back.
            if callable(data_callback):
                cached_data = data_callback(rid)
                self.tcex.log.debug('Using callback data for ({}).'.format(rid))
            if cached_data:
                self.update(rid, cached_data, raise_on_error)
    return cached_data
Get cached data from the data store. Args: rid (str): The record identifier. data_callback (callable): A method that will return the data. raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError. Returns: object : Python request response.
juraj-google-style
def _name_to_tensor(self, tensor_name): id1, id2 = self._tensor_name_to_ids[tensor_name] return self._operations[id1].outputs[id2]
The tensor with the given name. Args: tensor_name: a string, name of a tensor in the graph. Returns: a tf.Tensor or mtf.Tensor
juraj-google-style
def _ListDir(dirpath, pathtype):
    """Return names of the children of a given directory.

    Intended for `PathComponent` subclasses to get an initial list of
    potential children that then need to be filtered according to the
    rules of a specific component.

    Args:
        dirpath: A path to the directory.
        pathtype: The pathtype to use.

    Returns:
        A list of child names; empty when the directory can't be read.
    """
    pathspec = rdf_paths.PathSpec(path=dirpath, pathtype=pathtype)
    childpaths = []
    try:
        file_obj = vfs.VFSOpen(pathspec)
        for path in file_obj.ListNames():
            # Registry listings may contain empty names; drop those.
            if pathtype != rdf_paths.PathSpec.PathType.REGISTRY or path:
                childpaths.append(path)
    except IOError:
        pass
    return childpaths
Returns children of a given directory. This function is intended to be used by the `PathComponent` subclasses to get initial list of potential children that then need to be filtered according to the rules of a specific component. Args: dirpath: A path to the directory. pathtype: The pathtype to use. Raises: ValueError: in case of unsupported path types.
codesearchnet
def click_slot(self, slot, right=False):
    """Left-click or right-click the slot.

    Args:
        slot (Slot): The clicked slot. Can be a ``Slot`` instance or an
            integer index. Set to ``inventory.cursor_slot`` for clicking
            outside the window.
        right (bool): Right-click instead of left-click.
    """
    if isinstance(slot, int):
        slot = self.window.slots[slot]
    if right:
        button = constants.INV_BUTTON_RIGHT
    else:
        button = constants.INV_BUTTON_LEFT
    return self.send_click(windows.SingleClick(slot, button))
Left-click or right-click the slot. Args: slot (Slot): The clicked slot. Can be ``Slot`` instance or integer. Set to ``inventory.cursor_slot`` for clicking outside the window.
juraj-google-style
def __cloudflare_request(self, *, account, path, args=None):
    """Interact with the CloudFlare API, creating a per-account session
    on first use.

    Args:
        account (:obj:`CloudFlareAccount`): CloudFlare Account object.
        path (`str`): URL endpoint to communicate with.
        args (:obj:`dict` of `str`: `str`): Arguments for the endpoint.

    Returns:
        `dict`: Parsed JSON response.

    Raises:
        CloudFlareError: On any non-200 response.
    """
    args = args or {}
    if not self.cloudflare_initialized[account.account_id]:
        session = requests.Session()
        session.headers.update({
            'X-Auth-Email': account.email,
            'X-Auth-Key': account.api_key,
            'Content-Type': 'application/json'
        })
        self.cloudflare_session[account.account_id] = session
        self.cloudflare_initialized[account.account_id] = True
    # Default to the maximum page size unless the caller set one.
    args.setdefault('per_page', 100)
    response = self.cloudflare_session[account.account_id].get(
        account.endpoint + path, params=args)
    if response.status_code != 200:
        raise CloudFlareError('Request failed: {}'.format(response.text))
    return response.json()
Helper function to interact with the CloudFlare API. Args: account (:obj:`CloudFlareAccount`): CloudFlare Account object path (`str`): URL endpoint to communicate with args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume Returns: `dict`
juraj-google-style
def orbit(self, orbit):
    """Initialize the SGP4 propagator from an orbit.

    Args:
        orbit (Orbit): Orbit converted to a TLE and fed to SGP4.
    """
    self._orbit = orbit
    tle = Tle.from_orbit(orbit)
    lines = tle.text.splitlines()
    # A TLE may or may not carry a leading name line.
    if len(lines) == 3:
        line1, line2 = lines[1:]
    else:
        line1, line2 = lines
    self.tle = twoline2rv(line1, line2, wgs72)
Initialize the propagator Args: orbit (Orbit)
codesearchnet
def _import_module(self, name, level): key = (name, level) if key not in self._imported_modules_cache: self._imported_modules_cache[key] = self._do_import_module(name, level) return self._imported_modules_cache[key]
Import the module and return the module object. Args: name: Name of the module. E.g. "sys". level: Specifies whether to use absolute or relative imports. -1: (Python <= 3.1) "Normal" import. Try both relative and absolute. 0: Absolute import. 1: "from . import abc" 2: "from .. import abc" etc. Returns: An instance of abstract.Module or None if we couldn't find the module.
github-repos
def __init__(self, capacity=100, initialization_list=None):
    """Constructor.

    Args:
        capacity: (`int`) maximum number of device-tensor keys to store.
        initialization_list: (`list`) An optional list (parsed from
            JSON) used to initialize the data within this registry; see
            NumericsAlertRegistry.create_jsonable_registry.
    """
    self._capacity = capacity
    self._data = {}
    if initialization_list:
        for entry in initialization_list:
            triplet = HistoryTriplet._make(entry)
            key = (triplet.device, triplet.tensor)
            self._data[key] = NumericsAlertHistory(
                initialization_list=triplet.jsonable_history)
Constructor. Args: capacity: (`int`) maximum number of device-tensor keys to store. initialization_list: (`list`) An optional list (parsed from JSON) that is used to initialize the data within this registry. Use the create_jsonable_registry method of NumericsAlertRegistry to create such a list.
juraj-google-style
def insert(self, keys, values, name=None):
    """Associate ``keys`` with ``values`` (alias of ``insert_or_assign``).

    Args:
        keys: Keys to insert. Can be a tensor of any shape; must match
            the table's key type.
        values: Values to associate; same shape as ``keys`` and matching
            the table's value type.
        name: A name for the operation (optional).

    Returns:
        The created Operation.

    Raises:
        TypeError: when ``keys`` or ``values`` doesn't match the table
            data types.
    """
    return self.insert_or_assign(keys, values, name)
Associates `keys` with `values`. Args: keys: Keys to insert. Can be a tensor of any shape. Must match the table's key type. values: Values to be associated with keys. Must be a tensor of the same shape as `keys` and match the table's value type. name: A name for the operation (optional). Returns: The created Operation. Raises: TypeError: when `keys` or `values` doesn't match the table data types.
github-repos
def __init__(self, certificate_type=None, certificate_value=None):
    """Construct a Certificate object.

    Args:
        certificate_type (CertificateType): The type of the certificate.
            Optional, defaults to None.
        certificate_value (bytes): The bytes of the certificate.
            Optional, defaults to None.
    """
    super(Certificate, self).__init__(Tags.CERTIFICATE)
    # Wrap raw values in the KMIP primitive types, defaulting to empty.
    self.certificate_type = (CertificateType() if certificate_type is None
                             else CertificateType(certificate_type))
    self.certificate_value = (CertificateValue() if certificate_value is None
                              else CertificateValue(certificate_value))
Construct a Certificate object. Args: certificate_type (CertificateType): The type of the certificate. Optional, defaults to None. certificate_value (bytes): The bytes of the certificate. Optional, defaults to None.
juraj-google-style
def _ragged_stack_concat_axis_1(rt_inputs, stack_values):
    """Helper function to concatenate or stack ragged tensors along axis 1.

    Args:
        rt_inputs: A list of RaggedTensors, all with the same rank and
            ragged_rank.
        stack_values: Boolean. If true, then stack values; otherwise,
            concatenate them.

    Returns:
        A RaggedTensor.
    """
    num_inputs = len(rt_inputs)
    # All inputs must have the same number of rows to combine along axis 1.
    nrows_checks = []
    rt_nrows = rt_inputs[0].nrows()
    for index, rt in enumerate(rt_inputs[1:]):
        nrows_checks.append(check_ops.assert_equal(rt_nrows, rt.nrows(), message=f'Input tensors at index 0 (=x) and {index + 1} (=y) have incompatible shapes.'))
    with ops.control_dependencies(nrows_checks):
        # Concatenate along axis 0 first, then permute rows so that row i
        # of each input becomes adjacent (interleaved) in the result.
        concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False)
        row_indices = math_ops.range(rt_nrows * num_inputs)
        row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1])
        transposed_row_index_matrix = array_ops.transpose(row_index_matrix)
        row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1])
        permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation)
        if stack_values:
            # Group each run of num_inputs permuted rows into one output row.
            stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs)
            _copy_row_shape(rt_inputs, stack_splits)
            return ragged_tensor.RaggedTensor.from_row_splits(permuted_rt, stack_splits, validate=False)
        else:
            # Merge each group of num_inputs rows by keeping every
            # num_inputs-th split boundary.
            concat_splits = permuted_rt.row_splits[::num_inputs]
            _copy_row_shape(rt_inputs, concat_splits)
            return ragged_tensor.RaggedTensor.from_row_splits(permuted_rt.values, concat_splits, validate=False)
Helper function to concatenate or stack ragged tensors along axis 1. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor.
github-repos
def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
    """Seq2seq LM forward pass: run the encoder/decoder model, project to
    vocab logits via the shared embedding, and optionally compute the
    masked LM loss from ``labels`` (pad tokens are masked to -100).
    """
    if labels is not None:
        # Replace pad tokens with -100 so the loss ignores them.
        labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)
        # Caching is incompatible with training with labels.
        use_cache = False
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            # Teacher forcing: shift labels right to form decoder inputs.
            decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
    outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
    # Tie output projection to the shared input embedding (transpose_b).
    lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
    lm_logits = self.bias_layer(lm_logits)
    masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
    if not return_dict:
        output = (lm_logits,) + outputs[1:]
        # Prepend the loss only when one was computed.
        return (masked_lm_loss,) + output if masked_lm_loss is not None else output
    return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)
labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns:
github-repos
def __init__(
    self,
    tcex,
    owner,
    action=None,
    attribute_write_type=None,
    halt_on_error=True,
    playbook_triggers_enabled=None,
):
    """Initialize Class Properties.

    Args:
        tcex (obj): An instance of TcEx object.
        owner (str): The ThreatConnect owner for the Batch action.
        action (str, default:Create): Batch action ('Create' or 'Delete').
        attribute_write_type (str, default:Replace): Indicator attribute
            write type ('Append' or 'Replace').
        halt_on_error (bool, default:True): Halt the batch job on any error.
        playbook_triggers_enabled (bool, optional): Enable playbook triggers.
    """
    # Caller-supplied configuration.
    self.tcex = tcex
    self._owner = owner
    self._action = action or 'Create'
    self._attribute_write_type = attribute_write_type or 'Replace'
    self._halt_on_error = halt_on_error
    self._playbook_triggers_enabled = playbook_triggers_enabled

    # Fixed defaults.
    self._batch_max_chunk = 5000
    self._poll_timeout = 3600
    self.enable_saved_file = False
    self._poll_interval_times = []
    self._files = {}

    # Everything else starts unset and is populated lazily.
    for attr in (
        '_hash_collision_mode',
        '_file_merge_mode',
        '_group_shelf_fqfn',
        '_indicator_shelf_fqfn',
        '_halt_on_batch_error',
        '_halt_on_file_error',
        '_halt_on_poll_error',
        '_saved_xids',
        '_saved_groups',
        '_saved_indicators',
        '_batch_data_count',
        '_poll_interval',
        '_groups',
        '_groups_shelf',
        '_indicators',
        '_indicators_shelf',
    ):
        setattr(self, attr, None)
    self._gen_indicator_class()
Initialize Class Properties. Args: tcex (obj): An instance of TcEx object. owner (str): The ThreatConnect owner for Batch action. action (str, default:Create): Action for the batch job ['Create', 'Delete']. attribute_write_type (str, default:Replace): Write type for Indicator attributes ['Append', 'Replace']. halt_on_error (bool, default:True): If True any batch error will halt the batch job.
juraj-google-style
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """Scaled Euclidean distance between the centroids of two objects.

    Args:
        item_a: STObject from the first set in ObjectMatcher.
        time_a: Time integer being evaluated for item_a.
        item_b: STObject from the second set in ObjectMatcher.
        time_b: Time integer being evaluated for item_b.
        max_value: Maximum distance used as scaling value and upper bound.

    Returns:
        Distance value between 0 and 1.
    """
    x_a, y_a = item_a.center_of_mass(time_a)
    x_b, y_b = item_b.center_of_mass(time_b)
    separation = np.sqrt((x_a - x_b) ** 2 + (y_a - y_b) ** 2)
    # Clamp at max_value, then normalize into [0, 1].
    return np.minimum(separation, max_value) / float(max_value)
Euclidean distance between the centroids of item_a and item_b. Args: item_a: STObject from the first set in ObjectMatcher time_a: Time integer being evaluated item_b: STObject from the second set in ObjectMatcher time_b: Time integer being evaluated max_value: Maximum distance value used as scaling value and upper constraint. Returns: Distance value between 0 and 1.
codesearchnet
def view_as_complex(x):
    """Pack a real tensor with trailing dimension 2 into a complex64 tensor.

    The last axis holds the (real, imaginary) components; the result has
    shape ``x.shape[:-1]``.

    Args:
        x: A real tensor whose last dimension is of size 2.

    Returns:
        A complex tensor with shape ``x.shape[:-1]``.

    Raises:
        ValueError: If the last dimension of ``x`` is not of size 2.
    """
    if any_symbolic_tensors((x,)):
        return ViewAsComplex().symbolic_call(x)
    x = backend.convert_to_tensor(x)
    if len(x.shape) < 1 or x.shape[-1] != 2:
        raise ValueError(f'Last dimension of input must be size 2 (real and imaginary). Received shape: {x.shape}')
    real, imag = x[..., 0], x[..., 1]
    # Combine the two real channels into a single complex64 tensor.
    return backend.cast(real, dtype='complex64') + 1j * backend.cast(imag, dtype='complex64')
Converts a real tensor with shape `(..., 2)` to a complex tensor, where the last dimension represents the real and imaginary components of a complex tensor. Args: x: A real tensor with last dimension of size 2. Returns: A complex tensor with shape `x.shape[:-1]`. Example: ``` >>> import numpy as np >>> from keras import ops >>> real_imag = np.array([[1.0, 2.0], [3.0, 4.0]]) >>> complex_tensor = ops.view_as_complex(real_imag) >>> complex_tensor array([1.+2.j, 3.+4.j]) ```
github-repos
def from_json(raw):
    """Construct a node from its dict representation.

    Args:
        raw (dict): Raw node representation.

    Returns:
        Node: A Node object, or None when the type is unknown.
    """
    node_type = raw.get('type')
    try:
        node_cls = _type_map[NodeType(node_type)]
    except (KeyError, ValueError) as e:
        # Unknown or invalid type: warn, optionally raise in debug builds.
        logger.warning('Unknown node type: %s', node_type)
        if DEBUG:
            raise_from(exception.ParseException('Parse error for %s' % node_type, raw), e)
        return None
    node = node_cls()
    node.load(raw)
    return node
Helper to construct a node from a dict. Args: raw (dict): Raw node representation. Returns: Node: A Node object or None.
codesearchnet
def rpc_name(rpc_id):
    """Map an RPC id to a human readable name.

    Looks the id up in the global RPC name map; when absent, a generic
    ``'RPC 0x%04X'`` style name is produced.

    Args:
        rpc_id (int): The id of the RPC to look up.

    Returns:
        str: The nice name of the RPC.
    """
    known = _RPC_NAME_MAP.get(rpc_id)
    if known is not None:
        return known
    return 'RPC 0x%04X' % rpc_id
Map an RPC id to a string name. This function looks the RPC up in a map of all globally declared RPCs, and returns a nice name string. If the RPC is not found in the global name map, returns a generic name string such as 'RPC 0x%04X'. Args: rpc_id (int): The id of the RPC that we wish to look up. Returns: str: The nice name of the RPC.
juraj-google-style
async def process_graph_input(graph, stream, value, rpc_executor):
    """Process an input through this sensor graph (asyncio version).

    The tick information in ``value`` is transferred to all results
    produced by nodes acting on this tick.

    Args:
        graph (SensorGraph): The graph whose nodes should be evaluated.
        stream (DataStream): The stream the input is part of.
        value (IOTileReading): The value to process.
        rpc_executor (RPCExecutor): An object capable of executing RPCs
            in case a node needs to do that.
    """
    graph.sensor_log.push(stream, value)
    if stream.important:
        associated_output = stream.associated_stream()
        graph.sensor_log.push(associated_output, value)
    to_check = deque(graph.roots)
    while len(to_check) > 0:
        node = to_check.popleft()
        if not node.triggered():
            continue
        # BUG FIX: `results` was previously only assigned inside the try
        # block, so a failure in node.process() on the first triggered node
        # raised NameError (or reused stale results from a prior node).
        results = []
        try:
            results = node.process(rpc_executor, graph.mark_streamer)
            for result in results:
                if inspect.iscoroutine(result.value):
                    result.value = await asyncio.ensure_future(result.value)
                result.raw_time = value.raw_time
                graph.sensor_log.push(node.stream, result)
        except Exception:  # was a bare except; keep KeyboardInterrupt alive
            logging.getLogger(__name__).exception("Unhandled exception in graph node processing function for node %s", str(node))
        if len(results) > 0:
            to_check.extend(node.outputs)
Process an input through this sensor graph. The tick information in value should be correct and is transferred to all results produced by nodes acting on this tick. This coroutine is an asyncio compatible version of SensorGraph.process_input() Args: stream (DataStream): The stream the input is part of value (IOTileReading): The value to process rpc_executor (RPCExecutor): An object capable of executing RPCs in case we need to do that.
juraj-google-style
def getFileObjects(self):
    """Retrieve a dictionary of GsshaPy file objects.

    Use together with getFileKeys to access only files that have been
    read into the database; unread files map to None.

    Returns:
        dict: Human readable keys mapped to GsshaPy file object instances.
    """
    return {
        'project-file': self,
        'mapping-table-file': self.mapTableFile,
        'channel-input-file': self.channelInputFile,
        'precipitation-file': self.precipFile,
        'storm-pipe-network-file': self.stormPipeNetworkFile,
        'hmet-file': self.hmetFile,
        'nwsrfs-file': self.nwsrfsFile,
        'orographic-gage-file': self.orographicGageFile,
        'grid-pipe-file': self.gridPipeFile,
        'grid-stream-file': self.gridStreamFile,
        'time-series-file': self.timeSeriesFiles,
        'projection-file': self.projectionFile,
        'replace-parameters-file': self.replaceParamFile,
        'replace-value-file': self.replaceValFile,
        'output-location-file': self.outputLocationFiles,
        'maps': self.maps,
        'link-node-datasets-file': self.linkNodeDatasets,
    }
Retrieve a dictionary of file objects. This is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method in conjunction with the getFileKeys method to access only files that have been read into the database. Returns: dict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not been read into the database will have a value of None.
codesearchnet
def parse_machine_listing(text: str, convert: bool=True, strict: bool=True) -> \
        List[dict]:
    """Parse an RFC 3659 machine listing.

    Args:
        text: The listing text.
        convert: Convert sizes and dates to native types.
        strict: When True, errors raise ValueError; when False, rows with
            errors are silently skipped.

    Returns:
        list: A list of dicts of the facts defined in RFC 3659. Keys are
        lowercase; the filename is stored under the key ``name``.
    """
    rows = []
    for line in text.splitlines(False):
        entry = {}
        filename = None
        for fact in line.split(';'):
            name, sep, value = fact.partition('=')
            if not sep:
                # A leading space marks the filename field; anything else is
                # a flag-style fact with an empty value.
                if name[0:1] == ' ':
                    filename = name[1:]
                else:
                    entry[name.strip().lower()] = ''
                continue
            name = name.strip().lower()
            value = value.strip().lower()
            if convert:
                try:
                    value = convert_machine_list_value(name, value)
                except ValueError:
                    if strict:
                        raise
            entry[name] = value
        if filename:
            entry['name'] = filename
            rows.append(entry)
        elif strict:
            raise ValueError('Missing filename.')
    return rows
Parse machine listing. Args: text: The listing. convert: Convert sizes and dates. strict: Method of handling errors. ``True`` will raise ``ValueError``. ``False`` will ignore rows with errors. Returns: list: A list of dict of the facts defined in RFC 3659. The key names must be lowercase. The filename uses the key ``name``.
juraj-google-style
def concatenate(cls, list_of_stats):
    """Merge stats from several sets of data points into overall stats.

    Args:
        list_of_stats (iterable): Stats for different sets of data points.
            Each entry exposes ``values`` as
            (mean, var, min, max, count).

    Returns:
        DataStats: Stats calculated over all sets of data points.
    """
    stacked = np.stack([stats.values for stats in list_of_stats])
    means, variances, mins, maxs, counts = (stacked[:, i] for i in range(5))
    # Count-weighted combination of per-set means and variances.
    weights = counts / np.sum(counts)
    merged_mean = float(np.sum(weights * means))
    merged_var = float(np.sum(weights * (variances + np.power(means - merged_mean, 2))))
    return cls(merged_mean, merged_var, float(np.min(mins)), float(np.max(maxs)), int(np.sum(counts)))
Take a list of stats from different sets of data points and merge the stats for getting stats overall data points. Args: list_of_stats (iterable): A list containing stats for different sets of data points. Returns: DataStats: Stats calculated overall sets of data points.
juraj-google-style
def from_json(cls, data):
    """Create a DDY from a dictionary.

    Args:
        data: A dict of the form
            {"location": Location schema,
             "design_days": []  # list of DesignDay schemas}
    """
    for required in ('location', 'design_days'):
        assert required in data, 'Required key "{}" is missing!'.format(required)
    design_days = [DesignDay.from_json(des_day) for des_day in data['design_days']]
    return cls(Location.from_json(data['location']), design_days)
Create a DDY from a dictionary. Args: data = { "location": ladybug Location schema, "design_days": [] // list of ladybug DesignDay schemas}
juraj-google-style
def postprocess_model(self, model: 'PreTrainedModel', **kwargs):
    """Post-process the model after weights loading.

    Delegates to the subclass hook `_process_model_after_weight_loading`.

    Args:
        model (`~transformers.PreTrainedModel`): The model to quantize.
        kwargs (`dict`, *optional*): Keyword arguments passed along to
            `_process_model_after_weight_loading`.
    """
    processed = self._process_model_after_weight_loading(model, **kwargs)
    return processed
Post-process the model post weights loading. Make sure to override the abstract method `_process_model_after_weight_loading`. Args: model (`~transformers.PreTrainedModel`): The model to quantize kwargs (`dict`, *optional*): The keyword arguments that are passed along `_process_model_after_weight_loading`.
github-repos
def constant(times: np.ndarray, amp: complex) -> np.ndarray:
    """Continuous constant pulse.

    Args:
        times: Times to output the pulse for.
        amp: Complex pulse amplitude.

    Returns:
        np.ndarray: ``amp`` repeated once per entry of ``times``, complex128.
    """
    # np.complex_ was removed in NumPy 2.0; the builtin `complex` maps to
    # complex128 on every NumPy version, preserving the original dtype.
    return np.full(len(times), amp, dtype=complex)
Continuous constant pulse. Args: times: Times to output pulse for. amp: Complex pulse amplitude.
juraj-google-style
def modutf7_encode(data: str) -> bytes:
    """Encode the string using modified UTF-7 (RFC 3501 mailbox naming).

    Printable US-ASCII characters pass through unchanged, a literal ``&``
    becomes ``&-``, and every maximal run of other characters is emitted
    as ``&`` + modified-base64 + ``-``.

    Args:
        data: The input string to encode.

    Returns:
        The modified-UTF-7 encoded bytes.
    """
    ret = bytearray()
    # True while we are inside a printable-ASCII run; False while collecting
    # a run that must be base64-encoded.
    is_usascii = True
    # Index where the current non-ASCII run started (valid when not ASCII).
    encode_start = None
    for i, symbol in enumerate(data):
        charpoint = ord(symbol)
        if is_usascii:
            if charpoint == 0x26:
                # '&' is the shift character, so it is escaped as '&-'.
                ret.extend(b'&-')
            elif 0x20 <= charpoint <= 0x7e:
                ret.append(charpoint)
            else:
                # Start of a run that needs modified-base64 encoding.
                encode_start = i
                is_usascii = False
        else:
            if 0x20 <= charpoint <= 0x7e:
                # Run ended: flush it as '&' + encoded + '-' + this char.
                to_encode = data[encode_start:i]
                encoded = _modified_b64encode(to_encode)
                ret.append(0x26)
                ret.extend(encoded)
                ret.extend((0x2d, charpoint))
                is_usascii = True
    if not is_usascii:
        # The string ended inside an encoded run; flush the tail.
        to_encode = data[encode_start:]
        encoded = _modified_b64encode(to_encode)
        ret.append(0x26)
        ret.extend(encoded)
        ret.append(0x2d)
    return bytes(ret)
Encode the string using modified UTF-7. Args: data: The input string to encode.
juraj-google-style
def EnumerateConfig(self, service, path, cache, filter_type=None):
    """Return PamConfigEntries found by recursively following PAM configs.

    Args:
        service: A string containing the service name being processed.
        path: A string containing the file path name we want.
        cache: A dictionary keyed on path, with file contents (list of str).
        filter_type: A string with the type name of the results we want.

    Returns:
        A tuple of (list of RDFValue PamConfigEntries found,
        list of strings naming external config references found).
    """
    result = []
    external = []
    path = self._FixPath(path)
    if path not in cache:
        # BUG FIX: the original called list.append with three arguments
        # (a TypeError at runtime); format the reference string first.
        external.append('%s -> %s' % (self.OLD_PAMCONF_FILENAME, path))
        return (result, external)
    for tokens in self.ParseEntries(cache[path]):
        if path == self.OLD_PAMCONF_FILENAME:
            # Legacy pam.conf lines start with the service name.
            try:
                service = tokens[0]
                tokens = tokens[1:]
            except IndexError:
                continue
        new_path = None
        filter_request = None
        try:
            if tokens[0] == '@include':
                new_path = tokens[1]
            elif tokens[1] in ['include', 'substack']:
                new_path = tokens[2]
                filter_request = tokens[0]
        except IndexError:
            pass
        if new_path:
            new_path = self._FixPath(new_path)
            if new_path not in cache:
                external.append('%s -> %s' % (path, new_path))
                continue
            # Recurse into the included file, filtering on the requested type.
            (r, e) = self.EnumerateConfig(service, new_path, cache, filter_request)
            result.extend(r)
            external.extend(e)
        else:
            if filter_type and (tokens[0] != filter_type):
                continue
            match = self.PAMCONF_RE.match(' '.join(tokens))
            if match:
                (p_type, control, module_path, module_args) = match.group(1, 2, 3, 4)
                # A leading '-' marks "silently skip if module missing".
                if p_type.startswith('-'):
                    p_type = p_type[1:]
                result.append(rdf_config_file.PamConfigEntry(service=service, type=p_type, control=control, module_path=module_path, module_args=module_args))
    return (result, external)
Return PamConfigEntries it finds as it recursively follows PAM configs. Args: service: A string containing the service name we are processing. path: A string containing the file path name we want. cache: A dictionary keyed on path, with the file contents (list of str). filter_type: A string containing type name of the results we want. Returns: A tuple of a list of RDFValue PamConfigEntries found & a list of strings which are the external config references found.
codesearchnet
def visit_boolean_op(self, boolean_logic: _evaluation.BooleanOperatorNode) -> _sql_data_types.Select:
    """Translate a FHIRPath Boolean logic operation to Standard SQL.

    Evaluation of Boolean logic is only supported for Boolean operands of
    scalar cardinality; non-Boolean operands are coerced via IS NOT NULL.

    Args:
        boolean_logic: The FHIRPath AST `BooleanLogic` node.

    Returns:
        A compiled Standard SQL expression aliased as `logic_`.
    """
    lhs_result = self.visit(boolean_logic.left)
    rhs_result = self.visit(boolean_logic.right)
    # Coerce non-Boolean operands to existence checks so the SQL operator
    # always receives a Boolean expression.
    if lhs_result.sql_data_type != _sql_data_types.Boolean:
        lhs_result = lhs_result.is_not_null()
    if rhs_result.sql_data_type != _sql_data_types.Boolean:
        rhs_result = rhs_result.is_not_null()
    lhs_subquery = lhs_result.as_operand()
    rhs_subquery = rhs_result.as_operand()
    # IMPLIES and XOR have no direct SQL keyword; spell them out.
    if boolean_logic.op == _ast.BooleanLogic.Op.IMPLIES:
        sql_value = f'(NOT {lhs_subquery} OR {rhs_subquery})'
    elif boolean_logic.op == _ast.BooleanLogic.Op.XOR:
        sql_value = f'({lhs_subquery} <> {rhs_subquery})'
    else:
        # AND / OR map directly onto their SQL counterparts.
        sql_value = f'({lhs_subquery} {boolean_logic.op.upper()} {rhs_subquery})'
    sql_alias = 'logic_'
    return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_value, _sql_data_type=_sql_data_types.Boolean, _sql_alias=sql_alias), from_part=None)
Translates a FHIRPath Boolean logic operation to Standard SQL. Note that evaluation for Boolean logic is only supported for Boolean operands of scalar cardinality. Args: boolean_logic: The FHIRPath AST `BooleanLogic` node. Returns: A compiled Standard SQL expression.
github-repos
def generate_encodeable_characters(characters: Iterable[str], encodings: Iterable[str]) -> Iterable[str]:
    """Generate the subset of `characters` encodable by any of `encodings`.

    Args:
        characters: The characters to check for encodeability, e.g. 'abcd'.
        encodings: The encodings to test against, e.g. ['cp1252', 'iso-8859-5'].

    Yields:
        Each character of `characters` (at most once) that at least one of
        the provided encodings can encode.
    """
    for c in characters:
        for encoding in encodings:
            try:
                c.encode(encoding)
            except UnicodeEncodeError:
                continue
            # BUG FIX: the original kept trying the remaining encodings and
            # yielded the same character once per encoding that accepted it.
            yield c
            break
Generates the subset of 'characters' that can be encoded by 'encodings'. Args: characters: The characters to check for encodeability e.g. 'abcd'. encodings: The encodings to check against e.g. ['cp1252', 'iso-8859-5']. Returns: The subset of 'characters' that can be encoded using one of the provided encodings.
codesearchnet
def from_json(cls, json):
    """Create an InputReader for the given input shard state.

    Args:
        json: The InputReader state as a dict-like object.

    Returns:
        An instance of the InputReader configured from `json`.
    """
    reader = super(_ReducerReader, cls).from_json(json)
    # Restore the reducer-specific fields on top of the base state.
    reader.current_key = _ReducerReader.decode_data(json['current_key'])
    reader.current_values = _ReducerReader.decode_data(json['current_values'])
    return reader
Creates an instance of the InputReader for the given input shard state. Args: json: The InputReader state as a dict-like object. Returns: An instance of the InputReader configured using the values of json.
juraj-google-style
def _from_keras_log_format(data, **kwargs):
    """Plot accuracy and loss from a Keras CSV-log style dataframe.

    Args:
        data: Pandas dataframe in the format of the Keras CSV log
            (columns: epoch, acc, loss, val_acc, val_loss).
        **kwargs: Forwarded to `plot` (e.g. output_dir_path).
    """
    # Validation rows: rename val_* columns and tag them.
    validation = pd.DataFrame(data[['epoch']])
    validation['acc'] = data['val_acc']
    validation['loss'] = data['val_loss']
    validation['data'] = 'validation'
    # Training rows keep their column names and get tagged.
    training = pd.DataFrame(data[['acc', 'loss', 'epoch']])
    training['data'] = 'training'
    plot(pd.concat([training, validation], sort=False), **kwargs)
Plot accuracy and loss from a panda's dataframe. Args: data: Panda dataframe in the format of the Keras CSV log. output_dir_path: The path to the directory where the resultings plots should end up.
juraj-google-style
def get_processid(config):
    """Return the process id of anycast-healthchecker.

    Args:
        config (configparser.ConfigParser): Daemon configuration.

    Returns:
        int: The process id found in the pid file.

    Raises:
        ValueError: When the pidfile option is missing, the pid file holds
            invalid/stale data, the pid is -1 or 1, or the file cannot be
            read for a reason other than not existing.

    Side effects:
        Prints a CRITICAL message and exits with status 2 when the pid file
        does not exist (preserved original behavior).
    """
    pidfile = config.get('daemon', 'pidfile', fallback=None)
    if pidfile is None:
        raise ValueError("Configuration doesn't have pidfile option!")
    try:
        with open(pidfile, 'r') as _file:
            pid = _file.read().rstrip()
    except FileNotFoundError:
        # A missing pid file most likely means the daemon is down.
        print("CRITICAL: anycast-healthchecker could be down as pid file {} doesn't exist".format(pidfile))
        sys.exit(2)
    except OSError as exc:
        raise ValueError('error while reading pid file:{}'.format(exc))
    try:
        pid = int(pid)
    except ValueError:
        raise ValueError('stale pid file with invalid data:{}'.format(pid))
    if pid in (-1, 1):
        raise ValueError('invalid PID ({})'.format(pid))
    return pid
Return process id of anycast-healthchecker. Arguments: config (obj): A configparser object with the configuration of anycast-healthchecker. Returns: The process id found in the pid file Raises: ValueError in the following cases - pidfile option is missing from the configuration - pid is either -1 or 1 - stale pidfile, either with no data or invalid data - failure to read pidfile
codesearchnet
def gpio_properties(self):
    """Return the properties of the user-controllable GPIOs.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        A list of ``JLinkGPIODescriptor`` instances, one per GPIO property
        reported by the emulator.

    Raises:
        JLinkException: on error.
    """
    # First call with null arguments only queries the property count.
    num_props = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)
    if num_props < 0:
        raise errors.JLinkException(num_props)
    # Second call fills a descriptor array of that size.
    buf = (structs.JLinkGPIODescriptor * num_props)()
    res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)
    if res < 0:
        raise errors.JLinkException(res)
    return list(buf)
Returns the properties of the user-controllable GPIOs. Provided the device supports user-controllable GPIOs, they will be returned by this method. Args: self (JLink): the ``JLink`` instance Returns: A list of ``JLinkGPIODescriptor`` instances totalling the number of requested properties. Raises: JLinkException: on error.
juraj-google-style
def AddLabel(self, label):
    """Add a label to the event tag (idempotently).

    Args:
        label (str): label.

    Raises:
        TypeError: if the provided label is not a string.
        ValueError: if the label is malformed.
    """
    if not isinstance(label, py2to3.STRING_TYPES):
        # NOTE(review): '{0:s}' applied to a type object raises its own
        # TypeError during formatting; behavior preserved from original.
        raise TypeError('label is not a string type. Is {0:s}'.format(type(label)))
    if not self._VALID_LABEL_REGEX.match(label):
        raise ValueError('Unsupported label: "{0:s}". A label must only consist of alphanumeric characters or underscores.'.format(label))
    if label not in self.labels:
        self.labels.append(label)
Adds a label to the event tag. Args: label (str): label. Raises: TypeError: if the label provided is not a string. ValueError: if a label is malformed.
juraj-google-style
def __init__(self, app, *, options=None):
    """Initialize a new standalone application.

    Args:
        app: A WSGI Python application.
        options (dict): The configuration (empty dict when omitted).
    """
    self.application = app
    self.options = options if options else {}
    super().__init__()
Initialize a new standalone application. Args: app: A wsgi Python application. options (dict): the configuration.
juraj-google-style
class MetricContainer:
    """Holds metric values and the timestamps they were observed at.

    Attributes:
        values: List of metric values.
        timestamps: List of pandas timestamps matching the values.

    NOTE(review): fields are bare annotations here; presumably a
    @dataclass decorator precedes this class — confirm in the full file.
    """
    values: List[Union[int, float]]
    timestamps: List[pd.Timestamp]

    def sort_by_timestamp(self, in_place=True):
        """Sort values by ascending timestamp.

        Args:
            in_place: When True (default), reorder this container's fields
                and return None; otherwise return a new sorted container.
        """
        # FIX: compute the sorted pairing once — the original computed it,
        # discarded it on the in-place path, and then computed it again.
        timestamps, values = zip(*sorted(zip(self.timestamps, self.values)))
        if not in_place:
            return MetricContainer(values=values, timestamps=timestamps)
        self.timestamps, self.values = timestamps, values
This class holds the metric values and timestamps for a given metric. Args: metric_values: List of metric values. timestamps: List of pandas timestamps corresponding to the metric values.
github-repos
def __eq__(self, other) -> bool:
    """Two time-slots are equal iff they share interval and channel.

    Args:
        other (Timeslot): the other Timeslot.

    Returns:
        bool: True when both interval and channel match.
    """
    # Idiom fix: return the boolean expression directly instead of the
    # `if cond: return True / return False` pattern.
    return self.interval == other.interval and self.channel == other.channel
Two time-slots are the same if they have the same interval and channel. Args: other (Timeslot): other Timeslot
juraj-google-style
async def get_in_tree_template(link):
    """Get the in-tree json-e template for a given link.

    By convention this template is SOURCE_REPO/.taskcluster.yml.

    Args:
        link (LinkOfTrust): the parent link to get the source url from.

    Raises:
        CoTError: on a non-yaml `source_url`.
        KeyError: on a non-well-formed source template.

    Returns:
        dict: the first task in the template.
    """
    context = link.context
    source_url = get_source_url(link)
    if not source_url.endswith(('.yml', '.yaml')):
        raise CoTError("{} source url {} doesn't end in .yml or .yaml!".format(link.name, source_url))
    local_path = os.path.join(context.config["work_dir"], "{}_taskcluster.yml".format(link.name))
    return await load_json_or_yaml_from_url(context, source_url, local_path)
Get the in-tree json-e template for a given link. By convention, this template is SOURCE_REPO/.taskcluster.yml. Args: link (LinkOfTrust): the parent link to get the source url from. Raises: CoTError: on non-yaml `source_url` KeyError: on non-well-formed source template Returns: dict: the first task in the template.
juraj-google-style
def __driver_stub(self, text, state):
    """Display help messages or invoke the proper completer.

    This is the readline callback entry point. A trailing '?' on the
    current line triggers the helper; anything else is tokenized and
    passed to the completer.

    Args:
        text: A string, the current completion scope.
        state: An integer readline completion state.

    Returns:
        A string used to replace the given text, if any; None when no
        completion candidates are found.

    Note:
        Errors raised here are silently swallowed by the readline
        library, which makes debugging difficult — non-driver methods
        are therefore run inside try/except blocks elsewhere.
    """
    origline = readline.get_line_buffer()
    line = origline.lstrip()
    if (line and (line[(- 1)] == '?')):
        # Help request: show contextual help instead of completing.
        self.__driver_helper(line)
    else:
        toks = shlex.split(line)
        return self.__driver_completer(toks, text, state)
Display help messages or invoke the proper completer. The interface of helper methods and completer methods are documented in the helper() decorator method and the completer() decorator method, respectively. Arguments: text: A string, that is the current completion scope. state: An integer. Returns: A string used to replace the given text, if any. None if no completion candidates are found. Raises: This method is called via the readline callback. If this method raises an error, it is silently ignored by the readline library. This behavior makes debugging very difficult. For this reason, non-driver methods are run within try-except blocks. When an error occurs, the stack trace is printed to self.stderr.
codesearchnet
def __getitem__(self, index):
    """Gets batch at position `index`.

    Abstract: subclasses must override this method.

    Args:
        index: position of the batch in the Sequence.

    Returns:
        A batch.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError
Gets batch at position `index`. Args: index: position of the batch in the Sequence. Returns: A batch
github-repos
def emit(self, signal, message, analysis_id):
    """Emit a signal to main over the zmq publish socket.

    Args:
        signal: Name of the signal to be emitted.
        message: Message to be sent.
        analysis_id: Identifies the instance of this analysis.
    """
    log.debug('kernel {} zmq send ({}): {}'.format(analysis_id, signal, message))
    payload = {
        'analysis_id': analysis_id,
        'frame': {'signal': signal, 'load': message},
    }
    self.zmq_publish.send(json.dumps(payload, default=json_encoder_default).encode('utf-8'))
Emit signal to main. Args: signal: Name of the signal to be emitted. message: Message to be sent. analysis_id: Identifies the instance of this analysis.
juraj-google-style
def run_task_external(self, coroutine):
    """Inject a task into the emulation loop and wait for it to finish.

    The coroutine runs as a Task inside the EmulationLoop until it
    completes; its return value (or raised exception) is passed back
    into the caller's thread.

    Args:
        coroutine (coroutine): The task to inject into the event loop.

    Returns:
        object: Whatever the coroutine returned.
    """
    self.verify_calling_thread(False, 'run_task_external must not be called from the emulation thread')
    pending = asyncio.run_coroutine_threadsafe(coroutine, self._loop)
    # Block the calling thread until the loop finishes the task.
    return pending.result()
Inject a task into the emulation loop and wait for it to finish. The coroutine parameter is run as a Task inside the EmulationLoop until it completes and the return value (or any raised Exception) is pased back into the caller's thread. Args: coroutine (coroutine): The task to inject into the event loop. Returns: object: Whatever the coroutine returned.
juraj-google-style
def create_tree(profile, tree):
    """Create a new git tree via the GitHub API.

    Args:
        profile: A profile from ``simplygithub.authentication.profile``,
            carrying the ``repo`` to connect to and the ``token``.
        tree: A list of blob objects (each with a path, mode, type, and
            content or sha) to put in the tree.

    Returns:
        A dict with data about the created tree.
    """
    data = api.post_request(profile, "/trees", {"tree": tree})
    return prepare(data)
Create a new tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. tree A list of blob objects (each with a path, mode, type, and content or sha) to put in the tree. Returns: A dict with data about the tree.
juraj-google-style
def _update_exponential_bucket_count(a_float, dist):
    """Add `a_float` to `dist`, updating its exponential buckets.

    Args:
        a_float (float): a new sample value.
        dist: the Distribution message being updated.

    Raises:
        ValueError: if `dist` has no exponential buckets defined, or its
            bucketCounts list is too short for them.
    """
    buckets = dist.exponentialBuckets
    if buckets is None:
        raise ValueError(_BAD_UNSET_BUCKETS % u'exponential buckets')
    num_finite_buckets = buckets.numFiniteBuckets
    bucket_counts = dist.bucketCounts
    # Need one underflow plus one overflow bucket on top of the finite ones.
    if len(bucket_counts) < num_finite_buckets + 2:
        raise ValueError(_BAD_LOW_BUCKET_COUNT)
    scale, factor = buckets.scale, buckets.growthFactor
    if a_float <= scale:
        index = 0
    else:
        # log-base-factor position of the sample, clamped to the overflow bucket.
        index = min(1 + int(math.log(a_float / scale) / math.log(factor)), num_finite_buckets + 1)
    bucket_counts[index] += 1
    _logger.debug(u'scale:%f, factor:%f, sample:%f, index:%d', scale, factor, a_float, index)
Adds `a_float` to `dist`, updating its exponential buckets. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated Raises: ValueError: if `dist` does not already have exponential buckets defined ValueError: if there are not enough bucket count fields in `dist`
codesearchnet
def DoesNotContain(self, value):
    """Set the WHERE clause type to "does not contain".

    Args:
        value: The value to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateSingleValueCondition(value, 'DOES_NOT_CONTAIN')
    self._awql = condition
    return self._query_builder
Sets the type of the WHERE clause as "does not contain". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
codesearchnet
def get(self):
    """Get the next batch from the internal ring buffer.

    Returns:
        tuple: (buf, released) where `buf` is the data item pulled from
        the input queue and `released` is True when the item has just
        been removed from the ring buffer.
    """
    slot = self.idx
    if self.ttl[slot] <= 0:
        # Slot lease expired: refill it from the queue and reset the lease.
        self.buffers[slot] = self.inqueue.get(timeout=300.0)
        self.ttl[slot] = self.cur_max_ttl
        # Gradually warm up to the maximum lease length.
        if self.cur_max_ttl < self.max_ttl:
            self.cur_max_ttl += 1
    buf = self.buffers[slot]
    self.ttl[slot] -= 1
    released = self.ttl[slot] <= 0
    if released:
        self.buffers[slot] = None
    # Advance to the next slot, wrapping around the ring.
    self.idx = (slot + 1) % len(self.buffers)
    return (buf, released)
Get a new batch from the internal ring buffer. Returns: buf: Data item saved from inqueue. released: True if the item is now removed from the ring buffer.
codesearchnet
def get(self, key, mem_map=True):
    """Return the samples for the given key together with the sampling rate.

    Args:
        key (str): The key to read the data from.
        mem_map (bool): If ``True`` return the data as a memory-mapped
            array, otherwise a copy is returned.

    Note:
        The container has to be opened in advance.

    Returns:
        tuple: (samples as np.float32 in [-1.0, 1.0], sampling rate).
    """
    self.raise_error_if_not_open()
    if key in self._file:
        data = self._file[key]
        sampling_rate = data.attrs[SAMPLING_RATE_ATTR]
        if not mem_map:
            # data[()] materializes the dataset into memory (a copy).
            data = data[()]
        # Scale int16-range samples into floats in [-1.0, 1.0].
        data = np.float32(data) / MAX_INT16_VALUE
        return data, sampling_rate
    # NOTE(review): when `key` is missing the function falls through and
    # implicitly returns None instead of a tuple — confirm whether raising
    # KeyError was intended.
Return the samples for the given key and the sampling-rate. Args: key (str): The key to read the data from. mem_map (bool): If ``True`` returns the data as memory-mapped array, otherwise a copy is returned. Note: The container has to be opened in advance. Returns: tuple: A tuple containing the samples as numpy array with ``np.float32`` [-1.0,1.0] and the sampling-rate.
juraj-google-style
def serialize_to_string(self):
    """Serialize the ProfileProto to a binary string.

    Users can write the result to a file for offline analysis by the
    tfprof command line or graphical interface.

    Returns:
        bytes: The ProfileProto serialized as a binary string.
    """
    serialized = print_mdl.SerializeToString()
    return serialized
Serialize the ProfileProto to a binary string. Users can write it to file for offline analysis by tfprof commandline or graphical interface. Returns: ProfileProto binary string.
github-repos
def _get_init_rng(self):
    """Return the JAX `PRNGKey` (or structure of keys) to pass to `init_fn`.

    By default this returns a single key from `self.seed_generator.next()`;
    override to return a different structure.
    """
    rng = self.seed_generator.next()
    return rng
Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `init_fn`. By default, this returns a single `PRNGKey` retrieved by calling `self.seed_generator.next()`. Override this to return a different structure. Returns: a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as the `rng` argument of `init_fn`.
github-repos
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Run only the decoder (plus the LM head) against precomputed encoder outputs.

    Used for incremental generation: when `past_key_values` is supplied the
    flax cache is threaded through the call and the updated cache is
    returned alongside the logits.
    """
    # Fall back to config defaults for unspecified output switches.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    encoder_hidden_states = encoder_outputs[0]
    if encoder_attention_mask is None:
        # Default: attend over every encoder position.
        batch_size, sequence_length = encoder_hidden_states.shape[:2]
        encoder_attention_mask = jnp.ones((batch_size, sequence_length))
    batch_size, sequence_length = decoder_input_ids.shape
    if decoder_attention_mask is None:
        decoder_attention_mask = jnp.ones((batch_size, sequence_length))
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng
    inputs = {'params': params or self.params}
    if past_key_values:
        # Mark the cache collection mutable so flax returns the updated cache.
        inputs['cache'] = past_key_values
        mutable = ['cache']
    else:
        mutable = False

    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs):
        # Runs inside module.apply so the bound flax module is available.
        decoder_module = module._get_decoder_module()
        decoder_outputs = decoder_module(decoder_input_ids, decoder_attention_mask, **kwargs)
        sequence_output = decoder_outputs[0]
        if self.config.tie_word_embeddings:
            # T5-style rescaling before projecting through tied embeddings.
            sequence_output = sequence_output * self.config.d_model ** (-0.5)
        if self.config.tie_word_embeddings:
            shared_embedding = module.shared.variables['params']['embedding']
            lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, sequence_output)
        else:
            lm_logits = module.lm_head(sequence_output)
        return (lm_logits, decoder_outputs)

    outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)
    # With a mutable cache, apply() returns (value, mutated_variables).
    if past_key_values is None:
        lm_logits, decoder_outputs = outputs
    else:
        (lm_logits, decoder_outputs), past = outputs
    if return_dict:
        outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)
    else:
        outputs = (lm_logits,) + decoder_outputs[1:]
    # Attach the updated cache to whichever output form was requested.
    if past_key_values is not None and return_dict:
        outputs['past_key_values'] = unfreeze(past['cache'])
        return outputs
    elif past_key_values is not None and (not return_dict):
        outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]
    return outputs
Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") >>> model = FlaxLongT5ForConditionalGeneration.from_pretrained("google/long-t5-local-base") >>> text = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```
github-repos
def set_time(self, value: float):
    """Set the current time, jumping in the timeline.

    Negative values are clamped to zero.

    Args:
        value (float): The new time.
    """
    clamped = max(value, 0)
    # Shift the offset so get_time() will report `clamped` from now on.
    self.offset += self.get_time() - clamped
Set the current time. This can be used to jump in the timeline. Args: value (float): The new time
juraj-google-style
def _extract_blocks(x, block_h, block_w):
    """Helper for local 2d attention: split a feature map into blocks.

    Args:
        x: a [batch, height, width, depth] tensor.
        block_h: An integer, block height; must divide height.
        block_w: An integer, block width; must divide width.

    Returns:
        a [batch, height // block_h, width // block_w, block_h, block_w,
        depth] tensor.
    """
    (_, height, width, depth) = common_layers.shape_list(x)
    assert height % block_h == 0
    assert width % block_w == 0
    # RECONSTRUCTION: the reshape list was garbled in the source (the
    # `// block_h` / `// block_w` terms were stripped); restored from the
    # documented output shape.
    x = tf.reshape(x, [-1, height // block_h, block_h, width // block_w, block_w, depth])
    return tf.transpose(x, [0, 1, 3, 2, 4, 5])
Helper function for local 2d attention. Args: x: a [batch, height, width, depth] tensor block_h: An integer. block height block_w: An inteter. block width returns: a [batch, num_heads, height/block_h, width/block_w, depth] tensor
juraj-google-style
def read(self, offset, length):
    """Read a string of bytes at `offset` relative to the MMIO base address.

    Args:
        offset (int): offset from base physical address, in bytes.
        length (int): number of bytes to read.

    Returns:
        bytes: bytes read.

    Raises:
        TypeError: if `offset` type is invalid.
        ValueError: if `offset` is out of bounds.
    """
    # `long` only exists on Python 2 and raised NameError on Python 3;
    # a plain int covers arbitrary magnitudes on Python 3.
    if not isinstance(offset, int):
        raise TypeError("Invalid offset type, should be integer.")
    offset = self._adjust_offset(offset)
    self._validate_offset(offset, length)
    return bytes(self.mapping[offset:offset + length])
Read a string of bytes from the specified `offset` in bytes, relative to the base physical address of the MMIO region. Args: offset (int, long): offset from base physical address, in bytes. length (int): number of bytes to read. Returns: bytes: bytes read. Raises: TypeError: if `offset` type is invalid. ValueError: if `offset` is out of bounds.
juraj-google-style
def __init__(self, tcex):
    """Initialize Class Properties.

    Args:
        tcex (tcex.TcEx): Instance of TcEx class.
    """
    self.tcex = tcex
    self.parser = TcExArgParser()
    # Parsing state, populated lazily.
    self._config_data = {}
    self._default_args = None
    self._default_args_resolved = None
    self._parsed = False
    self._parsed_resolved = False
Initialize Class Properties. Args: tcex (tcex.TcEx): Instance of TcEx class.
juraj-google-style
def config(self):
    """Get the merged configuration.

    Default, config, environment, and override values are merged in that
    order (later layers win) and cached for subsequent calls.

    Returns:
        (DotDict): Configuration values supporting dot-notation lookups.
    """
    if self._full_config is None:
        merged = DotDict()
        for layer in (self._default, self._config, self._environment, self._override):
            merged.merge(layer)
        self._full_config = merged
    return self._full_config
Get the complete configuration where the default, config, environment, and override values are merged together. Returns: (DotDict): A dictionary of configuration values that allows lookups using dot notation.
codesearchnet
def _make_sql_compatible(ll):
    """Normalize a list of lists/tuples into SQL-insertable row tuples.

    Falsy values become None; on Python 2, byte strings are re-encoded
    to ASCII with non-encodable characters dropped.

    Args:
        ll (list): List of lists (or tuples).

    Returns:
        list: A list of tuples mirroring the input rows.
    """
    converted = []
    for row in ll:
        new_row = []
        for item in row:
            if not item:
                # Empty strings, zeros, and None all map to SQL NULL.
                new_row.append(None)
            elif isinstance(item, str) and sys.version_info < (3, 0):
                new_row.append(item.decode('utf8').encode('ascii', errors='ignore'))
            else:
                new_row.append(item)
        converted.append(tuple(new_row))
    return converted
Convert any python list of lists (or tuples) so that the strings are formatted correctly for insertion into a database. Args: ll (list): List of lists (or tuples)
juraj-google-style
def _gql(query_string, query_class=Query):
    """Parse a GQL query string into a query object (internal version).

    Args:
        query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'.
        query_class: Optional class to use, default Query.

    Returns:
        An instance of query_class.
    """
    from .google_imports import gql
    gql_qry = gql.GQL(query_string)
    kind = gql_qry.kind()
    if kind is None:
        # Kindless query: fall back to Expando so any property resolves.
        modelclass = model.Expando
    else:
        # Resolve the kind name to its model class (or the default model).
        modelclass = model.Model._lookup_model(
            kind, tasklets.get_context()._conn.adapter.default_model)
        # Re-read the kind in case the model class overrides it.
        kind = modelclass._get_kind()
    ancestor = None
    flt = gql_qry.filters()
    # Start from the model's implicit default filters, then add GQL ones.
    filters = list(modelclass._default_filters())
    for name_op in sorted(flt):
        name, op = name_op
        values = flt[name_op]
        op = op.lower()
        if op == 'is' and name == gql.GQL._GQL__ANCESTOR:
            # 'ANCESTOR IS <key>' is handled separately from property filters.
            if len(values) != 1:
                raise ValueError('"is" requires exactly one value')
            [(func, args)] = values
            ancestor = _args_to_val(func, args)
            continue
        if op not in _OPS:
            raise NotImplementedError('Operation %r is not supported.' % op)
        for (func, args) in values:
            val = _args_to_val(func, args)
            prop = _get_prop_from_modelclass(modelclass, name)
            # Sanity check: the resolved property must match the GQL name.
            if prop._name != name:
                raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) '
                                   'returned a property whose name is %r?!' %
                                   (modelclass.__name__, name, prop._name))
            if isinstance(val, ParameterizedThing):
                # Bound parameter (e.g. :1) — resolved at execution time.
                node = ParameterNode(prop, op, val)
            elif op == 'in':
                node = prop._IN(val)
            else:
                node = prop._comparison(op, val)
            filters.append(node)
    if filters:
        # Multiple filters combine with AND semantics.
        filters = ConjunctionNode(*filters)
    else:
        filters = None
    orders = _orderings_to_orders(gql_qry.orderings(), modelclass)
    offset = gql_qry.offset()
    limit = gql_qry.limit()
    if limit < 0:
        # GQL uses a negative limit to mean "no limit".
        limit = None
    keys_only = gql_qry._keys_only
    if not keys_only:
        keys_only = None
    options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only)
    projection = gql_qry.projection()
    if gql_qry.is_distinct():
        # DISTINCT groups by the projected properties.
        group_by = projection
    else:
        group_by = None
    qry = query_class(kind=kind, ancestor=ancestor, filters=filters,
                      orders=orders, default_options=options,
                      projection=projection, group_by=group_by)
    return qry
Parse a GQL query string (internal version). Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. query_class: Optional class to use, default Query. Returns: An instance of query_class.
juraj-google-style
def exp(cls, x: 'TensorFluent') -> 'TensorFluent':
    """Returns a TensorFluent for the exp function.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent wrapping the exp function.
    """
    # tf.float32 is presumably the result dtype enforced by _unary_op --
    # TODO(review): confirm against _unary_op's signature.
    return cls._unary_op(x, tf.exp, tf.float32)
Returns a TensorFluent for the exp function. Args: x: The input fluent. Returns: A TensorFluent wrapping the exp function.
juraj-google-style
def read(self, uri):
    """Fetch ``uri`` from the Fedora repository and parse it as Turtle.

    Args:
        uri (str): URI of a Fedora resource.

    Returns:
        rdflib.Graph: graph parsed from the response body.
    """
    payload = self.connect(uri).read()
    # rdflib's parse() returns the graph it populated.
    return rdflib.Graph().parse(data=payload, format='turtle')
Method takes uri and creates a RDF graph from Fedora Repository Args: uri(str): URI of Fedora URI Returns: rdflib.Graph
juraj-google-style
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor` to be consumed by model-builder functions.

    NOTE(review): the body is an intentional no-op here -- presumably an
    abstract/overridable hook implemented by concrete feature columns;
    confirm subclasses provide the real implementation.

    Args:
        inputs: A `_LazyBuilder` object to access inputs.
        weight_collections: List of graph collections to which variables
            (if any are created) are added.
        trainable: If `True`, also add variables to the graph collection
            `GraphKeys.TRAINABLE_VARIABLES`.

    Returns:
        `Tensor` of shape [batch_size] + `_variable_shape`.
    """
    pass
Returns a `Tensor`. The output of this function will be used by model-builder-functions. For example the pseudo code of `input_layer` will be like: ```python def input_layer(features, feature_columns, ...): outputs = [fc._get_dense_tensor(...) for fc in feature_columns] return tf.concat(outputs) ``` Args: inputs: A `_LazyBuilder` object to access inputs. weight_collections: List of graph collections to which Variables (if any will be created) are added. trainable: If `True` also add variables to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). Returns: `Tensor` of shape [batch_size] + `_variable_shape`.
github-repos
def EstimateTimeRemaining(self):
    """Estimates how long until all hashes have been analyzed.

    Returns:
        int: estimated number of seconds until all hashes have been analyzed.
    """
    number_of_hashes = self.hash_queue.qsize()
    hashes_per_batch = self._analyzer.hashes_per_batch
    wait_time_per_batch = self._analyzer.wait_after_analysis
    analyses_performed = self._analyzer.analyses_performed

    if analyses_performed == 0:
        # No history yet; use the raw time spent as the best available guess.
        average_analysis_time = self._analyzer.seconds_spent_analyzing
    else:
        average_analysis_time = (
            self._analyzer.seconds_spent_analyzing // analyses_performed)

    # Round up: a final partial batch still costs a full batch of time.
    # (The previous floor division estimated 0 seconds whenever fewer than
    # hashes_per_batch hashes remained.)
    batches_remaining = -(-number_of_hashes // hashes_per_batch)

    estimated_seconds_per_batch = average_analysis_time + wait_time_per_batch
    return batches_remaining * estimated_seconds_per_batch
Estimates how long until all hashes have been analyzed. Returns: int: estimated number of seconds until all hashes have been analyzed.
codesearchnet
def is_collection_aligned(self, data_collection):
    """Check whether this Data Collection is aligned with another.

    Aligned Data Collections are of the same collection type, hold the
    same number of values, and share the same analysis period.

    Args:
        data_collection: The Data Collection to compare against.

    Returns:
        True if the collections are aligned, False otherwise.
    """
    # Short-circuit left to right, mirroring the original checks.
    return (self._collection_type == data_collection._collection_type
            and len(self.values) == len(data_collection.values)
            and self.header.analysis_period ==
            data_collection.header.analysis_period)
Check if this Data Collection is aligned with another. Aligned Data Collections are of the same Data Collection class, have the same number of values and have matching datetimes. Args: data_collection: The Data Collection which you want to test if this collection is aligned with. Return: True if collections are aligned, Fale if not aligned
juraj-google-style
def cache_path(self):
    """Return (creating it if necessary) the directory storing all caches.

    Returns:
        str: path to the ``cache`` directory next to this package.
    """
    cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')
    # makedirs with exist_ok avoids the check-then-create race of
    # os.path.exists() + os.mkdir() when two processes start concurrently.
    # The parent directory always exists here, so behavior is otherwise
    # identical to os.mkdir().
    os.makedirs(cache_path, exist_ok=True)
    return cache_path
make a directory to store all caches Returns: --------- cache path
codesearchnet
def WriteUInt256(self, value):
    """Write a UInt256 value to the stream.

    Args:
        value (UInt256): the value to serialize into the stream.

    Raises:
        TypeError: when ``value`` is not a UInt256. (TypeError subclasses
            Exception, so existing ``except Exception`` callers still work.)
    """
    # isinstance (rather than an exact type() identity check) also
    # accepts subclasses of UInt256.
    if isinstance(value, UInt256):
        value.Serialize(self)
    else:
        raise TypeError("Cannot write value that is not UInt256")
Write a UInt256 type to the stream. Args: value (UInt256): Raises: Exception: when `value` is not of neocore.UInt256 type.
juraj-google-style
def trim_wav_ms(in_path: Path, out_path: Path, start_time: int, end_time: int) -> None:
    """Extract part of a WAV file.

    First attempts to call sox. If sox is unavailable or fails, backs off
    to pydub + ffmpeg.

    Args:
        in_path: Source WAV file to extract a portion of.
        out_path: Path of the WAV file to be created.
        start_time: Point in the source file at which to begin extraction.
        end_time: Point in the source file at which to end extraction.
    """
    try:
        trim_wav_sox(in_path, out_path, start_time, end_time)
    except (FileNotFoundError, subprocess.CalledProcessError):
        # sox binary missing, or it exited with a non-zero status.
        trim_wav_pydub(in_path, out_path, start_time, end_time)
Extracts part of a WAV File. First attempts to call sox. If sox is unavailable, it backs off to pydub+ffmpeg. Args: in_path: A path to the source file to extract a portion of out_path: A path describing the to-be-created WAV file. start_time: The point in the source WAV file at which to begin extraction. end_time: The point in the source WAV file at which to end extraction.
juraj-google-style
def read_uint8(self, little_endian=True):
    """Read 1 byte as an unsigned integer value from the stream.

    Args:
        little_endian (bool): specify the endianness; little endian
            by default.

    Returns:
        int: the unsigned byte value.
    """
    endian = '<' if little_endian else '>'
    return self.unpack('%sB' % endian)
Read 1 byte as an unsigned integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the unsigned byte value read.
juraj-google-style
def call(self, method_name: str, args: Optional[Sequence[core_tf_types.Tensor]]=None, output_specs=None, timeout_in_ms=0):
    """Invoke a remote registered method on the connected RPC server.

    The server should be started before making an RPC call. This issues
    the call without blocking for its duration; use is_ok, get_error or
    get_value on the returned object to block until the RPC finishes.

    Args:
        method_name: Registered method to invoke on the server.
        args: Input arguments for the method.
        output_specs: Output specs for the output from the method.
        timeout_in_ms: Timeout for this call. If 0, the default client
            timeout will be used.

    Returns:
        StatusOrResult object wrapping the eventual status/value.
    """
    if args is None:
        args = []
    # Flatten possibly-nested args into the flat tensor list the op expects.
    status_or, deleter = gen_rpc_ops.rpc_call(self._client_handle, args=nest.flatten(args), method_name=method_name, timeout_in_ms=timeout_in_ms)
    return StatusOrResult(status_or, deleter, output_specs)
Method to invoke remote registered functions on the connected server. Server should be started before making an RPC Call. Args: method_name: Registered method to invoke on Server. args: Input arguments for the method. output_specs: Output specs for the output from method. timeout_in_ms: Timeout for this call. If 0, default client timeout will be used. Returns: StatusOrResult object. This function issues the RPC call to server, it does not block for the duration of RPC. Please call is_ok, get_error or get_value methods on the returned object to blocked till RPC finishes.
github-repos
def scalar(self, tag, value, step=None):
    """Saves a scalar value.

    Args:
        tag: str: label for this data.
        value: int/float: number to log.
        step: int: training step; when omitted, the writer's current
            step is used, otherwise the writer's step is updated.
    """
    if step is None:
        step = self._step
    else:
        self._step = step
    simple_value = float(onp.array(value))
    summary = Summary(
        value=[Summary.Value(tag=tag, simple_value=simple_value)])
    self.add_summary(summary, step)
Saves scalar value. Args: tag: str: label for this data value: int/float: number to log step: int: training step
codesearchnet
def Run(self, conf, args):
    """Run the Help command.

    Args:
        conf: nss_cache.config.Config object.
        args: list of arguments to be parsed by this command.

    Returns:
        int: zero; prints the help text as a side effect.
    """
    if args:
        topic = args.pop()
        print('Usage: nsscache [global options] %s [options]' % topic)
        print()
        try:
            # Command classes live in this module, named after the command.
            handler = getattr(inspect.getmodule(self), topic.capitalize())
            help_text = handler().Help()
        except AttributeError:
            print('command %r is not implemented' % topic)
            return 1
    else:
        help_text = self.Help()
    print(help_text)
    return 0
Run the Help command. See Command.Run() for full documentation on the Run() method. Args: conf: nss_cache.config.Config object args: list of arguments to be parsed by this command. Returns: zero, and prints the help text as a side effect
github-repos
def shift(self, time: int) -> 'TimeslotCollection':
    """Return a new TimeslotCollection shifted by ``time``.

    Args:
        time: amount of time to shift each timeslot's interval by.

    Returns:
        A new TimeslotCollection with every slot shifted.
    """
    shifted = (Timeslot(slot.interval.shift(time), slot.channel)
               for slot in self.timeslots)
    return TimeslotCollection(*shifted)
Return a new TimeslotCollection shifted by `time`. Args: time: time to be shifted by
juraj-google-style
def count(self, files=False):
    """Return a count of unique values or files.

    Args:
        files (bool): when True, count all files mapped to the Entity;
            when False, count all unique values.

    Returns:
        int: the requested count.
    """
    if files:
        return len(self.files)
    return len(self.unique())
Returns a count of unique values or files. Args: files (bool): When True, counts all files mapped to the Entity. When False, counts all unique values. Returns: an int.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Windows XML EventLog (EVTX) file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.
    """
    evtx_file = pyevtx.file()
    # Decode strings with the codepage configured on the mediator.
    evtx_file.set_ascii_codepage(parser_mediator.codepage)
    try:
        evtx_file.open_file_object(file_object)
    except IOError as exception:
        # An unopenable file is reported as a warning, not a hard failure.
        parser_mediator.ProduceExtractionWarning(
            'unable to open file with error: {0!s}'.format(exception))
        return

    try:
        self._ParseRecords(parser_mediator, evtx_file)
    finally:
        # Always release the file handle, even if record parsing raises.
        evtx_file.close()
Parses a Windows XML EventLog (EVTX) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object.
juraj-google-style
def set_user(self, user):
    """Writes user and role data to the session.

    Args:
        user: User object whose key, cleaned data, role and permissions
            are stored on the session and mirrored on ``self.current``.
    """
    self.session['user_id'] = user.key
    self.session['user_data'] = user.clean_value()

    # NOTE(review): get_role() is called only after user_id is written --
    # presumably it depends on session state; confirm before reordering.
    role = self.get_role()
    self.session['role_id'] = role.key

    # Mirror the ids onto the current request/context object.
    self.current.role_id = role.key
    self.current.user_id = user.key

    self.session['permissions'] = role.get_permissions()
Writes user data to session. Args: user: User object
codesearchnet