code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def __init__(self, file_path, cause):
message = six.text_type("Malformed config at {}: {}").format(
file_path,
cause
)
super(MalformedConfig, self).__init__(message) | Exception to be raised if pased file is invalid.
Args:
file_path (string): path to bad config
cause (string): reason of failure, i.e. what exactly was the
problem while parsing | juraj-google-style |
def add_peer_parser(subparsers, parent_parser):
parser = subparsers.add_parser('peer', help='Displays information about validator peers', description="Provides a subcommand to list a validator's peers")
grand_parsers = parser.add_subparsers(title='subcommands', dest='subcommand')
grand_parsers.required = Tr... | Adds argument parser for the peer command
Args:
subparsers: Add parsers to this subparser object
parent_parser: The parent argparse.ArgumentParser object | codesearchnet |
def send(self, message):
if ('call_id' not in message):
message['call_id'] = self.gen_call_id()
self._ws.send(message.to_json()) | Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed | codesearchnet |
def graph_execution_traces(self, digest=False, begin=None, end=None):
digests = self._graph_execution_trace_digests
if begin is not None or end is not None:
begin = begin or 0
end = end or len(digests)
digests = digests[begin:end]
if digest:
return digests
else:
r... | Get all the intra-graph execution tensor traces read so far.
Args:
digest: Whether the results will be returned in the more light-weight
digest form.
begin: Optional beginning index for the requested traces or their digests.
Python-style negative indices are supported.
end: Optional ending index for the requested trac... | github-repos |
def extract_derivative_feature(feature):
first_derivative_feature = processing.derivative_extraction(feature, DeltaWindows=2)
second_derivative_feature = processing.derivative_extraction(first_derivative_feature, DeltaWindows=2)
feature_cube = np.concatenate((feature[(:, :, None)], first_derivative_feature[... | This function extracts temporal derivative features which are
first and second derivatives.
Args:
feature (array): The feature vector which its size is: N x M
Return:
array: The feature cube vector which contains the static, first and second derivative features of size: N x M x 3 | codesearchnet |
def set_timezone(tz=None, deploy=False):
if not tz:
raise CommandExecutionError("Timezone name option must not be none.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezo... | Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed.
CLI Example:
Args:
tz (str): The name of the timezone to set.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_timezone U... | juraj-google-style |
def __definitions_descriptor(self):
result = {}
for (def_key, def_value) in self.__parser.schemas().iteritems():
if (('properties' in def_value) or ('type' in def_value)):
key_result = {}
required_keys = set()
if ('type' in def_value):
key_result['type... | Describes the definitions section of the OpenAPI spec.
Returns:
Dictionary describing the definitions of the spec. | codesearchnet |
def xpath(self, exact=None):
exact = (exact if (exact is not None) else self.exact)
if isinstance(self.expression, AbstractExpression):
expression = self._apply_expression_filters(self.expression)
return to_xpath(expression, exact=exact)
else:
return str_(self.expression) | Returns the XPath query for this selector.
Args:
exact (bool, optional): Whether to exactly match text.
Returns:
str: The XPath query for this selector. | codesearchnet |
def first_function(function: _evaluation.FirstFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
del params_result
if operand_result is None:
raise ValueError('first() cannot be called without an operan... | Generates Spark SQL representing the FHIRPath first() function.
Returns a collection with the first value of the operand collection.
The returned SQL expression is a table with cardinality 0 or 1.
Args:
function: The FHIRPath AST `FirstFunction` node
operand_result: The expression which is being evaluated
params_res... | github-repos |
def Match(self, registry_key):
value_names = frozenset([
registry_value.name for registry_value in registry_key.GetValues()])
return self._value_names.issubset(value_names) | Determines if a Windows Registry key matches the filter.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the keys match. | juraj-google-style |
def __init__(self, outer_index, inner_index):
if outer_index.batch_dims != inner_index.batch_dims:
raise ValueError('outer_index.batch_dims and inner_index.batch_dims must be the same.')
super(ProductIndexMap, self).__init__(indices=inner_index.indices + outer_index.indices * tf.cast(inner_index.num_seg... | Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
combi... | github-repos |
def probe_async(self, callback):
def _on_finished(_name, control_info, exception):
if (exception is not None):
callback(self.id, False, str(exception))
return
self._control_info = control_info
try:
info = {'connection_string': 'direct', 'uuid': control_in... | Send advertisements for all connected devices.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe | codesearchnet |
def get_best_dataset_key(key, choices):
if ((key.wavelength is not None) and choices):
nearest_wl = min([_wl_dist(key.wavelength, x.wavelength) for x in choices if (x.wavelength is not None)])
choices = [c for c in choices if (_wl_dist(key.wavelength, c.wavelength) == nearest_wl)]
if ((key.modif... | Choose the "best" `DatasetID` from `choices` based on `key`.
The best key is chosen based on the follow criteria:
1. Central wavelength is nearest to the `key` wavelength if
specified.
2. Least modified dataset if `modifiers` is `None` in `key`.
Otherwise, the modifiers are ignored.
3. Highest calibration if `calibra... | codesearchnet |
def _StopMonitoringProcess(self, process):
if (process is None):
raise ValueError('Missing process.')
pid = process.pid
self._RaiseIfNotMonitored(pid)
del self._process_information_per_pid[pid]
rpc_client = self._rpc_clients_per_pid.get(pid, None)
if rpc_client:
rpc_client.Close(... | Stops monitoring a process.
Args:
process (MultiProcessBaseProcess): process.
Raises:
KeyError: if the process is not monitored.
ValueError: if the process is missing. | codesearchnet |
def load(self, key_filter=None, header_preproc=None):
df = pd.read_csv(self.input_file,
sep='\t',
dtype=object)
if key_filter is not None:
df = df[df[df.columns[0]].str.match(key_filter)]
meta... | Load data table from tsv file, from default location
Args:
key_filter (str): additional filter for key column - regex matching
key values to include; None for no filter
header_preproc (func): function to apply to column headers to extract year numbers (as strings)
Returns:
pd.DataFrame: data | juraj-google-style |
def _format_field_name(self, field_name) -> str:
field = self._get_model_field(field_name)
return self.qn(field.column) | Formats a field's name for usage in SQL.
Arguments:
field_name:
The field name to format.
Returns:
The specified field name formatted for
usage in SQL. | juraj-google-style |
def draw_rects(self, *rects):
rect_array = ffi.new('SDL_Rect[]', len(rects))
for i, r in enumerate(rects):
rect_array[i] = r._ptr[0]
check_int_err(lib.SDL_RenderDrawRects(self._ptr, rect_array, len(rects))) | Draw some number of rectangles on the current rendering target.
Args:
*rects (Rect): The destination rectangles.
Raises:
SDLError: If an error is encountered. | juraj-google-style |
def get_group(self, uuid=None):
if (uuid is None):
uuid = self.uuid
group_data = self.get('group', params={'uuid': uuid})
return group_data | Get group data based on uuid.
Args:
uuid (str): optional uuid. defaults to self.cuuid
Raises:
PyLmodUnexpectedData: No data was returned.
requests.RequestException: Exception connection error
Returns:
dict: group json | codesearchnet |
def retrieve_products(self, reviewer):
if not isinstance(reviewer, self._reviewer_cls):
raise TypeError(
"Type of given reviewer isn't acceptable:", reviewer,
", expected:", self._reviewer_cls)
return list(self.graph.successors(reviewer)) | Retrieve products reviewed by a given reviewer.
Args:
reviewer: A reviewer.
Returns:
A list of products which the reviewer reviews.
Raises:
TypeError: when given reviewer isn't instance of specified reviewer
class when this graph is constructed. | juraj-google-style |
def use_pcm(self, pcm_params=None, solvent_key='solvent', solvent_params=None, radii_force_field=None):
self.params['pcm'] = dict()
self.params[solvent_key] = dict()
default_pcm_params = {'Theory': 'SSVPE', 'vdwScale': 1.1, 'Radii': 'UFF'}
if (not solvent_params):
solvent_params = {'Dielectric':... | Set the solvent model to PCM. Default parameters are trying to comply to
gaussian default value
Args:
pcm_params (dict): The parameters of "$pcm" section.
solvent_key (str): for versions < 4.2 the section name is "pcm_solvent"
solvent_params (dict): The parameters of solvent_key section
radii_force_field (str): The fo... | codesearchnet |
def trigger(self, when=1):
tw = Window(self.stream, self._config['type'])
tw._config['evictPolicy'] = self._config['evictPolicy']
tw._config['evictConfig'] = self._config['evictConfig']
if (self._config['evictPolicy'] == 'TIME'):
tw._config['evictTimeUnit'] = 'MILLISECONDS'
if isinstance(whe... | Declare a window with this window's size and a trigger policy.
When the window is triggered is defined by `when`.
If `when` is an `int` then the window is triggered every
`when` tuples. For example, with ``when=5`` the window
will be triggered every five tuples.
If `when` is an `datetime.timedelta` then it is the p... | codesearchnet |
def num_batches(self):
raise NotImplementedError | Return the size (number of batches) for the dataset created.
For certain type of the data input, the number of batches is known, eg
for Numpy data, the size is same as (number_of_element / batch_size).
Whereas for dataset or python generator, the size is unknown since it
may or may not have an end state.
Returns:
int... | github-repos |
def bounding_box_from(points, i, i1, thr):
pi = points[i]
pi1 = points[i1]
min_lat = min(pi.lat, pi1.lat)
min_lon = min(pi.lon, pi1.lon)
max_lat = max(pi.lat, pi1.lat)
max_lon = max(pi.lon, pi1.lon)
return ((min_lat - thr), (min_lon - thr), (max_lat + thr), (max_lon + thr)) | Creates bounding box for a line segment
Args:
points (:obj:`list` of :obj:`Point`)
i (int): Line segment start, index in points array
i1 (int): Line segment end, index in points array
Returns:
(float, float, float, float): with bounding box min x, min y, max x and max y | codesearchnet |
def _serve_audio_metadata(self, request):
tag = request.args.get('tag')
run = request.args.get('run')
sample = int(request.args.get('sample', 0))
events = self._multiplexer.Tensors(run, tag)
response = self._audio_response_for_run(events, run, tag, sample)
return http_util.Respond(request,... | Given a tag and list of runs, serve a list of metadata for audio.
Note that the actual audio data are not sent; instead, we respond
with URLs to the audio. The frontend should treat these URLs as
opaque and should not try to parse information about them or
generate them itself, as the format may change.
Args:
request... | juraj-google-style |
def add_log_file(path):
logfile_handler = RotatingFileHandler(
path, maxBytes=50000, backupCount=2)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
datefmt="%d-%b-%Y %H:%M:%S")
logfile_handler.setFormatter(formatter)
geoparse_logger.a... | Add log file.
Args:
path (:obj:`str`): Path to the log file. | juraj-google-style |
def __init__(self,
unique_identifier=None,
data=None):
super(DecryptResponsePayload, self).__init__(
enums.Tags.RESPONSE_PAYLOAD
)
self._unique_identifier = None
self._data = None
self.unique_identifier = unique_identifier
... | Construct a Decrypt response payload struct.
Args:
unique_identifier (string): The ID of the managed object (e.g.,
a symmetric key) used for decryption. Required for encoding
and decoding.
data (bytes): The decrypted data in binary form. Required for
encoding and decoding. | juraj-google-style |
def __init__(self, function_name, unique_function_id, node_name_prefix, attr_name, level=1, children_inputs_mappings=None):
self._function_name = function_name
self._unique_function_id = unique_function_id
self._next_global_index = 0
self._used_global_indices = set()
self._tag_to_global_index = {}
... | Initialize ophint argument.
Args:
function_name: Name of the function that this tracks arguments for.
unique_function_id: UUID of function that this tracks arguments for.
node_name_prefix: How identities that are created are named.
attr_name: Name of attribute to use to store the index for this hint.
i.e. FUNCTION_INP... | github-repos |
def getaccountaddress(self, user_id=""):
address = self.rpc.call("getaccountaddress", user_id)
self.logger.debug("Your", self.coin, "address is", address)
return address | Get the coin address associated with a user id.
If the specified user id does not yet have an address for this
coin, then generate one.
Args:
user_id (str): this user's unique identifier
Returns:
str: Base58Check address for this account | juraj-google-style |
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):
try:
attribute_values = [
getattr(mapped_value, attribute_name, None)
for attribute_name in self._attribute_names]
attribute_values = [
value for value in attribute_values if value is not None]
retur... | Folds the data type into a byte stream.
Args:
mapped_value (object): mapped value.
Returns:
bytes: byte stream.
Raises:
FoldingError: if the data type definition cannot be folded into
the byte stream. | juraj-google-style |
def set_name(self, name):
if (not self._campfire.get_user().admin):
return False
result = self._connection.put(('room/%s' % self.id), {'room': {'name': name}})
if result['success']:
self._load()
return result['success'] | Set the room name.
Args:
name (str): Name
Returns:
bool. Success | codesearchnet |
def undo_windowing(hidden_states: torch.Tensor, shape: List[int], mask_unit_shape: List[int]) -> torch.Tensor:
batch_size, hidden_size = (hidden_states.shape[0], hidden_states.shape[-1])
num_mask_units = [s
hidden_states = hidden_states.view(batch_size, *num_mask_units, *mask_unit_shape, hidden_size)
h... | Restore spatial organization by undoing windowed organization of mask units.
Args:
hidden_states (`torch.Tensor`): The hidden states tensor of shape `[batch_size, num_mask_unit_height*num_mask_unit_width, hidden_size]`.
shape (`List[int]`): The original shape of the hidden states tensor before windowing.
mask_unit_sha... | github-repos |
def _handle_request(self, request):
if request is None:
return Response(success=False, uid=request.uid)
action_map = {
'start_dag': self._handle_start_dag,
'stop_workflow': self._handle_stop_workflow,
'join_dags': self._handle_join_dags,
... | Handle an incoming request by forwarding it to the appropriate method.
Args:
request (Request): Reference to a request object containing the
incoming request.
Raises:
RequestActionUnknown: If the action specified in the request is not known.
Returns:
Response: A response object containing the response from the metho... | juraj-google-style |
class RandomUniform(Initializer):
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
_validate_kwa... | Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras... | github-repos |
def _write_init_fetchers(self, filenames):
destination = "%s%s" % (self.output_directory, self.fetchers_path)
self.write(destination=destination, filename="__init__.py", template_name="__init_fetcher__.py.tpl",
filenames=self._prepare_filenames(filenames, suffix='Fetcher'),
... | Write fetcher init file
Args:
filenames (dict): dict of filename and classes | juraj-google-style |
def _StopFolderSelectionMethod(self, stop_folder):
if (not self.show_stop_hierarchy):
return (lambda stop: (stop_folder, None))
station_folder = self._CreateFolder(stop_folder, 'Stations')
platform_folder = self._CreateFolder(stop_folder, 'Platforms')
platform_connections = self._CreateFolder(pl... | Create a method to determine which KML folder a stop should go in.
Args:
stop_folder: the parent folder element for all stops.
Returns:
A function that should accept a Stop argument and return a tuple of
(stop KML folder, pathways KML folder).
Given a Stop, we need to determine which folder the stop should go in. I... | codesearchnet |
def __init__(self, hash_start=[], hash_stop=UInt256()):
self.HashStart = hash_start
self.HashStop = hash_stop | Create an instance.
Args:
hash_start (list): a list of hash values. Each value is of the bytearray type. Note: should actually be UInt256 objects.
hash_stop (UInt256): | juraj-google-style |
def append_dims_and_file_extension(fname, data_df):
if not fname.endswith(".gct"):
out_fname = '{0}_n{1}x{2}.gct'.format(fname, data_df.shape[1], data_df.shape[0])
return out_fname
else:
basename = os.path.splitext(fname)[0]
out_fname = '{0}_n{1}x{2}.gct'.format(b... | Append dimensions and file extension to output filename.
N.B. Dimensions are cols x rows.
Args:
fname (string): output filename
data_df (pandas df)
Returns:
out_fname (string): output filename with matrix dims and .gct appended | juraj-google-style |
def walk_dependencies(root, visitor):
def visit(parent, visitor):
for d in get_dependencies(parent):
visitor(d, parent)
visit(d, visitor)
visitor(root, None)
visit(root, visitor) | Call visitor on root and all dependencies reachable from it in breadth
first order.
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`. | juraj-google-style |
def GetBaseFiles(self, diff):
files = {}
for line in diff.splitlines(True):
if (line.startswith('Index:') or line.startswith('Property changes on:')):
(unused, filename) = line.split(':', 1)
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(file... | Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:". | codesearchnet |
def CreateTaskStart(self):
task_start = TaskStart()
task_start.identifier = self.identifier
task_start.session_identifier = self.session_identifier
task_start.timestamp = self.start_time
return task_start | Creates a task start.
Returns:
TaskStart: task start attribute container. | codesearchnet |
def VerifyServerPEM(self, http_object):
try:
server_pem = http_object.data
server_url = http_object.url
if b"BEGIN CERTIFICATE" in server_pem:
server_certificate = rdf_crypto.RDFX509Cert(server_pem)
self.communicator.LoadServerCertificate(
server... | Check the server PEM for validity.
This is used to determine connectivity to the server. Sometimes captive
portals return a valid HTTP status, but the data is corrupted.
Args:
http_object: The response received from the server.
Returns:
True if the response contains a valid server certificate. | juraj-google-style |
def _make_request(self, url, method='get', data=None, extra_headers=None):
attempts = 0
while (attempts < 1):
if (not self._is_authenticated):
self._authenticate()
try:
return self._send_request(url, method, data, extra_headers)
except HTTPError as e:
... | Prepares the request, checks for authentication and retries in case of issues
Args:
url (str): URL of the request
method (str): Any of "get", "post", "delete"
data (any): Possible extra data to send with the request
extra_headers (dict): Possible extra headers to send along in the request
Returns:
dict | codesearchnet |
def Copy(self, name=None):
new = copy.copy(self)
new.d = copy.copy(self.d)
new.name = name if name is not None else self.name
return new | Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
Args:
name: string name for the new Hist | juraj-google-style |
def draw_lines_svg_layer(df_endpoints, layer_name, layer_number=1):
dwg = svgwrite.Drawing('should_not_exist.svg', profile='tiny', debug=False)
dwg.attribs['width'] = df_endpoints[['x_source', 'x_target']].values.max()
dwg.attribs['height'] = df_endpoints[['y_source', 'y_target']].values.max()
nsmap = I... | Draw lines defined by endpoint coordinates as a layer in a SVG file.
Args:
df_endpoints (pandas.DataFrame) : Each row corresponds to the endpoints
of a single line, encoded through the columns: ``x_source``,
``y_source``, ``x_target``, and ``y_target``.
layer_name (str) : Name of Inkscape layer.
layer_number (int, op... | codesearchnet |
def get_formatted_string(self, input_string):
if isinstance(input_string, str):
try:
return self.get_processed_string(input_string)
except KeyNotInContextError as err:
raise KeyNotInContextError(f"Unable to format '{input_string}' because {err}") from err
elif isinstance(... | Return formatted value for input_string.
get_formatted gets a context[key] value.
get_formatted_string is for any arbitrary string that is not in the
context.
Only valid if input_string is a type string.
Return a string interpolated from the context dictionary.
If input_string='Piping {key1} the {key2} wild'
And con... | codesearchnet |
def runCmd(cls, cmd):
cit.echo(cmd, 'command')
result = os.system(cmd)
cls.checkResult(result) | run command and show if success or failed
Args:
cmd: string
Returns:
bool: if this command run successfully | codesearchnet |
async def run_tasks(context):
running_tasks = RunTasks()
context.running_tasks = running_tasks
status = (await running_tasks.invoke(context))
context.running_tasks = None
return status | Run any tasks returned by claimWork.
Returns the integer status of the task that was run, or None if no task was
run.
args:
context (scriptworker.context.Context): the scriptworker context.
Raises:
Exception: on unexpected exception.
Returns:
int: exit status
None: if no task run. | codesearchnet |
def security_label(self, name, description=None, color=None):
label = SecurityLabel(name, description, color)
for label_data in self._labels:
if (label_data.name == name):
label = label_data
break
else:
self._labels.append(label)
return label | Return instance of SecurityLabel.
.. note:: The provided security label will be create if it doesn't exist. If the security
label already exists nothing will be changed.
Args:
name (str): The value for this security label.
description (str): A description for this security label.
color (str): A color (hex value) for ... | codesearchnet |
def scripthash_to_address(scripthash):
sb = bytearray([ADDRESS_VERSION]) + scripthash
c256 = bin_dbl_sha256(sb)[0:4]
outb = sb + bytearray(c256)
return base58.b58encode(bytes(outb)).decode("utf-8") | Convert a script hash to a public address.
Args:
scripthash (bytes):
Returns:
str: base58 encoded string representing the wallet address. | juraj-google-style |
def Update(self, attribute=None):
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
flow_id = flow.StartAFF4Flow(
client_id=client_id,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
not... | Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes: CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed ab... | juraj-google-style |
def CanonicalPathToLocalPath(path):
r
path = path.replace("/\\", "\\")
path = path.replace("/", "\\")
m = re.match(r"\\([a-zA-Z]):(.*)$", path)
if m:
path = "%s:\\%s" % (m.group(1), m.group(2).lstrip("\\"))
return path | r"""Converts the canonical paths as used by GRR to OS specific paths.
Due to the inconsistencies between handling paths in windows we need to
convert a path to an OS specific version prior to using it. This function
should be called just before any OS specific functions.
Canonical paths on windows have:
- / instead o... | juraj-google-style |
def distance_from_point(self, pt):
return np.linalg.norm(np.array(pt) - self.coords) | Returns distance between the site and a point in space.
Args:
pt: Cartesian coordinates of point.
Returns:
Distance (float) | juraj-google-style |
def initialize_remaining_constants(self, value=0):
remaining = []
for (node, _inputs, _outputs) in self.iterate_bfs():
streams = (node.input_streams() + [node.stream])
for stream in streams:
if (stream.stream_type is not DataStream.ConstantType):
continue
... | Ensure that all constant streams referenced in the sensor graph have a value.
Constant streams that are automatically created by the compiler are initialized
as part of the compilation process but it's possible that the user references
other constant streams but never assigns them an explicit initial value. This
func... | codesearchnet |
def Execute(self, http):
self._Execute(http)
for key in self.__request_response_handlers:
response = self.__request_response_handlers[key].response
callback = self.__request_response_handlers[key].handler
exception = None
if response.status_co... | Execute all the requests as a single batched HTTP request.
Args:
http: A httplib2.Http object to be used with the request.
Returns:
None
Raises:
BatchError if the response is the wrong format. | juraj-google-style |
def __init__(self, output_mediator):
super(SharedElasticsearchOutputModule, self).__init__(output_mediator)
self._client = None
self._document_type = self._DEFAULT_DOCUMENT_TYPE
self._event_documents = []
self._flush_interval = self._DEFAULT_FLUSH_INTERVAL
self._host = None
self._index_... | Initializes an Elasticsearch output module.
Args:
output_mediator (OutputMediator): mediates interactions between output
modules and other components, such as storage and dfvfs. | juraj-google-style |
def merge_files(context):
resolver = EFTemplateResolver(
profile=context.profile,
region=context.region,
env=context.env,
service=context.service
)
try:
with open(context.template_path, 'r') as f:
template_body = f.read()
f.close()
except IOError as error:
raise I... | Given a context containing path to template, env, and service:
merge config into template and output the result to stdout
Args:
context: a populated context object | juraj-google-style |
def copy_entities(self, from_namespace, from_workspace, etype, enames):
r = fapi.copy_entities(from_namespace, from_workspace,
self.namespace, self.name, etype, enames,
self.api_url)
fapi._check_response_code(r, 201) | Copy entities from another workspace.
Args:
from_namespace (str): Source workspace namespace
from_workspace (str): Source workspace name
etype (str): Entity type
enames (list(str)): List of entity names to copy | juraj-google-style |
def CacheObject(self, identifier, vfs_object):
if (identifier in self._values):
raise KeyError('Object already cached for identifier: {0:s}'.format(identifier))
if (len(self._values) == self._maximum_number_of_cached_values):
raise errors.CacheFullError('Maximum number of cached values reached.'... | Caches a VFS object.
This method ignores the cache value reference count.
Args:
identifier (str): VFS object identifier.
vfs_object (object): VFS object to cache.
Raises:
CacheFullError: if he maximum number of cached values is reached.
KeyError: if the VFS object already is cached. | codesearchnet |
def AddItem(self, key, item, f=(lambda x: x)):
with self._mutex:
bucket = self._buckets[key]
bucket.AddItem(item, f) | Add a new item to the Reservoir with the given tag.
If the reservoir has not yet reached full size, the new item is guaranteed
to be added. If the reservoir is full, then behavior depends on the
always_keep_last boolean.
If always_keep_last was set to true, the new item is guaranteed to be added
to the reservoir, and... | codesearchnet |
def report_validation_error(self, element_path: str, msg: str) -> None: | Reports the given error during FHIR validation.
This indicates that the resource does not fully comply with the FHIR
specification or profile.
Args:
element_path: The path to the field where the issue occurred.
msg: The error message produced. | github-repos |
def _get_vep_transcript(self, transcript_info):
transcript = Transcript(hgnc_symbol=transcript_info.get('SYMBOL'), transcript_id=transcript_info.get('Feature'), ensembl_id=transcript_info.get('Gene'), biotype=transcript_info.get('BIOTYPE'), consequence=transcript_info.get('Consequence'), strand=transcript_info.get(... | Create a Transcript based on the vep annotation
Args:
transcript_info (dict): A dict with vep info
Returns:
transcript (puzzle.models.Transcript): A Transcripts | codesearchnet |
def get_for_type(input_type='text'):
if (input_type in RandomInputHelper.cache):
return RandomInputHelper.cache[input_type]
types = {'text': RandomInputHelper.get_random_value, 'hidden': RandomInputHelper.get_random_value, 'search': RandomInputHelper.get_random_value, 'color': RandomInputHelper.get_rand... | Get a random string for the given html input type
Args:
input_type (str): The input type (e.g. email).
Returns:
str: The (cached) random value. | codesearchnet |
def __init__(self, value, data_type, masks=None, name='Secret Data'):
super(SecretData, self).__init__()
self._object_type = enums.ObjectType.SECRET_DATA
self.value = value
self.data_type = data_type
self.names = [name]
if masks:
self.cryptographic... | Create a SecretData object.
Args:
value(bytes): The bytes representing secret data.
data_type(SecretDataType): An enumeration defining the type of the
secret value.
masks(list): A list of CryptographicUsageMask enumerations
defining how the key will be used.
name(string): The string name of the key. | juraj-google-style |
def GetAPFSFileEntryByPathSpec(self, path_spec):
location = getattr(path_spec, 'location', None)
identifier = getattr(path_spec, 'identifier', None)
if identifier is not None:
fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(
identifier)
elif location is no... | Retrieves the APFS file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
pyfsapfs.file_entry: file entry.
Raises:
PathSpecError: if the path specification is missing location and
identifier. | juraj-google-style |
def _GetDisplayPath(self, path_spec, full_path, data_stream_name):
display_path = ''
if path_spec.HasParent():
parent_path_spec = path_spec.parent
if (parent_path_spec and (parent_path_spec.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION)):
display_path = ''.join([di... | Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
full_path (str): full path of the file entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display. | codesearchnet |
class TimesFmOutput(BaseModelOutput):
    """Output of TimesFm models, extending `BaseModelOutput` with the
    location/scale statistics used to normalize the time series inputs.

    Args:
        loc (`torch.Tensor` of shape `(batch_size,)`):
            The mean of the time series inputs.
        scale (`torch.Tensor` of shape `(batch_size,)`):
            The scale of the time series inputs.
    """

    loc: Optional[torch.Tensor] = None
    scale: Optional[torch.Tensor] = None
def get_card(self, id, name=None):
    """Get a card.

    Returns:
        Card: The card with the given `id`.
    """
    card_data = {'id': id, 'name': name}
    return self.create_card(card_data)
def process_actions(self, actions):
notices = {}
notification_contacts = {}
for action in actions:
resource = action['resource']
action_status = ActionStatus.SUCCEED
try:
if action['action'] == AuditActions.REMOVE:
... | Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications | juraj-google-style |
def write_file(self, filename='HEADER'):
    """Writes Header into filename on disk.

    Args:
        filename: Filename and path for file to be written to disk.
    """
    # print() appends the trailing newline, matching str(self) + "\n".
    with open(filename, "w") as out:
        print(self, file=out)
def addColumn(self, columnName, dtype, defaultValue):
    """Adds a column with the given parameters to the underlying model.

    This method is also a slot. If no model is set, nothing happens.

    Args:
        columnName (str): The name of the new column.
        dtype (numpy.dtype): The datatype of the new column.
        defaultValue (object): Fill the column with this value.
    """
    frame_model = self.tableView.model()
    if frame_model is not None:
        frame_model.addDataFrameColumn(columnName, dtype, defaultValue)
    # Un-toggle the button whether or not a column was actually added.
    self.addColumnButton.setChecked(False)
def to_deeper_model(self, target_id, new_layer):
self.operation_history.append(("to_deeper_model", target_id, new_layer))
input_id = self.layer_id_to_input_node_ids[target_id][0]
output_id = self.layer_id_to_output_node_ids[target_id][0]
if self.weighted:
if is_layer... | Insert a relu-conv-bn block after the target block.
Args:
target_id: A convolutional layer ID. The new block should be inserted after the block.
new_layer: An instance of StubLayer subclasses. | juraj-google-style |
def __init__(self, jid, password, verify_security=False):
self.jid = aioxmpp.JID.fromstr(jid)
self.password = password
self.verify_security = verify_security
self.behaviours = []
self._values = {}
self.conn_coro = None
self.stream = None
self.cl... | Creates an agent
Args:
jid (str): The identifier of the agent in the form username@server
password (str): The password to connect to the server
verify_security (bool): Wether to verify or not the SSL certificates | juraj-google-style |
def shape(self):
    """The statically known shape of this ragged tensor.

    Returns:
        A `TensorShape` containing the statically known shape of this
        ragged tensor. Ragged dimensions have a size of `None`.

    Examples:
        >>> tf.ragged.constant([[0], [1, 2]]).shape
        TensorShape([2, None])
    """
    static_rows = self._row_partition.static_nrows
    static_cols = self._row_partition.static_uniform_row_length
    # Inner (per-value) dims come from the flat values, minus the row axis.
    inner_shape = self._values.shape[1:]
    outer_shape = tensor_shape.TensorShape([static_rows, static_cols])
    return outer_shape.concatenate(inner_shape)
def increase_route_count(self, crawled_request):
for route in self.__routing_options.routes:
if re.compile(route).match(crawled_request.url):
count_key = (str(route) + crawled_request.method)
if (count_key in self.__routing_count.keys()):
self.__routing_count[count_ke... | Increase the count that determines how many times a URL of a certain route has been crawled.
Args:
crawled_request (:class:`nyawc.http.Request`): The request that possibly matches a route. | codesearchnet |
def __init__(self, operation: Type[Operation], *expressions: Expression) -> None:
self.operation = operation
self.length = len(expressions)
self.constant = Multiset()
self.syntactic = Multiset()
self.sequence_variables = Multiset()
self.sequence_variable_i... | Create a CommutativePatternsParts instance.
Args:
operation:
The type of the commutative operation. Must be a subclass of :class:`.Operation` with
:attr:`~.Operation.commutative` set to ``True``.
*expressions:
The operands of the commutative operation. | juraj-google-style |
def _build_encryption_key_information(self, value):
if (value is None):
return None
if (not isinstance(value, dict)):
raise TypeError('Encryption key information must be a dictionary.')
cryptographic_parameters = value.get('cryptographic_parameters')
if cryptographic_parameters:
... | Build an EncryptionKeyInformation struct from a dictionary.
Args:
value (dict): A dictionary containing the key/value pairs for a
EncryptionKeyInformation struct.
Returns:
EncryptionKeyInformation: an EncryptionKeyInformation struct
Raises:
TypeError: if the input argument is invalid | codesearchnet |
def albedo(self, value=999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `albedo`'.format(value))
self._albedo = va... | Corresponds to IDD Field `albedo`
Args:
value (float): value for IDD Field `albedo`
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | juraj-google-style |
def ParseRecord(self, parser_mediator, key, structure):
if key != 'logline':
logger.warning(
'Unable to parse record, unknown structure: {0:s}'.format(key))
return
try:
timestamp = int(structure.timestamp)
except ValueError:
logger.debug('Invalid timestamp string {0:s... | Parses a log record structure.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure parsed from the log file. | juraj-google-style |
def experimental_from_proto(cls, proto: struct_pb2.TypeSpecProto) -> 'TypeSpec':
    """Returns a TypeSpec instance based on the serialized proto.

    Do NOT override for custom non-TF types.

    Args:
        proto: Proto generated using 'experimental_as_proto'.
    """
    # Wrap the TypeSpecProto in a StructuredValue so the generic decoder
    # can reconstruct the TypeSpec instance.
    wrapped = struct_pb2.StructuredValue(type_spec_value=proto)
    return nested_structure_coder.decode_proto(wrapped)
def dump(destination, ms, single=False, pretty_print=False, **kwargs):
    """Serialize Xmrs objects to the Prolog representation and write to a file.

    Args:
        destination: filename or file object where data will be written
        ms: an iterator of Xmrs objects to serialize (unless the
            *single* option is `True`)
        single: if `True`, treat *ms* as a single Xmrs object
            instead of as an iterator
        pretty_print: forwarded to `dumps` to control output formatting
        **kwargs: additional keyword arguments forwarded to `dumps`
    """
    text = dumps(ms, single=single, pretty_print=pretty_print, **kwargs)
    if hasattr(destination, 'write'):
        # Already a file-like object: write to it directly.
        print(text, file=destination)
    else:
        # Otherwise treat *destination* as a filesystem path.
        with open(destination, 'w') as stream:
            print(text, file=stream)
def DeterminePeakMemoryUsage(self, item):
    """Returns a snapshot of the peak memory usage.

    Args:
        item: The item for which to measure the costs.

    Returns:
        A hashtable indexed by device name.
    """
    # Delegate to the native cluster implementation.
    return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster)
def _histogram_move_keys_by_game(sess, ds, batch_size=8*1024):
ds = ds.batch(batch_size)
ds = ds.map(lambda x: tf.strings.substr(x, 0, 12))
iterator = ds.make_initializable_iterator()
sess.run(iterator.initializer)
get_next = iterator.get_next()
h = collections.Counter()
try:
... | Given dataset of key names, return histogram of moves/game.
Move counts are written by the game players, so
this is mostly useful for repair or backfill.
Args:
sess: TF session
ds: TF dataset containing game move keys.
batch_size: performance tuning parameter | juraj-google-style |
def ValidateAccessAndSubjects(requested_access, subjects):
if (not requested_access):
raise access_control.UnauthorizedAccess(('Must specify requested access type for %s' % subjects))
for s in requested_access:
if (s not in 'rwq'):
raise ValueError(('Invalid access requested for %s: ... | Does basic requested access validation.
Args:
requested_access: String consisting or 'r', 'w' and 'q' characters.
subjects: A list of subjects that are about to be accessed with a given
requested_access. Used for logging purposes only.
Returns:
True if requested_access is valid.
Raises:
access_control.UnauthorizedAc... | codesearchnet |
def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=FLAGS):
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True... | Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against. | juraj-google-style |
def list_files(root, suffix, prefix=False):
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in ... | List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the pa... | juraj-google-style |
def validate(self):
    """Check if an access token is valid.

    Returns:
        bool: True when the server reply is empty (token valid),
        False when an error dict was returned.
    """
    payload = {'accessToken': self.access_token}
    response = self._ygg_req('/validate', payload)
    # An empty reply means the token validated successfully.
    return not bool(response)
def removedirs(self, target_directory):
target_directory = self.filesystem.absnormpath(target_directory)
directory = self.filesystem.confirmdir(target_directory)
if directory.contents:
self.filesystem.raise_os_error(errno.ENOTEMPTY, self.path.basename(target_directory))
else:
self.rmdir(... | Remove a leaf fake directory and all empty intermediate ones.
Args:
target_directory: the directory to be removed.
Raises:
OSError: if target_directory does not exist or is not a directory.
OSError: if target_directory is not empty. | codesearchnet |
def with_subject(self, subject):
return self.__class__(
self._signer,
service_account_email=self._service_account_email,
scopes=self._scopes,
token_uri=self._token_uri,
subject=subject,
project_id=self._project_id,
addi... | Create a copy of these credentials with the specified subject.
Args:
subject (str): The subject claim.
Returns:
google.auth.service_account.Credentials: A new credentials
instance. | juraj-google-style |
def report_proto_path(self):
    """Getter for path where tensor_tracer.proto object should be written.

    Returns:
        A string path.
    """
    return self._report_proto_path
def recipe_google_ads_segmentology(config, auth_read, customer_id, developer_token, login_id, auth_write, recipe_slug):
dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})
bigquery(config, {'auth': auth_write, 'function': 'Pearson Si... | GoogleAds funnel analysis using Census data.
Args:
auth_read (authentication) - Credentials used for reading data.
customer_id (string) - Google Ads customer.
developer_token (string) - Google Ads developer token.
login_id (string) - Google Ads login.
auth_write (authentication) - Authorization used for writing data.
... | github-repos |
def stat_v2(path):
    """Returns file statistics for a given path.

    Args:
        path: string, path to a file

    Returns:
        FileStatistics struct that contains information about the path

    Raises:
        errors.OpError: If the operation fails.
    """
    # Normalize PathLike / bytes inputs to str before calling the C++ layer.
    str_path = compat.path_to_str(path)
    return _pywrap_file_io.Stat(str_path)
def _find_furthest_new_line(read_buffer):
    """Find the rightmost newline position in *read_buffer*.

    Args:
        read_buffer (bytestring): buffer to scan.

    Returns:
        int: the rightmost position of a newline character in
        *read_buffer* if one is found, else -1.
    """
    # rfind returns -1 per marker when absent, so max() yields -1 overall
    # when no newline variant occurs in the buffer.
    return max(read_buffer.rfind(marker) for marker in new_lines_bytes)
def set_installed_version(vcs, version):
    """Set the installed version for this project.

    Args:
        vcs (easyci.vcs.base.Vcs): the project's version control wrapper.
        version (str): version string to record.
    """
    with open(_get_version_path(vcs), 'w') as version_file:
        version_file.write(version)
def tags(pode, leaf=False):
fulltags = [tag for tag in pode[1]['tags']]
if not leaf:
return fulltags
retn = []
for size, tag in sorted([(len(t), t) for t in fulltags], reverse=True):
look = tag + '.'
if any([r.startswith(look) for r in retn]):
continu... | Get all the tags for a given node.
Args:
pode (tuple): A packed node.
leaf (bool): If True, only return the full tags.
Returns:
list: A list of tag strings. | juraj-google-style |
def _GetEventData(
self, parser_mediator, record_index, evt_record, recovered=False):
event_data = WinEvtRecordEventData()
try:
event_data.record_number = evt_record.identifier
except OverflowError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to read rec... | Retrieves event data from the Windows EventLog (EVT) record.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
record_index (int): event record index.
evt_record (pyevt.record): event record.
recovered (Optional[bool]): True if the record was... | juraj-google-style |
def get_all_pipelines(app=''):
url = '{host}/applications/{app}/pipelineConfigs'.format(host=API_URL, app=app)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Could not retrieve Pipelines for {0}.'.format(app)
pipelines = response.json()
LOG.debug('Pip... | Get a list of all the Pipelines in _app_.
Args:
app (str): Name of Spinnaker Application.
Returns:
requests.models.Response: Response from Gate containing Pipelines. | codesearchnet |
def coalescence_times(self, backward=True):
    """Generator over the times of successive coalescence events.

    Args:
        ``backward`` (``bool``): ``True`` to go backward in time
        (i.e., leaves to root), otherwise ``False``.

    Yields:
        The root distance of each internal (multi-child) node, sorted.
    """
    if not isinstance(backward, bool):
        raise TypeError('backward must be a bool')
    # Only nodes with more than one child represent coalescence events.
    internal_dists = [d for n, d in self.distances_from_root() if len(n.children) > 1]
    internal_dists.sort(reverse=backward)
    yield from internal_dists
def _GetGradSource(op_or_tensor):
name_tokens = op_or_tensor.name.split('/')
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith('gradients')]
if not grad_pos:
raise ValueError(f"Expected op/tensor name to start with gradients (excluding scope), got: {op_or_tensor.name}. This means that... | Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counting b... | github-repos |
def predict(self, a, b, sig=None, maxpnt=500):
    """Compute the test statistic.

    Args:
        a (array-like): Variable 1
        b (array-like): Variable 2
        sig (list): [0] (resp [1]) is kernel size for a (resp b)
            (set to median distance if -1). Defaults to [-1, -1].
        maxpnt (int): maximum number of points used, for computational time

    Returns:
        float: test statistic
    """
    # Avoid the mutable default argument; None stands in for [-1, -1].
    if sig is None:
        sig = [-1, -1]
    # Standardize both variables to zero mean and unit variance.
    a = (a - np.mean(a)) / np.std(a)
    b = (b - np.mean(b)) / np.std(b)
    return FastHsicTestGamma(a, b, sig, maxpnt)
def default_num_choices(self) -> int:
    """The default number of choices to use if no other indication.

    Returns:
        Integer > 0
    """
    # Fall back to the framework-wide fixed default.
    return OnnxConfig.default_fixed_num_choices
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.