code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def to_json_str(self):
adict = dict(vars(self), sort_keys=True)
adict['type'] = self.__class__.__name__
return json.dumps(adict) | Convert data to json string representation.
Returns:
json representation as string. | codesearchnet |
def get_organisation(self, **query_params):
organisation_json = self.get_organisations_json(self.base_uri, query_params=query_params)
return self.create_organisation(organisation_json) | Get the Organisation for this board. Returns Organisation object.
Returns:
list(Organisation): The organisation attached to this board | codesearchnet |
def get_airport_details(self, iata, page=1, limit=100):
url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
details = self._fr24.get_airport_details(url)
weather = self._fr24.get_airport_weather(url)
details['position']['elevation'] = weather['elevation']
return details | Retrieve the details of an airport
Given the IATA code of an airport, this method returns the detailed information like lat lon, full name, URL, codes etc.
Args:
iata (str): The IATA code for an airport, e.g. HYD
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher ... | codesearchnet |
def create(self, name, passphrase=None, wallet_data=None):
if (not self.application):
raise RoundError('User accounts are limited to one wallet. Make an account or shoot us an email <dev@gem.co> if you have a compelling use case for more.')
if ((not passphrase) and (not wallet_data)):
raise Valu... | Create a new Wallet object and add it to this Wallets collection.
This is only available in this library for Application wallets. Users
must add additional wallets in their User Console
Args:
name (str): wallet name
passphrase (str, optional): A passphrase with which to encrypt a user
wallet. If not supplied, wallet_d... | codesearchnet |
def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
if (not os.path.isabs(out_dvdb)):
out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))
if self.verbose:
print(('Will merge %d files in... | Merge POT files containing 1st order DFPT potential
return the absolute path of the new database in workdir.
Args:
delete_source: True if POT1 files should be removed after (successful) merge. | codesearchnet |
def kill_raylet_monitor(self, check_alive=True):
self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET_MONITOR, check_alive=check_alive) | Kill the raylet monitor.
Args:
check_alive (bool): Raise an exception if the process was already
dead. | codesearchnet |
def period_end_day(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `period_end_day`'.format(value))
if (',' in value):
raise ValueError('value should not cont... | Corresponds to IDD Field `period_end_day`
Args:
value (str): value for IDD Field `period_end_day`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def label(self, name):
if isinstance(name, str):
self._label = name
else:
raise TypeError('label expects a string') | Set snapshot label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None. | codesearchnet |
def get_unique_variable(name):
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if not candidates:
raise ValueError('Couldnt find variable %s' % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
raise ValueError('Variable %s does not uniquely ... | Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists. | juraj-google-style |
def save(self, output_saved_model_dir):
assert self._converted
if self._need_calibration:
assert self._calibration_data_collected
if self._input_graph_def:
raise ValueError('Not able to save to a SavedModel since input is a GraphDef')
def _restore_collections(dest_graph, src_meta_graph_... | Save the converted graph as a SavedModel.
Args:
output_saved_model_dir: construct a SavedModel using the converted
GraphDef and save it to the specified directory. This option only works
when the input graph is loaded from a SavedModel, i.e. when
input_saved_model_dir is specified and input_graph_def is None in
__init... | github-repos |
def track(self, event_key, user_id, attributes=None, event_tags=None):
if (not self.is_valid):
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if (not validator.is_non_empty_string(event_key)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key')... | Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event. | codesearchnet |
def GetCodeObjectAtLine(module, line):
if (not hasattr(module, '__file__')):
return (False, (None, None))
prev_line = 0
next_line = six.MAXSIZE
for code_object in _GetModuleCodeObjects(module):
for co_line_number in _GetLineNumbers(code_object):
if (co_line_number == line):
... | Searches for a code object at the specified line in the specified module.
Args:
module: module to explore.
line: 1-based line number of the statement.
Returns:
(True, Code object) on success or (False, (prev_line, next_line)) on
failure, where prev_line and next_line are the closest lines with code above
and below th... | codesearchnet |
def __init__(self, identifier, text_format=False):
super(FormatSpecification, self).__init__()
self._text_format = text_format
self.identifier = identifier
self.signatures = [] | Initializes a format specification.
Args:
identifier (str): unique name for the format.
text_format (Optional[bool]): True if the format is a text format,
False otherwise. | juraj-google-style |
def _get_column_alias(builder: column_expression_builder.ColumnExpressionBuilder) -> str:
if builder.column_name:
return builder.column_name
else:
invoke_node = builder.node
while invoke_node and (not hasattr(invoke_node, 'identifier') or not invoke_node.identifier):
invoke_n... | Determine the column alias based on the builder's state.
Args:
builder: A ColumnExpressionBuilder object.
Returns:
A string representing the column alias. | github-repos |
def CreateBudget(client):
budget_service = client.GetService('BudgetService', version='v201809')
budget = {
'name': 'Interplanetary Cruise App Budget
'amount': {
'microAmount': '50000000'
},
'deliveryMethod': 'STANDARD',
'isExplicitlyShared': False
}
budget_opera... | Creates a budget and returns its budgetId.
Args:
client: An AdWordsClient instance.
Returns:
An int budgetId for the created Budget. | juraj-google-style |
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str],
Callable[[str, str, int], str]]:
package_locale = path.join(path.dirname(__pkg.__file__), 'locale')
gettext.install(__pkg.__name__, package_locale)
return gettext.gettext, gettext.ngettext | Configure ``gettext`` for given package.
Args:
__pkg: Package to use as location for :program:`gettext` files
Returns:
:program:`gettext` functions for singular and plural translations | juraj-google-style |
def minutes(start, end=None):
return iterate.between(start, datetime.timedelta(minutes=1), end) | Iterate over the minutes between the given datetime_tzs.
Args:
start: datetime_tz to start from.
end: (Optional) Date to end at, if not given the iterator will never
terminate.
Returns:
An iterator which generates datetime_tz objects a minute apart. | codesearchnet |
def _GetClientLibCallback(args, client_func=_GetClientLib):
client_paths = client_func(
args.service, args.language, args.output, args.build_system,
hostname=args.hostname, application_path=args.application)
for client_path in client_paths:
print 'API client library written to %s' % client_path | Generate discovery docs and client libraries to files.
Args:
args: An argparse.Namespace object to extract parameters from.
client_func: A function that generates client libraries and stores them to
files, accepting a list of service names, a client library language,
an output directory, a build system for the client ... | juraj-google-style |
def _bfd_rx(self, **kwargs):
method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \
'bfd_interval_min_rx'
bfd_rx = getattr(self._rbridge, method_name)
config = bfd_rx(**kwargs)
if kwargs['delete']:
tag = 'min-rx'
confi... | Return the BFD minimum receive interval XML.
You should not use this method.
You probably want `BGP.bfd`.
Args:
min_rx (str): BFD receive interval in milliseconds (300, 500, etc)
delete (bool): Remove the configuration if ``True``.
Returns:
XML to be passed to the switch.
Raises:
None | juraj-google-style |
def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.code_page = self.GetValue(
'codepage', default_value=self._codepage)
system_configuration.hostname = self._hostnames.get(
s... | Retrieves the knowledge base as a system configuration artifact.
Args:
session_identifier (Optional[str])): session identifier, where
CURRENT_SESSION represents the active session.
Returns:
SystemConfigurationArtifact: system configuration artifact. | juraj-google-style |
def build_info(self):
if self.is_bootloader:
self.log.error('Device is in fastboot mode, could not get build info.')
return
if self._build_info is None or self._is_rebooting:
info = {}
build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)
for build_info_constant in BuildInf... | Gets the build info of this Android device, including build id and type.
This is not available if the device is in bootloader mode.
Returns:
A dict with the build info of this Android device, or None if the
device is in bootloader mode. | github-repos |
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
v1 = (self[k].coords - self[l].coords)
v2 = (self[j].coords - self[k].coords)
v3 = (self[i].coords - self[j].coords)
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2((np.linalg.norm(v2) * np.dot(v1, v23)... | Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees. | codesearchnet |
def parse_received(received):
values_by_clause = {}
for pattern in RECEIVED_COMPILED_LIST:
matches = [match for match in pattern.finditer(received)]
if len(matches) == 0:
log.debug("No matches found for %s in %s" % (
pattern.pattern, received))
... | Parse a single received header.
Return a dictionary of values by clause.
Arguments:
received {str} -- single received header
Raises:
MailParserReceivedParsingError -- Raised when a
received header cannot be parsed
Returns:
dict -- values by clause | juraj-google-style |
def mknod(self, filename, mode=None, device=None, dir_fd=None):
if self.filesystem.is_windows_fs:
raise (AttributeError, "module 'os' has no attribute 'mknode'")
if (mode is None):
mode = (S_IFREG | 384)
if (device or ((not (mode & S_IFREG)) and (not is_root()))):
self.filesystem.rai... | Create a filesystem node named 'filename'.
Does not support device special files or named pipes as the real os
module does.
Args:
filename: (str) Name of the file to create
mode: (int) Permissions to use and type of file to be created.
Default permissions are 0o666. Only the stat.S_IFREG file type
is supported by th... | codesearchnet |
def add_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):
pass | Adds given number of business days to given dates.
Note that this is different from calling `add_period_and_roll` with
PeriodType.DAY. For example, adding 5 business days to Monday gives the next
Monday (unless there are holidays on this week or next Monday). Adding 5
days and rolling means landing on Saturday and the... | github-repos |
def assert_corofunction(**kw):
for name, value in kw.items():
if not asyncio.iscoroutinefunction(value):
raise TypeError(
'paco: {} must be a coroutine function'.format(name)) | Asserts if a given values are a coroutine function.
Arguments:
**kw (mixed): value to check if it is an iterable.
Raises:
TypeError: if assertion fails. | juraj-google-style |
def gen_pdf(rst_content, style_text, header=None, footer=FOOTER):
out_file_obj = StringIO()
with NamedTemporaryFile() as f:
f.write(style_text)
f.flush()
pdf = _init_pdf(f.name, header, footer)
pdf.createPdf(text=rst_content, output=out_file_obj, compressed=True)
out_file_obj.see... | Create PDF file from `rst_content` using `style_text` as style.
Optinally, add `header` or `footer`.
Args:
rst_content (str): Content of the PDF file in restructured text markup.
style_text (str): Style for the :mod:`rst2pdf` module.
header (str, default None): Header which will be rendered to each page.
footer (str,... | codesearchnet |
def _check_root_tag(self, root):
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=... | Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError | juraj-google-style |
def get_instances_with_configs(configs):
results = []
for c in configs:
try:
serial = c.pop('serial')
except KeyError:
raise Error(
'Required value "serial" is missing in AndroidDevice config %s.'
% c)
is_required = c.get(KEY_D... | Create AndroidDevice instances from a list of dict configs.
Each config should have the required key-value pair 'serial'.
Args:
configs: A list of dicts each representing the configuration of one
android device.
Returns:
A list of AndroidDevice objects. | juraj-google-style |
def run(self, args):
jlink = pylink.JLink()
if args.test:
if jlink.test():
print('Self-test succeeded.')
else:
print('Self-test failed.')
elif ((args.list is None) or (args.list in ['usb', 'ip'])):
host = pylink.JLinkHost.USB_OR_IP
if (args.list == 'us... | Runs the emulator command.
Args:
self (EmulatorCommand): the ``EmulatorCommand`` instance
args (Namespace): arguments to parse
Returns:
``None`` | codesearchnet |
def expected_h(nvals, fit='RANSAC'):
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0] | Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares pol... | codesearchnet |
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)
if (not tensorshape_util.is_compatible_with(mean_vector.shape, mixture_weight_vector.shape)):
raise ValueError('Expecting means to have same shape as mixture weights.')... | Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture compone... | codesearchnet |
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
... | Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`): List of IDs.
token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.
already_has_spe... | github-repos |
def _parse_config(self):
config = self.get_block('mlag configuration')
cfg = dict()
cfg.update(self._parse_domain_id(config))
cfg.update(self._parse_local_interface(config))
cfg.update(self._parse_peer_address(config))
cfg.update(self._parse_peer_link(config))
cfg.update(self._parse_shutdown... | Parses the mlag global configuration
Returns:
dict: A dict object that is intended to be merged into the
resource dict | codesearchnet |
def _register_notification_callback(self, connection_handle, attribute_handle, callback, once=False):
notification_id = (connection_handle, attribute_handle)
with self.notification_callbacks_lock:
self.notification_callbacks[notification_id] = (callback, once) | Register a callback as a notification callback. It will be called if a notification with the matching
connection_handle and attribute_handle is received.
Args:
connection_handle (int): The connection handle to watch
attribute_handle (int): The attribute handle to watch
callback (func): The callback function to call on... | codesearchnet |
def derivative_extraction(feat, DeltaWindows):
rows, cols = feat.shape
DIF = np.zeros(feat.shape, dtype=feat.dtype)
Scale = 0
FEAT = np.lib.pad(feat, ((0, 0), (DeltaWindows, DeltaWindows)), 'edge')
for i in range(DeltaWindows):
offset = DeltaWindows
... | This function the derivative features.
Args:
feat (array): The main feature vector(For returning the second
order derivative it can be first-order derivative).
DeltaWindows (int): The value of DeltaWindows is set using
the configuration parameter DELTAWINDOW.
Returns:
array: Derivative feature vector - A NUMFRAMESxN... | juraj-google-style |
def datasets_insert(self, dataset_name, friendly_name=None, description=None):
url = (Api._ENDPOINT + (Api._DATASETS_PATH % (dataset_name.project_id, '')))
data = {'kind': 'bigquery
if friendly_name:
data['friendlyName'] = friendly_name
if description:
data['description'] = description
... | Issues a request to create a dataset.
Args:
dataset_name: the name of the dataset to create.
friendly_name: (optional) the friendly name for the dataset
description: (optional) a description for the dataset
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation. | codesearchnet |
def options(self, section):
if not self.has_section(section):
raise NoSectionError(section) from None
return self.__getitem__(section).options() | Returns list of configuration options for the named section.
Args:
section (str): name of section
Returns:
list: list of option names | juraj-google-style |
def call_remoteckan(self, *args, **kwargs):
requests_kwargs = kwargs.get('requests_kwargs', dict())
credentials = self._get_credentials()
if credentials:
requests_kwargs['auth'] = credentials
kwargs['requests_kwargs'] = requests_kwargs
apikey = kwarg... | Calls the remote CKAN
Args:
*args: Arguments to pass to remote CKAN call_action method
**kwargs: Keyword arguments to pass to remote CKAN call_action method
Returns:
Dict: The response from the remote CKAN call_action method | juraj-google-style |
def export_model(module_spec, class_count, saved_model_dir):
(sess, in_image, _, _, _, _) = build_eval_session(module_spec, class_count)
with sess.graph.as_default() as graph:
tf.saved_model.simple_save(sess, saved_model_dir, inputs={'image': in_image}, outputs={'prediction': graph.get_tensor_by_name('f... | Exports model for serving.
Args:
module_spec: The hub.ModuleSpec for the image module being used.
class_count: The number of classes.
saved_model_dir: Directory in which to save exported model and variables. | codesearchnet |
def SignBuffer(self, in_buffer):
precondition.AssertType(in_buffer, bytes)
with tempfile.NamedTemporaryFile() as temp_in:
temp_in.write(in_buffer)
temp_in.seek(0)
outfile = self.SignFile(temp_in.name)
with io.open(outfile, 'rb') as filedesc:
return filedesc.read() | Sign a buffer via temp files.
Our signing tool can't sign a buffer, so we work around it using temporary
files.
Args:
in_buffer: data to sign
Returns:
signed data | codesearchnet |
def _parse_description(self, config):
value = None
match = re.search(r'description (.+)$', config, re.M)
if match:
value = match.group(1)
return dict(description=value) | Scans the specified config block and returns the description value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the description value retrieved
from the config block. If the description value is not
configured, None is returned as the value. The returned dict
is i... | juraj-google-style |
def load_checkpoints(self, checkpointDirs):
self.memo_lookup_table = None
if (not checkpointDirs):
return {}
if (type(checkpointDirs) is not list):
raise BadCheckpoint('checkpointDirs expects a list of checkpoints')
return self._load_checkpoints(checkpointDirs) | Load checkpoints from the checkpoint files into a dictionary.
The results are used to pre-populate the memoizer's lookup_table
Kwargs:
- checkpointDirs (list) : List of run folder to use as checkpoints
Eg. ['runinfo/001', 'runinfo/002']
Returns:
- dict containing, hashed -> future mappings | codesearchnet |
def indicators(self, indicator_type=None, filters=None, params=None):
indicator = self._tcex.ti.indicator(indicator_type)
for i in self.tc_requests.indicators_from_tag(indicator, self.name, filters=filters, params=params):
(yield i) | Gets all indicators from a tag.
Args:
params:
filters:
indicator_type: | codesearchnet |
def sever_sink_ports(self, context, ports, connected_to=None):
if connected_to:
source_port_lookup = self._source_port_lookup(
ports.get(connected_to, []))
else:
source_port_lookup = True
sink_ports = self.... | Conditionally sever Sink Ports of the child. If connected_to
is then None then sever all, otherwise restrict to connected_to's
Source Ports
Args:
context (Context): The context to use
ports (dict): {part_name: [PortInfo]}
connected_to (str): Restrict severing to this part | juraj-google-style |
def get_nn(self, structure, n):
return [e['site'] for e in self.get_nn_info(structure, n)] | Get near neighbors of site with index n in structure.
Args:
structure (Structure): input structure.
n (integer): index of site in structure for which to determine
neighbors.
Returns:
sites (list of Site objects): near neighbors. | juraj-google-style |
def tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant=False, ordered_input_arrays=None, ordered_output_arrays=None):
pywrap_mlir.experimental_tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant, ordered_input_arrays, ordered_output_arrays) | Converts TFLite flatbuffer to TOSA dialect in MLIR bytecode.
Args:
flatbuffer: Path to flatbuffer.
bytecode: Path to output bytecode.
use_external_constant: Whether to create `tfl.external_const` instead of
`tfl.const`.
ordered_input_arrays:
ordered_output_arrays: If ordered_output_arrays is not empty, then the
functi... | github-repos |
def op_priority(op_type):
if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range', 'VariableShape', 'Fill', 'OneHot', 'ShapeN'):
return 7
if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient', 'PreventGradient', 'Squeeze', 'Gather', 'GatherNd'):
return 6
if op_... | Returns the priority of the op.
If the priority of the op is k, it will be traced if trace_level>=k.
Args:
op_type: String name of the operation type.
Returns:
Integer value corresponding the priority of the op. | github-repos |
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False, assert_items_equal=False):
del assert_items_equal
self.gen_outputs(ds_fn, [], num_outputs, verify_exhausted=True, sparse_tensors=sparse_tensors)
actual = self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=True, spars... | Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: 0-argument function that returns a Dataset.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
... | github-repos |
def _catch_errors(a_func, to_catch):
def inner(*args, **kwargs):
try:
return a_func(*args, **kwargs)
except tuple(to_catch) as exception:
utils.raise_with_traceback(
gax.errors.create_error('RPC failed', cause=exception))
return inn... | Updates a_func to wrap exceptions with GaxError
Args:
a_func (callable): A callable.
to_catch (list[Exception]): Configures the exceptions to wrap.
Returns:
Callable: A function that will wrap certain exceptions with GaxError | juraj-google-style |
def multithread_predict_dataflow(dataflows, model_funcs):
num_worker = len(model_funcs)
assert len(dataflows) == num_worker
if num_worker == 1:
return predict_dataflow(dataflows[0], model_funcs[0])
kwargs = {'thread_name_prefix': 'EvalWorker'} if sys.version_info.minor >= 6 else {}
with... | Running multiple `predict_dataflow` in multiple threads, and aggregate the results.
Args:
dataflows: a list of DataFlow to be used in :func:`predict_dataflow`
model_funcs: a list of callable to be used in :func:`predict_dataflow`
Returns:
list of dict, in the format used by
`DetectionDataset.eval_or_save_inference_re... | juraj-google-style |
def get_blob(profile, sha):
resource = ('/blobs/' + sha)
data = api.get_request(profile, resource)
return prepare(data) | Fetch a blob.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the blob to fetch.
Returns:
A dict with data about the blob. | codesearchnet |
def __init__(self, **kwargs):
prefix_chars = kwargs.get('prefix_chars', '-')
if prefix_chars != '-':
raise ValueError(
'argparse_flags.ArgumentParser only supports "-" as the prefix '
'character, found "{}".'.format(prefix_chars))
self._inherited_absl_flags = kwargs.pop(... | Initializes ArgumentParser.
Args:
**kwargs: same as argparse.ArgumentParser, except:
1. It also accepts `inherited_absl_flags`: the absl flags to inherit.
The default is the global absl.flags.FLAGS instance. Pass None to
ignore absl flags.
2. The `prefix_chars` argument must be the default value '-'.
Raises:
ValueErr... | juraj-google-style |
def char(self, c: str) -> None:
if self.peek() == c:
self.offset += 1
else:
raise UnexpectedInput(self, f"char '{c}'") | Parse the specified character.
Args:
c: One-character string.
Raises:
EndOfInput: If past the end of `self.input`.
UnexpectedInput: If the next character is different from `c`. | juraj-google-style |
def _transform(transformer_chain: Sequence[Tuple[(DataTransformer, Type)]], data: S, context: PipelineContext=None) -> T:
for (transformer, target_type) in transformer_chain:
data = transformer.transform(target_type, data, context)
return data | Transform data to a new type.
Args:
transformer_chain: A sequence of (transformer, type) pairs to convert the data.
data: The data to be transformed.
context: The context of the transformations (mutable).
Returns:
The transformed data. | codesearchnet |
def task_address(self, job_name, task_index):
try:
job = self._cluster_spec[job_name]
except KeyError:
raise ValueError('No such job in cluster: %r' % job_name)
try:
return job[task_index]
except KeyError:
raise ValueError('No task with index %r in job %r' % (task_index, ... | Returns the address of the given task in the given job.
Args:
job_name: The string name of a job in this cluster.
task_index: A non-negative integer.
Returns:
The address of the given task in the given job.
Raises:
ValueError: If `job_name` does not name a job in this cluster,
or no task with index `task_index` is d... | github-repos |
def has_open_file(self, file_object):
return (file_object in [wrappers[0].get_object() for wrappers in self.open_files if wrappers]) | Return True if the given file object is in the list of open files.
Args:
file_object: The FakeFile object to be checked.
Returns:
`True` if the file is open. | codesearchnet |
def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False):
errOut = (DEVNULL if (not verbose) else None)
authhead = ('Authorization: bearer ' + self.__githubApiToken)
bashcurl = ('curl -iH TMPauthhead -X POST -d TMPgitquery https:
bashcurl_list = bashcurl.split()
bashcurl_list[2] = aut... | Send a curl request to GitHub.
Args:
gitquery (str): The query or endpoint itself.
Examples:
query: 'query { viewer { login } }'
endpoint: '/user'
gitvars (Optional[Dict]): All query variables.
Defaults to empty.
verbose (Optional[bool]): If False, stderr prints will be
suppressed. Defaults to False.
rest (Optional[bo... | codesearchnet |
def basis(sample_paths):
samples = tf.convert_to_tensor(sample_paths)
dim = samples.shape.as_list()[-1]
grid = tf.range(0, degree + 1, dtype=samples.dtype)
samples_centered = samples - tf.math.reduce_mean(samples, axis=0)
samples_centered = tf.expand_dims(samples_centered, -2)
grid = tf.meshgrid... | Computes polynomial basis expansion at the given sample points.
Args:
sample_paths: A `Tensor`s of either `flot32` or `float64` dtype and of
shape `[num_samples, dim]` where `dim` has to be statically known.
Returns:
A `Tensor`s of shape `[degree * dim, num_samples]`. | github-repos |
def register_views(self, app):
self.add_resource(LoginRedirectView, '/auth/login')
self.add_resource(LogoutRedirectView, '/auth/logout')
for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auth']['plugins']:
cls = entry_point.load()
app.available_auth_systems[cls.name] = cls
if... | Iterates all entry points for views and auth systems and dynamically load and register the routes with Flask
Args:
app (`CINQFlask`): CINQFlask object to register views for
Returns:
`None` | codesearchnet |
def Match(self, event):
if (not self._matcher):
return True
self._decision = self._matcher.Matches(event)
return self._decision | Determines if an event matches the filter.
Args:
event (EventObject): an event.
Returns:
bool: True if the event matches the filter. | codesearchnet |
def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:
try:
return expr_or_pattern.match(expr)
except AttributeError:
if expr_or_pattern == expr:
return MatchDict()
else:
res = MatchDict()
res.success = False
res.... | Recursively match `expr` with the given `expr_or_pattern`
Args:
expr_or_pattern: either a direct expression (equal to `expr` for a
successful match), or an instance of :class:`Pattern`.
expr: the expression to be matched | juraj-google-style |
def eigvals(tensor, name=None):
if tensor.dtype == dtypes.float32 or tensor.dtype == dtypes.complex64:
out_dtype = dtypes.complex64
elif tensor.dtype == dtypes.float64 or tensor.dtype == dtypes.complex128:
out_dtype = dtypes.complex128
e, _ = gen_linalg_ops.eig(tensor, Tout=out_dtype, comput... | Computes the eigenvalues of one or more matrices.
Note: If your program backpropagates through this function, you should replace
it with a call to tf.linalg.eig (possibly ignoring the second output) to
avoid computing the eigen decomposition twice. This is because the
eigenvectors are used to compute the gradient w.r.... | github-repos |
def return_type(type_name, formatter=None):
def _returns(func):
annotated(func)
func.metadata.typed_returnvalue(type_name, formatter)
return func
return _returns | Specify that this function returns a typed value.
Args:
type_name (str): A type name known to the global typedargs type system
formatter (str): An optional name of a formatting function specified
for the type given in type_name. | juraj-google-style |
def _ConvertCollectionsCounterToDict(cls, collections_counter):
if (not isinstance(collections_counter, collections.Counter)):
raise TypeError
json_dict = {'__type__': 'collections.Counter'}
for (attribute_name, attribute_value) in iter(collections_counter.items()):
if (attribute_value is No... | Converts a collections.Counter object into a JSON dictionary.
The resulting dictionary of the JSON serialized objects consists of:
{
'__type__': 'collections.Counter'
...
}
Here '__type__' indicates the object base type. In this case
'collections.Counter'. The rest of the elements of the dictionary make up
the collec... | codesearchnet |
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
    """For each query in the batch, retrieve the ``n_docs`` best documents.

    Abstract hook: concrete retriever subclasses must override this; the base
    implementation always raises.

    Args:
        question_hidden_states (np.ndarray of shape (batch_size, vector_size)):
            An array of query vectors.
        n_docs (int): The number of docs retrieved per query.

    Returns:
        Per the annotation, a tuple of two arrays — presumably the indices of
        the retrieved documents (shape (batch_size, n_docs)) and an associated
        array; confirm the exact contract in the overriding subclasses.

    Raises:
        NotImplementedError: always, on this base implementation.
    """
    raise NotImplementedError
Args:
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
An array of query vectors.
n_docs (`int`):
The number of docs retrieved per query.
Returns:
`np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved docume... | github-repos |
def _set_advertising_data(self, packet_type, data):
payload = struct.pack("<BB%ss" % (len(data)), packet_type, len(data), bytes(data))
response = self._send_command(6, 9, payload)
result, = unpack("<H", response.payload)
if result != 0:
return False, {'reason': 'Er... | Set the advertising data for advertisements sent out by this bled112
Args:
packet_type (int): 0 for advertisement, 1 for scan response
data (bytearray): the data to set | juraj-google-style |
def _StartAnalysisProcesses(self, storage_writer, analysis_plugins):
logger.info('Starting analysis plugins.')
for analysis_plugin in analysis_plugins.values():
self._analysis_plugins[analysis_plugin.NAME] = analysis_plugin
process = self._StartWorkerProcess(analysis_plugin.NAME, storage_writ... | Starts the analysis processes.
Args:
storage_writer (StorageWriter): storage writer.
analysis_plugins (dict[str, AnalysisPlugin]): analysis plugins that
should be run and their names. | juraj-google-style |
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional[int]): whether offset is an absolute or relative
          position within the file (one of the ``os.SEEK_*`` constants).

    Raises:
      IOError: if the file-like object has not been opened.
    """
    if self._is_open:
        # Delegate directly to the underlying LVM logical volume.
        self._vslvm_logical_volume.seek(offset, whence)
        return
    raise IOError('Not opened.')
Args:
offset (int): offset to seek to.
whence (Optional(int)): value that indicates whether offset is an absolute
or relative position within the file.
Raises:
IOError: if the seek failed.
OSError: if the seek failed. | juraj-google-style |
def __generate_localization_dictionary_from_file(file_path, localization_entry_attribute_name_for_key):
localization_dictionary = {}
f = open_strings_file(file_path, "r+")
header_comment_key_value_tuples = extract_header_comment_key_value_tuples_from_file(f)
if len(header_comment_key_value_tuples)... | Generates a dictionary mapping between keys (defined by the given attribute name) and localization entries.
Args:
file_path (str): The strings file path.
localization_entry_attribute_name_for_key: The name of the attribute of LocalizationEntry to use as key.
Returns:
dict: A dictionary mapping between keys (defined b... | juraj-google-style |
def GetMissingChunks(self, fd, length, offset):
start_chunk = (offset
end_chunk = (((offset + length) - 1)
relevant_chunks = range(start_chunk, (end_chunk + 1))
missing_chunks = set(relevant_chunks)
for (idx, metadata) in iteritems(fd.ChunksMetadata(relevant_chunks)):
if (not self.DataRefr... | Return which chunks a file doesn't have.
Specifically, we return a list of the chunks specified by a
length-offset range which are not in the datastore.
Args:
fd: The database object to read chunks from.
length: Length to read.
offset: File offset to read from.
Returns:
A list of chunk numbers. | codesearchnet |
def description(self, force_refresh=False):
if force_refresh:
self.clear_cache()
if not self._tuning_job_describe_result:
self._tuning_job_describe_result = self._sage_client.describe_hyper_parameter_tuning_job(
HyperParameterTuningJobName=self.name
... | Call ``DescribeHyperParameterTuningJob`` for the hyperparameter tuning job.
Args:
force_refresh (bool): Set to True to fetch the latest data from SageMaker API.
Returns:
dict: The Amazon SageMaker response for ``DescribeHyperParameterTuningJob``. | juraj-google-style |
def add_field_with_label(self, key, label_description, field):
self.inputs[key] = field
label = Label(label_description)
label.style['margin'] = '0px 5px'
label.style['min-width'] = '30%'
container = HBox()
container.style.update({'justify-content': 'space-between', 'overflow': 'auto', 'padding'... | Adds a field to the dialog together with a descriptive label and a unique identifier.
Note: You can access to the fields content calling the function GenericDialog.get_field(key).
Args:
key (str): The unique identifier for the field.
label_description (str): The string content of the description label.
field (Widget)... | codesearchnet |
def draw_mask(im, mask, alpha=0.5, color=None):
    """Overlay a binary mask on top of an image.

    Args:
        im: a 3-channel uint8 image in BGR.
        mask: a binary 1-channel image of the same height/width.
        alpha: blend weight for the overlay color.
        color: overlay color; if None, one is picked at random from
            PALETTE_RGB (reversed into BGR order).

    Returns:
        The blended image as uint8.
    """
    if color is None:
        color = PALETTE_RGB[np.random.choice(len(PALETTE_RGB))][::-1]
    # Broadcast the single-channel mask to all three channels, then blend
    # the overlay color into the masked pixels only.
    foreground = np.repeat((mask > 0)[:, :, None], 3, axis=2)
    blended = np.where(foreground,
                       im * (1 - alpha) + color * alpha,
                       im)
    return blended.astype('uint8')
Args:
im: a 3-channel uint8 image in BGR
mask: a binary 1-channel image of the same size
color: if None, will choose automatically | juraj-google-style |
def stop(self, timeout_s=None):
    """Stop the interval and wait for its worker thread to exit.

    If a timeout is provided and stop returns False then the thread is
    effectively abandoned in whatever state it was in (presumably
    dead-locked).

    Args:
      timeout_s: Seconds to wait for the thread to finish. By default it's
          forever.

    Returns:
      False if a timeout was provided and the join timed out; True otherwise
      (including when no worker thread was ever started).
    """
    self.stopped.set()
    if not self.thread:
        # No worker thread exists, so there is nothing to wait on.
        return True
    self.thread.join(timeout_s)
    # Bug fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling on all Python 3 versions.
    return not self.thread.is_alive()
If a timeout is provided and stop returns False then the thread is
effectively abandoned in whatever state it was in (presumably dead-locked).
Args:
timeout_s: The time in seconds to wait on the thread to finish. By
default it's forever.
Returns:
False if a timeout was provided and we timed out. | codesearchnet |
def with_input_types(self, input_type_hint):
    """Annotate the input type of a :class:`PTransform` with a type-hint.

    Args:
      input_type_hint (type): An instance of an allowed built-in type, a
        custom class, or an instance of a
        :class:`~apache_beam.typehints.typehints.TypeConstraint`.

    Raises:
      TypeError: If **input_type_hint** is not a valid type-hint.
    """
    beam_type = native_type_compatibility.convert_to_beam_type(input_type_hint)
    validate_composite_type_param(beam_type, 'Type hints for a PTransform')
    return super().with_input_types(beam_type)
Args:
input_type_hint (type): An instance of an allowed built-in type, a custom
class, or an instance of a
:class:`~apache_beam.typehints.typehints.TypeConstraint`.
Raises:
TypeError: If **input_type_hint** is not a valid type-hint.
See
:obj:`apache_... | github-repos |
def _SetPath(self, path):
old_path = self._path
if (old_path and (not io_wrapper.IsCloudPath(old_path))):
try:
size = tf.io.gfile.stat(old_path).length
logger.debug('Setting latest size of %s to %d', old_path, size)
self._finalized_sizes[old_path] = size
excep... | Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch. | codesearchnet |
def sym_get(self, path: Union[utils.KeyPath, str, int], default: Any=RAISE_IF_NOT_FOUND, use_inferred: bool=False) -> Any:
path = utils.KeyPath.from_value(path)
if default is RAISE_IF_NOT_FOUND:
return path.query(self, use_inferred=use_inferred)
else:
return path.get(self, default, use_infer... | Returns a sub-node by path.
NOTE: there is no `sym_set`, use `sym_rebind`.
Args:
path: A KeyPath object or equivalence.
default: Default value if path does not exists. If absent, `KeyError` will
be thrown.
use_inferred: If True, return inferred value instead of the symbolic form
of `pg.Inferential` objects.
Returns:... | github-repos |
def _get_relationships(self, dna: pg.DNA) -> Tuple[List[pg.DNA], List[Optional[pg.DNA]], List[Optional[int]]]:
def is_mutable_node(obj):
return self._is_mutable_node(obj)
results = pg.query(dna, where=is_mutable_node, enter_selected=True)
child_nodes = list(results.values())
parent_nodes = [n.p... | Extracts the parent-child node relationships in a DNA.
Note that PyGlove represents the nodes in a DNA instance as DNA instances
themselves.
Args:
dna: the DNA that will be mutated.
Returns:
A tuple of 3 lists of the same length with corresponding elements:
-child_nodes: a list of every node in the DNA.
-parent_node... | github-repos |
def patch_deepCopy(self, patches):
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
... | Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects. | codesearchnet |
def _FormatDateTime(self, event):
if (not event.timestamp):
return 'N/A'
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=event.timestamp)
(year, month, day_of_month) = date_time.GetDate()
(hours, minutes, seconds) = date_time.GetTimeOfDay()
try:
return '{0:04d}-{1... | Formats the date and time.
Args:
event (EventObject): event.
Returns:
str: date and time string or "N/A" if no event timestamp is available. | codesearchnet |
def _get_common_params(self, user_id, attributes):
commonParams = {}
commonParams[self.EventParams.PROJECT_ID] = self._get_project_id()
commonParams[self.EventParams.ACCOUNT_ID] = self._get_account_id()
visitor = {}
visitor[self.EventParams.END_USER_ID] = user_id
visitor[self.EventParams.... | Get params which are used same in both conversion and impression events.
Args:
user_id: ID for user.
attributes: Dict representing user attributes and values which need to be recorded.
Returns:
Dict consisting of parameters common to both impression and conversion events. | juraj-google-style |
def SplitKeyPath(key_path, path_separator=definitions.KEY_PATH_SEPARATOR):
    """Split the key path into path segments.

    Args:
      key_path (str): key path.
      path_separator (Optional[str]): path separator.

    Returns:
      list[str]: key path segments without the root path segment, which is an
          empty string.
    """
    # Dropping falsy segments removes the leading empty string produced by
    # the root separator (and any empty segments from doubled separators).
    return [segment for segment in key_path.split(path_separator) if segment]
Args:
key_path (str): key path.
path_separator (Optional[str]): path separator.
Returns:
list[str]: key path segments without the root path segment, which is an
empty string. | codesearchnet |
def create_channels(self, dataset, token, new_channels_data):
channels = {}
for channel_new in new_channels_data:
self._check_channel(channel_new.name)
if channel_new.channel_type not in ['image', 'annotation']:
raise ValueError('Channel type must be ' ... | Creates channels given a dictionary in 'new_channels_data'
, 'dataset' name, and 'token' (project) name.
Arguments:
token (str): Token to identify project
dataset (str): Dataset name to identify dataset to download from
new_channels_data (dict): New channel data to upload into new
channels
Returns:
bool: Process comp... | juraj-google-style |
def Instance(reactor=None):
    """Get the singleton NodeLeader instance, creating it on first use.

    Args:
        reactor: (optional) custom reactor to use when the instance is first
            created; ignored on subsequent calls.

    Returns:
        NodeLeader: the shared instance.
    """
    if NodeLeader._LEAD is not None:
        return NodeLeader._LEAD
    NodeLeader._LEAD = NodeLeader(reactor)
    return NodeLeader._LEAD
Args:
reactor: (optional) custom reactor to use in NodeLeader.
Returns:
NodeLeader: instance. | codesearchnet |
def supported_features_mapping(*supported_features: str, onnx_config_cls: Optional[str]=None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
if onnx_config_cls is None:
raise ValueError('A OnnxConfig class must be provided')
config_cls = transformers
for attr_name in onnx_config_cls.split('... | Generate the mapping between supported the features and their corresponding OnnxConfig for a given model.
Args:
*supported_features: The names of the supported features.
onnx_config_cls: The OnnxConfig full name corresponding to the model.
Returns:
The dictionary mapping a feature to an OnnxConfig constructor. | github-repos |
def ToJson(self, index):
    """Convert object members to a dictionary that can be parsed as JSON.

    Args:
        index (int): The index of the output in a transaction.

    Returns:
        dict: JSON-ready representation of this transaction output.
    """
    json_out = {'n': index}
    json_out['asset'] = self.AssetId.To0xString()
    json_out['value'] = self.Value.ToNeoJsonString()
    json_out['address'] = self.Address
    return json_out
Args:
index (int): The index of the output in a transaction
Returns:
dict: | juraj-google-style |
def __init__(
self, location=None, parent=None, part_index=None, start_offset=None,
**kwargs):
if not parent:
raise ValueError('Missing parent value.')
super(TSKPartitionPathSpec, self).__init__(parent=parent, **kwargs)
self.location = location
self.part_index = part_index
se... | Initializes a path specification.
Note that the TSK partition path specification must have a parent.
Args:
location (Optional[str]): location.
parent (Optional[PathSpec]): parent path specification.
part_index (Optional[int]): part index.
start_offset (Optional[int]): start offset.
Raises:
ValueError: when parent is... | juraj-google-style |
def __init__(self, *args, allow_comments=False, directory=None, **kwargs):
    """Constructor. Also see Entry.__init__.

    Args:
        allow_comments (bool): Whether to allow comments. Default False.
        directory (str): Optional. Subdirectory the page should live in
            instead of the web root.
    """
    super().__init__(*args, **kwargs)
    self.allow_comments = allow_comments
    # NOTE(review): stored under the shorter name `dir`, not `directory` —
    # presumably matched by rendering code elsewhere; confirm before renaming.
    self.dir = directory
Args:
allow_comments (bool): Whether to allow comments. Default False.
directory (str): Optional. If the page should live in a subdirectory
instead of at the web root, specify it here instead of making it
part of the slug. | juraj-google-style |
def open_repository(path, spor_dir='.spor'):
    """Open an existing repository.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns:
        A `Repository` instance.

    Raises:
        ValueError: No repository is found.
    """
    # _find_root_dir walks upward from `path` looking for `spor_dir`.
    return Repository(_find_root_dir(path, spor_dir), spor_dir)
Args:
path: Path to any file or directory within the repository.
spor_dir: The name of the directory containing spor data.
Returns: A `Repository` instance.
Raises:
ValueError: No repository is found. | codesearchnet |
def zoom_blur(x, severity=1):
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out... | Zoom blurring to images.
Applying zoom blurring to images by zooming the central part of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied zoom blur. | juraj-google-style |
def console_get_height_rect(con: tcod.console.Console, x: int, y: int, w: int, h: int, fmt: str) -> int:
    """Return the height of this text once word-wrapped into this rectangle.

    Returns:
        int: The number of lines of text once word-wrapped.

    .. deprecated:: 8.5
        Use :any:`Console.get_height_rect` instead.
    """
    wrapped_height = lib.TCOD_console_get_height_rect_fmt(
        _console(con), x, y, w, h, _fmt(fmt))
    return int(wrapped_height)
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.get_height_rect` instead. | codesearchnet |
def inspect(self, nids=None, wslice=None, **kwargs):
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % tas... | Inspect the tasks (SCF iterations, Structural relaxation ...) and
produces matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, ... | juraj-google-style |
def get_broker() -> 'Broker':
    """Get the global broker instance.

    If no global broker is set, this initializes a RabbitmqBroker pointed at
    localhost and installs it as the global broker first.

    Returns:
        Broker: The default Broker.
    """
    global global_broker
    if global_broker is None:
        # Imported lazily so merely importing this module does not pull in
        # the RabbitMQ backend.
        from .brokers.rabbitmq import RabbitmqBroker
        default_broker = RabbitmqBroker(
            host='127.0.0.1',
            port=5672,
            heartbeat=5,
            connection_attempts=5,
            blocked_connection_timeout=30,
        )
        set_broker(default_broker)
    return global_broker
this initializes a RabbitmqBroker and returns it.
Returns:
Broker: The default Broker. | codesearchnet |
def MetaGraph(self):
    """Return the metagraph definition, if there is one.

    Raises:
      ValueError: If there is no metagraph for this run.

    Returns:
      The `meta_graph_def` proto.
    """
    if self._meta_graph is None:
        raise ValueError('There is no metagraph in this EventAccumulator')
    # The metagraph is stored serialized; parse it on demand.
    parsed = meta_graph_pb2.MetaGraphDef()
    parsed.ParseFromString(self._meta_graph)
    return parsed
Raises:
ValueError: If there is no metagraph for this run.
Returns:
The `meta_graph_def` proto. | codesearchnet |
def get_snpeff_info(snpeff_string, snpeff_header):
    """Turn a snpEff ANN annotation string into one dict per transcript.

    Each dictionary maps the snpEff column names to the pipe-separated
    values of one comma-separated annotation entry.

    Args:
        snpeff_string (str): A string with the ANN annotation.
        snpeff_header (list): A list with the snpEff column names.

    Returns:
        list[dict]: one dictionary per annotated transcript.
    """
    annotations = []
    for transcript_annotation in snpeff_string.split(','):
        annotations.append(
            dict(zip(snpeff_header, transcript_annotation.split('|'))))
    return annotations
return snpeff_annotations | Make the vep annotations into a dictionaries
A snpeff dictionary will have the snpeff column names as keys and
the vep annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the ... | codesearchnet |
def __init__(self, n, key=None, reverse=False):
    """Creates a global Top operation.

    The arguments 'key' and 'reverse' may be passed as keyword arguments,
    and have the same meaning as for Python's sort functions.

    Args:
        n: number of elements to extract from pcoll.
        key: (optional) a mapping of elements to a comparable key, similar
            to the key argument of Python's sorting functions.
        reverse: (optional) sort-order flag, as in Python's sorting
            functions.
    """
    super().__init__()
    self._n = n
    self._key = key
    self._reverse = reverse
The arguments 'key' and 'reverse' may be passed as keyword arguments,
and have the same meaning as for Python's sort functions.
Args:
n: number of elements to extract from pcoll.
key: (optional) a mapping of elements to a comparable key, similar to
the key argument of Python's sorting ... | github-repos |
def __toString(self, values):
    """Replace every non-string value in the dict with its str() form.

    The dict is mutated in place and also returned for convenience.

    Args:
        values (dict): Dictionary of values.

    Returns:
        dict: The same dictionary, with all values converted to strings.
    """
    for key in values:
        # Bug fix: the original tested `values[key] is str`, which is an
        # identity comparison against the `str` *type object* and is almost
        # never true — so str() was applied unconditionally, and a value that
        # literally was the str type would have been skipped. The intended
        # check is isinstance().
        if not isinstance(values[key], str):
            values[key] = str(values[key])
    return values
Args:
values (dict): Dictionary of values
Returns:
Updated values dict | codesearchnet |
def path_of_module(self, mod: nn.Module) -> str:
try:
return super().path_of_module(mod)
except NameError as e:
if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and (len(list(mod.buffers())) == 0):
path = self._insert_module_as_submodule(mod)
retur... | Helper method to find the qualified name of `mod` in the Module hierarchy of `root`. For example, if `root` has
a submodule named `foo`, which has a submodule named `bar`, passing `bar` into this function will return the
string "foo.bar".
Args:
mod (str): The `Module` to retrieve the qualified name for. | github-repos |
def _process_debug_op_state_changes(self, event_reply=None):
if event_reply is None:
event_reply = debug_service_pb2.EventReply()
while not self._debug_ops_state_change_queue.empty():
state_change = self._debug_ops_state_change_queue.get()
debug_node_key = (state_change.node_name, state_... | Dequeue and process all the queued debug-op state change protos.
Include all the debug-op state change protos in a `EventReply` proto.
Args:
event_reply: An `EventReply` to add the `DebugOpStateChange` protos to,
or `None`.
Returns:
An `EventReply` proto with the dequeued `DebugOpStateChange` protos (if
any) added. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.