code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
class DPTReassembleStage(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
if config.is_hybrid:
self._init_reassemble_dpt_hybrid(config)
else:
self._init_reassemble_dpt(config)
self.n... | This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the h... | github-repos |
def getctime(self, path):
    """Return the creation time of the fake file at `path`.

    Args:
        path: the path to the fake file.

    Returns:
        (int, float) the creation time of the fake file in number of
        seconds since the epoch.

    Raises:
        OSError: if the file does not exist.
    """
    try:
        entry = self.filesystem.resolve(path)
    except IOError:
        # Translate the lookup failure into the OSError the real API raises.
        self.filesystem.raise_os_error(errno.ENOENT)
    return entry.st_ctime
Args:
path: the path to fake file.
Returns:
(int, float) the creation time of the fake file in number of
seconds since the epoch.
Raises:
OSError: if the file does not exist. | codesearchnet |
def metadata(self, path):
    """Fetch metadata fields of a file on the FileSystem.

    Args:
        path: string path of a file.

    Returns:
        :class:`~apache_beam.io.filesystem.FileMetadata`.

    Raises:
        ``BeamIOError``: if path isn't a file or doesn't exist.
    """
    try:
        status = self._blobstorageIO()._status(path)
        return FileMetadata(path, status['size'], status['last_updated'])
    except Exception as e:
        # Deliberately broad: any backend failure is surfaced as a
        # BeamIOError keyed by the offending path.
        raise BeamIOError('Metadata operation failed', {path: e})
raise BeamIOError('Metadata operation failed', {path: e}) | Fetch metadata fields of a file on the FileSystem.
Args:
path: string path of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if path isn't a file or doesn't exist. | github-repos |
def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):
exe_command = [os.path.expanduser(python_executable), '-m', 'pip', 'install', '-r', self.requirements_file, '--ignore-installed', '--quiet', '--target', lib_dir_fq]
if self.args.no_cache_dir:
exe_command.append('--no-cache-dir')
... | Build the pip command for installing dependencies.
Args:
python_executable (str): The fully qualified path of the Python executable.
lib_dir_fq (str): The fully qualified path of the lib directory.
Returns:
list: The Python pip command with all required args. | codesearchnet |
def start(logdir, options=None):
global _profiler
with _profiler_lock:
if _profiler is not None:
raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')
_profiler = _pywrap_profiler.ProfilerSession()
try:
opts = dict(options._asdict()) if optio... | Start profiling TensorFlow performance.
Args:
logdir: Profiling results log directory.
options: `ProfilerOptions` namedtuple to specify miscellaneous profiler
options. See example usage below.
Raises:
AlreadyExistsError: If a profiling session is already running.
Example usage:
```python
options = tf.profiler.experi... | github-repos |
def add_weatherdata(self, data):
    """Append a weather data record.

    Args:
        data (WeatherData): weather data object.

    Raises:
        ValueError: if `data` is not a WeatherData instance.
    """
    if not isinstance(data, WeatherData):
        raise ValueError('Weather data need to be of type WeatherData')
    self._data['WEATHER DATA'].append(data)
Args:
data (WeatherData): weather data object | codesearchnet |
def launch_R_script(template, arguments, output_function=None, verbose=True, debug=False):
id = str(uuid.uuid4())
os.makedirs((('/tmp/cdt_R_script_' + id) + '/'))
try:
scriptpath = (('/tmp/cdt_R_script_' + id) + '/instance_{}'.format(os.path.basename(template)))
copy(template, scriptpath)
... | Launch an R script, starting from a template and replacing text in file
before execution.
Args:
template (str): path to the template of the R script
arguments (dict): Arguments that modify the template's placeholders
with arguments
output_function (function): Function to execute **after** the execution
of the R script... | codesearchnet |
def poweroff_server(self, server=None, server_id=None):
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = ... | Poweroff a VM. If possible to pass the VM object or simply the ID
of the VM that we want to turn on.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
return True if json_obj['Success'] is 'True' else False | juraj-google-style |
def from_json(cls, jsonmsg):
    """Create an object directly from a JSON string.

    General validation is applied after construction, so missing
    required fields are reported.

    Args:
        jsonmsg (str): an object encoded as a JSON string.

    Returns:
        An object of the generated type.

    Raises:
        ValidationError: if `jsonmsg` does not match the schema of `cls`.
    """
    import json

    decoded = json.loads(jsonmsg)
    instance = cls(**decoded)
    instance.validate()
    return instance
Applies general validation after creating the
object to check whether all required fields are
present.
Args:
jsonmsg (str): An object encoded as a JSON string
Returns:
An object of the generated type
Raises:
ValidationError: if `jsonmsg` does not match the schema
`cls` ... | juraj-google-style |
def add_jpeg_decoding(module_spec):
input_height, input_width = hub.get_expected_image_size(module_spec)
input_depth = hub.get_num_image_channels(module_spec)
jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
decoded_image_... | Adds operations that perform JPEG decoding and resizing to the graph..
Args:
module_spec: The hub.ModuleSpec for the image module being used.
Returns:
Tensors for the node to feed JPEG data into, and the output of the
preprocessing steps. | juraj-google-style |
def read_graph_execution_trace(self, graph_execution_trace_digest):
    """Read the detailed graph execution trace for a digest.

    Args:
        graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.

    Returns:
        The corresponding `GraphExecutionTrace` object.
    """
    locator = graph_execution_trace_digest.locator
    proto = self._reader.read_graph_execution_traces_event(locator)
    return self._graph_execution_trace_from_debug_event_proto(proto, locator)
Args:
graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.
Returns:
The corresponding `GraphExecutionTrace` object. | github-repos |
def _ParseFileData(self, knowledge_base, file_object):
plist_file = plist.PlistFile()
try:
plist_file.Read(file_object)
except IOError as exception:
raise errors.PreProcessFail(
'Unable to read: {0:s} with error: {1!s}'.format(
self.ARTIFACT_DEFINITION_NAME, except... | Parses file content (data) for a preprocessing attribute.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_object (dfvfs.FileIO): file-like object that contains the artifact
value data.
Raises:
errors.PreProcessFail: if the preprocessing fails. | juraj-google-style |
def select_by_value(self, value):
self._selected_key = None
self._selected_item = None
for k in self.children:
item = self.children[k]
item.attributes['selected'] = False
if value == item.get_value():
self._selected_key = k
... | Selects an item by the text content of the child.
Args:
value (str): Text content of the item that have to be selected. | juraj-google-style |
def format_page(self, page, link_resolver, output):
debug('Formatting page %s' % page.link.ref, 'formatting')
if output:
actual_output = os.path.join(output,
'html')
if not os.path.exists(actual_output):
os.makedi... | Called by `project.Project.format_page`, to leave full control
to extensions over the formatting of the pages they are
responsible for.
Args:
page: tree.Page, the page to format.
link_resolver: links.LinkResolver, object responsible
for resolving links potentially mentioned in `page`
output: str, path to the output dir... | juraj-google-style |
def _lm_numdiff_jacobian(eval_func, nmr_params, nmr_observations):
return SimpleCLFunction.from_string(r + str(nmr_params) + + str(nmr_observations) + , dependencies=[eval_func, SimpleCLFunction.from_string( + str(nmr_observations) + + eval_func.get_cl_function_name() + + eval_func.get_cl_function_name() + ... | Get a numerical differentiated Jacobian function.
This computes the Jacobian of the observations (function vector) with respect to the parameters.
Args:
eval_func (mot.lib.cl_function.CLFunction): the evaluation function
nmr_params (int): the number of parameters
nmr_observations (int): the number of observations (th... | juraj-google-style |
def apply_product_config(config):
cot_product = config['cot_product']
for key in config:
if (isinstance(config[key], Mapping) and ('by-cot-product' in config[key])):
try:
config[key] = config[key]['by-cot-product'][cot_product]
except KeyError:
rai... | Apply config values that are keyed by `cot_product`.
This modifies the passed in configuration.
Args:
config dict: the config to apply cot_product keying too
Returns: dict | codesearchnet |
def _get_query_argument(args, cell, env):
sql_arg = args.get('query', None)
if sql_arg is None:
if not isinstance(cell, basestring):
raise Exception('Expected a --query argument or inline SQL')
return bigquery.Query(cell, env=env)
item = google.datalab.utils.commands.get_notebook_item(sql_a... | Get a query argument to a cell magic.
The query is specified with args['query']. We look that up and if it is a BQ query
object, just return it. If it is a string, build a query object out of it and return
that
Args:
args: the dictionary of magic arguments.
cell: the cell contents which can be variable value override... | juraj-google-style |
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None):
def gamma_cdf(x, a, loc, b):
if a == 0 or b == 0:
cdf = np.ones(x.shape)
else:
cdf = gamma.cdf(x, a, loc, b)
return cdf
crps_obj =... | Calculates the cumulative ranked probability score (CRPS) on the forecast data.
Args:
model_type: model type being evaluated.
model_name: machine learning model being evaluated.
condition_model_name: Name of the hail/no-hail model being evaluated
condition_threshold: Threshold for using hail size CDF
query: pandas que... | juraj-google-style |
def declare(self, name, description=None, **kwargs):
if not self._is_valid_key(name):
raise self.InvalidKeyError(
'Invalid key name, must begin with a lowercase letter', name)
if name in self._declarations:
raise self.KeyAlreadyDeclaredError(
'Configuration key already decla... | Declare a configuration key with the given name.
Args:
name: Configuration key to declare, must not have been already declared.
description: If provided, use this as the description for this key.
**kwargs: Other kwargs to pass to the Declaration, only default_value
is currently supported. | juraj-google-style |
def mtf_transformer_paper_lm(size):
n = (2 ** size)
hparams = mtf_transformer_base_lm()
hparams.batch_size = 256
hparams.d_model = 1024
hparams.d_ff = int((8192 * n))
hparams.d_kv = 256
hparams.num_heads = int((8 * n))
hparams.shared_embedding_and_softmax_weights = False
hparams.lear... | Config for language-model experiments.
Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs)
The size parameter is an integer that controls the number of heads and the
size of the size of the feedforward hidden layers. Increasing size by 1
doubles each of these.
Results:
size params/10^9 log-p... | codesearchnet |
def getall(self):
    """Return all local users' configuration as a resource dict.

    Returns:
        dict: usernames mapped to their nested resource dicts.
    """
    resources = {}
    for match in self.users_re.findall(self.config, re.M):
        resources.update(self._parse_username(match))
    return resources
Returns:
dict: A dict of usernames with a nested resource dict object | codesearchnet |
def get_parameters(self, grad_only=True):
params = OrderedDict()
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
... | Get parameters.
Args:
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
If you set this option as `False`, All parameters are returned. Default is `True`.
Returns:
dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabla.Variable`). | juraj-google-style |
def to_variant(dataset: DatasetV2):
    """Return a variant representing the given dataset.

    Args:
        dataset: A `tf.data.Dataset`.

    Returns:
        A scalar `tf.variant` tensor representing the given dataset.
    """
    variant_tensor = dataset._variant_tensor
    return variant_tensor
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset. | github-repos |
def new(self, val):
    """Add a new value to me.

    Args:
        val (LispVal): the value to be added.

    Returns:
        LispVal: the added value.

    Raises:
        ~parthial.errs.LimitationError: if I already contain the maximum
            number of elements.
    """
    # Guard clause: refuse once capacity has been reached.
    if self.max_things <= len(self.things):
        raise LimitationError('too many things')
    self.things.add(val)
    return val
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value.
Raises:
~parthial.errs.LimitationError: If I already contain the maximum
number of elements. | juraj-google-style |
def __init__(self, channel):
self.BatchWriteSpans = channel.unary_unary(
"/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans",
request_serializer=google_dot_devtools_dot_cloudtrace__v2_dot_proto_dot_tracing__pb2.BatchWriteSpansRequest.SerializeToString,
resp... | Constructor.
Args:
channel: A grpc.Channel. | juraj-google-style |
def _match_elements(dom, matches):
out = {}
for key, content in matches.items():
pattern = content["data"].strip()
if "\n" in pattern:
pattern = pattern.split()
transformer = lambda x: x.strip().split()
else:
transformer = lambda x: x.strip()
... | Find location of elements matching patterns specified in `matches`.
Args:
dom (obj): HTMLElement DOM tree.
matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.
Returns:
dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}`` | juraj-google-style |
def _GetRecord(self, offset, record_size):
record_header = '<4sLQQL'
get4 = (lambda x: struct.unpack('<L', self.input_dat[x:(x + 4)])[0])
url_offset = struct.unpack('B', self.input_dat[(offset + 52):(offset + 53)])[0]
if (url_offset in [255, 254]):
return None
data_offset = get4((offset + 68... | Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record. | codesearchnet |
def start(self, auto_register=True):
    """Tell the container to start this agent.

    Returns a coroutine or a future depending on whether it is called
    from a coroutine or a synchronous method.

    Args:
        auto_register (bool): register the agent in the server
            (Default value = True).
    """
    container = self.container
    return container.start_agent(agent=self, auto_register=auto_register)
It returns a coroutine or a future depending on whether it is called from a coroutine or a synchronous method.
Args:
auto_register (bool): register the agent in the server (Default value = True) | juraj-google-style |
def _on_response_message(self, sequence, topic, message):
try:
conn_key = self._find_connection(topic)
context = self.conns.get_context(conn_key)
except ArgumentError:
self._logger.warn('Dropping message that does not correspond with a known connection, message=%s', message)
retu... | Process a response message received
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself | codesearchnet |
def paint(self):
    """Render a javascript snippet suitable as a mapbox-gl fill paint entry.

    Returns:
        A dict that can be converted to a mapbox-gl javascript paint snippet.
    """
    style_value = VectorStyle.get_style_value
    snippet = {
        'fill-opacity': style_value(self.opacity),
        'fill-color': style_value(self.color),
        'fill-outline-color': style_value(self.outline_color),
    }
    # The translate entry is optional and only emitted when set.
    if self.translate:
        snippet['fill-translate'] = self.translate
    return snippet
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet | codesearchnet |
def remove_context(self, name):
    """Remove a context from the suite.

    Args:
        name (str): Name of the context to remove.
    """
    # Look the context up first; _context() is expected to reject
    # unknown names before we mutate anything (confirm with its impl).
    self._context(name)
    self.contexts.pop(name)
    self._flush_tools()
Args:
name (str): Name of the context to remove. | juraj-google-style |
def map_arg(**maps):
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if six.PY2:
argmap = inspect.getcallargs(func, *args, **kwargs)
else:
sig = inspect.signature(func)
argmap = sig.bin... | Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func} | juraj-google-style |
def _test_end(self, result, e):
if self.begin_time is not None:
self.end_time = utils.get_current_epoch_time()
self.result = result
if e:
self.termination_signal = ExceptionRecord(e) | Marks the end of the test logic.
Args:
result: One of the TEST_RESULT enums in TestResultEnums.
e: A test termination signal (usually an exception object). It can
be any exception instance or of any subclass of
mobly.signals.TestSignal. | github-repos |
async def attach_url(self, url: str, description: str = None) -> Attachment:
    """Add an url as an attachment.

    |methcoro|

    Args:
        url: url you want to add.
        description: *optional* description for your attachment.

    Returns:
        Attachment:

    Raises:
        ValueError: url must not be None
        APIException
    """
    attachment = await self._attach(url=url, description=description)
    return attachment
|methcoro|
Args:
url: url you want to add
description: *optional* description for your attachment
Returns:
Attachment:
Raises:
ValueError: url must not be None
APIException | juraj-google-style |
def CloseExpression(clean_lines, linenum, pos):
line = clean_lines.elided[linenum]
if ((line[pos] not in '({[<') or Match('<[<=]', line[pos:])):
return (line, clean_lines.NumLines(), (- 1))
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if (end_pos > (- 1)):
return (line, li... | If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening... | codesearchnet |
def __init__(self, dataset=None, worker=None, devices=None, components=None, element_spec=None, options=None, canonicalize_devices=None):
if worker is None or devices is None:
raise ValueError('Both `worker` and `devices` should be provided')
error_message = 'Either `dataset` or both `components` and `e... | Create iterator for the `dataset` to fetch data to worker's `devices` .
`OwnedMultiDeviceIterator` is used to prefetch input to the devices on the
given worker. The lifetime of this iterator is tied to the encompassing
python object. Once we go out of scope of the python object or return from
a tf.function the underly... | github-repos |
def _guess_fmt_from_bytes(inp):
stripped = inp.strip()
fmt = None
ini_section_header_re = re.compile(b'^\[([\w-]+)\]')
if len(stripped) == 0:
fmt = 'yaml'
else:
if stripped.startswith(b'<'):
fmt = 'xml'
else:
for l in stripped.splitlines... | Try to guess format of given bytestring.
Args:
inp: byte string to guess format of
Returns:
guessed format | juraj-google-style |
def inspect_config(self, id):
    """Retrieve config metadata.

    Args:
        id (string): Full ID of the config to inspect.

    Returns:
        dict: A dictionary of metadata.

    Raises:
        :py:class:`docker.errors.NotFound`: if no config with that ID exists.
    """
    endpoint = self._url('/configs/{0}', id)
    response = self._get(endpoint)
    return self._result(response, True)
Args:
id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists | juraj-google-style |
def formula_html(self, reversed_=False):
if self.H_count == 1:
text = "H"
elif self.H_count > 1:
text = "H<sub>{}</sub>".format(self.H_count)
else:
text = ""
seq = [self.symbol, text, self.charge_sign_html()]
if reversed_:
... | Chemical formula HTML
Args:
reversed (bool): reversed text for leftmost atom groups | juraj-google-style |
def load_with_vocab(fin, vocab, dtype=np.float32):
arr = None
for line in fin:
try:
(token, v) = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError((b'Parsing error in line: ' + line))
if (token in vocab):
if (arr is None):
... | Load word embedding file with predefined vocabulary
Args:
fin (File): File object to read. File should be open for reading ascii.
vocab (dict): Mapping from words (``bytes``) to vector indices
(``int``).
dtype (numpy.dtype): Element data type to use for the array.
Returns:
numpy.ndarray: Word embedding representation... | codesearchnet |
def add_path(self, path, path_filter=None):
for root, _, files in os.walk(path):
for filename in files:
full_path_and_filename = os.path.join(root, filename)
if path_filter is None or path_filter(full_path_and_filename):
relative_path_and_... | Adding all files from given path to the object.
Args:
path (str): valid, existing directory | juraj-google-style |
def assertProtoEqual(self, a: message.Message, b: message.Message, check_initialized: bool=True, normalize_numbers: bool=False, msg: Optional[str]=None) -> None:
pool = descriptor_pool.Default()
if isinstance(a, str):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in (a, b):
... | Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), i.e. order and extra duplicate fields matter.
Args:
self: absltest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subc... | github-repos |
def _build_network_on_replica(model, mode, inputs=None, targets=None):
from tensorflow.python.keras import models
from tensorflow.python.keras.engine import sequential
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(model, input_tensors=inputs, layer_fn=mo... | Build an updated model on replicas.
We create a new Keras model while sharing the variables from the old graph.
Building a new sub-graph is required since the original keras model creates
placeholders for the input and the output that are not accessible till we
call iterator.get_next() inside the step_fn for `fit`/`ev... | github-repos |
def _sobol_generating_matrices(dim: types.IntTensor, log_num_results: types.IntTensor, num_digits: types.IntTensor, dtype=None) -> types.IntTensor:
global _INITIAL_DIRECTION_NUMBERS
global _PRIMITIVE_POLYNOMIAL_COEFFICIENTS
dtype = dtype or tf.int32
zero = tf.constant(0, dtype=dtype)
indices = tf.ca... | Returns all Sobol generating matrices.
Args:
dim: Positive scalar `Tensor` with rank 0 representing the event size of
points which can be sampled from the resulting generating matrix.
log_num_results: Positive scalar `Tensor` with rank 0 representing the
base-2 logarithm of the maximum number of points which can be sa... | github-repos |
def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs):
    """Find and return the first matching device.

    Args:
        setting_matcher: See cls.FindDevices.
        device_matcher: See cls.FindDevices.
        **kwargs: See cls.FindDevices.

    Returns:
        An instance of UsbHandle.

    Raises:
        DeviceNotFoundError: Raised if the device is not available.
    """
    try:
        return next(cls.FindDevices(setting_matcher,
                                    device_matcher=device_matcher,
                                    **kwargs))
    except StopIteration:
        # An exhausted iterator means no device matched at all.
        raise usb_exceptions.DeviceNotFoundError(
            'No device available, or it is in the wrong configuration.')
Args:
setting_matcher: See cls.FindDevices.
device_matcher: See cls.FindDevices.
**kwargs: See cls.FindDevices.
Returns:
An instance of UsbHandle.
Raises:
DeviceNotFoundError: Raised if the device is not available. | codesearchnet |
def no_llvm(*args, uid=0, gid=0, **kwargs):
    """Return a customizable uchroot command.

    The command will be executed inside a uchroot environment.

    Args:
        args: List of additional arguments for uchroot (typical: mounts).
        uid: forwarded to the default uchroot options.
        gid: forwarded to the default uchroot options.

    Return:
        chroot_cmd
    """
    base_cmd = no_args()
    with_defaults = base_cmd[__default_opts__(uid, gid)]
    return with_defaults[args]
The command will be executed inside a uchroot environment.
Args:
args: List of additional arguments for uchroot (typical: mounts)
Return:
chroot_cmd | juraj-google-style |
def process_dimensions(kdims, vdims):
dimensions = {}
for group, dims in [('kdims', kdims), ('vdims', vdims)]:
if dims is None:
continue
elif isinstance(dims, (tuple, basestring, Dimension, dict)):
dims = [dims]
elif not isinstance(dims, list):
ra... | Converts kdims and vdims to Dimension objects.
Args:
kdims: List or single key dimension(s) specified as strings,
tuples dicts or Dimension objects.
vdims: List or single value dimension(s) specified as strings,
tuples dicts or Dimension objects.
Returns:
Dictionary containing kdims and vdims converted to Dimension
o... | juraj-google-style |
def GetArtifactParserDependencies(rdf_artifact):
    """Return the knowledgebase path dependencies required by the parsers.

    Args:
        rdf_artifact: RDF artifact object.

    Returns:
        A set of strings for the required kb objects, e.g.
        ["users.appdata", "systemroot"].
    """
    processors = parser.Parser.GetClassesByArtifact(rdf_artifact.name)
    dependencies = set()
    for processor in processors:
        dependencies |= set(processor.knowledgebase_dependencies)
    return dependencies
Args:
rdf_artifact: RDF artifact object.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"] | juraj-google-style |
def recursively_convert_to_json_serializable(test_obj):
try:
if ((not isinstance(test_obj, list)) and np.isnan(test_obj)):
return None
except TypeError:
pass
except ValueError:
pass
if isinstance(test_obj, (string_types, integer_types, float, bool)):
return te... | Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place. | codesearchnet |
def create_endpoints_csv_file(self, timeout=-1):
    """Create an endpoints CSV file for a SAN.

    Args:
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, it just
            stops waiting for its completion.

    Returns:
        dict: Endpoint CSV File Response.
    """
    endpoints_uri = '{}/endpoints/'.format(self.data['uri'])
    return self._helper.do_post(endpoints_uri, {}, timeout, None)
Args:
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Endpoint CSV File Response. | codesearchnet |
def collections(self, page_size=None):
    """List subcollections of the current document.

    Args:
        page_size (Optional[int]): The maximum number of collections in
            each page of results from this request. Non-positive values
            are ignored; defaults to a sensible value set by the API.

    Returns:
        Sequence[~.firestore_v1beta1.collection.CollectionReference]:
            iterator of subcollections.
    """
    api = self._client._firestore_api
    pager = api.list_collection_ids(
        self._document_path,
        page_size=page_size,
        metadata=self._client._rpc_metadata,
    )
    # Attach the context the item converter needs to build references.
    pager.document = self
    pager.item_to_value = _item_to_collection_ref
    return pager
Args:
page_size (Optional[int]]): The maximum number of collections
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.CollectionReference]:
iterator of s... | codesearchnet |
def set_package_releases(self, project_name, versions):
    """Store package version information in ``self.packages``.

    Args:
        project_name (str): used as the key in the dictionary.
        versions (list): list of ``str`` representing the available
            versions of a project; stored newest-first.
    """
    ordered = sorted(versions, reverse=True)
    self.packages[project_name] = ordered
Args:
project_name (str): This will be used as a the key in the
dictionary.
versions (list): List of ``str`` representing the available
versions of a project. | juraj-google-style |
def Parse(self, rdf_data):
    """Process rdf data through the filter.

    Filters sift data according to filter rules: data that passes the
    filter rule is kept, other data is dropped. If no filter method is
    provided, the data is returned unchanged; otherwise items meeting
    the filter conditions are returned in a list.

    Args:
        rdf_data: host data to sift.

    Returns:
        A list of matching items when a filter is set, otherwise the
        original `rdf_data`.
    """
    if not self._filter:
        return rdf_data
    return list(self._filter.Parse(rdf_data, self.expression))
return rdf_data | Process rdf data through the filter.
Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.
If no filter method is provided, the data is returned as a list.
Otherwise, items that meet the filter conditions are returned in a list.
Args:
rdf_data: Host data that has... | juraj-google-style |
def start(self, request: Request) -> Response:
if self._session_state != SessionState.ready:
raise RuntimeError('Session not ready')
response = Response()
yield from self._prepare_fetch(request, response)
response.file_transfer_size = yield from self._fetch_size(r... | Start a file or directory listing download.
Args:
request: Request.
Returns:
A Response populated with the initial data connection reply.
Once the response is received, call :meth:`download`.
Coroutine. | juraj-google-style |
def get_minimum_indentation(text):
    r"""Return the number of preceding spaces on the least-indented line.

    Whitespace-only lines are ignored; returns 0 when the text has no
    content lines at all.

    Args:
        text (str): unicode text.

    Returns:
        int: indentation.
    """
    content_lines = [line for line in text.split('\n') if line.strip()]
    if not content_lines:
        return 0
    return min(get_indentation(line) for line in content_lines)
returns the number of preceding spaces
Args:
text (str): unicode text
Returns:
int: indentation
CommandLine:
python -m utool.util_str --exec-get_minimum_indentation --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> import utool as ut
>>> text = ' foo\n bar'
>>> result = get... | juraj-google-style |
def deliver_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
logger.debug('deliver_tx: %s', raw_transaction)
transaction = self.bigchaindb.is_valid_transaction(
decode_transaction(raw_transaction), self.block_transactions)
if not transaction:
... | Validate the transaction before mutating the state.
Args:
raw_tx: a raw string (in bytes) transaction. | juraj-google-style |
def __init__(self,
             batch_size=1000,
             threadpool_prefix="batch_processor",
             threadpool_size=10):
    """BatchProcessor constructor.

    Args:
        batch_size: All the values will be processed in batches of this size.
        threadpool_prefix: Prefix that will be used in the thread pool's
            threads names.
        threadpool_size: Size of the thread pool that will be used. If
            threadpool_size is 0, no threads will be used.
    """
    super(BatchConverter, self).__init__()
    self.threadpool_prefix = threadpool_prefix
    self.threadpool_size = threadpool_size
    self.batch_size = batch_size
Args:
batch_size: All the values will be processed in batches of this size.
threadpool_prefix: Prefix that will be used in thread pool's threads
names.
threadpool_size: Size of a thread pool that will be used. If
threadpool_size is 0, no threads will be used and all conversions will
be done... | juraj-google-style |
def get_type_parameters(self, annot, seen=None):
seen = seen or set()
if annot in seen or not annot.formal:
return []
if isinstance(annot, mixin.NestedAnnotation):
seen = seen | {annot}
if isinstance(annot, abstract.TypeParameter):
return [annot]
elif isinstance(annot, abstra... | Returns all the TypeParameter instances that appear in the annotation.
Note that if you just need to know whether or not the annotation contains
type parameters, you can check its `.formal` attribute.
Args:
annot: An annotation.
seen: A seen set. | github-repos |
def get_account(self, address, id=None, endpoint=None):
    """Look up an account on the blockchain.

    Args:
        address (str): address to lookup
            (in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK').
        id (int, optional): id to use for response tracking.
        endpoint (RPCEndpoint, optional): endpoint to specify to use.

    Returns:
        json object of the result or the error encountered in the call.
    """
    params = [address]
    return self._call_endpoint(GET_ACCOUNT_STATE, params=params,
                               id=id, endpoint=endpoint)
Args:
address: (str) address to lookup ( in format 'AXjaFSP23Jkbe6Pk9pPGT6NBDs1HVdqaXK')
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in... | juraj-google-style |
def parse_method_configs(interface_config):
retry_codes_map = {name: retry_codes for (name, retry_codes) in six.iteritems(interface_config.get('retry_codes', {}))}
retry_params_map = {name: retry_params for (name, retry_params) in six.iteritems(interface_config.get('retry_params', {}))}
method_configs = {}
... | Creates default retry and timeout objects for each method in a gapic
interface config.
Args:
interface_config (Mapping): The interface config section of the full
gapic library config. For example, If the full configuration has
an interface named ``google.example.v1.ExampleService`` you would
pass in just that interfac... | codesearchnet |
def get_option(option_name, section_name="main", default=_sentinel, cfg_file=cfg_file):
defaults = get_defaults()
if default != _sentinel:
my_defaults = {option_name: default}
else:
my_defaults = defaults.get('section_name', {})
parser = get_par... | Returns a specific option specific in a config file
Arguments:
option_name -- Name of the option (example host_name)
section_name -- Which section of the config (default: name)
examples:
>>> get_option("some option", default="default result")
'default result' | juraj-google-style |
def get_connection_string(params, hide_password=True):
connection_string = (params['driver'] + ':
user = params.get('user', None)
password = params.get('password', None)
host = params.get('host', None)
port = params.get('port', None)
database = params.get('database', None)
if (database is No... | Get a database connection string
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
hide_password (bool): if True, the password is hidden in the returned string
(use this for logging purposes).
Returns:
str: connection string | codesearchnet |
def has_course_mode(self, course_run_id, mode):
    """Query the Enrollment API for whether a course run offers a mode.

    Arguments:
        course_run_id (str): The string value of the course run's unique
            identifier.
        mode (str): slug of the course mode to look for.

    Returns:
        bool: Whether the course run has the given mode available for
        enrollment.
    """
    modes = self.get_course_modes(course_run_id)
    return any(course_mode['slug'] == mode for course_mode in modes)
Arguments:
course_run_id (str): The string value of the course run's unique identifier
Returns:
bool: Whether the course run has the given mode available for enrollment. | codesearchnet
def load_words(self, words):
    """Load a list of words from which to generate a word frequency list.

    Args:
        words (list): The list of words to be loaded.
    """
    lowered = (word.lower() for word in words)
    self._dictionary.update(lowered)
    self._update_dictionary()
Args:
words (list): The list of words to be loaded | juraj-google-style |
def firmware_drivers(self):
    """Get the FirmwareDrivers API client.

    Returns:
        FirmwareDrivers: lazily created and cached client instance.
    """
    if not self.__firmware_drivers:
        # Instantiate on first access and cache for subsequent calls.
        self.__firmware_drivers = FirmwareDrivers(self.__connection)
    return self.__firmware_drivers
Returns:
FirmwareDrivers: | codesearchnet |
def contains_peroxide(structure, relative_cutoff=1.1):
    """Determine if a structure contains peroxide anions.

    Args:
        structure (Structure): Input structure.
        relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
            relative_cutoff * 1.49 stipulates the maximum distance two O
            atoms must be to each other to be considered a peroxide.

    Returns:
        Boolean indicating if structure contains a peroxide anion.
    """
    # Return the comparison directly instead of an if/else that
    # returns literal True/False.
    return oxide_type(structure, relative_cutoff) == "peroxide"
Args:
structure (Structure): Input structure.
relative_cutoff: The peroxide bond distance is 1.49 Angstrom.
Relative_cutoff * 1.49 stipulates the maximum distance two O
atoms must be to each other to be considered a peroxide.
Returns:
Boolean indicating if structure... | juraj-google-style |
def schedule(self, callback, *args, **kwargs):
self._executor.submit(callback, *args, **kwargs) | Schedule the callback to be called asynchronously in a thread pool.
Args:
callback (Callable): The function to call.
args: Positional arguments passed to the function.
kwargs: Key-word arguments passed to the function.
Returns:
None | juraj-google-style |
def from_http_response(response):
try:
payload = response.json()
except ValueError:
payload = {"error": {"message": response.text or "unknown error"}}
error_message = payload.get("error", {}).get("message", "unknown error")
errors = payload.get("error", {}).get("errors", ())
m... | Create a :class:`GoogleAPICallError` from a :class:`requests.Response`.
Args:
response (requests.Response): The HTTP response.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`, with the message and errors populated
from the response. | juraj-google-style |
def _IsText(self, bytes_in, encoding=None):
is_text = True
if isinstance(bytes_in, py2to3.UNICODE_TYPE):
return is_text
for value in bytes_in:
if py2to3.PY_2:
value = ord(value)
if (not (31 < value < 128)):
is_text = False
break
if is_text:
... | Examine the bytes in and determine if they are indicative of text.
Parsers need quick and at least semi reliable method of discovering whether
or not a particular byte stream is text or resembles text or not. This can
be used in text parsers to determine if a file is a text file or not for
instance.
The method assume... | codesearchnet |
def __getitem__(self, item):
depth = item.count('.') + 1
parts = item.split('.', 1)
for m in self.modules:
if parts[0] == m.name:
if depth == 1:
return m
for p in self.packages:
if parts[0] == p.name:
if... | Return the corresponding Package or Module object.
Args:
item (str): name of the package/module, dot-separated.
Returns:
Package/Module: corresponding object. | juraj-google-style |
def compile_intermediate_cpfs(self,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[Noise] = None) -> List[CPFPair]:
interm_fluents = []
with self.graph.as_de... | Compiles the intermediate fluent CPFs given the current `state` and `action` scope.
Args:
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation.
batch_size (Optional[int]): The batch size.
Returns:
A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. | juraj-google-style |
def _convert_to_json(self, response):
try:
return response.json()
except ValueError:
logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format(response.request.url, response.text))
return None | Converts response to JSON.
If the response cannot be converted to JSON then `None` is returned.
Args:
response - An object of type `requests.models.Response`
Returns:
Response in JSON format if the response can be converted to JSON. `None` otherwise. | codesearchnet |
def truthyAttrs(cls):
def __bool__(self):
return bool(any(getattr(self, attr) for attr in self.attrs))
cls.__bool__ = cls.__nonzero__ = __bool__
return cls | Class decorator: override __bool__ to set truthiness based on any attr being present.
Args:
cls (class): class to decorate
Returns:
class: same, but modified, class | juraj-google-style |
def insert(cls, cur, table: str, values: dict):
keys = cls._COMMA.join(values.keys())
value_place_holder = (cls._PLACEHOLDER * len(values))
query = cls._insert_string.format(table, keys, value_place_holder[:(- 1)])
(yield from cur.execute(query, tuple(values.values())))
return (yield from cur.fetcho... | Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties | codesearchnet |
def add(self, promise, bitoffset, *, _offsetideal=None):
if _offsetideal is None:
_offsetideal = bitoffset
if isinstance(promise, TDOPromise):
newpromise = promise.makesubatoffset(
bitoffset, _offsetideal=_offsetideal)
self._... | Add a promise to the promise collection at an optional offset.
Args:
promise: A TDOPromise to add to this collection.
bitoffset: An integer offset for this new promise in the collection.
_offsetideal: An integer offset for this new promise in the collection if the associated primitive supports arbitrary TDO control. | juraj-google-style |
def TransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
max_len=2048,
mode='train'):
return layers.Serial(
layers.ShiftRight()... | Transformer language model (only uses the decoder part of Transformer).
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate ... | juraj-google-style |
def read_schema(path):
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result | Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None | github-repos |
def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key n... | \
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work. | juraj-google-style |
def image_load(filename: str) -> tcod.image.Image:
return tcod.image.Image._from_cdata(
ffi.gc(lib.TCOD_image_load(_bytes(filename)), lib.TCOD_image_delete)
) | Load an image file into an Image instance and return it.
Args:
filename (AnyStr): Path to a .bmp or .png image file. | juraj-google-style |
def from_raw(self, file_names=None, **kwargs):
if file_names:
self.file_names = file_names
if (not isinstance(file_names, (list, tuple))):
self.file_names = [file_names]
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug('start iterating th... | Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together. | codesearchnet |
def _initialize_splittable_dimensions(self, mtf_graph):
all_mtf_dimension_names = set()
for mtf_operation in mtf_graph.operations:
for mtf_tensor in mtf_operation.outputs:
for mtf_dimension in mtf_tensor.shape.dims:
if not re.match(r"_anonymous_\d*", mtf_dimension.name):
... | Initializer for self._splittable_mtf_dimension_names.
Args:
mtf_graph: an mtf.Graph.
Returns:
A set(string) of the names of Mesh TensorFlow dimensions that may be
assigned in a layout. | juraj-google-style |
def exhaustive_fragment_check(self, ontology: pd.DataFrame, iri_curie_fragment_predicate: str='iri', cross_reference_iris: bool=False, cross_reference_fragments: bool=False, diff: bool=True) -> Tuple[list]:
(inside, outside) = ([], [])
header = (['Index'] + list(ontology.columns))
for row in ontology.itertu... | All entities with conflicting fragments gets a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
iri_curie_fragment_predicate: usually in qname form and is the colname of the DataFrame ... | codesearchnet |
def detect_language(index_page):
dom = dhtmlparser.parseString(index_page)
clean_content = dhtmlparser.removeTags(dom)
lang = None
try:
lang = langdetect.detect(clean_content)
except UnicodeDecodeError:
lang = langdetect.detect(clean_content.decode("utf-8"))
return Source... | Detect `languages` using `langdetect` library.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
obj: One :class:`.SourceString` object. | juraj-google-style |
def reverse_transform(self, tables, table_metas=None, missing=None):
if (missing is None):
missing = self.missing
else:
self.missing = missing
warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)
reverse = {}
for table_name in tables:
table =... | Transform data back to its original format.
Args:
tables(dict): mapping of table names to `tuple` where each tuple is on the form
(`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed
data and the `dict` the corresponding meta information.
If not specified, the tables will be retrieved using the met... | codesearchnet |
def abi_to_fasta(input, output):
direcs = [input, ]
zip_files = list_files(input, ['zip'])
if zip_files:
direcs.extend(_process_zip_files(zip_files))
for d in direcs:
files = list_files(d, ['ab1', 'abi'])
seqs = [SeqIO.read(open(f, 'rb'), 'abi') for f in files]
... | Converts ABI or AB1 files to FASTA format.
Args:
input (str): Path to a file or directory containing abi/ab1 files or
zip archives of abi/ab1 files
output (str): Path to a directory for the output FASTA files | juraj-google-style |
def torque_off(self):
data = []
data.append(10)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(1)
data.append(0)
send_data(data) | Set the torques of Herkulex to zero
In this mode, position control and velocity control
will not work, enable torque before that. Also the
servo shaft is freely movable
Args:
none | codesearchnet |
def __init__(self, model_handler: ModelHandler[ExampleT, PredictionT, Any], clock, metrics_namespace, load_model_at_runtime: bool=False, model_tag: str='RunInference'):
self._model_handler = model_handler
self._shared_model_handle = shared.Shared()
self._clock = clock
self._model = None
self._metric... | A DoFn implementation generic to frameworks.
Args:
model_handler: An implementation of ModelHandler.
clock: A clock implementing time_ns. *Used for unit testing.*
metrics_namespace: Namespace of the transform to collect metrics.
load_model_at_runtime: Bool to indicate if model loading should be
deferred to runtime - f... | github-repos |
def _apply_transformation(inputs):
(ts, transformation, extend_collection, clear_redo) = inputs
new = ts.append_transformation(transformation, extend_collection, clear_redo=clear_redo)
o = [ts]
if new:
o.extend(new)
return o | Helper method for multiprocessing of apply_transformation. Must not be
in the class so that it can be pickled.
Args:
inputs: Tuple containing the transformed structure, the transformation
to be applied, a boolean indicating whether to extend the
collection, and a boolean indicating whether to clear the redo
Returns:
... | codesearchnet |
def _resize_output_size_rescale_to_max_len(height: int, width: int, min_len: Optional[int]=1, max_len: Optional[int]=None) -> Tuple[int, int]:
max_len = max(height, width) if max_len is None else max_len
aspect_ratio = width / height
if width >= height:
width = max_len
height = int(width / a... | Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
min_len (`int`, *optional*, defaults to 1):
Minimum size of the output image.
max_len (`int`, *optional*, defaults to the maximum... | github-repos |
def validate(self, definition, version=None, strict=False):
if not HAS_KUBERNETES_VALIDATE:
raise KubernetesValidateMissing()
errors = list()
warnings = list()
try:
if version is None:
try:
version = self.version['kube... | validate checks a kubernetes resource definition
Args:
definition (dict): resource definition
version (str): version of kubernetes to validate against
strict (bool): whether unexpected additional properties should be considered errors
Returns:
warnings (list), errors (list): warnings are missing validations, errors a... | juraj-google-style |
def get_max_res_without_distortion(image_size: Tuple[int, int], target_size: Tuple[int, int]) -> Tuple[int, int]:
original_height, original_width = image_size
target_height, target_width = target_size
scale_w = target_width / original_width
scale_h = target_height / original_height
if scale_w < scal... | Determines the maximum resolution to which an image can be resized to without distorting its
aspect ratio, based on the target resolution.
Args:
image_size (Tuple[int, int]): The original resolution of the image (height, width).
target_resolution (Tuple[int, int]): The desired resolution to fit the image into (height,... | github-repos |
def encode_texts(self, texts, unknown_token='<UNK>', verbose=1, **kwargs):
if (not self.has_vocab):
raise ValueError('You need to build the vocabulary using `build_vocab` before using `encode_texts`')
if (unknown_token and (unknown_token not in self.special_token)):
raise ValueError(((('Your spe... | Encodes the given texts using internal vocabulary with optionally applied encoding options. See
``apply_encoding_options` to set various options.
Args:
texts: The list of text items to encode.
unknown_token: The token to replace words that out of vocabulary. If none, those words are omitted.
verbose: The verbosity lev... | codesearchnet |
def _refine_candidate(self, width, height):
packer = newPacker(PackingMode.Offline, PackingBin.BFF,
pack_algo=self._pack_algo, sort_algo=SORT_LSIDE,
rotation=self._rotation)
packer.add_bin(width, height)
for r in self._rectangles:
pac... | Use bottom-left packing algorithm to find a lower height for the
container.
Arguments:
width
height
Returns:
tuple (width, height, PackingAlgorithm): | juraj-google-style |
def __init__(self, filters):
self.filters = filters
super().__init__(', '.join(repr(f) for f in filters)) | Initialization of instances:
Args:
filters (list): the invalid filter names.
Attributes:
filters (list): the invalid filter names. | juraj-google-style |
def InitializeDownload(self, http_request, http=None, client=None):
self.EnsureUninitialized()
if http is None and client is None:
raise exceptions.UserError('Must provide client or http.')
http = http or client.http
if client is not None:
http_request.ur... | Initialize this download by making a request.
Args:
http_request: The HttpRequest to use to initialize this download.
http: The httplib2.Http instance for this request.
client: If provided, let this client process the final URL before
sending any additional requests. If client is provided and
http is not, client.http ... | juraj-google-style |
def compute_index(self, axis, data_object, compute_diff=True):
def pandas_index_extraction(df, axis):
if (not axis):
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = (self.... | Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the... | codesearchnet |
def field_mask(original, modified):
if ((original is None) and (modified is None)):
return field_mask_pb2.FieldMask()
if ((original is None) and (modified is not None)):
original = copy.deepcopy(modified)
original.Clear()
if ((modified is None) and (original is not None)):
mo... | Create a field mask by comparing two messages.
Args:
original (~google.protobuf.message.Message): the original message.
If set to None, this field will be interpretted as an empty
message.
modified (~google.protobuf.message.Message): the modified message.
If set to None, this field will be interpretted as an empty
mes... | codesearchnet |
def extract_signature(func, ignore_first=False):
sig_params = get_signature_params(func)
if ignore_first:
if len(sig_params) == 0:
raise Exception("Methods must take a 'self' argument, but the "
"method '{}' does not have one.".format(
... | Extract the function signature from the function.
Args:
func: The function whose signature should be extracted.
ignore_first: True if the first argument should be ignored. This should
be used when func is a method of a class.
Returns:
A function signature object, which includes the names of the keyword
arguments as w... | juraj-google-style |
def pretty_print_config_to_json(self, services, hostname=None):
descriptor = self.get_config_dict(services, hostname)
return json.dumps(descriptor, sort_keys=True, indent=2,
separators=(',', ': ')) | JSON string description of a protorpc.remote.Service in API format.
Args:
services: Either a single protorpc.remote.Service or a list of them
that implements an api/version.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
string, The API descriptor d... | juraj-google-style |
def dp004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dp004`'.format(value))
self._dp004 = value | Corresponds to IDD Field `dp004`
Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `dp004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not... | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.