code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def _split_input_from_namespace(cls, app, namespace, entity_kind,
shard_count):
raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
if shard_count == 1:
return [key_range.KeyRange(namespace=namespace, _app=app)]
ds_query = datastore.Query(kind=raw_en... | Helper for _split_input_from_params.
If there are not enough Entities to make all of the given shards, the
returned list of KeyRanges will include Nones. The returned list will
contain KeyRanges ordered lexicographically with any Nones appearing at the
end.
Args:
app: the app.
namespace: the namespace.
entity_kind: ent... | juraj-google-style |
def files_comments_edit(self, *, comment: str, file: str, id: str, **kwargs) -> SlackResponse:
    """Edit an existing file comment.

    Args:
        comment (str): The body of the comment.
            e.g. 'Everyone should take a moment to read this file.'
        file (str): The file id. e.g. 'F1234467890'
        id (str): The file comment id. e.g. 'Fc1234567890'
    """
    # Merge the required fields over any extra keyword arguments.
    payload = dict(kwargs, comment=comment, file=file, id=id)
    return self.api_call('files.comments.edit', json=payload)
def _read_git_tags(
default_version=DEFAULT_VERSION,
git_command=('git', 'tag'),
):
try:
current_tags = check_output(git_command).splitlines()
except Exception:
raise
if not current_tags[0]:
warnings.warn(
'Unable to resolve current version',
... | tries to find current git tag
Notes:
git_command exposed for testing null case
Args:
default_version (str): what version to make
git_command (:obj:`list`): subprocess command
Returns:
str: latest version found, or default
Warns:
exceptions.ProsperDefaultVersionWarning: git version not found | juraj-google-style |
def _req(self, req):
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
... | Send command and wait for response.
The command will be repeated 3 times at most in case data loss of serial port.
Args:
req (str): Command to send, please do not include new line in the end.
Returns:
[str]: The output lines | juraj-google-style |
def get_oauth_data(self, code, client_id, client_secret, state):
request = self._get_request()
response = request.post(self.OAUTH_TOKEN_URL, {'state': state, 'code': code, 'grant_type': 'authorization_code', 'client_id': client_id, 'client_secret': client_secret})
return HSAccessTokenAuth.from_response(resp... | Get Oauth data from HelloSign
Args:
code (str): Code returned by HelloSign for our callback url
client_id (str): Client id of the associated app
client_secret (str): Secret token of the associated app
Returns:
A HSAccessTokenAuth object | codesearchnet |
def click_nowait(self, pattern, action='click', desc=None, **match_kwargs):
    """Perform *action* at the matched position, returning immediately.

    Args:
        pattern (str or Pattern): filename or an opencv image object.
        action (str): click or long_click
        desc: unused; kept for interface compatibility.

    Returns:
        Click point, or None if no image found.
    """
    found = self.match(pattern, **match_kwargs)
    if found and found.matched:
        # Dispatch to the requested action method (e.g. self.click).
        getattr(self, action)(*found.pos)
        return found
    return None
def Run(self, request, global_params=None):
    """Runs a `BuildTrigger` at a particular source revision.

    Args:
        request: (CloudbuildProjectsTriggersRunRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        (Operation) The response message.
    """
    method_config = self.GetMethodConfig('Run')
    return self._RunMethod(method_config, request, global_params=global_params)
def _GetMergeTaskStorageFilePath(self, task):
filename = '{0:s}.plaso'.format(task.identifier)
return os.path.join(self._merge_task_storage_path, filename) | Retrieves the path of a task storage file in the merge directory.
Args:
task (Task): task.
Returns:
str: path of a task storage file file in the merge directory. | codesearchnet |
def filter_dict(d, exclude):
    """Return a new dict with specified keys excluded from the original dict.

    Args:
        d (dict): original dict
        exclude (list): the keys to exclude

    Returns:
        dict: a shallow copy of ``d`` without the excluded keys.
    """
    # Use a set so each membership test is O(1) instead of scanning the list.
    excluded = set(exclude)
    return {key: value for key, value in d.items() if key not in excluded}
def crt(self, mp, mq):
    """The Chinese Remainder Theorem as needed for decryption.

    Args:
        mp (int): the solution modulo p.
        mq (int): the solution modulo q.

    Returns:
        int: the solution modulo n = p * q.
    """
    # Lift the pair of residues to a single residue modulo p*q.
    difference = (mq - mp) * self.p_inverse
    return mp + (difference % self.q) * self.p
def isholiday(self, date):
date = parsefun(date)
if self.holidays:
i = bisect.bisect_left(self.holidays, date)
if i == 0 and date < self.holidays[0]:
warn('Holiday list exhausted at start, ' \
'isholiday(%s) output may be... | Check if a given date is a holiday.
Args:
date (date, datetime or str): Date to be checked.
Returns:
bool: True if the date is a holiday, False otherwise. | juraj-google-style |
def count_function(function: _evaluation.CountFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
del function, params_result
if operand_result is None:
raise ValueError('count() cannot be called without... | Returns an integer representing the number of elements in a collection.
By default, `_CountFunction` will return 0.
Args:
function: The FHIRPath AST `HasValueFunction` node
operand_result: The expression which is being evaluated
params_result: The parameter passed in to function
Returns:
A compiled Spark SQL express... | github-repos |
def _get_music_services_data(cls):
if (cls._music_services_data is not None):
return cls._music_services_data
result = {}
root = XML.fromstring(cls._get_music_services_data_xml().encode('utf-8'))
services = root.findall('Service')
for service in services:
result_value = service.attri... | Parse raw account data xml into a useful python datastructure.
Returns:
dict: Each key is a service_type, and each value is a
`dict` containing relevant data. | codesearchnet |
def AddWeight(self, path_segment_index, weight):
if (path_segment_index not in self._weight_per_index):
raise ValueError('Path segment index not set.')
self._weight_per_index[path_segment_index] += weight
if (weight not in self._indexes_per_weight):
self._indexes_per_weight[weight] = []
... | Adds a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index. | codesearchnet |
def Get(self, request, global_params=None):
    """Returns information about a previously requested build.

    The `Build` that is returned includes its status (such as `SUCCESS`,
    `FAILURE`, or `WORKING`), and timing information.

    Args:
        request: (CloudbuildProjectsBuildsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
        The response message.
    """
    method_config = self.GetMethodConfig('Get')
    return self._RunMethod(method_config, request, global_params=global_params)
async def inspect(self, service_id: str) -> Mapping[str, Any]:
    """Inspect a service.

    Args:
        service_id: ID or name of the service

    Returns:
        a dict with info about a service
    """
    endpoint = "services/{service_id}".format(service_id=service_id)
    return await self.docker._query_json(endpoint, method="GET")
def test(x, y, regex_expr=False):
    """Compares two values based on regular expression matching or
    strict equality comparison.

    Arguments:
        x (regex|str): string or regular expression to test.
        y (str): value to match.
        regex_expr (bool): enables regex string based expression matching.

    Raises:
        AssertionError: in case of matching error.

    Returns:
        bool
    """
    if isregex(x):
        return matches(x, y, regex_expr=regex_expr)
    return equal(x, y)
def authenticate(self, username, password):
if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):
result = self.authenticate_direct_credentials(username, password)
elif ((not self.config.get('LDAP_ALWAYS_SEARCH_BIND')) and (self.config.get('LDAP_USER_RDN_ATTR') == self.config.get('LDAP_USER_LOGIN_ATTR')))... | An abstracted authentication method. Decides whether to perform a
direct bind or a search bind based upon the login attribute configured
in the config.
Args:
username (str): Username of the user to bind
password (str): User's password to bind with.
Returns:
AuthenticationResponse | codesearchnet |
def evaluate_tensor_slice(tensor, tensor_slicing):
    """Call eval on the slicing of a tensor, with validation.

    Args:
        tensor: (numpy ndarray) The tensor value.
        tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
            None, no slicing will be performed on the tensor.

    Returns:
        (numpy ndarray) The sliced tensor.

    Raises:
        ValueError: If tensor_slicing is not a valid slicing string.
    """
    if not validate_slicing_string(tensor_slicing):
        raise ValueError('Invalid tensor-slicing string.')
    slices = _parse_slices(tensor_slicing)
    return tensor[slices]
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(CheckResponsePayload, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
self._unique_id... | Read the data encoding the Check response payload and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object w... | codesearchnet |
def Analyze(self, hashes):
    """Looks up hashes in Viper using the Viper HTTP API.

    Args:
        hashes (list[str]): hashes to look up.

    Returns:
        list[HashAnalysis]: hash analysis.

    Raises:
        RuntimeError: If no host has been set for Viper.
    """
    return [
        interface.HashAnalysis(digest, self._QueryHash(digest))
        for digest in hashes]
def get_explanation_dict(self, entry):
centry = self.process_entry(entry)
if (centry is None):
uncorrected_energy = entry.uncorrected_energy
corrected_energy = None
else:
uncorrected_energy = centry.uncorrected_energy
corrected_energy = centry.energy
d = {'compatibility':... | Provides an explanation dict of the corrections that are being applied
for a given compatibility scheme. Inspired by the "explain" methods
in many database methodologies.
Args:
entry: A ComputedEntry.
Returns:
(dict) of the form
{"Compatibility": "string",
"Uncorrected_energy": float,
"Corrected_energy": float,
"Corr... | codesearchnet |
def _generate_matrix(self, hash_bytes):
half_columns = ((self.columns
cells = (self.rows * half_columns)
matrix = [([False] * self.columns) for _ in range(self.rows)]
for cell in range(cells):
if self._get_bit(cell, hash_bytes[1:]):
column = (cell
row = (cell % self.row... | Generates matrix that describes which blocks should be coloured.
Arguments:
hash_bytes - List of hash byte values for which the identicon is being
generated. Each element of the list should be an integer from 0 to
255.
Returns:
List of rows, where each element in a row is boolean. True means the
foreground colour sho... | codesearchnet |
def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, top_k: int=100):
out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError('Make ... | Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object ... | github-repos |
def clear_values(self, red=0.0, green=0.0, blue=0.0, alpha=0.0, depth=1.0):
    """Sets the clear values for the window buffer.

    Args:
        red (float): red component
        green (float): green component
        blue (float): blue component
        alpha (float): alpha component
        depth (float): depth value
    """
    color = (red, green, blue, alpha)
    self.clear_color = color
    self.clear_depth = depth
def __init__(self, outputs):
    """Constructor for PredictOutput.

    Args:
        outputs: A `Tensor` or a dict of string to `Tensor` representing the
            predictions.

    Raises:
        ValueError: if the outputs is not dict, or any of its keys are not
            strings, or any of its values are not `Tensor`s.
    """
    wrapped = self._wrap_and_check_outputs(
        outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')
    self._outputs = wrapped
def unlock_swarm(self, key):
if isinstance(key, dict):
if ('UnlockKey' not in key):
raise errors.InvalidArgument('Invalid unlock key format')
else:
key = {'UnlockKey': key}
url = self._url('/swarm/unlock')
res = self._post_json(url, data=key)
self._raise_for_status(res)
... | Unlock a locked swarm.
Args:
key (string): The unlock key as provided by
:py:meth:`get_unlock_key`
Raises:
:py:class:`docker.errors.InvalidArgument`
If the key argument is in an incompatible format
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
Exa... | codesearchnet |
def convert_variables_to_constants_from_session_graph(session, graph_def, output_node_names, variable_names_allowlist=None, variable_names_denylist=None):
graph_def, _ = _replace_variables_by_constants(converter_data=_SessionConverterData(session=session, graph_def=graph_def, output_node_names=output_node_names, va... | Replaces all the variables in a graph with constants of the same values.
This function works similarly to convert_variables_to_constants_v2, but it
retrieves the constant values from a Session instead of from a
ConcreteFunction. This is useful when converting graphs generated from
TensorFlow V1, where ConcreteFunction... | github-repos |
def get_sessions(self, app_path=None):
    """Gets all currently active sessions for applications.

    Args:
        app_path (str, optional) :
            The configured application path for the application to return
            sessions for. If None, return active sessions for all
            applications. (default: None)

    Returns:
        list[ServerSession]
    """
    if app_path is not None:
        return self._tornado.get_sessions(app_path)
    # Flatten the sessions of every configured application path.
    return [
        session
        for path in self._tornado.app_paths
        for session in self._tornado.get_sessions(path)]
def on_test_end(self, logs=None):
    """Calls the `on_test_end` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_test_end(processed)
def refund(request, invoice_id):
current_invoice = InvoiceController.for_id_or_404(invoice_id)
try:
current_invoice.refund()
messages.success(request, "This invoice has been refunded.")
except ValidationError as ve:
messages.error(request, ve)
return redirect("invoice", i... | Marks an invoice as refunded and requests a credit note for the
full amount paid against the invoice.
This view requires a login, and the logged in user must be staff.
Arguments:
invoice_id (castable to int): The ID of the invoice to refund.
Returns:
redirect:
Redirects to ``invoice``. | juraj-google-style |
def symm_reduce(self, coords_set, threshold=1e-06):
surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
symm_ops = surf_sg.get_symmetry_operations()
unique_coords = []
coords_set = [self.slab.lattice.get_fractional_coords(coords) for coords in coords_set]
for coords in coords_set:
incoord = False
... | Reduces the set of adsorbate sites by finding removing
symmetrically equivalent duplicates
Args:
coords_set: coordinate set in cartesian coordinates
threshold: tolerance for distance equivalence, used
as input to in_coord_list_pbc for dupl. checking | codesearchnet |
def element_or_none(self, using, value):
    """Check if an element is in the current element.

    Support:
        Android iOS Web(WebView)

    Args:
        using (str): The element location strategy.
        value (str): The value of the location strategy.

    Returns:
        Element if the element exists, None otherwise.
    """
    try:
        return self._execute(Command.FIND_CHILD_ELEMENT, {
            'using': using,
            'value': value
        })
    # A bare `except:` also swallowed SystemExit/KeyboardInterrupt; catch
    # Exception so interpreter-level signals still propagate.
    except Exception:
        return None
def _SetFieldType(self, field_proto, field_desc, package, scope):
if field_proto.type_name:
desc = self._GetTypeFromScope(package, field_proto.type_name, scope)
else:
desc = None
if (not field_proto.HasField('type')):
if isinstance(desc, descriptor.Descriptor):
field_prot... | Sets the field's type, cpp_type, message_type and enum_type.
Args:
field_proto: Data about the field in proto format.
field_desc: The descriptor to modify.
package: The package the field's container is in.
scope: Enclosing scope of available types. | codesearchnet |
def equals(self, rhs):
    """Checks whether any Comparator is equal to rhs.

    Args:
        rhs: can be anything

    Returns:
        bool
    """
    return any(comparator.equals(rhs) for comparator in self._comparators)
def __init__(self, default: typing.Any, values: typing.List[typing.Any], frozen: bool=False):
if not isinstance(values, list) or not values:
raise ValueError(f'Values for Enum should be a non-empty list. Found {values!r}.')
if MISSING_VALUE != default and default not in values:
raise ValueError(... | Constructor.
Args:
default: default value for this spec.
values: all acceptable values.
frozen: If True, values other than the default value is not accceptable. | github-repos |
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
    """Create the Signature TI object.

    Args:
        name: signature name.
        file_name: signature file name.
        file_type: signature file type.
        file_content: signature file content.
        owner: (optional) owner to associate.
        **kwargs: additional arguments forwarded to Signature.

    Returns:
        Signature: the created Signature TI object.
    """
    return Signature(
        self.tcex, name, file_name, file_type, file_content,
        owner=owner, **kwargs)
def _maybe_read_file(filename):
try:
with open(filename) as infile:
return infile.read()
except IOError as e:
if e.errno == errno.ENOENT:
return None | Read the given file, if it exists.
Args:
filename: A path to a file.
Returns:
A string containing the file contents, or `None` if the file does
not exist. | juraj-google-style |
def load_caffe(model_desc, model_file):
with change_env('GLOG_minloglevel', '2'):
import caffe
caffe.set_mode_cpu()
net = caffe.Net(model_desc, model_file, caffe.TEST)
param_dict = CaffeLayerProcessor(net).process()
logger.info("Model loaded from caffe. Params: " +
... | Load a caffe model. You must be able to ``import caffe`` to use this
function.
Args:
model_desc (str): path to caffe model description file (.prototxt).
model_file (str): path to caffe model parameter file (.caffemodel).
Returns:
dict: the parameters. | juraj-google-style |
def variable_dtype(self):
    """The variable dtype of this policy.

    This is the dtype layers will create their variables in, unless a layer
    explicitly chooses a different dtype. If this is different than
    `Policy.compute_dtype`, Layers will cast variables to the compute dtype
    to avoid type errors.
    """
    dtype = self._variable_dtype
    return dtype
def delete(self, key):
    """Delete method of CRUD operation for all data types.

    Args:
        key (string): The variable to delete from the DB.

    Returns:
        (string): Result of DB delete, or None when no key was provided.
    """
    # Guard clause: nothing to delete without a key.
    if key is None:
        self.tcex.log.warning(u'The key field was None.')
        return None
    return self.db.delete(key.strip())
def unpack(self, buff, offset=0):
super().unpack(buff, offset)
self.version = self._version_ihl.value >> 4
self.ihl = self._version_ihl.value & 15
self.dscp = self._dscp_ecn.value >> 2
self.ecn = self._dscp_ecn.value & 3
self.length = self.length.value
s... | Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. | juraj-google-style |
def PrintMessage(self, message):
fields = message.ListFields()
if self.use_index_order:
fields.sort(key=lambda x: x[0].index)
for field, value in fields:
if _IsMapEntry(field):
for key in sorted(value):
entry_submsg = ... | Convert protobuf message to text format.
Args:
message: The protocol buffers message. | juraj-google-style |
def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None):
self._strategy = strategy
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._session_config = session_config
self._worker_barrier =... | Initialize the worker context object.
Args:
strategy: a `DistributionStrategy` object.
cluster_spec: a ClusterSpec object. It can be empty or None in the local
training case.
task_type: a string indicating the role of the corresponding task, such as
"worker" or "ps". It can be None if it is local training or in-graph
... | github-repos |
async def _multipart(self, files_dict):
boundary = bytes(_BOUNDARY, self.encoding)
hder_format = 'Content-Disposition: form-data; name="{}"'
hder_format_io = '; filename="{}"'
multip_pkg = b''
num_of_parts = len(files_dict)
for index, kv in enumerate(files_dic... | Forms multipart requests from a dict with name, path k/vs. Name
does not have to be the actual file name.
Args:
files_dict (dict): A dict of `filename:filepath`s, to be sent
as multipart files.
Returns:
multip_pkg (str): The strings representation of the content body,
multipart formatted. | juraj-google-style |
def download(self, location, local_dir='.'):
self.logger.debug('Getting S3 info')
bucket = self.info['bucket']
prefix = self.info['prefix']
self.logger.debug('Connecting to S3')
s3conn = self.client
location = location.strip('/')
self.logger.... | Download content from bucket/prefix/location.
Location can be a directory or a file (e.g., my_dir or my_dir/my_image.tif)
If location is a directory, all files in the directory are
downloaded. If it is a file, then that file is downloaded.
Args:
location (str): S3 location within prefix.
local_dir (str): Local directo... | juraj-google-style |
def _comparison(self, op, value):
if (not self._indexed):
raise datastore_errors.BadFilterError(('Cannot query for unindexed property %s' % self._name))
from .query import FilterNode
if (value is not None):
value = self._do_validate(value)
value = self._call_to_base_type(value)
... | Internal helper for comparison operators.
Args:
op: The operator ('=', '<' etc.).
Returns:
A FilterNode instance representing the requested comparison. | codesearchnet |
def handle_config_change(self, new_config):
    """Handle the new configuration.

    Args:
        new_config (dict): The new configuration
    """
    handler = self.user_handler
    if handler:
        handler(self.current_config, new_config)
    self._call_spec_handlers(new_config)
    # Keep a private snapshot so later mutations by the caller cannot
    # affect the stored configuration.
    self.current_config = copy.deepcopy(new_config)
def transform_python_types(self, obj):
if is_datetime_type(obj):
return convert_datetime_type(obj)
if is_timedelta_type(obj):
return convert_timedelta_type(obj)
elif isinstance(obj, slice):
return dict(start=obj.start, stop=obj.st... | Handle special scalars such as (Python, NumPy, or Pandas)
datetimes, or Decimal values.
Args:
obj (obj) :
The object to encode. Anything not specifically handled in
this method is passed on to the default system JSON encoder. | juraj-google-style |
def set_iprouting(self, value=None, default=False, disable=False):
    """Configures the state of global ip routing.

    EosVersion:
        4.13.7M

    Args:
        value (bool): True if ip routing should be enabled or False if
            ip routing should be disabled
        default (bool): Controls the use of the default keyword
        disable (bool): Controls the use of the no keyword

    Returns:
        bool: True if the commands completed successfully
    """
    # Disabling ip routing is expressed with the "no" keyword.
    if value is False:
        disable = True
    command = self.command_builder(
        'ip routing', value=value, default=default, disable=disable)
    return self.configure(command)
def as_list(self, label=1, **kwargs):
    """Returns the explanation as a list.

    Args:
        label: desired label. If you ask for a label for which an
            explanation wasn't computed, will throw an exception.
            Will be ignored for regression explanations.
        kwargs: keyword arguments, passed to domain_mapper

    Returns:
        list of tuples (representation, weight).
    """
    if self.mode == 'classification':
        label_to_use = label
    else:
        label_to_use = self.dummy_label
    mapped = self.domain_mapper.map_exp_ids(self.local_exp[label_to_use], **kwargs)
    return [(item[0], float(item[1])) for item in mapped]
def run_foreach_or_conditional(self, context):
    """Run the foreach sequence or the conditional evaluation.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
            mutate.
    """
    logger.debug("starting")
    # Select the execution strategy based on whether foreach items exist.
    runner = (self.foreach_loop if self.foreach_items
              else self.run_conditional_decorators)
    runner(context)
    logger.debug("done")
def generate_sample_set(self, tags=None):
    """Generate a sample_set that matches the tags, or all if tags are not specified.

    Args:
        tags: Match samples against this tag list (or all if not specified)

    Returns:
        The sample_set of those samples matching the tags
    """
    # Normalize a single tag string into a one-element list.
    tag_list = [tags] if isinstance(tags, str) else tags
    return self.store_sample_set(self.data_store.tag_match(tag_list))
def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
default_to_square = True
if 'shortest_edge' in siz... | Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.... | github-repos |
async def start(self, name="websocket_client"):
    """Connect to the websocket server.

    This method will spawn a background task in the designated event loop
    that will run until stop() is called. You can control the name of the
    background task for debugging purposes using the name parameter. The
    name is not used in anyway except for debug logging statements.

    Args:
        name (str): Name for the spawned background task.
    """
    self._con = await websockets.connect(self.url)
    manager = self._manage_connection()
    self._connection_task = self._loop.add_task(manager, name=name)
def get_list(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None):
def parse_list(v):
parts = v.split(',')
results = []
for part in parts:
part = part.strip()
if part:
results.append(part)
return results
... | Get a the value corresponding to the key and converts comma separated values to a list.
Args:
key: the dict key.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if... | codesearchnet |
def split_input(cls, mapper_spec):
params = _get_params(mapper_spec)
shard_count = mapper_spec.shard_count
start_time = params[cls.START_TIME_PARAM]
end_time = params[cls.END_TIME_PARAM]
seconds_per_shard = (end_time - start_time) / shard_count
shards = []
for _ in xrange(sh... | Returns a list of input readers for the given input specification.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders. | juraj-google-style |
def reset(self, history=None):
    """Resets the Runner's internal stats counters.

    If history is empty, counters are reset to empty lists.

    Args:
        history (dict): A dictionary containing an already run experiment's
            results. Keys should be: episode_rewards (list of rewards),
            episode_timesteps (lengths of episodes), episode_times (run-times)
    """
    history = history or {}
    self.episode_rewards = history.get("episode_rewards", [])
    self.episode_timesteps = history.get("episode_timesteps", [])
    self.episode_times = history.get("episode_times", [])
def lstsq(A, b):
    r"""Return the least-squares solution to a linear matrix equation.

    Args:
        A (array_like): Coefficient matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Least-squares solution.
    """
    # Local imports keep this helper self-contained.
    from numpy import asarray, dot, double, finfo, newaxis, squeeze
    from numpy.linalg import lstsq as npy_lstsq

    A = asarray(A, float)
    b = asarray(b, float)
    if A.ndim == 1:
        A = A[:, newaxis]
    # Single-column systems have the closed-form solution (A'b) / (A'A).
    if A.shape[1] == 1:
        return dot(A.T, b) / squeeze(dot(A.T, A))
    rcond = finfo(double).eps * max(*A.shape)
    return npy_lstsq(A, b, rcond=rcond)[0]
def prepare_capstone(syntax=AsmSyntax.att, target=None):
if not HAVE_CAPSTONE:
raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')
if target is None:
target = pwnypack.target.target
if target.arch == pwnypack.target.Target.Arch.x86:
... | Prepare a capstone disassembler instance for a given target and syntax.
Args:
syntax(AsmSyntax): The assembler syntax (Intel or AT&T).
target(~pwnypack.target.Target): The target to create a disassembler
instance for. The global target is used if this argument is
``None``.
Returns:
An instance of the capstone disasse... | juraj-google-style |
def market_if_touched(self, accountID, **kwargs):
    """Shortcut to create a MarketIfTouched Order in an Account.

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a MarketIfTouchedOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    order_request = MarketIfTouchedOrderRequest(**kwargs)
    return self.create(accountID, order=order_request)
def Items(self, key):
    """Return items associated with given key.

    Args:
        key: The key for which we are finding associated items.

    Raises:
        KeyError: If the key is not found in the reservoir.

    Returns:
        [list, of, items] associated with that key.
    """
    with self._mutex:
        # EAFP: attempt the lookup and translate the failure message.
        try:
            bucket = self._buckets[key]
        except KeyError:
            raise KeyError('Key %s was not found in Reservoir' % key)
        return bucket.Items()
def prepend(self, key, value, expire=0, noreply=None):
    """The memcached "prepend" command.

    Args:
        key: str, see class docs for details.
        value: str, see class docs for details.
        expire: optional int, number of seconds until the item is expired
            from the cache, or zero for no expiry (the default).
        noreply: optional bool, True to not wait for the reply (defaults to
            self.default_noreply).
    """
    effective_noreply = self.default_noreply if noreply is None else noreply
    results = self._store_cmd(b'prepend', {key: value}, expire, effective_noreply)
    return results[key]
def _resolve_subkeys(key, separator='.'):
subkey = None
if (separator in key):
index = key.index(separator)
subkey = key[(index + 1):]
key = key[:index]
return (key, subkey) | Given a key which may actually be a nested key, return the top level
key and any nested subkeys as separate values.
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s). | codesearchnet |
def _result_type_impl(*arrays_and_dtypes):
promo_safety_mode = ops.get_dtype_conversion_mode()
valid_arrays_and_dtypes = []
for inp in arrays_and_dtypes:
if inp is not None:
if _is_acceptable_input_type(inp):
valid_arrays_and_dtypes.append(inp)
else:
... | Internal implementation of jnp_style_result_type.
Args:
*arrays_and_dtypes: A list of Tensors, Variables, NumPy arrays or python
numbers.
Returns:
The result promotion type from all the inputs.
Raises:
TypeError: when the promotion between the input dtypes is disabled in the
current mode
NotImplementedError:
(1) Wh... | github-repos |
def roles(self):
    """Gets the Roles API client.

    Returns:
        Roles: the lazily created, cached Roles client.
    """
    roles_client = self.__roles
    if not roles_client:
        roles_client = Roles(self.__connection)
        self.__roles = roles_client
    return roles_client
def result_to_dict(raw_result):
result = {}
for channel_index, channel in enumerate(raw_result):
channel_id, channel_name = channel[0], channel[1]
channel_result = {
'id': channel_id,
'name': channel_name,
'movies': []
}
for movie in cha... | Parse raw result from fetcher into readable dictionary
Args:
raw_result (dict) - raw data from `fetcher`
Returns:
dict - readable dictionary | juraj-google-style |
def VerifyStructure(self, parser_mediator, lines):
    """Verifies that this is a syslog-formatted file.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        lines (str): one or more lines from the text file.

    Returns:
        bool: True if this is the correct parser, False otherwise.
    """
    verification_patterns = (
        self._VERIFICATION_REGEX, self._CHROMEOS_VERIFICATION_REGEX)
    return any(
        re.match(pattern, lines) for pattern in verification_patterns)
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise. | juraj-google-style |
def intersect_one_round(candidates, intersections):
next_candidates = []
for (first, second) in candidates:
both_linearized = False
if (first.__class__ is Linearization):
if (second.__class__ is Linearization):
both_linearized = True
bbox_int = bbox_in... | Perform one step of the intersection process.
.. note::
This is a helper for :func:`_all_intersections` and that function
has a Fortran equivalent.
Checks if the bounding boxes of each pair in ``candidates``
intersect. If the bounding boxes do not intersect, the pair
is discarded. Otherwise, the pair is "accepted". ... | codesearchnet |
def load_maps(maps_dir):
maps_dir = os.path.abspath(maps_dir)
maps = {}
for (root, dirnames, filenames) in os.walk(maps_dir):
for filename in filenames:
if filename.endswith('.xml'):
xml_file = os.path.join(root, filename)
map = MapSource.from_xml(xml_file... | Load all xml map sources from a given directory.
Args:
maps_dir: path to directory to search for maps
Returns:
dict of MapSource: | codesearchnet |
def split(node, stack):
    """Carry over the state from the primal to the adjoint.

    Args:
        node: A module with the primal and adjoint function definitions, as
            returned by the reverse-AD pass.
        stack: The stack node to use for storing and restoring state.

    Returns:
        The module node with state save/restore code spliced in (via
        ``store_state``) and all annotations cleared.
    """
    # Recompute defined/reaching variable analyses before rewriting.
    node, defined, reaching = _fix(node)
    node = store_state(node, reaching, defined, stack)
    # Annotations from the analyses above are no longer valid; drop them.
    anno.clearanno(node)
    return node
return node | Carry over the state from the primal to the adjoint.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
stack: The stack node to use for storing and restoring state.
Returns:
func: A `Module` node with two function definitions containing the primal
and adjoint respectiv... | juraj-google-style |
def _decode_filename(base_filename, problem_name, decode_hp):
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filen... | Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename. | juraj-google-style |
def send_result_email(self, sender=None):
status = 'successful'
if self.was_aborted:
status = 'aborted'
app_id = os.environ['APPLICATION_ID']
shard_index = app_id.find('~')
if shard_index != -1:
app_id = app_id[shard_index+1:]
param_dict = {
'status': status,
'... | Sends an email to admins indicating this Pipeline has completed.
For developer convenience. Automatically called from finalized for root
Pipelines that do not override the default action.
Args:
sender: (optional) Override the sender's email address. | juraj-google-style |
def validate(data):
text = data.get('text')
if not isinstance(text, _string_types) or len(text) == 0:
raise ValueError('text field is required and should not be empty')
if 'markdown' in data and not type(data['markdown']) is bool:
raise ValueError('markdown field should be bool')
... | Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid | juraj-google-style |
async def getPropNorm(self, prop, valu):
    """Get the normalized property value based on the Cortex data model.

    Args:
        prop (str): The property to normalize.
        valu: The value to normalize.

    Returns:
        tuple: A two item tuple, containing the normed value and the info
            dictionary.

    Raises:
        s_exc.NoSuchProp: If the prop does not exist.
        s_exc.BadTypeValu: Presumably raised by ``pobj.type.norm`` when the
            value fails to normalize — propagated, not raised here directly.
    """
    pobj = self.model.prop(prop)
    if pobj is None:
        raise s_exc.NoSuchProp(mesg=f'The property {prop} does not exist.',
                               prop=prop)
    norm, info = pobj.type.norm(valu)
    return norm, info
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value f... | juraj-google-style |
def mobility(sdat, tstart=None, tend=None):
tseries = sdat.tseries_between(tstart, tend)
steps = sdat.steps[tseries.index[0]:tseries.index[(- 1)]]
time = []
mob = []
for step in steps.filter(rprof=True):
time.append(step.timeinfo['t'])
mob.append((step.rprof.iloc[(- 1)].loc['vrms'] /... | Plates mobility.
Compute the ratio vsurf / vrms.
Args:
sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
tstart (float): time at which the computation should start. Use the
beginning of the time series data if set to None.
tend (float): time at which the computation should end. Use the
end of the ... | codesearchnet |
def _create_centerline(self):
border = array(self.__densify_border())
vor = Voronoi(border)
vertex = vor.vertices
lst_lines = []
for (j, ridge) in enumerate(vor.ridge_vertices):
if ((- 1) not in ridge):
line = LineString([((vertex[ridge[0]][0] + self._minx), (vertex[ridge[0]][1] ... | Calculate the centerline of a polygon.
Densifies the border of a polygon which is then represented by a Numpy
array of points necessary for creating the Voronoi diagram. Once the
diagram is created, the ridges located within the polygon are
joined and returned.
Returns:
a union of lines that are located within the po... | codesearchnet |
def process_data(data, number_to_keep):
result = dict()
if (number_to_keep != 0):
data_temp = dict(Counter(data).most_common(number_to_keep))
data_temp['rest'] = (sum(data.values()) - sum(data_temp.values()))
data = data_temp
labels = data
values = np.array([data[key] for key in ... | Prepare received data for representation.
Args:
data (dict): values to represent (ex. {'001' : 130})
number_to_keep (int): number of elements to show individually.
Returns:
dict: processed data to show. | codesearchnet |
def ManuallyScheduleClients(self, token=None):
    """Schedule all flows without using the Foreman.

    Since every client id to run on is known up front, the flows can simply
    be scheduled directly and the results awaited.

    Args:
        token: A datastore access token.
    """
    client_ids = {
        client_id
        for flow_request in self.args.flows
        for client_id in flow_request.client_ids
    }
    self.StartClients(self.session_id, client_ids, token=token)
Since we know all the client ids to run on we might as well just schedule
all the flows and wait for the results.
Args:
token: A datastore access token. | codesearchnet |
def select_symbols(self, symbols, ret_list=False):
symbols = list_strings(symbols)
exclude = symbols[0].startswith("-")
if exclude:
if not all(s.startswith("-") for s in symbols):
raise ValueError("When excluding symbols, all strings must start with `-`")
... | Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols.
Args:
symbols: str or list of symbols
Prepend the symbol string with "-", to exclude pseudos.
ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable` | juraj-google-style |
def recursion_error(self, repeated_parser: str):
if self.finished:
return super().recursion_error(repeated_parser)
else:
line_index, character_index, line, pointer = self.current_line()
return 'Infinite recursion detected in {}; empty string was matched and ... | Generate an error to indicate that infinite recursion was encountered.
A parser can supply a representation of itself to this method and the
reader will supply the context, including the location where the
parser stalled.
Args:
repeated_parser: A representation of the repeated parser
Returns:
A full error message | juraj-google-style |
def absolute_proportions(proportions, count):
relative_sum = sum(proportions.values())
absolute_proportions = {idx: int(count / relative_sum * prop_value) for idx, prop_value in
proportions.items()}
absolute_sum = sum(absolute_proportions.values())
rest_value... | Split a given integer into n parts according to len(proportions) so they sum up to count and
match the given proportions.
Args:
proportions (dict): Dict of proportions, with a identifier as key.
Returns:
dict: Dictionary with absolute proportions and same identifiers as key.
Example::
>>> absolute_proportions({'tra... | juraj-google-style |
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
for permission in exceptions_dict:
if (permission not in self._EXCEPTIONS_KEYS):
continue
exception_dict = exceptions_dict.get(permission, {})
for (urls, url_dict) in exception_dict.items():
... | Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs. | codesearchnet |
def filter_all_reachable_leaves_many(self, identifier_filters, language, forbidden_identifiers=None):
for i, identifier_filter in enumerate(identifier_filters):
if len(identifier_filter) == 1 and not isinstance(identifier_filter[0], list):
identifier_filters[i] = [identifier... | Provides the same functionality as .. py:method:: ItemManager.filter_all_reachable_leaves(),
but for multiple filters at the same time.
Args:
identifier_filters: list of identifier filters
language (str): language used for further filtering (some objects
for different languages share the same item
Returns:
list: list of ... | juraj-google-style |
def change_numbering(self, rename_dict, inplace=False):
    """Return the reindexed version of Cartesian.

    Args:
        rename_dict (dict): A dictionary mapping old index labels to new
            ones; labels absent from the dict are kept unchanged.
        inplace (bool): When True, modify this object and return None.

    Returns:
        Cartesian: A renamed copy according to the dictionary passed, or
        None when ``inplace`` is True.
    """
    target = self if inplace else self.copy()
    target.index = [rename_dict.get(label, label) for label in self.index]
    if not inplace:
        return target
Args:
rename_dict (dict): A dictionary mapping integers on integers.
Returns:
Cartesian: A renamed copy according to the dictionary passed. | juraj-google-style |
def get_session(self, app_path, session_id):
    """Get an active session by application path and session ID.

    Args:
        app_path (str): The configured application path for the application
            to return a session for.
        session_id (str): The session ID of the session to retrieve.

    Returns:
        ServerSession

    Raises:
        ValueError: If no application is configured at ``app_path``.
    """
    try:
        application = self._applications[app_path]
    except KeyError:
        raise ValueError("Application %s does not exist on this server" % app_path)
    return application.get_session(session_id)
Args:
app_path (str) :
The configured application path for the application to return
a session for.
session_id (str) :
The session ID of the session to retrieve.
Returns:
ServerSession | juraj-google-style |
def check(self, cell):
    """Check correctness against a single Jupyter cell.

    This base implementation is a no-op; concrete checkers are expected to
    raise on failure and return None on success.

    Args:
        cell: JSON representation of a single cell.
    """
    pass
Args:
cell: JSON representation of single cell.
Returns None if test succeeds, raise exception if test fails. | github-repos |
class RandomUniform(RandomInitializer):
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
super().__init__(seed=seed)
def __call__(self, shape, dtype=None):
return random.uniform(shape=shape, minval=self.minval, maxval=self.maxv... | Random uniform initializer.
Draws samples from a uniform distribution for given parameters.
Examples:
>>> # Standalone usage:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = RandomUniform(minval=0.0, maxval=1.0)
>>> layer... | github-repos |
def __init__(self, name, row_identifier):
    """Initializes a SQL table attribute container identifier.

    Args:
        name (str): name of the table.
        row_identifier (int): unique identifier of the row in the table.
    """
    super(SQLTableIdentifier, self).__init__()
    self.name = name
    self.row_identifier = row_identifier
Args:
name (str): name of the table.
row_identifier (int): unique identifier of the row in the table. | juraj-google-style |
def run_filter_query(self, resource_name, filter_clause):
url = self.base_url + "/" + resource_name
params = {"filter":json.dumps(filter_clause)}
r = requests.get(url, headers=self.headers, params=params)
logger.debug("requests.get result r.status_code: {}".format(r.status_cod... | run a query (get) against the CLUE api, using the API and user key fields of self and the fitler_clause provided
Args:
resource_name: str - name of the resource / collection to query - e.g. genes, perts, cells etc.
filter_clause: dictionary - contains filter to pass to API to; uses loopback specification
Returns: lis... | juraj-google-style |
def optimizer(name):
warn_msg = 'Please update `registry.optimizer` callsite (likely due to a `HParams.optimizer` value)'
if (name == 'SGD'):
name = 'sgd'
tf.logging.warning(("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg))
elif (name == 'RMSProp'):
name = 'rms_prop'
tf.... | Get pre-registered optimizer keyed by name.
`name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and
UpperCamelCase -> snake_case conversions included for legacy support.
Args:
name: name of optimizer used in registration. This should be a snake case
identifier, though others supported for legacy reaso... | codesearchnet |
def install(pkg, target='LocalSystem', store=False, allow_untrusted=False):
if ('*.' not in pkg):
pkg = _quote(pkg)
target = _quote(target)
cmd = 'installer -pkg {0} -target {1}'.format(pkg, target)
if store:
cmd += ' -store'
if allow_untrusted:
cmd += ' -allowUntrusted'
... | Install a pkg file
Args:
pkg (str): The package to install
target (str): The target in which to install the package to
store (bool): Should the package be installed as if it was from the
store?
allow_untrusted (bool): Allow the installation of untrusted packages?
Returns:
dict: A dictionary containing the results of ... | codesearchnet |
def transition_state(self, new_state):
if self.state == _InstrumentationBlockStates.UNKNOWN:
self.state = new_state
return self
else:
next_block = _InstrumentationBlock(state=new_state, prefix=self.prefix, previous_instrumentation_block=self)
if self.status_code in _Instrumentati... | Transitions or sets the current instrumentation block to the new
parser state.
Args:
new_state: _InstrumentationBlockStates, the state that the parser
should transition to.
Returns:
A new instrumentation block set to the new state, representing
the start of parsing a new instrumentation test method.
Alternatively, if... | github-repos |
def fn_args(fn):
if isinstance(fn, functools.partial):
args = fn_args(fn.func)
args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])]
else:
if _is_callable_object(fn):
fn = fn.__call__
args = tf_inspect.getfullargspec(fn).args
if _is_bound_met... | Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments | github-repos |
def get_item(self, name, bootstrap=False):
    """Get a particular item in the specification.

    Args:
        name (str): The name of the item to retrieve.
        bootstrap (bool): Only search bootstrap items.

    Returns:
        YapconfItem: The first item whose name matches, or None if no item
        is found.
    """
    return next(
        (item for item in self._get_items(bootstrap) if item.name == name),
        None,
    )
Args:
name (str): The name of the item to retrieve.
bootstrap (bool): Only search bootstrap items
Returns (YapconfItem):
A YapconfItem if it is found, None otherwise. | juraj-google-style |
def measurements(self, value):
    """Setter for the measurements property.

    Storing the default value removes any explicit override from the
    values mapping; any other value (or the default when no override
    exists) is recorded explicitly.

    Args:
        value (hash): the property value.
    """
    is_default = value == self._defaults['measurements']
    if is_default and 'measurements' in self._values:
        del self._values['measurements']
    else:
        self._values['measurements'] = value
Args:
value (hash): the property value.
def __init__(self, columns: list[str], vocab_size: Optional[int]=None, smooth: bool=True, name: Optional[str]=None):
    """Applies a tf-idf transformation on the given columns of incoming data.

    Args:
        columns: Columns of the incoming data to transform.
        vocab_size: Optional vocabulary size; presumably bounds the term
            index space — confirm against the transform implementation.
        smooth: Whether to apply smoothing to the idf term.
        name: Optional name for this transform.
    """
    super().__init__(columns)
    self.vocab_size = vocab_size
    self.smooth = smooth
    self.name = name
    # Populated later by the transform; None until computed.
    self.tfidf_weight = None
of incoming data.
TFIDF outputs two artifacts for each column: the vocabulary index and
the tfidf weight. The vocabulary index is a mapping from the original
vocabulary to the new vocabulary. The tfidf weight is a mapping
from the original vocabulary t... | github-repos |
def _get_implicit_credentials(cls):
environ_checkers = [cls._implicit_credentials_from_files, cls._implicit_credentials_from_gae, cls._implicit_credentials_from_gce]
for checker in environ_checkers:
credentials = checker()
if (credentials is not None):
return credentials
raise Ap... | Gets credentials implicitly from the environment.
Checks environment in order of precedence:
- Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
a file with stored credentials information.
- Stored "well known" file associated with `gcloud` command line tool.
- Google App Engine (production and testing)
... | codesearchnet |
def build_pipeline_labels(job_metadata, task_metadata, task_id_pattern=None):
labels = {
Label(name, job_metadata[name])
for name in ['job-name', 'job-id', 'user-id', 'dsub-version']
}
task_id = task_metadata.get('task-id')
if task_id is not None:
if task_id_pattern:
task_id = task_i... | Build a set() of standard job and task labels.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
task_metadata: Task metadata, such as the task-id.
task_id_pattern: A pattern for the task-id value, such as "task-%d"; the
original google label values could not be strictly numeric, so "task-"
was ... | juraj-google-style |
def _is_statically_shaped(element_spec):
for spec in nest.flatten(element_spec):
if isinstance(spec, (sparse_tensor.SparseTensorSpec, ragged_tensor.RaggedTensorSpec)):
if spec.shape.rank > 0 and spec.shape.as_list()[0] is None:
return False
else:
for component... | Test if an iterator output is statically shaped.
For sparse and ragged tensors this only tests the batch dimension.
Args:
element_spec: a nest structure of `tf.TypeSpec`. The element spec of the
dataset of the iterator.
Returns:
True if the shape is static, false otherwise. | github-repos |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.