| code | docstring | source |
|---|---|---|
def prefer_static_value(x):
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x | Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`. | github-repos |
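A quick illustration of the behavior, assuming the same `tensor_util` import path used by the snippet (constant folding works for eagerly-known constants; dynamic tensors yield `None`):

```python
import tensorflow as tf
from tensorflow.python.framework import tensor_util

x = tf.constant([1, 2, 3])
print(tensor_util.constant_value(x))  # array([1, 2, 3], dtype=int32): statically known
# For a tensor whose value is only known at run time, constant_value returns
# None, so prefer_static_value would hand back the tensor itself.
```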
def prod(self, vars_list: List[str]) -> 'TensorFluent':
operand = self
if (operand.dtype == tf.bool):
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list) | Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function. | codesearchnet |
def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None:
if type(buffer) is Buffer:
buffer = buffer.mglo
return self.mglo.read_into(buffer, face, alignment, write_offset) | Read a face from the cubemap texture.
Args:
buffer (bytearray): The buffer that will receive the pixels.
face (int): The face to read.
Keyword Args:
alignment (int): The byte alignment of the pixels.
write_offset (int): The write offset. | juraj-google-style |
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == ... | Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no scaling.
pad_fill (number or list): Value to be fi... | juraj-google-style |
def dict_strip(d):
_d = deepcopy(d)
for k, v in iteritems(d):
if isinstance(v, str):
_d[k] = v.strip()
elif isinstance(v, dict):
_d[k] = dict_strip(v)
return _d | Strips whitespace from the string values of the given dictionary (recursively).
Args:
d: A dictionary object.
Returns:
A new dictionary object, whose string values' whitespace has been stripped out. | juraj-google-style |
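A minimal call for illustration (the snippet relies on `deepcopy` and six's `iteritems`, imported elsewhere in the source module):

```python
from copy import deepcopy
from six import iteritems

print(dict_strip({"a": "  x ", "b": {"c": " y"}, "n": 3}))
# -> {'a': 'x', 'b': {'c': 'y'}, 'n': 3}   (non-string values pass through)
```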
def lock(self, key, client):
self.key = key
self.client = client | Set the key that will be used to ensure messages come from one party
Args:
key (string): The key used to validate future messages
client (string): A string that will be returned to indicate who
locked this device. | juraj-google-style |
def instantiate_interface(virtual_iface, config, loop):
if (virtual_iface == 'null'):
return StandardDeviceServer(None, {}, loop=loop)
conf = {}
if ('interface' in config):
conf = config['interface']
try:
reg = ComponentRegistry()
if virtual_iface.endswith('.py'):
... | Find a virtual interface by name and instantiate it
Args:
virtual_iface (string): The name of the pkg_resources entry point corresponding to
the interface. It should be in group iotile.virtual_interface
config (dict): A dictionary with an 'interface' key containing the config info for
this virtual interface. Th... | codesearchnet |
def exportData(self, datfile):
def ampl_set(name, values):
def format_entry(e):
return repr(e).replace(' ', '')
return 'set {0} := {1};'.format(name, ','.join((format_entry(e) for e in values)))
def ampl_param(name, values):
def format_entry(k, v):
k = repr(k)... | Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute). | codesearchnet |
def match_term(self, value, required=True, new_group=False):
if self.initialized:
if required:
self._and_join(new_group)
else:
self._or_join(new_group)
self._term(value)
return self | Add a fulltext search term to the query.
Warning:
Do not use this method with any other query-building helpers. This method
is only for building fulltext queries (in non-advanced mode). Using other
helpers, such as ``match_field()``, will cause the query to run in advanced mode.
If a fulltext term query is run in adva... | codesearchnet |
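Since the method returns `self`, calls chain; a hypothetical builder usage (the query object `q` is assumed):

```python
# AND-join the first term, OR-join the second (required=False).
q = q.match_term("quantum").match_term("computing", required=False)
```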
def validate(
self, nanopub: Mapping[str, Any]
) -> Tuple[bool, List[Tuple[str, str]]]:
(is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)
if not is_valid:
return (False, messages)
if nanopub["nanopub"]["type"]["name"].upper() == "... | Validates using the nanopub schema
Args:
nanopub (Mapping[str, Any]): nanopub dict
Returns:
Tuple[bool, List[Tuple[str, str]]]:
bool: Is valid? Yes = True, No = False
List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)
e.g. [('WARNING', "Context ID not found")] | juraj-google-style |
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(proces... | Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status. | juraj-google-style |
def learn(self, grad_arr, fix_opt_flag=False):
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, gra... | Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients. | juraj-google-style |
def available_writers(as_dict=False):
writers = []
for writer_configs in configs_for_writer():
try:
writer_info = read_writer_config(writer_configs)
except (KeyError, IOError, yaml.YAMLError):
LOG.warning('Could not import writer config from: %s', writer_configs)
... | Available writers based on current configuration.
Args:
as_dict (bool): Optionally return writer information as a dictionary.
Default: False
Returns: List of available writer names. If `as_dict` is `True` then
a list of dictionaries with additional writer information
is returned. | codesearchnet |
def poke(exposes):
def _poke(store, objname, obj, container, visited=None, _stack=None):
try:
sub_container = store.newContainer(objname, obj, container)
except (SystemExit, KeyboardInterrupt):
raise
except:
raise ValueError('generic poke not supported by... | Default serializer factory.
Arguments:
exposes (iterable): attributes to be serialized.
Returns:
callable: serializer (`poke` routine). | codesearchnet |
def createDomains(tlds, nicks=None, nicksFile=None):
domain_candidates = []
if nicks is not None:
for n in nicks:
for t in tlds:
tmp = {'domain': (n + t['tld']), 'type': t['type'], 'tld': t['tld']}
domain_candidates.append(tmp)
elif nicksFile is not None:
... | Generates the list of domains to be checked.
Args:
-----
tlds: List of tlds.
nicks: List of aliases.
nicksFile: The filepath to the aliases file.
Returns:
--------
list: list of domains to be checked. | codesearchnet |
def RemoveTransaction(self, tx):
if (BC.Default() is None):
return False
if (not BC.Default().ContainsTransaction(tx.Hash)):
return False
if (tx.Hash.ToBytes() in self.MemPool):
del self.MemPool[tx.Hash.ToBytes()]
return True
return False | Remove a transaction from the memory pool if it is found on the blockchain.
Args:
tx (neo.Core.TX.Transaction): instance.
Returns:
bool: True if successfully removed. False otherwise. | codesearchnet |
def GetDateRange(self):
start = self.start_date
end = self.end_date
for (date, (exception_type, _)) in self.date_exceptions.items():
if (exception_type == self._EXCEPTION_TYPE_REMOVE):
continue
if ((not start) or (date < start)):
start = date
if ((not end) or ... | Return the range over which this ServicePeriod is valid.
The range includes exception dates that add service outside of
(start_date, end_date), but doesn't shrink the range if exception
dates take away service at the edges of the range.
Returns:
A tuple of "YYYYMMDD" strings, (start date, end date) or (None, None) if... | codesearchnet |
def group(self, group_type=None, owner=None, **kwargs):
group = None
if (not group_type):
return Group(self.tcex, None, None, owner=owner, **kwargs)
name = kwargs.pop('name', None)
group_type = group_type.upper()
if (group_type == 'ADVERSARY'):
group = Adversary(self.tcex, name, owne... | Create the Group TI object.
Args:
owner: The ThreatConnect owner name.
group_type: The type of group to create (e.g. ADVERSARY).
**kwargs: Additional keyword arguments passed to the group class.
Return: | codesearchnet |
def init_feed_dict(self):
return self._init_feed_dict | Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`. | github-repos |
def rmse(y, p):
assert y.shape == p.shape
return np.sqrt(mse(y, p)) | Root Mean Squared Error (RMSE).
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): RMSE | juraj-google-style |
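A worked example; `mse` is not shown in the snippet, so a minimal stand-in is assumed here:

```python
import numpy as np

def mse(y, p):
    # Stand-in for the mse() helper the snippet depends on.
    return np.mean((y - p) ** 2)

y = np.array([1.0, 2.0, 3.0])
p = np.array([1.5, 2.0, 2.5])
print(np.sqrt(mse(y, p)))  # ~0.408
```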
def get_tag_html(tag_id):
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data['tag']
args = tag_data['args']
kwargs = tag_data['kwargs']
(lib, tag_name) = get_lib_and_tag_name(tag)
args_str = ''
if args:
for arg in args:
if isinstance(arg, six.string_types):
... | Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id of the tag to return the HTML for.
def __init__(self, pqc: tf.Tensor, qubits: List[cirq.GridQubit], symbol_names: tf.Tensor, value_layers_inputs: List[Union[tf.Variable, List[tf.Variable]]], value_layers: List[List[tf.keras.layers.Layer]], name: Union[None, str]=None):
super().__init__(name=name)
self._pqc = pqc
self._qubits = sorted(qubits)... | Initializes a QuantumCircuit.
Args:
pqc: TFQ string representation of a parameterized quantum circuit.
qubits: The qubits on which `pqc` acts.
symbol_names: Strings which are used to specify the order in which the
values in `self.symbol_values` should be placed inside of the circuit.
value_layers_inputs: Inputs to the... | github-repos |
def VerifyStructure(self, parser_mediator, lines):
if self._VERIFICATION_REGEX.match(lines):
return True
return False | Verifies whether content corresponds to a Zsh extended_history file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if the line was successfully parsed. | juraj-google-style |
def __init__(self, _args):
super(TcExInit, self).__init__(_args)
self.base_url = (
'https:
).format(self.args.branch) | Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace. | juraj-google-style |
def _CheckMacOSPaths(self, filename, artifact_definition, source, paths):
result = True
paths_with_private = []
paths_with_symbolic_link_to_private = []
for path in paths:
path_lower = path.lower()
path_segments = path_lower.split(source.separator)
if not path_segments:
... | Checks if the paths are valid MacOS paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
paths (list[str]): paths to validate.
Returns:
bool: True if the MacOS paths are valid.
def num_samples(self, sr=None):
native_sr = self.sampling_rate
num_samples = units.seconds_to_sample(self.duration, native_sr)
if sr is not None:
ratio = float(sr) / native_sr
num_samples = int(np.ceil(num_samples * ratio))
return num_samples | Return the number of samples.
Args:
sr (int): Calculate the number of samples with the given
sampling-rate. If None use the native sampling-rate.
Returns:
int: Number of samples | juraj-google-style |
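Worked numbers under assumed values: a 2-second clip at a native 16 kHz rate has 32000 samples, and requesting half the rate halves the count (the `clip` object here is hypothetical):

```python
# ratio = 8000 / 16000 = 0.5; ceil(32000 * 0.5) = 16000
clip.num_samples()         # -> 32000 at the native rate
clip.num_samples(sr=8000)  # -> 16000
```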
def np_doc_only(np_fun_name, np_fun=None):
np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
def decorator(f):
f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)
return f
return decorator | Attaches a numpy docstring to a function.
This differs from np_doc in that it doesn't check for a match in signature.
Args:
np_fun_name: name for the np_fun symbol. At least one of np_fun or
np_fun_name should be set.
np_fun: (optional) the numpy function whose docstring will be used.
Returns:
A function decorator that ... | github-repos |
def expand(self, url):
expand_url = f'{self.api_url}v3/expand'
params = {
'shortUrl': url,
'access_token': self.api_key,
'format': 'txt',
}
response = self._get(expand_url, params=params)
if response.ok:
return response.tex... | Expand implementation for Bit.ly
Args:
url: the short URL you want to expand
Returns:
A string containing the expanded URL
Raises:
ExpandingErrorException: If the API returns an error as response | juraj-google-style |
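A hypothetical call; the class name, constructor, and URL below are assumptions, not the real API:

```python
shortener = Bitly(api_key="...")                      # hypothetical constructor
print(shortener.expand("http://bit.ly/abc123"))       # -> the original long URL
```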
def simple_stack(self, opcode=None):
if opcode is not None:
return (frame_state.SimpleFrame(opcode),)
elif self.frame:
return (frame_state.SimpleFrame(self.frame.current_opcode),)
else:
return () | Get a stack of simple frames.
Args:
opcode: Optionally, an opcode to create a stack for.
Returns:
If an opcode is provided, a stack with a single frame at that opcode.
Otherwise, the VM's current stack converted to simple frames. | github-repos |
def json(cls, message):
if (type(message) is OrderedDict):
pprint(dict(message))
else:
pprint(message) | Print a nice JSON output
Args:
message: the message to print | codesearchnet |
def sketch_fasta(fasta_path, outdir):
genome_name = genome_name_from_fasta_path(fasta_path)
outpath = os.path.join(outdir, genome_name)
args = ['mash', 'sketch', '-o', outpath, fasta_path]
logging.info('Running Mash sketch with command: %s', ' '.join(args))
p = Popen(args)
p.wait()
sket... | Create a Mash sketch from an input fasta file
Args:
fasta_path (str): input fasta file path; the genome name is parsed from the fasta filename
outdir (str): output directory path to write Mash sketch file to
Returns:
str: output Mash sketch file path | juraj-google-style |
def _Aff4Read(aff4_obj, offset, length):
length = (length or (_Aff4Size(aff4_obj) - offset))
aff4_obj.Seek(offset)
return aff4_obj.Read(length) | Reads contents of given AFF4 file.
Args:
aff4_obj: An AFF4 stream instance to retrieve contents for.
offset: An offset to start the reading from.
length: A number of bytes to read. Reads the whole file if 0.
Returns:
Contents of specified AFF4 stream.
Raises:
TypeError: If `aff4_obj` is not an instance of AFF4 strea... | codesearchnet |
def build_relative_position(query_layer, key_layer, bucket_size: int=-1, max_position: int=-1):
query_size = query_layer.size(-2)
key_size = key_layer.size(-2)
q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device)
k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.... | Build relative position according to the query and key
We assume the absolute position of query \(P_q\) ranges over (0, query_size) and the absolute position of key
\(P_k\) ranges over (0, key_size). The relative position from query to key is \(R_{q \rightarrow k} = P_q -
P_k\)
Args:
query_size (int): the length... | github-repos |
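Before any bucketing, the core formula is just a broadcasted difference of position ids; a plain-PyTorch illustration:

```python
import torch

q_ids = torch.arange(3)                  # query positions P_q
k_ids = torch.arange(4)                  # key positions P_k
rel = q_ids[:, None] - k_ids[None, :]    # R[q, k] = P_q - P_k
print(rel)
# tensor([[ 0, -1, -2, -3],
#         [ 1,  0, -1, -2],
#         [ 2,  1,  0, -1]])
```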
def open(self, mode='r', encoding=None):
access_type = self._get_access_type(mode)
if access_type == 't' and encoding is not None and encoding != self.encoded_with:
warnings.warn('Attempting to decode %s as "%s", but encoding is declared as "%s"'
% (self, ... | Return file-like object
Args:
mode (str): access mode (only reading modes are supported)
encoding (str): text decoding method for text access (default: system default)
Returns:
io.BytesIO OR io.TextIOWrapper: buffer accessing the file as bytes or characters | juraj-google-style |
def __init__(self, platform, device):
self._platform = platform
self._device = device
if (self._platform, self._device) not in _context_cache:
context = cl.Context([device])
_context_cache[(self._platform, self._device)] = context
self._context = _conte... | Storage unit for an OpenCL environment.
Args:
platform (pyopencl platform): A PyOpenCL platform.
device (pyopencl device): A PyOpenCL device.
def rt_is_equiv_dense(rt):
return math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()]) | Returns true if this RaggedTensor has the same row_lengths across
all ragged dimensions and thus can be converted to a dense tensor
without loss of information.
Args:
rt: RaggedTensor. | github-repos |
def execute_before(self, sensor_graph, scope_stack):
parent = scope_stack[-1]
alloc = parent.allocator
connect_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
disconnect_stream = alloc.allocate_stream(DataStream.UnbufferedType,... | Execute statement before children are executed.
Args:
sensor_graph (SensorGraph): The sensor graph that we are building or
modifying
scope_stack (list(Scope)): A stack of nested scopes that may influence
how this statement allocates clocks or other stream resources. | juraj-google-style |
def simulate(self, action):
with tf.name_scope("environment/simulate"):
if action.dtype in (tf.float16, tf.float32, tf.float64):
action = tf.check_numerics(action, "action")
def step(action):
step_response = self._batch_env.step(action)
if len(step... | Step the batch of environments.
The results of the step can be accessed from the variables defined below.
Args:
action: Tensor holding the batch of actions to apply.
Returns:
Operation. | juraj-google-style |
def rewrite_filters_in_optional_blocks(ir_blocks):
new_ir_blocks = []
optional_context_depth = 0
for block in ir_blocks:
new_block = block
if isinstance(block, CoerceType):
raise AssertionError(u'Found a CoerceType block after all such blocks should have been lowered to Filter bl... | In optional contexts, add a check for null that allows non-existent optional data through.
Optional traversals in Gremlin represent missing optional data by setting the current vertex
to null until the exit from the optional scope. Therefore, filtering and type coercions
(which should have been lowered into filters by... | codesearchnet |
def find_slot(self, wanted, slots=None):
for slot in self.find_slots(wanted, slots):
return slot
return None | Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
Returns:
Optional[Slot]: The first slot containing the item
or None if not found. | codesearchnet |
def find_executable(executable):
logger = logging.getLogger(__name__)
logger.debug("Checking executable '%s'...", executable)
executable_path = _find_executable(executable)
found = (executable_path is not None)
if found:
logger.debug("Executable '%s' found: '%s'", executable, executable_path... | Finds executable in PATH
Returns:
string or None | codesearchnet |
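A typical call; the underlying `_find_executable` helper (not shown) is assumed to do the PATH lookup:

```python
path = find_executable("python")
print(path)  # e.g. '/usr/bin/python', or None if not on PATH
```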
def build_hpo_term(hpo_info):
try:
hpo_id = hpo_info['hpo_id']
except KeyError:
raise KeyError("Hpo terms has to have a hpo_id")
LOG.debug("Building hpo term %s", hpo_id)
try:
description = hpo_info['description']
except KeyError:
raise KeyError("Hpo ... | Build a hpo_term object
Check that the information is correct and add the correct hgnc ids to the
array of genes.
Args:
hpo_info(dict)
Returns:
hpo_obj(scout.models.HpoTerm): A dictionary with hpo information | juraj-google-style |
def __init__(self, usage=None, data=None):
super(TransactionAttribute, self).__init__()
self.Usage = usage
self.Data = data | Create an instance.
Args:
usage (neo.Core.TX.TransactionAttribute.TransactionAttributeUsage):
data (bytes): | juraj-google-style |
def ParseArguments(args):
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
    'counting=', 'filter=', 'root=', ... | Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint. | juraj-google-style |
def make_timebar(progress=0, duration=0):
duration_string = api_music.duration_to_string(duration)
if duration <= 0:
return "---"
time_counts = int(round((progress / duration) * TIMEBAR_LENGTH))
if time_counts > TIMEBAR_LENGTH:
time_counts = TIMEBAR_LENGTH
if duration > 0:
... | Makes a new time bar string
Args:
progress: How far through the current song we are (in seconds)
duration: The duration of the current song (in seconds)
Returns:
timebar (str): The time bar string | juraj-google-style |
def ensure_mingw_drive(win32_path):
(win32_drive, _path) = splitdrive(win32_path)
mingw_drive = ('/' + win32_drive[:(- 1)].lower())
mingw_path = (mingw_drive + _path)
return mingw_path | r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(resu... | codesearchnet |
def _refresh_grpc(operations_stub, operation_name):
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
return operations_stub.GetOperation(request_pb) | Refresh an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation. | juraj-google-style |
def list(self):
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if ('StackSummaries' in response):
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
... | List the existing stacks in the indicated region
Args:
None
Returns:
True if the stacks were listed successfully
Todo:
Figure out what could go wrong and take steps
to handle problems.
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
if not is_tf_available():
raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')
print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")
try:
import tensorflow as... | Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)
Args:
nlp: The pipeline to be exported
opset: The actual version of the ONNX operator set to use
output: Path where will be stored the generated ONNX model
Notes: TensorFlow cannot export model bigger than 2GB due to internal constraint from... | github-repos |
def parse_content_type(headers: MutableMapping) -> Tuple[Optional[str], str]:
content_type = headers.get("content-type")
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding | Find content-type and encoding of the response
Args:
headers: Response headers
Returns:
:py:class:`tuple` (content-type, encoding) | juraj-google-style |
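Example calls (note that `cgi.parse_header` was removed in Python 3.13, so this snippet assumes an older interpreter):

```python
print(parse_content_type({"content-type": "text/html; charset=ISO-8859-1"}))
# -> ('text/html', 'ISO-8859-1')
print(parse_content_type({}))
# -> (None, 'utf-8')
```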
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
... | Returns:
Example:
```python
>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> text = "My friends are cool but they eat... | github-repos |
def decode(pieces, sequence_length, model_file=None, model_proto=None, reverse=False, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_decode(pieces, sequence_length, model_file=model_file, model_proto=model_proto, reverse=reverse, name=name) | Decode pieces into postprocessed text.
Args:
pieces: A 2D int32 or string tensor [batch_size x max_length] of
encoded sequences.
sequence_length: A 1D int32 tensor [batch_size] representing the
length of pieces.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Eithe... | codesearchnet |
def send_message(channel_id, message):
channel = client.get_channel(channel_id)
if channel is None:
logger.info("{} is not a channel".format(channel_id))
return
data = datatools.get_data()
if not data["discord"]["servers"][channel.server.id][modulename]["activated"]:
... | Send a message to a channel
Args:
channel_id (str): The id of the channel to send the message to
message (str): The message to send to the channel | juraj-google-style |
def count_moves_in_game_range(self, game_begin, game_end):
rows = self.bt_table.read_rows(
ROWCOUNT_PREFIX.format(game_begin),
ROWCOUNT_PREFIX.format(game_end),
filter_=bigtable_row_filters.ColumnRangeFilter(
METADATA, MOVE_COUNT, MOVE_COUNT))
... | Count the total moves in a game range.
Args:
game_begin: integer, starting game
game_end: integer, ending game
Uses the `ct_` keyspace for rapid move summary. | juraj-google-style |
def format(self, compact: bool=False, verbose: bool=True, root_indent: int=0, **kwargs) -> str: | Formats this object into a string representation.
Args:
compact: If True, this object will be formatted into a single line.
verbose: If True, this object will be formatted with verbosity.
Subclasses should define `verbosity` on their own.
root_indent: The start indent level for this object if the output is a
multi-lin... | github-repos |
def scale(self, width: int, height: int) -> None:
lib.TCOD_image_scale(self.image_c, width, height)
(self.width, self.height) = (width, height) | Scale this Image to the new width and height.
Args:
width (int): The new width of the Image after scaling.
height (int): The new height of the Image after scaling. | codesearchnet |
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
if (len(self.segments) > 0):
return (self.segments[0].points[0].time.strftime(name_format) + '.gpx')
else:
return 'EmptyTrack' | Generates a name for the track
The name is generated based on the date of the first point of the
track, or in case it doesn't exist, "EmptyTrack"
Args:
name_format (str, optional): Name format to give to the track, based on
its start time. Defaults to DEFAULT_FILE_NAME_FORMAT
Returns:
str | codesearchnet |
def _remove_curly_braces(text):
current_pos = 0
depth = 0
ret = ""
for match in re.finditer("[{}]", text):
if depth == 0:
ret += text[current_pos:match.start()]
depth += 1 if text[match.start()] == "{" else -1
current_pos = match.end()
if depth == 0:
    ret += text[current_pos:]
return ret | Remove everything in curly braces.
Curly braces may be nested, so we keep track of depth.
Args:
text: a string
Returns:
a string | juraj-google-style |
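With the tail handling restored above, the behavior looks like this:

```python
print(_remove_curly_braces("keep {drop {nested}} this"))
# -> 'keep  this'   (nested braces removed; unbalanced input drops the tail)
```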
def dump_migration_session_state(raw):
class BlockStyle(str):
pass
class SessionDumper(yaml.SafeDumper):
pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_fo... | Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "l... | codesearchnet |
def find_all(self, kw: YangIdentifier,
pref: YangIdentifier = None) -> List["Statement"]:
return [c for c in self.substatements
if c.keyword == kw and c.prefix == pref] | Return the list all substatements with the given keyword and prefix.
Args:
kw: Statement keyword (local part for extensions).
pref: Keyword prefix (``None`` for built-in statements). | juraj-google-style |
def _set_root(self, request):
if request.state_root:
root = request.state_root
else:
head = self._get_chain_head()
root = head.state_root_hash
try:
self._tree.set_merkle_root(root)
except KeyError as e:
LOGGER.debug('Unable to find root "%s" in database', e)
... | Sets the root of the merkle tree, returning any head id used.
Note:
This method will fail if `_tree` has not been set
Args:
request (object): The parsed protobuf request object
Returns:
str: the state root of the head block used to specify the root
Raises:
ResponseFailed: Failed to set the root if the merkle tree | codesearchnet |
def assert_proper_iterable(values):
unintentional_iterables = (tensor_lib.Tensor, sparse_tensor.SparseTensor, np.ndarray) + compat.bytes_or_text_types
if isinstance(values, unintentional_iterables):
raise TypeError('Expected argument "values" to be a "proper" iterable. Found: %s' % type(values))
if... | Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
Useful since `Tensor`, `ndarray`, byte/text type are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `Sparse... | github-repos |
def create_from_json(cls, json_data):
block = Block()
block_info = json_data["block_info"]
block.block_id = block_info["block_id"]
block.num_bins = block_info["num_bins"] if "num_bins" in block_info else None
block.property_type = block_info["property_type"] if "property... | Deserialize block json data into a Block object
Args:
json_data (dict): The json data for this block
Returns:
Block object | juraj-google-style |
def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid], acquaintance_size: int=0) -> circuits.Circuit:
if (acquaintance_size < 0):
raise ValueError('acquaintance_size must be non-negative.')
elif (acquaintance_size == 0):
return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)... | Returns an acquaintance strategy capable of executing a gate corresponding
to any set of at most acquaintance_size qubits.
Args:
qubit_order: The qubits on which the strategy should be defined.
acquaintance_size: The maximum number of qubits to be acted on by
an operation.
Returns:
A circuit capable of implementing ... | codesearchnet |
def workflow_stages(self) -> List[WorkflowStage]:
workflow_stages = []
stages = DB.get_hash_value(self.key, 'workflow_stages')
for index in range(len(ast.literal_eval(stages))):
workflow_stages.append(WorkflowStage(self.id, index))
return workflow_stages | Return list of workflow stages.
Returns:
list(WorkflowStage): the workflow stages of this block | codesearchnet |
async def create_artifact(context, path, target_path, content_type, content_encoding, storage_type='s3', expires=None):
payload = {'storageType': storage_type, 'expires': (expires or get_expiration_arrow(context).isoformat()), 'contentType': content_type}
args = [get_task_id(context.claim_task), get_run_id(cont... | Create an artifact and upload it.
This should support s3 and azure out of the box; we'll need some tweaking
if we want to support redirect/error artifacts.
Args:
context (scriptworker.context.Context): the scriptworker context.
path (str): the path of the file to upload.
target_path (str): the target path of the artifact on the task.
content_type (str): Content... | codesearchnet |
def _get_batches_of_transformed_samples(self, index_array):
raise NotImplementedError | Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples. | github-repos |
def send(msg_type, send_async=False, *args, **kwargs):
message = message_factory(msg_type, *args, **kwargs)
try:
if send_async:
message.send_async()
else:
message.send()
except MessageSendError as e:
err_exit('Unable to send message: ', e) | Constructs a message class and sends the message.
Defaults to sending synchronously. Set send_async=True to send
asynchronously.
Args:
:msg_type: (str) the type of message to send, i.e. 'Email'
:send_async: (bool) default is False, set True to send asynchronously.
:kwargs: (dict) keywords arguments that are required ... | codesearchnet |
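A hypothetical call; the keyword arguments depend on the message class being constructed, so the ones below are assumptions:

```python
send('Email', to='dev@example.com', subject='Build OK', body='All green.')
send('Email', send_async=True, to='dev@example.com', subject='Async ping')
```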
def create_model(text_in, timesteps, phase):
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=1e-05):
with tf.device('/cpu:0'):
embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
lstm = embedded.cleave_sequence(timesteps).sequence_lstm(LOWER).sequence_lstm(UPPER)
... | Creates a 2 layer LSTM model with dropout.
Args:
text_in: The input text as ASCII ordinals in a Tensor.
timesteps: The number of timesteps in the sequence.
phase: Phase controls whether or not dropout is active. In training mode
we want to perform dropout, but in test we want to disable it.
Returns:
The logits. | codesearchnet |
def update(self, resource, id_or_uri):
return self._client.update(resource=resource, uri=id_or_uri) | Updates a registered Device Manager.
Args:
resource (dict): Object to update.
id_or_uri: Can be either the Device manager ID or URI.
Returns:
dict: The device manager resource. | codesearchnet |
def get_hostname(url):
if (url not in URLHelper.__cache):
URLHelper.__cache[url] = urlparse(url)
parts = URLHelper.__cache[url].netloc.split('.')
if (len(parts) == 1):
return parts[0]
else:
return '.'.join(parts[(- 2):(- 1)]) | Get the hostname of the given URL.
Args:
url (str): The URL to get the hostname from.
Returns:
str: The hostname | codesearchnet |
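A standalone sketch of the same logic (the original is a cached static method on `URLHelper`); note it returns only the second-level label:

```python
from urllib.parse import urlparse

def hostname(url):
    parts = urlparse(url).netloc.split('.')
    return parts[0] if len(parts) == 1 else '.'.join(parts[-2:-1])

print(hostname("https://www.example.com/path"))  # -> 'example'
print(hostname("localhost"))                     # -> '' (no scheme -> empty netloc)
```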
def __batch_update(self, train_events, test_events, n_epoch):
for epoch in range(n_epoch):
if n_epoch != 1:
np.random.shuffle(train_events)
for e in train_events:
self.rec.update(e, batch_train=... | Batch update called by the fitting method.
Args:
train_events (list of Event): Positive training events.
test_events (list of Event): Test events.
n_epoch (int): Number of epochs for the batch training. | juraj-google-style |
def similar(self, **kwargs):
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
A dict representation of the JSON returned from the API. | codesearchnet |
def check_valid_cpc_status(method, uri, cpc):
status = cpc.properties.get('status', None)
if (status is None):
return
valid_statuses = ['active', 'service-required', 'degraded', 'exceptions']
if (status not in valid_statuses):
if uri.startswith(cpc.uri):
raise ConflictError(m... | Check that the CPC is in a valid status, as indicated by its 'status'
property.
If the Cpc object does not have a 'status' property set, this function does
nothing (in order to make the mock support easy to use).
Raises:
ConflictError with reason 1: The CPC itself has been targeted by the
operation.
ConflictError wit... | codesearchnet |
def _get_arg_parser(func, types, args_and_defaults, delimiter_chars):
_LOG.debug("Creating ArgumentParser for '%s'", func.__name__)
(description, arg_help) = _prepare_doc(
func, [x for (x, _) in args_and_defaults], delimiter_chars)
parser = argparse.ArgumentParser(description=description)
f... | Return an ArgumentParser for the given function. Arguments are defined
from the function arguments and their associated defaults.
Args:
func: function for which we want an ArgumentParser
types: types to which the command line arguments should be converted to
args_and_defaults: list of 2-tuples (arg_name, arg_default)
... | juraj-google-style |
def coarse_grain(G, ncg):
if ncg <= 1:
return G
G = numpy.asarray(G)
nbin, remainder = divmod(G.shape[-1], ncg)
if remainder != 0:
nbin += 1
return numpy.transpose([
numpy.sum(G[..., i:i+ncg], axis=-1) / G[..., i:i+ncg].shape[-1]
... | Coarse-grain last index of array ``G``.
Bin the last index of array ``G`` in bins of width ``ncg``, and
replace each bin by its average. Return the binned results.
Args:
G: Array to be coarse-grained.
ncg: Bin width for coarse-graining. | juraj-google-style |
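A small numeric check of the binning (full bins only; the truncated trailing-bin branch follows the same averaging):

```python
import numpy as np

G = np.arange(6.0)              # [0, 1, 2, 3, 4, 5]
print(coarse_grain(G, 2))       # -> [0.5, 2.5, 4.5]
print(coarse_grain(G, 1) is G)  # ncg <= 1 returns the input unchanged -> True
```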
def parse_flux_bounds(entry):
lower_bound = None
upper_bound = None
for parameter in entry.kinetic_law_reaction_parameters:
pid, name, value, units = parameter
if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':
upper_bound = value
elif pid == 'LOWER_BOUND' or name == ... | Return flux bounds for reaction entry.
Detect flux bounds that are specified using the non-standardized
kinetic law parameters which are used by many pre-FBC SBML models. The
flux bounds are returned as a pair of lower, upper bounds. The returned
bound is None if undefined.
Args:
entry: :class:`SBMLReactionEntry`. | juraj-google-style |
def _probe_characteristics_finished(self, result):
handle = result['context']['handle']
conn_id = result['context']['connection_id']
conndata = self._get_connection(handle, 'preparing')
if (conndata is None):
self._logger.info('Connection disconnected before probe_char... finished, conn_id=%d', ... | Callback when BLE adapter has finished probing services and characteristics for a device
Args:
result (dict): Result from the probe_characteristics command | codesearchnet |
def process_response(self, req, resp, resource):
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
except (TypeError, ValueError):  # json.dumps raises these, not NameError
resp.status = falcon.HTTP_500
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request. | juraj-google-style |
def parse_individual(sample):
ind_info = {}
if ('sample_id' not in sample):
raise PedigreeError("One sample is missing 'sample_id'")
sample_id = sample['sample_id']
if ('sex' not in sample):
raise PedigreeError(("Sample %s is missing 'sex'" % sample_id))
sex = sample['sex']
if (s... | Parse individual information
Args:
sample (dict)
Returns:
{
'individual_id': str,
'father': str,
'mother': str,
'display_name': str,
'sex': str,
'phenotype': str,
'bam_file': str,
'vcf2cytosure': str,
'analysis_type': str,
'capture_kits': list(str),
} | codesearchnet |
def get_or_generate_vocabulary(data_dir, tmp_dir, data_prefix, max_page_size_exp, approx_vocab_size=32768, strip=True):
num_pages_for_vocab_generation = (approx_vocab_size
vocab_file = vocab_filename(approx_vocab_size, strip)
def my_generator(data_prefix):
'Line generator for vocab.'
count... | Get or generate the vocabulary.
Args:
data_dir: a string
tmp_dir: a string
data_prefix: a string
max_page_size_exp: an integer
approx_vocab_size: an integer
strip: a boolean
Returns:
a TextEncoder | codesearchnet |
def _format_src_url(self, path, caller_system):
path = ('%s/%s' % (self._endpoint, self.relpath(path)))
if (caller_system is not self):
try:
path = ('%s?%s' % (path, self._storage_parameters['sas_token']))
except KeyError:
pass
return path | Ensure path is absolute and use the correct URL format for use with
cross Azure storage account copy function.
Args:
path (str): Path or URL.
caller_system (pycosio.storage.azure._AzureBaseSystem subclass):
System calling this method (Can be another Azure system).
Returns:
str: URL. | codesearchnet |
def HandleSimpleResponses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms) | Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message. | juraj-google-style |
def check_array_lengths(inputs, targets, weights=None):
def is_tensor_or_composite_tensor(x):
return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)
def set_of_lengths(x):
if x is None:
return {}
else:
return set([y.shape[0] for y in x if y is no... | Does user input validation for numpy arrays.
Args:
inputs: list of Numpy arrays of inputs.
targets: list of Numpy arrays of targets.
weights: list of Numpy arrays of sample weights.
Raises:
ValueError: in case of incorrectly formatted data. | github-repos |
def users(self, proc):
ret = {}
if (self.first_column in ['USER', 'UID']):
for row in self.data:
if (proc == row[self.command_name]):
if (row[self.first_column] not in ret):
ret[row[self.first_column]] = []
ret[row[self.first_column]].appen... | Searches for all users running a given command.
Returns:
dict: each username as a key to a list of PIDs (as strings) that
are running the given process.
``{}`` if neither ``USER`` nor ``UID`` is found or ``proc`` is not found.
.. note::
'proc' must match the entire command and arguments. | codesearchnet |
def check_channel(fcn):
def wrapper(*args, **kwargs):
if (not isinstance(args[1], ChannelResource)):
raise RuntimeError('resource must be an instance of intern.resource.boss.ChannelResource.')
if (not args[1].cutout_ready):
raise PartialChannelResourceError('ChannelResource ... | Decorator that ensures a valid channel passed in.
Args:
fcn (function): Function that has a ChannelResource as its second argument.
Returns:
(function): Wraps given function with one that checks for a valid channel. | codesearchnet |
def bessel_i0(x, name=None):
with ops.name_scope(name, 'bessel_i0', [x]):
return gen_special_math_ops.bessel_i0(x) | Computes the Bessel i0 function of `x` element-wise.
Modified Bessel function of order 0.
It is preferable to use the numerically stabler function `i0e(x)` instead.
>>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy()
array([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32)
Args:
x: A `Tensor` ... | github-repos |
def differential(P, Q):
P, Q = Poly(P), Poly(Q)
if not chaospy.poly.is_decomposed(Q):
return differential(P, chaospy.poly.decompose(Q)).sum(0)
if Q.shape:
return Poly([differential(P, q) for q in Q])
if Q.dim>P.dim:
P = chaospy.poly.setdim(P, Q.dim)
else:
Q = chaospy.po... | Polynomial differential operator.
Args:
P (Poly):
Polynomial to be differentiated.
Q (Poly):
Polynomial to differentiate by. Must be decomposed. If polynomial
array, the output is the Jacobian matrix. | juraj-google-style |
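A hypothetical check against the old chaospy `Poly` API this snippet targets (newer chaospy releases replaced this interface, so treat this as a sketch):

```python
import chaospy

q0, q1 = chaospy.variable(2)
print(differential(q0**2 + q0*q1, q0))  # expected: 2q0+q1
```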
def _extract_namespace_ast_node(self, desc):
if ((len(desc) == 0) or (not isinstance(desc[0], AstNamespace))):
if self._debug:
self._logger.info('Description: %r', desc)
raise InvalidSpec('First declaration in a stone must be a namespace. Possibly caused by preceding errors.', desc[0].li... | Checks that the namespace is declared first in the spec, and that only
one namespace is declared.
Args:
desc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec
file in the order they were defined.
Return:
stone.frontend.ast.AstNamespace: The namespace AST node. | codesearchnet |
def modify_lattice(self, new_lattice):
self._lattice = new_lattice
for site in self._sites:
site.lattice = new_lattice | Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice (Lattice): New lattice | codesearchnet |
def FindEnumTypeByName(self, full_name):
full_name = _NormalizeFullyQualifiedName(full_name)
if (full_name not in self._enum_descriptors):
self.FindFileContainingSymbol(full_name)
return self._enum_descriptors[full_name] | Loads the named enum descriptor from the pool.
Args:
full_name: The full name of the enum descriptor to load.
Returns:
The enum descriptor for the named type. | codesearchnet |
def run_board(args):
init_config(args)
from backend.collector import CollectorService
service = CollectorService(args.logdir, args.reload_interval, standalone=False, log_level=args.log_level)
service.run()
logger.info(('Try to start automlboard on port %s\n' % args.port))
command = [os.path.join... | Run main entry for AutoMLBoard.
Args:
args: args parsed from command line | codesearchnet |
async def game(self, short_name, *, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None):
result = types.InputBotInlineResultGame(id=(id or ''), short_name=short_name, send_message=(await self._message(text=text, parse_mode=parse_mode, link_preview=link_... | Creates a new inline result of game type.
Args:
short_name (`str`):
The short name of the game to use. | codesearchnet |
def save_array_types(self, fname):
type_defs = {'arrays': sorted(list(self.array_types))}
with open(fname, 'wt') as fh:
pprint(type_defs, stream=fh) | Save array type registry to a file
Args:
fname (str): Name of file to save array database to | juraj-google-style |
def encode_bqm_as_qp(solver, linear, quadratic):
active = active_qubits(linear, quadratic)
nan = float('nan')
lin = [uniform_get(linear, qubit, 0 if qubit in active else nan)
for qubit in solver._encoding_qubits]
lin = base64.b64encode(struct.pack('<' + ('d... | Encode the binary quadratic problem for submission to a given solver,
using the `qp` format for data.
Args:
solver (:class:`dwave.cloud.solver.Solver`):
The solver used.
linear (dict[variable, bias]/list[variable, bias]):
Linear terms of the model.
quadratic (dict[(variable, variable), bias]):
Quadratic terms of the... | juraj-google-style |
def unbroadcast_tfe_to(tensor, shape):
axis = utils.create_unbroadcast_axis(shape, shape_as_list(tensor))
return tf.reshape(tf.reduce_sum(tensor, axis=axis), shape) | Reverse the broadcasting operation.
See utils.py.
Args:
tensor: A Tensor.
shape: A shape that could have been broadcasted to the shape of tensor.
Returns:
Tensor with dimensions summed to match `shape`. | juraj-google-style |
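The operation is "sum over the broadcasted axes, then reshape back"; a self-contained TF sketch without the `utils` helper:

```python
import tensorflow as tf

t = tf.ones([2, 3])            # pretend this came from broadcasting shape (1, 3)
axis = (0,)                    # the axis along which (1, 3) was expanded
out = tf.reshape(tf.reduce_sum(t, axis=axis), (1, 3))
print(out)                     # [[2. 2. 2.]]
```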
def id_to_piece(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_id_to_piece(input, model_file=model_file, model_proto=model_proto, name=name) | Converts vocabulary id into piece.
Args:
input: An arbitrary tensor of int32.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of string wi... | codesearchnet |
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RevokeResponsePayload, self).read(
istream,
kmip_version=kmip_version
)
tstream = BytearrayStream(istream.read(self.length))
self.unique_identifier = attributes.UniqueIdentifier()
... | Read the data encoding the RevokeResponsePayload object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object w... | juraj-google-style |
def variant_export_lines(store, case_obj, variants_query):
export_variants = []
for variant in variants_query:
variant_line = []
position = variant['position']
change = variant['reference']+'>'+variant['alternative']
variant_line.append(variant['rank_score'])
varia... | Get variants info to be exported to file, one list (line) per variant.
Args:
store(scout.adapter.MongoAdapter)
case_obj(scout.models.Case)
variants_query: a list of variant objects, each one is a dictionary
Returns:
export_variants: a list of strings. Each string of the list corresponding to the fields
of a variant ... | juraj-google-style |
def save(f, arr, vocab):
itr = iter(vocab)
(word, idx) = next(itr)
_write_line(f, arr[idx], word)
for (word, idx) in itr:
f.write(b'\n')
_write_line(f, arr[idx], word) | Save word embedding file.
Args:
f (File): File to write the vectors. File should be open for writing
ASCII.
arr (numpy.array): Numpy array with ``float`` dtype.
vocab (iterable): Each element is a pair of a word (``bytes``) and ``arr``
index (``int``). Words should be encoded to str a priori. | codesearchnet |