code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def whois_emails(self, emails):
api_name = 'opendns-whois-emails'
fmt_url_path = u'whois/emails/{0}'
return self._multi_get(api_name, fmt_url_path, emails) | Calls WHOIS Email end point
Args:
emails: An enumerable of string Emails
Returns:
A dict of {email: domain_result} | juraj-google-style |
def __init__(self, grammar, latent_size, num_units):
super(ProbabilisticGrammar, self).__init__()
self.grammar = grammar
self.latent_size = latent_size
self.lstm = tf.compat.v1.nn.rnn_cell.LSTMCell(num_units)
self.output_layer = tf.keras.layers.Dense(len(grammar.production_rules)) | Constructs a probabilistic grammar.
Args:
grammar: An object representing a grammar. It has members
`nonterminal_symbols`, `alphabet`, `production_rules`, and
`start_symbol`, and a method `mask` determining (in)valid
production rules given a symbol.
latent_size: Number of dimensions in the latent code.
num_units: Numb... | juraj-google-style |
async def enqueue(self, query, queue_index=None, stop_current=False, shuffle=False):
if query is None or query == "":
return
self.statuslog.info("Parsing {}".format(query))
self.logger.debug("Enqueueing from query")
indexnum = None
if queue_index is not No... | Queues songs based on either a YouTube search or a link
Args:
query (str): Either a search term or a link
queue_index (str): The queue index to enqueue at (None for end)
stop_current (bool): Whether to stop the current song after the songs are queued
shuffle (bool): Whether to shuffle the added songs | juraj-google-style |
def get_associated_profiles(self):
uri = "{}/associatedProfiles".format(self.data['uri'])
return self._helper.do_get(uri) | Gets the URIs of profiles which are using an Ethernet network.
Args:
id_or_uri: Can be either the logical interconnect group id or the logical interconnect group uri
Returns:
list: URIs of the associated profiles. | juraj-google-style |
def gbest_idx(swarm):
best = 0
cmp = comparator(swarm[best].best_fitness)
for (idx, particle) in enumerate(swarm):
if cmp(particle.best_fitness, swarm[best].best_fitness):
best = idx
return best | gbest Neighbourhood topology function.
Args:
swarm: list: The list of particles.
Returns:
int: The index of the gbest particle. | juraj-google-style |
def _parse_query_key(self, key, val, is_escaped):
if key.endswith('__contains'):
key = key[:-10]
val = self._parse_query_modifier('contains', val, is_escaped)
elif key.endswith('__range'):
key = key[:-7]
val = self._parse_query_modifier('range', v... | Strips query modifier from key and call's the appropriate value modifier.
Args:
key (str): Query key
val: Query value
Returns:
Parsed query key and value. | juraj-google-style |
def set_device_policy(device_policy):
if device_policy == 'silent':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT
elif device_policy == 'silent_for_int32':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32
elif device_policy == 'warn':
co... | Sets the current thread device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
When using the default, an appropriate policy will be picked automatically.
The default policy may change over time.
This function ... | github-repos |
def WriteUInt160(self, value):
if (type(value) is UInt160):
value.Serialize(self)
else:
raise Exception('value must be UInt160 instance ') | Write a UInt160 type to the stream.
Args:
value (UInt160):
Raises:
Exception: when `value` is not of neocore.UInt160 type. | codesearchnet |
def CopyFrom(self, other_msg):
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg) | Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one. | juraj-google-style |
def _update_section_state(line_info, state):
section_updated = False
google_section_permitted = _google_section_permitted(line_info, state)
google_section = google_section_permitted and _google_section(line_info)
if google_section:
state.section.format = Formats.GOOGLE
state.section.titl... | Uses line_info to determine the current section of the docstring.
Updates state and line_info.remaining.
Args:
line_info: Information about the current line.
state: The state of the parser. | github-repos |
def load_qrandom():
fname = 'datasets/qrandom.npy'
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f) | Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset | codesearchnet |
def output_reference(self, name):
if (name not in self.output_names):
raise ValueError('Invalid output "{}"'.format(name))
return Reference(step_name=self.name_in_workflow, output_name=name) | Return a reference to the given output for use in an input
of a next Step.
For a Step named `echo` that has an output called `echoed`, the
reference `echo/echoed` is returned.
Args:
name (str): the name of the Step output
Raises:
ValueError: The name provided is not a valid output name for this
Step. | codesearchnet |
def __init__(self, graph_def, input_tensors, output_tensors, input_arrays_with_shape=None, output_arrays=None, experimental_debug_info_func=None):
super(TFLiteConverter, self).__init__(graph_def, input_tensors, output_tensors, input_arrays_with_shape, output_arrays, experimental_debug_info_func) | Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor n... | github-repos |
def coder_benchmark_factory(coder, generate_fn):
class CoderBenchmark(object):
def __init__(self, num_elements_per_benchmark):
self._coder = coders.IterableCoder(coder)
self._list = [generate_fn() for _ in range(num_elements_per_benchmark)]
def __call__(self):
... | Creates a benchmark that encodes and decodes a list of elements.
Args:
coder: coder to use to encode an element.
generate_fn: a callable that generates an element. | github-repos |
def _pack(formatstring, value):
_checkString(formatstring, description='formatstring', minlength=1)
try:
result = struct.pack(formatstring, value)
except:
errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'
errortext += ' Value:... | Pack a value into a bytestring.
Uses the built-in :mod:`struct` Python module.
Args:
* formatstring (str): String for the packing. See the :mod:`struct` module for details.
* value (depends on formatstring): The value to be packed
Returns:
A bytestring (str).
Raises:
ValueError
Note that the :mod:`struct` module p... | juraj-google-style |
def __init__(self, xid=None, command=None, flags=None, meter_id=None,
bands=None):
super().__init__(xid)
self.command = command
self.flags = flags
self.meter_id = meter_id
self.bands = bands | Create a MeterMod with the optional parameters below.
Args:
xid (int): Headers transaction id. Defaults to random.
command (MeterModCommand): One of OFPMC_*.
flags (MeterFlags): One of OFPMF_*.
meter_id (int): Meter instance.
bands (MeterBandHeader): The bands length is inferred from the
length field in the header. | juraj-google-style |
def _einsum_v1_parse_and_resolve_equation(equation, input_shapes):
equation = equation.replace(' ', '')
match = re.match('^([a-zA-Z,.]+)(->[a-zA-Z.]*)?$', equation)
if not match:
raise ValueError(f'Indices have incorrect format. Received: {equation}.')
input_axis_labels = match.group(1).split(',... | Helper for einsum() that splits/resolves inputs & outputs.
Args:
equation: Equation string given as argument to einsum().
input_shapes: List of the shapes of all inputs given to einsum()
Returns:
input_axis_labels, output_axis_labels where:
input_axis_labels: List of length len(input_shapes) of strings
representing t... | github-repos |
def update(self, rid, data, raise_on_error=True):
cache_data = {'cache-date': self._dt_to_epoch(datetime.now()), 'cache-data': data}
return self.ds.put(rid, cache_data, raise_on_error) | Write updated cache data to the DataStore.
Args:
rid (str): The record identifier.
data (dict): The record data.
raise_on_error (bool): If True and not r.ok this method will raise a RunTimeError.
Returns:
object : Python request response. | juraj-google-style |
def ExtractEvents(
self, parser_mediator, registry_key, codepage='cp1252', **kwargs):
self._ParseMRUListKey(parser_mediator, registry_key, codepage=codepage) | Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
codepage (Optional[str]): extended ASCII string codepage. | juraj-google-style |
def expected_h(nvals, fit="RANSAC"):
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0] | Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares pol... | juraj-google-style |
def _ParsePlistKeyValue(self, knowledge_base, name, value):
if not knowledge_base.GetValue('keyboard_layout'):
if name in self._PLIST_KEYS:
if isinstance(value, (list, tuple)):
value = value[0]
_, _, keyboard_layout = value.rpartition('.')
knowledge_base.SetValue('keyb... | Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key. | juraj-google-style |
def _log_device_compatibility_check(policy_name, gpu_details_list):
if policy_name != 'mixed_float16':
return
supported_device_strs = []
unsupported_device_strs = []
for details in gpu_details_list:
name = details.get('device_name', 'Unknown GPU')
cc = details.get('compute_capabi... | Logs a compatibility check if the devices support the policy.
Currently only logs for the policy mixed_float16.
Args:
policy_name: The name of the dtype policy.
gpu_details_list: A list of dicts, one dict per GPU. Each dict
is the device details for a GPU, as returned by
`tf.config.experimental.get_device_details()`. | github-repos |
def load(self, validate=True):
self._load()
try:
self.config = self._load_config(self.system_config_file)
user = self._load_config(self.global_config_file)
config = self._load_config(self.config_file)
local = self._load_config(self.config_local_file)
for conf in [user, co... | Loads config from all the config files.
Args:
validate (bool): optional flag to tell dvc if it should validate
the config or just load it as is. 'True' by default.
Raises:
dvc.config.ConfigError: thrown if config has invalid format. | codesearchnet |
def __init__(self, url, access_token, index,
source="parsedmarc", verify=True, timeout=60):
url = urlparse(url)
self.url = "{0}:
url.netloc)
self.access_token = access_token.lstrip("Splunk ")
sel... | Initializes the HECClient
Args:
url (str): The URL of the HEC
access_token (str): The HEC access token
index (str): The name of the index
source (str): The source name
verify (bool): Verify SSL certificates
timeout (float): Number of seconds to wait for the server to send
data before giving up | juraj-google-style |
def comment(data, what):
data = data.splitlines()
data = map((lambda x: (('
return '\n'.join(data) | Comments line containing `what` in string `data`.
Args:
data (str): Configuration file in string.
what (str): Line which will be commented out.
Returns:
str: Configuration file with commented `what`. | codesearchnet |
def expect_false(condition, msg, extras=None):
try:
asserts.assert_false(condition, msg, extras)
except signals.TestSignal as e:
logging.exception('Expected a `False` value, got `True`.')
recorder.add_error(e) | Expects an expression evaluates to False.
If the expectation is not met, the test is marked as fail after its
execution finishes.
Args:
expr: The expression that is evaluated.
msg: A string explaining the details in case of failure.
extras: An optional field for extra information to be included in test
result. | github-repos |
def markdown_to_text(body):
md = markdown.markdown(body, extensions=[
'markdown.extensions.extra'
])
soup = BeautifulSoup(md, 'html.parser')
return soup.get_text() | Converts markdown to text.
Args:
body: markdown (or plaintext, or maybe HTML) input
Returns:
Plaintext with all tags and frills removed | juraj-google-style |
def __init__(self, event_timestamp, duration=5):
super(TimeSlice, self).__init__()
self.duration = duration
self.event_timestamp = event_timestamp | Initializes the time slice.
Args:
event_timestamp (int): event timestamp of the time slice or None.
duration (Optional[int]): duration of the time slice in minutes.
The default is 5, which represent 2.5 minutes before and 2.5 minutes
after the event timestamp. | juraj-google-style |
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
inputs = rename_length_to_memory_length(inputs)
return (mtf.cast(mtf.equal(inputs, 0), dtype) * (- 1000000000.0)) | Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim] | codesearchnet |
def clean(self, force: bool=False):
with (yield from self._lock):
for connection in tuple(self.ready):
if (force or connection.closed()):
connection.close()
self.ready.remove(connection) | Clean closed connections.
Args:
force: Clean connected and idle connections too.
Coroutine. | codesearchnet |
def protocol_version_to_kmip_version(value):
if not isinstance(value, ProtocolVersion):
return None
if value.major == 1:
if value.minor == 0:
return enums.KMIPVersion.KMIP_1_0
elif value.minor == 1:
return enums.KMIPVersion.KMIP_1_1
elif value.minor ... | Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent.
Args:
value (ProtocolVersion): A ProtocolVersion struct to be converted into
a KMIPVersion enumeration.
Returns:
KMIPVersion: The enumeration equivalent of the struct. If the struct
cannot be converted to a valid enumeration, None is returned... | juraj-google-style |
def get(name):
for matcher in matchers:
if ((matcher.__name__ == name) or (getattr(matcher, 'name', None) == name)):
return matcher | Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``. | codesearchnet |
def key_prefix(self) -> str:
return self.__class__.__qualname__ | Prefix for key to avoid collisions from different Processors.
Defaults to classname. Processor() should override this if, for example, it
accepts arguments that change output of __call__.
Returns:
Prefix that will be added to key. | github-repos |
def NewFromJSON(data):
if data.get('shakes', None):
shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]
else:
shakes = None
return User(id=data.get('id', None), name=data.get('name', None), profile_image_url=data.get('profile_image_url', None), about=data.get('about', None), webs... | Create a new User instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a user.
Returns:
A User instance. | codesearchnet |
def apply(self, func, **kwargs):
oid = self.oid
self.call_queue.append((func, kwargs))
def call_queue_closure(oid_obj, call_queues):
for func, kwargs in call_queues:
if isinstance(func, ray.ObjectID):
func = ray.get(func)
... | Apply a function to the object stored in this partition.
Note: It does not matter if func is callable or an ObjectID. Ray will
handle it correctly either way. The keyword arguments are sent as a
dictionary.
Args:
func: The function to apply.
Returns:
A RayRemotePartition object. | juraj-google-style |
def _GetStatus(self, two_factor=False):
params = ['status']
if two_factor:
params += ['--twofactor']
retcode = self._RunOsLoginControl(params)
if retcode is None:
if self.oslogin_installed:
self.logger.warning('OS Login not installed.')
self.oslogin_installed = False
... | Check whether OS Login is installed.
Args:
two_factor: bool, True if two factor should be enabled.
Returns:
bool, True if OS Login is installed. | juraj-google-style |
def from_representation(self, representation):
object_dict = {}
failed = {}
for (name, field) in self.fields.items():
if (name not in representation):
continue
try:
if ((not isinstance(representation[name], (list, tuple))) and field.many):
raise ValueE... | Convert given representation dict into internal object.
Internal object is simply a dictionary of values with respect to field
sources.
This does not check if all required fields exist or values are
valid in terms of value validation
(see: :meth:`BaseField.validate()`) but still requires all of passed
representation ... | codesearchnet |
def _average_precision(self, rec, prec):
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec... | calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float | juraj-google-style |
def old_tracer_correlation( self ):
if self.has_run:
return self.atoms.sum_dr_squared() / float( self.number_of_jumps )
else:
return None | Deprecated tracer correlation factor for this simulation.
Args:
None
Returns:
(Float): The tracer correlation factor, f.
Notes:
This function assumes that the jump distance between sites has
been normalised to a=1. If the jump distance is not equal to 1
then the value returned by this function should be divided by a... | juraj-google-style |
def listen_now_items(self):
response = self._call(mc_calls.ListenNowGetListenNowItems)
listen_now_item_list = response.body.get('listennow_items', [])
listen_now_items = defaultdict(list)
for item in listen_now_item_list:
type_ = f"{ListenNowItemType(item['type']).name}s"
listen_now_item... | Get a listing of Listen Now items.
Note:
This does not include situations;
use the :meth:`situations` method instead.
Returns:
dict: With ``albums`` and ``stations`` keys of listen now items. | codesearchnet |
def get_instance_type_parameter(self, name: str, node: 'cfg.CFGNode | None'=None):
del name
if node is None:
node = self.ctx.root_node
return self.ctx.new_unsolvable(node) | Get a cfg.Variable of the instance's values for the type parameter.
Treating self as an abstract.Instance, gets the variable of its values for
the given type parameter. For the real implementation, see
SimpleValue.get_instance_type_parameter.
Args:
name: The name of the type parameter.
node: Optionally, the current C... | github-repos |
def concept(self, mechanism, purviews=False, cause_purviews=False, effect_purviews=False):
log.debug('Computing concept %s...', mechanism)
if (not mechanism):
log.debug('Empty concept; returning null concept')
return self.null_concept
cause = self.mic(mechanism, purviews=(cause_purviews or p... | Return the concept specified by a mechanism within this subsytem.
Args:
mechanism (tuple[int]): The candidate set of nodes.
Keyword Args:
purviews (tuple[tuple[int]]): Restrict the possible purviews to
those in this list.
cause_purviews (tuple[tuple[int]]): Restrict the possible cause
purviews to those in this list. ... | codesearchnet |
def longest_one_seg_prefix(self, word):
match = self.seg_regex.match(word)
if match:
return match.group(0)
else:
return '' | Return longest IPA Unicode prefix of `word`
Args:
word (unicode): word as IPA string
Returns:
unicode: longest single-segment prefix of `word` | codesearchnet |
def initialize_from_assignments(assignments, k, max_assign_weight=0.75):
cells = len(assignments)
init_W = np.zeros((k, cells))
for i, a in enumerate(assignments):
init_W[a, i] = max_assign_weight
for a2 in range(k):
if a2!=a:
init_W[a2, i] ... | Creates a weight initialization matrix from Poisson clustering assignments.
Args:
assignments (array): 1D array of integers, of length cells
k (int): number of states/clusters
max_assign_weight (float, optional): between 0 and 1 - how much weight to assign to the highest cluster. Default: 0.75
Returns:
init_W (array)... | juraj-google-style |
def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
import shlex
if isinstance(command, (list, tuple)):
raise ValueError('command tuple not supported yet')
args = shlex.split(command, posix=not WIN32)
if verbose is True:
verbose = 2
if verbout is None:
... | Trying to clean up cmd
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - informat... | juraj-google-style |
def expand_value_set_definition(self, value_set: value_set_pb2.ValueSet) -> value_set_pb2.ValueSet:
base_url, request_url = _expansion_request_url_for_value_set_url(value_set.url.value)
request_json = json_format.print_fhir_to_json_string(value_set).encode('utf-8')
session_ = self.create_session()
sessi... | Expands the value set definition using a terminology server.
Requests an expansion of the given value set from the appropriate
terminology server. Attempts to expand arbitrary value sets by passing their
entire definition to the terminology service for expansion.
If possible, requests expansion from the domain associ... | github-repos |
def get(cls, resource_id):
res = Resource.get(resource_id)
return cls(res) if res else None | Returns the class object identified by `resource_id`
Args:
resource_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None | juraj-google-style |
def plot_heatmap(data, title='Heatmap', show_legend=True, show_labels=True, label_fmt='.2f', vmin=None, vmax=None, figsize=None, label_color='w', cmap='RdBu', **kwargs):
(fig, ax) = plt.subplots(figsize=figsize)
heatmap = ax.pcolor(data, vmin=vmin, vmax=vmax, cmap=cmap)
ax.invert_yaxis()
if (title is no... | Plot a heatmap using matplotlib's pcolor.
Args:
* data (DataFrame): DataFrame to plot. Usually small matrix (ex.
correlation matrix).
* title (string): Plot title
* show_legend (bool): Show color legend
* show_labels (bool): Show value labels
* label_fmt (str): Label format string
* vmin (float): Min value for scale
*... | codesearchnet |
def render_policy_template(account_number='', app='coreforrest', env='dev', group='forrest', items=None, pipeline_settings=None, region='us-east-1', service=''):
statements = []
rendered_service_policy = get_template('infrastructure/iam/{0}.json.j2'.format(service), account_number=account_number, app=app, env=e... | Render IAM Policy template.
To support multiple statement blocks, JSON objects can be separated by a
comma. This function attempts to turn any invalid JSON into a valid list
based on this comma separated assumption.
Args:
account_number (str): AWS Account number.
app (str): Name of Spinnaker Application.
env (str): E... | codesearchnet |
def parse(self, argument):
if isinstance(argument, list):
return argument
elif (not argument):
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split() | Parses argument as whitespace-separated list of strings.
It also parses argument as comma-separated list of strings if requested.
Args:
argument: string argument passed in the commandline.
Returns:
[str], the parsed flag value. | codesearchnet |
def must_run_on_cpu(node, pin_variables_on_cpu=False):
if isinstance(node, ops.Operation):
node_def = node.node_def
else:
assert isinstance(node, node_def_pb2.NodeDef)
node_def = node
if pin_variables_on_cpu and _is_variable_op(node_def.op):
return True
if node_def.op == ... | Returns True if the given node_def must run on CPU, otherwise False.
Args:
node: The node to be assigned to a device. Could be either an ops.Operation
or NodeDef.
pin_variables_on_cpu: If True, this function will return False if node_def
represents a variable-related op.
Returns:
True if the given node must run on CP... | github-repos |
def master(self, task_type=None, task_id=None, rpc_layer=None):
if task_type is not None and task_id is not None:
master = self.cluster_spec().task_address(task_type, task_id)
return format_master_url(master, rpc_layer or self._rpc_layer)
return self._cluster_resolvers[0].master(rpc_layer=rpc_la... | Returns the master address to use when creating a session.
This usually returns the master from the first ClusterResolver passed in,
but you can override this by specifying the task_type and task_id.
Note: this is only useful for TensorFlow 1.x.
Args:
task_type: (Optional) The type of the TensorFlow task of the mast... | github-repos |
def clean_all(G, settings):
quiet = settings['quiet']
recon = settings['recon']
sprint = settings['sprint']
error = settings['error']
all_outputs = []
for node in G.nodes(data=True):
if ('output' in node[1]):
for item in get_all_outputs(node[1]):
all_outputs.a... | Removes all the output files from all targets. Takes
the graph as the only argument
Args:
The networkx graph object
The settings dictionary
Returns:
0 if successful
1 if removing even one file failed | codesearchnet |
def language_from_str(language_def, metamodel):
if type(language_def) is not text:
raise TextXError("textX accepts only unicode strings.")
if metamodel.debug:
metamodel.dprint("*** PARSING LANGUAGE DEFINITION ***")
if metamodel.debug in textX_parsers:
parser = textX_pars... | Constructs parser and initializes metamodel from language description
given in textX language.
Args:
language_def (str): A language description in textX.
metamodel (TextXMetaModel): A metamodel to initialize.
Returns:
Parser for the new language. | juraj-google-style |
def __add__(self, r):
if not isinstance(r, TestResult):
raise TypeError('Operand %s of type %s is not a TestResult.' % (r, type(r)))
sum_result = TestResult()
for name in sum_result.__dict__:
r_value = getattr(r, name)
l_value = getattr(self, name)
if isinstance(r_value, list... | Overrides '+' operator for TestResult class.
The add operator merges two TestResult objects by concatenating all of
their lists together.
Args:
r: another instance of TestResult to be added
Returns:
A TestResult instance that's the sum of two TestResult instances. | github-repos |
def panel(self, panel_id):
if not isinstance(panel_id, ObjectId):
panel_id = ObjectId(panel_id)
panel_obj = self.panel_collection.find_one({'_id': panel_id})
return panel_obj | Fetch a gene panel by '_id'.
Args:
panel_id (str, ObjectId): str or ObjectId of document ObjectId
Returns:
dict: panel object or `None` if panel not found | juraj-google-style |
def get_labels(self, **query_params):
labels = self.get_labels_json(self.base_uri, query_params=query_params)
labels_list = []
for label_json in labels:
labels_list.append(self.create_label(label_json))
return labels_list | Get the labels attached to this board. Returns a label of Label
objects.
Returns:
list(Label): The labels attached to this board | codesearchnet |
def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids,
platform_restrictions=None):
campaign_extension_setting_service = client.GetService(
'CampaignExtensionSettingService', 'v201809')
extension_feed_items = [{
CreateSitelinkFeedItem(feed_items, feed... | Creates the extension setting for a list of Feed Items.
Args:
client: an AdWordsClient instance.
feed_items: the list of all Feed Items.
campaign_feed: the original Campaign Feed.
feed_item_ids: the Ids of the feed items for which extension settings should
be created.
platform_restrictions: an optional Platform Restri... | juraj-google-style |
def get(self, center, target, date):
if ((center.index, target.index) in self.segments):
(pos, vel) = self.segments[(center.index, target.index)].compute_and_differentiate(date.jd)
sign = 1
else:
(pos, vel) = self.segments[(target.index, center.index)].compute_and_differentiate(date.jd)
... | Retrieve the position and velocity of a target with respect to a center
Args:
center (Target):
target (Target):
date (Date):
Return:
numpy.array: length-6 array position and velocity (in m and m/s) of the
target, with respect to the center | codesearchnet |
def variable_summaries(vars_, groups=None, scope='weights'):
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for var in vars_:
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(va... | Create histogram summaries for the provided variables.
Summaries can be grouped via regexes matching variables names.
Args:
vars_: List of variables to summarize.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor. | juraj-google-style |
def _gen(self, optimized, splitstring):
self.resolved = {}
for nt in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nt]:
if self.grammar.grammar_rules[i][0] not in self.resolved\
... | Generates a new random object generated from the nonterminal
Args:
optimized (bool): mode of operation - if enabled not all
CNF rules are included (mitigate O(n^3))
splitstring (bool): A boolean for enabling or disabling
Returns:
str: The generated string | juraj-google-style |
def _case_helper(cond_fn, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, **cond_kwargs):
predicates, actions = _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, allow_python_preds)
with ops.name_scope(name, 'case', [predicates]):
if default is None:
defau... | Implementation of case that allows for different cond functions.
Args:
cond_fn: method that has signature and semantics of `cond` above.
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive:... | github-repos |
def __init__(self, package, ad):
super(SnippetClient, self).__init__(app_name=package, ad=ad)
self.package = package
self._ad = ad
self._adb = ad.adb
self._proc = None | Initializes a SnippetClient.
Args:
package: (str) The package name of the apk where the snippets are
defined.
ad: (AndroidDevice) the device object associated with this client. | juraj-google-style |
def BreachDepressions(dem, in_place=False, topology='D8'):
if (type(dem) is not rdarray):
raise Exception('A richdem.rdarray or numpy.ndarray is required!')
if (topology not in ['D8', 'D4']):
raise Exception('Unknown topology!')
if (not in_place):
dem = dem.copy()
_AddAnalysis(de... | Breaches all depressions in a DEM.
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
topology (string): A topology indicator
Returns:
DEM without depressions. | codesearchnet |
def Parse(self, value):
value_line = value.split(' ')
if len(value_line) < 3:
raise TextFSMTemplateError('Expect at least 3 tokens on line.')
if not value_line[2].startswith('('):
options = value_line[1]
for option in options.split(','):
self._AddOption(option)
... | Parse a 'Value' declaration.
Args:
value: String line from a template file, must begin with 'Value '.
Raises:
TextFSMTemplateError: Value declaration contains an error. | juraj-google-style |
def allreduce(self, x, mesh_axes, reduction_fn_string):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_i... | Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented. | juraj-google-style |
def do_ams_patch(endpoint, path, body, access_token):
headers = {"Content-Type": json_acceptformat,
"DataServiceVersion": dsversion_min,
"MaxDataServiceVersion": dsversion_max,
"Accept": json_acceptformat,
"Accept-Charset" : charset,
"A... | Do a AMS PATCH request and return JSON.
Args:
endpoint (str): Azure Media Services Initial Endpoint.
path (str): Azure Media Services Endpoint Path.
body (str): Azure Media Services Content Body.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response. JSON body. | juraj-google-style |
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/',
'locations/', location,
... | List available versions for a given publisher's sku.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
location (str): Azure data center location. E.g. westus.
publisher (str): VM image publisher. E.g. MicrosoftWindowsServer.
offer (str): VM image offer. E.g. W... | juraj-google-style |
def get_forced_variation(self, experiment_key, user_id):
if (not self.is_valid):
self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_forced_variation'))
return None
if (not validator.is_non_empty_string(experiment_key)):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('... | Gets the forced variation for a given user and experiment.
Args:
experiment_key: A string key identifying the experiment.
user_id: The user ID.
Returns:
The forced variation key. None if no forced variation key. | codesearchnet |
def insert_before(self, value: Union[RawValue, Value],
raw: bool = False) -> "ArrayEntry":
return ArrayEntry(self.index, self.before, self.after.cons(self.value),
self._cook_value(value, raw), self.parinst,
self.schema_node, date... | Insert a new entry before the receiver.
Args:
value: The value of the new entry.
raw: Flag to be set if `value` is raw.
Returns:
An instance node of the new inserted entry. | juraj-google-style |
def Artifacts(self, os_name=None, cpe=None, label=None):
hit = (lambda x: ((x[0] == x[1]) or (not x[0])))
seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]
return all(map(hit, seq)) | Whether the conditions applies, modulo host data.
Args:
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
True if os_name, cpe or labels match. Empty values are ignored. | codesearchnet |
async def game(
self, short_name, *, id=None,
text=None, parse_mode=(), link_preview=True,
geo=None, period=60, contact=None, game=False, buttons=None
):
result = types.InputBotInlineResultGame(
id=id or '',
short_name=short_name,
... | Creates a new inline result of game type.
Args:
short_name (`str`):
The short name of the game to use. | juraj-google-style |
def Analyze(self, source_path, output_writer):
if (not os.path.exists(source_path)):
raise RuntimeError('No such source: {0:s}.'.format(source_path))
scan_context = source_scanner.SourceScannerContext()
scan_path_spec = None
scan_step = 0
scan_context.OpenSourcePath(source_path)
while Tr... | Analyzes the source.
Args:
source_path (str): the source path.
output_writer (StdoutWriter): the output writer.
Raises:
RuntimeError: if the source path does not exists, or if the source path
is not a file or directory, or if the format of or within the source
file is not supported. | codesearchnet |
def get_site_spd_dos(self, site):
spd_dos = dict()
for orb, pdos in self.pdos[site].items():
orbital_type = _get_orb_type(orb)
if orbital_type in spd_dos:
spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos)
else:
... | Get orbital projected Dos of a particular site
Args:
site: Site in Structure associated with CompleteDos.
Returns:
dict of {orbital: Dos}, e.g. {"s": Dos object, ...} | juraj-google-style |
def rot90(array, k=1, axes=(0, 1)):
array = convert_to_tensor(array)
if array.shape.rank < 2:
raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.shape.rank}')
if len(axes) != 2 or axes[0] == axes[1]:
raise ValueError(f'Invalid axes: {axes}. Axes must ... | Rotate an array by 90 degrees in the specified plane.
Args:
array: Input tensor
k: Number of 90-degree rotations (default=1)
axes: Tuple of two axes that define the plane of rotation.
Defaults to (0, 1).
Returns:
Rotated tensor with correct shape transformation | github-repos |
def update(self, id, newObj):
newObj = self.validation(newObj)
for obj in self.model.db:
if obj["id"] != id:
continue
newObj.pop("id", None)
obj.update(newObj)
obj = self._cast_model(obj)
if not self._batch.enable.is_s... | Update a object
Args:
id (int): Target Object ID
newObj (object): New object will be merged into original object
Returns:
Object: Updated object
None: If specified object id is not found
MultipleInvalid: If input object is invaild | juraj-google-style |
def grappler_optimize(graph, fetches=None, config_proto=None):
if config_proto is None:
config_proto = config_pb2.ConfigProto()
config_proto.graph_options.rewrite_options.min_graph_nodes = -1
if fetches is not None:
for fetch in fetches:
graph.add_to_collection('train_op', fe... | Tries to optimize the provided graph using grappler.
Args:
graph: A `tf.Graph` instance containing the graph to optimize.
fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
Grappler uses the 'train_op' collection to look for fetches, so if not
provided this collection should be non-empty.
config... | github-repos |
def apply_inverse(self, y, in_place=False):
r
return cho_solve(self._factor, y, overwrite_b=in_place) | r"""
Apply the inverse of the covariance matrix to the input by solving
.. math::
K\,x = y
Args:
y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or
matrix :math:`y`.
in_place (Optional[bool]): Should the data in ``y`` be overwritten
with the result :math:`x`? (default: ``False``) | juraj-google-style |
def to_csv(self, filename: str, latexify_names: bool=False):
elements = set()
for entry in self.entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements), key=(lambda a: a.X))
writer = csv.writer(open(filename, 'w'), delimiter=unicode2str(','), quotechar=unicode2str('... | Exports PDEntries to a csv
Args:
filename: Filename to write to.
entries: PDEntries to export.
latexify_names: Format entry names to be LaTex compatible,
e.g., Li_{2}O | codesearchnet |
def default(self, obj):
if isinstance(obj, decimal.Decimal):
obj = format(obj, 'f')
str_digit = text_type(obj)
return (str_digit.rstrip('0').rstrip('.') if ('.' in str_digit) else str_digit)
elif isinstance(obj, phonenumbers.PhoneNumber):
return phonenumbers.format_number(obj, ph... | Encode individual objects into their JSON representation.
This method is used by :class:`flask.json.JSONEncoder` to encode
individual items in the JSON object.
Args:
obj (object): Any Python object we wish to convert to JSON.
Returns:
str: The stringified, valid JSON representation of our provided
object. | codesearchnet |
def index(self, entries):
if (not self.is_empty()):
raise ValueError('Cannot call index again on a non-empty index')
if (not isinstance(entries, list)):
queue = deque([])
for (key, minhash, size) in entries:
if (size <= 0):
raise ValueError('Set size must be p... | Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` ... | codesearchnet |
def _parse_block_ref(cls, block_ref, deprecated=False):
if (deprecated and (block_ref is None)):
return None
if isinstance(block_ref, LocalId):
return block_ref
is_valid_deprecated = (deprecated and cls.DEPRECATED_ALLOWED_ID_RE.match(block_ref))
is_valid = cls.ALLOWED_ID_RE.match(block_r... | Given `block_ref`, tries to parse it into a valid block reference.
Returns `block_ref` if it is valid.
Raises:
InvalidKeyError: if `block_ref` is invalid. | codesearchnet |
def set_card_simple(self, title, content):
self.response.card.type = 'Simple'
self.response.card.title = title
self.response.card.content = content | Set response card as simple type.
title and content cannot exceed 8,000 characters.
Args:
title: str. Title of Simple or Standard type card.
content: str. Content of Simple type card. | juraj-google-style |
def control(controllee: Union[('cirq.Gate', op_tree.OP_TREE)], control_qubits: Sequence['cirq.Qid']=None, default: Any=RaiseTypeErrorIfNotProvided) -> Any:
if (control_qubits is None):
control_qubits = []
controller = getattr(controllee, 'controlled_by', None)
result = (NotImplemented if (controller... | Returns a Controlled version of the given value, if defined.
Controllees define how to be controlled by defining a method
controlled_by(self, control_qubits). Note that the method may return
NotImplemented to indicate a particular controlling can't be done.
Args:
controllee: The gate, operation or iterable of operati... | codesearchnet |
def validate_composite_type_param(type_param, error_msg_prefix):
possible_classes = [type, TypeConstraint]
is_not_type_constraint = not is_typing_generic(type_param) and (not isinstance(type_param, tuple(possible_classes))) and (type_param is not None) and (getattr(type_param, '__module__', None) != 'typing')
... | Determines if an object is a valid type parameter to a
:class:`CompositeTypeHint`.
Implements sanity checking to disallow things like::
List[1, 2, 3] or Dict[5].
Args:
type_param: An object instance.
error_msg_prefix (:class:`str`): A string prefix used to format an error
message in the case of an exception.
Raises... | github-repos |
def Get(self, name, default=utils.NotAValue, context=None):
if (not self.initialized):
if (name not in self.constants):
raise RuntimeError(("Error while retrieving %s: Configuration hasn't been initialized yet." % name))
if context:
if (isinstance(context, string_types) or (not isins... | Get the value contained by the named parameter.
This method applies interpolation/escaping of the named parameter and
retrieves the interpolated value.
Args:
name: The name of the parameter to retrieve. This should be in the format
of "Section.name"
default: If retrieving the value results in an error, return this d... | codesearchnet |
def utterances_from_dir(eaf_dir: Path, tier_prefixes: Tuple[(str, ...)]) -> List[Utterance]:
logger.info('EAF from directory: {}, searching with tier_prefixes {}'.format(eaf_dir, tier_prefixes))
utterances = []
for eaf_path in eaf_dir.glob('**/*.eaf'):
eaf_utterances = utterances_from_eaf(eaf_path, ... | Returns the utterances found in ELAN files in a directory.
Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.
Args:
eaf_dir: A path to the directory to be searched
tier_prefixes: Stings matching the start of ELAN tier names th... | codesearchnet |
def loads(s, single=False, version=_default_version,
strict=False, errors='warn'):
ms = deserialize(s, version=version, strict=strict, errors=errors)
if single:
return next(ms)
else:
return ms | Deserialize SimpleMRS string representations
Args:
s (str): a SimpleMRS string
single (bool): if `True`, only return the first Xmrs object
Returns:
a generator of Xmrs objects (unless *single* is `True`) | juraj-google-style |
def select_with_index(self, selector=IndexedElement, transform=identity):
if self.closed():
raise ValueError('Attempt to call select_with_index() on a closed Queryable.')
if (not is_callable(selector)):
raise TypeError('select_with_index() parameter selector={0} is not callable'.format(repr(sele... | Transforms each element of a sequence into a new form, incorporating
the index of the element.
Each element is transformed through a selector function which accepts
the element value and its zero-based index in the source sequence. The
generated sequence is lazily evaluated.
Note: This method uses deferred execution.... | codesearchnet |
def _ParseMFTAttribute(self, parser_mediator, mft_entry, mft_attribute):
if mft_entry.is_empty() or mft_entry.base_record_file_reference != 0:
return
if mft_attribute.attribute_type in [
self._MFT_ATTRIBUTE_STANDARD_INFORMATION,
self._MFT_ATTRIBUTE_FILE_NAME]:
file_attribute_f... | Extract data from a NFTS $MFT attribute.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
mft_entry (pyfsntfs.file_entry): MFT entry.
mft_attribute (pyfsntfs.attribute): MFT attribute. | juraj-google-style |
def _ParseSourcePathOption(self, options):
self._source_path = self.ParseStringOption(options, self._SOURCE_OPTION)
if not self._source_path:
raise errors.BadConfigOption('Missing source path.')
self._source_path = os.path.abspath(self._source_path) | Parses the source path option.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid. | juraj-google-style |
def add_outbound_connection(self, uri):
LOGGER.debug("Adding connection to %s", uri)
conn = OutboundConnection(
connections=self._connections,
endpoint=uri,
dispatcher=self._dispatcher,
zmq_identity=self._zmq_identity,
secured=self._se... | Adds an outbound connection to the network.
Args:
uri (str): The zmq-style (e.g. tcp://hostname:port) uri
to attempt to connect to. | juraj-google-style |
def __eq__(self, other):
res = False
if len(self) == len(other):
if np.all(self._z == other.z) and np.all(self._x == other.x):
res = True
return res | Return True if all Pauli terms are equal.
Args:
other (Pauli): other pauli
Returns:
bool: are self and other equal. | juraj-google-style |
def imresize(img, size, return_scale=False, interpolation='bilinear'):
(h, w) = img.shape[:2]
resized_img = cv2.resize(img, size, interpolation=interp_codes[interpolation])
if (not return_scale):
return resized_img
else:
w_scale = (size[0] / w)
h_scale = (size[1] / h)
ret... | Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`... | codesearchnet |
def scan_meta_graph_def(meta_graph_def, op_denylist):
ops_in_metagraph = set(meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))
denylisted_ops = op_denylist & ops_in_metagraph
if denylisted_ops:
print('MetaGraph with tag set %s contains the following denylisted ops:' % meta_graph_def.me... | Scans meta_graph_def and reports if there are ops on denylist.
Print ops if they are on denylist, or print success if no denylisted ops
found.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
op_denylist: set of ops to scan for. | github-repos |
def report_error(self, read_tuple_name, error_name, wrong='', message='', warning=False):
if ((not self.report_only_first) or (error_name not in self.reported_errors)):
print('\t'.join([('error' if (warning == False) else 'warning'), read_tuple_name, error_name, wrong, message]))
self.reported_errors.ad... | Report an error.
Args:
read_tuple_name (): Name of the read tuple.
error_name (): Name of the error.
wrong (str): What is wrong.
message (str): Additional msessage to be printed.
warning (bool): Warning (not an error). | codesearchnet |
def assign_device(cls, core):
return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MAXIMAL, tile_assignment_dimensions=[1], tile_assignment_devices=[core])) | Returns an AssignDevice sharding attribute.
This causes an op to be computed in its entirety only on one core in
the XLA device.
Args:
core: The core to assign this Op to. | github-repos |
def get_url_distribution(self, params=None):
params = params or {}
all_responses = {}
api_name = 'virustotal-url-distribution'
response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution')
self._extract_response_chunks(all_respo... | Retrieves a live feed with the latest URLs submitted to VT.
Args:
resources: a dictionary with name and value for optional arguments
Returns:
A dict with the VT report. | juraj-google-style |
def func_load(code, defaults=None, closure=None, globs=None):
if isinstance(code, (tuple, list)):
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
def dummy_fn():
value
cel... | Deserializes a user defined function.
Args:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object. | github-repos |
def restore(self, state):
self._clear()
self._parseUserInfo({'labels': state['labels']})
self._parseNodes(state['nodes'])
self._keep_version = state['keep_version'] | Unserialize saved note data.
Args:
state (dict): Serialized state to load. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.