code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def get_related(self):
if self.exists and hasattr(self.rdf.triples, 'ore') and hasattr(self.rdf.triples.ore, 'aggregates'):
related = [ self.repo.parse_uri(uri) for uri in self.rdf.triples.ore.aggregates ]
return related
else:
return [] | get ore:aggregates for this resource, optionally retrieving resource payload
Args:
retrieve (bool): if True, issue .refresh() on resource thereby confirming existence and retrieving payload | juraj-google-style |
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('
return None | Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]:... | juraj-google-style |
def row_splits_dtype(self):
return self._row_splits_dtype | The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.
Examples:
>>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)
>>> tf.type_spec_from_value(rt).row_splits_dtype
tf.int64
Returns:
A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`. | github-repos |
def _print_task_data(self, task):
print(' {0:s} ({1:s})'.format(task['name'], task['id']))
paths = task.get('saved_paths', [])
if not paths:
return
for path in paths:
if path.endswith('worker-log.txt'):
continue
if path.endswith('{0:s}.log'.format(task.get('id'))):
... | Pretty-prints task data.
Args:
task: Task dict generated by Turbinia. | juraj-google-style |
def get_other_answers_simple(pool, seeded_answers, get_student_item_dict, num_responses):
ret = []
pool = {int(k): v for (k, v) in pool.items()}
total_in_pool = len(seeded_answers)
merged_pool = convert_seeded_answers(seeded_answers)
student_id = get_student_item_dict()['student_id']
for key in ... | Get answers from others with simple algorithm, which picks one answer for each option.
Args:
see `get_other_answers`
num_responses (int): the number of responses to be returned. This value may not be
respected if there is not enough answers to return
Returns:
dict: answers based on the selection algorithm | codesearchnet |
def xeval(source, optimize=True):
native = xcompile(source, optimize=optimize)
return native() | Compiles to native Python bytecode and runs program, returning the
topmost value on the stack.
Args:
optimize: Whether to optimize the code after parsing it.
Returns:
None: If the stack is empty
obj: If the stack contains a single value
[obj, obj, ...]: If the stack contains many values | juraj-google-style |
def update(self, resource, timeout=-1):
self.__set_default_values(resource)
uri = self._client.build_uri(resource['logicalSwitch']['uri'])
return self._client.update(resource, uri=uri, timeout=timeout) | Updates a Logical Switch.
Args:
resource (dict): Object to update.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Updated resource. | juraj-google-style |
def logout(self, client_id, return_to, federated=False):
return_to = quote_plus(return_to)
if federated is True:
return self.get(
'https:
self.domain, client_id, return_to),
headers={'Content-Type': 'application/json'}
... | Logout
Use this endpoint to logout a user. If you want to navigate the user to a
specific URL after the logout, set that URL at the returnTo parameter.
The URL should be included in any the appropriate Allowed Logout URLs list:
Args:
client_id (str): The client_id of your application.
returnTo (str): URL to redirect... | juraj-google-style |
def ProcessConfigOverrides(filename):
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
(abs_path, base_name) = os.path.split(abs_filename)
if (not base_name):
break
cfg_file = os.path.join(abs_path, 'CPPLINT.cfg')
... | Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further. | codesearchnet |
def has_no_checked_field(self, locator, **kwargs):
kwargs["checked"] = True
return self.has_no_selector("field", locator, **kwargs) | Checks if the page or current node has no radio button or checkbox with the given label,
value, or id that is currently checked.
Args:
locator (str): The label, name, or id of a checked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it doesn't exist. | juraj-google-style |
def rename(oldname, newname, overwrite=False):
rename_v2(oldname, newname, overwrite) | Rename or move a file / directory.
Args:
oldname: string, pathname for a file
newname: string, pathname to which the file needs to be moved
overwrite: boolean, if false it's an error for `newname` to be occupied by
an existing file.
Raises:
errors.OpError: If the operation fails. | github-repos |
def add_glyph(self, source_or_glyph, glyph=None, **kw):
if (glyph is not None):
source = source_or_glyph
else:
(source, glyph) = (ColumnDataSource(), source_or_glyph)
if (not isinstance(source, DataSource)):
raise ValueError("'source' argument to add_glyph() must be DataSource subcla... | Adds a glyph to the plot with associated data sources and ranges.
This function will take care of creating and configuring a Glyph object,
and then add it to the plot's list of renderers.
Args:
source (DataSource) : a data source for the glyphs to all use
glyph (Glyph) : the glyph to add to the Plot
Keyword Argumen... | codesearchnet |
def register_subcommand(parser: ArgumentParser):
serve_parser = parser.add_parser('serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.')
serve_parser.add_argument('--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on')
serve_parser.add_argu... | Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments | github-repos |
def compare_versions(ver1='', oper='==', ver2=''):
if (not ver1):
raise SaltInvocationError('compare_version, ver1 is blank')
if (not ver2):
raise SaltInvocationError('compare_version, ver2 is blank')
if (ver1 == 'latest'):
ver1 = six.text_type(sys.maxsize)
if (ver2 == 'latest'):... | Compare software package versions
Args:
ver1 (str): A software version to compare
oper (str): The operand to use to compare
ver2 (str): A software version to compare
Returns:
bool: True if the comparison is valid, otherwise False
CLI Example:
.. code-block:: bash
salt '*' pkg.compare_versions 1.2 >= 1.3 | codesearchnet |
def send_highspeed(self, data, progress_callback):
if (not self.connected):
raise HardwareError('Cannot send a script if we are not in a connected state')
if (isinstance(data, str) and (not isinstance(data, bytes))):
raise ArgumentError('You must send bytes or bytearray to _send_highspeed', type... | Send a script to a device at highspeed, reporting progress.
This method takes a binary blob and downloads it to the device as fast
as possible, calling the passed progress_callback periodically with
updates on how far it has gotten.
Args:
data (bytes): The binary blob that should be sent to the device at highspeed.
p... | codesearchnet |
def __init__(self, project_id, credentials, config=None):
self._project_id = project_id
self._credentials = credentials
self._config = config if config is not None else Context._get_default_config() | Initializes an instance of a Context object.
Args:
project_id: the current cloud project.
credentials: the credentials to use to authorize requests.
config: key/value configurations for cloud operations | juraj-google-style |
def preprocess_data(data: List[Tuple[List[str], List[str]]], to_lower: bool = True,
append_case: str = "first") -> List[Tuple[List[Tuple[str]], List[str]]]:
new_data = []
for words, tags in data:
new_words = [process_word(word, to_lower=to_lower, append_case=append_case)
... | Processes all words in data using
:func:`~deeppavlov.dataset_iterators.morphotagger_iterator.process_word`.
Args:
data: a list of pairs (words, tags), each pair corresponds to a single sentence
to_lower: whether to lowercase
append_case: whether to add case mark
Returns:
a list of preprocessed sentences | juraj-google-style |
def create(self, data, *args, **kwargs):
if self.create.__func__.__module__ != self.__module__:
raise Exception("Child method not implemented")
self._MambuStruct__method = "POST"
self._MambuStruct__data = data
self.connect(*args, **kwargs)
... | Creates an entity in Mambu
This method must be implemented in child classes
Args:
data (dictionary): dictionary with data to send, this dictionary
is specific for each Mambu entity | juraj-google-style |
def speechlib_mel(sample_rate, n_fft, n_mels, fmin=None, fmax=None):
bank_width = int(n_fft
if fmax is None:
fmax = sample_rate / 2
if fmin is None:
fmin = 0
assert fmin >= 0, 'fmin cannot be negative'
assert fmin < fmax <= sample_rate / 2, 'fmax must be between (fmin, samplerate / ... | Create a Mel filter-bank the same as SpeechLib FbankFC.
Args:
sample_rate (int): Sample rate in Hz. number > 0 [scalar]
n_fft (int): FFT size. int > 0 [scalar]
n_mel (int): Mel filter size. int > 0 [scalar]
fmin (float): lowest frequency (in Hz). If None use 0.0.
float >= 0 [scalar]
fmax: highest frequency (in Hz). If... | github-repos |
def execute(self, commands, encoding='json', **kwargs):
if (encoding not in ('json', 'text')):
raise TypeError('encoding must be one of [json, text]')
try:
self.error = None
request = self.request(commands, encoding=encoding, **kwargs)
response = self.send(request)
return... | Executes the list of commands on the destination node
This method takes a list of commands and sends them to the
destination node, returning the results. The execute method handles
putting the destination node in enable mode and will pass the
enable password, if required.
Args:
commands (list): A list of commands to... | codesearchnet |
def read_from_hdx(identifier, configuration=None):
dataset = Dataset(configuration=configuration)
result = dataset._dataset_load_from_hdx(identifier)
if result:
return dataset
return None | Reads the dataset given by identifier from HDX and returns Dataset object
Args:
identifier (str): Identifier of dataset
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Dataset]: Dataset object if successful read, None if not | codesearchnet |
def get_cytoband_coord(chrom, pos):
chrom = chrom.strip('chr')
pos = int(pos)
result = None
logger.debug('Finding Cytoband for chrom:{0} pos:{1}'.format(chrom, pos))
if (chrom in CYTOBANDS):
for interval in CYTOBANDS[chrom][pos]:
result = '{0}{1}'.format(chrom, interval.data)
... | Get the cytoband coordinate for a position
Args:
chrom(str): A chromosome
pos(int): The position
Returns:
cytoband | codesearchnet |
def minimum_image( self, r1, r2 ):
delta_r = r2 - r1
delta_r = np.array( [ x - math.copysign( 1.0, x ) if abs(x) > 0.5 else x for x in delta_r ] )
return( delta_r ) | Find the minimum image vector from point r1 to point r2.
Args:
r1 (np.array): fractional coordinates of point r1.
r2 (np.array): fractional coordinates of point r2.
Returns:
(np.array): the fractional coordinate vector from r1 to the nearest image of r2. | juraj-google-style |
def close(self, reason=None):
with self._closing:
if self._closed:
return
if self.is_active:
_LOGGER.debug("Stopping consumer.")
self._consumer.stop()
self._consumer = None
self._rpc.close()
... | Stop consuming messages and shutdown all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. | juraj-google-style |
def Write(self, output_writer):
for (column_index, column_size) in enumerate(self._column_sizes):
(column_size, _) = divmod(column_size, self._NUMBER_OF_SPACES_IN_TAB)
column_size = ((column_size + 1) * self._NUMBER_OF_SPACES_IN_TAB)
self._column_sizes[column_index] = column_size
if self... | Writes the table to output writer.
Args:
output_writer (CLIOutputWriter): output writer. | codesearchnet |
def _shared_name(self):
return self.name[:self.name.index(':')] | The shared name of the variable.
Unlike name(), shared_name doesn't have ":0" suffix. It is user-specified
name with name scope prefix.
Returns:
variable name. | github-repos |
def __eq__(self, other):
if type(self) is not type(other) or \
self.name != other.name or \
self.num_qubits != other.num_qubits or \
self.num_clbits != other.num_clbits or \
self.definition != other.definition:
return False
... | Two instructions are the same if they have the same name,
same dimensions, and same params.
Args:
other (instruction): other instruction
Returns:
bool: are self and other equal. | juraj-google-style |
def merge_svg_layers(svg_sources, share_transform=True):
(width, height), layers = get_svg_layers(svg_sources)
if share_transform:
transforms = [layer_i.attrib['transform'] for layer_i in layers
if 'transform' in layer_i.attrib]
if len(transforms) > 1:
... | Merge layers from input svg sources into a single XML document.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
share_transform (bool) : If exactly one layer has a transform, apply it
to *all* other layers as well.
Returns:
StringIO.StringIO : File-like object contain... | juraj-google-style |
def get_text(revision, strip=True):
start_pos = revision.find("<text")
assert start_pos != -1
end_tag_pos = revision.find(">", start_pos)
assert end_tag_pos != -1
end_tag_pos += len(">")
end_pos = revision.find("</text>")
if end_pos == -1:
ret = ""
else:
ret = revision[end_tag_pos:end_pos]... | Extract the text from a revision.
Args:
revision: a string
strip: a boolean
Returns:
a string | juraj-google-style |
def connection(self):
ctx = stack.top
if (ctx is None):
raise Exception('Working outside of the Flask application context. If you wish to make a connection outside of a flask application context, please handle your connections and use manager.make_connection()')
if hasattr(ctx, 'ldap3_manager_main_c... | Convenience property for externally accessing an authenticated
connection to the server. This connection is automatically
handled by the appcontext, so you do not have to perform an unbind.
Returns:
ldap3.Connection: A bound ldap3.Connection
Raises:
ldap3.core.exceptions.LDAPException: Since this method is performing
... | codesearchnet |
def _safe_issubclass(derived, parent):
try:
return issubclass(derived, parent)
except (TypeError, AttributeError):
if hasattr(derived, '__origin__'):
try:
return issubclass(derived.__origin__, parent)
except TypeError:
pass
return F... | Like issubclass, but swallows TypeErrors.
This is useful for when either parameter might not actually be a class,
e.g. typing.Union isn't actually a class.
Args:
derived: As in issubclass.
parent: As in issubclass.
Returns:
issubclass(derived, parent), or False if a TypeError was raised. | github-repos |
def clear(self, timestamp):
self.storage.clear()
self.push(streams.DATA_CLEARED, timestamp, 1) | Clear all data from the RSL.
This pushes a single reading once we clear everything so that
we keep track of the highest ID that we have allocated to date.
This needs the current timestamp to be able to properly timestamp
the cleared storage reading that it pushes.
Args:
timestamp (int): The current timestamp to stor... | codesearchnet |
def __init__(self, vars_map):
super(core.PostProcessor, self).__init__()
self.vars_map = {}
for var_name, value in iteritems(vars_map):
var_regex = re.compile(
re.escape("%" + var_name + "%"), flags=re.IGNORECASE)
self.vars_map[var_name.lower()] = (var_regex, value) | EnvVarsPostProcessor constructor.
Args:
vars_map: Dictionary of "string" -> "string|list", i.e. a mapping of
environment variables names to their suggested values or to lists
of their suggested values. | juraj-google-style |
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
for vevent in getattr(cal, 'vevent_list', []):
start = getattr(vevent, 'dtstart', None)
end = getattr(vevent, 'dtend', None)
for node in (start, end):
if node:
dt = node.val... | Change the timezone of the specified component.
Args:
cal (Component): the component to change
new_timezone (tzinfo): the timezone to change to
default (tzinfo): a timezone to assume if the dtstart or dtend in cal
doesn't have an existing timezone
utc_only (bool): only convert dates that are in utc
utc_tz (tzinfo): th... | juraj-google-style |
def core_name(self):
buf_size = self.MAX_BUF_SIZE
buf = (ctypes.c_char * buf_size)()
self._dll.JLINKARM_Core2CoreName(self.core_cpu(), buf, buf_size)
return ctypes.string_at(buf).decode() | Returns the name of the target ARM core.
Args:
self (JLink): the ``JLink`` instance
Returns:
The target core's name. | codesearchnet |
def get_type_info(obj):
if isinstance(obj, primitive_types):
return ('primitive', type(obj).__name__)
if isinstance(obj, sequence_types):
return ('sequence', type(obj).__name__)
if isinstance(obj, array_types):
return ('array', type(obj).__name__)
if isinstance(obj, key_value_typ... | Get type information for a Python object
Args:
obj: The Python object
Returns:
tuple: (object type "catagory", object type name) | codesearchnet |
def get_node_ip_address(address='8.8.8.8:53'):
(ip_address, port) = address.split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except Exception as e:
node_ip_address = '127.0.0.1'
if... | Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node. | codesearchnet |
def DeregisterDefinition(self, artifact_definition):
artifact_definition_name = artifact_definition.name.lower()
if (artifact_definition_name not in self._artifact_definitions):
raise KeyError('Artifact definition not set for name: {0:s}.'.format(artifact_definition.name))
del self._artifact_definit... | Deregisters an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if an artifact definition is not set for the corresponding name. | codesearchnet |
def norm(self, valu):
func = self._type_norms.get(type(valu))
if (func is None):
raise s_exc.NoSuchFunc(name=self.name, mesg=('no norm for type: %r' % (type(valu),)))
return func(valu) | Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries. | codesearchnet |
def get_output_from_cache(name, filename):
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename) and (os.path.getmtime(filename) < os.path.getmtime(cache_filename))):
with io.open(cache_filename) as f:
return f.read()
return None | Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with ... | codesearchnet |
def BuildService(self, cls):
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCal... | Constructs the service class.
Args:
cls: The class that will be constructed. | juraj-google-style |
def configure(self, options):
self.client.api.configure_plugin(self.name, options)
self.reload() | Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
def create_chapter_from_file(self, file_name, url=None, title=None):
with codecs.open(file_name, 'r', encoding='utf-8') as f:
content_string = f.read()
return self.create_chapter_from_string(content_string, url, title) | Creates a Chapter object from an html or xhtml file. Sanitizes the
file's content using the clean_function method, and saves
it as the content of the created chapter.
Args:
file_name (string): The file_name containing the html or xhtml
content of the created Chapter
url (Option[string]): A url to infer the title of th... | codesearchnet |
def learn_one(self, x: beam.Row) -> None:
if len(x.__dict__) != 1:
raise ValueError('ZScore.learn_one expected univariate input, but got %s', str(x))
v = next(iter(x))
self._stdev_tracker.push(v)
self._sub_stat_tracker.push(v) | Updates the mean and standard deviation trackers with a new data point.
Args:
x: A `beam.Row` containing a single numerical value. | github-repos |
def collect_function_renames():
renames = set()
all_v2_names = get_all_v2_names()
def visit(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v1 = [name for name in tf_export.get_v1_names(attr) if '.__int... | Looks for functions/classes that need to be renamed in TF 2.0.
Returns:
Set of tuples of the form (current name, new name). | github-repos |
def forward(self, main_feature, condition_feature):
probabilities_and_temperature = self.mlp(torch.concat((main_feature, condition_feature), dim=1))
probabilities, temperature = (probabilities_and_temperature[:, :2, ...], probabilities_and_temperature[:, 2:, ...])
probabilities = probabilities + self.p_eps
... | Args:
main_feature (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
Main feature.
condition_feature (torch.Tensor of shape `(batch_size, num_channels, height, width)`):
Condition feature.
Returns:
`torch.Tensor`:
Output log binomial distribution | github-repos |
def todo(self, **kwargs):
path = ('%s/%s/todo' % (self.manager.path, self.get_id()))
self.manager.gitlab.http_post(path, **kwargs) | Create a todo associated to the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTodoError: If the todo cannot be set | codesearchnet |
def assert_processor_available(processor: str) -> None:
if processor not in [Processors.XHTML2PDF,
Processors.WEASYPRINT,
Processors.PDFKIT]:
raise AssertionError("rnc_pdf.set_pdf_processor: invalid PDF processor"
" specifie... | Assert that a specific PDF processor is available.
Args:
processor: a PDF processor type from :class:`Processors`
Raises:
AssertionError: if bad ``processor``
RuntimeError: if requested processor is unavailable | juraj-google-style |
def __init__(self, data_type=DATA_TYPE):
super(SyslogLineEventData, self).__init__(data_type=data_type)
self.body = None
self.hostname = None
self.pid = None
self.reporter = None
self.severity = None | Initializes an event data attribute container.
Args:
data_type (Optional[str]): event data type indicator. | juraj-google-style |
async def inspect(self, name: str) -> Mapping:
response = await self.docker._query_json("images/{name}/json".format(name=name))
return response | Return low-level information about an image
Args:
name: name of the image | juraj-google-style |
def cut_spectrum(sp, l0, lf):
if (l0 >= lf):
raise ValueError('l0 must be lower than lf')
idx0 = np.argmin(np.abs((sp.x - l0)))
idx1 = np.argmin(np.abs((sp.x - lf)))
out = copy.deepcopy(sp)
out.x = out.x[idx0:idx1]
out.y = out.y[idx0:idx1]
return out | Cuts spectrum given a wavelength interval, leaving origina intact
Args:
sp: Spectrum instance
l0: initial wavelength
lf: final wavelength
Returns:
Spectrum: cut spectrum | codesearchnet |
def stop_artifact_creation(self, id_or_uri, task_uri):
data = {'taskUri': task_uri}
uri = (((self.URI + '/') + extract_id_from_uri(id_or_uri)) + self.STOP_CREATION_PATH)
return self._client.update(data, uri=uri) | Stops creation of the selected Artifact Bundle.
Args:
id_or_uri: ID or URI of the Artifact Bundle.
task_uri: Task URI associated with the Artifact Bundle.
Returns:
string: | codesearchnet |
def is_user_in_experiment(config, experiment, attributes, logger):
audience_conditions = experiment.getAudienceConditionsOrIds()
logger.debug(audience_logs.EVALUATING_AUDIENCES_COMBINED.format(experiment.key, json.dumps(audience_conditions)))
if ((audience_conditions is None) or (audience_conditions == []))... | Determine for given experiment if user satisfies the audiences for the experiment.
Args:
config: project_config.ProjectConfig object representing the project.
experiment: Object representing the experiment.
attributes: Dict representing user attributes which will be used in determining
if the audience conditions are m... | codesearchnet |
def _compute_keys(self, n_minus_1_grams: torch.LongTensor, indices: torch.LongTensor) -> Tuple[torch.LongTensor, torch.LongTensor]:
batch_size, _ = n_minus_1_grams.shape
hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)
hash_result_with_just_context = self.accumulate_hash(hash_resul... | Computes random keys for each ngram and depth.
Args:
n_minus_1_grams (`torch.LongTensor`):
Ngrams (batch_size, ngram_len - 1).
indices (`torch.LongTensor`):
indices of the continuations (batch_size, num_indices)
Returns:
Ngram keys (batch_size, num_indices, depth). | github-repos |
def rename(self, new_folder_name):
headers = self.headers
endpoint = 'https:
payload = '{ "DisplayName": "' + new_folder_name + '"}'
r = requests.patch(endpoint, headers=headers, data=payload)
if check_response(r):
return_folder = r.json()
retur... | Renames the Folder to the provided name.
Args:
new_folder_name: A string of the replacement name.
Raises:
AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
Returns:
A new Folder representing the folder with the new name on Outlook. | juraj-google-style |
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
... | Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_spe... | github-repos |
def call(func, args):
assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
func.__name__)
raw_func = (
func if isinstance(func, FunctionType) else func.__class__.__call__)
hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
argspec = _getargspec(raw_fu... | Call the function with args normalized and cast to the correct types.
Args:
func: The function to call.
args: The arguments parsed by docopt.
Returns:
The return value of func. | juraj-google-style |
def __init__(self, fill_method='zero', fill_missing=True, **kwargs):
super().__init__()
self.fill_missing = fill_missing
self.filler = SimpleFill(fill_method) | Imputs NaN's using various filling methods like mean, zero, median, min, random
Args:
fill_method: How NaN's will be exchanged. Possible values: 'mean', 'zero', 'median', 'min', 'random'
fill_missing: If True, transformer will fill NaN values by filling method | juraj-google-style |
def set_error_filter(self, filt):
self._filter = filt | Set the error filter.
Args:
filt: A function or callable object that accepts a single argument of type
Error and returns True if that error should be included in the log. A
filter of None will add all errors.
NOTE: The filter may adjust some properties of the error. | github-repos |
def add_user(self, group, username):
try:
self.lookup_id(group)
except ldap_tools.exceptions.InvalidResult as err:
raise err from None
operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
self.client.modify(self.__distinguished_name(group), operation) | Add a user to the specified LDAP group.
Args:
group: Name of group to update
username: Username of user to add
Raises:
ldap_tools.exceptions.InvalidResult:
Results of the query were invalid. The actual exception raised
inherits from InvalidResult. See #lookup_id for more info. | codesearchnet |
def cap17(msg):
allbds = ['05', '06', '07', '08', '09', '0A', '20', '21', '40', '41',
'42', '43', '44', '45', '48', '50', '51', '52', '53', '54',
'55', '56', '5F', '60', 'NA', 'NA', 'E1', 'E2']
d = hex2bin(data(msg))
idx = [i for i, v in enumerate(d[:28]) if v=='1']
cap... | Extract capacities from BDS 1,7 message
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
list: list of suport BDS codes | juraj-google-style |
def add(self, doc, attributes=None):
doc_ref = str(doc[self._ref])
self._documents[doc_ref] = (attributes or {})
self.document_count += 1
for (field_name, field) in self._fields.items():
extractor = field.extractor
field_value = (doc[field_name] if (extractor is None) else extractor(doc)... | Adds a document to the index.
Before adding documents to the index it should have been fully
setup, with the document ref and all fields to index already having
been specified.
The document must have a field name as specified by the ref (by default
this is 'id') and it should have all fields defined for indexing,
tho... | codesearchnet |
def setEditable(self, editable):
if not isinstance(editable, bool):
raise TypeError('Argument is not of type bool')
self._editable = editable | setter to _editable. apply changes while changing dtype.
Raises:
TypeError: if editable is not of type bool.
Args:
editable (bool): apply changes while changing dtype. | juraj-google-style |
def get_numeric_features_to_observed_range(examples):
observed_features = collections.defaultdict(list)
for example in examples:
for feature_name in get_numeric_feature_names(example):
original_feature = parse_original_feature_from_example(example, feature_name)
observed_features... | Returns numerical features and their observed ranges.
Args:
examples: Examples to read to get ranges.
Returns:
A dict mapping feature_name -> {'observedMin': 'observedMax': } dicts,
with a key for each numerical feature. | codesearchnet |
def upload_backup_bundle_from_file(self, file_path, deployment_groups_id_or_uri):
deployment_groups_uri = deployment_groups_id_or_uri
if self.DEPLOYMENT_GROUPS_URI not in deployment_groups_id_or_uri:
deployment_groups_uri = self.DEPLOYMENT_GROUPS_URI + deployment_groups_id_or_uri
... | Restore an Artifact Bundle from a backup file.
Args:
file_path (str): The File Path to restore the Artifact Bundle.
deployment_groups_id_or_uri: ID or URI of the Deployment Groups.
Returns:
dict: Deployment group. | juraj-google-style |
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
df = pd.DataFrame(index=table.index)
for column in self.columns:
df = column.update_dataframe(df, table=table, validate=validate)
return df | Return a fully recoded dataframe.
Args:
table (pd.DataFrame): A dataframe on which to apply recoding logic.
validate (bool): If ``True``, recoded table must pass validation tests. | codesearchnet |
def js_adaptor(buffer):
    """Convert JavaScript literals (``true``, ``false``, ``none``, ``NaN``)
    in a string to their quoted/Python equivalents.

    Arguments:
        buffer: string to be converted

    Returns:
        string after conversion
    """
    # These are plain literal substitutions with no regex metacharacters,
    # so str.replace() is exactly equivalent to the original re.sub() calls
    # (and avoids the needless regex machinery).
    # NOTE(review): matching is substring-based and case-sensitive, so words
    # that merely contain a token (e.g. "untrue") are also rewritten; kept
    # as-is to preserve existing behavior.
    for old, new in (('true', 'True'), ('false', 'False'),
                     ('none', 'None'), ('NaN', '"NaN"')):
        buffer = buffer.replace(old, new)
    return buffer
def get_resize_output_image_size(image, resolution_max_side: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:
height, width = get_image_size(image, channel_dim=input_data_format)
height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_... | Get the output size of the image after resizing given a dictionary specifying the max and min sizes.
Args:
image (`np.ndarray`):
Image to resize.
resolution_max_side (`int`):
The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the
input aspect ratio.
input_data_format ... | github-repos |
def update_work_as_completed(self, worker_id, work_id, other_values=None,
error=None):
client = self._datastore_client
try:
with client.transaction() as transaction:
work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
K... | Updates work piece in datastore as completed.
Args:
worker_id: ID of the worker which did the work
work_id: ID of the work which was done
other_values: dictionary with additional values which should be saved
with the work piece
error: if not None then error occurred during computation of the work
piece. In such case wo... | juraj-google-style |
def power(self, n):
    """The matrix power of the channel.

    Args:
        n (int): compute the matrix power of the superoperator matrix.

    Returns:
        Chi: the matrix power of the SuperOp converted to a Chi channel.

    Raises:
        QiskitError: if the input and output dimensions of the
            QuantumChannel are not equal, or the power is not an integer.
    """
    # Non-positive powers are computed in the SuperOp representation and
    # converted back; positive powers delegate to the parent implementation.
    if n <= 0:
        return Chi(SuperOp(self).power(n))
    return super().power(n)
def install_bootstrapped_files(nb_path=None, server_config=True, DEBUG=False):
install_path = None
print('Starting hide_code.js install...')
current_dir = path.abspath(path.dirname(__file__))
config_dirs = j_path.jupyter_config_path()
notebook_module_path = Utils.get_notebook_module_dir()
for di... | Installs javascript and exporting server extensions in Jupyter notebook.
Args:
nb_path (string): Path to notebook module.
server_config (boolean): Install exporting server extensions.
DEBUG (boolean): Verbose mode. | codesearchnet |
def __init__(self, broker, queue_output, backend=None,
max_tasks_in_memory=None, max_workers_in_memory=None):
self._app = Celery(broker=broker, backend=backend)
self._queue_output = queue_output
from celery.backends.base import DisabledBackend
self._use_result_... | Constructs an event listener instance.
Args:
broker (str): the broker being used by the celery system.
queue_output (Queue): to send to streaming dispatcher.
backend (str): the result backend being used by the celery system.
max_tasks_in_memory (int): max tasks stored
max_workers_in_memory (int): max workers stored | juraj-google-style |
def add_gene_panel(self, panel_obj):
panel_name = panel_obj['panel_name']
panel_version = panel_obj['version']
display_name = panel_obj.get('display_name', panel_name)
if self.gene_panel(panel_name, panel_version):
raise IntegrityError('Panel {0} with version {1} already exist in database'.forma... | Add a gene panel to the database
Args:
panel_obj(dict) | codesearchnet |
def static_nvals(self):
if self._nvals is not None:
nvals = tensor_util.constant_value(self._nvals)
if nvals is not None:
return nvals
if self._value_rowids is not None:
nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)
if nvals.value is not None:
... | The number of values in this partition, if statically known.
```python
self.value_rowids().shape == [self.static_vals]
```
Returns:
The number of values in this partition as an `int` (if statically known);
or `None` (otherwise). | github-repos |
def setEditorData(self, editor, index):
    """Sets the current data for the editor.

    The displayed data has the same value as ``index.data(Qt.EditRole)``
    (the translated name of the datatype), so the matching combobox item
    is looked up and selected.

    Signals emitted by the editor are blocked during execution.
    """
    editor.blockSignals(True)
    selection = editor.findData(index.data())
    editor.setCurrentIndex(selection)
    editor.blockSignals(False)
def set_all_file_column_labels(self, xlabel=None, ylabel=None):
if xlabel is not None:
self.general.x_column_label = xlabel
if ylabel is not None:
self.general.y_column_label = ylabel
if xlabel is None and ylabel is None:
warnings.warn("is not specify... | Indicate general x,y column labels.
This sets the general x and y column labels into data files for all plots.
It can be overridden for specific plots.
Args:
xlabel/ylabel (str, optional): String indicating column label for x,y values
into the data files. Default is None.
Raises:
UserWarning: If xlabel and ylabel ar... | juraj-google-style |
def filter_object(obj, marks, presumption=DELETE):
if isinstance(obj, list):
keys = reversed(range(0, len(obj)))
else:
keys = obj.keys()
for k in keys:
v = obj[k]
m = marks.get(id(v), UNSPECIFIED)
if m == DELETE:
del obj[k]
elif m == KEEP o... | Filter down obj based on marks, presuming keys should be kept/deleted.
Args:
obj: The object to be filtered. Filtering is done in-place.
marks: An object mapping id(obj) --> {DELETE,KEEP}
These values apply to the entire subtree, unless inverted.
presumption: The default action to take on all keys. | juraj-google-style |
def is_complex_format_str(node):
inferred = utils.safe_infer(node)
if ((inferred is None) or (not isinstance(inferred.value, str))):
return True
try:
parsed = list(string.Formatter().parse(inferred.value))
except ValueError:
return False
for (_, _, format_spec, _) in parsed:
... | Checks if node represents a string with complex formatting specs.
Args:
node (astroid.node_classes.NodeNG): AST node to check
Returns:
bool: True if inferred string uses complex formatting, False otherwise | codesearchnet |
def convert_matmul(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting matmul ...')
if (names == 'short'):
tf_name = ('MMUL' + random_string(4))
elif (names == 'keep'):
tf_name = w_name
else:
tf_name = (w_name + str(random.random()))
if (len(inputs)... | Convert matmul layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | codesearchnet |
def __init__(self, name, default=None, help=None, type=str):
    """Initialise the workflow option.

    Args:
        name (str): The name of the option under which the value will be stored.
        default: The default value that should be used when no value is
            specified. Set to None to make this a non-optional option.
        help (str): A short help string for this option.
        type: The type of the option.
    """
    self._name, self._default = name, default
    self._help, self._type = help, type
def median(self, **kwargs):
    """Returns median of each column or row.

    Returns:
        A new QueryCompiler object containing the median of each column or row.
    """
    # A transposed frame is handled by flipping the axis and delegating
    # to the non-transposed representation.
    if self._is_transposed:
        kwargs['axis'] = kwargs.get('axis', 0) ^ 1
        return self.transpose().median(**kwargs)
    reduce_axis = kwargs.get('axis', 0)
    map_func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
    return self._full_axis_reduce(reduce_axis, map_func)
def _execute_command(self, key, *args):
    """Execute a Redis command on the appropriate Redis shard based on key.

    Args:
        key: The object ID or the task ID that the query is about.
        args: The command to run.

    Returns:
        The value returned by the Redis command.
    """
    # The key's shard hash picks which client owns this key's data.
    shard_index = key.redis_shard_hash() % len(self.redis_clients)
    return self.redis_clients[shard_index].execute_command(*args)
def _prepare_images_structure(self, images: ImageInput) -> ImageInput:
    """Prepare the images structure for processing.

    Args:
        images (`ImageInput`):
            The input images to process.

    Returns:
        `ImageInput`: The images with a valid nesting.
    """
    # Flatten any nested structure into a flat list of images.
    flat_images = make_flat_list_of_images(images)
    return flat_images
def send(url, data):
    """Sends an incoming message.

    Args:
        url(str): the incoming hook url
        data(dict): the sending data

    Returns:
        requests.Response
    """
    # Validate first so malformed payloads never reach the hook.
    validate(data)
    return requests.post(url, json=data)
def _format_value(cls, value, type_):
res = value
if type_ == 'CLASS':
res = '{}.{}'.format(value.__module__, value.__name__)
elif type_ == 'DURATION':
res = value.total_seconds() * 1000
elif type_ == 'TIMESTAMP':
res = calendar.timegm(value.timetuple()) * 1000 + value.microsecon... | Returns the API representation of a value given its type.
Args:
value: The value of the item that needs to be shortened.
type_(string): The type of the value.
Returns:
A formatted value in the form of a float, int, or string. | github-repos |
def read_config_info(ini_file):
try:
config = RawConfigParser()
config.optionxform = lambda option: option
config.read(ini_file)
the_stuff = {}
for section in config.sections():
the_stuff[section] = {}
for option in config.options(section):
... | Read the INI file
Args:
ini_file - path to the file
Returns:
A dictionary of stuff from the INI file
Exits:
1 - if problems are encountered | juraj-google-style |
def to_css(self):
    """Generate the CSS representation of this HSL color.

    Returns:
        str, ``"hsl(...)"`` or ``"hsla(...)"``
    """
    sat_pct = self.s * 100
    light_pct = self.l * 100
    # The alpha channel is only emitted when it is not fully opaque.
    if self.a != 1.0:
        return 'hsla(%d, %s%%, %s%%, %s)' % (self.h, sat_pct, light_pct, self.a)
    return 'hsl(%d, %s%%, %s%%)' % (self.h, sat_pct, light_pct)
def _get_file_iterator(self, file_obj):
    """For given `file_obj` return an iterator which reads the file in
    `self.read_bs` chunks.

    Args:
        file_obj (file): File-like object.

    Return:
        iterator: Iterator reading the file-like object in chunks.
    """
    file_obj.seek(0)

    def _read_chunk():
        # Rewound above; each call consumes the next `read_bs` characters.
        return file_obj.read(self.read_bs)

    # iter(callable, sentinel) stops once a read returns the empty string.
    return iter(_read_chunk, '')
def __init__(self, _args):
super(TcExLib, self).__init__(_args)
self.latest_version = None
self.lib_directory = 'lib_{}.{}.{}'.format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
self.requirements_file = 'requirements.tx... | Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace. | juraj-google-style |
def Open(self, hostname, port):
server_url = 'http:
try:
self._xmlrpc_proxy = xmlrpclib.ServerProxy(
server_url, allow_none=True)
except SocketServer.socket.error as exception:
logger.warning((
'Unable to connect to RPC server on {0:s}:{1:d} with error: '
'{2!... | Opens a RPC communication channel to the server.
Args:
hostname (str): hostname or IP address to connect to for requests.
port (int): port to connect to for requests.
Returns:
bool: True if the communication channel was established. | juraj-google-style |
def infer(query, replacements=None, root_type=None, libs=('stdcore', 'stdmath')):
if root_type:
type_scope = scope.ScopeStack(std_core.MODULE, root_type)
else:
type_scope = scope.ScopeStack(std_core.MODULE)
stdcore_included = False
for lib in libs:
if (lib == 'stdcore'):
... | Determine the type of the query's output without actually running it.
Arguments:
query: A query object or string with the query.
replacements: Built-time parameters to the query, either as dict or as
an array (for positional interpolation).
root_type: The types of variables to be supplied to the query inference.
libs:... | codesearchnet |
def step_interpolation(x, xp, fp, **kwargs):
del kwargs
xp = np.expand_dims(xp, (- 1))
(lower, upper) = (xp[:(- 1)], xp[1:])
conditions = ((x >= lower) & (x < upper))
conditions = np.concatenate([[(x < xp[0])], conditions, [(x >= xp[(- 1)])]])
values = np.concatenate([[fp[0]], fp])
assert np... | Multi-dimensional step interpolation.
Returns the multi-dimensional step interpolant to a function with
given discrete data points (xp, fp), evaluated at x.
Note that *N and *M indicate zero or more dimensions.
Args:
x: An array of shape [*N], the x-coordinates of the interpolated values.
xp: An np.array of shape [D... | codesearchnet |
def AddStop(self, lat, lng, name, stop_id=None):
    """Add a stop to this schedule.

    Args:
        lat: Latitude of the stop as a float or string
        lng: Longitude of the stop as a float or string
        name: Name of the stop, which will appear in the feed
        stop_id: stop_id of the stop or None, in which case a unique id is picked

    Returns:
        A new Stop object
    """
    if stop_id is None:
        # No id supplied -- pick one that is unique among the current stops.
        stop_id = util.FindUniqueId(self.stops)
    new_stop = self._gtfs_factory.Stop(stop_id=stop_id, lat=lat, lng=lng, name=name)
    self.AddStopObject(new_stop)
    return new_stop
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
if not masks.shape[0] == scores.shape[0] == labels.shape[0]:
raise ValueError('mask, scores and labels must have the same shape!')
to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)
return (masks... | Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
`labels`.
Args:
masks (`torch.Tensor`):
A tensor of shape `(num_queries, height, width)`.
scores (`torch.Tensor`):
A tensor of shape `(num_queries)`.
labels (`torch.Tensor`):
A tensor of shape `(num_querie... | github-repos |
def make_color_wheel(bins=None):
if bins is None:
bins = [15, 6, 4, 11, 13, 6]
assert len(bins) == 6
RY, YG, GC, CB, BM, MR = tuple(bins)
ry = [1, np.arange(RY) / RY, 0]
yg = [1 - np.arange(YG) / YG, 1, 0]
gc = [0, 1, np.arange(GC) / GC]
cb = [0, 1 - np.arange(CB) / CB, 1]
... | Build a color wheel.
Args:
bins(list or tuple, optional): Specify the number of bins for each
color range, corresponding to six ranges: red -> yellow,
yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
(see Middlebury).
Returns:
ndarray: Color whee... | juraj-google-style |
def install(device: AndroidDevice, apk_path: str, timeout: int=DEFAULT_TIMEOUT_INSTALL_APK_SEC, user_id: Optional[int]=None, params: Optional[Iterable[str]]=None) -> None:
android_api_version = int(device.build_info['build_version_sdk'])
if user_id is not None and android_api_version < 24:
raise ValueEr... | Install an apk on an Android device.
Installing apk is more complicated than most people realize on Android.
This is just a util for the most common use cases. If you need special logic
beyond this, we recommend you write your own instead of modifying this.
Args:
device: AndroidDevice, Mobly's Android controller objec... | github-repos |
def __init__(self, *nodes, depth=0):
self.edges = set()
vertices = []
matrix = Matrix(*nodes, depth=depth)
for key in matrix.keys:
vertices.append(Vertex(key))
for l, line in enumerate(matrix.data):
for c, cell in enumerate(line):
... | Initialization method.
An intermediary matrix is built to ease the creation of the graph.
Args:
*nodes (list of DSM/Package/Module):
the nodes on which to build the graph.
depth (int): the depth of the intermediary matrix. See
the documentation for Matrix class. | juraj-google-style |
def clean_file(c_source, virtualenv_dirname):
with open(c_source, "r") as file_obj:
contents = file_obj.read().rstrip()
py_version = "python{}.{}".format(*sys.version_info[:2])
lib_path = os.path.join(
".nox", virtualenv_dirname, "lib", py_version, "site-packages", ""
)
con... | Strip trailing whitespace and clean up "local" names in C source.
These source files are autogenerated from the ``cython`` CLI.
Args:
c_source (str): Path to a ``.c`` source file.
virtualenv_dirname (str): The name of the ``virtualenv``
directory where Cython is installed (this is part of a
relative path ``.nox/{NAME... | juraj-google-style |
def compute_advantages(rollout, last_r, gamma=0.9, lambda_=1.0, use_gae=True):
traj = {}
trajsize = len(rollout[SampleBatch.ACTIONS])
for key in rollout:
traj[key] = np.stack(rollout[key])
if use_gae:
assert SampleBatch.VF_PREDS in rollout, "Values not found!"
vpred_t = np... | Given a rollout, compute its value targets and the advantage.
Args:
rollout (SampleBatch): SampleBatch of a single trajectory
last_r (float): Value estimation for last observation
gamma (float): Discount factor.
lambda_ (float): Parameter for GAE
use_gae (bool): Using Generalized Advantage Estamation
Returns:
SampleB... | juraj-google-style |
def _check_expiration(self, url: str, data: 'SavedEndpoint') -> 'SavedEndpoint':
    """Checks the expiration time for data for a url.

    If the data has expired, it is deleted from the cache.

    Args:
        url: url to check
        data: page of data for that url

    Returns:
        value of either the passed data or None if it expired
    """
    # Still fresh: hand the cached entry straight back.
    if time.time() <= data.expires_after:
        return data
    # Expired: drop the stale entry and report a cache miss.
    del self.data[url]
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.