code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def fill_dataset_tree(self, tree, data_sets):
tree.model().removeRows(0, tree.model().rowCount())
for index, (time, script) in enumerate(data_sets.items()):
name = script.settings['tag']
type = script.name
item_time = QtGui.QStandardItem(str(time))
... | fills the tree with data sets where datasets is a dictionary of the form
Args:
tree:
data_sets: a dataset
Returns: | juraj-google-style |
def clamp(value, maximum=None):
value = max(value, 0)
if (maximum is not None):
return min(value, maximum)
else:
return value | Clamp numeric values to be non-negative, an optionally, less than a
given maximum.
Args:
value (float) :
A number to clamp.
maximum (float, optional) :
A max bound to to clamp to. If None, there is no upper bound,
and values are only clamped to be non-negative. (default: None)
Returns:
float | codesearchnet |
def get_string(self, significant_figures=6):
ph = ('{:.%df}' % significant_figures)
lines = []
for (bound, d) in zip(self.bounds, 'xyz'):
fillers = (bound + ([d] * 2))
bound_format = ' '.join((([ph] * 2) + [' {}lo {}hi']))
lines.append(bound_format.format(*fillers))
if self.tilt:... | Returns the string representation of simulation box in LAMMPS
data file format.
Args:
significant_figures (int): No. of significant figures to
output for box settings. Default to 6.
Returns:
String representation | codesearchnet |
def iso_date(d) -> str:
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return ... | Return iso format of a date
Args:
d:
Returns: str | juraj-google-style |
def get_config(self):
raise NotImplementedError(f'{self} does not implement get_config()') | Returns the config of the quantizer.
A quantizer config is a Python dictionary (serializable)
containing all configuration parameters of the quantizer.
The same quantizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing model... | github-repos |
def predict_on_batch(self, x):
raise NotImplementedError | Returns predictions for a single batch of samples.
Args:
x: Input data. It must be array-like.
Returns:
NumPy array(s) of predictions. | github-repos |
def _read_hip_para(self, length, *, version):
counter = 0
optkind = list()
options = dict()
while (counter < length):
kind = self._read_binary(2)
if (not kind):
break
code = int(kind, base=2)
cbit = (True if int(kind[15], base=2) else False)
clen = sel... | Read HIP parameters.
Positional arguments:
* length -- int, length of parameters
Keyword arguments:
* version -- int, HIP version
Returns:
* dict -- extracted HIP parameters | codesearchnet |
def MatchBestComponentName(self, component):
fd = self.OpenAsContainer()
file_listing = set(fd.ListNames())
if component not in file_listing:
lower_component = component.lower()
for x in file_listing:
if lower_component == x.lower():
component = x
... | Returns the name of the component which matches best our base listing.
In order to do the best case insensitive matching we list the files in the
base handler and return the base match for this component.
Args:
component: A component name which should be present in this directory.
Returns:
the best component name. | juraj-google-style |
def _update_record(self, identifier, rtype=None, name=None, content=None):
if (identifier is not None):
identifier = int(identifier)
records = self._list_records_internal(identifier=identifier)
else:
records = self._list_records_internal(name=name, rtype=rtype)
LOGGER.debug('Records ... | Update a DNS entry identified by identifier or name in the domain zone.
Any non given argument will leave the current value of the DNS entry.
Args:
identifier (str): The easyname id of the DNS entry to update.
[rtype] (str): The DNS rtype (e.g. A, TXT, MX, etc) of the new entry.
[name] (str): The name of the new DNS e... | codesearchnet |
def unescape(cls, text: str) -> str:
chop = text.split("\\", 1)
try:
return (chop[0] if len(chop) == 1
else chop[0] + cls.unescape_map[chop[1][0]] +
cls.unescape(chop[1][1:]))
except KeyError:
raise InvalidArgument(text) fr... | Replace escape sequence with corresponding characters.
Args:
text: Text to unescape. | juraj-google-style |
def _ImageDimensions(image, rank):
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = array_ops_stack.unstack(array_ops.shape(image), rank)
return [s if s is not None else d ... | Returns the dimensions of an image tensor.
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list of corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise, they are integer sc... | github-repos |
def GetFileAndLine(component):
if inspect.isbuiltin(component):
return (None, None)
try:
filename = inspect.getsourcefile(component)
except TypeError:
return (None, None)
try:
unused_code, lineindex = inspect.findsource(component)
lineno = lineindex + 1
except... | Returns the filename and line number of component.
Args:
component: A component to find the source information for, usually a class
or routine.
Returns:
filename: The name of the file where component is defined.
lineno: The line number where component is defined. | github-repos |
def strip_prefix_from_items(prefix, items):
items_no_prefix = []
for item in items:
if item.startswith(prefix):
items_no_prefix.append(item[len(prefix):])
else:
items_no_prefix.append(item)
return items_no_prefix | Strips out the prefix from each of the items if it is present.
Args:
prefix: the string for that you wish to strip from the beginning of each
of the items.
items: a list of strings that may or may not contain the prefix you want
to strip out.
Returns:
items_no_prefix: a copy of the list of items (same order) without ... | codesearchnet |
def encode(self, s):
if s.endswith(".mp3"):
out_filepath = s[:-4] + ".wav"
call([
"sox", "--guard", s, "-r", "16k", "-b", "16", "-c", "1", out_filepath
])
s = out_filepath
elif not s.endswith(".wav"):
out_filepath = s + ".wav"
if not os.pa... | Transform a string with a filename into a list of float32.
Args:
s: path to the file with a waveform.
Returns:
samples: list of int16s | juraj-google-style |
def start(self, name: str, increment_count: bool=True) -> None:
if (not self._timing):
return
now = get_now_utc_pendulum()
if self._stack:
last = self._stack[(- 1)]
self._totaldurations[last] += (now - self._starttimes[last])
if (name not in self._starttimes):
self._total... | Start a named timer.
Args:
name: name of the timer
increment_count: increment the start count for this timer | codesearchnet |
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False | Initialize ChunkedRFile instance.
Args:
rfile (file): file encoded with the 'chunked' transfer encoding
maxlen (int): maximum length of the file being read
bufsize (int): size of the buffer used to read the file | juraj-google-style |
def collect_members(module_to_name):
members = {}
for (module, module_name) in module_to_name.items():
all_names = getattr(module, '__all__', None)
for (name, member) in inspect.getmembers(module):
if ((inspect.isfunction(member) or inspect.isclass(member)) and (not _always_drop_symb... | Collect all symbols from a list of modules.
Args:
module_to_name: Dictionary mapping modules to short names.
Returns:
Dictionary mapping name to (fullname, member) pairs. | codesearchnet |
def delete_tag(self, tag_name, **kwargs):
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), **kwargs)
resp.raise_for_status()
return resp | delete a tag by name
Args:
tag_name (string): name of tag to delete | codesearchnet |
def _normalize_hparams(hparams):
result = {}
for (k, v) in six.iteritems(hparams):
if isinstance(k, HParam):
k = k.name
if k in result:
raise ValueError("multiple values specified for hparam %r" % (k,))
result[k] = v
return result | Normalize a dict keyed by `HParam`s and/or raw strings.
Args:
hparams: A `dict` whose keys are `HParam` objects and/or strings
representing hyperparameter names, and whose values are
hyperparameter values. No two keys may have the same name.
Returns:
A `dict` whose keys are hyperparameter names (as strings) and whose... | juraj-google-style |
def _GetDateTime(self, filetime):
if (filetime == 0):
return dfdatetime_semantic_time.SemanticTime('Not set')
return dfdatetime_filetime.Filetime(timestamp=filetime) | Retrieves the date and time from a FILETIME timestamp.
Args:
filetime (int): FILETIME timestamp.
Returns:
dfdatetime.DateTimeValues: date and time. | codesearchnet |
def list(self, pattern='*'):
if (self._descriptors is None):
self._descriptors = self._client.list_metric_descriptors(filter_string=self._filter_string, type_prefix=self._type_prefix)
return [metric for metric in self._descriptors if fnmatch.fnmatch(metric.type, pattern)] | Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters. | codesearchnet |
def byte_adaptor(fbuffer):
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer | provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer | juraj-google-style |
def check(self, dsm, simplicity_factor=2, **kwargs):
economy_of_mechanism = False
message = ''
data = dsm.data
categories = dsm.categories
dsm_size = dsm.size[0]
if (not categories):
categories = (['appmodule'] * dsm_size)
dependency_number = 0
for i in range(0, dsm_size):
... | Check economy of mechanism.
As first abstraction, number of dependencies between two modules
< 2 * the number of modules
(dependencies to the framework are NOT considered).
Args:
dsm (:class:`DesignStructureMatrix`): the DSM to check.
simplicity_factor (int): simplicity factor.
Returns:
bool: True if economic, else ... | codesearchnet |
def open_shards(glob_pattern, mode='rt', encoding='utf-8'):
if 'b' in mode:
encoding = None
with tempfile.NamedTemporaryFile(delete=False) as out_file:
for shard in glob.glob(glob_pattern):
with open(shard, 'rb') as in_file:
out_file.write(in_file.read())
conc... | Returns a composite file of all shards matching the given glob pattern.
Args:
glob_pattern (str): Pattern used to match files which should be opened.
mode (str): Specify the mode in which the file should be opened. For
available modes, check io.open() documentation.
encoding (str): Name of the encoding used to decode ... | github-repos |
def __cloudflare_list_zone_records(self, *, account, zoneID, **kwargs):
done = False
records = {}
page = 1
while not done:
kwargs['page'] = page
response = self.__cloudflare_request(
account=account,
path='/zones/{}/dns_re... | Helper function to list all records on a CloudFlare DNS Zone. Returns a `dict` containing the records and
their information.
Args:
account (:obj:`CloudFlareAccount`): A CloudFlare Account object
zoneID (`int`): Internal CloudFlare ID of the DNS zone
**kwargs (`dict`): Additional arguments to be consumed by the API end... | juraj-google-style |
def ParseSMS(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
phone_number = self._GetRowValue(query_hash, row, 'dstnum_sms')
if phone_number:
phone_number = phone_number.replace(' ', '')
event_data = SkypeSMSEventData()
event_data.number = phone_number
... | Parses an SMS.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query. | juraj-google-style |
def _orthogonal_matrix(self, n):
a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
if self.seed:
self.seed += 1
q, r = gen_linalg_ops.qr(a)
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
return q | Construct an n x n orthogonal matrix.
Args:
n: Dimension.
Returns:
A n x n orthogonal matrix. | github-repos |
def where(condition, x1=None, x2=None):
if x1 is None and x2 is not None or (x1 is not None and x2 is None):
raise ValueError('`x1` and `x2` either both should be `None` or both should have non-None value.')
if any_symbolic_tensors((condition, x1, x2)):
return Where().symbolic_call(condition, x1... | Return elements chosen from `x1` or `x2` depending on `condition`.
Args:
condition: Where `True`, yield `x1`, otherwise yield `x2`.
x1: Values from which to choose when `condition` is `True`.
x2: Values from which to choose when `condition` is `False`.
Returns:
A tensor with elements from `x1` where `condition` is `T... | github-repos |
def cartesian(self,subsets=None,step_pixels=100,max_distance_pixels=150,*args,**kwargs):
n = Cartesian.read_cellframe(self,subsets=subsets,step_pixels=step_pixels,max_distance_pixels=max_distance_pixels,prune_neighbors=False,*args,**kwargs)
if 'measured_regions' in kwargs: n.measured_regions = ... | Return a class that can be used to create honeycomb plots
Args:
subsets (list): list of SubsetLogic objects
step_pixels (int): distance between hexagons
max_distance_pixels (int): the distance from each point by which to caclulate the quanitty of the phenotype for that area
Returns:
Cartesian: returns a class that ho... | juraj-google-style |
def categorize(self, categories, default=None):
return dim(self, categorize, categories=categories, default=default) | Replaces discrete values with supplied categories
Replaces discrete values in input array into a fixed set of
categories defined either as a list or dictionary.
Args:
categories: List or dict of categories to map inputs to
default: Default value to assign if value not in categories | juraj-google-style |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_stream = utils.BytearrayStream()
if len(self._credentials) == 0:
raise ValueError("Authentication struct missing credentials.")
for credential in self._credentials:
credential.write(l... | Write the data encoding the Authentication struct to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
d... | juraj-google-style |
def getTickTock(self, vals):
val0, val1 = vals
try:
_tick = self._getLiftValu(val0)
except ValueError as e:
raise s_exc.BadTypeValu(name=self.name, valu=val0,
mesg='Unable to process the value for val0 in getTickTock.')
... | Get a tick, tock time pair.
Args:
vals (list): A pair of values to norm.
Returns:
(int, int): A ordered pair of integers. | juraj-google-style |
async def count(self, text, opts=None):
i = 0
async for _ in self.cell.eval(text, opts=opts, user=self.user):
i += 1
return i | Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
(int): The number of nodes resulting from the query. | codesearchnet |
def rpow(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"rpow", other, axis=axis, level=level, fill_value=fill_value
) | Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied. | juraj-google-style |
def roc_auc_score(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_pred: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
try:
return sklearn.metrics.roc_auc_score(np.squeeze(np.array(y_true)),
np.squeeze(np.... | Compute Area Under the Curve (AUC) from prediction scores.
Args:
y_true: true binary labels
y_pred: target scores, can either be probability estimates of the positive class
Returns:
Area Under the Curve (AUC) from prediction scores | juraj-google-style |
def get_backend_engine(self, name, **kwargs):
if name not in self._engines:
msg = "Given settings backend is unknowed: {}"
raise SettingsBackendError(msg.format(name))
return self._engines[name](**kwargs) | Get backend engine from given name.
Args:
(string): Path to validate.
Raises:
boussole.exceptions.SettingsBackendError: If given backend name
does not match any available engine.
Returns:
object: Instance of selected backend engine. | juraj-google-style |
def get_sample_window(self, type_tag, size=10):
size = size * 1024 * 1024
cursor = self.database[self.sample_collection].find({'type_tag': type_tag},
{'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)
total_size = 0
md5_list = []
... | Get a window of samples not to exceed size (in MB).
Args:
type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).
size: Size of samples in MBs.
Returns:
a list of md5s. | juraj-google-style |
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False, rotate_manager_token=False):
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={'rotateWorkerToken': rotate_worker_token, 'rotateManagerToken': rotate_manager_token, 'version': version})
self._... | Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``N... | codesearchnet |
async def verify_docker_worker_task(chain, link):
if chain != link:
check_interactive_docker_worker(link)
verify_docker_image_sha(chain, link) | Docker-worker specific checks.
Args:
chain (ChainOfTrust): the chain we're operating on
link (ChainOfTrust or LinkOfTrust): the trust object for the signing task.
Raises:
CoTError: on failure. | juraj-google-style |
def all(self, data={}, **kwargs):
return super(Subscription, self).all(data, **kwargs) | Fetch all Subscription entities
Returns:
Dictionary of Subscription data | codesearchnet |
def reduce_by(self, package_request):
if self.pr:
reqstr = _short_req_str(package_request)
self.pr.passive('reducing %s wrt %s...', self, reqstr)
if self.solver.optimised:
if (package_request in self.been_reduced_by):
return (self, [])
if ((package_request.range is None) ... | Remove variants whos dependencies conflict with the given package
request.
Returns:
(VariantSlice, [Reduction]) tuple, where slice may be None if all
variants were reduced. | codesearchnet |
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats', force_rerun=False):
outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile, outdir=outdir, outext=outext)
program = 'pepstats'
pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
... | Run EMBOSS pepstats on a FASTA file.
Args:
infile: Path to FASTA file
outfile: Name of output file without extension
outdir: Path to output directory
outext: Extension of results file, default is ".pepstats"
force_rerun: Flag to rerun pepstats
Returns:
str: Path to output file. | juraj-google-style |
def GetApprovalForObject(object_urn, token=None, username=""):
if token is None:
raise access_control.UnauthorizedAccess(
"No token given, cannot authenticate.")
if not username:
username = token.username
approvals_root_urn = aff4.ROOT_URN.Add("ACL").Add(
object_urn.Path... | Looks for approvals for an object and returns available valid tokens.
Args:
object_urn: Urn of the object we want access to.
token: The token to use to lookup the ACLs.
username: The user to get the approval for, if "" we get it from the
token.
Returns:
A token for access to the object on success, otherwise raises.... | juraj-google-style |
def checksum1(data, stringlength):
value_buffer = 0
for count in range(0, stringlength):
value_buffer = value_buffer ^ data[count]
return value_buffer&0xFE | Calculate Checksum 1
Calculate the ckecksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1 | juraj-google-style |
def from_json(cls, json):
if json[cls.KEY_RANGE_PARAM] is None:
key_ranges = None
else:
key_ranges = []
for k in json[cls.KEY_RANGE_PARAM]:
if k:
key_ranges.append(key_range.KeyRange.from_json(k))
else:
key_ranges.append(None)
if json[cls.NA... | Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json. | juraj-google-style |
def substitute(self, var_map):
if (self in var_map):
return var_map[self]
return self._substitute(var_map) | Substitute sub-expressions
Args:
var_map (dict): Dictionary with entries of the form
``{expr: substitution}`` | codesearchnet |
def initialize(self):
if ops.executing_eagerly_outside_functions():
self._iterator._eager_reset()
return []
else:
return [self._iterator.initializer] | Initialize underlying iterator.
In eager execution, this simply recreates the underlying iterator.
In graph execution, it returns the initializer ops for the underlying
iterator.
Returns:
A list of any initializer ops that should be run. | github-repos |
def get_catalog_results(self, content_filter_query, query_params=None, traverse_pagination=False):
query_params = query_params or {}
try:
endpoint = getattr(self.client, self.SEARCH_ALL_ENDPOINT)
response = endpoint().post(data=content_filter_query, **query_params)
... | Return results from the discovery service's search/all endpoint.
Arguments:
content_filter_query (dict): query parameters used to filter catalog results.
query_params (dict): query parameters used to paginate results.
traverse_pagination (bool): True to return all results, False to return the paginated response.
Defau... | juraj-google-style |
def samefile(path1, path2):
(path1, path1_is_storage) = format_and_is_storage(path1)
(path2, path2_is_storage) = format_and_is_storage(path2)
if ((not path1_is_storage) and (not path2_is_storage)):
return os_path_samefile(path1, path2)
if ((not path1_is_storage) or (not path2_is_storage)):
... | Return True if both pathname arguments refer to the same file or directory.
Equivalent to "os.path.samefile".
Args:
path1 (path-like object): Path or URL.
path2 (path-like object): Path or URL.
Returns:
bool: True if same file or directory. | codesearchnet |
def from_file(cls, source, distance_weights=None, merge_same_words=False, group_marker_opening='<<', group_marker_closing='>>'):
source_string = open(source, 'r').read()
return cls.from_string(source_string, distance_weights, merge_same_words, group_marker_opening=group_marker_opening, group_marker_closing=grou... | Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with... | codesearchnet |
def schedule(self, function, args, kwargs):
closure = Closure(function, self.closure_queue._cancellation_mgr, args=args, kwargs=kwargs)
ret = closure.build_output_remote_value()
self.closure_queue.put(closure)
return ret | Schedules `function` to be dispatched to a worker for execution.
Args:
function: The function to be dispatched to a worker for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `RemoteValue` object. | github-repos |
def get_organisation(self, id, name=None):
return self.create_organisation(dict(id=id, name=name)) | Get an organisation
Returns:
Organisation: The organisation with the given `id` | codesearchnet |
def _split_list_into_bundles(self, output_pcollection, elements, max_element_per_bundle, element_size_fn):
bundle = self._evaluation_context.create_bundle(output_pcollection)
bundle_size = 0
bundles = [bundle]
for element in elements:
if max_element_per_bundle and bundle_size >= max_element_per_... | Splits elements, an iterable, into multiple output bundles.
Args:
output_pcollection: PCollection that the elements belong to.
elements: elements to be chunked into bundles.
max_element_per_bundle: (approximately) the maximum element per bundle.
If it is None, only a single bundle will be produced.
element_size_fn: Fu... | github-repos |
def metadata(self, url):
_, path = self._parse_url(url)
status = self._hdfs_client.status(path, strict=False)
if status is None:
raise BeamIOError('File not found: %s' % url)
return FileMetadata(url, status[_FILE_STATUS_LENGTH], status[_FILE_STATUS_UPDATED] / 1000.0) | Fetch metadata fields of a file on the FileSystem.
Args:
url: string url of a file.
Returns:
:class:`~apache_beam.io.filesystem.FileMetadata`.
Raises:
``BeamIOError``: if url doesn't exist. | github-repos |
def exportUsufy(data, ext, fileH):
if (ext == 'csv'):
usufyToCsvExport(data, ((fileH + '.') + ext))
elif (ext == 'gml'):
usufyToGmlExport(data, ((fileH + '.') + ext))
elif (ext == 'json'):
usufyToJsonExport(data, ((fileH + '.') + ext))
elif (ext == 'ods'):
usufyToOdsExpor... | Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, excel, json, ods.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter. | codesearchnet |
def CreateTask(self, session_identifier):
task = tasks.Task(session_identifier)
logger.debug('Created task: {0:s}.'.format(task.identifier))
with self._lock:
self._tasks_queued[task.identifier] = task
self._total_number_of_tasks += 1
self.SampleTaskStatus(task, 'created')
retur... | Creates a task.
Args:
session_identifier (str): the identifier of the session the task is
part of.
Returns:
Task: task attribute container. | juraj-google-style |
def delete(self, key):
key = self._service_key(key)
self._service_ops['delete'](key) | Removes the object named by `key` in `service`.
Args:
key: Key naming the object to remove. | codesearchnet |
def build_all_reduce_device_prefixes(job_name, num_tasks):
if (job_name != 'localhost'):
return [('/job:%s/task:%d' % (job_name, d)) for d in range(0, num_tasks)]
else:
assert (num_tasks == 1)
return [('/job:%s' % job_name)] | Build list of device prefix names for all_reduce.
Args:
job_name: "worker", "ps" or "localhost".
num_tasks: number of jobs across which device names should be generated.
Returns:
A list of device name prefix strings. Each element spells out the full
host name without adding the device.
e.g. "/job:worker/task:0" | codesearchnet |
def get_package(name, version, paths=None):
if isinstance(version, basestring):
range_ = VersionRange("==%s" % version)
else:
range_ = VersionRange.from_version(version, "==")
it = iter_packages(name, range_, paths)
try:
return it.next()
except StopIteration:
re... | Get an exact version of a package.
Args:
name (str): Name of the package, eg 'maya'.
version (Version or str): Version of the package, eg '1.0.0'
paths (list of str, optional): paths to search for package, defaults
to `config.packages_path`.
Returns:
`Package` object, or None if the package was not found. | juraj-google-style |
def new_cells(self, name=None, formula=None):
return self._impl.new_cells(name, formula).interface | Create a cells in the space.
Args:
name: If omitted, the model is named automatically ``CellsN``,
where ``N`` is an available number.
func: The function to define the formula of the cells.
Returns:
The new cells. | juraj-google-style |
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params) | Gets the specified model resource by model ID.
Args:
request: (BigqueryModelsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Model) The response message. | github-repos |
def _full_reduce(self, axis, map_func, reduce_func=None):
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.c... | Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
Return:
A new QueryCompiler object containing the results f... | juraj-google-style |
def swd_read16(self, offset):
value = self._dll.JLINK_SWD_GetU16(offset)
return ctypes.c_uint16(value).value | Gets a unit of ``16`` bits from the input buffer.
Args:
self (JLink): the ``JLink`` instance
offset (int): the offset (in bits) from which to start reading
Returns:
The integer read from the input buffer. | juraj-google-style |
def slice(inputs, start_indices, shape):
if any_symbolic_tensors((inputs, start_indices)):
return Slice(shape=shape).symbolic_call(inputs, start_indices)
return backend.core.slice(inputs, start_indices, shape) | Return a slice of an input tensor.
At a high level, this operation is an explicit replacement for array slicing
e.g. `inputs[start_indices: start_indices + shape]`.
Unlike slicing via brackets, this operation will accept tensor start
indices on all backends, which is useful when indices dynamically computed
via other ... | github-repos |
def get_usb_serial(self, port_num):
port = self.port_map[str(port_num)]
arg = ''.join(['DEVICE INFO,', self._addr, '.', port])
cmd = ['esuit64', '-t', arg]
info = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
serial = None
if ('SERIAL' in info):
serial_info = info.split('SERIAL:... | Get the device serial number
Args:
port_num: port number on the Cambrionix unit
Return:
usb device serial number | codesearchnet |
def string_to_scopes(scopes):
if not scopes:
return []
elif isinstance(scopes, six.string_types):
return scopes.split(' ')
else:
return scopes | Converts stringifed scope value to a list.
If scopes is a list then it is simply passed through. If scopes is an
string then a list of each individual scope is returned.
Args:
scopes: a string or iterable of strings, the scopes.
Returns:
The scopes in a list. | juraj-google-style |
def make_single_array(ds, batch_size=(8 * 1024)):
if (isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple)):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if (nshapes > 0):
raise ValueError('Dataset must be comprised of sca... | Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
... | codesearchnet |
def na_if(series, *values):
series = pd.Series(series)
series[series.isin(values)] = np.nan
return series | If values in a series match a specified value, change them to `np.nan`.
Args:
series: Series or vector, often symbolic.
*values: Value(s) to convert to `np.nan` in the series. | codesearchnet |
def from_api_repr(cls, resource):
if (
"datasetReference" not in resource
or "datasetId" not in resource["datasetReference"]
):
raise KeyError(
"Resource lacks required identity information:"
'["datasetReference"]["datasetId"]'... | Factory: construct a dataset given its API representation
Args:
resource (Dict[str: object]):
Dataset resource representation returned from the API
Returns:
google.cloud.bigquery.dataset.Dataset:
Dataset parsed from ``resource``. | juraj-google-style |
def custom_apply(self, path: utils.KeyPath, value_spec: pg_typing.ValueSpec, allow_partial: bool, child_transform: Optional[Callable[[utils.KeyPath, pg_typing.Field, Any], Any]]=None) -> Tuple[bool, 'Dict']:
proceed_with_standard_apply = True
if self._value_spec:
if value_spec and (not value_spec.is_com... | Implement pg.typing.CustomTyping interface.
Args:
path: KeyPath of current object.
value_spec: Origin value spec of the field.
allow_partial: Whether allow partial object to be created.
child_transform: Function to transform child node values in dict_obj into
their final values. Transform function is called on leaf no... | github-repos |
def dict_of_sets_add(dictionary, key, value):
    """Add value to a set in a dictionary by key.

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to set in dictionary

    Returns:
        None
    """
    # setdefault replaces the original get-then-store round trip with a
    # single lookup while preserving behavior exactly.
    dictionary.setdefault(key, set()).add(value)
def _binary_assert_doc_v2(sym, opname, test_var):
def _decorator(func):
func.__doc__ = '\n Assert the condition `x {sym} y` holds element-wise.\n\n This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, ... | Common docstring for v2 assert_* ops that compare two tensors element-wise.
Args:
sym: Binary operation symbol, i.e. "=="
opname: Name for the symbol, i.e. "assert_equal"
test_var: A number used in the docstring example
Returns:
Decorator that adds the appropriate docstring to the function for
symbol `sym`. | github-repos |
def inspect_edge(G: AnalysisGraph, source: str, target: str):
    """'Drill down' into an edge in the analysis graph and inspect its
    provenance. This function prints the provenance.

    Args:
        G: the analysis graph containing the edge.
        source: source node of the edge.
        target: target node of the edge.
    """
    statements = G[source][target]["InfluenceStatements"]
    return create_statement_inspection_table(statements)
def __eq__(self, other):
    """Element-wise equality comparison against ``other``.

    Args:
        other (TYPE): value or series to compare against.

    Returns:
        TYPE: a boolean SeriesWeld holding the comparison result.
    """
    comparison = grizzly_impl.compare(
        self.expr,
        other,
        "==",
        self.weld_type,
    )
    return SeriesWeld(comparison, WeldBit(), self.df, self.column_name)
async def get_ticket(self, request):
    """Return the ticket stored in the session for a request.

    Args:
        request: aiohttp Request object.

    Returns:
        A ticket (string like) object, or None if no ticket is available
        for the passed request.
    """
    session = await get_session(request)
    return session.get(self.cookie_name)
def get_all_artifacts_per_task_id(chain, upstream_artifacts):
all_artifacts_per_task_id = {}
for link in chain.links:
if link.task_type in PARENT_TASK_TYPES:
add_enumerable_item_to_dict(
dict_=all_artifacts_per_task_id, key=link.task_id, item='public/task-graph.... | Return every artifact to download, including the Chain Of Trust Artifacts.
Args:
chain (ChainOfTrust): the chain of trust object
upstream_artifacts: the list of upstream artifact definitions
Returns:
dict: sorted list of paths to downloaded artifacts ordered by taskId | juraj-google-style |
def from_api_repr(cls, resource):
    """Factory: construct an access entry given its API representation.

    Args:
        resource (Dict[str, object]):
            Access entry resource representation returned from the API.

    Returns:
        google.cloud.bigquery.dataset.AccessEntry:
            Access entry parsed from ``resource``.

    Raises:
        ValueError:
            If the resource has more keys than ``role`` plus one
            entity-type/entity-id pair.
    """
    entry = resource.copy()
    role = entry.pop("role", None)
    # After removing "role", exactly one (entity_type, entity_id) pair
    # should remain.
    entity_type, entity_id = entry.popitem()
    if entry:
        raise ValueError("Entry has unexpected keys remaining.", entry)
    return cls(role, entity_type, entity_id)
def sent_request(self, value):
    """The sent_request property.

    Args:
        value (string): the property value.
    """
    matches_default = value == self._defaults['sentRequest']
    if matches_default and 'sentRequest' in self._values:
        # Setting the default while an override exists removes the override.
        del self._values['sentRequest']
    else:
        self._values['sentRequest'] = value
def filter_def_file(def_file: str, filter_file: str, filtered_file: str) -> None:
with open(filter_file, 'r', encoding='utf-8') as filter_file_handle:
filter_json: Dict[str, Any] = json.load(filter_file_handle)
inclusion_patterns: List[str] = filter_json['global'] + ['EXPORTS', '*;*']
incl_p... | Filters a windows .def file based on a filter .json.
Args:
def_file: The path to the input windows .def file.
filter_file: The path to the filter file (JSON format).
filtered_file: The path to the output filtered windows .def file. | github-repos |
def BuildAdGroupOperations(batch_job_helper, campaign_operations, number_of_adgroups=1):
adgroup_operations = [{'xsi_type': 'AdGroupOperation', 'operand': {'campaignId': campaign_operation['operand']['id'], 'id': batch_job_helper.GetId(), 'name': ('Batch Ad Group
return adgroup_operations | Builds the operations adding desired number of AdGroups to given Campaigns.
Note: When the AdGroups are created, they will have a different Id than those
generated here as a temporary Id. This is just used to identify them in the
BatchJobService.
Args:
batch_job_helper: a BatchJobHelper instance.
campaign_operations:... | codesearchnet |
def __init__(self, job, runner, options=None):
    """Initialize a new DataflowPipelineResult instance.

    Args:
        job: Job message from the Dataflow API. Could be :data:`None` if a
            job request was not sent to Dataflow service (e.g. template
            jobs).
        runner: DataflowRunner instance.
        options: presumably the pipeline options used to launch the job --
            TODO confirm with callers; only stored here, never inspected.
    """
    self._job = job
    self._runner = runner
    self._options = options
    # Populated later when metrics are queried; None until then.
    self.metric_results = None
def create_autocast_variable(variable):
if not distributed_training_utils.is_distributed_variable(variable):
return AutoCastVariable(variable)
class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):
def __repr__(self):
return '<AutoCastDistributedVariable ... | Creates an AutoCastVariable that wraps another variable.
This typically just returns `AutoCastVariable(variable)`. But, if the variable
is a DistributedVariable or one of its subclasses, we instead dynamically
create a class that subclasses from both AutoCastVariable and
variable.__class__. This is so the returned var... | github-repos |
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True):
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = ([0] * max_order)
possible_matches_by_order = ([0] * max_order)
precisions = []
for (references, translations) in zip(ref... | Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order:... | codesearchnet |
def _ring_2d(m, n):
if (m == 1):
return [(0, i) for i in range(n)]
if (n == 1):
return [(i, 0) for i in range(m)]
if ((m % 2) != 0):
tf.logging.warning('Odd dimension')
return [((i % m), (i
ret = [(0, 0)]
for i in range((m
for j in range(1, n):
r... | Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs | codesearchnet |
def to_json_string(self, use_diff: bool=True) -> str:
    """Serializes this instance to a JSON string.

    Args:
        use_diff (`bool`, *optional*, defaults to `True`):
            If set to `True`, only the difference between the config
            instance and the default `PretrainedConfig()` is serialized to
            JSON string.

    Returns:
        `str`: String containing all the attributes that make up this
        configuration instance in JSON format.
    """
    # Truthiness test instead of `is True`, so any truthy flag selects the
    # diff representation, consistent with the annotated bool contract.
    config_dict = self.to_diff_dict() if use_diff else self.to_dict()
    return json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
def get_aligned_output_features_output_indices(out_features: Optional[list[str]], out_indices: Optional[Union[list[int], tuple[int]]], stage_names: list[str]) -> tuple[list[str], list[int]]:
out_indices = list(out_indices) if out_indices is not None else None
verify_out_features_out_indices(out_features=out_fea... | Get the `out_features` and `out_indices` so that they are aligned.
The logic is as follows:
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
`out_indices`.
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding t... | github-repos |
def get_output_embeddings(self) -> nn.Module:
    """Returns the model's output embeddings.

    Returns:
        `nn.Module`: A torch module mapping hidden states to vocabulary.
        This base implementation returns None; models with output
        embeddings are expected to override it.
    """
    return None
def bulk_insert(self, rows, return_model=False):
if (self.conflict_target or self.conflict_action):
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for (r, k) in zip(rows, objs)]
... | Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: Fals... | codesearchnet |
def get_header(message, name):
    """Return a mail header decoded with the correct charset.

    Args:
        message (email.message.Message): email message object
        name (string): header to get

    Returns:
        decoded header, or an empty unicode string when the header is
        missing or empty.
    """
    header = message.get(name)
    log.debug('Getting header {!r}: {!r}'.format(name, header))
    if not header:
        return six.text_type()
    return decode_header_part(header)
def flip_variable(self, v):
try:
idx = self.variables.index(v)
except ValueError:
raise ValueError('variable {} is not a variable in constraint {}'.format(v, self.name))
if (self.vartype is dimod.BINARY):
original_func = self.func
def func(*args):
new_args = list... | Flip a variable in the constraint.
Args:
v (variable):
Variable in the constraint to take the complementary value of its
construction value.
Examples:
This example creates a constraint that :math:`a = b` on binary variables
and flips variable a.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_fu... | codesearchnet |
def add(self, other_op):
    """Combines `other_op` with the operation held by this aggregator.

    N.B. It merges the operation's log entries and metric values, but makes
    the assumption the operation is consistent. It's the caller's
    responsibility to ensure consistency.

    Args:
        other_op: the operation message to fold into this aggregator's
            operation.
    """
    self._op.logEntries.extend(other_op.logEntries)
    self._merge_timestamps(other_op)
    self._merge_metric_values(other_op)
def ParsePageVisitedRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
query_hash = hash(query)
from_visit = self._GetRowValue(query_hash, row, 'from_visit')
hidden = self._GetRowValue(query_hash, row, 'hidden')
rev_host = self._GetRowValue(query_hash, row, 'rev_host')
... | Parses a page visited row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database. | codesearchnet |
def AddEvent(self, event):
self._RaiseIfNotWritable()
event_data_identifier = event.GetEventDataIdentifier()
if event_data_identifier:
if not isinstance(event_data_identifier, identifiers.SQLTableIdentifier):
raise IOError('Unsupported event data identifier type: {0:s}'.format(... | Adds an event.
Args:
event (EventObject): event.
Raises:
IOError: when the storage file is closed or read-only or
if the event data identifier type is not supported.
OSError: when the storage file is closed or read-only or
if the event data identifier type is not supported. | juraj-google-style |
def getGrid(self, use_mask=True):
    """Return a GDALGrid object of the GSSHA model bounds.

    Parameters:
        use_mask (bool): If True, uses the watershed mask. Otherwise, it
            uses the elevation grid.

    Returns:
        GDALGrid
    """
    card = 'WATERSHED_MASK' if use_mask else 'ELEVATION'
    return self.getGridByCard(card)
def on_predict_batch_end(self, batch, logs=None):
    """Calls the `on_predict_batch_end` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    if not self._should_call_predict_batch_hooks:
        return
    self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def _gauss(mean: int, sigma: int) -> int:
    """Create an integer variation around a base value.

    Args:
        mean: base value
        sigma: gaussian sigma

    Returns: random value
    """
    sample = random.gauss(mean, sigma)
    return int(sample)
def get_identifier(identifier, module_globals, module_name):
if isinstance(identifier, six.string_types):
fn = module_globals.get(identifier)
if (fn is None):
raise ValueError('Unknown {}: {}'.format(module_name, identifier))
return fn
elif callable(identifier):
retur... | Helper utility to retrieve the callable function associated with a string identifier.
Args:
identifier: The identifier. Could be a string or function.
module_globals: The global objects of the module.
module_name: The module name
Returns:
The callable associated with the identifier. | codesearchnet |
def set_intrusion_alert_through_smoke_detectors(self, activate: bool = True):
    """Activate or deactivate whether smoke detectors should "ring" during
    an alarm.

    Args:
        activate(bool): True will let the smoke detectors "ring" during an
            alarm
    """
    payload = json.dumps({"intrusionAlertThroughSmokeDetectors": activate})
    return self._restCall(
        "home/security/setIntrusionAlertThroughSmokeDetectors", payload
    )
def add_arguments(self, parser, bootstrap=False):
    """Adds all items to the parser passed in.

    Args:
        parser (argparse.ArgumentParser): The parser to add all items to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            bootstrapped items as required on the command-line.
    """
    # A plain loop replaces the original list comprehension, which was used
    # purely for its side effects and built a throwaway list.
    # NOTE(review): _get_items is always called with bootstrap=False while
    # the flag is only forwarded to add_argument -- confirm this asymmetry
    # is intended.
    for item in self._get_items(bootstrap=False):
        item.add_argument(parser, bootstrap)
def datacenters(self):
    """Gets the Datacenters API client.

    Returns:
        Datacenters: lazily constructed client, cached on the instance.
    """
    client = self.__datacenters
    if not client:
        client = Datacenters(self.__connection)
        self.__datacenters = client
    return client
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.