| code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (3 classes) |
|---|---|---|
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=0.01):
muref = np.array([self.el_refs[e].energy_per_atom for e in self.elements if (e != dep_elt)])
chempot_ranges = self.get_chempot_range_map([e for e in self.elements if (e != dep_elt)])
for e in self.elements:
if (not (e in target_comp.elements)):
target_comp = (target_comp + Composition({e: 0.0}))
coeff = [(- target_comp[e]) for e in self.elements if (e != dep_elt)]
for e in chempot_ranges.keys():
if (e.composition.reduced_composition == target_comp.reduced_composition):
multiplicator = (e.composition[dep_elt] / target_comp[dep_elt])
ef = (e.energy / multiplicator)
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if (e != dep_elt)]
res = {}
for i in range(len(elts)):
res[elts[i]] = (v[i] + muref[i])
res[dep_elt] = ((np.dot((v + muref), coeff) + ef) / target_comp[dep_elt])
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if (abs((di[k] - res[k])) > tol_en):
dict_equals = False
break
if dict_equals:
already_in = True
break
if (not already_in):
all_coords.append(res)
return all_coords
|
returns a set of chemical potentials corresponding to the vertices of
the simplex in the chemical potential phase diagram.
The simplex is built using all elements in the target_composition
except dep_elt.
The chemical potential of dep_elt is computed from the target
composition energy.
This method is useful, for instance, to get the limiting conditions for
defect computations.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed
from the energy of
the stable phase at the target composition
tol_en: the energy tolerance used to decide whether two vertices are identical
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
|
codesearchnet
|
def password(message: Text, default: Text='', validate: Union[(Type[Validator], Callable[([Text], bool)], None)]=None, qmark: Text=DEFAULT_QUESTION_PREFIX, style: Optional[Style]=None, **kwargs: Any) -> Question:
return text.text(message, default, validate, qmark, style, is_password=True, **kwargs)
|
Question the user to enter a secret text not displayed in the prompt.
This question type can be used to prompt the user for information
that should not be shown in the command line. The typed text will be
replaced with `*`.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
validate: Require the entered value to pass a validation. The
value cannot be submitted until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
returning a boolean, or a class reference to a
subclass of the prompt toolkit Validator class.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
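Example (a minimal sketch; the prompt text is a placeholder and `.ask()` must run in an interactive terminal):
    answer = password("Enter your API token:").ask()   # typed characters are shown as '*'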
|
codesearchnet
|
def clear_history(vcs):
evidence_path = _get_committed_history_path(vcs)
if os.path.exists(evidence_path):
os.remove(evidence_path)
|
Clear (committed) test run history from this project.
Args:
vcs (easyci.vcs.base.Vcs)
|
juraj-google-style
|
def set_s3_prefix(self, region, name):
ct = self.session.client('cloudtrail', region_name=region)
ct.update_trail(Name=name, S3KeyPrefix=self.account.account_name)
auditlog(event='cloudtrail.set_s3_prefix', actor=self.ns, data={'account': self.account.account_name, 'region': region})
self.log.info('Updated S3KeyPrefix to {0} for {0}/{1}'.format(self.account.account_name, region))
|
Sets the S3 prefix for a CloudTrail Trail
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
Returns:
`None`
|
codesearchnet
|
def qmhl(data: quantum_data.QuantumData, input_qhbm: qhbm.QHBM):
return data.expectation(input_qhbm.modular_hamiltonian) + input_qhbm.e_inference.log_partition()
|
Calculate the QMHL loss of the QHBM against the quantum data.
See equation 21 in the appendix.
Args:
data: The data mixed state to learn.
input_qhbm: QHBM being trained to approximate `data`.
Returns:
The quantum cross-entropy between the data and the model.
|
github-repos
|
def save_image(image_url, image_directory, image_name):
image_type = get_image_type(image_url)
if image_type is None:
raise ImageErrorException(image_url)
full_image_file_name = os.path.join(image_directory, image_name + '.' + image_type)
if os.path.exists(image_url):
shutil.copy(image_url, full_image_file_name)
return image_type
try:
with open(full_image_file_name, 'wb') as f:
user_agent = r'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0'
request_headers = {'User-Agent': user_agent}
requests_object = requests.get(image_url, headers=request_headers)
try:
content = requests_object.content
f.write(content)
except AttributeError:
raise ImageErrorException(image_url)
except IOError:
raise ImageErrorException(image_url)
return image_type
|
Saves an online image from image_url to image_directory with the name image_name.
Returns the extension of the image saved, which is determined dynamically.
Args:
image_url (str): The url of the image.
image_directory (str): The directory to save the image in.
image_name (str): The file name to save the image as.
Raises:
ImageErrorException: Raised if unable to save the image at image_url
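Example (a minimal sketch; the URL, directory and file name are placeholders, and the directory must already exist):
    ext = save_image('https://example.com/logo.png', '/tmp/images', 'logo')   # e.g. 'png'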
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
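Example (a minimal sketch; `tokenizer` stands for an already-instantiated mBART tokenizer):
    tokenizer.create_token_type_ids_from_sequences([5, 6, 7])            # [0, 0, 0, 0, 0]
    tokenizer.create_token_type_ids_from_sequences([5, 6, 7], [8, 9])    # [0] * 9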
|
github-repos
|
def get_gene_associations(model):
for reaction in model.reactions:
assoc = None
if reaction.genes is None:
continue
elif isinstance(reaction.genes, string_types):
assoc = boolean.Expression(reaction.genes)
else:
variables = [boolean.Variable(g) for g in reaction.genes]
assoc = boolean.Expression(boolean.And(*variables))
yield reaction.id, assoc
|
Create gene association for class :class:`.GeneDeletionStrategy`.
Yield pairs of reaction ID and
:class:`psamm.expression.boolean.Expression`,
representing relationships between reactions and related genes. This helper
function should be called when creating :class:`.GeneDeletionStrategy`
objects.
Args:
model: :class:`psamm.datasource.native.NativeModel`.
|
juraj-google-style
|
def add_reader(
self,
fd: IFileLike,
callback: typing.Callable[[IFileLike], typing.Any],
) -> None:
raise NotImplementedError()
|
Add a file descriptor to the processor and wait for READ.
Args:
fd (IFileLike): Any object that exposes a 'fileno' method that
returns a valid file descriptor integer.
callback (typing.Callable[[IFileLike], typing.Any]): A function
that consumes the IFileLike object whenever the READ event is
fired.
|
juraj-google-style
|
def save_op(self, filename_tensor, saveables):
tensor_names = []
tensors = []
tensor_slices = []
for saveable in saveables:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
if self._write_version == saver_pb2.SaverDef.V1:
return io_ops._save(filename=filename_tensor, tensor_names=tensor_names, tensors=tensors, tensor_slices=tensor_slices)
elif self._write_version == saver_pb2.SaverDef.V2:
return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices, tensors)
else:
raise RuntimeError('Unexpected write_version: ' + self._write_version)
|
Create an Op to save 'saveables'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
An Operation that saves the variables.
Raises:
RuntimeError: (implementation detail) if "self._write_version" is an
unexpected value.
|
github-repos
|
def get_recipe(filepath=None, includepath=None, stringcontent=None):
if filepath:
with open(filepath) as recipe_file:
stringcontent = recipe_file.read()
try:
return recipe_includes(json.loads(stringcontent.replace('\n', ' ')), includepath)
except ValueError as e:
pos = 0
for count, line in enumerate(stringcontent.splitlines(), 1):
pos += len(line)
if pos >= e.pos:
e.lineno = count
e.pos = pos
e.args = ('JSON ERROR: %s LINE: %s CHARACTER: %s ERROR: %s LINE: %s' % (filepath, count, pos - e.pos, str(e.msg), line.strip()),)
raise
|
Loads json for recipe, replaces newlines, and expands includes.
Args:
- filepath: (string) The local file path to the recipe json file to load.
- includepath: (string) Path used when expanding includes in the recipe.
- stringcontent: (string) Raw recipe JSON, used when filepath is not given.
Returns:
Dictionary of recipe file.
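Example (a minimal sketch; 'recipe.json' is a placeholder path to an existing recipe file):
    recipe = get_recipe(filepath='recipe.json')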
|
github-repos
|
def RegisterPathSpec(cls, path_spec_type):
type_indicator = path_spec_type.TYPE_INDICATOR
if type_indicator in cls._path_spec_types:
raise KeyError(
'Path specification type: {0:s} already set.'.format(
type_indicator))
cls._path_spec_types[type_indicator] = path_spec_type
if getattr(path_spec_type, '_IS_SYSTEM_LEVEL', False):
cls._system_level_type_indicators[type_indicator] = path_spec_type
|
Registers a path specification type.
Args:
path_spec_type (type): path specification type.
Raises:
KeyError: if path specification is already registered.
|
juraj-google-style
|
def _WriteHeader(self, output_writer):
header_string = ''
if self._title:
header_string = ' {0:s} '.format(self._title)
header_string = self._HEADER_FORMAT_STRING.format(header_string)
output_writer.Write(header_string)
|
Writes a header.
Args:
output_writer (OutputWriter): output writer.
|
codesearchnet
|
def create_failover_dns(self, primary_region='us-east-1'):
dns_record = self.generated.dns()['global']
zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
elb_dns_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
elb_dns_zone_id = find_elb_dns_zone_id(name=self.app_name, env=self.env, region=self.region)
if (primary_region in elb_dns_aws):
failover_state = 'PRIMARY'
else:
failover_state = 'SECONDARY'
self.log.info('%s set as %s record', elb_dns_aws, failover_state)
self.log.info('Updating Application Failover URL: %s', dns_record)
dns_kwargs = {'dns_name': dns_record, 'elb_dns_zone_id': elb_dns_zone_id, 'elb_aws_dns': elb_dns_aws, 'dns_ttl': self.dns_ttl, 'failover_state': failover_state}
for zone_id in zone_ids:
self.log.debug('zone_id: %s', zone_id)
update_failover_dns_record(self.env, zone_id, **dns_kwargs)
return dns_record
|
Create dns entries in route53 for multiregion failover setups.
Args:
primary_region (str): primary AWS region for failover
Returns:
Auto-generated DNS name.
|
codesearchnet
|
def from_file(cls, fp, is_outlook=False):
log.debug('Parsing email from file {!r}'.format(fp))
with ported_open(fp) as f:
message = email.message_from_file(f)
if is_outlook:
log.debug('Removing temp converted Outlook email {!r}'.format(fp))
os.remove(fp)
return cls(message)
|
Init a new object from a file path.
Args:
fp (string): file path of raw email
is_outlook (boolean): if True is an Outlook email
Returns:
Instance of MailParser
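Example (a minimal sketch; the path is a placeholder and the call is made on the owning MailParser class, whose classmethod decorator is not shown here):
    mail = MailParser.from_file('/path/to/message.eml')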
|
codesearchnet
|
def block_embedding_to(self, device):
self.block_emb = self.block_emb.to(device)
|
Send `self.block_emb` to a specific device.
Args:
device (`str` or `torch.device`):
The device to which `self.block_emb` will be sent.
|
github-repos
|
def _ParseFileVersion(file_version):
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
logger.warn(
('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
|
Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
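Example (follows directly from the parsing logic above):
    >>> _ParseFileVersion('brain.Event:2')
    2.0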
|
juraj-google-style
|
def comments_1(self, value=None):
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str '
'for field `comments_1`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `comments_1`')
self._comments_1 = value
|
Corresponds to IDD Field `comments_1`
Args:
value (str): value for IDD Field `comments_1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def apply_to_miz(self, miz):
miz.mission.day = self.date.day
miz.mission.month = self.date.month
miz.mission.year = self.date.year
miz.mission.mission_start_time = self.mission_start_time
return True
|
Applies this datetime to a Miz object (it will be mutated in place)
Args:
miz: MIZ object to mutate
Returns: True
|
juraj-google-style
|
def get_group_id(self, uuid=None):
group_data = self.get_group(uuid)
try:
return group_data['response']['docs'][0]['id']
except (KeyError, IndexError):
failure_message = ('Error in get_group response data - '
'got {0}'.format(group_data))
log.exception(failure_message)
raise PyLmodUnexpectedData(failure_message)
|
Get group id based on uuid.
Args:
uuid (str): optional uuid. defaults to self.cuuid
Raises:
PyLmodUnexpectedData: No group data was returned.
requests.RequestException: Exception connection error
Returns:
int: numeric group id
|
juraj-google-style
|
def convertDateTimeStrToDateStr(datetime):
if not datetime == None and 'T' in datetime:
datetime = datetime.split('T')[0]
return datetime
|
Convert a DateTime string (YYYY-MM-DDTHH:mm:SSZ) to just a Date string by removing the time (YYYY-MM-DD)
Args:
datetime: the datetime as a string
Returns:
A string representation of the date in the following
format YYYY-MM-DD
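Example (illustrative calls with placeholder timestamps):
    >>> convertDateTimeStrToDateStr('2021-06-01T12:30:00Z')
    '2021-06-01'
    >>> convertDateTimeStrToDateStr('2021-06-01')
    '2021-06-01'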
|
github-repos
|
def list_vms_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Compute/virtualMachines',
'?api-version=', COMP_API])
return do_get_next(endpoint, access_token)
|
List VMs in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of a list of VM model views.
|
juraj-google-style
|
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
|
Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0.
|
juraj-google-style
|
def _getsize_from_header(self, header):
for key in self._SIZE_KEYS:
try:
return int(header.pop(key))
except KeyError:
continue
else:
raise UnsupportedOperation('getsize')
|
Return the size from header
Args:
header (dict): Object header.
Returns:
int: Size in bytes.
|
juraj-google-style
|
def value_to_key_strokes(value):
result = ''
if isinstance(value, Integral):
value = str(value)
for v in value:
if isinstance(v, Keys):
result += v.value
elif isinstance(v, Integral):
result += str(v)
else:
result += v
return [result]
|
Convert value to a list of key strokes
>>> value_to_key_strokes(123)
['123']
>>> value_to_key_strokes('123')
['123']
>>> value_to_key_strokes([1, 2, 3])
['123']
>>> value_to_key_strokes(['1', '2', '3'])
['123']
Args:
value(int|str|list)
Returns:
A list of string.
|
juraj-google-style
|
def call(self, x):
with tf.name_scope("embedding"):
embeddings = tf.gather(self.shared_weights, x)
embeddings *= self.hidden_size ** 0.5
padding = model_utils.get_padding(x)
embeddings *= tf.expand_dims(1 - padding, -1)
return embeddings
|
Get token embeddings of x.
Args:
x: An int64 tensor with shape [batch_size, length]
Returns:
embeddings: float32 tensor with shape [batch_size, length, embedding_size]
padding: float32 tensor with shape [batch_size, length] indicating the
locations of the padding tokens in x.
|
juraj-google-style
|
def _awaitReset(self, utcTimeStamp, verbose=True):
resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp))
_vPrint(verbose, '--- Current Timestamp')
_vPrint(verbose, (' %s' % time.strftime('%c')))
now = pytz.utc.localize(datetime.utcnow())
waitTime = (round((resetTime - now).total_seconds()) + 1)
_vPrint(verbose, '--- Current UTC Timestamp')
_vPrint(verbose, (' %s' % now.strftime('%c')))
_vPrint(verbose, '--- GITHUB NEEDS A BREAK Until UTC Timestamp')
_vPrint(verbose, (' %s' % resetTime.strftime('%c')))
self._countdown(waitTime, printString='--- Waiting %*d seconds...', verbose=verbose)
_vPrint(verbose, '--- READY!')
|
Wait until the given UTC timestamp.
Args:
utcTimeStamp (int): A UTC format timestamp.
verbose (Optional[bool]): If False, all extra printouts will be
suppressed. Defaults to True.
|
codesearchnet
|
def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name):
description = "Accessible methods of {}".format(class_name)
sub_parsers = top_level_parser.add_subparsers(description=description,
dest="method")
parser_to_method = {}
for method_name, parser in methods_to_parse.items():
parser_name = parser.get_name() or method_name
if parser_name.startswith("_"):
if not self._parse_private:
continue
parser_name = parser_name.strip("_")
parser_name = parser_name.replace("_", "-")
parser_to_method[parser_name] = method_name
sub_parsers.add_parser(parser_name, parents=[parser],
add_help=False,
description=parser.description)
return parser_to_method
|
Add all the sub-parsers to the top_level_parser.
Args:
top_level_parser: the top level parser
methods_to_parse: dict of method name pointing to their associated
argument parser
class_name: name of the decorated class
Returns:
a dict of registered name of the parser i.e. sub command name
pointing to the method real name
|
juraj-google-style
|
def _check_type(obj, expected_types):
if not isinstance(obj, expected_types):
raise TypeError('Expected type %s; got type %s' % (expected_types, type(obj)))
|
Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_types: (`type` or an iterable of `type`s) The expected `type`(s)
of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
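Example (a minimal sketch):
    _check_type(3, int)                # passes silently
    _check_type('3', (int, float))     # raises TypeError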
|
github-repos
|
def __init__(self, identifier=None):
super(SessionStart, self).__init__()
self.artifact_filters = None
self.command_line_arguments = None
self.debug_mode = False
self.enabled_parser_names = None
self.filter_file = None
self.identifier = identifier
self.parser_filter_expression = None
self.preferred_encoding = None
self.preferred_time_zone = None
self.preferred_year = None
self.product_name = None
self.product_version = None
self.timestamp = None
|
Initializes a session start attribute container.
Args:
identifier (Optional[str]): unique identifier of the session.
The identifier should match that of the corresponding
session completion information.
|
juraj-google-style
|
def build_recursive_gcs_delocalize_env(source, outputs):
filtered_outs = [
var for var in outputs
if var.recursive and var.file_provider == job_model.P_GCS
]
return '\n'.join([
'export {0}={1}/{2}'.format(var.name,
source.rstrip('/'),
var.docker_path.rstrip('/'))
for var in filtered_outs
])
|
Return a multi-line string with export statements for the variables.
Arguments:
source: Folder with the data.
For example /mnt/data
outputs: a list of OutputFileParam
Returns:
a multi-line string with a shell script that sets environment variables
corresponding to the outputs.
|
juraj-google-style
|
def parse_genotypes(variant, individuals, individual_positions):
genotypes = []
for ind in individuals:
pos = individual_positions[ind['individual_id']]
genotypes.append(parse_genotype(variant, ind, pos))
return genotypes
|
Parse the genotype calls for a variant
Args:
variant(cyvcf2.Variant)
individuals: List[dict]
individual_positions(dict)
Returns:
genotypes(list(dict)): A list of genotypes
|
codesearchnet
|
def optionally(self, entity_type, attribute_name=None):
if not attribute_name:
attribute_name = entity_type
self.optional += [(entity_type, attribute_name)]
return self
|
Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
|
juraj-google-style
|
def add_history(self, filename, color_scheme, font, wrap):
filename = encoding.to_unicode_from_fs(filename)
if filename in self.filenames:
return
editor = codeeditor.CodeEditor(self)
if osp.splitext(filename)[1] == '.py':
language = 'py'
else:
language = 'bat'
editor.setup_editor(linenumbers=False,
language=language,
scrollflagarea=False,
show_class_func_dropdown=False)
editor.focus_changed.connect(lambda: self.focus_changed.emit())
editor.setReadOnly(True)
editor.set_font(font, color_scheme)
editor.toggle_wrap_mode(wrap)
text, _ = encoding.read(filename)
editor.set_text(text)
editor.set_cursor_position('eof')
self.editors.append(editor)
self.filenames.append(filename)
index = self.tabwidget.addTab(editor, osp.basename(filename))
self.find_widget.set_editor(editor)
self.tabwidget.setTabToolTip(index, filename)
self.tabwidget.setCurrentIndex(index)
|
Add new history tab.
Args:
filename (str): file to be loaded in a new tab.
|
juraj-google-style
|
def supported_view_classes(cls) -> Set[Type['View']]:
supported_view_classes = set()
view_class = pg_typing.get_outer_class(cls, base_cls=View, immediate=True)
if view_class is not None and (not inspect.isabstract(view_class)):
supported_view_classes.add(view_class)
for base_cls in cls.__bases__:
if issubclass(base_cls, View.Extension):
supported_view_classes.update(base_cls.supported_view_classes())
return supported_view_classes
|
Returns all non-abstract View classes that the current class supports.
A class can inherit from multiple ``View.Extension`` classes. For example:
.. code-block:: python
class MyObject(View1.Extension, View2.Extension):
...
In this case, ``MyObject`` supports both ``View1`` and ``View2``.
Returns:
All non-abstract View classes that the current class supports.
|
github-repos
|
def GetDisplayNameForPathSpec(self, path_spec):
return path_helper.PathHelper.GetDisplayNameForPathSpec(path_spec, mount_path=self._mount_path, text_prepend=self._text_prepend)
|
Retrieves the display name for a path specification.
Args:
path_spec (dfvfs.PathSpec): path specification.
Returns:
str: human readable version of the path specification.
|
codesearchnet
|
def fetch(self, settlement_id, data={}, **kwargs):
return super(Settlement, self).fetch(settlement_id, data, **kwargs)
|
Fetch Settlement data for given Id
Args:
settlement_id : Id for which settlement object has to be retrieved
Returns:
settlement dict for given settlement id
|
codesearchnet
|
def once(coro, raise_exception=False, return_value=None):
return times(coro, limit=1, return_value=return_value, raise_exception=raise_exception)
|
Wrap a given coroutine function that is restricted to one execution.
Repeated calls to the coroutine function will return the value of the first
invocation.
This function can be used as decorator.
arguments:
coro (coroutinefunction): coroutine function to wrap.
raise_exception (bool): raise exception if execution times exceeded.
return_value (mixed): value to return when execution times exceeded,
instead of the memoized one from last invocation.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction
Usage::
async def mul_2(num):
return num * 2
once = paco.once(mul_2)
await once(2)
# => 4
await once(3)
# => 4
once = paco.once(mul_2, return_value='exceeded')
await once(2)
# => 4
await once(3)
# => 'exceeded'
|
codesearchnet
|
def _regular_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001):
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return (normed, mean, var)
|
Non-fused version of `normalize_batch_in_training`.
Args:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
|
github-repos
|
def get_query_columns(engine, query):
con = engine.connect()
result = con.execute(query).fetchone()
values = list(result)
cols_names = result.keys()
cols = OrderedDict()
for i in range(len(cols_names)):
cols[cols_names[i]] = type(values[i]).__name__
return cols
|
Extract column names and Python types from a query
Args:
engine: SQLAlchemy connection engine
query: SQL query
Returns:
dict with column names and Python types
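Example (a minimal sketch; assumes `engine` is a SQLAlchemy engine and the target database has a `users` table with `id` and `name` columns):
    cols = get_query_columns(engine, 'SELECT id, name FROM users LIMIT 1')
    # e.g. OrderedDict([('id', 'int'), ('name', 'str')])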
|
codesearchnet
|
def transfer(self, transfer_payload=None, *, from_user, to_user):
if (self.persist_id is None):
raise EntityNotYetPersistedError('Entities cannot be transferred until they have been persisted')
return self.plugin.transfer(self.persist_id, transfer_payload, from_user=from_user, to_user=to_user)
|
Transfer this entity to another owner on the backing
persistence layer
Args:
transfer_payload (dict): Payload for the transfer
from_user (any): A user based on the model specified by the
persistence layer
to_user (any): A user based on the model specified by the
persistence layer
Returns:
str: Id of the resulting transfer action on the persistence
layer
Raises:
:exc:`~.EntityNotYetPersistedError`: If the entity being
transferred is not associated with an id on the
persistence layer (:attr:`~Entity.persist_id`) yet
:exc:`~.EntityNotFoundError`: If the entity could not be
found on the persistence layer
:exc:`~.EntityTransferError`: If the entity fails to be
transferred on the persistence layer
:exc:`~.PersistenceError`: If any other unhandled error
in the plugin occurred
|
codesearchnet
|
def blocking_reader(reader, input, buffer_size=_DEFAULT_BUFFER_SIZE):
ion_event = None
while True:
read_event = (yield ion_event)
ion_event = reader.send(read_event)
while ion_event is not None and ion_event.event_type.is_stream_signal:
data = input.read(buffer_size)
if len(data) == 0:
if ion_event.event_type is IonEventType.INCOMPLETE:
ion_event = reader.send(NEXT_EVENT)
continue
else:
yield ION_STREAM_END_EVENT
return
ion_event = reader.send(read_data_event(data))
|
Provides an implementation of using the reader co-routine with a file-like object.
Args:
reader(Coroutine): A reader co-routine.
input(BaseIO): The file-like object to read from.
buffer_size(Optional[int]): The optional buffer size to use.
|
juraj-google-style
|
def _generate_malformed_query(data):
if isinstance(data, six.text_type):
query_str = data.replace(':', ' ')
else:
query_str = ' '.join([word.strip(':') for word in data.children])
return {
'simple_query_string': {
'fields': ['_all'],
'query': query_str
}
}
|
Generates a query on the ``_all`` field with all the query content.
Args:
data (six.text_type or list): The query in the format of ``six.text_type`` (when used from parsing driver)
or ``list`` when used from within the ES visitor.
|
juraj-google-style
|
def colored_block(text: str, block_start: str, block_end: str, color: Optional[str]=None, background: Optional[str]=None, styles: Optional[List[str]]=None) -> str:
if not color and (not background) and (not styles):
return text
s = []
start_index = 0
end_index = 0
previous_color = None
def write_nonblock_text(text: str, previous_color: Optional[str]):
if previous_color:
s.append(previous_color)
s.append(text)
while start_index < len(text):
start_index = text.find(block_start, end_index)
if start_index == -1:
write_nonblock_text(text[end_index:], previous_color)
break
since_last_block = text[end_index:start_index]
write_nonblock_text(since_last_block, previous_color)
colors = re.findall(_ANSI_COLOR_REGEX, since_last_block)
if colors:
previous_color = colors[-1]
end_index = text.find(block_end, start_index + len(block_start))
if end_index == -1:
write_nonblock_text(text[start_index:], previous_color)
break
end_index += len(block_end)
block = text[start_index:end_index]
block = colored(block, color=color, background=background, styles=styles)
s.append(block)
return ''.join(s)
|
Apply colors to text blocks.
Args:
text: A string that may or may not already have ANSI color characters.
block_start: A string that signals the start of a block. E.g. '{{'
block_end: A string that signals the end of a block. E.g. '}}'.
color: A string for text colors. Applicable values are:
'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.
background: A string for background colors. Applicable values are:
'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'.
styles: A list of strings for applying styles on the text.
Applicable values are:
'bold', 'dark', 'underline', 'blink', 'reverse', 'concealed'.
Returns:
A string with ANSI color characters embracing the matched text blocks.
|
github-repos
|
def generate_xid(identifier=None):
if (identifier is None):
identifier = str(uuid.uuid4())
elif isinstance(identifier, list):
identifier = '-'.join([str(i) for i in identifier])
identifier = hashlib.sha256(identifier.encode('utf-8')).hexdigest()
return hashlib.sha256(identifier.encode('utf-8')).hexdigest()
|
Generate xid from provided identifiers.
.. Important:: If no identifier is provided a unique xid will be returned, but it will
not be reproducible. If a list of identifiers is provided they must be
in the same order to generate a reproducible xid.
Args:
identifier (list|str): Optional *string* value(s) to be used to make a unique and
reproducible xid.
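Example (a minimal sketch; the identifier values are placeholders):
    xid = generate_xid(['adversary', 'example-campaign'])   # reproducible for the same ordered list
    xid = generate_xid()                                     # unique, but not reproducible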
|
codesearchnet
|
def start_simple_webserver(domain=None, port=5832):
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.wsgi
import flask
app = flask.Flask('__simple__')
@app.route('/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def echo_args(*args, **kwargs):
from flask import request
print('Simple server was pinged')
print(('args = %r' % (args,)))
print(('kwargs = %r' % (kwargs,)))
print(('request.args = %r' % (request.args,)))
print(('request.form = %r' % (request.form,)))
return ''
if (domain is None):
domain = get_localhost()
app.server_domain = domain
app.server_port = port
app.server_url = ('http://%s:%s' % (app.server_domain, app.server_port))
print(('app.server_url = %s' % (app.server_url,)))
http_server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
http_server.listen(app.server_port)
tornado.ioloop.IOLoop.instance().start()
|
r"""
simple webserver that echos its arguments
Args:
domain (None): (default = None)
port (int): (default = 5832)
CommandLine:
python -m utool.util_web --exec-start_simple_webserver:0
python -m utool.util_web --exec-start_simple_webserver:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_web import * # NOQA
>>> domain = None
>>> port = 5832
>>> result = start_simple_webserver(domain, port)
>>> print(result)
|
codesearchnet
|
def _tf_flatten_batch_dims(x, num_nonbatch_dims):
shape = x.shape.as_list()
assert None not in shape
new_shape = ([list_product(shape[:-num_nonbatch_dims])]
+ shape[-num_nonbatch_dims:])
if new_shape != shape:
x = tf.reshape(x, new_shape)
return x
|
Flatten all but last num_nonbatch_dims into one dimension.
Args:
x: a tf.Tensor
num_nonbatch_dims: an integer
Returns:
a tf.Tensor with 1 + num_nonbatch_dims dimensions.
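Example (a minimal sketch): a tensor of shape [2, 3, 4, 5] with num_nonbatch_dims=2 is reshaped to [6, 4, 5].
    x = tf.zeros([2, 3, 4, 5])
    y = _tf_flatten_batch_dims(x, num_nonbatch_dims=2)   # y.shape == [6, 4, 5]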
|
juraj-google-style
|
def _PrintSessionsOverview(self, storage_reader):
table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Sessions')
for session in storage_reader.GetSessions():
start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)
session_identifier = uuid.UUID(hex=session.identifier)
session_identifier = '{0!s}'.format(session_identifier)
table_view.AddRow([session_identifier, start_time])
table_view.Write(self._output_writer)
|
Prints a sessions overview.
Args:
storage_reader (StorageReader): storage reader.
|
codesearchnet
|
def prepare_model_settings(label_count, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, preprocess):
desired_samples = int(sample_rate * clip_duration_ms / 1000)
window_size_samples = int(sample_rate * window_size_ms / 1000)
window_stride_samples = int(sample_rate * window_stride_ms / 1000)
length_minus_window = desired_samples - window_size_samples
if length_minus_window < 0:
spectrogram_length = 0
else:
spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
if preprocess == 'average':
fft_bin_count = 1 + _next_power_of_two(window_size_samples) / 2
average_window_width = int(math.floor(fft_bin_count / feature_bin_count))
fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))
elif preprocess == 'mfcc':
average_window_width = -1
fingerprint_width = feature_bin_count
elif preprocess == 'micro':
average_window_width = -1
fingerprint_width = feature_bin_count
else:
raise ValueError('Unknown preprocess mode "%s" (should be "mfcc", "average", or "micro")' % preprocess)
fingerprint_size = fingerprint_width * spectrogram_length
return {'desired_samples': desired_samples, 'window_size_samples': window_size_samples, 'window_stride_samples': window_stride_samples, 'spectrogram_length': spectrogram_length, 'fingerprint_width': fingerprint_width, 'fingerprint_size': fingerprint_size, 'label_count': label_count, 'sample_rate': sample_rate, 'preprocess': preprocess, 'average_window_width': average_window_width}
|
Calculates common settings needed for all models.
Args:
label_count: How many classes are to be recognized.
sample_rate: Number of audio samples per second.
clip_duration_ms: Length of each audio clip to be analyzed.
window_size_ms: Duration of frequency analysis window.
window_stride_ms: How far to move in time between frequency windows.
feature_bin_count: Number of frequency bins to use for analysis.
preprocess: How the spectrogram is processed to produce features.
Returns:
Dictionary containing common settings.
Raises:
ValueError: If the preprocessing mode isn't recognized.
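Example (a minimal sketch; the argument values mirror common speech-commands defaults):
    settings = prepare_model_settings(label_count=12, sample_rate=16000, clip_duration_ms=1000,
                                      window_size_ms=30.0, window_stride_ms=10.0,
                                      feature_bin_count=40, preprocess='mfcc')
    # settings['fingerprint_size'] == settings['fingerprint_width'] * settings['spectrogram_length']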
|
github-repos
|
def remove(self, key):
if self.prepickle:
key = pickle.dumps(key)
if key not in self.keys:
raise ValueError("The given key does not exist")
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable.remove_val(H, key)
if not hashtable.get(H):
hashtable.remove(H)
self.keys.remove(key)
|
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set.
|
juraj-google-style
|
def create(self, params=None, headers=None):
path = '/creditor_bank_accounts'
if (params is not None):
params = {self._envelope_key(): params}
try:
response = self._perform_request('POST', path, params, headers, retry_failures=True)
except errors.IdempotentCreationConflictError as err:
return self.get(identity=err.conflicting_resource_id, params=params, headers=headers)
return self._resource_for(response)
|
Create a creditor bank account.
Creates a new creditor bank account object.
Args:
params (dict, optional): Request body.
Returns:
ListResponse of CreditorBankAccount instances
|
codesearchnet
|
def assertNotAllEqual(self, a, b, msg=None):
try:
self.assertAllEqual(a, b)
except AssertionError:
return
msg = msg or ''
raise AssertionError('The two values are equal at all elements. %s' % msg)
|
Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
|
github-repos
|
def __init__(self, paths=None, separator='/'):
if not paths:
raise errors.FormatError('Missing paths value.')
super(PathSourceType, self).__init__()
self.paths = paths
self.separator = separator
|
Initializes a source type.
Args:
paths (Optional[str]): paths relative to the root of the file system.
separator (Optional[str]): path segment separator.
Raises:
FormatError: when paths is not set.
|
juraj-google-style
|
def to_hdf(self,path,key,mode='a'):
pd.DataFrame(self.serialize()).to_hdf(path,key,mode=mode,format='table',complib='zlib',complevel=9)
f = h5py.File(path,'r+')
f[key].attrs["microns_per_pixel"] = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan
f.close()
|
Save the CellDataFrame to an hdf5 file.
Args:
path (str): the path to save to
key (str): the name of the location to save it to
mode (str): write mode
|
juraj-google-style
|
def get_output_from_cache(name, filename):
cache_filename = _get_cache_filename(name, filename)
if (os.path.exists(cache_filename)
and os.path.getmtime(filename) < os.path.getmtime(cache_filename)):
with io.open(cache_filename) as f:
return f.read()
return None
|
Returns the output from the cache if still valid.
It checks that the cache file is defined and that its modification time is
after the modification time of the original file.
Args:
name: string: name of the linter.
filename: string: path of the filename for which we are retrieving the
output.
Returns: a string with the output, if it is still valid, or None otherwise.
|
juraj-google-style
|
def abspath(fpath):
from os import path, getcwd, chdir
original = getcwd()
chdir(reporoot)
result = path.abspath(path.expanduser(fpath))
chdir(original)
return result
|
Returns the absolute path to the specified file/folder *relative to the
repository root*.
Args:
fpath (str): path to a file or folder; doesn't need to exist.
|
juraj-google-style
|
def run_inference(self, batch: Sequence[scipy.sparse.csr_matrix], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return self._inference_fn(batch, model, inference_args)
|
Runs inferences on a batch of SciPy sparse matrices.
Args:
batch: A sequence of examples as Scipy sparse matrices.
The dimensions must match the dimensions of the data
used to train the model.
model: XGBoost booster or XGBModel (sklearn interface). Must implement
predict(X). Where the parameter X is a SciPy sparse matrix.
inference_args: Any additional arguments for an inference.
Returns:
An Iterable of type PredictionResult.
|
github-repos
|
def _GetFrameCodeObjectName(frame):
if frame.f_code.co_argcount >= 1 and 'self' == frame.f_code.co_varnames[0]:
return (frame.f_locals['self'].__class__.__name__ +
'.' + frame.f_code.co_name)
else:
return frame.f_code.co_name
|
Gets the code object name for the frame.
Args:
frame: the frame to get the name from
Returns:
The function name if the code is a static function or the class name with
the method name if it is a member function.
|
juraj-google-style
|
def whois_domains(self, domains):
api_name = 'opendns-whois-domain'
fmt_url_path = u'whois/{0}'
return self._multi_get(api_name, fmt_url_path, domains)
|
Calls WHOIS domain end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_result}
|
codesearchnet
|
def get_child(self, injection_site_fn, binding):
child_scope_id = binding.scope_id
new_binding_stack = (self._binding_stack + [binding])
if (binding in self._binding_stack):
raise errors.CyclicInjectionError(new_binding_stack)
if (not self._is_scope_usable_from_scope_fn(child_scope_id, self._scope_id)):
raise errors.BadDependencyScopeError(self.get_injection_site_desc(), self._scope_id, child_scope_id, binding.binding_key)
return _InjectionContext(injection_site_fn, new_binding_stack, child_scope_id, self._is_scope_usable_from_scope_fn)
|
Creates a child injection context.
A "child" injection context is a context for a binding used to
inject something into the current binding's provided value.
Args:
injection_site_fn: the child function being injected into
binding: a Binding
Returns:
a new _InjectionContext
|
codesearchnet
|
def filterfalse_items(item_list, flag_list):
assert len(item_list) == len(flag_list)
filtered_items = list(util_iter.ifilterfalse_items(item_list, flag_list))
return filtered_items
|
Returns items in item list where the corresponding item in flag list is false
Args:
item_list (list): list of items
flag_list (list): list of truthy values
Returns:
filtered_items : items where the corresponding flag was falsy
SeeAlso:
util_iter.ifilterfalse_items
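Example (a minimal sketch; keeps the items whose flag is falsy):
    >>> filterfalse_items(['a', 'b', 'c', 'd'], [True, False, True, False])
    ['b', 'd']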
|
juraj-google-style
|
def switch_to_window(page_class, webdriver):
window_list = list(webdriver.window_handles)
original_window = webdriver.current_window_handle
for window_handle in window_list:
webdriver.switch_to_window(window_handle)
try:
return PageFactory.create_page(page_class, webdriver)
except:
pass
webdriver.switch_to_window(original_window)
raise WindowNotFoundError(u('Window {0} not found.').format(page_class.__class__.__name__))
|
Utility method for switching between windows. It will search through currently open
windows, then switch to the window matching the provided PageObject class.
Args:
page_class (PageObject): Page class to search for/instantiate.
webdriver (WebDriver): Selenium webdriver.
Usage::
WebUtils.switch_to_window(DetailsPopUpPage, driver) # switches to the pop up window.
|
codesearchnet
|
def relation_completions(
completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int
) -> list:
if bel_fmt == "short":
relation_list = bel_spec["relations"]["list_short"]
else:
relation_list = bel_spec["relations"]["list_long"]
matches = []
for r in relation_list:
if re.match(completion_text, r):
matches.append(r)
replace_list = []
for match in matches:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
replace_list.append(
{
"replacement": match,
"label": match,
"highlight": highlight,
"type": "Relation",
}
)
return replace_list[:size]
|
Filter BEL relations by prefix
Args:
completion_text: completion string prefix
bel_spec: BEL specification
bel_fmt: short, medium, long BEL formats
size: maximum number of completions to return
Returns:
list: list of BEL relations that match prefix
|
juraj-google-style
|
def save(variable, filename):
fileObj = open(filename, 'wb')
pickle.dump(variable, fileObj)
fileObj.close()
|
Save variable on given path using Pickle
Args:
variable: what to save
filename (str): path of the output file
|
juraj-google-style
|
def variant(self, document_id, gene_panels=None, case_id=None):
query = {}
if case_id:
query['case_id'] = case_id
query['variant_id'] = document_id
else:
query['_id'] = document_id
variant_obj = self.variant_collection.find_one(query)
if variant_obj:
variant_obj = self.add_gene_info(variant_obj, gene_panels)
if variant_obj['chromosome'] in ['X', 'Y']:
variant_obj['is_par'] = is_par(variant_obj['chromosome'],
variant_obj['position'])
return variant_obj
|
Returns the specified variant.
Arguments:
document_id : A md5 key that represents the variant or "variant_id"
gene_panels(List[GenePanel])
case_id (str): case id (will search with "variant_id")
Returns:
variant_object(Variant): A odm variant object
|
juraj-google-style
|
def _retry_failed_log(failed_trigger_log):
model = type(failed_trigger_log)
try:
failed_trigger_log = model.objects.select_for_update().get(id=failed_trigger_log.id, state=TRIGGER_LOG_STATE['FAILED'])
except model.DoesNotExist:
return False
failed_trigger_log.redo()
return True
|
Try to re-apply a failed trigger log action.
Makes sure the argument trigger log is in a FAILED state and acquires a row lock on it.
Returns:
True if the operation succeeded
|
codesearchnet
|
def __init__(self, additional_note="", kwargs_dict=None):
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError("Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n
|
Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing specific kwargs
expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
|
juraj-google-style
|
def control_status_ctx():
ret = _control_ctx()[-1]
return ret
|
Returns the current control context for autograph.
This method is useful when calling `tf.__internal__.autograph.tf_convert`,
The context will be used by tf_convert to determine whether it should convert
the input function. See the sample usage like below:
```
def foo(func):
return tf.__internal__.autograph.tf_convert(
input_fn, ctx=tf.__internal__.autograph.control_status_ctx())()
```
Returns:
The current control context of autograph.
|
github-repos
|
def ExamineEvent(self, mediator, event):
if event.data_type not in self._DATATYPES:
return
url = getattr(event, 'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, 'netloc', None)
if domain in self._domains:
return
self._domains.append(domain)
|
Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
|
juraj-google-style
|
def _delegate_method(keras_tensor_cls, method_name):
def delegate(self, *args, **kwargs):
return InstanceMethod(method_name)(self, args, kwargs)
setattr(keras_tensor_cls, method_name, delegate)
|
Register method on a KerasTensor class.
Calling this function multiple times with the same arguments should be a no-op.
This method exposes an instance method on the KerasTensor class that will use
an `InstanceMethod` layer to run the desired method on the represented
intermediate values in the model.
Args:
keras_tensor_cls: The KerasTensor subclass that should expose the property.
method_name: The name of the method to expose and delegate to the
represented (Composite)Tensor.
|
github-repos
|
def asset(self, asset_id, asset_type, action='GET'):
if (not self.can_update()):
self._tcex.handle_error(910, [self.type])
if (asset_type == 'PHONE'):
return self.tc_requests.victim_phone_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)
if (asset_type == 'EMAIL'):
return self.tc_requests.victim_email_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)
if (asset_type == 'NETWORK'):
return self.tc_requests.victim_network_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)
if (asset_type == 'SOCIAL'):
return self.tc_requests.victim_social_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)
if (asset_type == 'WEB'):
return self.tc_requests.victim_web_asset(self.api_type, self.api_sub_type, self.unique_id, asset_id, action=action)
self._tcex.handle_error(925, ['asset_type', 'asset', 'asset_type', 'asset_type', asset_type])
return None
|
Gets an asset of a Victim
Valid asset_type:
+ PHONE
+ EMAIL
+ NETWORK
+ SOCIAL
+ WEB
Args:
asset_type:
asset_id:
action:
Returns:
|
codesearchnet
|
def add_logs(self, logs):
self._log.extend(logs)
for log in logs:
print('%s line %d:%d: %s' % log)
|
Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
|
github-repos
|
def recipe_trends_places_to_bigquery_via_value(config, auth_write, secret, key, woeids, destination_dataset, destination_table):
twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'values': woeids}}, 'out': {'bigquery': {'dataset': destination_dataset, 'table': destination_table}}})
|
Move using hard coded WOEID values.
Args:
auth_write (authentication) - Credentials used for writing data.
secret (string) - NA
key (string) - NA
woeids (integer_list) - NA
destination_dataset (string) - NA
destination_table (string) - NA
|
github-repos
|
def load_notebook_node(notebook_path):
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if (not hasattr(nb.metadata, 'papermill')):
nb.metadata['papermill'] = {'parameters': dict(), 'environment_variables': dict(), 'version': __version__}
for cell in nb.cells:
if (not hasattr(cell.metadata, 'tags')):
cell.metadata['tags'] = []
if (not hasattr(cell.metadata, 'papermill')):
cell.metadata['papermill'] = dict()
return nb
|
Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
|
codesearchnet
|
def __init__(self, object_graph_proto, save_path, save_path_tensor, reader, restore_op_cache, graph_view, options, saveables_cache):
self.options = options
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
self.unused_attributes = {}
self.object_by_proto_id = weakref.WeakValueDictionary()
self.matched_proto_ids = set()
self.all_python_objects = object_identity.ObjectIdentityWeakSet()
self.save_path_tensor = save_path_tensor
self.save_path_string = save_path
self.dtype_map = reader.get_variable_to_dtype_map()
self.shape_map = reader.get_variable_to_shape_map()
self.restore_ops = []
self.restore_ops_by_name = restore_op_cache
self.graph_view = graph_view
self.new_restore_ops_callback = None
self.deferred_slot_restorations = {}
self.slot_restorations = collections.defaultdict(list)
self.expect_partial_attr = False
if not self.options.experimental_skip_slot_variables:
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
self.slot_restorations[slot_reference.original_variable_node_id].append(base._SlotVariableRestoration(optimizer_id=node_index, slot_variable_id=slot_reference.slot_variable_node_id, slot_name=slot_reference.slot_name))
self._deleter = _CheckpointRestoreCoordinatorDeleter(self.expect_partial_attr, self.object_graph_proto, self.matched_proto_ids, self.unused_attributes)
self.saveables_cache = saveables_cache
|
Specify the checkpoint being loaded.
Args:
object_graph_proto: The TrackableObjectGraph protocol buffer associated
with this checkpoint.
save_path: A string, the path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
save_path_tensor: A string `Tensor` which contains or will be fed the save
path.
reader: A `CheckpointReader` for `save_path`. If None,
`_CheckpointRestoreCoordinator` will initialize one itself.
restore_op_cache: A dictionary shared between
`_CheckpointRestoreCoordinator`s for the same Python objects, used to
look up restore ops by name to avoid re-creating them across multiple
`restore()` calls.
graph_view: A graph_view_lib.ObjectGraphView object for the restored
objects.
options: A CheckpointOptions object.
saveables_cache: An optional cache storing previously created
SaveableObjects created for each Trackable. Maps Trackables to a
dictionary of attribute names to Trackable.
|
github-repos
|
def build_relative_position(query_size, key_size):
q_ids = tf.range(query_size, dtype=tf.int32)
k_ids = tf.range(key_size, dtype=tf.int32)
rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
return tf.cast(rel_pos_ids, tf.int64)
|
Build relative position according to the query and key
We assume the absolute position of the query \(P_q\) ranges over (0, query_size) and the absolute position of the
key \(P_k\) ranges over (0, key_size). The relative position from query to key is \(R_{q \rightarrow k} = P_q -
P_k\)
Args:
query_size (int): the length of query
key_size (int): the length of key
Return:
`tf.Tensor`: A tensor with shape [1, query_size, key_size]
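Example (a minimal sketch): for a query of length 3 and a key of length 4 the result has shape [1, 3, 4].
    rel_pos = build_relative_position(query_size=3, key_size=4)
    # rel_pos[0] == [[0, -1, -2, -3], [1, 0, -1, -2], [2, 1, 0, -1]]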
|
github-repos
|
def kernel_initrds(self):
kernels = []
initrds = []
name_values = [(k, v) for (k, v) in self.data.get('configs', [])]
for value in (self.data.get('title', []) + self.data.get('menuentry', [])):
name_values.extend(value)
for (name, value) in name_values:
if name.startswith('module'):
if ('vmlinuz' in value):
kernels.append(_parse_kernel_initrds_value(value))
elif (('initrd' in value) or ('initramfs' in value)):
initrds.append(_parse_kernel_initrds_value(value))
elif name.startswith(('kernel', 'linux')):
if ('ipxe.lkrn' in value):
return {}
elif ('xen.gz' not in value):
kernels.append(_parse_kernel_initrds_value(value))
elif (name.startswith('initrd') or name.startswith('initrd16')):
initrds.append(_parse_kernel_initrds_value(value))
return {GRUB_KERNELS: kernels, GRUB_INITRDS: initrds}
|
Get the `kernel` and `initrd` files referenced in GRUB configuration files
Returns:
(dict): Returns a dict of the `kernel` and `initrd` files referenced
in GRUB configuration files
|
codesearchnet
|
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
self._logger.info('Loading configuration from file: %s', yamlfile)
try:
parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
except self._modules['yaml'].YAMLError:
self._logger.exception('Problem parsing YAML')
raise self.ConfigurationInvalidError(('Failed to load from %s as YAML' % yamlfile))
if (not isinstance(parsed_yaml, dict)):
raise self.ConfigurationInvalidError('YAML parsed, but wrong type, should be dict', parsed_yaml)
self._logger.debug('Configuration loaded from file: %s', parsed_yaml)
self.load_from_dict(parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared)
|
Loads the configuration from a file.
Parsed contents must be a single dict mapping config key to value.
Args:
yamlfile: The opened file object to load configuration from.
See load_from_dict() for other args' descriptions.
Raises:
ConfigurationInvalidError: If configuration file can't be read, or can't
be parsed as either YAML (or JSON, which is a subset of YAML).
|
codesearchnet
|
def _maybe_add_default_serving_output(export_outputs):
if len(export_outputs) == 1:
(key, value), = export_outputs.items()
if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_outputs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value
if len(export_outputs) > 1:
if signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY not in export_outputs:
raise ValueError('Multiple export_outputs were provided, but none of them is specified as the default. Do this by naming one of them with signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')
return export_outputs
|
Add a default serving output to the export_outputs if not present.
Args:
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving. Should be a dict.
Returns:
export_outputs dict with default serving signature added if necessary
Raises:
ValueError: if multiple export_outputs were provided without a default
serving key.
|
github-repos
|
def BuildArtifactsRegistry(
cls, artifact_definitions_path, custom_artifacts_path):
if artifact_definitions_path and not os.path.isdir(
artifact_definitions_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(
artifact_definitions_path))
if custom_artifacts_path and not os.path.isfile(custom_artifacts_path):
raise errors.BadConfigOption(
'No such artifacts filter file: {0:s}.'.format(custom_artifacts_path))
registry = artifacts_registry.ArtifactDefinitionsRegistry()
reader = artifacts_reader.YamlArtifactsReader()
try:
registry.ReadFromDirectory(reader, artifact_definitions_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(artifact_definitions_path, exception))
if custom_artifacts_path:
try:
registry.ReadFromFile(reader, custom_artifacts_path)
except (KeyError, artifacts_errors.FormatError) as exception:
raise errors.BadConfigOption((
'Unable to read artifact definitions from: {0:s} with error: '
'{1!s}').format(custom_artifacts_path, exception))
return registry
|
Builds an artifact definitions registry from the given paths.
Args:
artifact_definitions_path (str): path to the artifact definitions directory.
custom_artifacts_path (str): path to the custom artifact definitions file.
Returns:
artifacts.ArtifactDefinitionsRegistry: artifact definitions registry.
Raises:
BadConfigOption: if a path is invalid or the artifact definitions cannot be read.
|
juraj-google-style
|
import os
from contextlib import contextmanager

@contextmanager
def cd(new_directory, clean_up=lambda: True):
    previous_directory = os.getcwd()
    os.chdir(os.path.expanduser(new_directory))
try:
yield
finally:
os.chdir(previous_directory)
clean_up()
|
Changes into a given directory and cleans up after it is done
Args:
new_directory: The directory to change to
clean_up: A method to clean up the working directory once done
|
juraj-google-style
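A quick usage sketch for `cd` above (the yield/try/finally pattern implies it is wrapped with contextlib.contextmanager, as shown in the code):

import os
import tempfile

with cd(tempfile.gettempdir(), clean_up=lambda: print('cleaned up')):
    print(os.getcwd())   # inside the temp directory
print(os.getcwd())       # back in the previous directory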
|
def is_known_type(self, type_name):
type_name = str(type_name)
if (type_name in self.known_types):
return True
return False
|
Check if a type is known to the type system.
Args:
type_name (str): The name of the type to check.
Returns:
bool: True if the type is a known instantiated simple type, False otherwise.
|
codesearchnet
|
def transform(self, y):
if y.ndim == 1:
return y.reshape(-1, 1)
return y
|
Makes 1D y 2D.
Args:
y : np.ndarray
Target y to be transformed.
Returns:
np.ndarray
A numpy array, of dimension at least 2.
|
github-repos
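The reshape in `transform` above is equivalent to this minimal numpy check:

import numpy as np

y = np.array([1, 0, 1])     # 1D targets, shape (3,)
y2 = y.reshape(-1, 1)       # column vector, shape (3, 1)
assert y2.ndim == 2 and y2.shape == (3, 1)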
|
def get_pipeline_yaml(file):
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition
|
Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
|
codesearchnet
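A usage sketch for `get_pipeline_yaml` above; the pipeline path and contents are hypothetical:

# Hypothetical pipeline path; get_pipeline_yaml() is the function above.
with open('pipelines/my-pipeline.yaml') as file:
    pipeline_definition = get_pipeline_yaml(file)
for step in pipeline_definition.get('steps', []):
    print(step)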
|
def add_feature(feature, package=None, source=None, limit_access=False, enable_parent=False, image=None, restart=False):
cmd = ['DISM', '/Quiet', ('/Image:{0}'.format(image) if image else '/Online'), '/Enable-Feature', '/FeatureName:{0}'.format(feature)]
if package:
cmd.append('/PackageName:{0}'.format(package))
if source:
cmd.append('/Source:{0}'.format(source))
if limit_access:
cmd.append('/LimitAccess')
if enable_parent:
cmd.append('/All')
if (not restart):
cmd.append('/NoRestart')
return __salt__['cmd.run_all'](cmd)
|
Install a feature using DISM
Args:
feature (str): The feature to install
package (Optional[str]): The parent package for the feature. You do not
have to specify the package if it is the Windows Foundation Package.
Otherwise, use package to specify the parent package of the feature
source (Optional[str]): The optional source of the feature's files. Default
is set by group policy and can be Windows Update.
limit_access (Optional[bool]): Prevent DISM from contacting Windows
Update for the source package
enable_parent (Optional[bool]): True will enable all parent features of
the specified feature
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Returns:
dict: A dictionary containing the results of the command
CLI Example:
.. code-block:: bash
salt '*' dism.add_feature NetFx3
|
codesearchnet
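The DISM command list the function assembles can be previewed without a salt installation; for example, add_feature('NetFx3', enable_parent=True) on the running OS builds:

# Command list built for add_feature('NetFx3', enable_parent=True);
# restart defaults to False, so /NoRestart is appended.
cmd = ['DISM', '/Quiet', '/Online', '/Enable-Feature',
       '/FeatureName:NetFx3', '/All', '/NoRestart']
print(' '.join(cmd))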
|
def DeregisterAnalyzer(cls, analyzer_class):
analyzer_name = analyzer_class.NAME.lower()
if (analyzer_name not in cls._analyzer_classes):
raise KeyError('analyzer class not set for name: {0:s}'.format(analyzer_class.NAME))
del cls._analyzer_classes[analyzer_name]
|
Deregisters an analyzer class.
The analyzer classes are identified based on their lower case name.
Args:
analyzer_class (type): class object of the analyzer.
Raises:
KeyError: if analyzer class is not set for the corresponding name.
|
codesearchnet
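The registry pattern behind register/deregister reduces to a dict keyed on the lower-cased class name; a minimal sketch with a stand-in analyzer class:

_analyzer_classes = {}

class HashingAnalyzer:            # stand-in with the expected NAME attribute
    NAME = 'hashing'

_analyzer_classes[HashingAnalyzer.NAME.lower()] = HashingAnalyzer   # register
del _analyzer_classes[HashingAnalyzer.NAME.lower()]                 # what DeregisterAnalyzer does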
|
def run_processor(processorClass, ocrd_tool=None, mets_url=None, resolver=None, workspace=None, page_id=None, log_level=None, input_file_grp=None, output_file_grp=None, parameter=None, working_dir=None):
workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
if (parameter is not None):
        if (not ('://' in parameter)):
fname = os.path.abspath(parameter)
else:
fname = workspace.download_url(parameter)
with open(fname, 'r') as param_json_file:
parameter = json.load(param_json_file)
else:
parameter = {}
log.debug('Running processor %s', processorClass)
processor = processorClass(workspace, ocrd_tool=ocrd_tool, page_id=page_id, input_file_grp=input_file_grp, output_file_grp=output_file_grp, parameter=parameter)
ocrd_tool = processor.ocrd_tool
name = ('%s v%s' % (ocrd_tool['executable'], processor.version))
otherrole = ocrd_tool['steps'][0]
log.debug('Processor instance %s (%s doing %s)', processor, name, otherrole)
processor.process()
workspace.mets.add_agent(name=name, _type='OTHER', othertype='SOFTWARE', role='OTHER', otherrole=otherrole)
workspace.save_mets()
return processor
|
Create a workspace for mets_url and run processor through it
Args:
parameter (string): URL to the parameter
|
codesearchnet
|
def is_generic_dict(type_: Type) -> bool:
if hasattr(typing, '_GenericAlias'):
return (isinstance(type_, typing._GenericAlias) and
type_.__origin__ is dict)
else:
return (isinstance(type_, typing.GenericMeta) and
type_.__origin__ is Dict)
|
Determines whether a type is a Dict[...].
How to do this varies for different Python versions, due to the
typing library not having a stable API. This functions smooths
over the differences.
Args:
type_: The type to check.
Returns:
True iff it's a Dict[...something...].
|
juraj-google-style
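A quick check of the predicate above (assuming the function is in scope); on Python 3.8+, typing.get_origin offers the same test without touching private API:

from typing import Dict, List

assert is_generic_dict(Dict[str, int])
assert not is_generic_dict(List[int])
assert not is_generic_dict(dict)   # bare dict is not a Dict[...]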
|
async def fetch_messages(self, selected: SelectedMailbox, sequence_set: SequenceSet, attributes: FrozenSet[FetchAttribute]) -> Tuple[(Iterable[Tuple[(int, MessageInterface)]], SelectedMailbox)]:
...
|
Get a list of loaded message objects corresponding to given sequence
set.
Args:
selected: The selected mailbox session.
sequence_set: Sequence set of message sequences or UIDs.
attributes: Fetch attributes for the messages.
Raises:
:class:`~pymap.exceptions.MailboxNotFound`
|
codesearchnet
|
def dump_database_as_insert_sql(engine: Engine, fileobj: TextIO=sys.stdout, include_ddl: bool=False, multirow: bool=False) -> None:
for tablename in get_table_names(engine):
dump_table_as_insert_sql(engine=engine, table_name=tablename, fileobj=fileobj, include_ddl=include_ddl, multirow=multirow)
|
Reads an entire database and writes SQL to replicate it to the output
file-like object.
Args:
engine: SQLAlchemy :class:`Engine`
fileobj: file-like object to write to
include_ddl: if ``True``, include the DDL to create the table as well
multirow: write multi-row ``INSERT`` statements
|
codesearchnet
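A usage sketch with a throwaway in-memory SQLite engine, assuming SQLAlchemy 1.x (where Engine.execute is available) and that the helpers come from the same module as the code above:

from sqlalchemy import create_engine

engine = create_engine('sqlite://')           # throwaway in-memory database
engine.execute('CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)')
engine.execute("INSERT INTO t (name) VALUES ('alice')")
dump_database_as_insert_sql(engine, include_ddl=True, multirow=True)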
|
def get_time(self, force_uptime=False):
if force_uptime:
return self.uptime
time = (self.uptime + self.time_offset)
if self.is_utc:
time |= (1 << 31)
return time
|
Get the current UTC time or uptime.
By default, this method will return UTC time if possible and fall back
to uptime if not. If you specify, force_uptime=True, it will always
return uptime even if utc time is available.
Args:
force_uptime (bool): Always return uptime, defaults to False.
Returns:
int: The current uptime or encoded utc time.
|
codesearchnet
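The UTC flag in `get_time` above is just bit 31, so decoding a returned value is symmetric; `device` here is hypothetical:

encoded = device.get_time()             # `device` is hypothetical
is_utc = bool(encoded & (1 << 31))      # bit 31 flags UTC-encoded time
seconds = encoded & ~(1 << 31)          # lower 31 bits hold the timestamp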
|
import ctypes

ODBC_ADD_DSN = 1  # from odbcinst.h; request type for SQLConfigDataSource
nul = chr(0)  # NUL separator between "attribute=value" strings


def create_user_dsn(driver: str, **kw) -> bool:
    attributes = []
    for attr in kw.keys():
        attributes.append("%s=%s" % (attr, kw[attr]))
    return bool(
        ctypes.windll.ODBCCP32.SQLConfigDataSource(0, ODBC_ADD_DSN, driver,
                                                   nul.join(attributes))
    )
|
(Windows only.)
Create a user ODBC data source name (DSN).
Args:
driver: ODBC driver name
kw: Driver attributes
Returns:
bool: was the DSN created?
|
juraj-google-style
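A hypothetical call to the function above; it requires Windows, and the driver name and attributes depend on what is installed locally:

# Hypothetical example; requires Windows and a locally installed ODBC driver.
ok = create_user_dsn('SQL Server', DSN='MyTestDSN',
                     Server='localhost', Database='testdb')
print('DSN created' if ok else 'DSN creation failed')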
|
def wait_for_compilation_job(self, job, poll=5):
desc = _wait_until(lambda: _compilation_job_status(self.sagemaker_client, job), poll)
self._check_job_status(job, desc, 'CompilationJobStatus')
return desc
|
Wait for an Amazon SageMaker Neo compilation job to complete.
Args:
job (str): Name of the compilation job to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
(dict): Return value from the ``DescribeCompilationJob`` API.
Raises:
ValueError: If the compilation job fails.
|
juraj-google-style
|
def _preprocess_params(cls, kwargs):
    for (attr, val) in list(kwargs.items()):  # copy so keys can be deleted during iteration
if (cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_):
del kwargs[attr]
continue
if (val == ''):
kwargs[attr] = None
continue
if ((attr in class_mapper(cls).relationships) and (attr not in cls._no_overwrite_)):
rel = class_mapper(cls).relationships[attr]
if rel.uselist:
if isinstance(val, list):
if all((isinstance(v, dict) for v in val)):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new_all(list_of_kwargs=val, keys=[rel_cls.primary_key_name()])
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
mapping_col = rel.collection_class().keyfunc.name
list_of_kwargs = [merge(v, {mapping_col: k}) for (k, v) in val.items()]
kwargs[attr] = {getattr(obj, mapping_col): obj for obj in rel_cls.update_or_new_all(list_of_kwargs=list_of_kwargs, keys=[rel_cls.primary_key_name()])}
elif isinstance(val, dict):
rel_cls = cls.mapped_rel_class(attr)
kwargs[attr] = rel_cls.update_or_new(**merge(val, {'keys': [rel_cls.primary_key_name()]}))
return kwargs
|
Returns a preprocessed dictionary of parameters.
Use this to filter the kwargs passed to `new`, `create`,
`build` methods.
Args:
**kwargs: a dictionary of parameters
|
codesearchnet
|
def _expand_to_event_rank(self, x):
expanded_x = x
for _ in range(tensorshape_util.rank(self.event_shape)):
expanded_x = tf.expand_dims(expanded_x, (- 1))
return expanded_x
|
Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
|
codesearchnet
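A small sketch of the expansion performed by `_expand_to_event_rank` above, for a hypothetical event rank of 2:

import tensorflow as tf

x = tf.constant([1.0, 2.0])   # batch of 2 scalars, shape (2,)
event_rank = 2                # e.g. event_shape = (3, 4)
for _ in range(event_rank):
    x = tf.expand_dims(x, -1)
print(x.shape)                # (2, 1, 1): broadcasts against the event dims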
|
def GetShadowMap(self, since=None):
return ShadowUpdateGetter().GetUpdates(self, self.conf['shadow_url'], since)
|
Return the shadow map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of shadow.ShadowMap
|
github-repos
|
def _resolve_task_logging(job_metadata, job_resources, task_descriptors):
if not job_resources.logging:
return
for task_descriptor in task_descriptors:
logging_uri = provider_base.format_logging_uri(
job_resources.logging.uri, job_metadata, task_descriptor.task_metadata)
logging_path = job_model.LoggingParam(logging_uri,
job_resources.logging.file_provider)
if task_descriptor.task_resources:
task_descriptor.task_resources = task_descriptor.task_resources._replace(
logging_path=logging_path)
else:
task_descriptor.task_resources = job_model.Resources(
logging_path=logging_path)
|
Resolve the logging path from job and task properties.
Args:
job_metadata: Job metadata, such as job-id, job-name, and user-id.
job_resources: Resources specified such as ram, cpu, and logging path.
task_descriptors: Task metadata, parameters, and resources.
Resolve the logging path, which may have substitution parameters such as
job-id, task-id, user-id, and job-name.
|
juraj-google-style
|
def target(self):
return c_api.TF_ServerTarget(self._server)
|
Returns the target for a `tf.compat.v1.Session` to connect to this server.
To create a `tf.compat.v1.Session` that connects to this server, use the
following snippet:
```python
server = tf.distribute.Server(...)
with tf.compat.v1.Session(server.target):
# ...
```
Returns:
A string containing a session target for this server.
|
github-repos
|
def restart_apppool(name):
ps_cmd = ['Restart-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
|
Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_apppool name='MyTestPool'
|
juraj-google-style
|
def __init__(self, meter_id=Meter.OFPM_ALL):
super().__init__()
self.meter_id = meter_id
|
Create a MeterMultipartRequest with the optional parameters below.
Args:
meter_id(Meter): Meter identifier. The value Meter.OFPM_ALL is used
to refer to all Meters on the switch.
|
juraj-google-style
|