| code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes) |
|---|---|---|
def from_path(cls, path, suffix=""):
def _get_filepath(filename):
name_pattern = filename + suffix + '*' if filename != 'POTCAR' \
else filename + '*'
paths = glob.glob(os.path.join(path, name_pattern))
fpath = None
if len(paths) >= 1:
paths.sort(reverse=True)
warning_msg = "Multiple files detected, using %s" \
% os.path.basename(paths[0]) if len(paths) > 1 \
else None
fpath = paths[0]
else:
warning_msg = "Could not find %s" % filename
if filename in ['AECCAR0', 'AECCAR2']:
warning_msg += ", cannot calculate charge transfer."
elif filename == "POTCAR":
warning_msg += ", interpret Bader results with caution."
if warning_msg:
warnings.warn(warning_msg)
return fpath
chgcar_filename = _get_filepath("CHGCAR")
if chgcar_filename is None:
raise IOError("Could not find CHGCAR!")
potcar_filename = _get_filepath("POTCAR")
aeccar0 = _get_filepath("AECCAR0")
aeccar2 = _get_filepath("AECCAR2")
if (aeccar0 and aeccar2):
chgref = Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2)
chgref_filename = "CHGREF"
chgref.write_file(chgref_filename)
else:
chgref_filename = None
return cls(chgcar_filename, potcar_filename=potcar_filename,
chgref_filename=chgref_filename)
|
Convenient constructor that takes in the path name of a VASP run
to perform Bader analysis.
Args:
path (str): Name of directory where VASP output files are
stored.
suffix (str): specific suffix to look for (e.g. '.relax1'
for 'CHGCAR.relax1.gz').
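Example (a hedged sketch assuming this is pymatgen's BaderAnalysis.from_path and that the directory holds gzipped CHGCAR/AECCAR/POTCAR files from a relaxation run; the `bader` executable must be on PATH):
from pymatgen.command_line.bader_caller import BaderAnalysis

analysis = BaderAnalysis.from_path("./vasp_run", suffix=".relax1")
print(analysis.get_charge_transfer(0))  # charge transfer for the first site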
|
juraj-google-style
|
def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer:
if type(reserve) is str:
reserve = mgl.strsize(reserve)
res = Buffer.__new__(Buffer)
res.mglo, res._size, res._glo = self.mglo.buffer(data, reserve, dynamic)
res._dynamic = dynamic
res.ctx = self
res.extra = None
return res
|
Create a :py:class:`Buffer` object.
Args:
data (bytes): Content of the new buffer.
Keyword Args:
reserve (int): The number of bytes to reserve.
dynamic (bool): Treat buffer as dynamic.
Returns:
:py:class:`Buffer` object
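Example (a minimal sketch assuming the moderngl Context API):
import moderngl

ctx = moderngl.create_standalone_context()
vbo = ctx.buffer(data=b"\x00" * 16)               # buffer initialised from 16 bytes
scratch = ctx.buffer(reserve=1024, dynamic=True)  # empty 1 KiB dynamic buffer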
|
juraj-google-style
|
def format_page(self, page, link_resolver, output):
debug(('Formatting page %s' % page.link.ref), 'formatting')
if output:
actual_output = os.path.join(output, 'html')
if (not os.path.exists(actual_output)):
os.makedirs(actual_output)
else:
actual_output = None
page.format(self.formatter, link_resolver, actual_output)
|
Called by `project.Project.format_page`, to leave full control
to extensions over the formatting of the pages they are
responsible for.
Args:
page: tree.Page, the page to format.
link_resolver: links.LinkResolver, object responsible
for resolving links potentially mentioned in `page`
output: str, path to the output directory.
|
codesearchnet
|
def _check_enum(parameter_name, value, parameter_config):
enum_values = [enum['backendValue'] for enum in parameter_config['enum'].values() if ('backendValue' in enum)]
if (value not in enum_values):
raise errors.EnumRejectionError(parameter_name, value, enum_values)
|
Checks if an enum value is valid.
This is called by the transform_parameter_value function and shouldn't be
called directly.
This verifies that the value of an enum parameter is valid.
Args:
parameter_name: A string containing the name of the parameter, which is
either just a variable name or the name with the index appended. For
example 'var' or 'var[2]'.
value: A string containing the value passed in for the parameter.
parameter_config: The dictionary containing information specific to the
parameter in question. This is retrieved from request.parameters in
the method config.
Raises:
EnumRejectionError: If the given value is not among the accepted
enum values in the field parameter.
|
codesearchnet
|
def get_pourbaix_plot(self, limits=None, title='', label_domains=True, plt=None):
if (limits is None):
limits = [[(- 2), 16], [(- 3), 3]]
plt = (plt or pretty_plot(16))
xlim = limits[0]
ylim = limits[1]
h_line = np.transpose([[xlim[0], ((- xlim[0]) * PREFAC)], [xlim[1], ((- xlim[1]) * PREFAC)]])
o_line = np.transpose([[xlim[0], (((- xlim[0]) * PREFAC) + 1.23)], [xlim[1], (((- xlim[1]) * PREFAC) + 1.23)]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], 'r--', linewidth=lw)
plt.plot(o_line[0], o_line[1], 'r--', linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], 'k-.', linewidth=lw)
plt.plot(V0_line[0], V0_line[1], 'k-.', linewidth=lw)
for (entry, vertices) in self._pd._stable_domain_vertices.items():
center = np.average(vertices, axis=0)
(x, y) = np.transpose(np.vstack([vertices, vertices[0]]))
plt.plot(x, y, 'k-', linewidth=lw)
if label_domains:
plt.annotate(generate_entry_label(entry), center, ha='center', va='center', fontsize=20, color='b')
plt.xlabel('pH')
plt.ylabel('E (V)')
plt.title(title, fontsize=20, fontweight='bold')
return plt
|
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
title (str): Title to display on plot
label_domains (bool): whether to label Pourbaix domains
plt (pyplot): Pyplot instance for plotting
Returns:
plt (pyplot) - matplotlib plot object with the Pourbaix diagram
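Example (a hedged sketch assuming pymatgen's PourbaixDiagram/PourbaixPlotter and a legacy Materials Project API key; the key and chemical system are placeholders):
from pymatgen.ext.matproj import MPRester
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixPlotter

with MPRester("YOUR_API_KEY") as mpr:           # hypothetical API key
    entries = mpr.get_pourbaix_entries(["Fe"])  # Fe-H2O system
plotter = PourbaixPlotter(PourbaixDiagram(entries))
plt = plotter.get_pourbaix_plot(limits=[[-2, 16], [-3, 3]], title="Fe")
plt.show()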
|
codesearchnet
|
def _on_qosok(self, qosok_frame):
for name, args in self._exchanges.items():
self._channel.exchange_declare(
exchange=name,
exchange_type=args["type"],
durable=args["durable"],
auto_delete=args["auto_delete"],
arguments=args["arguments"],
passive=config.conf["passive_declares"],
callback=self._on_exchange_declareok,
)
for name, args in self._queues.items():
self._channel.queue_declare(
queue=name,
durable=args["durable"],
auto_delete=args["auto_delete"],
exclusive=args["exclusive"],
arguments=args["arguments"],
passive=config.conf["passive_declares"],
callback=self._on_queue_declareok,
)
|
Callback invoked when the server acknowledges the QoS settings.
Declares the exchanges and queues, asserting that they exist or creating them.
Args:
qosok_frame (pika.spec.Basic.Qos): The frame sent from the server.
|
juraj-google-style
|
def fit_transform(self, input_df, normalize=True):
_df = input_df.copy(deep=False)
self.normalize = normalize
self.convert_to_categorical(_df)
self.cat_columns = _df.select_dtypes(include=['category']).columns.tolist()
_df = _df.select_dtypes(include=['bool', 'int', 'float', 'category'])
if self.normalize:
for column in list(_df.select_dtypes(include=[np.number]).columns.values):
print('Normalizing column {:s}...'.format(column))
_df[column], _min, _max = self._normalize_series(_df[column])
self.norm_map[column] = (_min, _max)
return self.dummy_encoder.fit_transform(_df)
|
Convert the dataframe to a matrix (numpy ndarray)
Args:
input_df (dataframe): The dataframe to convert
normalize (bool): Boolean flag to normalize numeric columns (default=True)
|
juraj-google-style
|
def find_contexts(self, in_request=None, in_resolve=None):
names = self.context_names
if in_request:
def _in_request(name):
context = self.context(name)
packages = set((x.name for x in context.requested_packages(True)))
return (in_request in packages)
names = [x for x in names if _in_request(x)]
if in_resolve:
if isinstance(in_resolve, basestring):
in_resolve = PackageRequest(in_resolve)
def _in_resolve(name):
context = self.context(name)
variant = context.get_resolved_package(in_resolve.name)
if variant:
overlap = (variant.version in in_resolve.range)
return ((in_resolve.conflict and (not overlap)) or (overlap and (not in_resolve.conflict)))
else:
return in_resolve.conflict
names = [x for x in names if _in_resolve(x)]
return names
|
Find contexts in the suite based on search criteria.
Args:
in_request (str): Match contexts that contain the given package in
their request.
in_resolve (str or `Requirement`): Match contexts that contain the
given package in their resolve. You can also supply a conflict
requirement - '!foo' will match any contexts whose resolve does
not contain any version of package 'foo'.
Returns:
List of context names that match the search criteria.
|
codesearchnet
|
def _probe_characteristics_finished(self, result):
handle = result['context']['handle']
conn_id = result['context']['connection_id']
conndata = self._get_connection(handle, 'preparing')
if conndata is None:
self._logger.info('Connection disconnected before probe_char... finished, conn_id=%d',
conn_id)
return
callback = conndata['callback']
if result['result'] is False:
conndata['failed'] = True
conndata['failure_reason'] = 'Could not probe GATT characteristics'
self.disconnect_async(conn_id, self._on_connection_failed)
return
services = result['return_value']['services']
if TileBusService not in services:
conndata['failed'] = True
conndata['failure_reason'] = 'TileBus service not present in GATT services'
self.disconnect_async(conn_id, self._on_connection_failed)
return
conndata['chars_done_time'] = time.time()
service_time = conndata['services_done_time'] - conndata['connect_time']
char_time = conndata['chars_done_time'] - conndata['services_done_time']
total_time = service_time + char_time
conndata['state'] = 'connected'
conndata['services'] = services
conndata['parser'] = IOTileReportParser(report_callback=self._on_report, error_callback=self._on_report_error)
conndata['parser'].context = conn_id
del conndata['disconnect_handler']
with self.count_lock:
self.connecting_count -= 1
self._logger.info("Total time to connect to device: %.3f (%.3f enumerating services, %.3f enumerating chars)", total_time, service_time, char_time)
callback(conndata['connection_id'], self.id, True, None)
|
Callback when BLE adapter has finished probing services and characteristics for a device
Args:
result (dict): Result from the probe_characteristics command
|
juraj-google-style
|
def from_str(self, in_str):
parts = in_str.split(";")
for part in parts:
var_name, value = part.split(":")
if var_name == "Obs_Threshold":
self.obs_threshold = float(value)
elif var_name == "Thresholds":
self.thresholds = np.array(value.split(), dtype=float)
self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
data=np.zeros((self.thresholds.size,
self.contingency_tables.columns.size)))
elif var_name in self.contingency_tables.columns:
self.contingency_tables[var_name] = np.array(value.split(), dtype=int)
|
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method
|
juraj-google-style
|
def check_steps_argument(input_data, steps, steps_name):
is_x_iterator = isinstance(input_data, (iterator_ops.Iterator, iterator_ops.IteratorBase))
if input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and (not input_data)):
if steps is None:
input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors'
raise ValueError('When using {input_type} as input to a model, you should specify the `{steps_name}` argument.'.format(input_type=input_type_str, steps_name=steps_name))
return True
if isinstance(input_data, (data_types.DatasetV1, data_types.DatasetV2)):
return True
if steps is not None:
list_types = (np.ndarray, list, tuple)
if isinstance(input_data, list_types) or (isinstance(input_data, dict) and any((isinstance(v, list_types) for v in input_data.values()))):
logging.warning('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.')
return False
|
Validates `steps` argument based on input data's type.
The cases when `steps` value must be provided are when
1. input data passed is an iterator.
2. model was built on top of symbolic tensors, input data is not
required and is `None`.
3. input data passed is a symbolic tensor.
Args:
input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or
tf.data.Dataset iterator or `None`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
steps_name: The public API's parameter name for `steps`.
Returns:
boolean, True if `steps` argument is required, else False.
Raises:
ValueError: if `steps` argument is required for given input data type
but not provided.
|
github-repos
|
def _get_upload_session_status(res):
response = json.loads(res.body.decode())
if ('sessionStatus' not in response):
try:
info = response['errorMessage']['additionalInfo']['uploader_service.GoogleRupioAdditionalInfo']['completionInfo']['customerSpecificInfo']
reason = '{} : {}'.format(info['status'], info['message'])
except KeyError:
reason = 'unknown reason'
raise exceptions.NetworkError('image upload failed: {}'.format(reason))
return response['sessionStatus']
|
Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed.
|
codesearchnet
|
def from_pure(cls, z):
return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
|
Creates a pure composition.
Args:
z (int): atomic number
|
codesearchnet
|
async def loadCoreModule(self, ctor, conf=None):
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError:
raise
except Exception:
logger.exception(f'module preCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
mdefs = modu.getModelDefs()
self.model.addDataModels(mdefs)
cmds = modu.getStormCmds()
[self.addStormCmd(c) for c in cmds]
try:
await s_coro.ornot(modu.initCoreModule)
except asyncio.CancelledError:
raise
except Exception:
logger.exception(f'module initCoreModule failed: {ctor}')
self.modules.pop(ctor, None)
return
await self.fire('core:module:load', module=ctor)
return modu
|
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict): Config dictionary for the module
|
juraj-google-style
|
def listEverything(matching=False):
pages=pageNames()
if matching:
pages=[x for x in pages if matching in x]
for i,page in enumerate(pages):
pages[i]="%s%s (%s)"%(pageFolder(page),page,getPageType(page))
print("\n".join(sorted(pages)))
|
Prints every page in the project to the console.
Args:
matching (str, optional): if given, only list pages whose name contains this string
|
juraj-google-style
|
def addFeature(self, f, conflict='error', missing='other'):
OPTIONS = ['error', 'ignore', 'me', 'other']
assert (missing in OPTIONS), 'Invalid value in `missing`.'
    assert (conflict in OPTIONS), 'Invalid value in `conflict`.'
if ((f.prop not in self.props) and (missing == 'error')):
raise Exception('Property has not set.')
elif ((f.prop not in self.props) and (missing in ['ignore', 'first'])):
return
if (isinstance(f.value, int) or isinstance(f.value, float)):
if (f.operator == '='):
inter1 = (f, f)
elif (f.operator[0] == '<'):
inter1 = (None, f)
elif (f.operator[0] == '>'):
inter1 = (f, None)
inter0 = self.props.get(f.prop, (None, None))
try:
self.props[f.prop] = Features._applyInter(inter0, inter1, conflict)
except Exception as e:
raise RADLParseException(('%s. Involved features: %s' % (e, [str(f0) for f0 in inter0])), line=f.line)
elif isinstance(f, SoftFeatures):
self.props.setdefault(f.prop, []).append(f)
elif (f.operator == 'contains'):
if ((f.prop in self.props) and (f.value.getValue('name') in self.props[f.prop])):
feature = self.props[f.prop][f.value.getValue('name')].clone()
for f0 in f.value.features:
feature.value.addFeature(f0, conflict, missing)
self.props[f.prop][f.value.getValue('name')] = feature
else:
self.props.setdefault(f.prop, {})[f.value.getValue('name')] = f
else:
value0 = self.props.get(f.prop, None)
if ((not value0) or (conflict == 'other')):
self.props[f.prop] = f
elif ((value0.value != f.value) and (conflict == 'error')):
raise RADLParseException(('Conflict adding `%s` because `%s` is already set and conflict is %s' % (f, value0, conflict)), line=f.line)
|
Add a feature.
Args:
- f(Feature): feature to add.
- conflict(str): if a property doesn't have compatible values/constraints, do:
- ``"error"``: raise exception.
- ``"ignore"``: go on.
- ``"me"``: keep the old value.
- ``"other"``: set the passed value.
- missing(str): if a property has not been set yet, do:
- ``"error"``: raise exception.
- ``"ignore"``: do nothning.
- ``"me"``: do nothing.
- ``"other"``: set the passed value.
|
codesearchnet
|
def __init__(self, timestamp=None):
if timestamp and (timestamp < 0 or timestamp > self._UINT60_MAX):
raise ValueError('Invalid UUID version 1 timestamp.')
super(UUIDTime, self).__init__()
self._precision = definitions.PRECISION_100_NANOSECONDS
self._timestamp = timestamp
|
Initializes a UUID version 1 timestamp.
Args:
timestamp (Optional[int]): UUID version 1 timestamp.
Raises:
ValueError: if the UUID version 1 timestamp is invalid.
|
juraj-google-style
|
def get_and_update(cls, id, **kwargs):
model = cls.get(id)
for k, v in cls._preprocess_params(kwargs).items():
setattr(model, k, v)
cls.session.commit()
return model
|
Returns an updated instance of the service's model class.
Args:
id: the identifier of the model instance to update
**kwargs: update parameters
|
juraj-google-style
|
def _ExtractFileEntry(
self, path_spec, destination_path, output_writer, skip_duplicates=True):
file_entry = path_spec_resolver.Resolver.OpenFileEntry(path_spec)
if not file_entry:
logger.warning('Unable to open file entry for path spec: {0:s}'.format(
path_spec.comparable))
return
if not self._filter_collection.Matches(file_entry):
return
file_entry_processed = False
for data_stream in file_entry.data_streams:
if self._abort:
break
self._ExtractDataStream(
file_entry, data_stream.name, destination_path, output_writer,
skip_duplicates=skip_duplicates)
file_entry_processed = True
if not file_entry_processed:
self._ExtractDataStream(
file_entry, '', destination_path, output_writer,
skip_duplicates=skip_duplicates)
|
Extracts a file entry.
Args:
path_spec (dfvfs.PathSpec): path specification of the source file.
destination_path (str): path where the extracted files should be stored.
output_writer (CLIOutputWriter): output writer.
skip_duplicates (Optional[bool]): True if files with duplicate content
should be skipped.
|
juraj-google-style
|
def default_sample_indices_fn(metadata: VideoMetadata, num_frames=None, fps=None, **kwargs):
total_num_frames = metadata.total_num_frames
video_fps = metadata.fps
if num_frames is None and fps is not None:
num_frames = int(total_num_frames / video_fps * fps)
if num_frames > total_num_frames:
raise ValueError(f'When loading the video with fps={fps}, we computed num_frames={num_frames} which exceeds total_num_frames={total_num_frames}. Check fps or video metadata.')
if num_frames is not None:
indices = np.arange(0, total_num_frames, total_num_frames / num_frames, dtype=int)
else:
indices = np.arange(0, total_num_frames, dtype=int)
return indices
|
A default sampling function that replicates the logic used in get_uniform_frame_indices,
while optionally handling `fps` if `num_frames` is not provided.
Args:
metadata (`VideoMetadata`):
`VideoMetadata` object containing metadata about the video, such as "total_num_frames" or "fps".
num_frames (`int`, *optional*):
Number of frames to sample uniformly.
fps (`int`, *optional*):
Desired frames per second. Takes priority over num_frames if both are provided.
Returns:
`np.ndarray`: Array of frame indices to sample.
|
github-repos
|
def limit_weights(weights, limit=0.1):
if 1.0 / limit > len(weights):
raise ValueError('invalid limit -> 1 / limit must be <= len(weights)')
if isinstance(weights, dict):
weights = pd.Series(weights)
if np.round(weights.sum(), 1) != 1.0:
raise ValueError('Expecting weights (that sum to 1) - sum is %s'
% weights.sum())
res = np.round(weights.copy(), 4)
to_rebalance = (res[res > limit] - limit).sum()
ok = res[res < limit]
ok += (ok / ok.sum()) * to_rebalance
res[res > limit] = limit
res[res < limit] = ok
if any(x > limit for x in res):
return limit_weights(res, limit=limit)
return res
|
Limits weights and redistributes the excess amount
proportionally.
ex:
- weights are {a: 0.7, b: 0.2, c: 0.1}
- call with limit=0.5
- excess 0.2 in a is distributed to b and c
proportionally.
- result is {a: 0.5, b: 0.33, c: 0.167}
Args:
* weights (Series): A series describing the weights
* limit (float): Maximum weight allowed
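Example (a hedged sketch assuming this is ffn's limit_weights helper):
import pandas as pd
from ffn.core import limit_weights

w = pd.Series({"a": 0.7, "b": 0.2, "c": 0.1})
print(limit_weights(w, limit=0.5))
# approximately: a 0.500, b 0.333, c 0.167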
|
juraj-google-style
|
def _read_mode_pocsp(self, size, kind):
temp = self._read_binary(size)
data = dict(kind=kind, length=size, start=(True if int(temp[0]) else False), end=(True if int(temp[1]) else False), filler=bytes(chr(int(temp[2:], base=2)), encoding='utf-8'))
return data
|
Read Partial Order Connection Service Profile option.
Positional arguments:
* size - int, length of option
* kind - int, 10 (POC-Serv Profile)
Returns:
* dict -- extracted Partial Order Connection Service Profile (POC-SP) option
Structure of TCP POC-SP Option [RFC 1693][RFC 6247]:
1 bit 1 bit 6 bits
+----------+----------+------------+----------+--------+
| Kind=10 | Length=3 | Start_flag | End_flag | Filler |
+----------+----------+------------+----------+--------+
Octets Bits Name Description
0 0 tcp.pocsp.kind Kind (10)
1 8 tcp.pocsp.length Length (3)
2 16 tcp.pocsp.start Start Flag
2 17 tcp.pocsp.end End Flag
2 18 tcp.pocsp.filler Filler
|
codesearchnet
|
def save_replay(self, replay_data, replay_dir, prefix=None):
if not prefix:
replay_filename = ""
elif os.path.sep in prefix:
raise ValueError("Prefix '%s' contains '%s', use replay_dir instead." % (
prefix, os.path.sep))
else:
replay_filename = prefix + "_"
now = datetime.datetime.utcnow().replace(microsecond=0)
replay_filename += "%s.SC2Replay" % now.isoformat("-").replace(":", "-")
replay_dir = self.abs_replay_path(replay_dir)
if not gfile.Exists(replay_dir):
gfile.MakeDirs(replay_dir)
replay_path = os.path.join(replay_dir, replay_filename)
with gfile.Open(replay_path, "wb") as f:
f.write(replay_data)
return replay_path
|
Save a replay to a directory, returning the path to the replay.
Args:
replay_data: The result of controller.save_replay(), ie the binary data.
replay_dir: Where to save the replay. This can be absolute or relative.
prefix: Optional prefix for the replay filename.
Returns:
The full path where the replay is saved.
Raises:
ValueError: If the prefix contains the path separator.
|
juraj-google-style
|
def set_x_grid_info(self, x_low, x_high, num_x, xscale, xval_name):
self._set_grid_info('x', x_low, x_high, num_x, xscale, xval_name)
return
|
Set the grid values for x.
Create information for the grid of x values.
Args:
num_x (int): Number of points on axis.
x_low/x_high (float): Lowest/highest value for the axis.
xscale (str): Scale of the axis. Choices are 'log' or 'lin'.
xval_name (str): Name representing the axis. See GenerateContainer documentation
for options for the name.
|
codesearchnet
|
def get_parent(self, path):
self.__validate_storage_path(path, projects_allowed=False)
path_steps = [step for step in path.split('/') if step]
del path_steps[-1]
parent_path = '/{0}'.format('/'.join(path_steps))
return self.api_client.get_entity_by_query(path=parent_path)
|
Get the parent entity of the entity pointed by the given path.
Args:
path (str): The path of the entity whose parent is needed
Returns:
A JSON object of the parent entity if found.
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
juraj-google-style
|
def init_from_acceptor(self, acceptor):
states = sorted(
acceptor.states,
key=attrgetter('initial'),
reverse=True)
for state in states:
for arc in state.arcs:
itext = acceptor.isyms.find(arc.ilabel)
if itext in self.alphabet:
self.add_arc(state.stateid, arc.nextstate, itext)
if state.final:
self[state.stateid].final = True
if state.initial:
self[state.stateid].initial = True
|
Initializes the automaton from an acceptor, copying arcs whose input
symbols are in the alphabet and marking initial/final states.
Args:
acceptor: The acceptor (FST) to initialize from.
Returns:
None
|
juraj-google-style
|
def get_lanczos_eig(self, compute_m=True, feed_dict=None):
if compute_m:
min_eig, min_vec = self.sess.run([self.m_min_eig, self.m_min_vec], feed_dict=feed_dict)
else:
min_eig, min_vec = self.sess.run([self.h_min_eig, self.h_min_vec], feed_dict=feed_dict)
return min_vec, min_eig
|
Computes the min eigen value and corresponding vector of matrix M or H
using the Lanczos algorithm.
Args:
compute_m: boolean to determine whether we should compute eig val/vec
for M or for H. True for M; False for H.
feed_dict: dictionary mapping from TF placeholders to values (optional)
Returns:
min_eig_vec: Corresponding eigen vector to min eig val
eig_val: Minimum eigen value
|
juraj-google-style
|
def write_worksheets(workbook, data_list, result_info_key, identifier_keys):
worksheet_keys = get_worksheet_keys(data_list[0], result_info_key)
for key in worksheet_keys:
title = key.split('/')[1]
title = utilities.convert_snake_to_title_case(title)
title = KEY_TO_WORKSHEET_MAP.get(title, title)
if key == 'property/nod':
create_property_nod_worksheets(workbook, data_list, result_info_key, identifier_keys)
else:
worksheet = workbook.create_sheet(title=title[:31])
processed_data = process_data(key, data_list, result_info_key, identifier_keys)
write_data(worksheet, processed_data)
workbook.remove_sheet(workbook.active)
|
Writes rest of the worksheets to workbook.
Args:
workbook: workbook to write into
data_list: Analytics API data as a list of dicts
result_info_key: the key in api_data dicts that contains the data results
identifier_keys: the list of keys used as requested identifiers
(address, zipcode, block_id, etc)
|
juraj-google-style
|
def enable_eager_op_as_function(fn: _F) -> _F:
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
|
Returns the same fn. This will be removed once all usages are removed.
Args:
fn: the function to be wrapped.
Returns:
The wrapped function.
|
github-repos
|
def yaml(modules_to_register: Iterable[Any]=None, classes_to_register: Iterable[Any]=None) -> ruamel.yaml.YAML:
yaml = ruamel.yaml.YAML(typ='rt')
yaml.representer.add_representer(np.ndarray, numpy_to_yaml)
yaml.constructor.add_constructor('!numpy_array', numpy_from_yaml)
yaml = register_module_classes(yaml=yaml, modules=modules_to_register)
yaml = register_classes(yaml=yaml, classes=classes_to_register)
return yaml
|
Create a YAML object for loading a YAML configuration.
Args:
modules_to_register: Modules containing classes to be registered with the YAML object. Default: None.
classes_to_register: Classes to be registered with the YAML object. Default: None.
Returns:
A newly created YAML object, configured as appropriate.
|
codesearchnet
|
def dedupe_all_lists(obj, exclude_keys=()):
squared_dedupe_len = 10
if isinstance(obj, dict):
new_obj = {}
for key, value in obj.items():
if key in exclude_keys:
new_obj[key] = value
else:
new_obj[key] = dedupe_all_lists(value)
return new_obj
elif isinstance(obj, (list, tuple, set)):
new_elements = [dedupe_all_lists(v) for v in obj]
if len(new_elements) < squared_dedupe_len:
new_obj = dedupe_list(new_elements)
else:
new_obj = dedupe_list_of_dicts(new_elements)
return type(obj)(new_obj)
else:
return obj
|
Recursively remove duplicates from all lists.
Args:
obj: collection to deduplicate
exclude_keys (Container[str]): key names to ignore for deduplication
|
juraj-google-style
|
def output_of(*cmd: Optional[str], **kwargs) -> str:
result = cast(str, run_cmd(*cmd, log_run_to_stderr=False, out=TeeCapture(), **kwargs).out)
if result.endswith('\n'):
result = result[:(- 1)]
return result
|
Invokes a subprocess and returns its output as a string.
Args:
cmd: Components of the command to execute, e.g. ["echo", "dog"].
**kwargs: Extra arguments for asyncio.create_subprocess_shell, such as
a cwd (current working directory) argument.
Returns:
The captured standard output of the command as a string, with a
single trailing newline (if present) removed.
Raises:
subprocess.CalledProcessError: The process returned a non-zero error
code and raise_on_fail was set.
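Example (using the helper above, run from inside a git checkout):
branch = output_of("git", "rev-parse", "--abbrev-ref", "HEAD")
print(branch)  # e.g. 'master'; the trailing newline has already been stripped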
|
codesearchnet
|
def create_virtual_env(venv_path: str, requirements_paths: Iterable[str], python_path: str, verbose: bool) -> None:
shell_tools.run_cmd('virtualenv', (None if verbose else '--quiet'), '-p', python_path, venv_path, out=sys.stderr)
pip_path = os.path.join(venv_path, 'bin', 'pip')
for req_path in requirements_paths:
shell_tools.run_cmd(pip_path, 'install', (None if verbose else '--quiet'), '-r', req_path, out=sys.stderr)
|
Creates a new virtual environment and then installs dependencies.
Args:
venv_path: Where to put the virtual environment's state.
requirements_paths: Location of requirements files to -r install.
python_path: The python binary to use.
verbose: When set, more progress output is produced.
|
codesearchnet
|
def _create_simple_tf1_conv_model(self, input_shape: Sequence[int]=(1, 3, 4, 3), filter_shape: Sequence[int]=(2, 3, 3, 2), use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]:
in_placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
filters = random_ops.random_uniform(shape=filter_shape, minval=-1.0, maxval=1.0)
if use_variable_for_filter:
filters = variables.Variable(filters)
output_tensor = nn_ops.conv2d(in_placeholder, filters, strides=[1, 1, 2, 1], dilations=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
return (in_placeholder, output_tensor)
|
Creates a basic convolution model.
This is intended to be used for TF1 (graph mode) tests.
Args:
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter.
use_variable_for_filter: Setting this to `True` makes the filter for the
conv operation a `tf.Variable`.
Returns:
in_placeholder: Input tensor placeholder.
output_tensor: The resulting tensor of the convolution operation.
|
github-repos
|
def get_jobs(self, name=None):
if self.applicationResource:
return self._get_elements(self.jobs, 'jobs', Job, None, name)
else:
return []
|
Retrieves jobs running on this resource in its instance.
Args:
name (str, optional): Only return jobs containing property **name** that matches `name`. `name` can be a
regular expression. If `name` is not supplied, then all jobs are returned.
Returns:
list(Job): A list of jobs matching the given `name`.
.. note:: If ``applicationResource`` is `False` an empty list is returned.
.. versionadded:: 1.9
|
juraj-google-style
|
def relative_to_contrib(diff, project):
path = pathlib.Path(diff.b_path)
contrib_path = project.contrib_module_path
return path.relative_to(contrib_path)
|
Compute relative path of changed file to contrib dir
Args:
diff (git.diff.Diff): file diff
project (Project): project
Returns:
Path
|
codesearchnet
|
def es_get_class_defs(cls_def, cls_name):
rtn_dict = {key: value for key, value in cls_def.items() \
if key.startswith("kds_es")}
for key in rtn_dict:
del cls_def[key]
return rtn_dict
|
Reads through the class definition and pulls out the related
Elasticsearch ('kds_es') definitions.
Args:
-----
cls_def: dictionary of class definitions
cls_name: name of the class
|
juraj-google-style
|
def optionally(self, entity_type, attribute_name=None):
if (not attribute_name):
attribute_name = entity_type
self.optional += [(entity_type, attribute_name)]
return self
|
Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
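Example (a hedged sketch assuming the Adapt intent parser's IntentBuilder, which chains this method):
from adapt.intent import IntentBuilder

intent = (IntentBuilder("WeatherIntent")
          .require("WeatherKeyword")
          .optionally("Location")   # parsed intents may carry a 'Location' entity
          .build())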
|
codesearchnet
|
def publishFsFromMXD(self, fs_config):
fs = None
res = None
resItm = None
if self.securityhandler is None:
print ("Security handler required")
return
if self.securityhandler.is_portal:
url = self.securityhandler.org_url
else:
url = 'http:
try:
res = []
if isinstance(fs_config, list):
for fs in fs_config:
if 'ReplaceTag' in fs:
resItm = {"ReplaceTag":fs['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs, url=url)
if not resItm['FSInfo'] is None and 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
else:
if 'ReplaceTag' in fs_config:
resItm = {"ReplaceTag":fs_config['ReplaceTag'] }
else:
resItm = {"ReplaceTag":"{FeatureService}" }
resItm['FSInfo'] = self._publishFSFromMXD(config=fs_config, url=url)
if 'url' in resItm['FSInfo']:
print ("%s created" % resItm['FSInfo']['url'])
res.append(resItm)
else:
print (str(resItm['FSInfo']))
return res
except common.ArcRestHelperError as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishFsFromMXD",
"line": line,
"filename": filename,
"synerror": synerror,
}
)
finally:
resItm = None
fs = None
del resItm
del fs
gc.collect()
|
Publishes the layers in an MXD to a feature service.
Args:
fs_config (list): A list of JSON configuration feature service details to publish.
Returns:
dict: A dictionary of results objects.
|
juraj-google-style
|
def validate_config_must_have(config, required_keys):
missing_keys = (set(required_keys) - set(config))
if (len(missing_keys) > 0):
raise Exception(('Invalid config with missing keys "%s"' % ', '.join(missing_keys)))
|
Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of keys that config must include.
Raises:
Exception: if the config is missing any of the required keys.
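Example (using the validator above):
config = {"name": "demo"}
validate_config_must_have(config, ["name", "path"])
# raises Exception: Invalid config with missing keys "path"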
|
codesearchnet
|
def valid_ip_prefix(ip_prefix):
try:
ip_prefix = ipaddress.ip_network(ip_prefix)
except ValueError:
return False
else:
        if ip_prefix.version == 4 and ip_prefix.prefixlen != 32:
return False
        if ip_prefix.version == 6 and ip_prefix.prefixlen != 128:
return False
return True
|
Perform a sanity check on ip_prefix.
Arguments:
ip_prefix (str): The IP-Prefix to validate
Returns:
True if ip_prefix is a valid IPv4 address with prefix length 32 or a
valid IPv6 address with prefix length 128, otherwise False
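Examples of the documented behaviour (assuming the function above is in scope):
valid_ip_prefix("192.0.2.1/32")      # True  - IPv4 host prefix
valid_ip_prefix("2001:db8::1/128")   # True  - IPv6 host prefix
valid_ip_prefix("192.0.2.0/24")      # False - prefix length is not 32
valid_ip_prefix("not-a-prefix")      # False - fails ipaddress parsing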
|
juraj-google-style
|
def monkhorst_automatic(cls, structure, ngkpt,
use_symmetries=True, use_time_reversal=True, chksymbreak=None, comment=None):
sg = SpacegroupAnalyzer(structure)
nshiftk = 1
shiftk = 3*(0.5,)
return cls.monkhorst(
ngkpt, shiftk=shiftk, use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
chksymbreak=chksymbreak, comment=comment if comment else "Automatic Monkhorst-Pack scheme")
|
Convenient static constructor for an automatic Monkhorst-Pack mesh.
Args:
structure: :class:`Structure` object.
ngkpt: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: Use spatial symmetries to reduce the number of k-points.
use_time_reversal: Use time-reversal symmetry to reduce the number of k-points.
Returns:
:class:`KSampling` object.
|
juraj-google-style
|
def show(self, objtype, objid):
url = self._object_url(objtype, int(objid))
return self._make_request(url, method='get')
|
Query for a specific resource by ID
Args:
objtype (str): object type, e.g. 'device', 'interface'
objid (int): object ID (DeviceID, etc.)
Returns:
A dict with that object
Raises:
requests.exceptions.HTTPError
|
codesearchnet
|
def restore_saved_local_scope(
self,
saved_variables,
args_mapping,
line_number
):
restore_nodes = list()
for var in saved_variables:
if var.RHS in args_mapping:
restore_nodes.append(RestoreNode(
var.RHS + ' = ' + args_mapping[var.RHS],
var.RHS,
[var.LHS],
line_number=line_number,
path=self.filenames[-1]
))
else:
restore_nodes.append(RestoreNode(
var.RHS + ' = ' + var.LHS,
var.RHS,
[var.LHS],
line_number=line_number,
path=self.filenames[-1]
))
for node, successor in zip(restore_nodes, restore_nodes[1:]):
node.connect(successor)
if restore_nodes:
self.nodes[-1].connect(restore_nodes[0])
self.nodes.extend(restore_nodes)
return restore_nodes
|
Restore the previously saved variables to their original values.
Args:
saved_variables(list[SavedVariable])
args_mapping(dict): A mapping of call argument to definition argument.
line_number(int): Of the def of the function call about to be entered into.
Note: We do not need connect_if_allowed because of the
preceding call to save_local_scope.
|
juraj-google-style
|
def _has_connection(hostname, port):
try:
host = socket.gethostbyname(hostname)
socket.create_connection((host, port), 2)
return True
except Exception:
return False
|
Checks if internet connection exists to host via specified port.
If any exception is raised while trying to open a socket this will return
false.
Args:
hostname (str): Hostname to connect to.
port (int): Port to connect to
Returns:
bool: Has connection or not
|
codesearchnet
|
def calculate_bias_shape(input_shape, bias_dims):
input_rank = len(input_shape)
if (bias_dims is None):
return input_shape[1:]
elif (not bias_dims):
return ()
else:
bias_shape = ([1] * input_rank)
for dim in bias_dims:
dim %= input_rank
if (dim == 0):
raise ValueError('Cannot apply bias across the minibatch dimension.')
bias_shape[dim] = input_shape[dim]
start = input_rank
for dim in xrange(1, input_rank):
if (bias_shape[dim] != 1):
start = dim
break
return tuple(bias_shape[start:])
|
Calculate `bias_shape` based on the `input_shape` and `bias_dims`.
Args:
input_shape: Shape of the input being passed into the module. The leading
dimension is the minibatch size.
bias_dims: The dimensions that bias should be applied over. The remaining
dimensions will get broadcasted over.
Returns:
bias_shape: Tuple corresponding to the shape of bias Variable to create.
Raises:
ValueError: If the user attempts to add bias over the minibatch dimension,
e.g. `bias_dims=[0]`.
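Worked example (assuming the function above is in scope):
calculate_bias_shape([32, 4, 5], bias_dims=[1])   # -> (4, 1): bias over dim 1, broadcast over the last dim
calculate_bias_shape([32, 4, 5], bias_dims=None)  # -> [4, 5]: bias over everything but the minibatch
calculate_bias_shape([32, 4, 5], bias_dims=[])    # -> (): a scalar bias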
|
codesearchnet
|
def _ScanFileSystem(self, scan_node, base_path_specs):
if not scan_node or not scan_node.path_spec:
raise errors.SourceScannerError(
'Invalid or missing file system scan node.')
base_path_specs.append(scan_node.path_spec)
|
Scans a file system scan node for file systems.
Args:
scan_node (SourceScanNode): file system scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
SourceScannerError: if the scan node is invalid.
|
juraj-google-style
|
def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):
out = tarfile.open(filepath, mode='w:bz2')
logger.info("Writing packets to tarball at " + filepath)
packet_count = 0
try:
for (ivorn, xml) in ivorn_xml_tuples:
out.addfile(*bytestring_to_tar_tuple(
filename_from_ivorn(ivorn),
xml
))
packet_count += 1
finally:
out.close()
return packet_count
|
Iterate over a series of ivorn / xml bytestring tuples and write to a bz2'd tarball.
Args:
ivorn_xml_tuples (iterable): [(ivorn,xml)]
An iterable (e.g. list) of tuples containing two entries -
an ivorn string and an xml bytestring.
filepath (string): Path to the new tarball to create. Typically of form
'/path/to/foo.tar.bz2'
Returns
packet_count (int): Number of packets written to tarball
|
juraj-google-style
|
def _check_tf2_flags(flags):
if not flags.keras_model_file and (not flags.saved_model_dir):
raise ValueError('one of the arguments --saved_model_dir --keras_model_file is required')
|
Checks the parsed and unparsed flags to ensure they are valid in 2.X.
Args:
flags: argparse.Namespace object containing TFLite flags.
Raises:
ValueError: Invalid flags.
|
github-repos
|
def make_worksheet(self, sheet_name=None):
if sheet_name is None:
sheet_name = self.table_name
if not sheet_name:
sheet_name = ""
self._stream = self.workbook.add_worksheet(sheet_name)
self._current_data_row = self._first_data_row
|
Add a worksheet to the current workbook.
Args:
sheet_name (str):
Name of the worksheet to create. The name will be automatically generated
(like ``"Sheet1"``) if the ``sheet_name`` is empty.
|
juraj-google-style
|
def __init__(self, base_url, object_factory, single_request_timeout=None):
check_type(base_url, basestring, may_be_none=False)
check_type(single_request_timeout, int)
super(AccessTokensAPI, self).__init__()
self._base_url = str(validate_base_url(base_url))
self._single_request_timeout = single_request_timeout
self._endpoint_url = urllib.parse.urljoin(self.base_url, API_ENDPOINT)
self._request_kwargs = {"timeout": single_request_timeout}
self._object_factory = object_factory
|
Initialize an AccessTokensAPI object with the provided RestSession.
Args:
base_url(basestring): The base URL of the API endpoints.
single_request_timeout(int): Timeout in seconds for the API
requests.
Raises:
TypeError: If the parameter types are incorrect.
|
juraj-google-style
|
def spt(points, max_dist_error, max_speed_error):
if (len(points) <= 2):
return points
else:
is_error = False
e = 1
while ((e < len(points)) and (not is_error)):
i = 1
while ((i < e) and (not is_error)):
delta_e = (time_dist(points[e], points[0]) * I_3600)
delta_i = (time_dist(points[i], points[0]) * I_3600)
di_de = 0
if (delta_e != 0):
di_de = (delta_i / delta_e)
d_lat = (points[e].lat - points[0].lat)
d_lon = (points[e].lon - points[0].lon)
point = Point((points[0].lat + (d_lat * di_de)), (points[0].lon + (d_lon * di_de)), None)
dt1 = time_dist(points[i], points[(i - 1)])
if (dt1 == 0):
dt1 = 1e-09
dt2 = time_dist(points[(i + 1)], points[i])
if (dt2 == 0):
dt2 = 1e-09
v_i_1 = (loc_dist(points[i], points[(i - 1)]) / dt1)
v_i = (loc_dist(points[(i + 1)], points[i]) / dt2)
if ((loc_dist(points[i], point) > max_dist_error) or (abs((v_i - v_i_1)) > max_speed_error)):
is_error = True
else:
i = (i + 1)
if is_error:
return ([points[0]] + spt(points[i:len(points)], max_dist_error, max_speed_error))
e = (e + 1)
if (not is_error):
return [points[0], points[(len(points) - 1)]]
|
A combination of both `td_sp` and `td_tr`
Detailed in,
Spatiotemporal Compression Techniques for Moving Point Objects,
Nirvana Meratnia and Rolf A. de By, 2004,
in Advances in Database Technology - EDBT 2004: 9th
International Conference on Extending Database Technology,
Heraklion, Crete, Greece, March 14-18, 2004
Args:
points (:obj:`list` of :obj:`Point`)
max_dist_error (float): max distance error, in meters
max_speed_error (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`
|
codesearchnet
|
def _check_callback(callback):
if inspect.isclass(callback):
callback_object = callback()
if not callable(callback_object):
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
elif callable(callback):
callback_object = callback
else:
raise ValueError(
"Callback must be a class that implements __call__ or a function."
)
return callback_object
|
Turns a callback that is potentially a class into a callable object.
Args:
callback (object): An object that might be a class, method, or function.
if the object is a class, this creates an instance of it.
Raises:
ValueError: If an instance can't be created or it isn't a callable object.
TypeError: If the class requires arguments to be instantiated.
Returns:
callable: A callable object suitable for use as the consumer callback.
|
juraj-google-style
|
def main(self, ignored_argv=('',)):
self._install_signal_handler(signal.SIGTERM, 'SIGTERM')
if self.flags.inspect:
logger.info('Not bringing up TensorBoard, but inspecting event files.')
event_file = os.path.expanduser(self.flags.event_file)
efi.inspect(self.flags.logdir, event_file, self.flags.tag)
return 0
if self.flags.version_tb:
print(version.VERSION)
return 0
try:
server = self._make_server()
sys.stderr.write(('TensorBoard %s at %s (Press CTRL+C to quit)\n' % (version.VERSION, server.get_url())))
sys.stderr.flush()
self._register_info(server)
server.serve_forever()
return 0
except TensorBoardServerException as e:
logger.error(e.msg)
sys.stderr.write(('ERROR: %s\n' % e.msg))
sys.stderr.flush()
return (- 1)
|
Blocking main function for TensorBoard.
This method is called by `tensorboard.main.run_main`, which is the
standard entrypoint for the tensorboard command line program. The
configure() method must be called first.
Args:
ignored_argv: Do not pass. Required for Abseil compatibility.
Returns:
Process exit code, i.e. 0 if successful or non-zero on failure. In
practice, an exception will most likely be raised instead of
returning non-zero.
:rtype: int
|
codesearchnet
|
def _check_remote_command(self, destination, timeout_ms, success_msgs=None):
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
stream = self._adb_connection.open_stream(destination, timeout)
if (not stream):
raise usb_exceptions.AdbStreamUnavailableError('Service %s not supported', destination)
try:
message = stream.read(timeout_ms=timeout)
if any([(m in message) for m in success_msgs]):
return
except usb_exceptions.CommonUsbError:
if destination.startswith('reboot:'):
return
raise
raise usb_exceptions.AdbRemoteError('Device message: %s', message)
|
Open a stream to destination, check for remote errors.
Used for reboot, remount, and root services. If this method returns, the
command was successful, otherwise an appropriate error will have been
raised.
Args:
destination: Stream destination to open.
timeout_ms: Timeout in milliseconds for the operation.
success_msgs: If provided, a list of messages that, if returned from the
device, indicate success, so don't treat them as errors.
Raises:
AdbRemoteError: If the remote command fails, will contain any message we
got back from the device.
AdbStreamUnavailableError: The service requested isn't supported.
|
codesearchnet
|
def add_newlines(f, output, char):
line_count = get_line_count(f)
f = open(f, 'r+')
output = open(output, 'r+')
for line in range(line_count):
string = f.readline()
string = re.sub(char, char + '\n', string)
output.write(string)
|
Adds line breaks after every occurrence of a given character in a file.
Args:
f: string, path to input file.
output: string, path to output file.
Returns:
None.
|
juraj-google-style
|
def decode_conjure_bean_type(cls, obj, conjure_type):
deserialized = {}
for (python_arg_name, field_definition) \
in conjure_type._fields().items():
field_identifier = field_definition.identifier
if field_identifier not in obj or obj[field_identifier] is None:
cls.check_null_field(
obj, deserialized, python_arg_name, field_definition)
else:
value = obj[field_identifier]
field_type = field_definition.field_type
deserialized[python_arg_name] = \
cls.do_decode(value, field_type)
return conjure_type(**deserialized)
|
Decodes json into a conjure bean type (a plain bean, not enum
or union).
Args:
obj: the json object to decode
conjure_type: a class object which is the bean type
we're decoding into
Returns:
An instance of a bean of type conjure_type.
|
juraj-google-style
|
def _extract_from(raw_json, pandas_options=None):
data_frames = []
if (pandas_options is None):
pandas_options = {}
columns = pandas_options.pop('columns', None)
(columns, header_line_number) = _convert_pandas_csv_options(pandas_options, columns)
for table in raw_json:
list_data = [[(np.nan if (not e['text']) else e['text']) for e in row] for row in table['data']]
_columns = columns
if (isinstance(header_line_number, int) and (not columns)):
_columns = list_data.pop(header_line_number)
_columns = [('' if (e is np.nan) else e) for e in _columns]
data_frames.append(pd.DataFrame(data=list_data, columns=_columns, **pandas_options))
return data_frames
|
Extract tables from json.
Args:
raw_json (list):
Decoded list from tabula-java JSON.
pandas_options (dict optional):
pandas options for `pd.DataFrame()`
|
codesearchnet
|
def create_atomic_observe_operations(self, states, actions, internals, terminal, reward, index):
num_episodes = tf.count_nonzero(input_tensor=terminal, dtype=util.tf_dtype('int'))
increment_episode = tf.assign_add(ref=self.episode, value=tf.to_int64(x=num_episodes))
increment_global_episode = tf.assign_add(ref=self.global_episode, value=tf.to_int64(x=num_episodes))
with tf.control_dependencies(control_inputs=(increment_episode, increment_global_episode)):
states = util.map_tensors(fn=tf.stop_gradient, tensors=states)
internals = util.map_tensors(fn=tf.stop_gradient, tensors=internals)
actions = util.map_tensors(fn=tf.stop_gradient, tensors=actions)
terminal = tf.stop_gradient(input=terminal)
reward = tf.stop_gradient(input=reward)
observation = self.fn_observe_timestep(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward)
with tf.control_dependencies(control_inputs=(observation,)):
self.unbuffered_episode_output = (self.global_episode + 0)
|
Returns the tf op to fetch when unbuffered observations are passed in.
Args:
states (any): One state (usually a value tuple) or dict of states if multiple states are expected.
actions (any): One action (usually a value tuple) or dict of states if multiple actions are expected.
internals (any): Internal list.
terminal (bool): boolean indicating if the episode terminated after the observation.
reward (float): scalar reward that resulted from executing the action.
Returns: Tf op to fetch when `observe()` is called.
|
codesearchnet
|
def set_of_vars(arg_plot):
return set(var for var in arg_plot.split(',') if var in phyvars.PLATES)
|
Build set of needed variables.
Args:
arg_plot (str): string with variable names separated with ``,``.
Returns:
set of str: set of variables.
|
juraj-google-style
|
def set_value_at_field(msg: message.Message, field: Union[descriptor.FieldDescriptor, str], value: Any):
if isinstance(field, str):
field = _field_descriptor_for_name(msg, field)
if field_is_repeated(field):
if field_is_primitive(field):
getattr(msg, field.name)[:] = value
else:
del getattr(msg, field.name)[:]
getattr(msg, field.name).extend(value)
elif field_is_primitive(field):
setattr(msg, field.name, value)
else:
getattr(msg, field.name).CopyFrom(value)
|
Sets value at the field.
Args:
msg: The message whose field to mutate.
field: The FieldDescriptor or name of the field to mutate.
value: The value to set.
|
github-repos
|
def __generate_object_term__(self, datatype, value):
if datatype == NS_MGR.xsd.anyURI.rdflib:
term = rdflib.URIRef(value)
elif datatype:
term = rdflib.Literal(value, datatype=datatype)
else:
term = rdflib.Literal(value)
return term
|
Internal method takes a datatype (can be None) and returns
the RDF Object Term
Args:
-----
datatype: None, or rdflib.URIRef
value: Varies depending on ingester
|
juraj-google-style
|
def ResolveFlats(
dem,
in_place = False
):
if type(dem) is not rdarray:
raise Exception("A richdem.rdarray or numpy.ndarray is required!")
if not in_place:
dem = dem.copy()
_AddAnalysis(dem, "ResolveFlats(dem, in_place={in_place})".format(in_place=in_place))
demw = dem.wrap()
_richdem.rdResolveFlatsEpsilon(demw)
dem.copyFromWrapped(demw)
if not in_place:
return dem
|
Attempts to resolve flats by imposing a local gradient
Args:
dem (rdarray): An elevation model
in_place (bool): If True, the DEM is modified in place and there is
no return; otherwise, a new, altered DEM is returned.
Returns:
DEM modified such that all flats drain.
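Example (a hedged sketch using richdem's Python API; 'dem.tif' is a placeholder for any GDAL-readable elevation raster):
import richdem as rd

dem = rd.LoadGDAL("dem.tif")
resolved = rd.ResolveFlats(dem)       # returns a new DEM with flats made drainable
rd.ResolveFlats(dem, in_place=True)   # or modify the loaded DEM directly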
|
juraj-google-style
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
logits = self.router(hidden_states)
top_logits, top_indices = torch.topk(logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
original_dtype = top_indices.dtype
tokens_per_expert = torch.histc(top_indices.flatten().to(torch.float32), bins=self.config.moe_num_experts, min=0, max=self.config.moe_num_experts - 1).to(original_dtype)
indices = top_indices
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
    permuted_tokens = hidden_states.index_select(0, sorted_indices)
expert_output = self.experts(permuted_tokens, tokens_per_expert)
unpermuted_tokens = torch.zeros((scores.shape[0] * self.config.moe_topk, expert_output.size(1)), dtype=expert_output.dtype, device=expert_output.device)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * scores.unsqueeze(-1)).sum(dim=1).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return output + shared_expert_output
|
Forward pass of the MoE Layer.
Args:
hidden_states (`torch.Tensor`):
Input tensor of shape (batch_size, sequence_length, hidden_size).
Returns:
torch.Tensor: Output tensor after passing through the MoE layer.
Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
|
github-repos
|
def getHostCaPath(self, name):
cert = self.getHostCert(name)
if cert is None:
return None
return self._getCaPath(cert)
|
Gets the path to the CA certificate that issued a given host keypair.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the CA cert which issued the cert for "myhost":
mypath = cdir.getHostCaPath('myhost')
Returns:
str: The path if exists.
|
juraj-google-style
|
def _convert_update_row(row):
after_values = row['after_values']
before_values = row['before_values']
values = after_values
return {
'values': values,
'updated_values': _get_updated_values(before_values, after_values)
}
|
Convert a row for update event
Args:
row (dict): event row data
|
juraj-google-style
|
def blit(self, dest: tcod.console.Console, fill_fore: bool=True, fill_back: bool=True) -> None:
if (not dest):
dest = tcod.console.Console._from_cdata(ffi.NULL)
if ((dest.width != self.width) or (dest.height != self.height)):
raise ValueError('ConsoleBuffer.blit: Destination console has an incorrect size.')
if fill_back:
bg = dest.bg.ravel()
bg[0::3] = self.back_r
bg[1::3] = self.back_g
bg[2::3] = self.back_b
if fill_fore:
fg = dest.fg.ravel()
fg[0::3] = self.fore_r
fg[1::3] = self.fore_g
fg[2::3] = self.fore_b
dest.ch.ravel()[:] = self.char
|
Use libtcod's "fill" functions to write the buffer to a console.
Args:
dest (Console): Console object to modify.
fill_fore (bool):
If True, fill the foreground color and characters.
fill_back (bool):
If True, fill the background color.
|
codesearchnet
|
def write(self, destination, filename, template_name, **kwargs):
template = self.env.get_template(template_name)
content = template.render(kwargs)
super(TemplateFileWriter, self).write(destination=destination, filename=filename, content=content)
|
Write a file according to the template name
Args:
destination (string): the destination location
filename (string): the filename that will be written
template_name (string): the name of the template
kwargs (dict): all attributes that will be passed to the template
|
codesearchnet
|
def load_videos(template, video_length, frame_shape):
filenames = tf.gfile.Glob(template)
if (not filenames):
raise ValueError('no files found.')
filenames = sorted(filenames)
dataset_len = len(filenames)
filenames = tf.constant(filenames)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.apply(tf.data.experimental.map_and_batch((lambda filename: load_image_map_function(filename, frame_shape)), video_length, drop_remainder=True))
return (dataset, dataset_len)
|
Loads videos from files.
Args:
template: template string for listing the image files.
video_length: length of the video.
frame_shape: shape of each frame.
Returns:
dataset: the tf dataset frame by frame.
dataset_len: number of the items which is the number of image files.
Raises:
ValueError: if no files found.
|
codesearchnet
|
def distances(self, word, words):
point = self[word]
vectors = np.asarray([self[w] for w in words])
diff = (vectors - point)
distances = np.linalg.norm(diff, axis=1)
return distances
|
Calculate Euclidean pairwise distances between `word` and `words`.
Args:
word (string): single word.
words (list): list of strings.
Returns:
numpy array of the distances.
Note:
L2 metric is used to calculate distances.
|
codesearchnet
|
def create_from_json(cls, json_data):
msa = Msa()
msa.msa = json_data["msa_info"]["msa"]
msa.meta = json_data["meta"] if "meta" in json_data else None
msa.component_results = _create_component_results(json_data, "msa_info")
return msa
|
Deserialize msa json data into a Msa object
Args:
json_data (dict): The json data for this msa
Returns:
Msa object
|
juraj-google-style
|
def login(self, client_id, access_token, connection, scope='openid'):
return self.post('https:
|
Login using a social provider's access token
Given the social provider's access_token and the connection specified,
it will do the authentication on the provider and return a dict with
the access_token and id_token. Currently, this endpoint only works for
Facebook, Google, Twitter and Weibo.
Args:
client_id (str): application's client id.
access_token (str): social provider's access_token.
connection (str): connection type (e.g: 'facebook')
Returns:
A dict with 'access_token' and 'id_token' keys.
|
codesearchnet
|
def delete_recursively_v2(path):
_pywrap_file_io.DeleteRecursively(compat.path_to_bytes(path))
|
Deletes everything under path recursively.
Args:
path: string, a path
Raises:
errors.OpError: If the operation fails.
|
github-repos
|
def ConvertMessage(self, value, message):
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value, message)
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
else:
self._ConvertFieldValuePair(value, message)
|
Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
Raises:
ParseError: In case of conversion problems.
|
juraj-google-style
|
def write_input(self, output_dir='.', make_dir_if_not_present=True):
if (make_dir_if_not_present and (not os.path.exists(output_dir))):
os.makedirs(output_dir)
feff = self.all_input()
feff_input = '\n\n'.join((str(feff[k]) for k in ['HEADER', 'PARAMETERS', 'POTENTIALS', 'ATOMS'] if (k in feff)))
for (k, v) in feff.items():
with open(os.path.join(output_dir, k), 'w') as f:
f.write(str(v))
with open(os.path.join(output_dir, 'feff.inp'), 'w') as f:
f.write(feff_input)
if ('ATOMS' not in feff):
self.atoms.struct.to(fmt='cif', filename=os.path.join(output_dir, feff['PARAMETERS']['CIF']))
|
Writes a set of FEFF input to a directory.
Args:
output_dir: Directory to output the FEFF input files
make_dir_if_not_present: Set to True if you want the directory (
and the whole path) to be created if it is not present.
|
codesearchnet
|
def _parse_format_pages_isbn(html_chunk):
ppi = get_first_content(html_chunk.find('div', {'class': 'price-overflow'}))
if (not ppi):
return (None, None, None)
ppi = list(filter(lambda x: x.strip(), ppi.split('<br />')))[0]
isbn = dhtmlparser.parseString(ppi)
isbn = isbn.find('b')
isbn = (isbn[0].getContent() if isbn else None)
pages = None
book_format = None
details = ppi.split('|')
if (len(details) >= 2):
book_format = details[0].strip()
pages = details[1].strip()
return (book_format, pages, isbn)
|
Parse format, number of pages and ISBN.
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
tuple: (format, pages, isbn), all as string.
|
codesearchnet
|
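A standalone sketch of the "format | pages" split applied to a sample detail string (the sample text is invented and the HTML-parsing step is omitted):
ppi = "Brožovaná | 215 stran"
details = ppi.split('|')
book_format, pages = None, None
if len(details) >= 2:
    book_format = details[0].strip()
    pages = details[1].strip()
print((book_format, pages))  # ('Brožovaná', '215 stran')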
def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
|
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
|
github-repos
|
def get_elements_between_bands(self, band_i, band_j):
if band_i < 1 or band_i > self.nb_bands or band_j < 1 or band_j > self.nb_bands:
raise ValueError("Band index out of bounds")
return self.data[:, band_i - 1, band_j - 1, :]
|
Method returning a numpy array with elements
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
between bands band_i and band_j (vasp 1-based indexing) for all kpoints.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
Returns:
a numpy list of elements for each kpoint
|
juraj-google-style
|
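A shape-only sketch of the 1-based band indexing, using random data in place of the parsed optical-matrix array (dimensions are illustrative):
import numpy as np

nkpoints, nb_bands = 4, 6
data = np.random.rand(nkpoints, nb_bands, nb_bands, 6)
band_i, band_j = 2, 5
elements = data[:, band_i - 1, band_j - 1, :]   # one 6-element row per kpoint
print(elements.shape)  # (4, 6)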
def is_prefix(cls, path):
lagofile = paths.Paths(path).prefix_lagofile()
return os.path.isfile(lagofile)
|
Check if a path is a valid prefix
Args:
path(str): path to be checked
Returns:
bool: True if the given path is a prefix
|
juraj-google-style
|
def _ConvertValueBinaryDataToFloatingPointValue(self, value):
if (not value):
return None
value_length = len(value)
if (value_length not in (4, 8)):
raise errors.ParseError('Unsupported value data size: {0:d}'.format(value_length))
if (value_length == 4):
floating_point_map = self._GetDataTypeMap('float32le')
elif (value_length == 8):
floating_point_map = self._GetDataTypeMap('float64le')
try:
return self._ReadStructureFromByteStream(value, 0, floating_point_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse floating-point value with error: {0!s}'.format(exception))
|
Converts a binary data value into a floating-point value.
Args:
value (bytes): binary data value containing a 32-bit or 64-bit floating-point value, or None.
Returns:
float: floating-point representation of binary data value or None if
value is not set.
Raises:
ParseError: if the floating-point value data size is not supported or
if the value cannot be parsed.
|
codesearchnet
|
def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
residual = hidden_states
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
present_key_value = present_key_value + cross_attn_present_key_value
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (hidden_states, self_attn_weights, cross_attn_weights, present_key_value)
|
Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`tf.Tensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
`(decoder_attention_heads,)`
past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
|
github-repos
|
def UpdateMaps(self, conf, incremental, force_write=False, force_lock=False):
if not self._Lock(path=conf.lockfile, force=force_lock):
self.log.error('Failed to acquire lock, aborting!')
return self.ERR_LOCK
retval = 0
for map_name in conf.maps:
if map_name not in conf.options:
self.log.error('No such map name defined in config: %s', map_name)
return 1
if incremental:
self.log.info('Updating and verifying %s cache.', map_name)
else:
self.log.info('Rebuilding and verifying %s cache.', map_name)
cache_options = conf.options[map_name].cache
source_options = conf.options[map_name].source
old_cwd = os.getcwd()
tempdir = tempfile.mkdtemp(dir=cache_options['dir'], prefix='nsscache-%s-' % map_name)
if not os.path.isabs(cache_options['dir']):
cache_options['dir'] = os.path.abspath(cache_options['dir'])
if not os.path.isabs(conf.timestamp_dir):
conf.timestamp_dir = os.path.abspath(conf.timestamp_dir)
if not os.path.isabs(tempdir):
tempdir = os.path.abspath(tempdir)
os.chdir(tempdir)
try:
try:
source = source_factory.Create(source_options)
updater = self._Updater(map_name, source, cache_options, conf)
if incremental:
self.log.info('Updating and verifying %s cache.', map_name)
else:
self.log.info('Rebuilding and verifying %s cache.', map_name)
retval = updater.UpdateFromSource(source, incremental=incremental, force_write=force_write)
except error.PermissionDenied:
self.log.error('Permission denied: could not update map %r. Aborting', map_name)
retval += 1
except (error.EmptyMap, error.InvalidMap) as e:
self.log.error(e)
retval += 1
except error.InvalidMerge as e:
self.log.warning('Could not merge map %r: %s. Skipping.', map_name, e)
finally:
os.chdir(old_cwd)
shutil.rmtree(tempdir)
return retval
|
Update each configured map.
For each configured map, create a source and cache object and
update the cache from the source.
Args:
conf: configuration object
incremental: flag indicating incremental update should occur
force_write: optional flag indicating safety checks should be ignored
force_lock: optional flag indicating we override existing locks
Returns:
integer, zero indicating success, non-zero failure
|
github-repos
|
def impersonate(self, user, enterprise):
if ((not user) or (not enterprise)):
raise ValueError('You must set a user name and an enterprise name to begin impersonification')
self._is_impersonating = True
self._impersonation = ('%s@%s' % (user, enterprise))
|
Impersonate a user in an enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
|
codesearchnet
|
def _iterate(self, url, params, api_entity):
params['resultLimit'] = self.result_limit
should_iterate = True
result_start = 0
while should_iterate:
params['resultStart'] = result_start
r = self.tcex.session.get(url, params=params)
if not self.success(r):
err = r.text or r.reason
self.tcex.handle_error(950, [r.status_code, err, r.url])
data = r.json().get('data').get(api_entity)
if len(data) < self.result_limit:
should_iterate = False
result_start += self.result_limit
for result in data:
yield result
|
Iterate over paginated results from the API.
Args:
url: The API endpoint URL to page through.
params: Dict of query parameters; resultLimit and resultStart are set here.
api_entity: Key under the response 'data' object whose items are yielded.
Return:
Generator yielding each result dict.
|
juraj-google-style
|
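The resultStart/resultLimit paging pattern can be simulated against an in-memory list instead of the live API (everything below is a stand-in, not the real session or endpoint):
records = list(range(23))          # pretend API data
result_limit = 10

def fetch(start, limit):
    return records[start:start + limit]

collected = []
result_start, should_iterate = 0, True
while should_iterate:
    page = fetch(result_start, result_limit)
    if len(page) < result_limit:
        should_iterate = False
    result_start += result_limit
    collected.extend(page)          # the real code yields each result instead
print(len(collected))               # 23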
def inference(self, state_arr, limit=1000):
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list = [(agent_x, agent_y, 0.0)]
self.t = 1
while self.t <= limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list.append((agent_x, agent_y, q[0]))
state_arr = self.update_state(state_arr, action_arr)
self.t += 1
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list
|
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: Maximum number of inference steps.
Returns:
list of (x, y, q) tuples describing an optimal route.
|
juraj-google-style
|
def ToScriptHash(data, unhex=True):
if len(data) > 1 and unhex:
data = binascii.unhexlify(data)
return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))
|
Get a script hash of the data.
Args:
data (bytes): data to hash.
unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'
Returns:
UInt160: script hash.
|
juraj-google-style
|
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:
bucket_length = bucket_length or len(tags)
answer = np.zeros(shape=(bucket_length,), dtype=np.int32)
for i, tag in enumerate(tags):
answer[i] = self.tags.tok2idx(tag)
return answer
|
Transforms a sentence of tags to Numpy array, which will be the network target.
Args:
tags: input sentence of tags
bucket_length: the width of the bucket
Returns:
A 1d array of length bucket_length in which answer[i] contains the index of the i-th tag.
|
juraj-google-style
|
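A toy sketch of padding a tag sequence into a fixed-width integer vector, with a hypothetical tag-to-index mapping in place of self.tags:
import numpy as np

tag2idx = {"O": 0, "B-PER": 1, "I-PER": 2}
tags = ["B-PER", "I-PER", "O"]
bucket_length = 5
answer = np.zeros(shape=(bucket_length,), dtype=np.int32)
for i, tag in enumerate(tags):
    answer[i] = tag2idx[tag]
print(answer)  # [1 2 0 0 0]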
def IsHFS(self):
tsk_fs_type = self.GetFsType()
return (tsk_fs_type in [pytsk3.TSK_FS_TYPE_HFS, pytsk3.TSK_FS_TYPE_HFS_DETECT])
|
Determines if the file system is HFS, HFS+ or HFSX.
Returns:
bool: True if the file system is HFS.
|
codesearchnet
|
def cancel_merge_when_pipeline_succeeds(self, **kwargs):
path = ('%s/%s/cancel_merge_when_pipeline_succeeds' % (self.manager.path, self.get_id()))
server_data = self.manager.gitlab.http_put(path, **kwargs)
self._update_attrs(server_data)
|
Cancel merge when the pipeline succeeds.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabMROnBuildSuccessError: If the server could not handle the
request
|
codesearchnet
|
def DecryptPrivateKey(self, encrypted_private_key):
aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)
return aes.decrypt(encrypted_private_key)
|
Decrypt the provided encrypted private key with the initialized master key and IV.
Args:
encrypted_private_key (byte string): the ciphertext to be decrypted.
Returns:
bytes: the decrypted private key.
|
juraj-google-style
|
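A round-trip sketch of the CBC decryption used above, with placeholder key and IV material (a real wallet derives these from the stored master key and IV):
from Crypto.Cipher import AES

key = b'0' * 32                     # 256-bit master key (placeholder)
iv = b'1' * 16                      # 16-byte initialization vector (placeholder)
plaintext = b'sixteen byte key'     # block-aligned sample private-key bytes
ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(plaintext)
recovered = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
assert recovered == plaintext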
def _list_inputs_or_outputs(self, recursive, node_name, depth, control, op_type, do_outputs=False):
if do_outputs:
tracker = self._debug_dump.node_recipients
type_str = 'Recipients of'
short_type_str = 'recipients'
else:
tracker = self._debug_dump.node_inputs
type_str = 'Inputs to'
short_type_str = 'inputs'
lines = []
font_attr_segs = {}
node_name, _ = debug_graphs.parse_node_or_tensor_name(node_name)
if not self._debug_dump.node_exists(node_name):
return cli_shared.error('There is no node named "%s" in the partition graphs' % node_name)
if recursive:
max_depth = depth
else:
max_depth = 1
if control:
include_ctrls_str = ', control %s included' % short_type_str
else:
include_ctrls_str = ''
line = '%s node "%s"' % (type_str, node_name)
font_attr_segs[0] = [(len(line) - 1 - len(node_name), len(line) - 1, 'bold')]
lines.append(line + ' (Depth limit = %d%s):' % (max_depth, include_ctrls_str))
command_template = 'lo -c -r %s' if do_outputs else 'li -c -r %s'
self._dfs_from_node(lines, font_attr_segs, node_name, tracker, max_depth, 1, [], control, op_type, command_template=command_template)
lines.append('')
lines.append('Legend:')
lines.append(' (d): recursion depth = d.')
if control:
lines.append(' (Ctrl): Control input.')
if op_type:
lines.append(' [Op]: Input node has op type Op.')
return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)
|
Helper function used by list_inputs and list_outputs.
Format a list of lines to display the inputs or output recipients of a
given node.
Args:
recursive: Whether the listing is to be done recursively, as a boolean.
node_name: The name of the node in question, as a str.
depth: Maximum recursion depth, applies only if recursive == True, as an
int.
control: Whether control inputs or control recipients are included, as a
boolean.
op_type: Whether the op types of the nodes are to be included, as a
boolean.
do_outputs: Whether recipients, instead of input nodes are to be
listed, as a boolean.
Returns:
Input or recipient tree formatted as a RichTextLines object.
|
github-repos
|
def rename_attribute(self, attribute: str, new_name: str) -> None:
for (key_node, _) in self.yaml_node.value:
if (key_node.value == attribute):
key_node.value = new_name
break
|
Renames an attribute.
Use only if is_mapping() returns true.
If the attribute does not exist, this will do nothing.
Args:
attribute: The (old) name of the attribute to rename.
new_name: The new name to rename it to.
|
codesearchnet
|
def bounce(sequence):
N = len(sequence)
def f(i):
(div, mod) = divmod(i, N)
if ((div % 2) == 0):
return sequence[mod]
else:
return sequence[((N - mod) - 1)]
return partial(force, sequence=_advance(f))
|
Return a driver function that can advance a "bounced" sequence
of values.
.. code-block:: none
seq = [0, 1, 2, 3]
# bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...]
Args:
sequence (seq) : a sequence of values for the driver to bounce
|
codesearchnet
|
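The bounce index arithmetic can be checked on its own, without the partial/force driver machinery:
seq = [0, 1, 2, 3]
N = len(seq)

def bounced(i):
    div, mod = divmod(i, N)
    return seq[mod] if div % 2 == 0 else seq[N - mod - 1]

print([bounced(i) for i in range(10)])  # [0, 1, 2, 3, 3, 2, 1, 0, 0, 1]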
def add_event(self, event):
if not isinstance(event, event_pb2.Event):
raise TypeError("Expected an event_pb2.Event proto, "
" but got %s" % type(event))
self._async_writer.write(event.SerializeToString())
|
Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
|
juraj-google-style
|
def Match(self, artifact=None, os_name=None, cpe=None, label=None):
return [c for c in self.conditions if c.Match(artifact, os_name, cpe, label)]
|
Test if host data should trigger a check.
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that match.
|
codesearchnet
|
def encode_value(value):
if value is None:
return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)
if isinstance(value, bool):
return document_pb2.Value(boolean_value=value)
if isinstance(value, six.integer_types):
return document_pb2.Value(integer_value=value)
if isinstance(value, float):
return document_pb2.Value(double_value=value)
if isinstance(value, DatetimeWithNanoseconds):
return document_pb2.Value(timestamp_value=value.timestamp_pb())
if isinstance(value, datetime.datetime):
return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))
if isinstance(value, six.text_type):
return document_pb2.Value(string_value=value)
if isinstance(value, six.binary_type):
return document_pb2.Value(bytes_value=value)
document_path = getattr(value, "_document_path", None)
if document_path is not None:
return document_pb2.Value(reference_value=document_path)
if isinstance(value, GeoPoint):
return document_pb2.Value(geo_point_value=value.to_protobuf())
if isinstance(value, list):
value_list = [encode_value(element) for element in value]
value_pb = document_pb2.ArrayValue(values=value_list)
return document_pb2.Value(array_value=value_pb)
if isinstance(value, dict):
value_dict = encode_dict(value)
value_pb = document_pb2.MapValue(fields=value_dict)
return document_pb2.Value(map_value=value_pb)
raise TypeError(
"Cannot convert to a Firestore Value", value, "Invalid type", type(value)
)
|
Converts a native Python value into a Firestore protobuf ``Value``.
Args:
value (Union[NoneType, bool, int, float, datetime.datetime, \
str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native
Python value to convert to a protobuf field.
Returns:
~google.cloud.firestore_v1beta1.types.Value: A
value encoded as a Firestore protobuf.
Raises:
TypeError: If the ``value`` is not one of the accepted types.
|
juraj-google-style
|
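A simplified sketch of the same type-dispatch idea using plain dicts in place of Firestore protobuf messages; note that the bool check must precede the int check because bool is an int subclass:
def encode_simple(value):
    if value is None:
        return {"null_value": None}
    if isinstance(value, bool):      # before int: bool is a subclass of int
        return {"boolean_value": value}
    if isinstance(value, int):
        return {"integer_value": value}
    if isinstance(value, float):
        return {"double_value": value}
    if isinstance(value, str):
        return {"string_value": value}
    if isinstance(value, list):
        return {"array_value": [encode_simple(v) for v in value]}
    if isinstance(value, dict):
        return {"map_value": {k: encode_simple(v) for k, v in value.items()}}
    raise TypeError("Unsupported type", type(value))

print(encode_simple({"n": 3, "ok": True, "tags": ["a", "b"]}))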
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep) * [0] + cls_segment_id
return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLNet
sequence pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 2
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
|
github-repos
|
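The mask construction can be traced with made-up token id lists and a hypothetical sep_token_id of 4 (neither value is taken from the original tokenizer):
sep = [4]                 # hypothetical sep_token_id
cls_segment_id = [2]
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]
mask = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
print(mask)  # [0, 0, 0, 0, 1, 1, 1, 2]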
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(ProtocolVersion, self).read(input_stream, kmip_version=kmip_version)
local_stream = utils.BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MAJOR, local_stream):
self._major = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MAJOR)
self._major.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Invalid encoding missing the major protocol version number.')
if self.is_tag_next(enums.Tags.PROTOCOL_VERSION_MINOR, local_stream):
self._minor = primitives.Integer(tag=enums.Tags.PROTOCOL_VERSION_MINOR)
self._minor.read(local_stream, kmip_version=kmip_version)
else:
raise ValueError('Invalid encoding missing the minor protocol version number.')
self.is_oversized(local_stream)
|
Read the data encoding the ProtocolVersion struct and decode it into
its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if either the major or minor protocol versions
are missing from the encoding.
|
codesearchnet
|
def process(self, batch, device=None):
padded = self.pad(batch)
tensor = self.numericalize(padded, device=device)
return tensor
|
Process a list of examples to create a torch.Tensor.
Pad, numericalize, and postprocess a batch and create a tensor.
Args:
batch (list(object)): A list of objects from a batch of examples.
Returns:
torch.autograd.Variable: Processed object given the input
and custom postprocessing Pipeline.
|
codesearchnet
|
def split_window(self, fpath, vertical=False, size=None, bufopts=None):
command = ('split {}'.format(fpath) if fpath else 'new')
if vertical:
command = ('v' + command)
if size:
command = (str(size) + command)
self._vim.command(command)
if bufopts:
self.set_buffer_options(bufopts)
|
Open file in a new split window.
Args:
fpath (str): Path of the file to open. If ``None``, a new empty
split is created.
vertical (bool): Whether to open a vertical split.
size (Optional[int]): The height (or width) to set for the new window.
bufopts (Optional[dict]): Buffer-local options to set in the split window.
See :func:`.set_buffer_options`.
|
codesearchnet
|
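How the Vim command string is assembled can be shown without a running Vim instance (the file name and size below are arbitrary):
fpath = "notes.txt"
command = 'split {}'.format(fpath) if fpath else 'new'
command = 'v' + command      # vertical split
command = str(30) + command  # requested window size
print(command)               # 30vsplit notes.txt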