| code | docstring | source |
|---|---|---|
def evaluate(self, expression):
dump_tensors_iter = re.finditer(_DUMP_TENSOR_PATTERN, expression)
rewritten_expression = expression
for match in reversed(list(dump_tensors_iter)):
tensor_name = match.group(0)[1:-1].strip()
device_name, node_name, output_slot, debug_op, exec_index = _parse_debug_tensor_name(tensor_name)
if tensor_name not in self._cached_tensor_values:
try:
value = self._dump.get_tensors(node_name, output_slot, debug_op, device_name=device_name)[exec_index]
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
raise ValueError('Eval failed due to the value of %s:%d:DebugIdentity being unavailable' % (node_name, output_slot))
self._cached_tensor_values[tensor_name] = value
rewritten_expression = rewritten_expression[:match.start(0)] + "self._cached_tensor_values['" + tensor_name + "']" + rewritten_expression[match.end(0):]
return eval(rewritten_expression)
|
Evaluate an expression.
Args:
expression: the expression to be evaluated.
Returns:
The result of the evaluation.
Raises:
ValueError: If the value of one or more of the debug tensors in the
expression are not available.
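Example (illustrative sketch; assumes `ev` is an evaluator backed by a
loaded debug dump, with watched tensors referenced in backticks)::
ev.evaluate("np.mean(`Softmax:0`) > 0.5")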
|
github-repos
|
def replace_drive_enclosure(self, information):
uri = '{}/replaceDriveEnclosure'.format(self.data['uri'])
result = self._helper.create(information, uri)
self.refresh()
return result
|
When a drive enclosure has been physically replaced, initiate the replacement operation that enables the
new drive enclosure to take over as a replacement for the prior drive enclosure. The request requires
the serial numbers of both the original drive enclosure and its replacement.
Args:
information: Options to replace the drive enclosure.
Returns:
dict: SAS Logical Interconnect.
|
codesearchnet
|
def get_parsed_context(pipeline, context_in_string):
logger.debug('starting')
if ('context_parser' in pipeline):
parser_module_name = pipeline['context_parser']
logger.debug(f'context parser found: {parser_module_name}')
parser_module = pypyr.moduleloader.get_module(parser_module_name)
try:
logger.debug(f'running parser {parser_module_name}')
result_context = parser_module.get_parsed_context(context_in_string)
logger.debug(f'step {parser_module_name} done')
if (result_context is None):
logger.debug(f'{parser_module_name} returned None. Using empty context instead')
return pypyr.context.Context()
else:
return pypyr.context.Context(result_context)
except AttributeError:
logger.error(f"The parser {parser_module_name} doesn't have a get_parsed_context(context) function.")
raise
else:
logger.debug('pipeline does not have custom context parser. Using empty context.')
logger.debug('done')
return pypyr.context.Context()
|
Execute get_parsed_context handler if specified.
Dynamically load the module specified by the context_parser key in pipeline
dict and execute the get_parsed_context function on that module.
Args:
pipeline: dict. Pipeline object.
context_in_string: string. Argument string used to initialize context.
Returns:
pypyr.context.Context() instance.
Raises:
AttributeError: parser specified on pipeline missing get_parsed_context
function.
|
codesearchnet
|
def patch_on_type(src: symbolic.Symbolic, value_type: Union[Type[Any], Tuple[Type[Any], ...]], value: Any=None, value_fn: Optional[Callable[[Any], Any]]=None, skip_notification: Optional[bool]=None) -> Any:
return _conditional_patch(src, lambda k, v, p: isinstance(v, value_type), value, value_fn, skip_notification)
|
Recursively patch values on matched types.
Example::
d = pg.Dict(a={'x': 1}, b=2)
print(pg.patching.patch_on_type(d, int, value_fn=lambda x: x * 2))
# {a={x=2}, b=4}
Args:
src: symbolic value to patch.
value_type: Value type to match.
value: New value for field that satisfy `condition`.
value_fn: Callable object that produces new value based on old value.
If not None, `value` must be None.
skip_notification: If True, `on_change` event will not be triggered for this
operation. If None, the behavior is decided by `pg.notify_on_rebind`.
Please see `symbolic.Symbolic.rebind` for details.
Returns:
`src` after being patched.
|
github-repos
|
def create_van_der_corput_samples(idx, number_base=2):
assert number_base > 1
idx = numpy.asarray(idx).flatten() + 1
out = numpy.zeros(len(idx), dtype=float)
base = float(number_base)
active = numpy.ones(len(idx), dtype=bool)
while numpy.any(active):
out[active] += (idx[active] % number_base)/base
idx //= number_base
base *= number_base
active = idx > 0
return out
|
Van der Corput samples.
Args:
idx (int, numpy.ndarray):
The index of the sequence. If an array is provided, samples for
all values in the array are returned.
number_base (int):
The numerical base from where to create the samples from.
Returns (float, numpy.ndarray):
Van der Corput samples.
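Example (illustrative)::
>>> create_van_der_corput_samples([0, 1, 2], number_base=2)
array([0.5 , 0.25, 0.75])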
|
juraj-google-style
|
def anti_clobber_dir_path(dir_path, suffix='.d'):
dir_path = os.path.normpath(dir_path)
parts = dir_path.split(os.sep)
for index in range(len(parts)):
test_path = os.sep.join(parts[:(index + 1)])
if os.path.isfile(test_path):
parts[index] += suffix
return os.sep.join(parts)
return dir_path
|
Return a directory path free of filenames.
Args:
dir_path (str): A directory path.
suffix (str): The suffix to append to the part of the path that is
a file.
Returns:
str
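Example (illustrative; assumes ``build/out`` already exists as a file)::
>>> anti_clobber_dir_path('build/out/logs')
'build/out.d/logs'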
|
codesearchnet
|
def _watch(self, primals, tangents):
def _watch(primal, tangent):
if not primal.dtype.is_floating:
logging.log_first_n(logging.WARN, 'The dtype of the watched primal must be floating (e.g. tf.float32), got %r', 5, primal.dtype)
tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype)
if hasattr(primal, 'handle'):
primal = ops.convert_to_tensor(primal.handle)
pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal, tangent)
nest.map_structure(_watch, primals, tangents)
|
Ensures that `primals` are being traced by this accumulator.
Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
(a Jacobian-vector product) for the function computed while this accumulator
is active. Since JVPs are computed in forward mode as the computation
happens, this vector must be supplied in advance.
Watching a single tensor multiple times sums each of its `tangents`. Any
un-watched tensor has zeros for its tangent vector.
Args:
primals: A Tensor or list of Tensors.
tangents: A Tensor or list of Tensors matching `primals`.
|
github-repos
|
def download(self, temp_ver, store_metadata=True):
dest = self._prefixed(temp_ver.name)
temp_dest = ('%s.tmp' % dest)
with utils.LockFile((dest + '.lock')):
if os.path.exists(dest):
return
temp_ver.download(temp_dest)
if store_metadata:
with open(('%s.metadata' % dest), 'w') as f:
utils.json_dump(temp_ver.get_metadata(), f)
sha1 = utils.get_hash(temp_dest)
if (temp_ver.get_hash() != sha1):
raise RuntimeError(('Image %s does not match the expected hash %s' % (temp_ver.name, sha1)))
with open(('%s.hash' % dest), 'w') as f:
f.write(sha1)
with log_utils.LogTask('Convert image', logger=LOGGER):
result = utils.run_command(['qemu-img', 'convert', '-O', 'raw', temp_dest, dest])
os.unlink(temp_dest)
if result:
raise RuntimeError(result.err)
|
Retrieve the given template version
Args:
temp_ver (TemplateVersion): template version to retrieve
store_metadata (bool): If set to ``False``, will not refresh the
local metadata with the retrieved one
Returns:
None
|
codesearchnet
|
def fit(weights: Array, train_dataset: Dataset, iters: int, learning_rate: float, log_span: int, val_dataset: typing.Optional[Dataset]=None) -> Array:
grad_loss = jit(grad(cross_entropy_loss, argnums=0))
for t in range(iters):
weights = weights - learning_rate * grad_loss(weights, train_dataset.X, train_dataset.Y)
if (t + 1) % log_span != 0:
continue
metrics_train = jit(get_metrics)(weights, train_dataset)
print()
print('iter:\t%d' % (t + 1))
print()
print('train accuracy:\t%.5f' % metrics_train.accuracy)
print('train prec.:\t%.5f' % metrics_train.precision)
print('train recall:\t%.5f' % metrics_train.recall)
print('train fscore:\t%.5f' % metrics_train.fscore)
print('train loss:\t%.5f' % metrics_train.loss)
print()
if val_dataset is None:
continue
metrics_val = jit(get_metrics)(weights, val_dataset)
print('val accuracy:\t%.5f' % metrics_val.accuracy)
print('val prec.:\t%.5f' % metrics_val.precision)
print('val recall:\t%.5f' % metrics_val.recall)
print('val fscore:\t%.5f' % metrics_val.fscore)
print('val loss:\t%.5f' % metrics_val.loss)
print()
return weights
|
Updates the weights with the given dataset.
Args:
weights: A weight vector.
train_dataset: A train dataset.
iters: A number of iterations.
learning_rate: A learning rate.
log_span: A span to log metrics.
val_dataset: A validation dataset (optional).
Returns:
An updated weight vector.
|
github-repos
|
def run_conditional_decorators(self, context):
logger.debug('starting')
run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
swallow_me = context.get_formatted_as_type(self.swallow_me, out_type=bool)
if run_me:
if (not skip_me):
try:
if self.retry_decorator:
self.retry_decorator.retry_loop(context, self.invoke_step)
else:
self.invoke_step(context=context)
except Exception as ex_info:
if swallow_me:
logger.error(f'{self.name} Ignoring error because swallow is True for this step: {ex_info}')
else:
raise
else:
logger.info(f'{self.name} not running because skip is True.')
else:
logger.info(f'{self.name} not running because run is False.')
logger.debug('done')
|
Evaluate the step decorators to decide whether to run step or not.
Use pypyr.dsl.Step.run_step if you intend on executing the step the
same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
|
codesearchnet
|
def add_input(self, **kwargs):
self._closed()
def _get_item(args):
'Get a single item from args.'
if (not args):
raise ValueError('No parameter specified.')
item = args.popitem()
if args:
raise ValueError('Too many parameters, not clear what to do with {}'.format(args))
return item
symbols = None
input_dict = CommentedMap()
if ('default' in kwargs):
input_dict['default'] = kwargs.pop('default')
if ('label' in kwargs):
input_dict['label'] = kwargs.pop('label')
if ('symbols' in kwargs):
symbols = kwargs.pop('symbols')
(name, input_type) = _get_item(kwargs)
if (input_type == 'enum'):
typ = CommentedMap()
typ['type'] = 'enum'
if (symbols is None):
raise ValueError("Please specify the enum's symbols.")
if (symbols == []):
raise ValueError("The enum's symbols cannot be empty.")
if (type(symbols) != list):
raise ValueError('Symbols should be a list.')
symbols = [str(s) for s in symbols]
typ['symbols'] = symbols
input_dict['type'] = typ
elif bool(input_dict):
input_dict['type'] = input_type
msg = ('"{}" is already used as a workflow input. Please use a ' + 'different name.')
if (name in self.wf_inputs):
raise ValueError(msg.format(name))
if isinstance(input_type, dict):
input_dict['type'] = input_type
if bool(input_dict):
self.wf_inputs[name] = input_dict
else:
self.wf_inputs[name] = input_type
return Reference(input_name=name)
|
Add workflow input.
Args:
kwargs (dict): A dict with a `name: type` item
and optionally a `default: value` item, where name is the
name (id) of the workflow input (e.g., `dir_in`) and type is
the type of the input (e.g., `'Directory'`).
The type of input parameter can be learned from
`step.inputs(step_name=input_name)`.
Returns:
inputname
Raises:
ValueError: No or multiple parameter(s) have been specified.
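Example (illustrative; `dir_in` and `threads` are hypothetical inputs)::
wf.add_input(dir_in='Directory')
wf.add_input(threads='int', default=4)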
|
codesearchnet
|
def flowshow(flow, win_name='', wait_time=0):
flow = flowread(flow)
flow_img = flow2rgb(flow)
imshow(rgb2bgr(flow_img), win_name, wait_time)
|
Show optical flow.
Args:
flow (ndarray or str): The optical flow to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
|
juraj-google-style
|
def unstem(self, term):
originals = []
for i in self.terms[term]:
originals.append(self.tokens[i]['unstemmed'])
mode = Counter(originals).most_common(1)
return mode[0][0]
|
Given a stemmed term, get the most common unstemmed variant.
Args:
term (str): A stemmed term.
Returns:
str: The unstemmed token.
|
juraj-google-style
|
def find_invalid_filenames(filenames, repository_root):
errors = []
for filename in filenames:
if (not os.path.abspath(filename).startswith(repository_root)):
errors.append((filename, ('Error: File %s does not belong to repository %s' % (filename, repository_root))))
if (not os.path.exists(filename)):
errors.append((filename, ('Error: File %s does not exist' % (filename,))))
if os.path.isdir(filename):
errors.append((filename, ('Error: %s is a directory. Directories are not yet supported' % (filename,))))
return errors
|
Find files that do not exist, are not in the repo, or are directories.
Args:
filenames: list of filenames to check
repository_root: the absolute path of the repository's root.
Returns: A list of errors.
|
codesearchnet
|
def __init__(self, **kwargs) -> 'PygalleBaseClass':
self.options = kwargs
self.init_properties() \
.set_uid() \
.set_class_name() \
.set_category()
|
Create a new instance of :class:`PygalleBaseClass`
# Arguments
kwargs: keyword arguments, stored as the instance options.
# Returns:
PygalleBaseClass: An instance of :class:`PygalleBaseClass`
|
juraj-google-style
|
def reciprocal_lattice_from_outcar( filename ):
outcar = open(filename, "r").read()
recLat = re.findall(r"reciprocal\s*lattice\s*vectors\s*([-.\s\d]*)",
outcar)[-1]
recLat = recLat.split()
recLat = np.array(recLat, dtype=float)
recLat.shape = (3, 6)
recLat = recLat[:, 3:]
return recLat
|
Finds and returns the reciprocal lattice vectors; if more than
one set is present, it returns the last one.
Args:
filename (str): The name of the OUTCAR file to be read.
Returns:
numpy.ndarray: The reciprocal lattice vectors.
|
juraj-google-style
|
class Speech2Text2Processor(ProcessorMixin):
feature_extractor_class = 'AutoFeatureExtractor'
tokenizer_class = 'Speech2Text2Tokenizer'
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def __call__(self, *args, **kwargs):
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
if 'raw_speech' in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
audio = kwargs.pop('raw_speech')
else:
audio = kwargs.pop('audio', None)
sampling_rate = kwargs.pop('sampling_rate', None)
text = kwargs.pop('text', None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if text is not None:
encodings = self.tokenizer(text, **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['labels'] = encodings['input_ids']
return inputs
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@contextmanager
def as_target_processor(self):
warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
|
Constructs a Speech2Text2 processor which wraps a Speech2Text2 feature extractor and a Speech2Text2 tokenizer into
a single processor.
[`Speech2Text2Processor`] offers all the functionalities of [`AutoFeatureExtractor`] and [`Speech2Text2Tokenizer`].
See the [`~Speech2Text2Processor.__call__`] and [`~Speech2Text2Processor.decode`] for more information.
Args:
feature_extractor (`AutoFeatureExtractor`):
An instance of [`AutoFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`Speech2Text2Tokenizer`):
An instance of [`Speech2Text2Tokenizer`]. The tokenizer is a required input.
|
github-repos
|
def report_filter(config, auth, body, filters):
new_body = body.copy()
for f, d in filters.items():
for v in get_rows(config, auth, d):
new_body['params'].setdefault('filters', []).append({'type': f, 'value': v})
return new_body
|
Adds filters to a report body.
Filters cannot be easily added to the reports without templating; this allows
filters to be passed as lists.
Values are specified using get_rows(...) helper, see
starthinker/util/data/__init__.py.
To specify a filter, use the official filter name and a list of values.
For example:
```
filters = {
"FILTER_PARTNER": {
"values":789
},
"FILTER_ADVERTISER": {
"values":[1234, 5678, 91011]
}
}
```
Args:
* auth: (string) Either user or service.
* body: (json) the report body ( with or without filters )
* filters: (json) a dictionary of filters to apply ( see above examples )
Returns:
* body: ( json ) modified report body
|
github-repos
|
def get_percentage_volume_change(self):
initial_vol = self.initial.lattice.volume
final_vol = self.final.lattice.volume
return ((final_vol / initial_vol) - 1)
|
Returns the percentage volume change.
Returns:
Volume change in percentage, e.g., 0.055 implies a 5.5% increase.
|
codesearchnet
|
def stat(self, follow_symlinks=True):
if follow_symlinks:
if self._statresult_symlink is None:
file_object = self._filesystem.resolve(self.path)
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult_symlink = file_object.stat_result.copy()
return self._statresult_symlink
if self._statresult is None:
file_object = self._filesystem.lresolve(self.path)
self._inode = file_object.st_ino
if self._filesystem.is_windows_fs:
file_object.st_nlink = 0
self._statresult = file_object.stat_result.copy()
return self._statresult
|
Return a stat_result object for this entry.
Args:
follow_symlinks: If False and the entry is a symlink, return the
result for the symlink, otherwise for the object it points to.
|
juraj-google-style
|
def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'):
if (not isinstance(token_encoder_model, SequenceEncoderBase)):
raise ValueError('`token_encoder_model` should be an instance of `{}`'.format(SequenceEncoderBase))
if ((not token_encoder_model.allows_dynamic_length()) and (self.max_tokens is None)):
raise ValueError('The provided `token_encoder_model` does not allow variable length mini-batches. You need to provide `max_tokens`')
if (self.embeddings_index is None):
embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)
else:
embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights(self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings)
sequence_input = Input(shape=(self.max_tokens,), dtype='int32')
x = embedding_layer(sequence_input)
x = token_encoder_model(x)
x = Dense(self.num_classes, activation=output_activation)(x)
return Model(sequence_input, x)
|
Builds a model using the given `token_encoder_model`.
Args:
token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document.
This encoding is then fed into a final `Dense` layer for classification.
trainable_embeddings: Whether or not to fine tune embeddings.
output_activation: The output activation to use. (Default value: 'softmax')
Use:
- `softmax` for binary or multi-class.
- `sigmoid` for multi-label classification.
- `linear` for regression output.
Returns:
The model output tensor.
|
codesearchnet
|
def where(self, predicate):
if self.closed():
raise ValueError('Attempt to call where() on a closed Queryable.')
if (not is_callable(predicate)):
raise TypeError('where() parameter predicate={predicate} is not callable'.format(predicate=repr(predicate)))
return self._create(ifilter(predicate, self))
|
Filters elements according to whether they match a predicate.
Note: This method uses deferred execution.
Args:
predicate: A unary function which is applied to each element in the
source sequence. Source elements for which the predicate
returns True will be present in the result.
Returns:
A Queryable over those elements of the source sequence for which
the predicate is True.
Raises:
ValueError: If the Queryable is closed.
TypeError: If the predicate is not callable.
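Example (illustrative; assumes the ``asq`` library's ``query`` initiator)::
>>> from asq.initiators import query
>>> query([1, 2, 3, 4]).where(lambda x: x % 2 == 0).to_list()
[2, 4]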
|
codesearchnet
|
def call(self, *args, **kwargs):
if (not self.is_connected()):
if self.autoconnect:
return self._call_with_autoconnect(*args, **kwargs)
else:
error = ConnectionError('you are not connected and autoconnect=False')
return tornado.gen.maybe_future(error)
return self._call(*args, **kwargs)
|
Calls a redis command and returns a Future of the reply.
Args:
*args: full redis command as variable length argument list or
a Pipeline object (as a single argument).
**kwargs: internal private options (do not use).
Returns:
a Future with the decoded redis reply as result (when available) or
a ConnectionError object in case of connection error.
Raises:
ClientError: your Pipeline object is empty.
Examples:
>>> @tornado.gen.coroutine
def foobar():
client = Client()
result = yield client.call("HSET", "key", "field", "val")
|
codesearchnet
|
def patch_addPadding(self, patches):
paddingLength = self.Patch_Margin
nullPadding = ""
for x in range(1, paddingLength + 1):
nullPadding += chr(x)
for patch in patches:
patch.start1 += paddingLength
patch.start2 += paddingLength
patch = patches[0]
diffs = patch.diffs
if not diffs or diffs[0][0] != self.DIFF_EQUAL:
diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
patch.start1 -= paddingLength
patch.start2 -= paddingLength
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[0][1]):
extraLength = paddingLength - len(diffs[0][1])
newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
diffs[0] = (diffs[0][0], newText)
patch.start1 -= extraLength
patch.start2 -= extraLength
patch.length1 += extraLength
patch.length2 += extraLength
patch = patches[-1]
diffs = patch.diffs
if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
diffs.append((self.DIFF_EQUAL, nullPadding))
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[-1][1]):
extraLength = paddingLength - len(diffs[-1][1])
newText = diffs[-1][1] + nullPadding[:extraLength]
diffs[-1] = (diffs[-1][0], newText)
patch.length1 += extraLength
patch.length2 += extraLength
return nullPadding
|
Add some padding on text start and end so that edges can match
something. Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
Returns:
The padding string added to each side.
|
juraj-google-style
|
def _parse_flowcontrol_receive(self, config):
value = 'off'
match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_receive=value)
|
Scans the config block and returns the flowcontrol receive value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol receive value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict
|
juraj-google-style
|
def plot(self, tag, mpl_plt, step=None, close_plot=True):
if (step is None):
step = self._step
else:
self._step = step
fig = mpl_plt.get_current_fig_manager()
(img_w, img_h) = fig.canvas.get_width_height()
image_buf = io.BytesIO()
mpl_plt.savefig(image_buf, format='png')
image_summary = Summary.Image(encoded_image_string=image_buf.getvalue(), colorspace=4, height=img_h, width=img_w)
summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
self.add_summary(summary, step)
if close_plot:
mpl_plt.close()
|
Saves matplotlib plot output to summary image.
Args:
tag: str: label for this data
mpl_plt: matplotlib stateful pyplot object with prepared plotting state
step: int: training step
close_plot: bool: automatically closes plot
|
codesearchnet
|
def uncompress(element, output_spec):
flat_types = structure.get_flat_tensor_types(output_spec)
flat_shapes = structure.get_flat_tensor_shapes(output_spec)
tensor_list = ged_ops.uncompress_element(element, output_types=flat_types, output_shapes=flat_shapes)
return structure.from_tensor_list(output_spec, tensor_list)
|
Uncompress a compressed dataset element.
Args:
element: A scalar variant tensor to uncompress. The element should have been
created by calling `compress`.
output_spec: A nested structure of `tf.TypeSpec` representing the type(s) of
the uncompressed element.
Returns:
The uncompressed element.
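Example (illustrative sketch; assumes the matching `compress` helper
defined alongside this function and an existing dataset `ds`)::
compressed = compress(next(iter(ds)))
restored = uncompress(compressed, output_spec=ds.element_spec)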
|
github-repos
|
def get_mim_genes(genemap_lines, mim2gene_lines):
LOG.info("Get the mim genes")
genes = {}
hgnc_genes = {}
gene_nr = 0
no_hgnc = 0
for entry in parse_mim2gene(mim2gene_lines):
if 'gene' in entry['entry_type']:
mim_nr = entry['mim_number']
gene_nr += 1
if 'hgnc_symbol' not in entry:
no_hgnc += 1
else:
genes[mim_nr] = entry
LOG.info("Number of genes without hgnc symbol %s", str(no_hgnc))
for entry in parse_genemap2(genemap_lines):
mim_number = entry['mim_number']
inheritance = entry['inheritance']
phenotype_info = entry['phenotypes']
hgnc_symbol = entry['hgnc_symbol']
hgnc_symbols = entry['hgnc_symbols']
if mim_number in genes:
genes[mim_number]['inheritance'] = inheritance
genes[mim_number]['phenotypes'] = phenotype_info
genes[mim_number]['hgnc_symbols'] = hgnc_symbols
for mim_nr in genes:
gene_info = genes[mim_nr]
hgnc_symbol = gene_info['hgnc_symbol']
if hgnc_symbol in hgnc_genes:
existing_info = hgnc_genes[hgnc_symbol]
if not existing_info['phenotypes']:
hgnc_genes[hgnc_symbol] = gene_info
else:
hgnc_genes[hgnc_symbol] = gene_info
return hgnc_genes
|
Get a dictionary with genes and their OMIM information
Args:
genemap_lines(iterable(str))
mim2gene_lines(iterable(str))
Returns:
hgnc_genes(dict): A dictionary with hgnc_symbol as keys
|
juraj-google-style
|
def are_values_same_type(first_val, second_val):
first_val_type = type(first_val)
second_val_type = type(second_val)
if (isinstance(first_val, string_types) and isinstance(second_val, string_types)):
return True
if (isinstance(first_val, bool) or isinstance(second_val, bool)):
return (first_val_type == second_val_type)
if (isinstance(first_val, (numbers.Integral, float)) and isinstance(second_val, (numbers.Integral, float))):
return True
return False
|
Method to verify that both values belong to same type. Float and integer are
considered as same type.
Args:
first_val: Value to validate.
second_val: Value to validate.
Returns:
Boolean: True if both values belong to same type. Otherwise False.
|
codesearchnet
|
def _url_dirname(self, url_or_path):
return os.path.dirname(url_or_path)
|
Pass through to os.path.dirname.
This version uses os.path instead of posixpath to be compatible with the
host OS.
Args:
url_or_path: A string in the form of /some/path.
|
github-repos
|
def __getattr__(self, name: str) -> column_expression_builder.ColumnExpressionBuilder:
lookup = name[:-1] if name.endswith('_') and keyword.iskeyword(name[:-1]) else name
expression = None
if self._fields:
for field in self._fields:
if field.column_name == lookup:
expression = field.builder
else:
expression = getattr(self._root_resource.builder, lookup)
if expression is None:
raise AttributeError(f'No such field {name}')
return column_expression_builder.ColumnExpressionBuilder.from_fhir_path_builder(expression)
|
Used to support building expressions directly off of the base view.
See the class-level documentation for guidance on use.
Args:
name: the name of the FHIR field to start with in the builder.
Returns:
A ColumnExpressionBuilder for the field in question
|
github-repos
|
def _cumprod(l):
ret = [1]
for item in l:
ret.append(ret[-1] * item)
return ret
|
Cumulative product of a list.
Args:
l: a list of integers
Returns:
a list with one more element (starting with 1)
|
juraj-google-style
|
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
pass
|
Puts multiple objects of the same type into the data sink.
Args:
type: The type of the objects being inserted.
items: The objects to be inserted.
context: The context of the insertion (mutable).
|
juraj-google-style
|
def export_as_tfhub_module(model_name, hparams, decode_hparams, problem, checkpoint_path, export_dir):
def hub_module_fn():
'Creates the TF graph for the hub module.'
model_fn = t2t_model.T2TModel.make_estimator_model_fn(model_name, hparams, decode_hparams=decode_hparams, use_tpu=FLAGS.use_tpu)
features = problem.serving_input_fn(hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features
original_features = features.copy()
spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)
hub.add_signature(inputs=original_features, outputs=spec.export_outputs['serving_default'].outputs)
drop_collections = [tf.GraphKeys.LOSSES, tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES]
module_spec = hub.create_module_spec(hub_module_fn, drop_collections=drop_collections)
export_module_spec_with_checkpoint(module_spec, checkpoint_path=checkpoint_path, export_path=export_dir, scope_prefix='')
|
Exports the last checkpoint from the directory as tfhub module.
It creates the Module spec and signature (based on T2T problem information),
which is later used to create and export the hub module.
Module will be saved inside the ckpt_dir.
Args:
model_name: name of the model to be exported.
hparams: T2T parameters, model graph will be based on them.
decode_hparams: T2T parameters for decoding.
problem: the T2T problem instance providing the serving input function.
checkpoint_path: path to the checkpoint to be exported.
export_dir: Directory to write the exported model to.
|
codesearchnet
|
def to_dict(self):
return {'all_set': self._is_all_set(), 'progress': self.progress(), 'values': {property_name: (getattr(self, property_name) or []) for property_name in worker_mapping().keys()}}
|
This method is used in connection with the REST API. It basically
converts all important properties to a dictionary, which may be used by
the frontend.
Returns:
dict: ``{"all_set": bool, "progress": [int(done), int(how_many)], \
"values": {"property": [values], ..}}``
|
codesearchnet
|
def get_output(self):
template_function = TEMPLATE_WRAPPER.format(function_name=self.js_function_name, template_code=self.output.getvalue()).strip()
module_format = JS_MODULE_FORMATS[self.js_module_format]
return module_format(self.dependencies, template_function)
|
Returns the generated JavaScript code.
Returns:
str
|
codesearchnet
|
def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):
source, line_number = inspect.getsourcelines(obj)
idx = 0
while idx < len(source) and _re_args.search(source[idx]) is None:
idx += 1
if idx == len(source):
return
indent = find_indent(source[idx])
idx += 1
start_idx = idx
while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):
idx += 1
idx -= 1
while len(source[idx].strip()) == 0:
idx -= 1
idx += 1
if ''.join(source[start_idx:idx])[:-1] != old_doc_args:
return
obj_file = find_source_file(obj)
with open(obj_file, 'r', encoding='utf-8') as f:
content = f.read()
lines = content.split('\n')
lines = lines[:line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1:]
print(f'Fixing the docstring of {obj.__name__} in {obj_file}.')
with open(obj_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
|
Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.
Args:
obj (`Any`):
The object whose docstring we are fixing.
old_doc_args (`str`):
The current documentation of the parameters of `obj` in the docstring (as returned by
`match_docstring_with_signature`).
new_doc_args (`str`):
The documentation of the parameters of `obj` matched with its signature (as returned by
`match_docstring_with_signature`).
|
github-repos
|
def create_summary_metadata(hparams_plugin_data_pb):
if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):
raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'
' Got: %s' % type(hparams_plugin_data_pb))
content = plugin_data_pb2.HParamsPluginData()
content.CopyFrom(hparams_plugin_data_pb)
content.version = PLUGIN_DATA_VERSION
return tf.compat.v1.SummaryMetadata(
plugin_data=tf.compat.v1.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString()))
|
Returns a summary metadata for the HParams plugin.
Returns a summary_pb2.SummaryMetadata holding a copy of the given
HParamsPluginData message in its plugin_data.content field.
Sets the version field of the hparams_plugin_data_pb copy to
PLUGIN_DATA_VERSION.
Args:
hparams_plugin_data_pb: the HParamsPluginData protobuffer to use.
|
juraj-google-style
|
def construct_lanczos_params(self):
self.min_eigen_vec = autograph.to_graph(utils.tf_lanczos_smallest_eigval)
def _m_vector_prod_fn(x):
return self.get_psd_product(x, dtype=self.lanczos_dtype)
def _h_vector_prod_fn(x):
return self.get_h_product(x, dtype=self.lanczos_dtype)
self.m_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension, 1), dtype=np.float64)
zeros_m = tf.zeros(shape=(self.matrix_m_dimension, 1), dtype=tf.float64)
self.m_min_vec_ph = tf.placeholder_with_default(input=zeros_m,
shape=(self.matrix_m_dimension, 1),
name='m_min_vec_ph')
self.m_min_eig, self.m_min_vec = self.min_eigen_vec(_m_vector_prod_fn,
self.matrix_m_dimension,
self.m_min_vec_ph,
self.lzs_params['max_iter'],
dtype=self.lanczos_dtype)
self.m_min_eig = tf.cast(self.m_min_eig, self.nn_dtype)
self.m_min_vec = tf.cast(self.m_min_vec, self.nn_dtype)
self.h_min_vec_estimate = np.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=np.float64)
zeros_h = tf.zeros(shape=(self.matrix_m_dimension - 1, 1), dtype=tf.float64)
self.h_min_vec_ph = tf.placeholder_with_default(input=zeros_h,
shape=(self.matrix_m_dimension - 1, 1),
name='h_min_vec_ph')
self.h_min_eig, self.h_min_vec = self.min_eigen_vec(_h_vector_prod_fn,
self.matrix_m_dimension-1,
self.h_min_vec_ph,
self.lzs_params['max_iter'],
dtype=self.lanczos_dtype)
self.h_min_eig = tf.cast(self.h_min_eig, self.nn_dtype)
self.h_min_vec = tf.cast(self.h_min_vec, self.nn_dtype)
|
Computes matrices T and V using the Lanczos algorithm.
The resulting minimum eigenvalues and eigenvectors are stored on the
instance as `m_min_eig`/`m_min_vec` and `h_min_eig`/`h_min_vec`.
|
juraj-google-style
|
def IsDevice(self):
if (self._stat_object is None):
self._stat_object = self._GetStat()
if (self._stat_object is not None):
self.entry_type = self._stat_object.type
return (self.entry_type == definitions.FILE_ENTRY_TYPE_DEVICE)
|
Determines if the file entry is a device.
Returns:
bool: True if the file entry is a device.
|
codesearchnet
|
class GeneratorEnqueuer(SequenceEnqueuer):
def __init__(self, generator, use_multiprocessing=False, random_seed=None):
super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
def pool_fn(seqs):
pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
self._send_sequence()
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
for f in last_ones:
f.wait()
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError('Your generator is NOT thread-safe. Keras requires a thread-safe generator when `use_multiprocessing=False, workers > 1`. ')
raise e
|
Builds a queue out of a data generator.
The provided generator can be finite in which case the class will throw
a `StopIteration` exception.
Args:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
|
github-repos
|
def __init__(self, obj):
if distob.engine is None:
setup_engines()
if isinstance(obj, Ref):
self._ref = obj
self.is_local = (self._ref.id.engine is distob.engine.eid)
else:
self._ref = Ref(obj)
self.is_local = True
if self.is_local:
self._dv = None
self._obcache = distob.engine[self._ref.id]
self._obcache_current = True
else:
self._dv = distob.engine._client[self._ref.id.engine]
self._dv.use_dill()
self._obcache = None
self._obcache_current = False
self._id = self._ref.id
self.prefer_local = True
instance_methods, instance_attribs, size = call(
_scan_instance, self, self.__class__._include_underscore,
self.__class__._exclude, prefer_local=False)
for name, doc in instance_methods:
setattr(self, name, _make_proxy_method(name, doc))
for name, doc in instance_attribs:
setattr(self.__class__, name, _make_proxy_property(name, doc))
self.__engine_affinity__ = (self._ref.id.engine, size)
|
Set up the Remote* proxy object to access an already-existing object,
which may be local or remote.
Args:
obj (Ref or object): either a Ref reference to the (possibly remote)
object to be controlled, or else an actual (local) object to be
controlled.
|
juraj-google-style
|
async def update_state(self, short_name, state):
if (short_name not in self.services):
raise ArgumentError('Service name is unknown', short_name=short_name)
if (state not in states.KNOWN_STATES):
raise ArgumentError('Invalid service state', state=state)
serv = self.services[short_name]['state']
if (serv.state == state):
return
update = {}
update['old_status'] = serv.state
update['new_status'] = state
update['new_status_string'] = states.KNOWN_STATES[state]
serv.state = state
(await self._notify_update(short_name, 'state_change', update))
|
Set the current state of a service.
If the state is unchanged from a previous attempt, this routine does
nothing.
Args:
short_name (string): The short name of the service
state (int): The new state of the service
|
codesearchnet
|
def add_institute(self, institute_obj):
internal_id = institute_obj['internal_id']
display_name = institute_obj['display_name']
if self.institute(institute_id=internal_id):
raise IntegrityError("Institute {0} already exists in database"
.format(display_name))
LOG.info("Adding institute with internal_id: {0} and "
"display_name: {1}".format(internal_id,
display_name))
insert_info = self.institute_collection.insert_one(institute_obj)
LOG.info("Institute saved")
|
Add a institute to the database
Args:
institute_obj(Institute)
|
juraj-google-style
|
def load(self, key_filter=None, header_preproc=None):
df = pd.read_csv(self.input_file, sep='\t', dtype=object)
if (key_filter is not None):
df = df[df[df.columns[0]].str.match(key_filter)]
meta_col = df.columns[0]
df[meta_col] = df[meta_col].str.split(',').str[(- 1)]
for col_name in df.columns[1:]:
stripped = df[col_name].str.replace('[a-z]', '')
df[col_name] = pd.to_numeric(stripped, errors='coerce')
if (header_preproc is not None):
df.columns = (list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]])
df.columns = (['key'] + [int(y) for y in df.columns[1:]])
return df
|
Load data table from tsv file, from default location
Args:
key_filter (str): additional filter for key column - regex matching
key values to include; None for no filter
header_preproc (func): function to apply to column headers to extract year numbers (as strings)
Returns:
pd.DataFrame: data
|
codesearchnet
|
def json_to_url(json, symbol):
start = json[0]['date']
end = json[(- 1)]['date']
diff = (end - start)
periods = [300, 900, 1800, 7200, 14400, 86400]
diffs = {}
for p in periods:
diffs[p] = abs((1 - (p / (diff / len(json)))))
period = min(diffs, key=diffs.get)
# URL string truncated in the source; reconstructed here for the Poloniex public returnChartData endpoint
url = ('https://poloniex.com/public?command=returnChartData'
'&currencyPair={}&start={}&end={}&period={}').format(symbol, start, end, period)
return url
|
Converts JSON market data to a Poloniex API URL.
Args:
json: JSON data as a list of dicts, where each dict contains a date
and the raw market statistics.
symbol: String of currency pair, like a ticker symbol.
Returns:
String URL to Poloniex API representing the given JSON.
|
codesearchnet
|
def validate_filename(filename, white_list_formats):
return filename.lower().endswith(white_list_formats) and os.path.isfile(filename)
|
Check if a filename refers to a valid file.
Args:
filename: String, absolute path to a file
white_list_formats: Set, allowed file extensions
Returns:
A boolean value indicating if the filename is valid or not
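Example (illustrative; assumes ``/data/cat.png`` exists on disk)::
>>> validate_filename('/data/cat.png', ('.png', '.jpg'))
True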
|
github-repos
|
def CopyToDateTimeStringISO8601(self):
date_time_string = self.CopyToDateTimeString()
if date_time_string:
date_time_string = date_time_string.replace(' ', 'T')
date_time_string = '{0:s}Z'.format(date_time_string)
return date_time_string
|
Copies the date time value to an ISO 8601 date and time string.
Returns:
str: date and time value formatted as an ISO 8601 date and time string or
None if the timestamp cannot be copied to a date and time string.
|
codesearchnet
|
def loadfile(method=True, writable=False, create=False):
def convert_file_args(args, kwargs):
filething = args[0] if args else None
filename = kwargs.pop("filename", None)
fileobj = kwargs.pop("fileobj", None)
return filething, filename, fileobj, args[1:], kwargs
def wrap(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
filething, filename, fileobj, args, kwargs = \
convert_file_args(args, kwargs)
with _openfile(self, filething, filename, fileobj,
writable, create) as h:
return func(self, h, *args, **kwargs)
@wraps(func)
def wrapper_func(*args, **kwargs):
filething, filename, fileobj, args, kwargs = \
convert_file_args(args, kwargs)
with _openfile(None, filething, filename, fileobj,
writable, create) as h:
return func(h, *args, **kwargs)
return wrapper if method else wrapper_func
return wrap
|
A decorator for functions taking a `filething` as a first argument.
Passes a FileThing instance as the first argument to the wrapped function.
Args:
method (bool): If the wrapped functions is a method
writable (bool): If a filename is passed opens the file readwrite, if
passed a file object verifies that it is writable.
create (bool): If passed a filename that does not exist will create
a new empty file.
|
juraj-google-style
|
def _ParseKeywordArgs(args, fn_spec):
kwargs = {}
remaining_kwargs = []
remaining_args = []
fn_keywords = fn_spec.varkw
fn_args = fn_spec.args + fn_spec.kwonlyargs
if not args:
return (kwargs, remaining_kwargs, remaining_args)
skip_argument = False
for index, argument in enumerate(args):
if skip_argument:
skip_argument = False
continue
if _IsFlag(argument):
contains_equals = '=' in argument
stripped_argument = argument.lstrip('-')
if contains_equals:
key, value = stripped_argument.split('=', 1)
else:
key = stripped_argument
value = None
key = key.replace('-', '_')
is_bool_syntax = not contains_equals and (index + 1 == len(args) or _IsFlag(args[index + 1]))
keyword = ''
if key in fn_args or (is_bool_syntax and key.startswith('no') and (key[2:] in fn_args)) or fn_keywords:
keyword = key
elif len(key) == 1:
matching_fn_args = [arg for arg in fn_args if arg[0] == key]
if len(matching_fn_args) == 1:
keyword = matching_fn_args[0]
elif len(matching_fn_args) > 1:
raise FireError(f"The argument '{argument}' is ambiguous as it could refer to any of the following arguments: {matching_fn_args}")
if not keyword:
got_argument = False
elif contains_equals:
got_argument = True
elif is_bool_syntax:
got_argument = True
if keyword in fn_args:
value = 'True'
elif keyword.startswith('no'):
keyword = keyword[2:]
value = 'False'
else:
value = 'True'
else:
assert index + 1 < len(args)
value = args[index + 1]
got_argument = True
skip_argument = not contains_equals and (not is_bool_syntax)
if got_argument:
kwargs[keyword] = value
else:
remaining_kwargs.append(argument)
if skip_argument:
remaining_kwargs.append(args[index + 1])
else:
remaining_args.append(argument)
return (kwargs, remaining_kwargs, remaining_args)
|
Parses the supplied arguments for keyword arguments.
Given a list of arguments, finds occurrences of --name value, and uses 'name'
as the keyword and 'value' as the value. Constructs and returns a dictionary
of these keyword arguments, and returns a list of the remaining arguments.
If fn_keywords is None, this only finds argument names used by the
function, as specified through fn_args.
This returns the values of the args as strings. They are later processed by
_ParseArgs, which converts them to the appropriate type.
Args:
args: A list of arguments.
fn_spec: The inspectutils.FullArgSpec describing the given callable.
Returns:
kwargs: A dictionary mapping keywords to values.
remaining_kwargs: A list of the unused kwargs from the original args.
remaining_args: A list of the unused arguments from the original args.
Raises:
FireError: If a single-character flag is passed that could refer to multiple
possible args.
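Example (illustrative; assumes `fn_spec` describes a function with
signature `def greet(name, verbose=False)`)::
_ParseKeywordArgs(['--name', 'David', '--noverbose'], fn_spec)
# -> ({'name': 'David', 'verbose': 'False'}, [], [])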
|
github-repos
|
def get_instance(self):
return Instance(self.rest_client.make_request(self.instance), self.rest_client)
|
Get the Streams instance that owns this view.
Returns:
Instance: Streams instance owning this view.
|
codesearchnet
|
def get_adif_id(self, callsign, timestamp=timestamp_now):
return self.get_all(callsign, timestamp)[const.ADIF]
|
Returns ADIF id of a callsign's country
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the country ADIF id
Raises:
KeyError: No Country found for callsign
|
juraj-google-style
|
def check_output(self, want, got, optionflags):
if got and (not want):
return True
if want is None:
want = ''
if want == got:
return True
want = self._ADDRESS_RE.sub('at ...>', want)
want, want_changed = self._tf_tensor_numpy_output(want)
if want_changed:
got, _ = self._tf_tensor_numpy_output(got)
want_text_parts, self.want_floats = self.extract_floats(want)
want_text_parts = [part.strip(' ') for part in want_text_parts]
want_text_wild = '...'.join(want_text_parts)
if '....' in want_text_wild:
want_text_wild = re.sub('\\.\\.\\.\\.+', '...', want_text_wild)
_, self.got_floats = self.extract_floats(got)
self.text_good = super(TfDoctestOutputChecker, self).check_output(want=want_text_wild, got=got, optionflags=optionflags)
if not self.text_good:
return False
if self.want_floats.size == 0:
return True
self.float_size_good = self.want_floats.size == self.got_floats.size
if self.float_size_good:
return self._allclose(self.want_floats, self.got_floats)
else:
return False
|
Compares the docstring output to the output gotten by running the code.
Python addresses in the output are replaced with wildcards.
Float values in the output compared as using `np.allclose`:
* Float values are extracted from the text and replaced with wildcards.
* The wildcard text is compared to the actual output.
* The float values are compared using `np.allclose`.
The method returns `True` if both the text comparison and the numeric
comparison are successful.
The numeric comparison will fail if either:
* The wrong number of floats are found.
* The float values are not within tolerance.
Args:
want: The output in the docstring.
got: The output generated after running the snippet.
optionflags: Flags passed to the doctest.
Returns:
A bool, indicating if the check was successful or not.
|
github-repos
|
def suggest(self, query):
res, suggest = self.search(query, results=1, suggestion=True)
try:
title = suggest or res[0]
except IndexError:
title = None
return title
|
Gather suggestions based on the provided title or None if no
suggestions found
Args:
query (str): Page title
Returns:
String or None: Suggested page title or **None** if no \
suggestion found
|
juraj-google-style
|
def _QueryProcessStatus(self, process):
process_is_alive = process.is_alive()
if process_is_alive:
rpc_client = self._rpc_clients_per_pid.get(process.pid, None)
process_status = rpc_client.CallFunction()
else:
process_status = None
return process_status
|
Queries a process to determine its status.
Args:
process (MultiProcessBaseProcess): process to query for its status.
Returns:
dict[str, str]: status values received from the worker process.
|
juraj-google-style
|
def get_end_start_epochs(year, month, day, direction, unit, count):
if (year or month or day):
if (not year):
year = 2017
if (not month):
month = 1
if (not day):
day = 1
initial_delorean = date_to_delorean(year, month, day)
else:
count += 1
initial_delorean = now_delorean()
initial_epoch = int(initial_delorean.epoch)
shifted_epoch = shift_epoch(initial_delorean, direction, unit, count)
return {'initial': initial_epoch, 'shifted': shifted_epoch}
|
Gets epoch from a start date and epoch from a shifted date
Args:
year: Int between 1 and 9999.
month: Int between 1 and 12.
day: Int between 1 and 31.
direction: String to shift time forwards or backwards.
Valid values: 'last', 'next'.
unit: String of time period unit for count argument.
How far back to check historical market data.
Valid values: 'hour', 'day', 'week', 'month', 'year'.
count: Int of units.
How far back to check historical market data.
Returns:
Dict of int epochs in UTC with keys 'initial' and 'shifted'
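Example (illustrative; shifted value depends on `shift_epoch`)::
get_end_start_epochs(2017, 1, 1, 'last', 'day', 7)
# -> {'initial': 1483228800, 'shifted': <epoch one week earlier>}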
|
codesearchnet
|
def _MakeServiceDescriptor(self, service_proto, service_index, scope, package, file_desc):
if package:
service_name = '.'.join((package, service_proto.name))
else:
service_name = service_proto.name
methods = [self._MakeMethodDescriptor(method_proto, service_name, package, scope, index) for (index, method_proto) in enumerate(service_proto.method)]
desc = descriptor.ServiceDescriptor(name=service_proto.name, full_name=service_name, index=service_index, methods=methods, options=_OptionsOrNone(service_proto), file=file_desc)
self._service_descriptors[service_name] = desc
return desc
|
Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.
Args:
service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.
service_index: The index of the service in the File.
scope: Dict mapping short and full symbols to message and enum types.
package: Optional package name for the new message EnumDescriptor.
file_desc: The file containing the service descriptor.
Returns:
The added descriptor.
|
codesearchnet
|
def _GetContainerTypes(self):
self._cursor.execute(self._TABLE_NAMES_QUERY)
table_names = [row[0] for row in self._cursor.fetchall()]
return [table_name for table_name in self._CONTAINER_TYPES if (table_name in table_names)]
|
Retrieves the container types to merge.
Container types not defined in _CONTAINER_TYPES are ignored and not merged.
Specific container types reference other container types, such
as event referencing event data. The names are ordered to ensure the
attribute containers are merged in the correct order.
Returns:
list[str]: names of the container types to merge.
|
codesearchnet
|
def squeeze_batch_dims(inp, op, inner_rank):
with ops.name_scope_v2('squeeze_batch_dims'):
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
|
Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
op: A callable that takes a single input tensor and returns a single.
output tensor.
inner_rank: A python integer.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
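Example (illustrative; assumes `import tensorflow as tf`)::
x = tf.ones([4, 3, 5, 7])  # batch_shape=[4, 3], inner_shape=[5, 7]
y = squeeze_batch_dims(x, tf.linalg.matrix_transpose, inner_rank=2)
# y.shape == [4, 3, 7, 5]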
|
github-repos
|
def _set_initial_contents(self, contents):
contents = self._encode_contents(contents)
changed = self._byte_contents != contents
st_size = len(contents)
if self._byte_contents:
self.size = 0
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
self._byte_contents = contents
self.st_size = st_size
self.epoch += 1
return changed
|
Sets the file contents and size.
Called internally after initial file creation.
Args:
contents: string, new content of file.
Returns:
True if the contents have been changed.
Raises:
IOError: if the st_size is not a non-negative integer,
or if st_size exceeds the available file system space
|
juraj-google-style
|
def attach(self, droplet_id, region):
return self.get_data(('volumes/%s/actions/' % self.id), type=POST, params={'type': 'attach', 'droplet_id': droplet_id, 'region': region})
|
Attach a Volume to a Droplet.
Args:
droplet_id: int - droplet id
region: string - slug identifier for the region
|
codesearchnet
|
def read(file_path):
actual_file_path = os.path.expanduser(file_path)
with open(actual_file_path, 'r') as f:
lines = f.readlines()
gmt = []
for (line_num, line) in enumerate(lines):
fields = line.split('\t')
assert (len(fields) > 2), ('Each line must have at least 3 tab-delimited items. ' + 'line_num: {}, fields: {}').format(line_num, fields)
fields[(- 1)] = fields[(- 1)].rstrip()
entries = fields[2:]
entries = [x for x in entries if x]
assert (len(set(entries)) == len(entries)), ('There should not be duplicate entries for the same set. ' + 'line_num: {}, entries: {}').format(line_num, entries)
line_dict = {SET_IDENTIFIER_FIELD: fields[0], SET_DESC_FIELD: fields[1], SET_MEMBERS_FIELD: entries}
gmt.append(line_dict)
verify_gmt_integrity(gmt)
return gmt
|
Read a gmt file at the path specified by file_path.
Args:
file_path (string): path to gmt file
Returns:
gmt (GMT object): list of dicts, where each dict corresponds to one
line of the GMT file
|
codesearchnet
|
def __init__(self, estimator, logdir=None):
threading.Thread.__init__(self)
self.event = threading.Event()
self.estimator = estimator
self.logdir = logdir or tempfile.mkdtemp()
|
Initialize ``Tensorboard`` instance.
Args:
estimator (sagemaker.estimator.Framework): A SageMaker ``Estimator``.
logdir (str): Directory for logs (default: None). If not specified, a temporary directory is made.
|
juraj-google-style
|
def dp020(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dp020`'.format(value))
self._dp020 = value
|
Corresponds to IDD Field `dp020`
Dew-point temperature corresponding to 2.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `dp020`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def array(self, dimensions=None):
if (dimensions is None):
dims = [d for d in (self.kdims + self.vdims)]
else:
dims = [self.get_dimension(d, strict=True) for d in dimensions]
(columns, types) = ([], [])
for dim in dims:
column = self.dimension_values(dim)
columns.append(column)
types.append(column.dtype.kind)
if (len(set(types)) > 1):
columns = [c.astype('object') for c in columns]
return np.column_stack(columns)
|
Convert dimension values to columnar array.
Args:
dimensions: List of dimensions to return
Returns:
Array of columns corresponding to each dimension
|
codesearchnet
|
def _find_cellid(self, code):
from difflib import SequenceMatcher
maxvalue = 0.
maxid = None
for cellid, c in self.cellids.items():
matcher = SequenceMatcher(a=c, b=code)
ratio = matcher.quick_ratio()
if ratio > maxvalue and ratio > 0.5:
maxid, maxvalue = cellid, ratio
return maxid
|
Determines the most similar cell (if any) to the specified code. It
must have at least 50% overlap ratio and have been a loop-intercepted
cell previously.
Args:
code (str): contents of the code cell that were executed.
|
juraj-google-style
|
def add_input(self, *args, **kwargs):
return self._inputs.add(*args, **kwargs)
|
Add a wrapped input argument to the hint.
Args:
*args: The input tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped input tensor.
|
github-repos
|
def fullpath(self):
return str(os.path.join(self.path, self.directory))
|
Full path to the Mackup configuration files.
The full path to the directory where Mackup stores the configuration
files.
Returns:
str
|
codesearchnet
|
def absolute_url(self):
if self.is_root():
return utils.concat_urls(self.url)
return utils.concat_urls(self.parent.absolute_url, self.url)
|
Get the absolute url of ``self``.
Returns:
str: the absolute url.
|
codesearchnet
|
def extract_github_repo_owner_and_name(url):
_check_github_url_is_supported(url)
parts = get_parts_of_url_path(url)
repo_owner = parts[0]
repo_name = parts[1]
return repo_owner, _strip_trailing_dot_git(repo_name)
|
Given a URL, return the repo name and who owns it.
Args:
url (str): The URL to the GitHub repository
Raises:
ValueError: on URLs that aren't from GitHub
Returns:
str, str: the owner of the repository, the repository name
|
juraj-google-style
|
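get_parts_of_url_path is not shown in this row; a plausible stand-in splits the URL path, which is enough to trace the function's behavior on an assumed example URL.
from urllib.parse import urlparse

def get_parts_of_url_path(url):
    return urlparse(url).path.strip('/').split('/')

parts = get_parts_of_url_path('https://github.com/octocat/hello-world.git')
# parts[0] == 'octocat'; _strip_trailing_dot_git(parts[1]) == 'hello-world'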
def restore_collection(backup):
for k, v in six.iteritems(backup):
del tf.get_collection_ref(k)[:]
tf.get_collection_ref(k).extend(v)
|
Restore from a collection backup.
Args:
backup (dict):
|
juraj-google-style
|
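A minimal TF1-style sketch of the round trip this enables; the snapshot step is assumed to mirror the same {key: list} mapping that restore_collection expects.
import tensorflow.compat.v1 as tf

backup = {k: list(tf.get_collection(k))
          for k in tf.get_default_graph().get_all_collection_keys()}
# ... code that mutates graph collections ...
restore_collection(backup)  # every collection is reset to the snapshot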
def fragment_search(self, fragment: str) -> List[dict]:
    fragment = self.extract_fragment(fragment)
    ilx_rows = self.fragment2rows.get(fragment)
    if not ilx_rows:
        return None
    else:
        return ilx_rows
|
Returns the rows in InterLex associated with the fragment
Note:
Presumed to have duplicate fragments in InterLex
Args:
fragment: The fragment_id of the curie pertaining to the ontology
Returns:
None or List[dict]
|
juraj-google-style
|
def predict_proba(self, x, y=None, **kwargs):
    if self.clf is None:
        raise ValueError('Model has to be trained before making predictions.')
    if isinstance(x, pandas.Series):
        input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, -1))
    elif isinstance(x, pandas.DataFrame):
        input_ = np.array([self.featurize_row(row.iloc[0], row.iloc[1]) for _, row in x.iterrows()])
    elif y is not None:
        input_ = self.featurize_row(x, y).reshape((1, -1))
    else:
        raise TypeError('DataType not understood.')
    return self.clf.predict(input_)
|
Predict the causal score using a trained RCC model
Args:
x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset.
y (numpy.array): second variable (optional depending on the first argument).
Returns:
float: Causation score (Value : 1 if a->b and -1 if b->a)
|
codesearchnet
|
def round_f1(y_true, y_predicted):
try:
predictions = [np.round(x) for x in y_predicted]
except TypeError:
predictions = y_predicted
return f1_score(y_true, predictions)
|
Calculates F1 (binary) measure.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
F1 score
|
juraj-google-style
|
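Usage sketch: probabilistic outputs are rounded to hard labels before scoring (f1_score here is sklearn's, which round_f1 itself depends on).
from sklearn.metrics import f1_score

y_true = [0, 1, 1, 0]
y_prob = [0.2, 0.8, 0.6, 0.4]
print(round_f1(y_true, y_prob))  # 1.0, since rounding gives [0, 1, 1, 0]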
def StaticAdd(cls, queue_urn, rdf_value, mutation_pool=None):
if (not isinstance(rdf_value, cls.rdf_type)):
raise ValueError(('This collection only accepts values of type %s.' % cls.rdf_type.__name__))
if (mutation_pool is None):
raise ValueError("Mutation pool can't be none.")
timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
if (not isinstance(queue_urn, rdfvalue.RDFURN)):
queue_urn = rdfvalue.RDFURN(queue_urn)
mutation_pool.QueueAddItem(queue_urn, rdf_value, timestamp)
|
Adds an rdf value to the queue.
Adds an rdf value to a queue. Does not require that the queue be locked, or
even open. NOTE: The caller is responsible for ensuring that the queue
exists and is of the correct type.
Args:
queue_urn: The urn of the queue to add to.
rdf_value: The rdf value to add to the queue.
mutation_pool: A MutationPool object to write to.
Raises:
ValueError: rdf_value has unexpected type.
|
codesearchnet
|
def _VerifyValues(self, image, ksizes, strides, rates, padding, patches):
ksizes = [1] + ksizes + [1]
strides = [1] + strides + [1]
rates = [1] + rates + [1]
for dtype in [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]:
out_tensor = array_ops.extract_image_patches(constant_op.constant(image, dtype=dtype), ksizes=ksizes, strides=strides, rates=rates, padding=padding, name='im2col')
self.assertAllClose(np.array(patches, dtype=dtype), self.evaluate(out_tensor))
|
Tests input-output pairs for the ExtractImagePatches op.
Args:
image: Input tensor with shape: [batch, in_rows, in_cols, depth].
ksizes: Patch size specified as: [ksize_rows, ksize_cols].
strides: Output strides, specified as [stride_rows, stride_cols].
rates: Atrous rates, specified as [rate_rows, rate_cols].
padding: Padding type.
patches: Expected output.
|
github-repos
|
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
if not isinstance(x, sparse_tensor.SparseTensor):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = _message_prefix(message)
static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
dynamic_condition = math_ops.equal
if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
name = ''
else:
name = x.name
if data is None:
data = [message, 'Tensor %s must have rank' % name, rank, 'Received shape: ', array_ops.shape(x)]
try:
assert_op = _assert_rank_condition(x, rank, static_condition, dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError('%sTensor %s must have rank %d. Received rank %d, shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
else:
raise ValueError(e.args[0])
return assert_op
|
Assert `x` has rank equal to `rank`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar integer `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and the shape of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_rank".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
|
github-repos
|
def anti_join(df, other, **kwargs):
(left_on, right_on, suffixes) = get_join_parameters(kwargs)
if (not right_on):
right_on = [col_name for col_name in df.columns.values.tolist() if (col_name in other.columns.values.tolist())]
left_on = right_on
elif (not isinstance(right_on, (list, tuple))):
right_on = [right_on]
other_reduced = other[right_on].drop_duplicates()
joined = df.merge(other_reduced, how='left', left_on=left_on, right_on=right_on, suffixes=('', '_y'), indicator=True).query('_merge=="left_only"')[df.columns.values.tolist()]
return joined
|
Returns all of the rows in the left DataFrame that do not have a
match in the right DataFrame.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
Example:
a >> anti_join(b, by='x1')
x1 x2
2 C 3
|
codesearchnet
|
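Hypothetical frames matching the docstring's example; in the pipe-based API this would be a >> anti_join(b, by='x1'), written here as if directly callable for clarity.
import pandas as pd

a = pd.DataFrame({'x1': ['A', 'B', 'C'], 'x2': [1, 2, 3]})
b = pd.DataFrame({'x1': ['A', 'B'], 'x3': [True, False]})
result = anti_join(a, b, by='x1')  # only the x1 == 'C' row survives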
def get_config(filepath=None, default_loader=None, on_missing=None):
cache_key = (filepath, default_loader, on_missing)
if (CACHE.get(cache_key) is not None):
return CACHE.get(cache_key)
logger = logging.getLogger('birding')
if (filepath is None):
filepath = BIRDING_CONF
if (default_loader is None):
default_loader = get_defaults_file
if (on_missing is None):
on_missing = logger.info
logger.info('Looking for configuration file: {}'.format(os.path.abspath(filepath)))
if (not os.path.exists(filepath)):
on_missing('No {} configuration file found.'.format(filepath))
if (filepath != BIRDING_CONF_DEFAULT):
os.stat(filepath)
config = yaml.safe_load(default_loader())
tv.validate(SCHEMA, config)
if os.path.exists(filepath):
file_config = yaml.safe_load(open(filepath))
if file_config:
config = overlay(file_config, config)
tv.validate(SCHEMA, config)
CACHE.put(cache_key, config)
return config
|
Get a dict for the current birding configuration.
The resulting dictionary is fully populated with defaults, such that all
valid keys will resolve to valid values. Invalid and extra values in the
configuration result in an exception.
See :ref:`config` (module-level docstring) for discussion on how birding
configuration works, including filepath loading. Note that a non-default
filepath set via env results in a :py:exc:`OSError` when the file is
missing, but the default filepath is ignored when missing.
This function caches its return values as to only parse configuration once
per set of inputs. As such, treat the resulting dictionary as read-only as
not to accidentally write values which will be seen by other handles of the
dictionary.
Args:
filepath (str): path to birding configuration YAML file.
default_loader (callable):
callable which returns file descriptor with YAML data of default
configuration values
on_missing (callable): callback to call when file is missing.
Returns:
dict: dict of current birding configuration; treat as read-only.
|
codesearchnet
|
def cancelRealTimeBars(self, bars: RealTimeBarList):
self.client.cancelRealTimeBars(bars.reqId)
self.wrapper.endSubscription(bars)
|
Cancel the realtime bars subscription.
Args:
bars: The bar list that was obtained from ``reqRealTimeBars``.
|
juraj-google-style
|
def getConfigPath(configFileName=None):
paths = {}
applicationPath = './'
if (sys.platform == 'win32'):
applicationPath = os.path.expanduser(os.path.join('~\\', 'OSRFramework'))
else:
applicationPath = os.path.expanduser(os.path.join('~/', '.config', 'OSRFramework'))
paths = {'appPath': applicationPath, 'appPathData': os.path.join(applicationPath, 'data'), 'appPathDefaults': os.path.join(applicationPath, 'default'), 'appPathPlugins': os.path.join(applicationPath, 'plugins'), 'appPathWrappers': os.path.join(applicationPath, 'plugins', 'wrappers'), 'appPathPatterns': os.path.join(applicationPath, 'plugins', 'patterns')}
for path in paths.keys():
if (not os.path.exists(paths[path])):
os.makedirs(paths[path])
return paths
|
Auxiliary function to get the configuration paths depending on the system
Args:
-----
configFileName: TODO.
Returns:
--------
A dictionary with the following keys: appPath, appPathData,
appPathDefaults, appPathPlugins, appPathWrappers and appPathPatterns.
|
codesearchnet
|
def summarize(self, geom, stat=None):
if not hasattr(geom, 'num_coords'):
raise TypeError('Need OGR or GEOS geometry, %s found' % type(geom))
clone = self._clone()
for obj in clone:
arr = obj.array(geom)
if arr is not None:
if stat:
arr = agg_dims(arr, stat)
try:
arr = arr.squeeze()
except ValueError:
pass
obj.image = arr
return clone
|
Returns a new RasterQuerySet with subsetted/summarized ndarrays.
Arguments:
geom -- geometry for masking or spatial subsetting
Keyword args:
stat -- any numpy summary stat method as str (min/max/mean/etc)
|
juraj-google-style
|
def save_with_exif_info(img, *args, **kwargs):
if ('exif' in kwargs):
exif = kwargs.pop('exif')
else:
exif = img.info.get('exif')
img.save(*args, exif=exif, **kwargs)
|
Saves an image using PIL, preserving the exif information.
Args:
img (PIL.Image.Image):
*args: The arguments for the `save` method of the Image class.
**kwargs: The keywords for the `save` method of the Image class.
|
codesearchnet
|
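Usage sketch with illustrative file names: the EXIF block is pulled from img.info when no explicit exif keyword is given.
from PIL import Image

img = Image.open('photo.jpg')
save_with_exif_info(img, 'photo_copy.jpg', quality=95)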
def Shell(self, command, timeout_ms=None):
return self.protocol_handler.Command(
self._handle, service=b'shell', command=command,
timeout_ms=timeout_ms)
|
Run command on the device, returning the output.
Args:
command: Shell command to run
timeout_ms: Maximum time to allow the command to run.
|
juraj-google-style
|
def _write_reqs(amend: bool=False, stage: bool=False):
LOGGER.info('writing requirements')
base_cmd = 'pipenv lock -r'
_write_reqs_file(f'{base_cmd}', 'requirements.txt')
_write_reqs_file(f'{base_cmd} -d', 'requirements-dev.txt')
files_to_add = ['Pipfile', 'requirements.txt', 'requirements-dev.txt']
if amend:
CTX.repo.amend_commit(append_to_msg='update requirements [auto]', files_to_add=files_to_add)
elif stage:
CTX.repo.stage_subset(*files_to_add)
|
Writes the requirement files
Args:
amend: amend last commit with changes
stage: stage changes
|
codesearchnet
|
def _HashRow(cls, row):
values = []
for value in row:
try:
value = '{0!s}'.format(value)
except UnicodeDecodeError:
value = repr(value)
values.append(value)
return hash(' '.join(values))
|
Hashes the given row.
Args:
row (sqlite3.Row): row.
Returns:
int: hash value of the given row.
|
juraj-google-style
|
def apply(self, var, props, reverse=False):
vs, vid = sort_vid_split(var)
if reverse:
tms = []
else:
tms = [(a, op, b) for a, op, b in self._typemap if op in _LR_OPS]
for src, op, tgt in tms:
if _valmatch([vs], src, op, None, self._semi, 'variables'):
vs = vs if tgt == ['*'] else tgt[0]
break
newvar = '{}{}'.format(vs, vid)
newprops = {}
for featsets, valmap in self._propmap:
if reverse:
tgtfeats, srcfeats = featsets
pms = [(b, op, a) for a, op, b in valmap if op in _RL_OPS]
else:
srcfeats, tgtfeats = featsets
pms = [(a, op, b) for a, op, b in valmap if op in _LR_OPS]
vals = [props.get(f) for f in srcfeats]
for srcvals, op, tgtvals in pms:
if _valmatch(vals, srcvals, op, vs, self._semi, 'properties'):
                for i, featval in enumerate(zip(tgtfeats, tgtvals)):
                    k, v = featval
                    if v == '*':
                        if i < len(vals) and vals[i] is not None:
                            newprops[k] = vals[i]
                    elif v != '!':
                        newprops[k] = v
break
return newvar, newprops
|
Apply the VPM to variable *var* and properties *props*.
Args:
var: a variable
props: a dictionary mapping properties to values
reverse: if `True`, apply the rules in reverse (e.g. from
grammar-external to grammar-internal forms)
Returns:
a tuple (v, p) of the mapped variable and properties
|
juraj-google-style
|
def _IsIdentifier(cls, string):
    return bool(string) and not string[0].isdigit() and all(
        character.isalnum() or character == '_' for character in string)
|
Checks if a string is an identifier.
Args:
string (str): string to check.
Returns:
bool: True if the string is an identifier, False otherwise.
|
codesearchnet
|
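Behavior sketch (calling the classmethod as a plain function for illustration): alphanumerics and underscores pass, leading digits and empty strings do not.
for s in ('name', '_name2', '2name', 'with-dash', ''):
    print(repr(s), _IsIdentifier(s))
# 'name' True, '_name2' True, '2name' False, 'with-dash' False, '' False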
def flatten(repertoire, big_endian=False):
if repertoire is None:
return None
order = 'C' if big_endian else 'F'
return repertoire.squeeze().ravel(order=order)
|
Flatten a repertoire, removing empty dimensions.
By default, the flattened repertoire is returned in little-endian order.
Args:
repertoire (np.ndarray or None): A repertoire.
Keyword Args:
big_endian (boolean): If ``True``, flatten the repertoire in big-endian
order.
Returns:
np.ndarray: The flattened repertoire.
|
juraj-google-style
|
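Order sketch for a 2x2 repertoire: little-endian (Fortran order, the default) walks the first axis fastest, big-endian (C order) the last.
import numpy as np

r = np.array([[0, 1],
              [2, 3]])
print(flatten(r))                   # [0 2 1 3]
print(flatten(r, big_endian=True))  # [0 1 2 3]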
def _faster_to_representation(self, instance):
ret = {}
fields = self._readable_fields
is_fast = isinstance(instance, prefetch.FastObject)
id_fields = self._readable_id_fields
for field in fields:
attribute = None
if (
is_fast and
not isinstance(
field,
(DynamicGenericRelationField, DynamicRelationField)
)
):
if field in id_fields and field.source not in instance:
attribute = instance.get(field.source + '_id')
ret[field.field_name] = attribute
continue
else:
try:
attribute = instance[field.source]
except KeyError:
if hasattr(instance, field.source):
attribute = getattr(instance, field.source)
else:
attribute = field.get_attribute(instance)
print(
'Missing %s from %s' % (
field.field_name,
self.__class__.__name__
)
)
else:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
if attribute is None:
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
|
Modified to_representation with optimizations.
1) Returns a plain old dict as opposed to OrderedDict.
(Constructing ordered dict is ~100x slower than `{}`.)
2) Ensure we use a cached list of fields
(this optimization exists in DRF 3.2 but not 3.1)
Arguments:
instance: a model instance or data object
Returns:
Dict of primitive datatypes.
|
juraj-google-style
|
def _CreateIndexIfNotExists(self, index_name, mappings):
try:
if not self._client.indices.exists(index_name):
self._client.indices.create(
body={'mappings': mappings}, index=index_name)
except elasticsearch.exceptions.ConnectionError as exception:
raise RuntimeError(
'Unable to create Elasticsearch index with error: {0!s}'.format(
exception))
|
Creates an Elasticsearch index if it does not exist.
Args:
index_name (str): name of the index.
mappings (dict[str, object]): mappings of the index.
Raises:
RuntimeError: if the Elasticsearch index cannot be created.
|
juraj-google-style
|
def make_flat_list_of_images(images: Union[list[ImageInput], ImageInput]) -> ImageInput:
if isinstance(images, (list, tuple)) and all((isinstance(images_i, (list, tuple)) for images_i in images)) and all((is_valid_list_of_images(images_i) for images_i in images)):
return [img for img_list in images for img in img_list]
if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):
if is_pil_image(images[0]) or images[0].ndim == 3:
return images
if images[0].ndim == 4:
return [img for img_list in images for img in img_list]
if is_valid_image(images):
if is_pil_image(images) or images.ndim == 3:
return [images]
if images.ndim == 4:
return list(images)
raise ValueError(f'Could not make a flat list of images from {images}')
|
Ensure that the output is a flat list of images. If the input is a single image, it is converted to a list of length 1.
If the input is a nested list of images, it is converted to a flat list of images.
Args:
images (`Union[List[ImageInput], ImageInput]`):
The input image.
Returns:
list: A list of images or a 4d array of images.
|
github-repos
|
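Shape-handling sketch using numpy arrays as stand-in images (HWC for single images, NHWC for batches).
import numpy as np

single = np.zeros((32, 32, 3))
batch = np.zeros((4, 32, 32, 3))
nested = [[single, single], [single]]

assert len(make_flat_list_of_images(single)) == 1
assert len(make_flat_list_of_images(batch)) == 4
assert len(make_flat_list_of_images(nested)) == 3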
def run(self, gin):
with ScratchDir('.'):
p = subprocess.Popen(self._gulp_cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate(bytearray(gin, 'utf-8'))
out = out.decode('utf-8')
err = err.decode('utf-8')
if (('Error' in err) or ('error' in err)):
print(gin)
print('----output_0---------')
print(out)
print('----End of output_0------\n\n\n')
            print('----error output--------')
            print(err)
            print('----End of error output------')
raise GulpError(err)
if ('ERROR' in out):
raise GulpError(out)
conv_err_string = 'Conditions for a minimum have not been satisfied'
if (conv_err_string in out):
raise GulpConvergenceError()
gout = ''
for line in out.split('\n'):
gout = ((gout + line) + '\n')
return gout
|
Run GULP using the gin as input
Args:
gin: GULP input string
Returns:
gout: GULP output string
|
codesearchnet
|
def freeze(self, permanent_value: Any=utils.MISSING_VALUE, apply_before_use: bool=True) -> 'ValueSpec':
|
Sets the default value using a permanent value and freezes current spec.
A frozen value spec will not accept any value that is not the default
value. A frozen value spec is useful when a subclass fixes the value of a
symbolic attribute and wants to prevent it from being modified.
Args:
permanent_value: A permanent value used for current spec.
If MISSING_VALUE, freeze the value spec with current default value.
apply_before_use: If True, invoke `apply` on permanent value
when permanent_value is provided, otherwise use it as is.
Returns:
ValueSpec itself.
Raises:
ValueError if current default value is MISSING_VALUE and the permanent
value is not specified.
|
github-repos
|
def _fork_children_processes(name, successors):
logging.info('Process "%s" started, PID: %d!', name, os.getpid())
children_process = [multiprocessing.Process(target=_fork_children_processes, args=args) for args in successors]
for child_process in children_process:
child_process.start()
if 'child' in name:
time.sleep(4)
for child_process in children_process:
child_process.join()
logging.info('Process "%s" exit.', name)
|
Forks child processes and their descendants recursively.
Args:
name: The name of this process.
successors: The args for the descendant processes.
|
github-repos
|
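A hypothetical invocation: each successor entry is a (name, successors) args tuple mirroring the function's signature, so this builds a three-process tree with one grandchild.
if __name__ == '__main__':
    _fork_children_processes(
        'root',
        [('child-1', [('grandchild-1', [])]),
         ('child-2', [])])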
def _check_error(self, response, json_response=None):
if (response.status_code >= 400):
json_response = (json_response or self._get_json_response(response))
err_cls = self._check_http_error_code(response.status_code)
try:
raise err_cls(('%s error: %s' % (response.status_code, json_response['error']['error_msg'])), response.status_code)
except TypeError:
raise err_cls(('%s error: %s' % (response.status_code, json_response['error_description'])), response.status_code)
return True
|
Check the response for an HTTP error code and raise an exception if one is found
Args:
response (object): Object returned by requests' `get` and `post`
methods
json_response (dict): JSON response, if applicable
Raises:
HTTPError: If the status code of response is either 4xx or 5xx
Returns:
True if status code is not error code
|
codesearchnet
|
def resolve_one_of(tags, at_least_one):
if (len(tags) < len(at_least_one)):
return None
for possible_resolution in choose_1_from_each(at_least_one):
resolution = {}
pr = possible_resolution[:]
for entity_type in pr:
last_end_index = (- 1)
if (entity_type in resolution):
                last_end_index = resolution[entity_type][-1].get('end_token')
(tag, value, c) = find_first_tag(tags, entity_type, after_index=last_end_index)
if (not tag):
break
else:
if (entity_type not in resolution):
resolution[entity_type] = []
resolution[entity_type].append(tag)
if (len(resolution) == len(possible_resolution)):
return resolution
return None
|
This searches tags for Entities in at_least_one and returns any match
Args:
tags(list): List of tags with Entities to search for Entities
at_least_one(list): List of Entities to find in tags
Returns:
dict or None: a mapping from entity type to matched tags, or None if no match is found
|
codesearchnet
|
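choose_1_from_each is not included in this snippet; a plausible implementation yields every combination that picks one entity per inner list, e.g. via itertools.
from itertools import product

def choose_1_from_each(list_of_lists):
    for combination in product(*list_of_lists):
        yield list(combination)

print(list(choose_1_from_each([[1, 2], ['a']])))  # [[1, 'a'], [2, 'a']]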
def _shape_invariant_to_type_spec(var, shape=None):
var = _convert_tensorarray_to_flow(var)
if shape is None:
return type_spec.type_spec_from_value(var)
elif isinstance(shape, type_spec.TypeSpec):
if not shape.is_compatible_with(var):
raise TypeError('TypeSpec %r is not compatible with %r' % (shape, var))
return shape
elif not isinstance(shape, tensor_shape.TensorShape):
raise TypeError(f"'shape' must be one of TypeSpec, TensorShape or None. Received: {type(shape)}")
if isinstance(var, tensor_lib.Tensor):
return tensor_lib.TensorSpec(shape, var.dtype)
else:
try:
return var._shape_invariant_to_type_spec(shape)
except NotImplementedError as e:
raise TypeError(f'To describe or constrain a {type(var).__name__}, use a {type(var._type_spec).__name__} instead of a TensorShape.') from e
|
Converts a shape invariant to a TypeSpec.
If `var` is a TensorArray, it will first be converted to its flow.
Args:
var: The tensor, tensor array or composite tensor whose shape is described
by the shape invariant.
shape: A `TypeSpec` or `TensorShape`. If `shape` is already a `TypeSpec`,
then it is simply returned as-is.
Returns:
A `TypeSpec` for `var`, consistent with the given shape.
Raises:
TypeError: If `shape` is a TypeSpec and not compatible with `var`.
TypeError: If `shape` is not None, a TypeSpec, or a TensorShape.
TypeError: If `shape` is a TensorShape, `var` is a CompositeTensor, and
`var` doesn't implement the `_shape_invariant_to_type_spec` method.
|
github-repos
|
def _collect_feature_info(self, candidate_feature_diffs):
project_root = self.project.path
for diff in candidate_feature_diffs:
path = diff.b_path
modname = relpath_to_modname(path)
modpath = project_root.joinpath(path)
importer = partial(import_module_at_path, modname, modpath)
        yield importer, modname, modpath
|
Collect feature info
Args:
candidate_feature_diffs (List[git.diff.Diff]): list of Diffs
corresponding to admissible file changes compared to
comparison ref
Yields:
Tuple: tuple of importer, module name, and module
path. The "importer" is a callable that returns a module
|
codesearchnet
|
def query_blockchain_events(
web3: Web3,
contract_manager: ContractManager,
contract_address: Address,
contract_name: str,
topics: List,
from_block: BlockNumber,
to_block: BlockNumber,
) -> List[Dict]:
filter_params = {
'fromBlock': from_block,
'toBlock': to_block,
'address': to_checksum_address(contract_address),
'topics': topics,
}
events = web3.eth.getLogs(filter_params)
contract_abi = contract_manager.get_contract_abi(contract_name)
return [
decode_event(
abi=contract_abi,
log_=raw_event,
)
for raw_event in events
]
|
Returns events emitted by a contract for a given event name, within a certain range.
Args:
web3: A Web3 instance
contract_manager: A contract manager
contract_address: The address of the contract to be filtered, can be `None`
contract_name: The name of the contract
topics: The topics to filter for
from_block: The block to start searching for events
to_block: The block to stop searching for events
Returns:
All matching events
|
juraj-google-style
|