| code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses 3 values) |
|---|---|---|
def mp_atan2(y, x):
return 'if((x)>0, atan((y)/(x)), if(((x)<0) and ((y)>=0), atan((y)/(x))+pi, if(((x)<0) and ((y)<0), atan((y)/(x))-pi, if(((x)==0) and ((y)>0), pi/2, if(((x)==0) and ((y)<0), -pi/2, 0)))))'.replace(
'pi', str(math.pi)).replace('y', y).replace('x', x)
|
muparser atan2 function
Implements an atan2(y,x) function for older muparser versions (<2.1.0);
atan2 was added as a built-in function in muparser 2.1.0
Args:
y (str): y argument of the atan2(y,x) function
x (str): x argument of the atan2(y,x) function
Returns:
A muparser string that calculates atan2(y,x)
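Example (illustrative; 'a' and 'b' stand in for muparser sub-expressions):
>>> mp_atan2('a', 'b').startswith('if((b)>0, atan((a)/(b))')
True
Note that plain string replacement is used, so arguments that themselves
contain the letters 'y' or 'x' will be rewritten as well ('y' is replaced
before 'x').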
|
juraj-google-style
|
def _calculateEncodingKey(comparator):
encodingName = None
for (k, v) in list(_encodings.items()):
if (v == comparator):
encodingName = k
break
return encodingName
|
Gets the first key of all available encodings where the corresponding
value matches the comparator.
Args:
comparator (string): A view name for an encoding.
Returns:
str: A key for a specific encoding used by python.
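Example (hypothetical; assumes the module-level ``_encodings`` map
contains an entry ``{'utf_8': 'UTF-8'}``):
>>> _calculateEncodingKey('UTF-8')
'utf_8'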
|
codesearchnet
|
def state_province_region(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} needs to be of type str for field `state_province_region`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `state_province_region`')
self._state_province_region = value
|
Corresponds to IDD Field `state_province_region`
Args:
value (str): value for IDD Field `state_province_region`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def AddServiceDescriptor(self, service_desc):
if (not isinstance(service_desc, descriptor.ServiceDescriptor)):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._service_descriptors[service_desc.full_name] = service_desc
|
Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
|
codesearchnet
|
def assign_selective_dynamics(self, slab):
sd_list = [([False, False, False] if (site.properties['surface_properties'] == 'subsurface') else [True, True, True]) for site in slab.sites]
new_sp = slab.site_properties
new_sp['selective_dynamics'] = sd_list
return slab.copy(site_properties=new_sp)
|
Helper function to assign selective dynamics site_properties
based on surface, subsurface site properties
Args:
slab (Slab): slab for which to assign selective dynamics
|
codesearchnet
|
def validate_log_output(self, passed, db_data, user_data, oper):
truncate = self.args.truncate
if ((db_data is not None) and passed):
if (isinstance(db_data, string_types) and (len(db_data) > truncate)):
db_data = db_data[:truncate]
elif isinstance(db_data, list):
db_data_truncated = []
for d in db_data:
if ((d is not None) and isinstance(d, string_types) and (len(d) > truncate)):
db_data_truncated.append('{} ...'.format(d[:self.args.truncate]))
else:
db_data_truncated.append(d)
db_data = db_data_truncated
if ((user_data is not None) and passed):
if (isinstance(user_data, string_types) and (len(user_data) > truncate)):
user_data = user_data[:self.args.truncate]
elif isinstance(user_data, list):
user_data_truncated = []
for u in user_data:
if ((u is not None) and isinstance(u, string_types) and (len(u) > truncate)):
user_data_truncated.append('{} ...'.format(u[:self.args.truncate]))
else:
user_data_truncated.append(u)
user_data = user_data_truncated
self.log.info('[validate] DB Data : ({}), Type: [{}]'.format(db_data, type(db_data)))
self.log.info('[validate] Operator : ({})'.format(oper))
self.log.info('[validate] User Data : ({}), Type: [{}]'.format(user_data, type(user_data)))
if passed:
self.log.info('[validate] Results : Passed')
else:
self.log.error('[validate] Results : Failed')
if ((db_data is not None) and (user_data is not None) and (oper in ['eq', 'ne'])):
try:
diff_count = 0
for (i, diff) in enumerate(difflib.ndiff(db_data, user_data)):
if (diff[0] == ' '):
continue
elif (diff[0] == '-'):
self.log.info('[validate] Diff : Missing data at index {}'.format(i))
elif (diff[0] == '+'):
self.log.info('[validate] Diff : Extra data at index {}'.format(i))
if (diff_count > self.max_diff):
self.log.info('Max number of differences reached.')
break
diff_count += 1
except TypeError:
pass
except KeyError:
pass
if self.args.halt_on_fail:
raise RuntimeError('Failed validating data.')
|
Format the validation log output to be easier to read.
Args:
passed (bool): The results of the validation test.
db_data (str): The data stored in Redis.
user_data (str): The user provided data.
oper (str): The comparison operator.
Raises:
RuntimeError: Raise error on validation failure if halt_on_fail is True.
|
codesearchnet
|
def depth_september_average_ground_temperature(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} needs to be of type float for field `depth_september_average_ground_temperature`'.format(value))
self._depth_september_average_ground_temperature = value
|
Corresponds to IDD Field
`depth_september_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_september_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def from_json(cls, json):
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"),
json.get("hooks_class_name"))
return mapreduce_spec
|
Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
|
juraj-google-style
|
def take(self, count=1):
if self.closed():
raise ValueError('Attempt to call take() on a closed Queryable.')
count = max(0, count)
return self._create(itertools.islice(self, count))
|
Returns a specified number of elements from the start of a sequence.
If the source sequence contains fewer elements than requested only the
available elements will be returned and no exception will be raised.
Note: This method uses deferred execution.
Args:
count: An optional number of elements to take. The default is one.
Returns:
A Queryable over the first count elements of the source sequence,
or all elements of the source, whichever is fewer.
Raises:
ValueError: If the Queryable is closed.
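Example (illustrative; assumes an asq-style ``query`` initiator that
builds a Queryable over a list):
>>> query([1, 2, 3, 4, 5]).take(3).to_list()
[1, 2, 3]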
|
codesearchnet
|
def _compute_merkle_root(self, required_state_root):
state_hash = None
if (self._previous_valid_batch_c_id is not None):
publishing_or_genesis = (self._always_persist or (required_state_root is None))
state_hash = self._squash(state_root=self._previous_state_hash, context_ids=[self._previous_valid_batch_c_id], persist=self._always_persist, clean_up=publishing_or_genesis)
if (self._always_persist is True):
return state_hash
if (state_hash == required_state_root):
self._squash(state_root=self._previous_state_hash, context_ids=[self._previous_valid_batch_c_id], persist=True, clean_up=True)
return state_hash
|
Computes the merkle root of the state changes in the context
corresponding with _previous_valid_batch_c_id as applied to
_previous_state_hash.
Args:
required_state_root (str): The merkle root that these txns
should equal.
Returns:
state_hash (str): The merkle root calculated from the previous
state hash and the state changes from the context_id
|
codesearchnet
|
def _AddCredentialConfiguration(
self, path_spec, credential_type, credential_data):
credential_configuration = configurations.CredentialConfiguration(
credential_data=credential_data, credential_type=credential_type,
path_spec=path_spec)
self._credential_configurations.append(credential_configuration)
|
Adds a credential configuration.
Args:
path_spec (dfvfs.PathSpec): path specification.
credential_type (str): credential type.
credential_data (bytes): credential data.
|
juraj-google-style
|
def signCertAs(self, cert, signas):
cakey = self.getCaKey(signas)
if cakey is None:
raise s_exc.NoCertKey('Missing .key for %s' % signas)
cacert = self.getCaCert(signas)
if cacert is None:
raise s_exc.NoCertKey('Missing .crt for %s' % signas)
cert.set_issuer(cacert.get_subject())
cert.sign(cakey, self.signing_digest)
|
Signs a certificate with a CA keypair.
Args:
cert (OpenSSL.crypto.X509): The certificate to sign.
signas (str): The name of the CA keypair to sign the certificate with.
Examples:
Sign a certificate with the CA "myca":
cdir.signCertAs(mycert, 'myca')
Returns:
None
|
juraj-google-style
|
def parse_json_path(self, jsonpath):
if jsonpath not in self.parsed:
try:
self.parsed[jsonpath] = self.parser(jsonpath)
except Exception:
self.log("Invalid Json Path: " + jsonpath, "error")
raise InvalidJsonPathError("Invalid Json Path")
return self.parsed[jsonpath]
|
Parse a jsonpath
Args:
jsonpath: str
Returns: a parsed json path
|
juraj-google-style
|
class CategoricalHinge(reduction_metrics.MeanMetricWrapper):
def __init__(self, name='categorical_hinge', dtype=None):
super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
self._direction = 'down'
def get_config(self):
return {'name': self.name, 'dtype': self.dtype}
|
Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.2
|
github-repos
|
async def complete_task(context, result):
args = [get_task_id(context.claim_task), get_run_id(context.claim_task)]
reversed_statuses = get_reversed_statuses(context)
try:
if (result == 0):
log.info('Reporting task complete...')
response = (await context.temp_queue.reportCompleted(*args))
elif ((result != 1) and (result in reversed_statuses)):
reason = reversed_statuses[result]
log.info('Reporting task exception {}...'.format(reason))
payload = {'reason': reason}
response = (await context.temp_queue.reportException(*args, payload))
else:
log.info('Reporting task failed...')
response = (await context.temp_queue.reportFailed(*args))
log.debug('Task status response:\n{}'.format(pprint.pformat(response)))
except taskcluster.exceptions.TaskclusterRestFailure as exc:
if (exc.status_code == 409):
log.info('409: not reporting complete/failed.')
else:
raise
|
Mark the task as completed in the queue.
Decide whether to call reportCompleted, reportFailed, or reportException
based on the exit status of the script.
If the task has expired or been cancelled, we'll get a 409 status.
Args:
context (scriptworker.context.Context): the scriptworker context.
result (int): the exit status of the script.
Raises:
taskcluster.exceptions.TaskclusterRestFailure: on non-409 error.
|
codesearchnet
|
def _update_hasher(hasher, data, types=True):
if isinstance(data, (tuple, list, zip)):
needs_iteration = True
else:
needs_iteration = any(check(data) for check in
_HASHABLE_EXTENSIONS.iterable_checks)
if needs_iteration:
SEP = b'_,_'
ITER_PREFIX = b'_[_'
ITER_SUFFIX = b'_]_'
iter_ = iter(data)
hasher.update(ITER_PREFIX)
try:
for item in iter_:
prefix, hashable = _convert_to_hashable(item, types)
binary_data = prefix + hashable + SEP
hasher.update(binary_data)
except TypeError:
_update_hasher(hasher, item, types)
for item in iter_:
_update_hasher(hasher, item, types)
hasher.update(SEP)
hasher.update(ITER_SUFFIX)
else:
prefix, hashable = _convert_to_hashable(data, types)
binary_data = prefix + hashable
hasher.update(binary_data)
|
Converts `data` into a byte representation and calls update on the hasher
`hashlib.HASH` algorithm.
Args:
hasher (HASH): instance of a hashlib algorithm
data (object): ordered data with structure
types (bool): include type prefixes in the hash
Example:
>>> hasher = hashlib.sha512()
>>> data = [1, 2, ['a', 2, 'c']]
>>> _update_hasher(hasher, data)
>>> print(hasher.hexdigest()[0:8])
e2c67675
2ba8d82b
|
juraj-google-style
|
def Columns(iterable):
columns = sorted(iterable)
return '({})'.format(', '.join(('`{}`'.format(col) for col in columns)))
|
Returns a string of column names for MySQL INSERTs.
To account for Iterables with undefined order (dicts before Python 3.6),
this function sorts column names.
Examples:
>>> Columns({"password": "foo", "name": "bar"})
u'(`name`, `password`)'
Args:
iterable: The iterable of strings to be used as column names.
Returns: A string containing a tuple of sorted comma-separated column names.
|
codesearchnet
|
def cut_matrix(self, n):
return connectivity.relevant_connections(n, self.from_nodes, self.to_nodes)
|
Compute the cut matrix for this cut.
The cut matrix is a square matrix which represents connections severed
by the cut.
Args:
n (int): The size of the network.
Example:
>>> cut = Cut((1,), (2,))
>>> cut.cut_matrix(3)
array([[0., 0., 0.],
[0., 0., 1.],
[0., 0., 0.]])
|
codesearchnet
|
def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
if run_key not in graph_dict:
graph_dict[run_key] = dict()
graph_dict[run_key][tf.compat.as_str(device_name)] = (
debug_graphs_helper.DebugGraphWrapper(graph_def))
|
Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops.
|
juraj-google-style
|
def _instantiate_data_type(self, data_type_class, data_type_args, loc):
assert issubclass(data_type_class, DataType), \
'Expected stone.data_type.DataType, got %r' % data_type_class
argspec = inspect.getargspec(data_type_class.__init__)
argspec.args.remove('self')
num_args = len(argspec.args)
num_defaults = len(argspec.defaults or ())
pos_args, kw_args = data_type_args
if (num_args - num_defaults) > len(pos_args):
raise InvalidSpec(
'Missing positional argument %s for %s type' %
(quote(argspec.args[len(pos_args)]),
quote(data_type_class.__name__)),
*loc)
elif (num_args - num_defaults) < len(pos_args):
raise InvalidSpec(
'Too many positional arguments for %s type' %
quote(data_type_class.__name__),
*loc)
args = {}
for i, key in enumerate(argspec.args):
args[key] = (i >= num_args - num_defaults)
for key in kw_args:
if key not in args:
raise InvalidSpec('Unknown argument %s to %s type.' %
(quote(key), quote(data_type_class.__name__)),
*loc)
if not args[key]:
raise InvalidSpec(
'Positional argument %s cannot be specified as a '
'keyword argument.' % quote(key),
*loc)
del args[key]
try:
return data_type_class(*pos_args, **kw_args)
except ParameterError as e:
raise InvalidSpec('Bad argument to %s type: %s' %
(quote(data_type_class.__name__), e.args[0]),
*loc)
|
Responsible for instantiating a data type with additional attributes.
This method ensures that the specified attributes are valid.
Args:
data_type_class (DataType): The class to instantiate.
data_type_args (tuple): A two-element tuple of (positional arguments,
keyword arguments) that will be passed into the constructor of
data_type_class.
Returns:
stone.data_type.DataType: A parameterized instance.
|
juraj-google-style
|
def apply_indexed_slices_grad(self, grad, local_step=0, name=None):
return self.apply_grad(grad_indices=grad.indices, grad_values=grad.values, grad_shape=grad.dense_shape, local_step=local_step, name=name)
|
Attempts to apply a gradient to the accumulator.
The attempt is silently dropped if the gradient is stale, i.e., `local_step`
is less than the accumulator's global time step.
Args:
grad: The gradient `IndexedSlices` to be applied.
local_step: Time step at which the gradient was computed.
name: Optional name for the operation.
Returns:
The operation that (conditionally) applies a gradient to the accumulator.
Raises:
InvalidArgumentError: If grad is of the wrong shape
|
github-repos
|
class TrOCRProcessor(ProcessorMixin):
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if 'feature_extractor' in kwargs:
warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
feature_extractor = kwargs.pop('feature_extractor')
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self._in_target_context_manager = False
def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs]) -> BatchFeature:
if self._in_target_context_manager:
return self.current_processor(images, **kwargs)
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
output_kwargs = self._merge_kwargs(TrOCRProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
if images is not None:
inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
if text is not None:
encodings = self.tokenizer(text, **output_kwargs['text_kwargs'])
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs['labels'] = encodings['input_ids']
return inputs
def batch_decode(self, *args, **kwargs):
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.tokenizer.decode(*args, **kwargs)
@contextmanager
def as_target_processor(self):
warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your image inputs, or in a separate call).')
self._in_target_context_manager = True
self.current_processor = self.tokenizer
yield
self.current_processor = self.image_processor
self._in_target_context_manager = False
@property
def feature_extractor_class(self):
warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
return self.image_processor
|
Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.
[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for
more information.
Args:
image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
|
github-repos
|
def add_showcase(self, showcase, showcases_to_check=None):
dataset_showcase = self._get_dataset_showcase_dict(showcase)
if (showcases_to_check is None):
showcases_to_check = self.get_showcases()
for showcase in showcases_to_check:
if (dataset_showcase['showcase_id'] == showcase['id']):
return False
showcase = hdx.data.showcase.Showcase({'id': dataset_showcase['showcase_id']}, configuration=self.configuration)
showcase._write_to_hdx('associate', dataset_showcase, 'package_id')
return True
|
Add dataset to showcase
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or showcase metadata from a Showcase object or dictionary
showcases_to_check (List[Showcase]): list of showcases against which to check existence of showcase. Defaults to showcases containing dataset.
Returns:
bool: True if the showcase was added, False if already present
|
codesearchnet
|
def _add_file_argument(parser, types, args, custom_kwargs=None):
custom_kwargs = custom_kwargs or {}
arg = args[-1]
dest = custom_kwargs.get('dest', arg.lstrip(_ARG_PREFIX).replace('-', '_'))
kwargs = {'type': types.get(dest), 'action': 'store', 'default': config.ITEMS[dest].default, 'help': config.ITEMS[dest].comment}
kwargs.update(custom_kwargs)
if kwargs['type'] is None:
del kwargs['type']
if arg.startswith(_ARG_PREFIX):
kwargs['dest'] = dest
elif 'type' in kwargs:
kwargs['default'] = kwargs['type'](kwargs['default'])
parser.add_argument(*args, **kwargs)
|
Add a file-configurable option to the parser.
Args:
parser: The parser.
types: A map from option destination to type.
args: The option's name(s). Either a 2-tuple of (short_arg, arg) or a
1-tuple of (arg,).
custom_kwargs: The option's custom kwargs.
|
github-repos
|
def is_parsable(url):
try:
parsed = urlparse(url)
URLHelper.__cache[url] = parsed
return True
except Exception:
return False
|
Check if the given URL is parsable (make sure it's a valid URL). If it is parsable, also cache it.
Args:
url (str): The URL to check.
Returns:
bool: True if parsable, False otherwise.
|
juraj-google-style
|
def _expand_dims(x, input_shape, output_shape):
verify_no_new_dims([output_shape], input_shape)
if ((input_shape == output_shape) or (input_shape.ndims == 0)):
return x
perm = [input_shape.dims.index(d) for d in output_shape.dims if (d in input_shape.dims)]
x = tf.transpose(x, perm)
for (i, d) in enumerate(output_shape.dims):
if (d not in input_shape.dims):
x = tf.expand_dims(x, i)
return x
|
Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor
|
codesearchnet
|
def global_horizontal_radiation(self, value=9999.0):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} needs to be of type float '
'for field `global_horizontal_radiation`'.format(value))
if value < 0.0:
raise ValueError('value needs to be greater than or equal to 0.0 '
'for field `global_horizontal_radiation`')
self._global_horizontal_radiation = value
|
Corresponds to IDD Field `global_horizontal_radiation`
Args:
value (float): value for IDD Field `global_horizontal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __init__(self, protocol, node, peers, ksize, alpha):
self.protocol = protocol
self.ksize = ksize
self.alpha = alpha
self.node = node
self.nearest = NodeHeap(self.node, self.ksize)
self.last_ids_crawled = []
log.info("creating spider with peers: %s", peers)
self.nearest.push(peers)
|
Create a new C{SpiderCrawl}er.
Args:
protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
node: A :class:`~kademlia.node.Node` representing the key we're
looking for
peers: A list of :class:`~kademlia.node.Node` instances that
provide the entry point for the network
ksize: The value for k based on the paper
alpha: The value for alpha based on the paper
|
juraj-google-style
|
def discount_factor(self, date: Optional[types.DateTensor]=None, time: Optional[types.FloatTensor]=None, **kwargs) -> tf.Tensor:
pass
|
Returns the discount factor to a specified set of dates.
Args:
date: Optional input specifying the dates at which to evaluate the
discount factors. The function expects either `date` or `time` to be
specified.
time: Optional input specifying the times at which to evaluate the
discount factors. The function expects either `date` or `time` to be
specified.
**kwargs: The context object, e.g., curve_type.
Returns:
A `Tensor` of the same shape as `date` (or `time`) with the corresponding discount
factors.
|
github-repos
|
def by_leb(blocks):
slist_len = len(blocks)
slist = ['x'] * slist_len
for block in blocks:
if blocks[block].leb_num >= slist_len:
add_elements = blocks[block].leb_num - slist_len + 1
slist += (['x'] * add_elements)
slist_len = len(slist)
slist[blocks[block].leb_num] = block
return slist
|
Sort blocks by Logical Erase Block number.
Arguments:
List:blocks -- List of block objects to sort.
Returns:
List -- Indexes of blocks sorted by LEB.
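Example (illustrative; ``Block`` is a hypothetical stand-in with a
``leb_num`` attribute, assuming ``collections`` is imported):
>>> Block = collections.namedtuple('Block', 'leb_num')
>>> by_leb({'b0': Block(2), 'b1': Block(0)})
['b1', 'x', 'b0']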
|
juraj-google-style
|
def _get_timestamp_ms(when):
if (when is None):
return None
ms_since_epoch = float((time.mktime(when.utctimetuple()) * 1000.0))
ms_since_epoch += (when.microsecond / 1000.0)
return int(ms_since_epoch)
|
Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
|
codesearchnet
|
def node_name(self, value):
if value == self._defaults['ai.internal.nodeName'] and 'ai.internal.nodeName' in self._values:
del self._values['ai.internal.nodeName']
else:
self._values['ai.internal.nodeName'] = value
|
The node_name property.
Args:
value (string): The property value.
|
juraj-google-style
|
def dump(destination, ms, single=False, pretty_print=False, **kwargs):
text = dumps(ms,
single=single,
pretty_print=pretty_print,
**kwargs)
if hasattr(destination, 'write'):
print(text, file=destination)
else:
with open(destination, 'w') as fh:
print(text, file=fh)
|
Serialize Xmrs objects to the Prolog representation and write to a file.
Args:
destination: filename or file object where data will be written
ms: an iterator of Xmrs objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single Xmrs object
instead of as an iterator
pretty_print: if `True`, add newlines and indentation
|
juraj-google-style
|
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
|
Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
juraj-google-style
|
def _wait_for_response(self, requests):
failed_requests = []
responses_for_requests = OrderedDict.fromkeys(requests)
for retry in range(self._max_retry):
try:
logging.debug('Try #{0}'.format(retry + 1))
self._availability_limiter.map_with_retries(requests, responses_for_requests)
failed_requests = []
for request, response in responses_for_requests.items():
if self._drop_404s and response is not None and response.status_code == 404:
logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url))
elif not response:
failed_requests.append((request, response))
if not failed_requests:
break
logging.warning('Try #{0}: of {1} request(s), {2} succeeded.'.format(
retry + 1, len(requests), len(requests) - len(failed_requests),
))
requests = [fr[0] for fr in failed_requests]
except InvalidRequestError:
raise
except Exception as e:
logging.exception('Try #{0} raised an exception.'.format(retry + 1))
pass
if failed_requests:
logging.warning('Still {0} failed request(s) after {1} retries:'.format(
len(failed_requests), self._max_retry,
))
for failed_request, failed_response in failed_requests:
if failed_response is not None:
failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace')
logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format(
failed_request.url, failed_response.status_code, failed_response_text,
))
else:
logging.warning('Request to {0} failed with None response.'.format(failed_request.url))
return list(responses_for_requests.values())
|
Issues a batch of requests and waits for the responses.
If some of the requests fail it will retry the failed ones up to `_max_retry` times.
Args:
requests - A list of requests
Returns:
A list of `requests.models.Response` objects
Raises:
InvalidRequestError - if any of the requests returns "403 Forbidden" response
|
juraj-google-style
|
def parse(self, argument):
if (not self.enum_values):
return argument
elif self.case_sensitive:
if (argument not in self.enum_values):
raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_values)))
else:
return argument
elif (argument.upper() not in [value.upper() for value in self.enum_values]):
raise ValueError(('value should be one of <%s>' % '|'.join(self.enum_values)))
else:
return [value for value in self.enum_values if (value.upper() == argument.upper())][0]
|
Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
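Example (hypothetical; assumes the parser was constructed with
``enum_values=['apple', 'banana']`` and ``case_sensitive=False``):
>>> parser.parse('APPLE')
'apple'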
|
codesearchnet
|
def get_covalent_bonds(self, tol=0.2):
bonds = []
for (site1, site2) in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds
|
Determines the covalent bonds in a molecule.
Args:
tol (float): The tolerance used to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
|
codesearchnet
|
def set_signal_type(self, sig_type):
if isinstance(sig_type, str):
sig_type = [sig_type]
self.snr_input.signal_type = sig_type
return
|
Set the signal type of interest.
Sets the signal type for which the SNR is calculated.
This means inspiral, merger, and/or ringdown.
Args:
sig_type (str or list of str): Signal type desired by user.
Choices are `ins`, `mrg`, `rd`, `all` for circular waveforms created with PhenomD.
If eccentric waveforms are used, must be `all`.
|
juraj-google-style
|
def parse_client_table(redis_client):
NIL_CLIENT_ID = ray.ObjectID.nil().binary()
message = redis_client.execute_command('RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.CLIENT, '', NIL_CLIENT_ID)
if (message is None):
return []
node_info = {}
gcs_entry = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)
ordered_client_ids = []
for i in range(gcs_entry.EntriesLength()):
client = ray.gcs_utils.ClientTableData.GetRootAsClientTableData(gcs_entry.Entries(i), 0)
resources = {decode(client.ResourcesTotalLabel(i)): client.ResourcesTotalCapacity(i) for i in range(client.ResourcesTotalLabelLength())}
client_id = ray.utils.binary_to_hex(client.ClientId())
if (not client.IsInsertion()):
assert (client_id in node_info), 'Client removed not found!'
assert node_info[client_id]['IsInsertion'], 'Unexpected duplicate removal of client.'
else:
ordered_client_ids.append(client_id)
node_info[client_id] = {'ClientID': client_id, 'IsInsertion': client.IsInsertion(), 'NodeManagerAddress': decode(client.NodeManagerAddress(), allow_none=True), 'NodeManagerPort': client.NodeManagerPort(), 'ObjectManagerPort': client.ObjectManagerPort(), 'ObjectStoreSocketName': decode(client.ObjectStoreSocketName(), allow_none=True), 'RayletSocketName': decode(client.RayletSocketName(), allow_none=True), 'Resources': resources}
return [node_info[client_id] for client_id in ordered_client_ids]
|
Read the client table.
Args:
redis_client: A client to the primary Redis shard.
Returns:
A list of information about the nodes in the cluster.
|
codesearchnet
|
def serialize_date(value):
if not value:
return None
elif isinstance(value, datetime.datetime):
return value.date().isoformat()
elif isinstance(value, datetime.date):
return value.isoformat()
else:
return parse_date(value).isoformat()
|
Attempts to convert `value` into an ``xs:date`` string. If `value` is
``None``, ``None`` will be returned.
Args:
value: A date value. This can be a string, datetime.date, or
datetime.datetime object.
Returns:
An ``xs:date`` formatted timestamp string.
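Example (illustrative, assuming ``datetime`` is imported):
>>> serialize_date(datetime.datetime(2020, 1, 2, 15, 30))
'2020-01-02'
>>> serialize_date(None) is None
True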
|
juraj-google-style
|
def InitializeDebuggeeLabels(self, flags):
self._debuggee_labels = {}
for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
for name in var_names:
value = os.environ.get(name)
if value:
if ((label == labels.Debuggee.MODULE) and (value == 'default')):
break
self._debuggee_labels[label] = value
break
if flags:
self._debuggee_labels.update({name: value for (name, value) in six.iteritems(flags) if (name in _DEBUGGEE_LABELS)})
self._debuggee_labels['projectid'] = self._project_id
|
Initialize debuggee labels from environment variables and flags.
The caller passes all the flags that the debuglet got. This function
will only use the flags used to label the debuggee. Flags take precedence
over environment variables.
Debuggee description is formatted from available flags.
Args:
flags: dictionary of debuglet command line flags.
|
codesearchnet
|
def prepend_block(self, node, reverse=False):
if not isinstance(node, grammar.STATEMENTS):
raise ValueError
if reverse:
self.to_prepend_block[-1].appendleft(node)
else:
self.to_prepend_block[-1].append(node)
|
Prepend a statement to the current block.
Args:
node: The statement to prepend.
reverse: When called multiple times, this flag determines whether the
statement should be prepended or appended to the already inserted
statements.
Raises:
ValueError: If the given node is not a statement.
|
juraj-google-style
|
def distance(self, method='haversine'):
distances = []
for segment in self:
if len(segment) < 2:
distances.append([])
else:
distances.append(segment.distance(method))
return distances
|
Calculate distances between locations in segments.
Args:
method (str): Method used to calculate distance
Returns:
list of list of float: Groups of distance between points in
segments
|
juraj-google-style
|
def serialize(self):
df = self.copy()
df['scored_calls'] = df['scored_calls'].apply((lambda x: json.dumps(x)))
df['channel_values'] = df['channel_values'].apply((lambda x: json.dumps(x)))
df['regions'] = df['regions'].apply((lambda x: json.dumps(x)))
df['phenotype_calls'] = df['phenotype_calls'].apply((lambda x: json.dumps(x)))
df['neighbors'] = df['neighbors'].apply((lambda x: json.dumps(x)))
df['frame_shape'] = df['frame_shape'].apply((lambda x: json.dumps(x)))
return df
|
Convert the data to one that can be saved in h5 structures
Returns:
pandas.DataFrame: like a cell data frame, but with complex-valued columns serialized to JSON strings.
|
codesearchnet
|
def _add_case(self, case_obj):
if self.case(case_obj['_id']):
raise IntegrityError(('Case %s already exists in database' % case_obj['_id']))
return self.case_collection.insert_one(case_obj)
|
Add a case to the database
If the case already exists, an IntegrityError is raised
Args:
case_obj(Case)
|
codesearchnet
|
def preprocess_bel_stmt(stmt: str) -> str:
stmt = stmt.strip()
stmt = re.sub(',+', ',', stmt)
stmt = re.sub(',', ', ', stmt)
stmt = re.sub(' +', ' ', stmt)
return stmt
|
Clean up basic formatting of BEL statement
Args:
stmt: BEL statement as single string
Returns:
cleaned BEL statement
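Example (illustrative):
>>> preprocess_bel_stmt(' p(HGNC:AKT1),,p(HGNC:EGFR)  ')
'p(HGNC:AKT1), p(HGNC:EGFR)'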
|
codesearchnet
|
def _compute_direction_numbers(dim):
m = np.empty((dim, 32), dtype=np.int32)
m[0, :] = np.ones(32, dtype=np.int32)
for k in range(dim - 1):
a_k = _PRIMITIVE_POLYNOMIAL_COEFFICIENTS[k]
deg = np.int32(np.floor(np.log2(a_k)))
m[k + 1, :deg] = _INITIAL_DIRECTION_NUMBERS[:deg, k]
for j in range(deg, 32):
m[k + 1, j] = m[k + 1, j - deg]
for i in range(deg):
if a_k >> i & 1:
m[k + 1, j] = np.bitwise_xor(m[k + 1, j], m[k + 1, j - deg + i] << deg - i)
return m
|
Returns array of direction numbers for dimension dim.
These are the m_kj values in the Joe & Kuo notes[1], not the v_kj values. So
these refer to the 'abuse of notation' mentioned in the notes -- it is a
matrix of integers, not floats. The variable names below are intended to match
the notation in the notes as closely as possible.
Args:
dim: int, dimension.
Returns:
`numpy.array` of direction numbers with `shape` [dim, 32].
|
github-repos
|
def constraint(self):
return self._constraint
|
Returns the constraint function associated with this variable.
Returns:
The constraint function that was passed to the variable constructor.
Can be `None` if no constraint was passed.
|
github-repos
|
def delete_individual(self, ind_obj):
logger.info("Deleting individual {0} from database"
.format(ind_obj.ind_id))
self.session.delete(ind_obj)
self.save()
return ind_obj
|
Delete an individual from the database
Args:
ind_obj (puzzle.models.Individual): initialized individual model
|
juraj-google-style
|
def CheckDirectory(self, path, extension='yaml'):
result = True
if extension:
glob_spec = os.path.join(path, '*.{0:s}'.format(extension))
else:
glob_spec = os.path.join(path, '*')
for definition_file in sorted(glob.glob(glob_spec)):
if (not self.CheckFile(definition_file)):
result = False
return result
|
Validates definition files in a directory.
Args:
path (str): path of the directory containing definition files.
extension (Optional[str]): extension of the filenames to read.
Returns:
bool: True if the directory contains valid definitions.
|
codesearchnet
|
def pdf(self, resource_id):
self.resource_id(str(resource_id))
self._request_uri = '{}/pdf'.format(self._request_uri)
|
Update the request URI to get the pdf for this resource.
Args:
resource_id (integer): The group id.
|
juraj-google-style
|
def check_config_file(msg):
with jsonconfig.Config("messages", indent=4) as cfg:
verify_profile_name(msg, cfg)
retrieve_data_from_config(msg, cfg)
if msg._auth is None:
retrieve_pwd_from_config(msg, cfg)
if msg.save:
update_config_data(msg, cfg)
update_config_pwd(msg, cfg)
|
Checks the config.json file for default settings and auth values.
Args:
:msg: (Message class) an instance of a message class.
|
juraj-google-style
|
def input_node_from_schema(schema: Schema, same_sampling_as: Optional[EventSetNode]=None, name: Optional[str]=None) -> EventSetNode:
return input_node(features=schema.features, indexes=schema.indexes, is_unix_timestamp=schema.is_unix_timestamp, same_sampling_as=same_sampling_as, name=name)
|
Creates an input [`EventSetNode`][temporian.EventSetNode] from a schema.
Usage example:
```python
>>> # Create two nodes with the same schema.
>>> a = tp.input_node(features=[("f1", tp.float64), ("f2", tp.str_)])
>>> b = tp.input_node_from_schema(a.schema)
```
Args:
schema: Schema of the node.
same_sampling_as: If set, the created EventSetNode is guaranteed to have the
same sampling as `same_sampling_as`. In this case, `indexes` and
`is_unix_timestamp` should not be provided. Some operators require
for input EventSetNodes to have the same sampling.
name: Name for the EventSetNode.
Returns:
EventSetNode with the given specifications.
|
github-repos
|
def remove(self, **kwargs):
return self.client.api.remove_container(self.id, **kwargs)
|
Remove this container. Similar to the ``docker rm`` command.
Args:
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
juraj-google-style
|
async def evaluate_trained_model(state):
return await evaluate_model(
state.train_model_path, state.best_model_path,
os.path.join(fsdb.eval_dir(), state.train_model_name), state.seed)
|
Evaluate the most recently trained model against the current best model.
Args:
state: the RL loop State instance.
|
juraj-google-style
|
def __init__(self, name, state_callback, restore_callback):
def _state_callback_wrapper():
with ops.init_scope():
return state_callback()
self._state_callback = _state_callback_wrapper
self._restore_callback = restore_callback
with ops.device('/cpu:0'):
self._save_string = constant_op.constant('', dtype=dtypes.string)
spec = saveable_object.SaveSpec(self._save_string, '', name, dtype=dtypes.string)
super(_PythonStringStateSaveable, self).__init__(self._save_string, [spec], name)
|
Configure saving.
Args:
name: The checkpoint key to write to.
state_callback: A function taking no arguments which returns a string.
This function is run every time a checkpoint is written.
restore_callback: A function taking a Python string, used to restore
state.
|
github-repos
|
def _GetGenericBasesLookupMap(self, node):
mapping = collections.defaultdict(list)
seen_bases = set()
bases = list(reversed(node.bases))
while bases:
base = bases.pop()
if base in seen_bases:
continue
seen_bases.add(base)
if isinstance(base, pytd.GenericType) and isinstance(base.base_type, pytd.ClassType):
mapping[base.base_type].append(base)
bases.extend(reversed(base.base_type.cls.bases))
elif isinstance(base, pytd.ClassType):
bases.extend(reversed(base.cls.bases))
return mapping
|
Get a lookup map for the generic bases of a class.
Gets a map from a pytd.ClassType to the list of pytd.GenericType bases of
the node that have that class as their base. This method does depth-first
traversal of the bases, which ensures that the order of elements in each
list is consistent with the node's MRO.
Args:
node: A pytd.Class node.
Returns:
A pytd.ClassType -> List[pytd.GenericType] map.
|
github-repos
|
def package_info(pkg_name):
indent = ' '
for (config, _) in _iter_packages():
if (pkg_name == config['name']):
print('Package:', pkg_name)
print(indent, 'Platform:', config['platform'])
print(indent, 'Version:', config['version'])
print(indent, 'Path:', config['path'])
print(indent, 'Worlds:')
for world in config['maps']:
world_info(world['name'], world_config=world, initial_indent=' ')
|
Prints the information of a package.
Args:
pkg_name (str): The name of the desired package to get information
about.
codesearchnet
|
def if_else(condition, when_true, otherwise):
if ((not isinstance(when_true, collections.abc.Iterable)) or isinstance(when_true, str)):
when_true = np.repeat(when_true, len(condition))
if ((not isinstance(otherwise, collections.abc.Iterable)) or isinstance(otherwise, str)):
otherwise = np.repeat(otherwise, len(condition))
assert ((len(condition) == len(when_true)) and (len(condition) == len(otherwise)))
if isinstance(when_true, pd.Series):
when_true = when_true.values
if isinstance(otherwise, pd.Series):
otherwise = otherwise.values
output = np.array([(when_true[i] if c else otherwise[i]) for (i, c) in enumerate(condition)])
return output
|
Wraps creation of a series based on if-else conditional logic into a function
call.
Provide a boolean vector condition, value(s) when true, and value(s)
when false, and a vector will be returned the same length as the conditional
vector according to the logical statement.
Args:
condition: A boolean vector representing the condition. This is often
a logical statement with a symbolic series.
when_true: A vector the same length as the condition vector or a single
value to apply when the condition is `True`.
otherwise: A vector the same length as the condition vector or a single
value to apply when the condition is `False`.
Example:
df = pd.DataFrame
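A minimal illustrative call (the original example above is truncated):
>>> if_else(np.array([True, False, True]), 'yes', 'no')
array(['yes', 'no', 'yes'], dtype='<U3')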
|
codesearchnet
|
def get_source_inputs(tensor):
if not hasattr(tensor, '_keras_history'):
return tensor
operation, node_index, _ = tensor._keras_history
if not operation or not operation._inbound_nodes:
return [tensor]
else:
node = operation._inbound_nodes[node_index]
if node.is_input:
return tree.flatten(node.output_tensors)
else:
source_tensors = []
for tensor in node.input_tensors:
previous_sources = get_source_inputs(tensor)
for x in previous_sources:
if all((x is not t for t in source_tensors)):
source_tensors.append(x)
return source_tensors
|
Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
Args:
tensor: The tensor to start from.
Returns:
List of input tensors.
|
github-repos
|
def __init__(self, session, flush_limit=100):
super(TcExLogHandler, self).__init__()
self.session = session
self.flush_limit = flush_limit
self.entries = []
|
Initialize Class properties.
Args:
session (Request.Session): The preconfigured instance of Session for ThreatConnect API.
flush_limit (int): The limit to flush batch logs to the API.
|
juraj-google-style
|
def _GetDelayImportTimestamps(self, pefile_object):
delay_import_timestamps = []
if not hasattr(pefile_object, 'DIRECTORY_ENTRY_DELAY_IMPORT'):
return delay_import_timestamps
for importdata in pefile_object.DIRECTORY_ENTRY_DELAY_IMPORT:
dll_name = importdata.dll
try:
dll_name = dll_name.decode('ascii')
except UnicodeDecodeError:
dll_name = dll_name.decode('ascii', errors='replace')
timestamp = getattr(importdata.struct, 'dwTimeStamp', 0)
delay_import_timestamps.append([dll_name, timestamp])
return delay_import_timestamps
|
Retrieves timestamps from delay import entries, if available.
Args:
pefile_object (pefile.PE): pefile object.
Returns:
list[tuple[str, int]]: pairs where the first element is the name of the
DLL being imported and the second is the timestamp of the entry.
|
juraj-google-style
|
def copy_clean(node, preserve_annos=None):
return CleanCopier(preserve_annos).copy(node)
|
Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
|
github-repos
|
async def executor(func, *args, **kwargs):
def syncfunc():
return func(*args, **kwargs)
loop = asyncio.get_running_loop()
return (await loop.run_in_executor(None, syncfunc))
|
Execute a function in an executor thread.
Args:
func: The function to execute.
*args: Positional arguments to pass to the function.
**kwargs: Keyword arguments to pass to the function.
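Example (illustrative; must be awaited from inside a running event loop):
result = await executor(sum, [1, 2, 3])  # returns 6 without blocking the loop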
|
codesearchnet
|
def IsPipe(self):
if (self._stat_object is None):
self._stat_object = self._GetStat()
if (self._stat_object is not None):
self.entry_type = self._stat_object.type
return (self.entry_type == definitions.FILE_ENTRY_TYPE_PIPE)
|
Determines if the file entry is a pipe.
Returns:
bool: True if the file entry is a pipe.
|
codesearchnet
|
def WriteEventBody(self, event):
for field_name in self._fields:
if (field_name == 'datetime'):
output_value = self._FormatDateTime(event)
else:
output_value = self._dynamic_fields_helper.GetFormattedField(event, field_name)
output_value = self._RemoveIllegalXMLCharacters(output_value)
column_index = self._fields.index(field_name)
self._column_widths.setdefault(column_index, 0)
if (field_name == 'datetime'):
column_width = min(self._MAX_COLUMN_WIDTH, (len(self._timestamp_format) + 2))
else:
column_width = min(self._MAX_COLUMN_WIDTH, (len(output_value) + 2))
self._column_widths[column_index] = max(self._MIN_COLUMN_WIDTH, self._column_widths[column_index], column_width)
self._sheet.set_column(column_index, column_index, self._column_widths[column_index])
if ((field_name == 'datetime') and isinstance(output_value, datetime.datetime)):
self._sheet.write_datetime(self._current_row, column_index, output_value)
else:
self._sheet.write(self._current_row, column_index, output_value)
self._current_row += 1
|
Writes the body of an event object to the spreadsheet.
Args:
event (EventObject): event.
|
codesearchnet
|
def find_parents(root, path, names):
if (not root):
return []
if (not os.path.commonprefix((root, path))):
log.warning('Path %s not in %s', path, root)
return []
dirs = ([root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep))
while dirs:
search_dir = os.path.join(*dirs)
existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
if existing:
return existing
dirs.pop()
return []
|
Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the root.
|
codesearchnet
|
def normalize_name(name, overrides=None):
normalized_name = name.title()
if overrides:
override_map = dict([(name.title(), name) for name in overrides])
return override_map.get(normalized_name, normalized_name)
else:
return normalized_name
|
Normalize the key name to title case.
For example, ``normalize_name('content-id')`` will become ``Content-Id``
Args:
name (str): The name to normalize.
overrides (set, sequence): A set or sequence containing keys that
should be cased to themselves. For example, passing
``{'WARC-Type'}`` will normalize any key named "warc-type" to
``WARC-Type`` instead of the default ``Warc-Type``.
Returns:
str
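Example (illustrative):
>>> normalize_name('content-id')
'Content-Id'
>>> normalize_name('warc-type', overrides={'WARC-Type'})
'WARC-Type'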
|
codesearchnet
|
def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators):
number_of_type_indicators = len(type_indicators)
if number_of_type_indicators == 0:
return
self.processing_status = definitions.STATUS_INDICATOR_COLLECTING
if number_of_type_indicators > 1:
display_name = mediator.GetDisplayName()
logger.debug((
'Found multiple format type indicators: {0:s} for '
'archive file: {1:s}').format(type_indicators, display_name))
for type_indicator in type_indicators:
if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR:
archive_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location='/',
parent=path_spec)
elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP:
archive_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/',
parent=path_spec)
else:
archive_path_spec = None
warning_message = (
'unsupported archive format type indicator: {0:s}').format(
type_indicator)
mediator.ProduceExtractionWarning(
warning_message, path_spec=path_spec)
if archive_path_spec:
try:
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
[archive_path_spec], resolver_context=mediator.resolver_context)
for generated_path_spec in path_spec_generator:
if self._abort:
break
event_source = event_sources.FileEntryEventSource(
path_spec=generated_path_spec)
event_source.file_entry_type = (
dfvfs_definitions.FILE_ENTRY_TYPE_FILE)
mediator.ProduceEventSource(event_source)
self.last_activity_timestamp = time.time()
except (IOError, errors.MaximumRecursionDepth) as exception:
warning_message = (
'unable to process archive file with error: {0!s}').format(
exception)
mediator.ProduceExtractionWarning(
warning_message, path_spec=generated_path_spec)
|
Processes a data stream containing archive types such as: TAR or ZIP.
Args:
mediator (ParserMediator): mediates the interactions between
parsers and other components, such as storage and abort signals.
path_spec (dfvfs.PathSpec): path specification.
type_indicators(list[str]): dfVFS archive type indicators found in
the data stream.
|
juraj-google-style
|
def _determine_checkout_url(self, platform, action):
api_version = settings.API_CHECKOUT_VERSION
if platform == "test":
base_uri = settings.ENDPOINT_CHECKOUT_TEST
elif self.live_endpoint_prefix is not None and platform == "live":
base_uri = settings.ENDPOINT_CHECKOUT_LIVE_SUFFIX.format(
self.live_endpoint_prefix)
elif self.live_endpoint_prefix is None and platform == "live":
errorstring = ('Cannot determine the live checkout URL: no live endpoint prefix is set.')
raise AdyenEndpointInvalidFormat(errorstring)
if action == "paymentsDetails":
action = "payments/details"
if action == "paymentsResult":
action = "payments/result"
if action == "originKeys":
api_version = settings.API_CHECKOUT_UTILITY_VERSION
return '/'.join([base_uri, api_version, action])
|
This returns the Adyen API endpoint based on the provided platform,
service and action.
Args:
platform (str): Adyen platform, ie 'live' or 'test'.
action (str): the API action to perform.
|
juraj-google-style
|
def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool=False):
batch_size, height, width, hidden_dim = grid.shape
row_height = min(self.max_grid_row_position_embeddings, height)
row_position_ids = torch.arange(row_height, dtype=torch.long, device=grid.device)
row_position_embeddings = self.row_position_embeddings(row_position_ids)
row_shape = (1,) * (len(grid.shape) - 3) + (row_height, 1, hidden_dim)
row_position_embeddings = row_position_embeddings.view(*row_shape)
row_width = min(self.max_grid_col_position_embeddings, width)
col_position_ids = torch.arange(row_width, dtype=torch.long, device=grid.device)
col_position_embeddings = self.col_position_embeddings(col_position_ids)
col_shape = (batch_size, 1, row_width, hidden_dim)
col_position_embeddings = col_position_embeddings.view(*col_shape)
positional_embeddings = row_position_embeddings + col_position_embeddings
if interpolate_pos_encoding and (height > self.max_grid_row_position_embeddings or width > self.max_grid_col_position_embeddings):
grid = grid + self.interpolate_pos_encoding(positional_embeddings, height, width)
else:
grid = grid + positional_embeddings
return grid
|
Args:
grid: (batch_size, height, width, hidden_dim)
interpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):
Whether to interpolate the pre-trained position encodings.
Returns:
grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)
|
github-repos
|
def get_targets(self):
if (not hasattr(self, '_targets')):
targets = []
for target_def in (self.config.targets or []):
target = Target(target_def)
targets.append(target)
self._targets = targets
return self._targets
|
Returns the named targets that are specified in the config.
Returns:
list: a list of :class:`stacker.target.Target` objects
|
codesearchnet
|
def parse_mmtf_header(infile):
infodict = {}
mmtf_decoder = mmtf.parse(infile)
infodict['date'] = mmtf_decoder.deposition_date
infodict['release_date'] = mmtf_decoder.release_date
try:
infodict['experimental_method'] = [x.decode() for x in mmtf_decoder.experimental_methods]
except AttributeError:
infodict['experimental_method'] = [x for x in mmtf_decoder.experimental_methods]
infodict['resolution'] = mmtf_decoder.resolution
infodict['description'] = mmtf_decoder.title
group_name_exclude = ['HOH']
chem_comp_type_exclude = ['l-peptide linking', 'peptide linking']
chemicals = list(set([mmtf_decoder.group_list[idx]['groupName'] for idx in mmtf_decoder.group_type_list if ((mmtf_decoder.group_list[idx]['chemCompType'].lower() not in chem_comp_type_exclude) and (mmtf_decoder.group_list[idx]['groupName'] not in group_name_exclude))]))
infodict['chemicals'] = chemicals
return infodict
|
Parse an MMTF file and return basic header-like information.
Args:
infile (str): Path to MMTF file
Returns:
dict: Dictionary of parsed header
Todo:
- Can this be sped up by not parsing the 3D coordinate info somehow?
- OR just store the sequences when this happens since it is already being parsed.
|
codesearchnet
|
def translate_html_string(self, html: str) -> str:
text_content = get_text(html)
chunks = self.parse(text_content)
return resolve(chunks, html)
|
Translates the given HTML string with markups for semantic line breaks.
Args:
html (str): An input html string.
Returns:
The translated HTML string (str).
|
github-repos
|
def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:
known_args, pipeline_args = parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
model_handler = VertexAIModelHandlerJSON(endpoint_id=known_args.endpoint, project=known_args.project, location=known_args.location, experiment=known_args.experiment, network=known_args.vpc_network, private=known_args.private)
pipeline = test_pipeline
if not test_pipeline:
pipeline = beam.Pipeline(options=pipeline_options)
parameters = {'temperature': 0.2, 'maxOutputTokens': 256, 'topK': 40, 'topP': 0.95}
prompts = ['What is 5+2?', 'Who is the president?', 'Write me a business plan for a cookie shop.']
read_prompts = pipeline | 'Get prompt' >> beam.Create(prompts)
preprocess = read_prompts | 'Format prompt' >> beam.Map(lambda data: (data, {'prompt': data}))
predictions = preprocess | 'RunInference' >> RunInference(KeyedModelHandler(model_handler), inference_args=parameters)
_ = predictions | 'PrintOutput' >> beam.Map(print)
_ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)
result = pipeline.run()
result.wait_until_finish()
return result
|
Args:
argv: Command line arguments defined for this example.
save_main_session: Used for internal testing.
test_pipeline: Used for internal testing.
Returns:
The PipelineResult from running the pipeline.
|
github-repos
|
def partition_graphs(self):
if not self._debug_graphs:
raise LookupError('No partition graphs have been loaded.')
return [self._debug_graphs[key].debug_graph_def for key in self._debug_graphs]
|
Get the partition graphs.
Returns:
Partition graphs as a list of GraphDef.
Raises:
LookupError: If no partition graphs have been loaded.
|
github-repos
|
def vectorize(self, docs):
    if isinstance(docs, dict):
        docs = docs.items()
    if self.model is None:
        self.train(docs)
    asset_id2vector = {}
    unfound = []
    for item in docs:
        asset_id, _ = item
        label = 'DOC_' + str(asset_id)
        if label in self.model:
            asset_id2vector[asset_id] = self.model[label]
        else:
            unfound.append(item)
    if unfound:
        sentences = [self._gen_sentence(item) for item in unfound]
        self.update_model(sentences, train=self.stream_train)
        asset_id2vector.update({item[0]: self.model['DOC_' + str(item[0])] for item in unfound})
    return asset_id2vector
|
Returns the feature vectors for a set of docs. If the model has not
already been trained, self.train() is called first.
Args:
docs (dict or list of tuples): asset_id, body_text pairs for the
documents you wish to featurize.
Returns:
dict: mapping of asset_id to its feature vector.
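Example:
A minimal usage sketch; ``featurizer`` as an instance of this class and the
document bodies are hypothetical:
.. code-block:: python
    docs = {101: 'first document body', 102: 'second document body'}
    vectors = featurizer.vectorize(docs)
    # vectors maps each asset_id to its learned document vector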
|
juraj-google-style
|
def get_residue_annotations(self, start_resnum, end_resnum=None):
if (not end_resnum):
end_resnum = start_resnum
f = SeqFeature(FeatureLocation((start_resnum - 1), end_resnum))
return f.extract(self).letter_annotations
|
Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues
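Example:
A minimal sketch, assuming ``seqprop`` is an instance of this class with
letter annotations loaded:
.. code-block:: python
    annotations = seqprop.get_residue_annotations(start_resnum=10, end_resnum=12)
    # a dict of per-letter annotations covering residues 10 through 12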
|
codesearchnet
|
def _assertOpOutputMatchesExpected(self, params, solution, high_level=True, rtol=0.001, atol=1e-05):
diagonal = params['diagonal']
with self.session() as session:
for dtype in self.numeric_types - {np.int8, np.uint8}:
expected = solution.astype(dtype)
with self.test_scope():
params['diagonal'] = array_ops.placeholder(dtype, diagonal.shape, name='diagonal')
if high_level:
output = array_ops.matrix_diag(**params)
else:
output = gen_array_ops.matrix_diag(**params)
result = session.run(output, {params['diagonal']: diagonal.astype(dtype)})
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03)
|
Verifies that matrix_diag produces `solution` when fed `params`.
Args:
params: dictionary containing input parameters to matrix_diag.
solution: numpy array representing the expected output of matrix_diag.
high_level: call high_level matrix_diag
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
|
github-repos
|
def dcc_connect(self, address, port, dcctype="chat"):
warnings.warn("Use self.dcc(type).connect()", DeprecationWarning)
return self.dcc(dcctype).connect(address, port)
|
Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
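Example:
This method is deprecated; the equivalent call it delegates to is shown below
(address and port are hypothetical):
.. code-block:: python
    connection = client.dcc('chat').connect('192.168.0.10', 8000)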
|
juraj-google-style
|
def verified_excel_file(store, institute_list, temp_excel_dir):
document_lines = []
written_files = 0
today = datetime.datetime.now().strftime('%Y-%m-%d')
LOG.info('Creating verified variant document..')
for cust in institute_list:
verif_vars = store.verified(institute_id=cust)
LOG.info('Found {} verified variants for customer {}'.format(len(verif_vars), cust))
if (not verif_vars):
continue
unique_callers = set()
for (var_type, var_callers) in CALLERS.items():
for caller in var_callers:
unique_callers.add(caller.get('id'))
cust_verified = export_verified_variants(verif_vars, unique_callers)
document_name = ('.'.join([cust, '_verified_variants', today]) + '.xlsx')
workbook = Workbook(os.path.join(temp_excel_dir, document_name))
Report_Sheet = workbook.add_worksheet()
row = 0
for (col, field) in enumerate((VERIFIED_VARIANTS_HEADER + list(unique_callers))):
Report_Sheet.write(row, col, field)
for (row, line) in enumerate(cust_verified, 1):
for (col, field) in enumerate(line):
Report_Sheet.write(row, col, field)
workbook.close()
if os.path.exists(os.path.join(temp_excel_dir, document_name)):
written_files += 1
return written_files
|
Collect all verified variants in a list on institutes and save them to file
Args:
store(adapter.MongoAdapter)
institute_list(list): a list of institute ids
temp_excel_dir(os.Path): folder where the temp excel files are written to
Returns:
written_files(int): the number of files written to temp_excel_dir
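Example:
A minimal usage sketch with hypothetical institute ids and temp directory:
.. code-block:: python
    n_files = verified_excel_file(store, ['cust000', 'cust001'], '/tmp/verified_excel')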
|
codesearchnet
|
def query_string_to_dict(query):
query_params = {}
for key_value in query.split('&'):
key_value_pair = key_value.split('=', 1)
key = (key_value_pair[0] if (len(key_value_pair) >= 1) else '')
value = (key_value_pair[1] if (len(key_value_pair) == 2) else '')
query_params[key] = value
return query_params
|
Convert a string to a query dict.
Args:
query (str): The query string.
Returns:
obj: The key value object with query params.
Note:
This method does the same as urllib.parse.parse_qsl except
that it doesn't actually decode the values.
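Example:
.. code-block:: python
    query_string_to_dict('a=1&b=two%20words&flag')
    # -> {'a': '1', 'b': 'two%20words', 'flag': ''}  (values stay encoded)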
|
codesearchnet
|
def changed(self, path, md5):
actual = self.update(path)
msg = "File '{}', md5 '{}', actual '{}'"
logger.debug(msg.format(path, md5, actual))
if not md5 or not actual:
return True
return actual.split(".")[0] != md5.split(".")[0]
|
Check whether a file/directory has changed relative to the expected md5.
Args:
path (str): path to the file/directory to check.
md5 (str): expected md5.
Returns:
bool: True if the actual md5 of path differs from the expected md5
(or either checksum is missing), False otherwise.
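Example:
A minimal sketch, assuming ``cache`` is an instance of this class; the path
and checksum are hypothetical:
.. code-block:: python
    if cache.changed('data/model.pkl', 'd41d8cd98f00b204e9800998ecf8427e'):
        print('checksum mismatch, file needs to be re-fetched')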
|
juraj-google-style
|
def events_from_logdir(logdir):
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
|
Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
|
github-repos
|
def verify_branch(branch_name):
try:
shell.run(
'git rev-parse --verify {}'.format(branch_name),
never_pretend=True
)
return True
except IOError:
return False
|
Verify if the given branch exists.
Args:
branch_name (str):
The name of the branch to check.
Returns:
bool: **True** if a branch with name *branch_name* exists, **False**
otherwise.
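Example:
A minimal usage sketch (the branch name is hypothetical):
.. code-block:: python
    if not verify_branch('develop'):
        print('branch develop does not exist yet')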
|
juraj-google-style
|
def _cleanup_keys_with_confirmation(self, keys_to_delete):
print('Round name: ', self.round_name)
print('Number of entities to be deleted: ', len(keys_to_delete))
if (not keys_to_delete):
return
if self.verbose:
print('Entities to delete:')
idx = 0
prev_key_prefix = None
dots_printed_after_same_prefix = False
for k in keys_to_delete:
if (idx >= 20):
print(' ...')
print(' ...')
break
key_prefix = (k.flat_path[0:1] if (k.flat_path[0] in [u'SubmissionType', u'WorkType']) else k.flat_path[0])
if (prev_key_prefix == key_prefix):
if (not dots_printed_after_same_prefix):
print(' ...')
dots_printed_after_same_prefix = True
else:
print(' ', k)
dots_printed_after_same_prefix = False
idx += 1
prev_key_prefix = key_prefix
print()
inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
if (inp != 'yes'):
return
with self.datastore_client.no_transact_batch() as batch:
for k in keys_to_delete:
batch.delete(k)
print('Data deleted')
|
Asks confirmation and then deletes entries with keys.
Args:
keys_to_delete: list of datastore keys for which entries should be deleted
|
codesearchnet
|
def error_log(self, msg='', level=20, traceback=False):
sys.stderr.write((msg + '\n'))
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
|
Write error message to log.
Args:
msg (str): error message
level (int): logging level (accepted for API compatibility; not used by this implementation)
traceback (bool): add traceback to output or not
|
codesearchnet
|
def _GetPathSegmentIndexForValueWeights(self, value_weights):
largest_weight = value_weights.GetLargestWeight()
if largest_weight > 0:
value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight)
else:
value_weight_indexes = []
if value_weight_indexes:
path_segment_index = value_weight_indexes[0]
else:
path_segment_index = value_weights.GetFirstAvailableIndex()
if path_segment_index is None:
raise RuntimeError('No path segment index found.')
return path_segment_index
|
Retrieves the index of the path segment based on value weights.
Args:
value_weights: the value weights object (instance of _PathSegmentWeights).
Returns:
An integer containing the path segment index.
Raises:
RuntimeError: if no path segment index can be found.
|
juraj-google-style
|
def load(self, source, as_defaults=False):
if isinstance(source, six.string_types):
source = os.path.expanduser(source)
with open(source, encoding='utf-8') as f:
self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)
elif isinstance(source, (list, tuple)):
for s in source:
with open(s, encoding='utf-8') as f:
self._rw.load_config_from_file(self._config, f, as_defaults=as_defaults)
else:
self._rw.load_config_from_file(self._config, source, as_defaults=as_defaults)
|
Load configuration values from the specified source.
Args:
source: path to a configuration file, a list/tuple of such paths, or a file-like object.
as_defaults (bool): if ``True``, contents of ``source`` will be treated as schema of configuration items.
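Example:
A minimal sketch, assuming ``config`` is an instance of this class; the file
names are hypothetical:
.. code-block:: python
    config.load('~/.myapp/defaults.json', as_defaults=True)
    config.load(['base.json', 'local-overrides.json'])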
|
codesearchnet
|
def connect(address, authkey):
TFManager.register('get_queue')
TFManager.register('get')
TFManager.register('set')
m = TFManager(address, authkey=authkey)
m.connect()
return m
|
Connect to a multiprocess.Manager.
Args:
:address: unique address to the TFManager, either a unique connection string for 'local', or a (host, port) tuple for remote.
:authkey: string authorization key
Returns:
A TFManager instance referencing the remote TFManager at the supplied address.
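Example:
A minimal sketch; the address, key, and queue name are hypothetical:
.. code-block:: python
    mgr = connect(('worker-host', 5000), authkey=b'secret')
    queue = mgr.get_queue('input')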
|
juraj-google-style
|
def operation_at(self,
qubit: ops.Qid,
moment_index: int) -> Optional[ops.Operation]:
if not 0 <= moment_index < len(self._moments):
return None
for op in self._moments[moment_index].operations:
if qubit in op.qubits:
return op
return None
|
Finds the operation on a qubit within a moment, if any.
Args:
qubit: The qubit to check for an operation on.
moment_index: The index of the moment to check for an operation
within. Allowed to be beyond the end of the circuit.
Returns:
None if there is no operation on the qubit at the given moment, or
else the operation.
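Example:
A minimal sketch using Cirq primitives:
.. code-block:: python
    q = cirq.LineQubit(0)
    circuit = cirq.Circuit([cirq.Moment([cirq.X(q)])])
    circuit.operation_at(q, 0)   # the X operation
    circuit.operation_at(q, 5)   # None: index is beyond the circuit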
|
juraj-google-style
|
def diff(self, **kwargs):
path = ('%s/%s/diff' % (self.manager.path, self.get_id()))
return self.manager.gitlab.http_get(path, **kwargs)
|
Generate the commit diff.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the diff could not be retrieved
Returns:
list: The changes done in this commit
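Example:
A minimal sketch, assuming ``commit`` is a project commit object:
.. code-block:: python
    for change in commit.diff():
        print(change['new_path'])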
|
codesearchnet
|
def is_remote_added(remote):
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' remotes')
lines = out.splitlines()
for item in lines:
i = re.split(r'\t+', item.rstrip('\t'))
if i[0] == remote:
return True
return False
|
Determines if a remote exists.
Args:
remote (str): The remote's name.
Returns:
bool: True if the remote has already been added.
CLI Example:
.. code-block:: bash
salt '*' flatpak.is_remote_added flathub
|
juraj-google-style
|
def refill(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
(path, from_address) = from_address
verb = Spoolverb()
inputs = self.select_inputs(from_address, (nfees + 1), ntokens, min_confirmations=min_confirmations)
outputs = ([{'address': to_address, 'value': self.token}] * ntokens)
outputs += ([{'address': to_address, 'value': self.fee}] * nfees)
outputs += [{'script': self._t._op_return_hex(verb.fuel), 'value': 0}]
unsigned_tx = self._t.build_transaction(inputs, outputs)
signed_tx = self._t.sign_transaction(unsigned_tx, password, path=path)
txid = self._t.push(signed_tx)
return txid
|
Refill wallets with the necessary fuel to perform spool transactions
Args:
from_address (Tuple[str]): Federation wallet address. Fuels the wallets with tokens and fees. All transactions to wallets
holding a particular piece should come from the Federation wallet
to_address (str): Wallet address that needs to perform a spool transaction
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Federation wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when choosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least one confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
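Example:
A minimal sketch, assuming ``spool`` is an instance of this class; all
addresses, the wallet path, and the password are placeholders:
.. code-block:: python
    txid = spool.refill(('m/0/1', '<federation_address>'), '<wallet_address>',
                        nfees=1, ntokens=10, password='<password>')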
|
codesearchnet
|
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
|
Read a single line from rfile buffer and return it.
Args:
size (int): maximum number of bytes to read; if None, read until a complete line has been buffered.
Returns:
bytes: One line from rfile.
|
juraj-google-style
|
def remove_son(self, son):
self._sons = [x for x in self._sons if (x.node_id != son.node_id)]
|
Remove the given node from this node's sons. Do nothing if the node is not a son.
Args:
son: the son node to remove
codesearchnet
|
def __init__(self, caption, height, width):
self.caption = caption
self.height = height
self.width = width
self._window = None
|
Initialize a new image viewer.
Args:
caption (str): the caption/title for the window
height (int): the height of the window
width (int): the width of the window
Returns:
None
|
juraj-google-style
|
def get_nn_info(self, structure, n):
nns = self.get_voronoi_polyhedra(structure, n)
return self._extract_nn_info(structure, nns)
|
Get all near-neighbor sites as well as the associated image locations
and weights of the site with index n in structure
using Voronoi decomposition.
Args:
structure (Structure): input structure.
n (integer): index of site for which to determine near-neighbor
sites.
Returns:
siw (list of tuples (Site, array, float)): tuples, each one
of which represents a coordinated site, its image location,
and its weight.
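Example:
A minimal sketch, assuming this method belongs to pymatgen's VoronoiNN-style
class (import paths vary between pymatgen versions):
.. code-block:: python
    from pymatgen.analysis.local_env import VoronoiNN
    from pymatgen.core import Lattice, Structure
    structure = Structure(Lattice.cubic(4.2), ['Cs', 'Cl'],
                          [[0, 0, 0], [0.5, 0.5, 0.5]])
    neighbors = VoronoiNN().get_nn_info(structure, n=0)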
|
codesearchnet
|
def stateless_random_shuffle(input_tensor, seed, name=None):
with tf.compat.v1.name_scope(name, default_name='stateless_random_shuffle', values=[input_tensor, seed]):
input_tensor = tf.convert_to_tensor(input_tensor, name='input_tensor')
seed = tf.convert_to_tensor(seed, name='random_seed')
uniforms = tf.random.stateless_uniform(shape=[tf.shape(input_tensor)[0]], seed=seed, dtype=tf.float64)
return tf.gather(input_tensor, tf.argsort(uniforms, stable=True, axis=0))
|
Produces stateless random shuffle of the 1st dimension of an input Tensor.
This is a stateless version of `tf.random_shuffle`. If run twice with the same
seed, produces the same result.
Example
```python
identity_shuffle = tf.range(100)
random_shuffle = stateless_random_shuffle(identity_shuffle, seed=(42, 2))
```
Args:
input_tensor: float32, float64, int32 or int64 1-D Tensor.
seed: int32 or int64 Tensor of shape [2].
name: Python `str` name prefixed to ops created by this function.
Returns:
A Tensor of the same shape and dtype as `input_tensor`.
|
github-repos
|
def from_authorized_user_info(cls, info, scopes=None):
keys_needed = set(('refresh_token', 'client_id', 'client_secret'))
missing = keys_needed.difference(six.iterkeys(info))
if missing:
raise ValueError(
'Authorized user info was not in the expected format, missing '
'fields {}.'.format(', '.join(missing)))
return Credentials(
None,
refresh_token=info['refresh_token'],
token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
scopes=scopes,
client_id=info['client_id'],
client_secret=info['client_secret'])
|
Creates a Credentials instance from parsed authorized user info.
Args:
info (Mapping[str, str]): The authorized user info in Google
format.
scopes (Sequence[str]): Optional list of scopes to include in the
credentials.
Returns:
google.oauth2.credentials.Credentials: The constructed
credentials.
Raises:
ValueError: If the info is not in the expected format.
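Example:
A minimal sketch with placeholder secrets:
.. code-block:: python
    info = {'refresh_token': '<refresh_token>',
            'client_id': '<client_id>',
            'client_secret': '<client_secret>'}
    creds = Credentials.from_authorized_user_info(info, scopes=['openid'])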
|
juraj-google-style
|