code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def get_policies_from_aws(client, scope='Local'):
done = False
marker = None
policies = []
while not done:
if marker:
response = client.list_policies(Marker=marker, Scope=scope)
else:
response = client.list_policies(Scope=... | Returns a list of all the policies currently applied to an AWS Account. Returns a list containing all the
policies for the specified scope
Args:
client (:obj:`boto3.session.Session`): A boto3 Session object
scope (`str`): The policy scope to use. Default: Local
Returns:
:obj:`list` of `dict` | juraj-google-style |
def as_str(bytes_or_text, encoding='utf-8'):
return as_text(bytes_or_text, encoding) | Acts as an alias for the `as_text` function.
Args:
bytes_or_text: The input value to be converted. A bytes or unicode object.
encoding: Optional string. The encoding to use if bytes_or_text is a bytes
object. Defaults to 'utf-8'.
Returns:
A unicode string.
Raises:
TypeError: If bytes_or_text is not a bytes or unico... | github-repos |
def make_export_strategy(
args,
keep_target,
assets_extra,
features,
schema,
stats):
target_name = feature_transforms.get_target_name(features)
csv_header = [col['name'] for col in schema]
if not keep_target:
csv_header.remove(target_name)
def export_fn(es... | Makes prediction graph that takes json input.
Args:
args: command line args
keep_target: If true, target column is returned in prediction graph. Target
column must also exist in input data
assets_extra: other fields to copy to the output folder
job_dir: root job folder
features: features dict
schema: schema list
stats:... | juraj-google-style |
def __generate_reference__(self, triple_map, **kwargs):
element = kwargs.get("element")
found_elements = element.xpath(
triple_map.reference,
namespaces=self.xml_ns)
for elem in found_elements:
raw_text = elem.text.strip()
if ... | Internal method takes a triple_map and returns the result of
applying to XPath to the current DOM context
Args:
-----
triple_map: SimpleNamespace
element: etree.Element | juraj-google-style |
def add_dict_to_hash(a_hash, a_dict):
if (a_dict is None):
return
for (k, v) in a_dict.items():
a_hash.update((((b'\x00' + k.encode('utf-8')) + b'\x00') + v.encode('utf-8'))) | Adds `a_dict` to `a_hash`
Args:
a_hash (`Hash`): the secure hash, e.g created by hashlib.md5
a_dict (dict[string, [string]]): the dictionary to add to the hash | codesearchnet |
def or_filter(self, **filters):
clone = copy.deepcopy(self)
clone.adapter.add_query([("OR_QRY", filters)])
return clone | Works like "filter" but joins given filters with OR operator.
Args:
**filters: Query filters as keyword arguments.
Returns:
Self. Queryset object.
Example:
>>> Person.objects.or_filter(age__gte=16, name__startswith='jo') | juraj-google-style |
def calc_radius(latitude, ellipsoid='WGS84'):
ellipsoids = {'Airy (1830)': (6377.563, 6356.257), 'Bessel': (6377.397, 6356.079), 'Clarke (1880)': (6378.249145, 6356.51486955), 'FAI sphere': (6371, 6371), 'GRS-67': (6378.16, 6356.775), 'International': (6378.388, 6356.912), 'Krasovsky': (6378.245, 6356.863), 'NAD27'... | Calculate earth radius for a given latitude.
This function is most useful when dealing with datasets that are very
localised and require the accuracy of an ellipsoid model without the
complexity of code necessary to actually use one. The results are meant to
be used as a :data:`BODY_RADIUS` replacement when the simpl... | codesearchnet |
def _get_operator_param_name_and_values(operator_class_name, task_details):
operator_task_details = task_details.copy()
if ('type' in operator_task_details.keys()):
del operator_task_details['type']
if ('up_stream' in operator_task_details.keys()):
del operator_task_details['up_stream']
... | Internal helper gets the name of the python parameter for the Airflow operator class. In
some cases, we do not expose the airflow parameter name in its native form, but choose to
expose a name that's more standard for Datalab, or one that's more friendly. For example,
Airflow's BigQueryOperator uses 'bql' for the query... | codesearchnet |
def __init__(self, structure, include_bv_charge=False):
self.structure = structure
self.include_bv_charge = include_bv_charge
sga = SpacegroupAnalyzer(self.structure)
self.symm_structure = sga.get_symmetrized_structure()
self.equiv_site_seq = list(self.symm_str... | Initializes a Vacancy Generator
Args:
structure(Structure): pymatgen structure object | juraj-google-style |
def from_config(cls, config: dict):
timestamp = config.get('timestamp', None)
return cls(config.get('id'),
config.get('type'),
config.get('data', dict()),
config.get('origin', None),
timestamp,
config... | Create an event object from an event dictionary object.
Args:
config (dict): Event Configuration dictionary. | juraj-google-style |
def _checkResponseNumberOfRegisters(payload, numberOfRegisters):
_checkString(payload, minlength=4, description='payload')
_checkInt(numberOfRegisters, minvalue=1, maxvalue=65535, description='numberOfRegisters')
BYTERANGE_FOR_NUMBER_OF_REGISTERS = slice(2, 4)
bytesForNumberOfRegisters = payload[BYTERAN... | Check that the number of written registers as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the value.
Args:
* payload (string): The payload
* numberOfRegisters (int): Number of registers that have been written
Raises:
TypeError, ValueError | codesearchnet |
class IncMeanTracker(WindowedTracker, MeanTracker):
def __init__(self, window_mode, **kwargs):
super().__init__(window_mode=window_mode, **kwargs)
self._mean = 0
def push(self, x):
if not math.isnan(x):
self._n += 1
delta = x - self._mean
else:
... | Base class for incremental mean trackers.
This class implements incremental calculation of the mean, which is more
efficient for streaming data as it updates the mean with each new data point
instead of recalculating from scratch.
Args:
window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`
or `... | github-repos |
def add_variable(var, restore=True):
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if (var not in tf.get_collection(collection)):
tf.add_to_collection(collection, var) | Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection. | codesearchnet |
def _PrintStorageInformationAsText(self, storage_reader):
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, title='Plaso Storage Information')
table_view.AddRow(['Filename', os.path.basename(self._storage_file_path)])
table_view.AddRow(['Format version', storage_reader.form... | Prints information about the store as human-readable text.
Args:
storage_reader (StorageReader): storage reader. | juraj-google-style |
def append(self, item):
if self.should_flush():
self.flush()
self.items.append(item) | Add new item to the list.
If needed, append will first flush existing items and clear existing items.
Args:
item: an item to add to the list. | juraj-google-style |
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)
y_pred = ops.convert_to_tensor(y_pred, dtype='float32')
y_pred = ops.cast(y_pred >= self.threshold, self.dtype)
return super().update_state(y_true, y_pred, sample_weight) | Accumulates the confusion matrix statistics.
Before the confusion matrix is updated, the predicted values are
thresholded to be:
0 for values that are smaller than the `threshold`
1 for values that are larger or equal to the `threshold`
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weigh... | github-repos |
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
return self._file_object.read(size) | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if... | juraj-google-style |
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):
output_file = open_strings_file(file_path, "a")
write_section_header_to_file(output_file, section_name)
for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
... | Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section. | juraj-google-style |
def get_container_details(self, container_id_or_name: str) -> dict:
container = self._client.containers.get(container_id_or_name)
return container.attrs | Get details of a container.
Args:
container_id_or_name (string): docker container id or name
Returns:
dict, details of the container | codesearchnet |
def numpy(self):
return _var_to_tensor(self).numpy() | Copies the values in this ShardedVariable to a NumPy array.
First converts to a single Tensor using the registered conversion function,
which concatenates the shards, then uses Tensor.numpy() to convert to
a NumPy array.
Returns:
A NumPy array of the same shape and dtype. | github-repos |
def create(self, teamId, personId=None, personEmail=None, isModerator=False, **request_parameters):
check_type(teamId, basestring, may_be_none=False)
check_type(personId, basestring)
check_type(personEmail, basestring)
check_type(isModerator, bool)
post_data = dict_from_items_with_values(request_par... | Add someone to a team by Person ID or email address.
Add someone to a team by Person ID or email address; optionally making
them a moderator.
Args:
teamId(basestring): The team ID.
personId(basestring): The person ID.
personEmail(basestring): The email address of the person.
isModerator(bool): Set to True to make the... | codesearchnet |
def print_alignment(mapping, instance1, instance2):
result = []
for instance1_item, m in zip(instance1, mapping):
r = instance1_item[1] + "(" + instance1_item[2] + ")"
if m == -1:
r += "-Null"
else:
instance2_item = instance2[m]
r += "-" + instanc... | print the alignment based on a node mapping
Args:
mapping: current node mapping list
instance1: nodes of AMR 1
instance2: nodes of AMR 2 | juraj-google-style |
def result(self, timeout=None):
self._blocking_poll(timeout=timeout)
if (self._exception is not None):
raise self._exception
return self._result | Get the result of the operation, blocking if necessary.
Args:
timeout (int):
How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely.
Returns:
google.protobuf.Message: The Operation's result.
Raises:
google.api_core.GoogleAPICallError: If the operation errors or if
the timeout is reac... | codesearchnet |
def _decode_response_string_and_validate_format(self, rpc_id, response):
if not response:
raise errors.ProtocolError(self._device, errors.ProtocolError.NO_RESPONSE_FROM_SERVER)
result = json.loads(response)
for field_name in RPC_RESPONSE_REQUIRED_FIELDS:
if field_name not in result:
... | Decodes response JSON string to python dict and validates its format.
Args:
rpc_id: int, the actual id of this RPC. It should be the same with the id
in the response, otherwise throws an error.
response: str, the JSON string of the RPC response.
Returns:
A dict decoded from the response JSON string.
Raises:
errors.P... | github-repos |
def get_imports(filename: Union[str, os.PathLike]) -> list[str]:
with open(filename, encoding='utf-8') as f:
content = f.read()
imported_modules = set()
import transformers.utils
def recursive_look_for_imports(node):
if isinstance(node, ast.Try):
return
elif isinstan... | Extracts all the libraries (not relative imports this time) that are imported in a file.
Args:
filename (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of all packages required to use the input module. | github-repos |
def create_issue(title: str, description: str, labels: Optional[List[str]]=None) -> Tuple[int, str]:
url = 'https:
data = {'owner': _GITHUB_REPO_OWNER, 'repo': _GITHUB_REPO_NAME, 'title': title, 'body': description, 'labels': [_AWAITING_TRIAGE_LABEL, _PERF_ALERT_LABEL]}
if labels:
data['labels'].ext... | Create an issue with title, description with a label.
Args:
title: GitHub issue title.
description: GitHub issue description.
labels: Labels used to tag the GitHub issue.
Returns:
Tuple containing GitHub issue number and issue URL. | github-repos |
def posterior_chromatogram_hypotheses_fast(experiment, prior_chrom_null):
tg_ids = experiment.df.tg_num_id.values
pp_values = (1 - experiment.df['pep'].values)
current_tg_id = tg_ids[0]
scores = []
final_result = []
final_result_h0 = []
for i in range(tg_ids.shape[0]):
id_ = tg_ids[i... | Compute posterior probabilities for each chromatogram
For each chromatogram (each group_id / peptide precursor), all hypothesis of all peaks
being correct (and all others false) as well as the h0 (all peaks are
false) are computed.
The prior probabilities are given in the function
This assumes that the input... | codesearchnet |
def get_ref(profile, ref):
resource = ('/refs/' + ref)
data = api.get_request(profile, resource)
return prepare(data) | Fetch a ref.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to fetch, e.g., ``heads/my-feature-branch``.
Returns
A dict with data about the ref. | codesearchnet |
def set_sflow(self, name, value=None, default=False, disable=False):
if (value not in [True, False, None]):
raise ValueError
commands = [('interface %s' % name)]
commands.append(self.command_builder('sflow enable', value=value, default=default, disable=disable))
return self.configure(commands) | Configures the sFlow state on the interface
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
value (boolean): True if sFlow should be enabled otherwise False
default (boolean): Specifies the default value for sFlow
disable (boolean): Specifies to disable sFlow
... | codesearchnet |
def __init__(self, filename, error_handler, **kwargs):
self._filename = filename
self._error_handler = error_handler
self.lexer = ply.lex.lex(module=self, **kwargs) | Create a Lex lexer.
To pass this into a Ply Yacc parser, pass it in using the .lexer property
of an StlLexer instance:
my_lexer = StlLexer()
my_parser = ply.yacc.parser(lexer=my_lexer.lexer)
Args:
filename: The filename string to use in any error messaging.
error_handler: A object to handle and lexing errors.
kwargs: ... | github-repos |
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close() | Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits. | juraj-google-style |
def open(self):
if self._is_open:
raise exceptions.ClientConnectionFailure('client connection already open')
else:
try:
self.proxy.open()
self._is_open = True
except Exception as e:
self.logger.error('could not open client connection: %s', e)
... | Open the client connection.
Raises:
ClientConnectionFailure: if the client connection is already open
Exception: if an error occurs while trying to open the connection | codesearchnet |
def filter_invalid_unicode_from_table(table):
if not hasattr(table, 'table_id'):
table.table_id = 0
for row_index, row in table.iterrows():
for col_index, cell in enumerate(row):
cell, is_invalid = filter_invalid_unicode(cell)
if is_invalid:
logging.warnin... | Removes invalid unicode from table. Checks whether a table cell text contains an invalid unicode encoding. If yes,
reset the table cell text to an empty str and log a warning for each invalid cell
Args:
table: table to clean. | github-repos |
def create_complete_files(climan, path, cmd, *cmds, zsh_sourceable=False):
path = pathlib.Path(path)
zsh_dir = (path / 'zsh')
if (not zsh_dir.exists()):
zsh_dir.mkdir(parents=True)
zsh_file = (zsh_dir / '_{}.sh'.format(cmd))
bash_dir = (path / 'bash')
if (not bash_dir.exists()):
... | Create completion files for bash and zsh.
Args:
climan (:class:`~loam.cli.CLIManager`): CLI manager.
path (path-like): directory in which the config files should be
created. It is created if it doesn't exist.
cmd (str): command name that should be completed.
cmds (str): extra command names that should be completed.
zs... | codesearchnet |
def ParseNetworkConnectivityUsage(
self, parser_mediator, cache=None, database=None, table=None,
**unused_kwargs):
self._ParseGUIDTable(
parser_mediator, cache, database, table,
self._NETWORK_CONNECTIVITY_USAGE_VALUES_MAP,
SRUMNetworkConnectivityUsageEventData) | Parses the network connectivity usage monitor table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cache (Optional[ESEDBCache]): cache, which contains information about
the identifiers stored in the SruDbIdMapTable table.
database (Option... | juraj-google-style |
def read_zmat(cls, inputfile, implicit_index=True):
cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
if implicit_index:
zmat_frame = pd.read_table(inputfile, comment='
zmat_frame.index = range(1, (len(zmat_frame) + 1))
else:
zmat_frame = pd.read_table(inputfile, comment='
... | Reads a zmat file.
Lines beginning with ``#`` are ignored.
Args:
inputfile (str):
implicit_index (bool): If this option is true the first column
has to be the element symbols for the atoms.
The row number is used to determine the index.
Returns:
Zmat: | codesearchnet |
def convert_to_generator_like(data, batch_size=None, steps_per_epoch=None, epochs=1, shuffle=False):
if isinstance(data, tuple):
data = tuple((ele for ele in data if not all((e is None for e in nest.flatten(ele)))))
if data_utils.is_generator_or_sequence(data) or isinstance(data, iterator_ops.IteratorBa... | Make a generator out of NumPy or EagerTensor inputs.
Args:
data: Either a generator or `keras.utils.data_utils.Sequence` object or
`Dataset`, `Iterator`, or a {1,2,3}-tuple of NumPy arrays or EagerTensors.
If a tuple, the elements represent `(x, y, sample_weights)` and may be
`None` or `[None]`.
batch_size: Used when ... | github-repos |
def must_exist(*components):
_path = path(*components)
if not exists(_path):
raise File404(_path)
return _path | Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist. | juraj-google-style |
class EncodecDecoderOutput(ModelOutput):
audio_values: Optional[torch.FloatTensor] = None | Args:
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Encodec. | github-repos |
def list_instances(i_info, param_str, numbered=False):
print(param_str)
for i in i_info:
if numbered:
print("Instance {}
print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}".
format(C_TI, C_NORM, C_STAT[i_info[i]['state']],
... | Display a list of all instances and their details.
Iterates through all the instances in the dict, and displays
information for each instance.
Args:
i_info (dict): information on instances and details.
param_str (str): the title to display before the list.
numbered (bool): optional - indicates whether the list should ... | juraj-google-style |
def current_spi_to_number(self):
if self.slots['subpage'] == None:
return self.sub_pi_to_number(0, 0)
else:
return self.sub_pi_to_number(self.slots['subpage'],
self.slots['subitem']) | Convert subpage & subitem to an integer
* if page == 1, then return 0, since the item count is the true # of items
* if page == 2, then return page-1 * items_per_page, since we are
returning the # of items on a full page.
Args:
* None
Returns:
* Integer - Which represents the number of items up to the page. | juraj-google-style |
def CheckCondition(condition, check_object):
try:
of = objectfilter.Parser(condition).Parse()
compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
return compiled_filter.Matches(check_object)
except objectfilter.Error as e:
raise ConditionError(e) | Check if a condition matches an object.
Args:
condition: A string condition e.g. "os == 'Windows'"
check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()
Returns:
True or False depending on whether the condition matches.
Raises:
ConditionError: If condition is bad. | juraj-google-style |
def reshape(vari, shape):
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = reshape(core[key], shape)
out = Poly(core, vari.dim, shape, vari.dtype)
return out
return numpy.asarray(vari).reshape(shape) | Reshape the shape of a shapeable quantity.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Shapeable input quantity.
shape (tuple):
The polynomials new shape. Must be compatible with the number of
elements in ``vari``.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Same type as ``vari``.
Examples:
>>> poly = ... | codesearchnet |
def sync_proxy(self, mri, block):
done_queue = Queue()
self._queues[mri] = done_queue
update_fields = set()
def callback(value=None):
if isinstance(value, Exception):
if isinstance(value, Disconnected):
... | Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync | juraj-google-style |
def recv(self, request_id):
log.debug("Reading response %d from Kafka" % request_id)
if not self._sock:
self.reinit()
resp = self._read_bytes(4)
(size,) = struct.unpack('>i', resp)
resp = self._read_bytes(size)
return res... | Get a response packet from Kafka
Arguments:
request_id: can be any int (only used for debug logging...)
Returns:
str: Encoded kafka packet response from server | juraj-google-style |
def os_version_info_ex():
if (not HAS_WIN32):
return
class OSVersionInfo(ctypes.Structure):
_fields_ = (('dwOSVersionInfoSize', DWORD), ('dwMajorVersion', DWORD), ('dwMinorVersion', DWORD), ('dwBuildNumber', DWORD), ('dwPlatformId', DWORD), ('szCSDVersion', (WCHAR * 128)))
def __init__... | Helper function to return the results of the GetVersionExW Windows API call.
It is a ctypes Structure that contains Windows OS Version information.
Returns:
class: An instance of a class containing version info | codesearchnet |
def decode(self, encoded):
encoded = super().decode(encoded)
tokens = [self.itos[index] for index in encoded]
return self.detokenize(tokens) | Decodes a tensor into a sequence.
Args:
encoded (torch.Tensor): Encoded sequence.
Returns:
str: Sequence decoded from ``encoded``. | juraj-google-style |
def log(self: EventSetOrNode) -> EventSetOrNode:
from temporian.core.operators.unary import log
return log(self) | Calculates the natural logarithm of an [`EventSet`][temporian.EventSet]'s
features.
Can only be used on floating point features.
Example:
```python
>>> a = tp.event_set(
... timestamps=[1, 2, 3, 4, 5],
... features={"M": [np.e, 1., 2., 10., -1.]},
... )
>>> a.log()
indexes: ...
timestamps: [1. 2. 3. 4. 5.]
'M... | github-repos |
def delete(self, membershipId):
check_type(membershipId, basestring)
self._session.delete(((API_ENDPOINT + '/') + membershipId)) | Delete a membership, by ID.
Args:
membershipId(basestring): The membership ID.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error. | codesearchnet |
def load_text(self, text, tokenizer=None):
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary() | Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string | juraj-google-style |
def from_parent(parent_key, i):
if (not isinstance(parent_key, HDPrivateKey)):
raise TypeError('parent_key must be an HDPrivateKey object.')
hmac_key = parent_key.chain_code
if (i & 2147483648):
hmac_data = ((b'\x00' + bytes(parent_key._key)) + i.to_bytes(length=4, byteorder='big'))
else... | Derives a child private key from a parent
private key. It is not possible to derive a child
private key from a public parent key.
Args:
parent_key (HDPrivateKey): | codesearchnet |
def release_docs_side_effect(content):
result = content.replace("{", "{{").replace("}", "}}")
result = result.replace("{{version}}", "{version}")
result = result.replace("{{circleci_build}}", "{circleci_build}")
result = result.replace("{{travis_build}}", "{travis_build}")
result = re... | Updates the template so that curly braces are escaped correctly.
Args:
content (str): The template for ``docs/index.rst.release.template``.
Returns:
str: The updated template with properly escaped curly braces. | juraj-google-style |
def __init__(self, model, task, cmdOptions):
validateOpfJsonValue(task, "opfTaskSchema.json")
self.__logger = logging.getLogger(".".join(
['com.numenta', self.__class__.__module__, self.__class__.__name__]))
self.__logger.debug(("Instantiated %s(" + \
"model=%r, ... | Constructor
Args:
model: The OPF Model instance against which to run the task
task: A dictionary conforming to opfTaskSchema.json
cmdOptions: ParseCommandLineOptionsResult namedtuple | juraj-google-style |
def mark_complex(self, name, serializer, deserializer):
self._complex_properties[name] = (serializer, deserializer) | Mark a property as complex with serializer and deserializer functions.
Args:
name (str): The name of the complex property.
serializer (callable): The function to call to serialize the property's
value to something that can be saved in a json.
deserializer (callable): The function to call to unserialize the property
fr... | juraj-google-style |
def get_operation_device(self, operation_name):
operation = self._name_to_operation(operation_name)
if isinstance(operation, tf.Operation):
return operation.device
else:
return None | The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name. | codesearchnet |
def putenv(key, value):
key = path2fsn(key)
value = path2fsn(value)
if is_win and PY2:
try:
set_windows_env_var(key, value)
except WindowsError:
raise ValueError
else:
try:
os.putenv(key, value)
except OSError:
... | Like `os.putenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to get
value (pathlike): The value to set
Raises:
ValueError | juraj-google-style |
def _FormatPropertyName(self, property_name):
fix_key = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', property_name)
return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', fix_key).lower() | Formats a camel case property name as snake case.
Args:
property_name (str): property name in camel case.
Returns:
str: property name in snake case. | juraj-google-style |
def _AddAttributeContainer(self, container_type, attribute_container):
container_list = self._GetSerializedAttributeContainerList(container_type)
identifier = identifiers.SQLTableIdentifier(
container_type, container_list.next_sequence_number + 1)
attribute_container.SetIdentifier(identifier)
... | Adds an attribute container.
Args:
container_type (str): attribute container type.
attribute_container (AttributeContainer): attribute container.
Raises:
IOError: if the attribute container cannot be serialized.
OSError: if the attribute container cannot be serialized. | juraj-google-style |
def get_pattern_step_time(self, patternnumber, stepnumber):
_checkPatternNumber(patternnumber)
_checkStepNumber(stepnumber)
address = _calculateRegisterAddress('time', patternnumber, stepnumber)
return self.read_register(address, 0) | Get the step time.
Args:
* patternnumber (integer): 0-7
* stepnumber (integer): 0-7
Returns:
The step time (int??). | codesearchnet |
def abort_expired_batches(self, request_timeout_ms, cluster):
expired_batches = []
to_remove = []
count = 0
for tp in list(self._batches.keys()):
assert tp in self._tp_locks, 'TopicPartition not in locks dict'
... | Abort the batches that have been sitting in RecordAccumulator for
more than the configured request_timeout due to metadata being
unavailable.
Arguments:
request_timeout_ms (int): milliseconds to timeout
cluster (ClusterMetadata): current metadata for kafka cluster
Returns:
list of ProducerBatch that were expired | juraj-google-style |
def get_sine_pos_embed(pos_tensor: torch.Tensor, num_pos_feats: int=128, temperature: int=10000, exchange_xy: bool=True) -> Tensor:
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / ... | Generate sine position embeddings from a position tensor.
Args:
pos_tensor (torch.Tensor):
Tensor containing positions. Shape: [..., n].
num_pos_feats (`int`, *optional*, defaults to 128):
Projected shape for each float in the tensor.
temperature (`int`, *optional*, defaults to 10000):
Temperature in the sine/cosine f... | github-repos |
def print_type_of_instance(self, t: types.BaseValue, instance=None) -> str: | Returns a string of the type of an instance of t.
For example, if t is `int`, then this method returns "int".
Args:
t: An abstract value.
instance: A specific instance of t to print. | github-repos |
def export(self, template_file_name, output_file_name,
sort="public", data=None, limit=0):
exportedData = {}
exportedUsers = self.getSortedUsers()
template = self.__getTemplate(template_file_name)
position = 1
if not limit:
exportedData["users... | Export ranking to a file.
Args:
template_file_name (str): where is the template
(moustache template)
output_file_name (str): where create the file with the ranking
sort (str): field to sort the users | juraj-google-style |
def insert_arguments_into_query(compilation_result, arguments):
_ensure_arguments_are_provided(compilation_result.input_metadata, arguments)
if (compilation_result.language == MATCH_LANGUAGE):
return insert_arguments_into_match_query(compilation_result, arguments)
elif (compilation_result.language =... | Insert the arguments into the compiled GraphQL query to form a complete query.
Args:
compilation_result: a CompilationResult object derived from the GraphQL compiler
arguments: dict, mapping argument name to its value, for every parameter the query expects.
Returns:
string, a query in the appropriate output language,... | codesearchnet |
def _restore_training_state(self, restore_state):
self.load_state_dict(restore_state["model"])
self.optimizer.load_state_dict(restore_state["optimizer"])
self.lr_scheduler.load_state_dict(restore_state["lr_scheduler"])
start_iteration = restore_state["iteration"] + 1
if ... | Restores the model and optimizer states
This helper function restores the model's state to a given iteration so
that a user can resume training at any epoch.
Args:
restore_state: a state_dict dictionary | juraj-google-style |
def choices_validator(choices):
def validator(value):
if (value not in choices):
raise ValidationError('{} is not in {}'.format(value, list(choices)))
return validator | Return validator function that will check if ``value in choices``.
Args:
max_value (list, set, tuple): allowed choices for new validator | codesearchnet |
def parse_dtype_info(flags):
if (flags.dtype in (i[0] for i in DTYPE_MAP.values())):
return
try:
(flags.dtype, default_loss_scale) = DTYPE_MAP[flags.dtype]
except KeyError:
raise ValueError('Invalid dtype: {}'.format(flags.dtype))
flags.loss_scale = (flags.loss_scale or default_l... | Convert dtype string to tf dtype, and set loss_scale default as needed.
Args:
flags: namespace object returned by arg parser.
Raises:
ValueError: If an invalid dtype is provided. | codesearchnet |
def load_vocabulary(lang='en', type='wiki'):
    """Return a CountedVocabulary object.

    Args:
        lang (string): language code.
        type (string): data source type, e.g. ``wiki``.

    Returns:
        CountedVocabulary: vocabulary loaded from the located resource file.
    """
    # NOTE: parameter name ``type`` shadows the builtin but is part of the
    # public interface, so it is kept as-is.
    vocab_path = locate_resource('{}_vocab'.format(type), lang)
    return CountedVocabulary.from_vocabfile(vocab_path)
Args:
lang (string): language code.
type (string): wiki,... | codesearchnet |
def __init__(self, resolver_context):
    """Initializes a file system.

    Args:
        resolver_context (Context): resolver context.
    """
    super(BDEFileSystem, self).__init__(resolver_context)
    # Both are populated when the file system is opened; None until then.
    self._file_object = None
    self._bde_volume = None
Args:
resolver_context (Context): resolver context. | juraj-google-style |
def runTemplate(id, data={}):
conn = Qubole.agent()
path = str(id) + "/run"
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while not Command.is... | Run an existing Template and waits for the Result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure) | juraj-google-style |
def load_yaml(task: Task, file: str) -> Result:
    """Loads a yaml file.

    Arguments:
        task: task instance the plugin runs against.
        file: path to the YAML file to load.

    Returns:
        Result object whose ``result`` attribute holds the parsed contents
        of the file.
    """
    # Safe loader: plain Python objects only, no arbitrary object construction.
    parser = ruamel.yaml.YAML(typ='safe')
    with open(file, 'r') as stream:
        contents = parser.load(stream)
    return Result(host=task.host, result=contents)
Arguments:
        file: path to the YAML file to load
Examples:
Simple example with ``ordered_dict``::
> nr.run(task=load_yaml,
file="mydata.yaml")
Returns:
Result object with the following attributes set:
* result (``dict``): dictionary with the contents of the file | codesearchnet |
def check_connection(host='localhost', port=27017, username=None, password=None,
authdb=None, max_delay=1):
if username and password:
uri = ("mongodb:
.format(quote_plus(username), quote_plus(password), host, port, authdb))
log_uri = ("mongodb:
... | Check if a connection could be made to the mongo process specified
Args:
host(str)
port(int)
username(str)
password(str)
authdb (str): database to to for authentication
max_delay(int): Number of milliseconds to wait for connection
Returns:
bool: If connection could be established | juraj-google-style |
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format... | 3D Pooling.
Args:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_for... | github-repos |
def check_valid(spec):
    """Check that a device spec is valid.

    Args:
        spec: a string.

    Raises:
        An exception if the spec is invalid (raised by the parser).
    """
    # Parsing for side effect only; the parsed spec is discarded.
    DeviceSpec.from_string(spec)
Args:
spec: a string.
Raises:
An exception if the spec is invalid. | github-repos |
def _delete(self, url, data, scope):
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE` | juraj-google-style |
def transmit(self, payload, **kwargs):
    """Send a completion status call to Degreed using the client.

    Args:
        payload: The learner completion data payload to send to Degreed.
    """
    # Force the Degreed-specific routing keys, overriding any caller-supplied
    # values, before delegating to the generic transmitter.
    kwargs.update(
        app_label='degreed',
        model_name='DegreedLearnerDataTransmissionAudit',
        remote_user_id='degreed_user_email',
    )
    super(DegreedLearnerTransmitter, self).transmit(payload, **kwargs)
Args:
payload: The learner completion data payload to send to Degreed | juraj-google-style |
def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None):
t2t_env = rl_utils.setup_env(
hparams, batch_size=hparams.real_batch_size,
max_num_noops=hparams.max_num_noops
)
if which_epoch_data is not None:
if which_epoch_data == "last":
which_epoch_data = infer_last_epoch_num(d... | Load T2TGymEnv with data from one epoch.
Args:
hparams: hparams.
data_dir: data directory.
which_epoch_data: data from which epoch to load.
Returns:
env. | juraj-google-style |
def get_name_or_instance_id(self, with_id=False):
    """Returns the name of an instance if existent, else the instance id.

    Args:
        with_id (bool): Include the instance ID even if the name is found
            (default: False)

    Returns:
        Name and/or instance ID of the instance object
    """
    name = self.get_tag('Name', case_sensitive=False)
    # Fall back to the raw instance id when the tag is missing or blank.
    if not name or not name.value.strip():
        return self.id
    if with_id:
        return '{0} ({1})'.format(name.value, self.id)
    return name.value
Args:
with_id (bool): Include the instance ID even if the name is found (default: False)
Returns:
Name and/or instance ID of the instance object | codesearchnet |
def extract_numerics_alert(event):
value = event.summary.value[0]
debugger_plugin_metadata_content = None
if value.HasField("metadata"):
plugin_data = value.metadata.plugin_data
if plugin_data.plugin_name == constants.DEBUGGER_PLUGIN_NAME:
debugger_plugin_metadata_content = plugin_data.content
... | Determines whether a health pill event contains bad values.
A bad value is one of NaN, -Inf, or +Inf.
Args:
event: (`Event`) A `tensorflow.Event` proto from `DebugNumericSummary`
ops.
Returns:
An instance of `NumericsAlert`, if bad values are found.
`None`, if no bad values are found.
Raises:
ValueError: if the eve... | juraj-google-style |
def label_contains(node, triggers):
    """Determine if node's label contains any of the provided trigger words.

    Args:
        node(Node): CFG node to check.
        triggers(list[Union[Sink, Source]]): triggers whose words to look for.

    Returns:
        Iterable of TriggerNodes found. Can be multiple because several
        trigger words can occur in one node.
    """
    yield from (
        TriggerNode(trigger, node)
        for trigger in triggers
        if trigger.trigger_word in node.label
    )
Args:
node(Node): CFG node to check.
    triggers(list[Union[Sink, Source]]): list of triggers whose words to look for.
Returns:
Iterable of TriggerNodes found. Can be multiple because multiple
trigger_words can be in one node. | juraj-google-style |
def register_entity(self, entity_value, entity_type, alias_of=None):
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.... | Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show") | juraj-google-style |
def _convert_token_to_id(self, token, token_type='TOKEN_TIME') -> int:
return self.encoder.get(f'{token}_{token_type}', int(self.unk_token)) | Encodes the Midi tokens to transformer generated token ids.
Args:
token (`int`):
This denotes the token value.
token_type (`str`):
This denotes the type of the token. There are four types of midi tokens such as "TOKEN_TIME",
"TOKEN_VELOCITY", "TOKEN_NOTE" and "TOKEN_SPECIAL".
Returns:
`int`: returns the id of the tok... | github-repos |
def convert(self):
    """Converts a TensorFlow GraphDef based on instance variables.

    Returns:
        The converted data in serialized format.

    Raises:
        ValueError:
            No concrete function is specified.
            Multiple concrete functions are specified.
            Input shape is not specified.
            Invalid quantization parameters.
    """
    # Pure delegation: all conversion configuration lives on the instance.
    return super(TFLiteConverterV2, self).convert()
Returns:
The converted data in serialized format.
Raises:
ValueError:
No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters. | github-repos |
def _get_data_by_field(self, field_number):
if (not self.is_data_loaded):
self._import_data()
if (not (0 <= field_number < self._num_of_fields)):
raise ValueError(('Field number should be between 0-%d' % self._num_of_fields))
return self._data[field_number] | Return a data field by field number.
This is a useful method to get the values for fields that Ladybug
currently doesn't import by default. You can find list of fields by typing
EPWFields.fields
Args:
field_number: a value between 0 to 34 for different available epw fields.
Returns:
An annual Ladybug list | codesearchnet |
def add_channel(channel: EFBChannel):
    """Register the channel with the coordinator.

    Args:
        channel (EFBChannel): Channel to register.

    Raises:
        TypeError: if ``channel`` is not an EFBChannel instance.
    """
    global master, slaves
    # Guard clause: reject anything that is not a channel up front.
    if not isinstance(channel, EFBChannel):
        raise TypeError('Channel instance is expected')
    if channel.channel_type == ChannelType.Slave:
        slaves[channel.channel_id] = channel
    else:
        master = channel
Args:
channel (EFBChannel): Channel to register | codesearchnet |
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:
hpos_ids = torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)
wpos_ids = torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, ... | Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.
Args:
num_patches_height (int): Number of patches in the height dimension.
num_patches_width (int): Number of patches in the width dimension.
Returns:
torch.Tensor: Rotary positional embeddings for the given grid size. | github-repos |
def decode_function_result(self, function_name, data):
    """Return the function call result decoded.

    Args:
        function_name (str): One of the existing functions described in the
            contract interface.
        data (bin): The encoded result from calling `function_name`.

    Return:
        List[object]: The values returned by the call to `function_name`.
    """
    decode_types = self.function_data[function_name]['decode_types']
    return decode_abi(decode_types, data)
Args:
function_name (str): One of the existing functions described in the
contract interface.
data (bin): The encoded result from calling `function_name`.
Return:
List[object]: The values returned by the call to `function_name`. | codesearchnet |
def terminate_session(self, token):
    """Terminates the session token, logging the user out of all
    crowd-enabled services.

    Args:
        token: The session token.

    Returns:
        True: If session terminated
        None: If session termination failed
    """
    endpoint = '%s/session/%s' % (self.rest_url, token)
    response = self._delete(endpoint)
    # Preserve the True / None (not False) contract of the original API.
    return True if response.ok else None
from all crowd-enabled services.
Args:
token: The session token.
Returns:
True: If session terminated
None: If session termination failed | codesearchnet |
def inspect_container(self, container):
    """Identical to the `docker inspect` command, but only for containers.

    Args:
        container (str): The container to inspect

    Returns:
        (dict): Similar to the output of `docker inspect`, but as a
        single dict

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    url = self._url('/containers/{0}/json', container)
    # True → decode the JSON response body into a dict.
    return self._result(self._get(url), True)
Args:
container (str): The container to inspect
Returns:
(dict): Similar to the output of `docker inspect`, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
def create_transaction(self, to_account):
from_account = self.statement_import.bank_account
transaction = Transaction.objects.create()
Leg.objects.create(transaction=transaction, account=from_account, amount=(+ (self.amount * (- 1))))
Leg.objects.create(transaction=transaction, account=to_account, amoun... | Create a transaction for this statement amount and account, into to_account
This will also set this StatementLine's ``transaction`` attribute to the newly
created transaction.
Args:
to_account (Account): The account the transaction is into / out of.
Returns:
Transaction: The newly created (and committed) transaction... | codesearchnet |
def deps_from_import_graph(import_graph):
def make_module(filename):
return resolved_file_to_module(import_graph.provenance[filename])
def split_files(filenames):
stubs = []
sources = []
for f in filenames:
if _is_type_stub(f):
stubs.append(f)
... | Construct PytypeRunner args from an importlab.ImportGraph instance.
Kept as a separate function so PytypeRunner can be tested independently of
importlab.
Args:
import_graph: An importlab.ImportGraph instance.
Returns:
List of (tuple of source modules, tuple of direct deps) in dependency order. | github-repos |
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(UsernamePasswordCredential, self).read(input_stream, kmip_version=kmip_version)
local_stream = BytearrayStream(input_stream.read(self.length))
if self.is_tag_next(enums.Tags.USERNAME, local_stream):
self._username = primiti... | Read the data encoding the UsernamePasswordCredential struct and
decode it into its constituent parts.
Args:
input_stream (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which t... | codesearchnet |
def _to_backend_mesh(device_mesh):
    """Convert the DeviceMesh to Tensorflow backend specific Mesh.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `tf.dtensor.Mesh` instance.
    """
    # Pair each axis name with its size, preserving axis order.
    mesh_dims = [
        (axis_name, dim)
        for axis_name, dim in zip(device_mesh.axis_names, device_mesh.shape)
    ]
    return dtensor.create_distributed_mesh(
        mesh_dims=mesh_dims,
        local_devices=device_mesh.devices.flatten(),
    )
Args:
device_mesh: DeviceMesh instance to convert.
Returns:
A `tf.dtensor.Mesh` instance. | github-repos |
def preprocess(source):
    """Removes unnecessary line breaks and white spaces.

    Args:
        source (str): Input sentence, possibly containing HTML markup.

    Returns:
        Preprocessed sentence. (str)
    """
    # Strip markup by extracting only the text content of the fragment.
    fragment = html5lib.parseFragment(source)
    text = ET.tostring(fragment, encoding='utf-8', method='text').decode('utf-8')
    # Drop newlines entirely, then collapse runs of whitespace to one space.
    text = text.replace(u'\n', u'').strip()
    return re.sub(r'\s\s+', u' ', text)
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str) | juraj-google-style |
def build(self):
def _create_per_worker_dataset():
dataset = self._dataset_fn()
return dataset
per_worker_dataset = self._coordinator._create_per_worker_resources(_create_per_worker_dataset)
dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec
for dataset_remote_... | Trigger dataset creation on workers without creating an iterator.
Returns:
A PerWorkerValues object containing a tuple of RemoteValues, themselves
containing the built Dataset for each worker | github-repos |
def to_pytd_type(self, val: abstract.BaseValue) -> pytd.Type:
if val is self._ctx.consts.Any:
return pytd.AnythingType()
elif isinstance(val, abstract.Union):
return pytd_utils.JoinTypes((self.to_pytd_type(v) for v in val.options))
elif isinstance(val, abstract.PythonConstant):
retur... | Returns the type of the abstract value, as a pytd node.
For example, if the abstract value is:
PythonConstant(0)
then to_pytd_type() produces:
pytd.NamedType(int)
Args:
val: The abstract value. | github-repos |
def ReadFromDirectory(self, artifacts_reader, path, extension='yaml'):
    """Reads artifact definitions into the registry from files in a directory.

    This function does not recurse sub directories.

    Args:
        artifacts_reader (ArtifactsReader): an artifacts reader.
        path (str): path of the directory to read from.
        extension (Optional[str]): extension of the filenames to read.

    Raises:
        KeyError: if a duplicate definition is registered.
    """
    definitions = artifacts_reader.ReadDirectory(path, extension=extension)
    for artifact_definition in definitions:
        self.RegisterDefinition(artifact_definition)
This function does not recurse sub directories.
Args:
artifacts_reader (ArtifactsReader): an artifacts reader.
path (str): path of the directory to read from.
extension (Optional[str]): extension of the filenames to read.
Raises:
KeyError: if a ... | codesearchnet |
def git_merge(base, head, no_ff=False):
pretend = context.get('pretend', False)
branch = git.current_branch(refresh=True)
if branch.name != base and not pretend:
git_checkout(base)
args = []
if no_ff:
args.append('--no-ff')
log.info("Merging <33>{}<32> into <33>{}<3... | Merge *head* into *base*.
Args:
base (str):
The base branch. *head* will be merged into this branch.
head (str):
The branch that will be merged into *base*.
no_ff (bool):
If set to **True** it will force git to create merge commit. If set
to **False** (default) it will do a fast-forward merge if possible. | juraj-google-style |
def __init__(self, key=None, **kwargs):
    """Initializes a decrypter.

    Args:
        key (Optional[bytes]): key.
        kwargs (dict): keyword arguments depending on the decrypter.

    Raises:
        ValueError: when key is not set.
    """
    # Validate before touching any state so a bad key fails fast.
    if not key:
        raise ValueError('Missing key.')
    super(RC4Decrypter, self).__init__()
    self._rc4_cipher = ARC4.new(key)
Args:
key (Optional[bytes]): key.
kwargs (dict): keyword arguments depending on the decrypter.
Raises:
ValueError: when key is not set. | juraj-google-style |
def hessian(self, coordinates):
    """Compute the force-field Hessian for the given coordinates.

    Argument:
        | ``coordinates`` -- A numpy array with the Cartesian atom
          coordinates, with shape (N,3).

    Returns:
        | ``hessian`` -- A numpy array with the Hessian, with shape (3*N,
          3*N).
    """
    num_dof = coordinates.size
    result = numpy.zeros((num_dof, num_dof), float)
    # Each force-field term accumulates its contribution in place.
    for term in self.terms:
        term.add_to_hessian(coordinates, result)
    return result
Argument:
| ``coordinates`` -- A numpy array with the Cartesian atom
coordinates, with shape (N,3).
Returns:
| ``hessian`` -- A numpy array with the Hessian, with shape (3*N,
3*N). | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.