code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def golden_images(self):
if (not self.__golden_images):
self.__golden_images = GoldenImages(self.__connection)
return self.__golden_images | Gets the Golden Images API client.
Returns:
GoldenImages: | codesearchnet |
def next_generation(self, mut_rate=0, max_mut_amt=0, log_base=10):
if (self.__num_processes > 1):
process_pool = Pool(processes=self.__num_processes)
members = [m.get() for m in self.__members]
else:
members = self.__members
if (len(members) == 0):
raise Exception('Generation... | Generates the next population from a previously evaluated generation
Args:
mut_rate (float): mutation rate for new members (0.0 - 1.0)
max_mut_amt (float): how much the member is allowed to mutate
(0.0 - 1.0, proportion change of mutated parameter)
log_base (int): the higher this number, the more likely the first
Memb... | codesearchnet |
def closing(input_rasterfilename, times):
input_raster = RasterUtilClass.read_raster(input_rasterfilename)
closing_raster = input_raster
for i in range(times):
closing_raster = RasterUtilClass.raster_dilation(closing_raster)
for i in range(times):
closing_raster = RasterUtilClass.raster_... | Do closing.
Closing: Dilate firstly, then Erode.
Args:
input_rasterfilename: input original raster image filename.
times: Erode and Dilate times.
Returns:
closing_raster: raster image after close. | codesearchnet |
def transpose(self, name=None):
if any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot transpose a dilated convolution module.")
if any(p != self._conv_op_padding for p in self._padding):
raise base.NotSupportedError(
"Cannot tranpose a convolution using m... | Returns matching `Conv2DTranspose` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.name`.
Returns:
`Conv2DTranspose` module.
Raises:
base.NotSupportedError: If `rate` in any dimension > 1. | juraj-google-style |
def copy(self, destination):
destination_uri = self.repo.parse_uri(destination)
response = self.repo.api.http_request('COPY', self.uri, data=None, headers={'Destination':destination_uri.toPython()})
if response.status_code == 201:
return destination_uri
else:
raise Exception('HTTP %s, coul... | Method to copy resource to another location
Args:
destination (rdflib.term.URIRef, str): URI location to move resource
Returns:
(Resource) new, moved instance of resource | juraj-google-style |
def micros_to_timestamp(micros, timestamp):
seconds = long(micros / _MICROS_PER_SECOND)
micro_remainder = micros % _MICROS_PER_SECOND
timestamp.seconds = seconds
timestamp.nanos = micro_remainder * _NANOS_PER_MICRO | Convert microseconds from utc epoch to google.protobuf.timestamp.
Args:
micros: a long, number of microseconds since utc epoch.
timestamp: a google.protobuf.timestamp.Timestamp to populate. | juraj-google-style |
def warp(self, to_sref, dest=None, interpolation=gdalconst.GRA_NearestNeighbour):
if not hasattr(to_sref, 'ExportToWkt'):
to_sref = SpatialReference(to_sref)
dest_wkt = to_sref.ExportToWkt()
dtype = self[0].DataType
err_thresh = 0.125
vrt = ... | Returns a new reprojected instance.
Arguments:
to_sref -- spatial reference as a proj4 or wkt string, or a
SpatialReference
Keyword args:
dest -- filepath as str
interpolation -- GDAL interpolation type | juraj-google-style |
def register(self, table):
if table.table_type.is_system:
raise ValueError('Cannot add system table to catalog')
if not table.table_type.is_shared:
raise ValueError('Cannot add local table to catalog')
if table.is_substitute:
raise ValueError('Cannot ... | Adds a shared table to the catalog.
Args:
table (SymbolTable): A non-system, shared symbol table. | juraj-google-style |
def compose_q_update_vec(self, q_update_vec: torch.Tensor, normalize_quats: bool=True) -> Rotation:
quats = self.get_quats()
new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)
return Rotation(rot_mats=None, quats=new_quats, normalize_quats=normalize_quats) | Returns a new quaternion Rotation after updating the current object's underlying rotation with a quaternion
update, formatted as a [*, 3] tensor whose final three columns represent x, y, z such that (1, x, y, z) is the
desired (not necessarily unit) quaternion update.
Args:
q_update_vec:
A [*, 3] quaternion update ten... | github-repos |
def getToC(doc, simple=True):
def recurse(olItem, liste, lvl):
'Recursively follow the outline item chain and record item information in a list.'
while olItem:
if olItem.title:
title = olItem.title
else:
title = ' '
if (not olItem.... | Create a table of contents.
Args:
simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation. | codesearchnet |
def __init__(self, name, data=None, package_cls=None):
super(PackageMaker, self).__init__(data)
self.name = name
self.package_cls = package_cls or Package
self.installed_variants = []
self.skipped_variants = [] | Create a package maker.
Args:
name (str): Package name. | juraj-google-style |
def copy_sharding(from_tensor, to_tensor, use_sharding_op=False):
sharding = get_tensor_sharding(from_tensor)
if sharding is None:
return to_tensor
if isinstance(to_tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():
proto = xla_data... | Copies the a tensor's sharding to another.
Args:
from_tensor: Source tensor. Must be the sole output of an op.
to_tensor: the tensor the annotate with the copy.
use_sharding_op: whether to create a sharding op on `to_tensor`.
Returns:
A tensor with sharding annotation copied from `from_tensor`. | github-repos |
def __init__(self, items: Optional[Iterable[Any]]=None, *, value_spec: Optional[pg_typing.List]=None, onchange_callback: Optional[Callable[[Dict[utils.KeyPath, base.FieldUpdate]], None]]=None, allow_partial: bool=False, accessor_writable: bool=True, sealed: bool=False, root_path: Optional[utils.KeyPath]=None):
if v... | Constructor.
Args:
items: A optional iterable object as initial value for this list.
value_spec: Value spec that applies to this List.
onchange_callback: Callback when sub-tree has been modified.
allow_partial: Whether to allow unbound or partial fields. This takes
effect only when value_spec is not None.
accessor_wri... | github-repos |
def sym_distance(cls, q0, q1):
q = Quaternion.sym_log_map(q0, q1)
return q.norm | Quaternion symmetrized distance.
Find the intrinsic symmetrized geodesic distance between q0 and q1.
Params:
q0: the first quaternion
q1: the second quaternion
Returns:
A positive amount corresponding to the length of the symmetrized
geodesic curve connecting q0 to q1.
Note:
This formulation is more numerically sta... | codesearchnet |
def __init__(self, batch_size, key_depth, val_depth, memory_size,
sharpen_factor=1., name="neural_memory"):
self.name = name
self.batch_size = batch_size
self.key_depth = key_depth
self.val_depth = val_depth
self.memory_size = memory_size
self.sharpen_factor = sharpen_factor
... | Initialize the memory object.
Args:
batch_size: the batch size.
key_depth: the depth of the memory keys.
val_depth: the depth of the memory values.
memory_size: the number of items in the memory.
sharpen_factor: the sharpen_factor for addressing the memory.
name: the optional variable scope. | juraj-google-style |
def _get_token(request=None, allowed_auth_schemes=('OAuth', 'Bearer'), allowed_query_keys=('bearer_token', 'access_token')):
allowed_auth_schemes = _listlike_guard(allowed_auth_schemes, 'allowed_auth_schemes', iterable_only=True)
auth_header = os.environ.get('HTTP_AUTHORIZATION')
if auth_header:
for... | Get the auth token for this request.
Auth token may be specified in either the Authorization header or
as a query param (either access_token or bearer_token). We'll check in
this order:
1. Authorization header.
2. bearer_token query param.
3. access_token query param.
Args:
request: The current request, or None.
Re... | codesearchnet |
def parse_cscore(infile):
cscore_dict = {}
with open(infile, 'r') as f:
for ll in f.readlines():
if ll.lower().startswith('model1'):
l = ll.split()
cscore = l[1]
tmscore_full = l[2].split('+-')
tmscore = tmscore_full[0]
... | Parse the cscore file to return a dictionary of scores.
Args:
infile (str): Path to cscore
Returns:
dict: Dictionary of scores | codesearchnet |
def end_of(self, event_id, import_options=True):
event_id = str(event_id)
if (event_id in DatePickerDictionary.items):
linked_picker = DatePickerDictionary.items[event_id]
self.config['linked_to'] = linked_picker.config['id']
if import_options:
backup_moment_format = self.con... | Set Date-Picker as the end-date of a date-range.
Args:
- event_id (string): User-defined unique id for linking two fields
- import_options (bool): inherit options from start-date input,
default: TRUE | codesearchnet |
def compile_action_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]:
scope = self.action_precondition_scope(state, action)
preconds = []
with self.graph.as_default():
with tf.name_scope('action_preconditions'):
for p in self.rddl.domain.p... | Compiles the action preconditions given current `state` and `action` fluents.
Args:
state (Sequence[tf.Tensor]): The current state fluents.
action (Sequence[tf.Tensor]): The action fluents.
Returns:
A list of :obj:`rddl2tf.fluent.TensorFluent`. | codesearchnet |
def create_event_model(event):
if event['type'].startswith('task'):
factory = {
JobEventName.Started: JobStartedEvent,
JobEventName.Succeeded: JobSucceededEvent,
JobEventName.Stopped: JobStoppedEvent,
JobEventName.Aborted: JobAbortedEvent
}
... | Factory function that turns a celery event into an event object.
Args:
event (dict): A dictionary that represents a celery event.
Returns:
object: An event object representing the received event.
Raises:
JobEventTypeUnsupported: If an unsupported celery job event was received.
WorkerEventTypeUnsupported: If an unsup... | juraj-google-style |
def yaw_pitch_roll(self):
self._normalise()
yaw = np.arctan2((2 * ((self.q[0] * self.q[3]) - (self.q[1] * self.q[2]))), (1 - (2 * ((self.q[2] ** 2) + (self.q[3] ** 2)))))
pitch = np.arcsin((2 * ((self.q[0] * self.q[2]) + (self.q[3] * self.q[1]))))
roll = np.arctan2((2 * ((self.q[0] * self.q[1]) - (self.... | Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention
Returns:
yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, -pi/2]`
roll: rotation angle around the x''-ax... | codesearchnet |
def initial_state(self, batch_size, trainable=False):
init_state = tf.eye(self._mem_slots, batch_shape=[batch_size])
if (self._mem_size > self._mem_slots):
difference = (self._mem_size - self._mem_slots)
pad = tf.zeros((batch_size, self._mem_slots, difference))
init_state = tf.concat([in... | Creates the initial memory.
We should ensure each row of the memory is initialized to be unique,
so initialize the matrix to be the identity. We then pad or truncate
as necessary so that init_state is of size
(batch_size, self._mem_slots, self._mem_size).
Args:
batch_size: The size of the batch.
trainable: Whether th... | codesearchnet |
def Collect(self, top_frame):
frame = top_frame
top_line = self.breakpoint['location']['line']
breakpoint_frames = self.breakpoint['stackFrames']
try:
if 'expressions' in self.breakpoint:
self.breakpoint['evaluatedExpressions'] = [
self._CaptureExpression(top_f... | Collects call stack, local variables and objects.
Starts collection from the specified frame. We don't start from the top
frame to exclude the frames due to debugger. Updates the content of
self.breakpoint.
Args:
top_frame: top frame to start data collection. | juraj-google-style |
def make_call_types(f, globals_d):
arg_spec = getargspec(f)
args = [k for k in arg_spec.args if k != "self"]
defaults = {}
if arg_spec.defaults:
default_args = args[-len(arg_spec.defaults):]
for a, default in zip(default_args, arg_spec.defaults):
defaults[a] = de... | Make a call_types dictionary that describes what arguments to pass to f
Args:
f: The function to inspect for argument names (without self)
globals_d: A dictionary of globals to lookup annotation definitions in | juraj-google-style |
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
try:
thread = Thread(target=self.listen_forever,
args=(timeout_ms, exception_handler))
thread.daemon = True
self.sync_thread = thread
self.should_listen... | Start a listener thread to listen for events in the background.
Args:
timeout (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread. | juraj-google-style |
def __write_to_fil_light(self, filename_out, *args, **kwargs):
n_bytes = self.header[b'nbits'] / 8
with open(filename_out, "wb") as fileh:
fileh.write(generate_sigproc_header(self))
j = self.data
if n_bytes == 4:
np.float32(j.ravel()).tofil... | Write data to .fil file.
Args:
filename_out (str): Name of output file | juraj-google-style |
def _get_snippet_ctime(self, snip_name):
if snip_name not in self.snip_ctimes:
snippet = yaml_snippet_loader.YamlSnippetLoader.get_snippet_by_name(snip_name)
self.snip_ctimes[snip_name] = os.path.getctime(snippet.path)
return self.snip_ctimes[snip_name] | Returns and remembers (during this DevAssistant invocation) last ctime of given
snippet.
Calling ctime costs lost of time and some snippets, like common_args, are used widely,
so we don't want to call ctime bazillion times on them during one invocation.
Args:
snip_name: name of snippet to get ctime for
Returns:
ctime... | juraj-google-style |
def _GetWinevtRcDatabaseReader(self):
if ((not self._winevt_database_reader) and self._data_location):
database_path = os.path.join(self._data_location, self._WINEVT_RC_DATABASE)
if (not os.path.isfile(database_path)):
return None
self._winevt_database_reader = winevt_rc.WinevtRe... | Opens the Windows Event Log resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource
database reader or None. | codesearchnet |
class PoolerAnswerClass(nn.Module):
def __init__(self, config):
super().__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
logger.warning_once('[DEPRECATION W... | Compute SQuAD 2.0 answer class from classification and start tokens hidden states.
Args:
config ([`PretrainedConfig`]):
The config used by the model, will be used to grab the `hidden_size` of the model. | github-repos |
def __call__(self, func: T) -> T:
api_names_attr = API_ATTRS[self._api_name].names
api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
_, undecorated_func = tf_decorator.unwrap(func)
self.set_attr(undecorated_func, api_names_attr, self._names)
self.set_attr(undecorated_func, api_names_attr_v1, se... | Calls this decorator.
Args:
func: decorated symbol (function or class).
Returns:
The input function with _tf_api_names attribute set. | github-repos |
def create_pull_response(responses):
from google.cloud import pubsub
from google.protobuf import timestamp_pb2
res = pubsub.types.PullResponse()
for response in responses:
received_message = pubsub.types.ReceivedMessage()
message = received_message.message
message.data = response... | Create an instance of ``google.cloud.pubsub.types.ReceivedMessage``.
Used to simulate the response from pubsub.SubscriberClient().pull().
Args:
responses: list of ``PullResponseMessage``
Returns:
An instance of ``google.cloud.pubsub.types.PullResponse`` populated with
responses. | github-repos |
def get_reference(root):
reference = {}
elem = root.find('bibliographyLink')
if (elem is None):
raise MissingElementError('bibliographyLink')
ref_doi = elem.get('doi', None)
ref_key = elem.get('preferredKey', None)
if (ref_doi is not None):
try:
ref = crossref_api.wor... | Read reference info from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with reference information | codesearchnet |
def path_fraction_point(points, fraction):
seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)
return linear_interpolate(points[seg_id], points[seg_id + 1], offset) | Computes the point which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
0, 1, 2 correspoding to 3D cartesian coordinates
fraction: path length fraction (0 <= fraction <= 1)
Returns... | juraj-google-style |
def tail(self, n):
if n < 0:
n = max(0, len(self.index) + n)
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(1, -n).transpose(),
self.index[-n:],
self.columns,
self... | Returns the last n rows.
Args:
n: Integer containing the number of rows to return.
Returns:
DataManager containing the last n rows of the original DataManager. | juraj-google-style |
def _GetNumberOfDaysInCentury(self, year):
if (year < 0):
raise ValueError('Year value out of bounds.')
(year, _) = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524 | Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of (remaining) days in the century.
Raises:
ValueError: if the year value is out of bounds. | codesearchnet |
def GetDataDownloader(self, version=sorted(_SERVICE_MAP.keys())[(- 1)], server=None):
if (not server):
server = DEFAULT_ENDPOINT
return DataDownloader(self, version, server) | Creates a downloader for Ad Manager reports and PQL result sets.
This is a convenience method. It is functionally identical to calling
DataDownloader(ad_manager_client, version, server)
Args:
[optional]
version: A string identifying the Ad Manager version to connect to.
This defaults to what is currently the latest v... | codesearchnet |
def getmtime(self, path):
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except IOError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3) | Returns the modification time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the modification time of the fake file
in number of seconds since the epoch.
Raises:
OSError: if the file does not exist. | codesearchnet |
def parse_peddy_ped_check(lines):
ped_check = []
header = []
for i,line in enumerate(lines):
line = line.rstrip()
if i == 0:
header = line.lstrip('
else:
pair_info = dict(zip(header, line.split(',')))
pai... | Parse a .ped_check.csv file
Args:
lines(iterable(str))
Returns:
ped_check(list(dict)) | juraj-google-style |
def _decorate_run_options_for_profile(self, run_options):
run_options.trace_level = config_pb2.RunOptions.FULL_TRACE | Modify a RunOptions object for profiling TensorFlow graph execution.
Args:
run_options: (RunOptions) the modified RunOptions object. | github-repos |
def page(self, title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
if (((title is None) or (title.strip() == '')) and (pageid is None)):
raise ValueError('Either a title or a pageid must be specified')
elif title:
if auto_suggest:
temp_title = self.suggest(titl... | Get MediaWiki page based on the provided title or pageid
Args:
title (str): Page title
pageid (int): MediaWiki page identifier
auto-suggest (bool): **True:** Allow page title auto-suggest
redirect (bool): **True:** Follow page redirects
preload (bool): **True:** Load most page properties
Raises:
ValueError: when title... | codesearchnet |
def _calculate_page_index(index, data):
if (index > data['total_results']):
raise ValueError('index not in paged data')
page_length = len(data['results'])
return (((index | Determine the location of a given index in paged data.
Arguments:
index (:py:class:`int`): The overall index.
data: (:py:class:`dict`) The first page of data.
Returns:
:py:class:`tuple`: The location of that index, in the format
``(page, index_in_page)``. | codesearchnet |
def MemberVisible(component, name, member, class_attrs=None, verbose=False):
if isinstance(name, str) and name.startswith('__'):
return False
if verbose:
return True
if member is absolute_import or member is division or member is print_function:
return False
if isinstance(member,... | Returns whether a member should be included in auto-completion or help.
Determines whether a member of an object with the specified name should be
included in auto-completion or help text(both usage and detailed help).
If the member name starts with '__', it will always be excluded. If it
starts with only one '_', it... | github-repos |
def get_formal_type_parameter(self, t: str) -> 'BaseValue':
del t
return self.ctx.convert.unsolvable | Get the class's type for the type parameter.
Treating self as a class_mixin.Class, gets its formal type for the given
type parameter. For the real implementation, see
ParameterizedClass.get_formal_type_parameter.
Args:
t: The name of the type parameter.
Returns:
A formal type. | github-repos |
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):
is_successor = self._is_predecessor_of_possible_successor(
txn_id,
possible_successor)
in_different_batch = not self._is_in_same_batch(txn_id,
... | Decide if possible_successor should be replayed.
Args:
txn_id (str): Id of txn in failed batch.
possible_successor (str): Id of txn to possibly replay.
already_seen (list): A list of possible_successors that have
been replayed.
Returns:
(bool): If the possible_successor should be replayed. | juraj-google-style |
def _runExperimentImpl(options, model=None):
json_helpers.validate(options.privateOptions,
schemaDict=g_parsedPrivateCommandLineOptionsSchema)
experimentDir = options.experimentDir
descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
experimentDir)
expIface ... | Creates and runs the experiment
Args:
options: namedtuple ParseCommandLineOptionsResult
model: For testing: may pass in an existing OPF Model instance
to use instead of creating a new one.
Returns: reference to OPFExperiment instance that was constructed (this
is provided to aid with debugging) or None, if none was
c... | juraj-google-style |
def exhaustive_curie_check(self, ontology: pd.DataFrame, curie_predicate: str, curie_prefix: str, diff: bool=True) -> Tuple[list]:
(inside, outside) = ([], [])
curie_prefix = curie_prefix.replace(':', '')
header = (['Index'] + list(ontology.columns))
for row in ontology.itertuples():
row = {head... | All entities with conflicting curies gets a full diff to see if they belong
Args:
ontology: pandas DataFrame created from an ontology where the colnames are predicates
and if classes exist it is also thrown into a the colnames.
curie_predicate: usually in qname form and is the colname of the DataFrame
curie_prefix: No... | codesearchnet |
def _controller_buffer(self, port):
address = _LIB.Controller(self._env, port)
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
return np.frombuffer(buffer_, dtype='uint8') | Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data | juraj-google-style |
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[(str, TensorFluent)], batch_size: Optional[int]=None, noise: Optional[List[tf.Tensor]]=None) -> TensorFluent:
etype = expr.etype
args = expr.args
if (len(args) == 1):
etype2op = {'+': (lambda x: x), '-': (lambda x: (- x))}
... | Compile an arithmetic expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2... | codesearchnet |
def slice_hidden(self, x):
x_sliced = tf.reshape(x, shape=[(- 1), self.hparams.num_blocks, self.hparams.block_dim])
return x_sliced | Slice encoder hidden state into block_dim.
Args:
x: Encoder hidden state of shape [-1, hidden_size].
Returns:
Sliced states of shape [-1, num_blocks, block_dim]. | codesearchnet |
def floor(x):
if any_symbolic_tensors((x,)):
return Floor().symbolic_call(x)
return backend.numpy.floor(x) | Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that `i <= x`.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise floor of `x`. | github-repos |
def get(cls, issue_id):
res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)
return (cls(res) if res else None) | Returns the class object identified by `issue_id`
Args:
issue_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None | codesearchnet |
def _ParseCachedEntryVista(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(
value_data[cached_entry_offset:], cached_entry_offset,
self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise error... | Parses a Windows Vista cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed. | juraj-google-style |
def get(cls, session, team_id):
return cls(
'/teams/%d.json' % team_id,
singleton=True,
session=session,
) | Return a specific team.
Args:
session (requests.sessions.Session): Authenticated session.
team_id (int): The ID of the team to get.
Returns:
helpscout.models.Person: A person singleton representing the team,
if existing. Otherwise ``None``. | juraj-google-style |
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params) | Lists all jobs that you started in the specified project. Job information is available for a six month period after creation. The job list is sorted in reverse chronological order, by job creation time. Requires the Can View project role, or the Is Owner project role if you set the allUsers property.
Args:
request: (B... | github-repos |
def preprocess(source):
doc = html5lib.parseFragment(source)
source = ET.tostring(doc, encoding='utf-8', method='text').decode('utf-8')
source = source.replace(u'\n', u'').strip()
source = re.sub('\\s\\s+', u' ', source)
return source | Removes unnecessary break lines and white spaces.
Args:
source (str): Input sentence.
Returns:
Preprocessed sentence. (str) | codesearchnet |
def _parse_flowcontrol_send(self, config):
value = 'off'
match = re.search(r'flowcontrol send (\w+)$', config, re.M)
if match:
value = match.group(1)
return dict(flowcontrol_send=value) | Scans the config block and returns the flowcontrol send value
Args:
config (str): The interface config block to scan
Returns:
dict: Returns a dict object with the flowcontrol send value
retrieved from the config block. The returned dict object
is intended to be merged into the interface resource dict | juraj-google-style |
def time_zones_for_number(numobj):
ntype = number_type(numobj)
if ntype == PhoneNumberType.UNKNOWN:
return _UNKNOWN_TIME_ZONE_LIST
elif not is_number_type_geographical(ntype, numobj.country_code):
return _country_level_time_zones_for_number(numobj)
return time_zones_for_geographical... | As time_zones_for_geographical_number() but explicitly checks the
validity of the number passed in.
Arguments:
numobj -- a valid phone number for which we want to get the time zones to which it belongs
Returns a list of the corresponding time zones or a single element list with the default
unknown time zone if no othe... | juraj-google-style |
def build_chain(self, source, chain):
for group in WalkByGroup(source, (chain.order + 1)):
pre = group[:(- 1)]
res = group[(- 1)]
if (pre not in chain.content):
chain.content[pre] = {res: 1}
elif (res not in chain.content[pre]):
chain.content[pre][res] = 1
... | Build markov chain from source on top of existin chain
Args:
source: iterable which will be used to build chain
chain: MarkovChain in currently loaded shelve file that
will be extended by source | codesearchnet |
def _GetEntries(self, paths, max_entries, iterator_from_file, is_sequence=False):
entries = {}
index = 0
for filepath in paths:
reader = iterator_from_file(filepath)
for record in reader:
if is_sequence:
sequence_example = tf.train.SequenceExample.FromString(recor... | Extracts examples into a dictionary of feature values.
Args:
paths: A list of the paths to the files to parse.
max_entries: The maximum number of examples to load.
iterator_from_file: A method that takes a file path string and returns an
iterator to the examples in that file.
is_sequence: True if the input data from '... | codesearchnet |
def SetDecodedStreamSize(self, decoded_stream_size):
if self._is_open:
raise IOError('Already open.')
if (decoded_stream_size < 0):
raise ValueError('Invalid decoded stream size: {0:d} value out of bounds.'.format(decoded_stream_size))
self._decoded_stream_size = decoded_stream_size | Sets the decoded stream size.
This function is used to set the decoded stream size if it can be
determined separately.
Args:
decoded_stream_size (int): size of the decoded stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the d... | codesearchnet |
def _might_have_parameter(fn_or_cls, arg_name):
if inspect.isclass(fn_or_cls):
fn = _find_class_construction_fn(fn_or_cls)
else:
fn = fn_or_cls
while hasattr(fn, '__wrapped__'):
fn = fn.__wrapped__
arg_spec = _get_cached_arg_spec(fn)
if six.PY3:
if arg_spec.varkw:
return True
ret... | Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.
Specifically, this means that `fn_or_cls` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn_or_cls: The function or class to check.
arg_name: The name fo the parameter.
Returns:
Whether `arg_name` might be a valid a... | juraj-google-style |
def get_sv_variants(self, chromosome=None, end_chromosome=None, sv_type=None,
pos=None, end=None):
query = {}
if chromosome:
query['chrom'] = chromosome
if end_chromosome:
query['end_chrom'] = end_chromosome
if sv_type:
... | Return all structural variants in the database
Args:
chromosome (str)
end_chromosome (str)
sv_type (str)
pos (int): Left position of SV
end (int): Right position of SV
Returns:
variants (Iterable(Variant)) | juraj-google-style |
class IntLayerNorm(nn.Module):
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):
super().__init__()
self.normalized_shape = normalized_shape
self.eps = eps
self.weight = nn.Parameter(torch.zeros(normalized_shape))
self.bias = nn... | Quantized version of `torch.nn.LayerNorm`. Adds quantization-specific arguments on top of `torch.nn.LayerNorm`.
Args:
output_bit (`int`, *optional*, defaults to `8`):
Bitwidth for the layer output activation.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether or not the layer is quantized.
force_dequant (`s... | github-repos |
def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else self._run_key_to_original_graphs)
if (not (run_key in graph_dict)):
graph_dict[run_key] = dict()
graph_dict[run_key][tf.compat.as_str(device_name)] = debug_graphs_helper.Debug... | Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops. | codesearchnet |
def _determine_best_metric(self, metrics, trial):
is_new_best_metric = False
if self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith('eval_'):
metric_to_check = f'eval_{metric_to_check}'
try:
... | Determine if the model should be saved based on the evaluation metrics.
Returns:
bool: True if a new best metric was found, else False | github-repos |
def _ParseAndComputePenalties(self, code, dumptree=False):
  """Parse source code and annotate the tree with split penalties.

  Arguments:
    code: code to parse, as a string.
    dumptree: if True, dump the penalty-annotated pytree to stderr.
      Useful for debugging.

  Returns:
    The parsed pytree with split penalties assigned.
  """
  parse_tree = pytree_utils.ParseCodeToTree(code)
  split_penalty.ComputeSplitPenalties(parse_tree)
  if dumptree:
    # Debug aid: show the annotated tree on stderr.
    pytree_visitor.DumpPyTree(parse_tree, target_stream=sys.stderr)
  return parse_tree
Arguments:
code: code to parse as a string
dumptree: if True, the parsed pytree (after penalty assignment) is dumped
to stderr. Useful for debugging.
Returns:
Parse tree. | github-repos |
def t0(self):
  """Absolute timestamp of the first dumped tensor across all devices.

  Returns:
    (`int`) absolute timestamp of the first dumped tensor, in microseconds.
  """
  first_timestamp = self._t0
  return first_timestamp
Returns:
(`int`) absolute timestamp of the first dumped tensor, in microseconds. | github-repos |
def write(self, default: bool=False):
none_type = type(None)
if default:
ordered_vals = ['query', 'subject', 'identity', 'length',
'mismatches', 'gaps', 'query_start', 'query_end',
'subject_start', 'subject_end', 'evalue'... | Restore B6/M8 entry to original format
Args:
default (bool): output entry in default BLAST+ B6 format
Returns:
str: properly formatted string containing the B6/M8 entry | juraj-google-style |
def last_metric_eval(multiplexer, session_name, metric_name):
try:
(run, tag) = run_tag_from_session_and_metric(session_name, metric_name)
tensor_events = multiplexer.Tensors(run=run, tag=tag)
except KeyError as e:
raise KeyError(("Can't find metric %s for session: %s. Underlying error m... | Returns the last evaluations of the given metric at the given session.
Args:
multiplexer: The EventMultiplexer instance allowing access to
the exported summary data.
session_name: String. The session name for which to get the metric
evaluations.
metric_name: api_pb2.MetricName proto. The name of the metric to use.
Re... | codesearchnet |
def call_api(self, method_type, method_name, valid_status_codes, resource, data, uid, **kwargs):
url = resource.get_resource_url(resource, base_url=self.Meta.base_url)
if (method_type in SINGLE_RESOURCE_METHODS):
if ((not uid) and (not kwargs)):
raise MissingUidException
url = resour... | Make HTTP calls.
Args:
method_type: The HTTP method
method_name: The name of the python method making the HTTP call
valid_status_codes: A tuple of integer status codes
deemed acceptable as response statuses
resource: The resource class that will be generated
data: The post data being sent.
uid: The unique identifier o... | codesearchnet |
def fit_cosine_function(wind):
wind_daily = wind.groupby(wind.index.date).mean()
wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values)
df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')
x = np.array([df.daily, df.index.hour])
(... | fits a cosine function to observed hourly windspeed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function | codesearchnet |
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str='input_ids') -> None:
tf.debugging.assert_less(tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding layer's input dimension... | `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning
zeros instead. This function adds a check against that dangerous silent behavior.
Args:
tensor (`tf.Tensor`): The tensor of indices to check.
embed_dim (`int`): The embedding dimension.
tensor_name (`str`, ... | github-repos |
def _list_certs(certificate_store='My'):
ret = dict()
blacklist_keys = ['DnsNameList', 'Thumbprint']
ps_cmd = ['Get-ChildItem',
'-Path', r"'Cert:\LocalMachine\{0}'".format(certificate_store),
'|',
'Select-Object DnsNameList, SerialNumber, Subject, Thumbprint, ... | List details of available certificates in the LocalMachine certificate
store.
Args:
certificate_store (str): The name of the certificate store on the local
machine.
Returns:
dict: A dictionary of certificates found in the store | juraj-google-style |
def set_render_option(self, render_option):
  """Sets the rendering option for the pipeline graph.

  Args:
    render_option: (str) decides how the pipeline graph is rendered.
      See display.pipeline_graph_renderer for available options.
  """
  # Plain attribute assignment; the renderer reads this later.
  self._render_option = render_option
Args:
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options. | github-repos |
def module_selected(self, module_name, module_ui):
if self.current_button == self.module_buttons[module_name]:
return
self.module_buttons[module_name].config(bg="
if self.current_button is not None:
self.current_button.config(bg="white")
self.current_but... | Called when a module is selected
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI | juraj-google-style |
def set_attr_text(self, attr_key, attr_val, el_idx=0):
    """Set the value of the selected attribute of the selected element.

    Args:
        attr_key (str): Name of attribute for which to search.
        attr_val (str): Text to set for the attribute.
        el_idx (int): Index of element to use when multiple sibling
            elements share the same name.
    """
    element = self.get_element_by_attr_key(attr_key, el_idx)
    element.attrib[attr_key] = attr_val
Args:
attr_key : str
Name of attribute for which to search
attr_val : str
Text to set for the attribute.
el_idx : int
Index of element to use in the event that there are multiple sibling
elements with the same name. | codesearchnet |
async def wait_done(self) -> int:
    """Coroutine to wait for subprocess run completion.

    Returns:
        The exit code of the subprocess.

    Raises:
        SublemonLifetimeError: If the subprocess exits with a `None`
            exit code (abnormal termination).
    """
    await self._done_running_evt.wait()
    exit_code = self._exit_code
    if exit_code is None:
        raise SublemonLifetimeError(
            'Subprocess exited abnormally with `None` exit code')
    return exit_code
Returns:
The exit code of the subprocess. | codesearchnet |
def set_notify_dispatch_request(self, notify_dispatch_request, *args):
    """Set function to call just before requests are dispatched.

    Args:
        notify_dispatch_request (callable): called with the request as a
            single argument just before the request is dispatched.
        *args: extra positional arguments stored and later passed through
            to the callback machinery.
    """
    # Stash the extra args first; order of these assignments is immaterial.
    self._notify_args = args
    self._notify_dispatch_request = notify_dispatch_request
Args:
notify_dispatch_request (callable): function will be called
with request as single arg just before request is dispatched | juraj-google-style |
def Write(self, schedule, output_file):
root = ET.Element('kml')
root.attrib['xmlns'] = 'http:
doc = ET.SubElement(root, 'Document')
open_tag = ET.SubElement(doc, 'open')
open_tag.text = '1'
self._CreateStopsFolder(schedule, doc)
if self.split_routes:
route_types = set()
... | Writes out a feed as KML.
Args:
schedule: A transitfeed.Schedule object containing the feed to write.
output_file: The name of the output KML file, or file object to use. | juraj-google-style |
def cumulative_distribution(self, X):
    """Cumulative distribution function for the gaussian distribution.

    Arguments:
        X: `np.ndarray` of shape (n, 1).

    Returns:
        np.ndarray: Cumulative density for X.
    """
    self.check_fit()
    loc, scale = self.mean, self.std
    return norm.cdf(X, loc=loc, scale=scale)
Arguments:
X: `np.ndarray` of shape (n, 1).
Returns:
np.ndarray: Cumulative density for X. | codesearchnet |
def update_dynamic_gene_list(self, case, hgnc_symbols=None, hgnc_ids=None, phenotype_ids=None, build='37'):
dynamic_gene_list = []
res = []
if hgnc_ids:
LOG.info('Fetching genes by hgnc id')
res = self.hgnc_collection.find({'hgnc_id': {'$in': hgnc_ids}, 'build': build})
elif hgnc_symbols... | Update the dynamic gene list for a case
Adds a list of dictionaries to case['dynamic_gene_list'] that looks like
{
hgnc_symbol: str,
hgnc_id: int,
description: str
}
Arguments:
case (dict): The case that should be updated
hgnc_symbols (iterable): A list of hgnc_symbols
hgnc_ids (iterable): A list of hgnc_ids
Return... | codesearchnet |
def set_properties(self, property_dict):
    """Sets a dictionary of properties on this entity.

    Args:
        property_dict: A map from property name to value. See
            :class:`google.cloud.datastore.entity.Entity` documentation for
            allowed values.
    """
    # Merge the incoming mapping into the existing properties, key by key.
    for name, value in property_dict.items():
        self.properties[name] = value
Args:
property_dict: A map from property name to value. See
:class:`google.cloud.datastore.entity.Entity` documentation for allowed
values. | github-repos |
def int64_counter(urn, metric, ptransform=None, pcollection=None, labels=None) -> metrics_pb2.MonitoringInfo:
labels = labels or {}
labels.update(create_labels(ptransform=ptransform, pcollection=pcollection))
if isinstance(metric, int):
metric = coders.VarIntCoder().encode(metric)
return create_... | Return the counter monitoring info for the specifed URN, metric and labels.
Args:
urn: The URN of the monitoring info/metric.
metric: The payload field to use in the monitoring info or an int value.
ptransform: The ptransform id used as a label.
pcollection: The pcollection id used as a label. | github-repos |
def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu):
assert image_shape[3] == kernel_shape[2]
np.random.seed(1)
image = np.random.random_sample(image_shape).astype(np.float32)
kernel = np.random.random_sample(kernel_shape).astype(np.float32)
strides = [1]... | Verifies the gradients of the erosion function.
Args:
image_shape: Input shape, [batch, in_height, in_width, channels].
kernel_shape: Filter shape, [filter_height, filter_width, channels].
strides: Output strides, specified as [stride_height, stride_width].
rates: Atrous rates, specified as [rate_height, rate_width].
... | github-repos |
def prune_candidates(candidates):
pruned = []
for (first, second) in candidates:
if (first.__class__ is Linearization):
nodes1 = first.curve.nodes
else:
nodes1 = first.nodes
if (second.__class__ is Linearization):
nodes2 = second.curve.nodes
el... | Reduce number of candidate intersection pairs.
.. note::
This is a helper for :func:`_all_intersections`.
Uses more strict bounding box intersection predicate by forming the
actual convex hull of each candidate curve segment and then checking
if those convex hulls collide.
Args:
candidates (List): An iterable of pa... | codesearchnet |
def get_plot(self, ylim=None, units='thz'):
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d], [(data['frequency'][d][i][j] * u.factor)... | Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1. | codesearchnet |
def parse_args(cmd_args, is_script=False):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=NewlinesHelpFormatter,
epilog=CODES_TABLE
)
if is_script:
parser.add_argument(
"files",
metavar="FILES",
nargs="*",... | Parses a list of command line arguments into a ValidationOptions object.
Args:
cmd_args (list of str): The list of command line arguments to be parsed.
is_script: Whether the arguments are intended for use in a stand-alone
script or imported into another tool.
Returns:
Instance of ``ValidationOptions`` | juraj-google-style |
def __str__(self, talker='GP'):
if not len(talker) == 2:
raise ValueError('Talker ID must be two characters %r' % talker)
data = ['%sGLL' % talker]
data.extend(nmea_latitude(self.latitude))
data.extend(nmea_longitude(self.longitude))
data.append('%s.%02i' % (... | Pretty printed position string.
Args:
talker (str): Talker ID
Returns:
str: Human readable string representation of ``Position`` object | juraj-google-style |
def putfile(self, filepath, buildroot, metahash):
def gen_obj_path(filename):
filehash = util.hash_file(filepath).hexdigest()
return filehash, os.path.join(self.obj_cachedir, filehash[0:2],
filehash[2:4], filehash)
filepath_rela... | Put a file in the cache.
Args:
filepath: Path to file on disk.
buildroot: Path to buildroot
buildrule: The rule that generated this file.
metahash: hash object | juraj-google-style |
def SignMessage(self, message, script_hash):
    """Sign a message with the key matching a specified script hash.

    Args:
        message (str): a hex encoded message to sign.
        script_hash (UInt160): a bytearray (len 20).

    Returns:
        tuple: (signed message (str), public key of the signing keypair)
    """
    keypair = self.GetKeyByScriptHash(script_hash)
    private_key = bytes(keypair.PrivateKey)
    signature = Crypto.Default().Sign(message, private_key)
    return signature, keypair.PublicKey
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message | juraj-google-style |
def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name='decoder'):
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope(('layer_%d' % layer)):
with tf.variable_scope('self_attention')... | A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensors | codesearchnet |
def nrows(self):
  """Returns the number of rows created by this `RowPartition`.

  Returns:
    scalar integer Tensor
  """
  if self._nrows is not None:
    # A cached/precomputed row count takes precedence.
    return self._nrows
  static_nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)
  if static_nsplits.value is None:
    # Shape unknown statically: compute len(row_splits) - 1 dynamically.
    return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1
  return constant_op.constant(static_nsplits.value - 1, dtype=self.dtype)
Returns:
scalar integer Tensor | github-repos |
def __init__(self, site1, site2):
    """Initializes a covalent bond between two sites.

    Args:
        site1 (Site): First site.
        site2 (Site): Second site.
    """
    self.site1, self.site2 = site1, site2
Args:
site1 (Site): First site.
site2 (Site): Second site. | juraj-google-style |
def dumps(self, with_defaults=False):
    """Generate a string representing all the configuration values.

    Args:
        with_defaults (bool): if ``True``, values of items with no custom
            values will be included in the output if they have a default
            value set.
    """
    writer = self._rw
    return writer.dump_config_to_string(self._config,
                                        with_defaults=with_defaults)
Args:
with_defaults (bool): if ``True``, values of items with no custom values will be included in the output
if they have a default value set. | juraj-google-style |
def ReconcileShadow(self, store_type):
  """Verify that entries that claim to use shadow files have a shadow entry.

  If the entries of the non-shadowed file indicate that a shadow file is
  used, check that there is actually an entry for that file in shadow.

  Args:
    store_type: The type of password store that should be used (e.g.
      /etc/shadow or /etc/gshadow)
  """
  for name, entry in iteritems(self.entry):
    if entry.pw_entry.store != store_type:
      continue
    shadow_entry = self.shadow.get(name)
    if shadow_entry is None:
      # Claimed shadow storage but no shadow record exists; flag it.
      entry.pw_entry.store = "UNKNOWN"
    else:
      entry.pw_entry = shadow_entry
If the entries of the non-shadowed file indicate that a shadow file is used,
check that there is actually an entry for that file in shadow.
Args:
store_type: The type of password store that should be used (e.g.
/etc/shadow or /etc/gshadow) | juraj-google-style |
def register_recipe(cls, recipe):
    """Registers a dftimewolf recipe.

    Args:
      recipe: imported python module representing the recipe.
    """
    contents = recipe.contents
    # Keyed by the recipe's declared name; stores contents, args and docstring.
    cls._recipe_classes[contents['name']] = (
        contents, recipe.args, recipe.__doc__)
Args:
recipe: imported python module representing the recipe. | juraj-google-style |
def call(self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, training: Optional[bool]=False, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False) -> Tup... | Args:
hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`tf.Tensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`tf.Tensor`, *optional*): mask for attention h... | github-repos |
def read_bit(self, registeraddress, functioncode=2):
    """Read one bit from the slave.

    Args:
        * registeraddress (int): The slave register address (use decimal
          numbers, not hex).
        * functioncode (int): Modbus function code. Can be 1 or 2.

    Returns:
        The bit value 0 or 1 (int).

    Raises:
        ValueError, TypeError, IOError
    """
    # Only Modbus function codes 1 and 2 read discrete bits.
    _checkFunctioncode(functioncode, [1, 2])
    bit_value = self._genericCommand(functioncode, registeraddress)
    return bit_value
Args:
* registeraddress (int): The slave register address (use decimal numbers, not hex).
* functioncode (int): Modbus function code. Can be 1 or 2.
Returns:
The bit value 0 or 1 (int).
Raises:
ValueError, TypeError, IOError | juraj-google-style |
def linear_quantize(input, scale, zero_point, inplace=False):
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
else:
scale = sc... | Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
Args:
input (`torch.Tensor`):
Single-precision input tensor to be quantized.
scale (`torch.Tensor`):
Scaling factor for quantization.
zero_pint (`torch.Tensor`):
Shift for quantization.
inplace (`bool`, *optional*, defaults... | github-repos |
def automatic_density_by_vol(structure, kppvol, force_gamma=False):
    """Returns an automatic Kpoint object based on a structure and a kpoint
    density per inverse Angstrom^3 of reciprocal cell.

    Algorithm:
        Same as automatic_density()

    Args:
        structure (Structure): Input structure
        kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
        force_gamma (bool): Force a gamma centered mesh

    Returns:
        Kpoints
    """
    recip_volume = structure.lattice.reciprocal_lattice.volume
    # Convert the per-volume density into a per-atom density (kppa).
    kppa = kppvol * recip_volume * structure.num_sites
    return Kpoints.automatic_density(structure, kppa, force_gamma=force_gamma)
force_gamma=force_gamma) | Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
R... | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.