code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _GetConnection(self):
    """Retrieve a pre-locked connection from the Pool.

    Polls the pool once per second until a connection is handed out.

    @author: Nick Verbeck
    @since: 9/7/2008
    """
    while self.conn is None:
        candidate = Pool().GetConnection(self.connInfo)
        if candidate is not None:
            self.conn = candidate
        else:
            time.sleep(1)
def properties(self):
    """Return the schema for particular properties of the object.

    Raises:
        SchemaError: if the "properties" value is not a dict.
    """
    value = self._schema.get("properties", {})
    if isinstance(value, dict):
        return value
    raise SchemaError(
        "properties value {0!r} is not an object".format(value))
def batch(self, requests):
for request in requests:
if 'body' in request:
request['body'] = urlencode(request['body'])
def _grouper(complete_list, n=1):
for i in range(0, len(complete_list), n):
yield complete_list[i:i + n]
responses = []
... | Make a batch request.
:param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
Yields a list of responses and/or exceptions. |
def from_series(cls, series, offset=0):
    """Create and return a Series from an existing raccoon Series.

    :param series: raccoon Series to copy
    :param offset: offset value; must be provided as there is no
        equivalent attribute on the source Series
    :return: Series
    """
    init_kwargs = dict(
        data=series.data,
        index=series.index,
        data_name=series.data_name,
        index_name=series.index_name,
        sort=series.sort,
        offset=offset,
    )
    return cls(**init_kwargs)
def post(self, url: StrOrURL,
         *, data: Any=None, **kwargs: Any) -> '_RequestContextManager':
    """Perform an HTTP POST request."""
    request_coro = self._request(hdrs.METH_POST, url, data=data, **kwargs)
    return _RequestContextManager(request_coro)
def detect_images_and_galleries(generators):
for generator in generators:
if isinstance(generator, ArticlesGenerator):
for article in itertools.chain(generator.articles, generator.translations, generator.drafts):
detect_image(generator, article)
detect_gallery(gen... | Runs generator on both pages and articles. |
def abort_job(self, job_id):
    """Abort an existing job.

    When a job is aborted, no more records are processed. Changes to
    data may already have been committed and aren't rolled back.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: abort response as xml (raises on HTTP error status)
    """
    url = self._get_abort_job_url(job_id)
    response = requests.post(
        url,
        headers=self._get_abort_job_headers(),
        data=self._get_abort_job_xml(),
    )
    response.raise_for_status()
    return response
def gaussfill(dem, size=3, newmask=None):
    """Gaussian filter with filling.

    Masked cells take the Gaussian-smoothed value; unmasked cells keep
    their original values. Optionally re-apply ``newmask`` at the end.
    """
    filled = gauss_fltr_astropy(dem, size=size)
    valid = ~dem.mask
    filled[valid] = dem[valid]
    if newmask is not None:
        filled = np.ma.array(filled, mask=newmask)
    return filled
def from_fits(cls, filename):
    """Load a MOC from a FITS file.

    The specified FITS file must store the MOC (i.e. the list of
    HEALPix cells it contains) in a binary HDU table with a 'UNIQ'
    column.

    Parameters
    ----------
    filename : str
        The path to the FITS file.

    Returns
    -------
    result : `~mocpy.moc.MOC`
        The loaded MOC.
    """
    table = Table.read(filename)
    uniq = table['UNIQ']
    # Each NUNIQ value u covers the half-open interval [u, u + 1).
    intervals = np.vstack((uniq, uniq + 1)).T
    nuniq_interval_set = IntervalSet(intervals)
    interval_set = IntervalSet.from_nuniq_interval_set(nuniq_interval_set)
    return cls(interval_set)
def _cryptography_cipher(key, iv):
    """Build a cryptography TripleDES Cipher object.

    :param bytes key: Encryption key
    :param bytes iv: Initialization vector
    :returns: TripleDES Cipher instance
    :rtype: cryptography.hazmat.primitives.ciphers.Cipher
    """
    algorithm = algorithms.TripleDES(key)
    cbc_mode = modes.CBC(iv)
    return Cipher(algorithm=algorithm, mode=cbc_mode,
                  backend=default_backend())
def build_event_graph(graph, tree, node):
if node_key(node) in graph:
return
type = get_type(node)
text = get_text(node)
label = '%s (%s)' % (type, text)
graph.add_node(node_key(node), type=type, label=label, text=text)
args = get_args(node)
for arg_role, (arg_id, arg_tag) in args.it... | Return a DiGraph of a specific event structure, built recursively |
def is_ome(self):
    """Return True if the page contains OME-XML in its ImageDescription tag."""
    if self.index > 1:
        return False
    desc = self.description
    if not desc:
        return False
    # OME-XML starts with an XML declaration and ends with the </OME> root tag.
    return desc[:14] == '<?xml version=' and desc[-6:] == '</OME>'
def _get_parameter(self, name, eopatch):
if hasattr(self, name) and getattr(self, name) is not None:
return getattr(self, name)
if name == 'bbox' and eopatch.bbox:
return eopatch.bbox
if name in eopatch.meta_info:
return eopatch.meta_info[name]
if name... | Collects the parameter either from initialization parameters or from EOPatch |
def report_message(report):
    """Build the error-report message body.

    Expects 'archive', 'image' and 'container_id' keys in ``report``.
    """
    parts = [
        'Error: return code != 0\n\n',
        'Archive: {}\n\n'.format(report['archive']),
        'Docker image: {}\n\n'.format(report['image']),
        'Docker container: {}\n\n'.format(report['container_id']),
    ]
    return ''.join(parts)
def predict(self, x):
if self._is_leaf():
d1 = self.predict_initialize['count_dict']
d2 = count_dict(self.Y)
for key, value in d1.iteritems():
if key in d2:
d2[key] += value
else:
d2[key] = value
... | Make prediction recursively. Use both the samples inside the current
node and the statistics inherited from parent. |
def generate_cert(domain):
    """Generate an icinga2 client certificate and key.

    Runs::

        icinga2 pki new-cert --cn domain.tld --key /etc/icinga2/pki/domain.tld.key --cert /etc/icinga2/pki/domain.tld.crt

    CLI Example:

    .. code-block:: bash

        salt '*' icinga2.generate_cert domain.tld
    """
    cmd = [
        "icinga2", "pki", "new-cert",
        "--cn", domain,
        "--key", "{0}{1}.key".format(get_certs_path(), domain),
        "--cert", "{0}{1}.crt".format(get_certs_path(), domain),
    ]
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def find(cls, api_key=None, fetch_all=True, endpoint=None, maximum=None,
**kwargs):
exclude = kwargs.pop('exclude', None)
if isinstance(exclude, six.string_types):
exclude = [exclude, ]
query_params = cls.translate_query_params(**kwargs)
if endpoint is None:
... | Find some entities from the API endpoint.
If no api_key is provided, the global api key will be used.
If fetch_all is True, page through all the data and find every record
that exists.
If add_headers is provided (as a dict) use it to add headers to the
HTTP request, eg.
... |
def create_template(self, s, provider_name=None):
    """Create a template from the given string based on the specified
    provider, or the provider with highest precedence when none is given.

    Args:
        s: The string to convert to a template.
        provider_name: The name of the provider to use to create the template.
    """
    if provider_name is None:
        provider_name = self.supported_providers[0]

    def _build():
        return self.get_provider(provider_name).create_template(s)

    return template_exception_handler(_build, self.error_context)
def book_name(self, number):
    """Return the name of the book with the given index.

    :param number: book number to look up
    :return: the book name as a string

    On a database error, reports via ``self.error`` and re-raises so the
    caller never hits an UnboundLocalError on the result variable (the
    original bare ``except`` fell through to an undefined name).
    """
    try:
        row = self.cur.execute(
            "SELECT name FROM book WHERE number = ?;", [number]).fetchone()
    except Exception:
        # Narrowed from a bare except: don't swallow KeyboardInterrupt/SystemExit.
        self.error("cannot look up name of book number %s" % number)
        raise
    return str(row[0])
def datapath4file(filename, ext:str='.tgz', archive=True):
"Return data path to `filename`, checking locally first then in the config file."
local_path = URLs.LOCAL_PATH/'data'/filename
if local_path.exists() or local_path.with_suffix(ext).exists(): return local_path
elif archive: return Config.data_arc... | Return data path to `filename`, checking locally first then in the config file. |
def level_i18n_name(self):
    """Return the translated name for this object's level.

    In use within templates for dynamic translations; falls back to
    ``self.level_name`` when the level is not listed in
    ``spatial_granularities``.
    """
    for granularity, label in spatial_granularities:
        if granularity == self.level:
            return label
    return self.level_name
def _check_point(self, lat, lng):
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | Checks if latitude and longitude correct |
def calculate_hash_of_files(files, root):
file_hash = hashlib.md5()
for fname in sorted(files):
fileobj = os.path.join(root, fname)
file_hash.update((fname + "\0").encode())
with open(fileobj, "rb") as filedes:
for chunk in iter(lambda: filedes.read(4096), ""):
... | Return a hash of all of the given files at the given root.
Adapted from stacker.hooks.aws_lambda; used according to its license:
https://github.com/cloudtools/stacker/blob/1.4.0/LICENSE
Args:
files (list[str]): file names to include in the hash calculation,
relative to ``root``.
... |
def _construct_auto_distance(features, column_types):
numeric_ftrs = []
string_ftrs = []
dict_ftrs = []
for ftr in features:
try:
ftr_type = column_types[ftr]
except:
raise ValueError("The specified feature does not exist in the " +
"i... | Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
`:func:_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic different than for each nearest
neighbors-based toolk... |
def from_geom(geom):
name = geom.params['stat']
kwargs = geom._kwargs
if (not isinstance(name, type) and
hasattr(name, 'compute_layer')):
return name
if isinstance(name, stat):
return name
elif isinstance(name, type) and issubclass(name, st... | Return an instantiated stat object
stats should not override this method.
Parameters
----------
geom : geom
`geom`
Returns
-------
out : stat
A stat object
Raises
------
:class:`PlotnineError` if unable to create... |
def mmInformation(NetworkName_presence=0, NetworkName_presence1=0,
TimeZone_presence=0, TimeZoneAndTime_presence=0,
LsaIdentifier_presence=0):
a = TpPd(pd=0x5)
b = MessageType(mesType=0x32)
packet = a / b
if NetworkName_presence is 1:
c = NetworkNameHdr(ieiNN=... | MM INFORMATION Section 9.2.15a |
def full_research_organism(soup):
    """research-organism list including inline tags, such as italic"""
    if not raw_parser.research_organism_keywords(soup):
        return []
    return [node_contents_str(tag)
            for tag in raw_parser.research_organism_keywords(soup)]
def get_region_from_metadata():
global __Location__
if __Location__ == 'do-not-get-from-metadata':
log.debug('Previously failed to get AWS region from metadata. Not trying again.')
return None
if __Location__ != '':
return __Location__
try:
result = requests.get(
... | Try to get region from instance identity document and cache it
.. versionadded:: 2015.5.6 |
def benchmark(self, func, c_args, threads, grid, times):
time = []
for _ in range(self.iterations):
value = self.run_kernel(func, c_args, threads, grid)
if value < 0.0:
raise Exception("too many resources requested for launch")
time.append(value)
... | runs the kernel repeatedly, returns averaged returned value
The C function tuning is a little bit more flexible than direct CUDA
or OpenCL kernel tuning. The C function needs to measure time, or some
other quality metric you wish to tune on, on its own and should
therefore return a sing... |
def OnWidgetToolbarToggle(self, event):
    """Widget toolbar toggle event handler."""
    main = self.main_window
    main.widget_toolbar.SetGripperVisible(True)
    pane_info = main._mgr.GetPane("widget_toolbar")
    self._toggle_pane(pane_info)
    event.Skip()
def dirichlet_covariance(alpha):
    r"""Covariance matrix for Dirichlet distribution.

    Parameters
    ----------
    alpha : (M, ) ndarray
        Parameters of Dirichlet distribution

    Returns
    -------
    cov : (M, M) ndarray
        Covariance matrix
    """
    total = alpha.sum()
    denom = total ** 2 * (total + 1.0)
    # Off-diagonal terms: -alpha_i * alpha_j
    scaled = -alpha[:, np.newaxis] * alpha[np.newaxis, :]
    # Diagonal correction gives alpha_i * (total - alpha_i)
    diag = np.diag_indices(scaled.shape[0])
    scaled[diag] += total * alpha
    return scaled / denom
def set(self, image_file, source=None):
image_file.set_size()
self._set(image_file.key, image_file)
if source is not None:
if not self.get(source):
raise ThumbnailError('Cannot add thumbnails for source: `%s` '
'that is not in kvst... | Updates store for the `image_file`. Makes sure the `image_file` has a
size set. |
def _ParseItem(self, parser_mediator, olecf_item):
result = False
event_data = OLECFItemEventData()
event_data.name = olecf_item.name
event_data.offset = 0
event_data.size = olecf_item.size
creation_time, modification_time = self._GetTimestamps(olecf_item)
if creation_time:
date_time =... | Parses an OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
olecf_item (pyolecf.item): OLECF item.
Returns:
bool: True if an event was produced. |
def ManuallyScheduleClients(self, token=None):
    """Schedule all flows without using the Foreman.

    Since we know all the client ids to run on we might as well just
    schedule all the flows and wait for the results.

    Args:
        token: A datastore access token.
    """
    client_ids = {
        client_id
        for flow_request in self.args.flows
        for client_id in flow_request.client_ids
    }
    self.StartClients(self.session_id, client_ids, token=token)
def split_query(query: str) -> List[str]:
try:
_query = query.strip()
except (ValueError, AttributeError):
raise QueryParserException('query is not valid, received instead {}'.format(query))
expressions = _query.split(',')
expressions = [exp.strip() for exp in expressions if exp.strip()]... | Split a query into different expressions.
Example:
name:bla, foo:<=1 |
def update_route53_records(self, domain_name, dns_name):
zone_id = self.get_hosted_zone_id_for_domain(domain_name)
is_apex = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1] == domain_name
if is_apex:
record_set = {
'Name': domain_name,
... | Updates Route53 Records following GW domain creation |
def size_to_content(self):
new_sizing = self.copy_sizing()
new_sizing.minimum_height = 0
new_sizing.maximum_height = 0
axes = self.__axes
if axes and axes.is_valid:
if axes.x_calibration and axes.x_calibration.units:
new_sizing.minimum_height = self.fo... | Size the canvas item to the proper height. |
def _get_stddevs(self, C, stddev_types, rup, imt, num_sites):
stddevs = []
for stddev_type in stddev_types:
sigma_mean = self._compute_standard_dev(rup, imt, C)
sigma_tot = np.sqrt((sigma_mean ** 2) + (C['SigmaReg'] ** 2))
sigma_tot = np.log10(np.exp(sigma_tot))
... | Return standard deviations as defined in eq. 4 and 5, page 744,
based on table 8, page 744.
Eq. 5 yields std dev in natural log, so convert to log10 |
def tap(f):
    """"Tap" a Deferred callback chain with a function whose return value
    is ignored: the wrapped callback passes the original result through."""
    @wraps(f)
    def _passthrough(res, *args, **kwargs):
        deferred = maybeDeferred(f, res, *args, **kwargs)
        deferred.addCallback(lambda _: res)
        return deferred
    return _passthrough
def _writeLinks(self, links, fileObject, replaceParamFile):
for link in links:
linkType = link.type
fileObject.write('LINK %s\n' % link.linkNumber)
if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
self._writeCrossSectionL... | Write Link Lines to File Method |
def geq_multiple(self, other):
    """Return the next multiple of this time value,
    greater than or equal to ``other``.
    If ``other`` is zero, return this time value.

    :rtype: :class:`~aeneas.exacttiming.TimeValue`
    """
    if other == TimeValue("0.000"):
        return self
    factor = int(math.ceil(other / self))
    return factor * self
def extract_fields(lines, delim, searches, match_lineno=1, **kwargs):
keep_idx = []
for lineno, line in lines:
if lineno < match_lineno or delim not in line:
if lineno == match_lineno:
raise WcutError('Delimter not found in line {}'.format(
match_lineno))
... | Return generator of fields matching `searches`.
Parameters
----------
lines : iterable
Provides line number (1-based) and line (str)
delim : str
Delimiter to split line by to produce fields
searches : iterable
Returns search (str) to match against line fields.
match_line... |
def get_token_network(
self,
token_address: TokenAddress,
block_identifier: BlockSpecification = 'latest',
) -> Optional[Address]:
if not isinstance(token_address, T_TargetAddress):
raise ValueError('token_address must be an address')
address = self.pr... | Return the token network address for the given token or None if
there is no correspoding address. |
def word_wrap(text, columns=80, indent=4, padding=2):
paragraphs = _PARA_BREAK.split(text)
lines = []
columns -= padding
for para in paragraphs:
if para.isspace():
continue
line = ' ' * indent
for word in para.split():
if (len(line) + 1 + len(word)) > colu... | Given a block of text, breaks into a list of lines wrapped to
length. |
def get(path, objectType, user=None):
ret = {'Path': path,
'ACLs': []}
sidRet = _getUserSid(user)
if path and objectType:
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
tdacl = _get_dacl(path, objectT... | Get the ACL of an object. Will filter by user if one is provided.
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: A user name to filter by
Returns (dict): A dictionary containing the ACL
CLI Example:
.. code-block:: bash
... |
def distinct_values_of(self, field, count_deleted=False):
solr_params = "facet=true&facet.field=%s&rows=0" % field
result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)
facet_fields = result['facet_counts']['facet_fields'][field]
keys = facet_fields[0::2]
... | Uses riak http search query endpoint for advanced SOLR queries.
Args:
field (str): facet field
count_deleted (bool): ignore deleted or not
Returns:
(dict): pairs of field values and number of counts |
def add_tarball(self, tarball, package):
if tarball is None:
logger.error(
"No tarball found for %s: probably a renamed project?",
package)
return
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
... | Add a tarball, possibly creating the directory if needed. |
def population_variant_regions(items, merged=False):
def _get_variant_regions(data):
out = dd.get_variant_regions(data) or dd.get_sample_callable(data)
if merged and dd.get_variant_regions(data):
merged_out = dd.get_variant_regions_merged(data)
if merged_out:
... | Retrieve the variant region BED file from a population of items.
If tumor/normal, return the tumor BED file. If a population, return
the BED file covering the most bases. |
def validate(self, form, extra_validators=tuple()):
    """Perform validation only if data has been submitted."""
    if not self.has_data:
        return True
    # List-shaped fields must actually carry a list/tuple payload.
    if self.is_list_data and not isinstance(self._formdata[self.name], (list, tuple)):
        return False
    return super(NestedModelList, self).validate(form, extra_validators)
def from_array(self, array, propname):
r
array = sp.atleast_3d(array)
if sp.shape(array) != self._shape:
raise Exception('The array shape does not match the network')
temp = array.flatten()
Ps = sp.array(self['pore.index'][self.pores('internal')], dtype=int)
p... | r"""
Apply data to the network based on a rectangular array filled with
values. Each array location corresponds to a pore in the network.
Parameters
----------
array : array_like
The rectangular array containing the values to be added to the
network. Thi... |
def get_fields(self):
ret = OrderedDict()
if not self.user:
return ret
fields = super(ModelPermissionsSerializer, self).get_fields()
if self.user.is_superuser:
return fields
allowed_fields = self._get_user_allowed_fields()
for allowed_field in allo... | Calculate fields that can be accessed by authenticated user. |
def get_waveform_end_frequency(template=None, **kwargs):
    """Return the stop frequency of a template, or None when the
    approximant has no registered end-frequency function."""
    input_params = props(template, **kwargs)
    approximant = kwargs['approximant']
    if approximant not in _filter_ends:
        return None
    return _filter_ends[approximant](**input_params)
def SearchDependencies(self,
os_name,
artifact_name_list,
existing_artifact_deps=None,
existing_expansion_deps=None):
artifact_deps = existing_artifact_deps or set()
expansion_deps = existing_expansion_deps or se... | Return a set of artifact names needed to fulfill dependencies.
Search the path dependency tree for all artifacts that can fulfill
dependencies of artifact_name_list. If multiple artifacts provide a
dependency, they are all included.
Args:
os_name: operating system string
artifact_name_lis... |
def description(self):
    """Return the short description of the package ``self.name`` from
    SLACKBUILDS.TXT, or None when no matching entry exists.

    Scans NAME lines to track the current entry, then returns the
    DESCRIPTION line of the entry whose name matches.
    """
    # Initialize so a DESCRIPTION line appearing before any NAME line
    # cannot raise UnboundLocalError (latent bug in the original).
    sbo_name = None
    for line in self.SLACKBUILDS_TXT.splitlines():
        if line.startswith(self.line_name):
            # NOTE: fixed-width SLACKBUILDS.TXT format — name starts at col 17.
            sbo_name = line[17:].strip()
        if line.startswith(self.line_des) and sbo_name == self.name:
            # Description text starts at col 31 in the same format.
            return line[31:].strip()
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
with tf.name_scope(scope, 'MaxPool', [inputs]):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_... | Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is com... |
def _display(node, indent, expandattrs, expandvals, output):
attrs = _displayattrs(node.attrib, expandattrs)
if node.text is None or not expandvals:
val = ''
elif isinstance(node.text, str):
val = ' %s' % repr(node.text.strip())
else:
val = ' %s' % repr(node.text)
output.writ... | Core function to display a Node object |
def downgrades(src):
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete())
f(op)
write_version_info(conn, version_info_t... | Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply. |
def _remove_empty_lines(self, lines):
ret = []
for l in lines:
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret | Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same ar... |
def atlas_node_stop( atlas_state ):
    """Stop the atlas node threads: ask each component to join, then join it."""
    for component, worker in atlas_state.items():
        log.debug("Stopping Atlas component '%s'" % component)
        worker.ask_join()
        worker.join()
    return True
def select(self, table, cols, execute=True, select_type='SELECT', return_type=list):
select_type = select_type.upper()
assert select_type in SELECT_QUERY_TYPES
statement = '{0} {1} FROM {2}'.format(select_type, join_cols(cols), wrap(table))
if not execute:
return statement
... | Query every row and only certain columns from a table. |
def transfer_project(self, to_namespace, **kwargs):
    """Transfer a project to the given namespace ID.

    Args:
        to_namespace (str): ID or path of the namespace to transfer the
            project to
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
    """
    path = '/projects/%s/transfer' % (self.id,)
    payload = {"namespace": to_namespace}
    self.manager.gitlab.http_put(path, post_data=payload, **kwargs)
def _handle_tag_definesceneandframelabeldata(self):
obj = _make_object("DefineSceneAndFrameLabelData")
obj.SceneCount = self._get_struct_encodedu32()
for i in range(1, obj.SceneCount + 1):
setattr(obj, 'Offset{}'.format(i), self._get_struct_encodedu32())
setattr(obj, 'Nam... | Handle the DefineSceneAndFrameLabelData tag. |
def send(self, verb, params=None, source=None, tags=None):
    """Send a generic IRC message to the server.

    A message is created from its parts, then assembled and sent.

    Args:
        verb (str): Verb, such as PRIVMSG.
        params (list of str): Message parameters, defaults to no params.
        source: Optional message source.
        tags: Optional message tags.
    """
    message = RFC1459Message.from_data(
        verb, params=params, source=source, tags=tags)
    self._send_message(message)
def get_missing_bins(original, trimmed):
original_diag = np.diag(original)
trimmed_diag = np.diag(trimmed)
index = []
m = min(original.shape)
for j in range(min(trimmed.shape)):
k = 0
while original_diag[j + k] != trimmed_diag[j] and k < 2 * m:
k += 1
index.append... | Retrieve indices of a trimmed matrix with respect to the original matrix.
Fairly fast but is only correct if diagonal values are different, which is
always the case in practice. |
def gstd(data, channels=None):
    """Calculate the geometric std. dev. of the events in an FCSData object
    (exp of the standard deviation of the log of the data).

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to compute the statistic; defaults to all.
    """
    selected = data if channels is None else data[:, channels]
    return np.exp(np.std(np.log(selected), axis=0))
def initialize_pop(self):
self.toolbox.register("individual", self.generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=self._params['popsize'])
self.assign_fitnesses(self.... | Assigns initial fitnesses. |
def potcar_eatom_list_from_outcar( filename='OUTCAR' ):
    """Returns a list of EATOM values for the pseudopotentials used.

    Args:
        filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.

    Returns:
        (List(Float)): A list of EATOM values, in the order they appear
        in the OUTCAR.
    """
    eatom_re = re.compile(r"energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)")
    with open(filename) as handle:
        contents = handle.read()
    return [float(value) for value in eatom_re.findall(contents)]
def lr_padding(self, terms):
    """Pad ``terms`` on the left and right before adding, depending on
    what's in ``self.lpad`` and ``self.rpad`` (repeated ``self.n - 1``
    times each). An empty pad value ('') disables padding on that side.
    """
    left = [self.lpad] * (self.n - 1) if self.lpad else []
    right = [self.rpad] * (self.n - 1) if self.rpad else []
    return left + terms + right
def start_logger(self):
    """Enable the root logger and configure extra loggers."""
    root_level = self.real_level(self.level)
    logging.basicConfig(level=root_level)
    self.set_logger(self.name, self.level)
    config.dictConfig(self.config)
    self.logger = logging.getLogger(self.name)
def run_final_eval(train_session, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor,
resized_image_tensor, bottleneck_tensor):
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(train_session, image_lists,
... | Runs a final evaluation on an eval graph using the test data set.
Args:
train_session: Session for the train graph with the tensors below.
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
image_lists: OrderedDict of training images for each label.
jp... |
def regex_extract(arg, pattern, index):
    """Return the specified group, 0 indexed, extracted from a string
    based on the given regex pattern.

    Parameters
    ----------
    pattern : string (regular expression string)
    index : int, 0 indexed

    Returns
    -------
    extracted : string
    """
    extract_op = ops.RegexExtract(arg, pattern, index)
    return extract_op.to_expr()
def delete_role(query):
    """Delete a role after interactive confirmation."""
    role = _query_to_role(query)
    if not click.confirm(f'Are you sure you want to delete {role!r}?'):
        click.echo('Cancelled.')
        return
    role_manager.delete(role, commit=True)
    click.echo(f'Successfully deleted {role!r}')
def makeRandomBinaryTree(leafNodeNumber=None):
while True:
nodeNo = [-1]
def fn():
nodeNo[0] += 1
if random.random() > 0.6:
i = str(nodeNo[0])
return BinaryTree(0.00001 + random.random()*0.8, True, fn(), fn(), i)
else:
... | Creates a random binary tree. |
def sorted(self, fsort):
    """Add one or more sorts on specific fields; each sort can be
    reversed. Use the special field name ``_score`` to sort by score.
    Returns self for chaining.
    """
    if not self.params:
        self.params = {}
    self.params['sort'] = fsort
    return self
def fetch(self, endpoint_name, identifier_input, query_params=None):
endpoint_url = constants.URL_PREFIX + "/" + self._version + "/" + endpoint_name
if query_params is None:
query_params = {}
if len(identifier_input) == 1:
query_params.update(identifier_input[0])
... | Calls this instance's request_client's post method with the
specified component endpoint
Args:
- endpoint_name (str) - The endpoint to call like "property/value".
- identifier_input - One or more identifiers to request data for. An identifier can
be in one of the... |
def rsem_stats_table(self):
headers = OrderedDict()
headers['alignable_percent'] = {
'title': '% Alignable'.format(config.read_count_prefix),
'description': '% Alignable reads'.format(config.read_count_desc),
'max': 100,
'min': 0,
'suffix': '%'... | Take the parsed stats from the rsem report and add them to the
basic stats table at the top of the report |
def enable_fullquicklook(self):
    """Enable the full quicklook mode: show the quicklook plugin and
    disable the separate cpu/gpu/mem/memswap plugins."""
    self.args.disable_quicklook = False
    for plugin in ('cpu', 'gpu', 'mem', 'memswap'):
        setattr(self.args, 'disable_' + plugin, True)
def get(self, callback):
    """Get an item based on the request's URL path and hand it to `callback`."""
    derived_path = self.context.request.url
    logger.debug('[{log_prefix}]: get.derived_path: {path}'.format(
        log_prefix=LOG_PREFIX, path=derived_path))
    key = self.result_key_for(derived_path)
    callback(self.storage.get(key))
def get_probs_for_labels(labels, prediction_results):
probs = []
if 'probability' in prediction_results:
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
... | Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
..... |
def _connect(self):
try:
db = pymysql.connect(user=self.user, passwd=self.passwd,
host=self.host, port=self.port,
db=self.shdb, use_unicode=True)
return db, db.cursor()
except Exception:
logger.error("D... | Connect to the MySQL database. |
def parse_disease_associations(path: str, excluded_disease_ids: set):
if os.path.isdir(path) or not os.path.exists(path):
logger.info("Couldn't find the disease associations file. Returning empty list.")
return {}
disease_associations = defaultdict(list)
with open(path) as input_file:
... | Parse the disease-drug target associations file.
:param str path: Path to the disease-drug target associations file.
:param list excluded_disease_ids: Identifiers of the disease for which drug targets are being predicted.
:return: Dictionary of drug target-disease mappings. |
def complete_extra(self, args):
    """Completions for the 'extra' command."""
    if args:
        return self._complete_path(args[-1])
    return self._listdir('./')
def step1(self, pin):
context = SRPContext(
'Pair-Setup', str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512)
self._session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode... | First pairing step. |
def get_dict(self, exclude_keys=None, include_keys=None):
d = {}
exclude_keys_list = exclude_keys or []
include_keys_list = include_keys or []
for k in self._get_keys():
if k not in exclude_keys_list and (
k in include_keys_list or not include_keys
... | return dictionary of keys and values corresponding to this model's
data - if include_keys is null the function will return all keys
:param exclude_keys: (optional) is a list of columns from model that
should not be returned by this function
:param include_keys: (optional) is a list of c... |
def assert_less(first, second, msg_fmt="{msg}"):
    """Fail if first is not less than second.

    >>> assert_less('bar', 'foo')
    >>> assert_less(5, 5)
    Traceback (most recent call last):
        ...
    AssertionError: 5 is not less than 5

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * first - the first argument
    * second - the second argument
    """
    if first < second:
        return
    msg = "{!r} is not less than {!r}".format(first, second)
    fail(msg_fmt.format(msg=msg, first=first, second=second))
def get_belapi_handle(client, username=None, password=None):
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_nam... | Get BEL API arango db handle |
def execute_series_lead_lag_timedelta(
op, data, offset, default, aggcontext=None, **kwargs
):
func = operator.add if isinstance(op, ops.Lag) else operator.sub
group_by = aggcontext.group_by
order_by = aggcontext.order_by
parent = aggcontext.parent
parent_df = getattr(parent, 'obj', parent)
... | An implementation of shifting a column relative to another one that is
in units of time rather than rows. |
def __get_activator_method(self, method_name):
activator = getattr(self.__module, ACTIVATOR, None)
if activator is None:
activator = getattr(self.__module, ACTIVATOR_LEGACY, None)
if activator is not None:
_logger.warning(
"Bundle %s uses the d... | Retrieves the requested method of the activator, or returns None
:param method_name: A method name
:return: A method, or None |
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.... | provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required |
def can_join_group(self, project):
    """Return whether or not user can join a group on `project`.

    Joining is impossible when the project's class is locked or the
    project does not allow groups of at least two; otherwise the user
    may join as long as any existing group still has room.
    """
    if project.class_.is_locked or project.group_max < 2:
        return False
    assoc = self.fetch_group_assoc(project)
    if not assoc:
        # No existing group association: the user is free to join.
        return True
    member_count = len(list(assoc.group.users))
    return member_count < project.group_max
def walkfiles(startdir, regex=None, recurse=True):
for r,_,fs in os.walk(startdir):
if not recurse and startdir != r:
return
for f in fs:
path = op.abspath(op.join(r,f))
if regex and not _is_match(regex, path):
continue
if op.isfile... | Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern. |
def get(self, field_path):
    """Get a value from the snapshot data.

    Returns a deep copy of the (possibly nested) value stored at
    ``field_path``, or ``None`` when the snapshot does not exist.
    """
    if not self._exists:
        return None
    value = field_path_module.get_nested_value(field_path, self._data)
    # Deep-copy so callers cannot mutate the snapshot's internal data.
    return copy.deepcopy(value)
If the data is nested, for example:
.. code-block:: python
>>> snapshot.to_dict()
{
'top1': {
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
... |
def _update_offsets(start_x, spacing, terminations, offsets, length):
return (start_x + spacing[0] * terminations / 2.,
offsets[1] + spacing[1] * 2. + length) | Update the offsets |
def get(object_ids):
worker = global_worker
worker.check_connected()
with profiling.profile("ray.get"):
if worker.mode == LOCAL_MODE:
return object_ids
global last_task_error_raise_time
if isinstance(object_ids, list):
values = worker.get_object(object_ids)
... | Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ID is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has bee... |
def object_isinstance(node, class_or_seq, context=None):
    """Check if a node 'isinstance' any node in class_or_seq.

    :param node: A given node
    :param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]]
    :rtype: bool
    :raises AstroidTypeError: if the given ``class_or_seq`` are not types
    """
    inferred_type = object_type(node, context)
    # Propagate uninferable results rather than guessing membership.
    if inferred_type is util.Uninferable:
        return util.Uninferable
    return _object_type_is_subclass(
        inferred_type, class_or_seq, context=context)
:param node: A given node
:param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]]
:rtype: bool
:raises AstroidTypeError: if the given ``classes_or_seq`` are not types |
def send_exit_with_code(cls, sock, code):
    """Send an Exit chunk over the specified socket, containing the
    specified return code.
    """
    # Encode the integer exit status and ship it as the chunk payload.
    cls.send_exit(sock, payload=cls.encode_int(code))
def get_ir_reciprocal_mesh(mesh,
cell,
is_shift=None,
is_time_reversal=True,
symprec=1e-5,
is_dense=False):
_set_no_error()
lattice, positions, numbers, _ = _expand_cell(cell)
... | Return k-points mesh and k-point map to the irreducible k-points.
The symmetry is searched from the input cell.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
cell : spglib cell tuple
Crystal structure.
is_shift : array_li... |
def reward_battery(self):
if not 'battery' in self.mode:
return
mode = self.mode['battery']
if mode and mode and self.__test_cond(mode):
self.logger.debug('Battery out')
self.player.stats['reward'] += mode['reward']
self.player.game_over = self.pla... | Add a battery level reward |
def encode(self, tag):
sequence = str(tag.sequence_n)
if len(sequence) > self._sequence_l:
sequence = sequence[:self._sequence_l]
while len(sequence) < self._sequence_l:
sequence = '0' + sequence
version = str(tag.version)
if len(version) > 2:
... | Parses a CWR file name from a FileTag object.
The result will be a string following the format CWyynnnnsss_rrr.Vxx,
where the numeric sequence will have the length set on the encoder's
constructor.
:param tag: FileTag to parse
:return: a string file name parsed from the FileTag |
def virtual_interface_create(provider, names, **kwargs):
    """Attach private interfaces to a server.

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'
    """
    conn = _get_client()
    return conn.extra_action(
        provider=provider,
        names=names,
        action='virtual_interface_create',
        **kwargs
    )
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.