code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _GetConnection(self):
    """Retrieve a prelocked connection from the Pool.

    Blocks, polling once per second, until the Pool hands one back.

    @author: Nick Verbeck
    @since: 9/7/2008
    """
    while self.conn is None:
        candidate = Pool().GetConnection(self.connInfo)
        if candidate is not None:
            self.conn = candidate
            break
        time.sleep(1)
def properties(self):
    """Schema for particular properties of the object.

    Returns:
        dict: the ``"properties"`` mapping from the schema (empty when absent).

    Raises:
        SchemaError: if the stored value is not an object (dict).
    """
    props = self._schema.get("properties", {})
    if isinstance(props, dict):
        return props
    raise SchemaError(
        "properties value {0!r} is not an object".format(props))
def batch(self, requests):
    """Make a batch request.

    :param requests: A list of dictionaries with keys 'method',
        'relative_url' and optionally 'body'.

    Yields a list of responses and/or exceptions.
    """
    # Form-encode each request body in place before serializing the batch.
    for request in requests:
        if 'body' in request:
            request['body'] = urlencode(request['body'])

    def _grouper(complete_list, n=1):
        # Yield successive n-sized chunks of complete_list.
        for i in range(0, len(complete_list), n):
            yield complete_list[i:i + n]

    # Issue one POST per group of 50 requests (presumably the API's
    # per-batch limit -- TODO confirm against the service docs).
    responses = []
    for group in _grouper(requests, 50):
        responses += self.post(
            batch=json.dumps(group)
        )

    for response, request in zip(responses, requests):
        # A falsy response means the request was not fulfilled.
        if not response:
            yield None
            continue
        try:
            yield self._parse(response['body'])
        except FacepyError as exception:
            # Attach the originating request so callers can correlate
            # the failure, then yield the exception instead of raising.
            exception.request = request
            yield exception
def from_series(cls, series, offset=0):
    """Create and return a new Series from an existing Series.

    :param series: raccoon Series to copy construction arguments from
    :param offset: offset value; must be provided as there is no
        equivalent attribute on the source to copy
    :return: Series
    """
    kwargs = {
        'data': series.data,
        'index': series.index,
        'data_name': series.data_name,
        'index_name': series.index_name,
        'sort': series.sort,
        'offset': offset,
    }
    return cls(**kwargs)
def post(self, url: StrOrURL,
         *, data: Any = None, **kwargs: Any) -> '_RequestContextManager':
    """Perform HTTP POST request."""
    request_coro = self._request(hdrs.METH_POST, url, data=data, **kwargs)
    return _RequestContextManager(request_coro)
def detect_images_and_galleries(generators):
    """Run image and gallery detection on both pages and articles."""
    for generator in generators:
        if isinstance(generator, ArticlesGenerator):
            contents = itertools.chain(
                generator.articles, generator.translations, generator.drafts)
        elif isinstance(generator, PagesGenerator):
            contents = itertools.chain(
                generator.pages, generator.translations, generator.hidden_pages)
        else:
            continue
        for content in contents:
            detect_image(generator, content)
            detect_gallery(generator, content)
def abort_job(self, job_id):
    """Abort an existing job.

    When a job is aborted, no more records are processed.  Changes to
    data may already have been committed and aren't rolled back.

    :param job_id: job_id as returned by 'create_operation_job(...)'
    :return: abort response as xml
    """
    url = self._get_abort_job_url(job_id)
    response = requests.post(
        url,
        headers=self._get_abort_job_headers(),
        data=self._get_abort_job_xml(),
    )
    response.raise_for_status()
    return response
def gaussfill(dem, size=3, newmask=None):
    """Gaussian filter with filling.

    Smooths `dem` with a Gaussian filter, then restores the original
    unmasked values so only masked gaps receive filtered data.
    """
    filled = gauss_fltr_astropy(dem, size=size)
    valid = ~dem.mask
    filled[valid] = dem[valid]
    if newmask is not None:
        filled = np.ma.array(filled, mask=newmask)
    return filled
def from_fits(cls, filename):
    """Load a MOC from a FITS file.

    The specified FITS file must store the MOC (i.e. the list of HEALPix
    cells it contains) in a binary HDU table.

    Parameters
    ----------
    filename : str
        The path to the FITS file.

    Returns
    -------
    result : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
        The resulting MOC.
    """
    table = Table.read(filename)
    uniq = table['UNIQ']
    # Each NUNIQ cell u covers the half-open interval [u, u+1).
    intervals = np.vstack((uniq, uniq + 1)).T
    nuniq_intervals = IntervalSet(intervals)
    interval_set = IntervalSet.from_nuniq_interval_set(nuniq_intervals)
    return cls(interval_set)
def _cryptography_cipher(key, iv):
    """Build a cryptography TripleDES Cipher object.

    :param bytes key: Encryption key
    :param bytes iv: Initialization vector
    :returns: TripleDES Cipher instance
    :rtype: cryptography.hazmat.primitives.ciphers.Cipher
    """
    algorithm = algorithms.TripleDES(key)
    mode = modes.CBC(iv)
    return Cipher(algorithm=algorithm, mode=mode, backend=default_backend())
def build_event_graph(graph, tree, node):
    """Return a DiGraph of a specific event structure, built recursively."""
    # Node already present -- avoid cycles and duplicate work.
    if node_key(node) in graph:
        return
    type = get_type(node)
    text = get_text(node)
    label = '%s (%s)' % (type, text)
    graph.add_node(node_key(node), type=type, label=label, text=text)
    args = get_args(node)
    for arg_role, (arg_id, arg_tag) in args.items():
        arg = get_node_by_id(tree, arg_id)
        if arg is None:
            # Fall back to the raw tag when the id cannot be resolved.
            arg = arg_tag
        build_event_graph(graph, tree, arg)
        # Edge is labeled with the argument's role in the event.
        graph.add_edge(node_key(node), node_key(arg), type=arg_role,
                       label=arg_role)
def is_ome(self):
    """Page contains OME-XML in ImageDescription tag."""
    if self.index > 1 or not self.description:
        return False
    text = self.description
    return text.startswith('<?xml version=') and text.endswith('</OME>')
def _get_parameter(self, name, eopatch):
if hasattr(self, name) and getattr(self, name) is not None:
return getattr(self, name)
if name == 'bbox' and eopatch.bbox:
return eopatch.bbox
if name in eopatch.meta_info:
return eopatch.meta_info[name]
if name == 'maxcc':
return 1.0
if name == 'time_difference':
return dt.timedelta(seconds=-1)
if name in ('size_x', 'size_y'):
return None
raise ValueError('Parameter {} was neither defined in initialization of {} nor is contained in '
'EOPatch'.format(name, self.__class__.__name__)) | Collects the parameter either from initialization parameters or from EOPatch |
def report_message(report):
    """Build the report message body for a failed run."""
    parts = [
        'Error: return code != 0\n\n',
        'Archive: {}\n\n'.format(report['archive']),
        'Docker image: {}\n\n'.format(report['image']),
        'Docker container: {}\n\n'.format(report['container_id']),
    ]
    return ''.join(parts)
def predict(self, x):
    """Make a prediction recursively.

    Uses both the samples inside the current node and the statistics
    inherited from the parent node.

    :param x: a single sample to classify
    :return: at a leaf, the majority label among inherited + local
        counts; otherwise the prediction of the matching child.
    """
    if self._is_leaf():
        inherited = self.predict_initialize['count_dict']
        counts = count_dict(self.Y)
        # Merge inherited counts into the local ones.
        # (was `iteritems()`, which is Python 2-only)
        for key, value in inherited.items():
            if key in counts:
                counts[key] += value
            else:
                counts[key] = value
        return argmax(counts)
    # Internal node: route the sample to the matching child.
    if self.criterion(x):
        return self.right.predict(x)
    return self.left.predict(x)
def generate_cert(domain):
    """Generate an icinga2 client certificate and key.

    Returns::

        icinga2 pki new-cert --cn domain.tld --key /etc/icinga2/pki/domain.tld.key --cert /etc/icinga2/pki/domain.tld.crt

    CLI Example:

    .. code-block:: bash

        salt '*' icinga2.generate_cert domain.tld
    """
    cmd = [
        "icinga2", "pki", "new-cert",
        "--cn", domain,
        "--key", "{0}{1}.key".format(get_certs_path(), domain),
        "--cert", "{0}{1}.crt".format(get_certs_path(), domain),
    ]
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def find(cls, api_key=None, fetch_all=True, endpoint=None, maximum=None,
         **kwargs):
    """Find some entities from the API endpoint.

    If no api_key is provided, the global api key will be used.
    If fetch_all is True, page through all the data and find every
    record that exists.  Remaining keyword arguments are translated to
    `query_params` for the request; 'exclude' (string or list) filters
    matching records out of the result.
    """
    exclude = kwargs.pop('exclude', None)
    # Normalize a single exclusion string to a list.
    if isinstance(exclude, six.string_types):
        exclude = [exclude, ]
    query_params = cls.translate_query_params(**kwargs)
    if endpoint is None:
        endpoint = cls.get_endpoint()
    fetcher = cls._fetch_all if fetch_all else cls._fetch_page
    result = fetcher(api_key=api_key, endpoint=endpoint, maximum=maximum,
                     **query_params)
    return [record for record in result
            if not cls._find_exclude_filter(exclude, record)]
def create_template(self, s, provider_name=None):
    """Create a template from the given string using the specified
    provider, or the provider with highest precedence.

    Args:
        s: The string to convert to a template.
        provider_name: The name of the provider used to create the template.
    """
    name = provider_name if provider_name is not None else self.supported_providers[0]
    # Provider lookup stays inside the lambda so its errors are also
    # routed through the exception handler.
    return template_exception_handler(
        lambda: self.get_provider(name).create_template(s),
        self.error_context,
    )
def book_name(self, number):
    """Return the name of the book with the given number.

    :param number: book number to look up
    :return: the book name as a string, or None when the query fails or
        no such book exists.
    """
    try:
        row = self.cur.execute(
            "SELECT name FROM book WHERE number = ?;", [number]).fetchone()
    except Exception:  # was a bare `except`; keep the error path non-fatal
        self.error("cannot look up name of book number %s" % number)
        return None
    if row is None:
        # No matching book; the original fell through to a NameError/TypeError.
        return None
    return str(row[0])
def datapath4file(filename, ext: str = '.tgz', archive=True):
    "Return data path to `filename`, checking locally first then in the config file."
    local_path = URLs.LOCAL_PATH/'data'/filename
    has_local = local_path.exists() or local_path.with_suffix(ext).exists()
    if has_local:
        return local_path
    if archive:
        return Config.data_archive_path() / filename
    return Config.data_path() / filename
def level_i18n_name(self):
    """Translated name of this level; used within templates for dynamic
    translations.  Falls back to the raw level name when the level is
    not registered in `spatial_granularities`."""
    for known_level, translated_name in spatial_granularities:
        if known_level == self.level:
            return translated_name
    return self.level_name
def _check_point(self, lat, lng):
if abs(lat) > 90 or abs(lng) > 180:
msg = "Illegal lat and/or lng, (%s, %s) provided." % (lat, lng)
raise IllegalPointException(msg) | Checks if latitude and longitude correct |
def calculate_hash_of_files(files, root):
    """Return a hash of all of the given files at the given root.

    Adapted from stacker.hooks.aws_lambda; used according to its license:
    https://github.com/cloudtools/stacker/blob/1.4.0/LICENSE

    Args:
        files (list[str]): file names to include in the hash calculation,
            relative to ``root``.
        root (str): base directory to analyze files in.

    Returns:
        str: A hash of the hashes of the given files.
    """
    file_hash = hashlib.md5()
    for fname in sorted(files):
        fileobj = os.path.join(root, fname)
        # NUL-separate the name from the content so boundaries are unambiguous.
        file_hash.update((fname + "\0").encode())
        with open(fileobj, "rb") as filedes:
            # The file is binary, so the sentinel must be b"": the original
            # used "" (str), which never matches read()'s bytes and only
            # worked via a redundant `if not chunk: break`.
            for chunk in iter(lambda: filedes.read(4096), b""):
                file_hash.update(chunk)
        file_hash.update("\0".encode())
    return file_hash.hexdigest()
def _construct_auto_distance(features, column_types):
numeric_ftrs = []
string_ftrs = []
dict_ftrs = []
for ftr in features:
try:
ftr_type = column_types[ftr]
except:
raise ValueError("The specified feature does not exist in the " +
"input data.")
if ftr_type == str:
string_ftrs.append(ftr)
elif ftr_type == dict:
dict_ftrs.append(ftr)
elif ftr_type in [int, float, _array.array]:
numeric_ftrs.append(ftr)
else:
raise TypeError("Unable to automatically construct a distance " +
"function for feature '{}'. ".format(ftr) +
"For the nearest neighbor classifier, features " +
"must be of type integer, float, string, dictionary, " +
"or array.array.")
dist = []
for ftr in string_ftrs:
dist.append([[ftr], 'levenshtein', 1])
if len(dict_ftrs) > 0:
dist.append([dict_ftrs, 'weighted_jaccard', len(dict_ftrs)])
if len(numeric_ftrs) > 0:
dist.append([numeric_ftrs, 'euclidean', len(numeric_ftrs)])
return dist | Construct a composite distance function for a set of features, based on the
types of those features.
NOTE: This function is very similar to
`:func:_nearest_neighbors.choose_auto_distance`. The function is separate
because the auto-distance logic different than for each nearest
neighbors-based toolkit.
Parameters
----------
features : list[str]
Names of for which to construct a distance function.
column_types : dict(string, type)
Names and types of all columns.
Returns
-------
dist : list[list]
A composite distance function. Each element of the inner list has three
elements: a list of feature names (strings), a distance function name
(string), and a weight (float). |
def from_geom(geom):
    """Return an instantiated stat object.

    stats should not override this method.

    Parameters
    ----------
    geom : geom
        `geom`

    Returns
    -------
    out : stat
        A stat object

    Raises
    ------
    :class:`PlotnineError` if unable to create a `stat`.
    """
    name = geom.params['stat']
    kwargs = geom._kwargs
    # A non-class object that quacks like a stat (has compute_layer)
    # is accepted as-is.
    if (not isinstance(name, type) and
            hasattr(name, 'compute_layer')):
        return name
    if isinstance(name, stat):
        return name
    elif isinstance(name, type) and issubclass(name, stat):
        klass = name
    elif is_string(name):
        # Allow both 'bin' and 'stat_bin' spellings.
        if not name.startswith('stat_'):
            name = 'stat_{}'.format(name)
        klass = Registry[name]
    else:
        raise PlotnineError(
            'Unknown stat of type {}'.format(type(name)))
    # Only forward kwargs the stat actually understands.
    valid_kwargs = (
        (klass.aesthetics() |
         klass.DEFAULT_PARAMS.keys()) &
        kwargs.keys())
    params = {k: kwargs[k] for k in valid_kwargs}
    return klass(geom=geom, **params)
def mmInformation(NetworkName_presence=0, NetworkName_presence1=0,
                  TimeZone_presence=0, TimeZoneAndTime_presence=0,
                  LsaIdentifier_presence=0):
    """MM INFORMATION Section 9.2.15a

    Builds the message from the mandatory protocol-discriminator and
    message-type headers, appending each optional IE whose *_presence
    flag is 1.
    """
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x32)
    packet = a / b
    # The original used `is 1`, which compares object identity and only
    # works by the CPython small-int cache; `== 1` is the correct test.
    if NetworkName_presence == 1:
        packet = packet / NetworkNameHdr(ieiNN=0x43, eightBitNN=0x0)
    if NetworkName_presence1 == 1:
        packet = packet / NetworkNameHdr(ieiNN=0x45, eightBitNN=0x0)
    if TimeZone_presence == 1:
        packet = packet / TimeZoneHdr(ieiTZ=0x46, eightBitTZ=0x0)
    if TimeZoneAndTime_presence == 1:
        packet = packet / TimeZoneAndTimeHdr(ieiTZAT=0x47, eightBitTZAT=0x0)
    if LsaIdentifier_presence == 1:
        packet = packet / LsaIdentifierHdr(ieiLI=0x48, eightBitLI=0x0)
    return packet
def full_research_organism(soup):
    """Research-organism list including inline tags, such as italic.

    Returns an empty list when no research-organism keywords are found.
    """
    # Parse once instead of twice (the original called
    # research_organism_keywords for the guard and again for the result).
    keywords = raw_parser.research_organism_keywords(soup)
    if not keywords:
        return []
    return [node_contents_str(tag) for tag in keywords]
def get_region_from_metadata():
    """Try to get region from instance identity document and cache it.

    Returns the region string, or None when it cannot be determined.
    On a request failure the module-level cache is poisoned with a
    sentinel so we do not retry on every call.

    .. versionadded:: 2015.5.6
    """
    global __Location__

    if __Location__ == 'do-not-get-from-metadata':
        log.debug('Previously failed to get AWS region from metadata. Not trying again.')
        return None

    # Cached from a previous successful lookup.
    if __Location__ != '':
        return __Location__

    try:
        result = requests.get(
            "http://169.254.169.254/latest/dynamic/instance-identity/document",
            proxies={'http': ''}, timeout=AWS_METADATA_TIMEOUT,
        )
    except requests.exceptions.RequestException:
        log.warning('Failed to get AWS region from instance metadata.', exc_info=True)
        __Location__ = 'do-not-get-from-metadata'
        return None

    try:
        region = result.json()['region']
        __Location__ = region
        return __Location__
    except (ValueError, KeyError):
        log.warning('Failed to decode JSON from instance metadata.')
        return None
    # Note: the original ended with an unreachable `return None` here;
    # every path above already returns.
def benchmark(self, func, c_args, threads, grid, times):
    """Run the kernel repeatedly; return all values or a robust average.

    The C function measures its own quality metric and returns a single
    floating-point value per run.  The number of iterations is set when
    the CFunctions object is created.  With more than 4 iterations the
    lowest and highest values are discarded to be robust against
    initialization artifacts.

    :param func: A C function compiled for this specific configuration
    :type func: ctypes._FuncPtr
    :param c_args: Arguments to the function, prepared with
        ready_argument_list(); order must match the code.
    :type c_args: list(Argument)
    :param threads: Ignored; kept for interface parity with
        CudaFunctions and OpenCLFunctions.
    :param grid: Ignored; kept for interface parity.
    :param times: Return the execution time of all iterations.
    :type times: bool
    :returns: All execution times if times=True, otherwise a robust
        average of the returned values.
    :rtype: float
    """
    # Renamed from `time` to avoid shadowing the stdlib module name.
    measurements = []
    for _ in range(self.iterations):
        value = self.run_kernel(func, c_args, threads, grid)
        if value < 0.0:
            raise Exception("too many resources requested for launch")
        measurements.append(value)
    measurements.sort()
    if times:
        return measurements
    if self.iterations > 4:
        # Drop the lowest and highest measurement.
        return numpy.mean(measurements[1:-1])
    return numpy.mean(measurements)
def OnWidgetToolbarToggle(self, event):
    """Widget toolbar toggle event handler."""
    self.main_window.widget_toolbar.SetGripperVisible(True)
    pane_info = self.main_window._mgr.GetPane("widget_toolbar")
    self._toggle_pane(pane_info)
    event.Skip()
def dirichlet_covariance(alpha):
    r"""Covariance matrix for Dirichlet distribution.

    Parameters
    ----------
    alpha : (M, ) ndarray
        Parameters of Dirichlet distribution

    Returns
    -------
    cov : (M, M) ndarray
        Covariance matrix
    """
    # (The original body began with a stray bare `r` expression -- the
    # detached raw-docstring prefix -- which raised NameError at runtime.)
    alpha0 = alpha.sum()
    norm = alpha0 ** 2 * (alpha0 + 1.0)
    # Off-diagonal terms: -alpha_i * alpha_j
    Z = -alpha[:, np.newaxis] * alpha[np.newaxis, :]
    # Diagonal correction gives alpha_i * (alpha0 - alpha_i) on the diagonal.
    ind = np.diag_indices(Z.shape[0])
    Z[ind] += alpha0 * alpha
    cov = Z / norm
    return cov
def set(self, image_file, source=None):
    """Update the store for `image_file`.

    Makes sure the `image_file` has a size set.  When `source` is given,
    also registers `image_file` among the source's thumbnail keys.

    Raises:
        ThumbnailError: if `source` is not present in the kvstore.
    """
    image_file.set_size()
    self._set(image_file.key, image_file)
    if source is not None:
        if not self.get(source):
            raise ThumbnailError('Cannot add thumbnails for source: `%s` '
                                 'that is not in kvstore.' % source.name)
        # De-duplicate keys via a set before storing the list back.
        thumbnails = self._get(source.key, identity='thumbnails') or []
        thumbnails = set(thumbnails)
        thumbnails.add(image_file.key)
        self._set(source.key, list(thumbnails), identity='thumbnails')
def _ParseItem(self, parser_mediator, olecf_item):
    """Parses an OLECF item.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        olecf_item (pyolecf.item): OLECF item.

    Returns:
        bool: True if an event was produced.
    """
    result = False

    event_data = OLECFItemEventData()
    event_data.name = olecf_item.name
    event_data.offset = 0
    event_data.size = olecf_item.size

    creation_time, modification_time = self._GetTimestamps(olecf_item)

    if creation_time:
        date_time = dfdatetime_filetime.Filetime(timestamp=creation_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
        result = True

    if modification_time:
        date_time = dfdatetime_filetime.Filetime(timestamp=modification_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
        result = True

    # Recurse into sub items; an event anywhere in the subtree counts.
    for sub_item in olecf_item.sub_items:
        if self._ParseItem(parser_mediator, sub_item):
            result = True

    return result
def ManuallyScheduleClients(self, token=None):
    """Schedule all flows without using the Foreman.

    Since we know all the client ids to run on we might as well just
    schedule all the flows and wait for the results.

    Args:
        token: A datastore access token.
    """
    client_ids = {client_id
                  for flow_request in self.args.flows
                  for client_id in flow_request.client_ids}
    self.StartClients(self.session_id, client_ids, token=token)
def split_query(query: str) -> List[str]:
    """Split a query into its comma-separated expressions.

    Example:
        name:bla, foo:<=1

    Raises:
        QueryParserException: when the query is not a string or contains
        no non-empty expressions.
    """
    try:
        stripped = query.strip()
    except (ValueError, AttributeError):
        raise QueryParserException('query is not valid, received instead {}'.format(query))
    parts = [part.strip() for part in stripped.split(',')]
    expressions = [part for part in parts if part]
    if not expressions:
        raise QueryParserException('Query is not valid: {}'.format(query))
    return expressions
def update_route53_records(self, domain_name, dns_name):
    """Update Route53 records following GW domain creation.

    Uses an apex ALIAS A record when `domain_name` is the hosted zone
    apex (a CNAME is not allowed there), otherwise a CNAME record.
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)
    # Hosted zone names carry a trailing dot; strip it for comparison.
    zone_name = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1]
    if zone_name == domain_name:
        # Z2FDTNDATAQYW2 -- presumably the fixed CloudFront hosted zone
        # id required for ALIAS targets; confirm against AWS docs.
        record_set = {
            'Name': domain_name,
            'Type': 'A',
            'AliasTarget': {
                'HostedZoneId': 'Z2FDTNDATAQYW2',
                'DNSName': dns_name,
                'EvaluateTargetHealth': False,
            },
        }
    else:
        record_set = {
            'Name': domain_name,
            'Type': 'CNAME',
            'ResourceRecords': [{'Value': dns_name}],
            'TTL': 60,
        }
    change_batch = {
        'Changes': [
            {'Action': 'UPSERT', 'ResourceRecordSet': record_set},
        ],
    }
    return self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch=change_batch,
    )
def size_to_content(self):
    """Size the canvas item to the proper height."""
    new_sizing = self.copy_sizing()
    new_sizing.minimum_height = 0
    new_sizing.maximum_height = 0
    axes = self.__axes
    if axes and axes.is_valid and axes.x_calibration and axes.x_calibration.units:
        # Leave room for one line of label text plus a small margin.
        label_height = self.font_size + 4
        new_sizing.minimum_height = label_height
        new_sizing.maximum_height = label_height
    self.update_sizing(new_sizing)
def _get_stddevs(self, C, stddev_types, rup, imt, num_sites):
stddevs = []
for stddev_type in stddev_types:
sigma_mean = self._compute_standard_dev(rup, imt, C)
sigma_tot = np.sqrt((sigma_mean ** 2) + (C['SigmaReg'] ** 2))
sigma_tot = np.log10(np.exp(sigma_tot))
stddevs.append(sigma_tot + np.zeros(num_sites))
return stddevs | Return standard deviations as defined in eq. 4 and 5, page 744,
based on table 8, page 744.
Eq. 5 yields std dev in natural log, so convert to log10 |
def tap(f):
    """ "Tap" a Deferred callback chain with a function whose return
    value is ignored: the original result is passed through.
    """
    @wraps(f)
    def _cb(res, *args, **kwargs):
        deferred = maybeDeferred(f, res, *args, **kwargs)
        # Restore the original result once f has run.
        deferred.addCallback(lambda _ignored: res)
        return deferred
    return _cb
def _writeLinks(self, links, fileObject, replaceParamFile):
for link in links:
linkType = link.type
fileObject.write('LINK %s\n' % link.linkNumber)
if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
self._writeCrossSectionLink(link, fileObject, replaceParamFile)
elif linkType == 'STRUCTURE':
self._writeStructureLink(link, fileObject, replaceParamFile)
elif linkType in ('RESERVOIR', 'LAKE'):
self._writeReservoirLink(link, fileObject, replaceParamFile)
else:
log.error('OOPS: CIF LINE 417')
fileObject.write('\n') | Write Link Lines to File Method |
def geq_multiple(self, other):
    """Return the next multiple of this time value, greater than or equal
    to ``other``.  If ``other`` is zero, return this time value.

    :rtype: :class:`~aeneas.exacttiming.TimeValue`
    """
    if other == TimeValue("0.000"):
        return self
    multiplier = int(math.ceil(other / self))
    return multiplier * self
def extract_fields(lines, delim, searches, match_lineno=1, **kwargs):
    """Return generator of fields matching `searches`.

    Parameters
    ----------
    lines : iterable
        Provides line number (1-based) and line (str)
    delim : str
        Delimiter to split line by to produce fields
    searches : iterable
        Returns search (str) to match against line fields.
    match_lineno : int
        Line number of line to split and search fields

    Remaining keyword arguments are passed to `match_fields`.
    """
    keep_idx = []
    for lineno, line in lines:
        # Lines before the match line, or without the delimiter, pass
        # through unsplit -- but the match line itself must contain the
        # delimiter.
        if lineno < match_lineno or delim not in line:
            if lineno == match_lineno:
                raise WcutError('Delimter not found in line {}'.format(
                    match_lineno))
            yield [line]
            continue
        fields = line.split(delim)
        if lineno == match_lineno:
            # Decide once, from the match line, which indices to keep.
            keep_idx = list(match_fields(fields, searches, **kwargs))
        keep_fields = [fields[i] for i in keep_idx]
        if keep_fields:
            yield keep_fields
def get_token_network(
        self,
        token_address: TokenAddress,
        block_identifier: BlockSpecification = 'latest',
) -> Optional[Address]:
    """Return the token network address for the given token or None if
    there is no corresponding address.
    """
    if not isinstance(token_address, T_TargetAddress):
        raise ValueError('token_address must be an address')
    # Query the registry contract's token -> token-network mapping.
    address = self.proxy.contract.functions.token_to_token_networks(
        to_checksum_address(token_address),
    ).call(block_identifier=block_identifier)
    address = to_canonical_address(address)
    # The contract returns the zero address when no network is registered.
    if is_same_address(address, NULL_ADDRESS):
        return None
    return address
def word_wrap(text, columns=80, indent=4, padding=2):
    """Given a block of text, break it into a list of lines wrapped to
    length.

    Paragraph-leading lines are indented by `indent` spaces and
    continuation lines by `padding` spaces; `padding` is also reserved
    from the right margin.
    """
    paragraphs = _PARA_BREAK.split(text)
    lines = []
    # Reserve the padding on the right-hand side.
    columns -= padding
    for para in paragraphs:
        if para.isspace():
            continue
        line = ' ' * indent
        for word in para.split():
            if (len(line) + 1 + len(word)) > columns:
                lines.append(line)
                line = ' ' * padding
                line += word
            else:
                # NOTE(review): this also prepends a space to the very
                # first word, making the effective first-line indent
                # indent+1 -- presumably intended; confirm with callers.
                line += ' ' + word
        if not line.isspace():
            lines.append(line)
    return lines
def get(path, objectType, user=None):
    r"""Get the ACL of an object. Will filter by user if one is provided.

    Args:
        path: The path to the object
        objectType: The type of object (FILE, DIRECTORY, REGISTRY)
        user: A user name to filter by

    Returns (dict): A dictionary containing the ACL

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' win_dacl.get c:\temp directory
    """
    ret = {'Path': path,
           'ACLs': []}
    sidRet = _getUserSid(user)
    if path and objectType:
        dc = daclConstants()
        objectTypeBit = dc.getObjectTypeBit(objectType)
        path = dc.processPath(path, objectTypeBit)
        tdacl = _get_dacl(path, objectTypeBit)
        if tdacl:
            for counter in range(0, tdacl.GetAceCount()):
                tAce = tdacl.GetAce(counter)
                # No user filter, or this ACE belongs to the requested user.
                if not sidRet['sid'] or (tAce[2] == sidRet['sid']):
                    ret['ACLs'].append(_ace_to_text(tAce, objectTypeBit))
    return ret
def distinct_values_of(self, field, count_deleted=False):
    """Use the Riak HTTP search endpoint (SOLR facets) to count the
    distinct values of a field.

    Args:
        field (str): facet field
        count_deleted (bool): ignore deleted or not

    Returns:
        (dict): pairs of field values and number of counts
    """
    solr_params = "facet=true&facet.field=%s&rows=0" % field
    result = self.riak_http_search_query(self.index_name, solr_params, count_deleted)
    facet_fields = result['facet_counts']['facet_fields'][field]
    # SOLR returns a flat list alternating value, count, value, count...
    pairs = zip(facet_fields[0::2], facet_fields[1::2])
    return dict(pairs)
def add_tarball(self, tarball, package):
    """Add a tarball for `package`, creating its directory if needed.

    A None tarball is logged and skipped (probably a renamed project).
    """
    if tarball is None:
        logger.error(
            "No tarball found for %s: probably a renamed project?",
            package)
        return
    target_dir = os.path.join(self.root_directory, package)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
        logger.info("Created %s", target_dir)
    logger.info("Copying tarball to %s", target_dir)
    shutil.copy(tarball, target_dir)
def population_variant_regions(items, merged=False):
    """Retrieve the variant region BED file from a population of items.

    If tumor/normal, return the tumor BED file. If a population, return
    the BED file covering the most bases.
    """
    def _get_variant_regions(data):
        # Fall back to the sample callable regions when no explicit
        # variant regions are configured.
        out = dd.get_variant_regions(data) or dd.get_sample_callable(data)
        if merged and dd.get_variant_regions(data):
            merged_out = dd.get_variant_regions_merged(data)
            if merged_out:
                out = merged_out
            else:
                out = merge_overlaps(out, data)
        return out
    import pybedtools
    if len(items) == 1:
        return _get_variant_regions(items[0])
    else:
        paired = vcfutils.get_paired(items)
        if paired:
            return _get_variant_regions(paired.tumor_data)
        else:
            # Pick the BED file covering the most total bases.
            vrs = []
            for data in items:
                vr_bed = _get_variant_regions(data)
                if vr_bed:
                    vrs.append((pybedtools.BedTool(vr_bed).total_coverage(), vr_bed))
            vrs.sort(reverse=True)
            if vrs:
                return vrs[0][1]
def validate(self, form, extra_validators=tuple()):
    """Perform validation only if data has been submitted."""
    if not self.has_data:
        return True
    # List-style submissions must actually be a list/tuple.
    if self.is_list_data and not isinstance(self._formdata[self.name], (list, tuple)):
        return False
    return super(NestedModelList, self).validate(form, extra_validators)
def from_array(self, array, propname):
    r"""Apply data to the network based on a rectangular array filled with
    values.  Each array location corresponds to a pore in the network.

    Parameters
    ----------
    array : array_like
        The rectangular array containing the values to be added to the
        network. This array must be the same shape as the original network.
    propname : string
        The name of the pore property being added.
    """
    # (The original body began with a stray bare `r` expression -- the
    # detached raw-docstring prefix -- which raised NameError at runtime.)
    array = sp.atleast_3d(array)
    if sp.shape(array) != self._shape:
        raise Exception('The array shape does not match the network')
    temp = array.flatten()
    Ps = sp.array(self['pore.index'][self.pores('internal')], dtype=int)
    # Normalize the property name to the 'pore.' namespace.
    propname = 'pore.' + propname.split('.')[-1]
    self[propname] = sp.nan
    self[propname][self.pores('internal')] = temp[Ps]
def get_fields(self):
    """Calculate fields that can be accessed by authenticated user.

    Anonymous users get no fields; superusers get all of them.  Nested
    permission serializers that would expose no fields are replaced by a
    plain related field so the relation is still represented.
    """
    ret = OrderedDict()
    if not self.user:
        return ret
    fields = super(ModelPermissionsSerializer, self).get_fields()
    if self.user.is_superuser:
        return fields
    allowed_fields = self._get_user_allowed_fields()
    for allowed_field in allowed_fields:
        field = fields[allowed_field.name]
        if isinstance(field, ModelPermissionsSerializer):
            if not field.get_fields():
                # Nested serializer exposes nothing: rebuild as a plain
                # related field from the relation info.
                field_cls = field._related_class
                kwargs = get_relation_kwargs(allowed_field.name,
                                             field.info)
                # view_name only applies to hyperlinked relations.
                if not issubclass(field_cls,
                                  serializers.HyperlinkedRelatedField):
                    kwargs.pop('view_name', None)
                field = field_cls(**kwargs)
        ret[allowed_field.name] = field
    return ret
def get_waveform_end_frequency(template=None, **kwargs):
    """Return the stop frequency of a template, or None when the
    approximant has no registered end-frequency function."""
    input_params = props(template, **kwargs)
    approximant = kwargs['approximant']
    if approximant not in _filter_ends:
        return None
    return _filter_ends[approximant](**input_params)
def SearchDependencies(self,
                       os_name,
                       artifact_name_list,
                       existing_artifact_deps=None,
                       existing_expansion_deps=None):
    """Return a set of artifact names needed to fulfill dependencies.

    Search the path dependency tree for all artifacts that can fulfill
    dependencies of artifact_name_list. If multiple artifacts provide a
    dependency, they are all included.

    Args:
        os_name: operating system string
        artifact_name_list: list of artifact names to find dependencies for.
        existing_artifact_deps: existing dependencies to add to, for recursion,
            e.g. set(["WindowsRegistryProfiles", "WindowsEnvironmentVariablePath"])
        existing_expansion_deps: existing expansion dependencies to add to, for
            recursion, e.g. set(["users.userprofile", "users.homedir"])

    Returns:
        (artifact_names, expansion_names): a tuple of sets, one with artifact
        names, the other expansion names
    """
    artifact_deps = existing_artifact_deps or set()
    expansion_deps = existing_expansion_deps or set()

    artifact_objs = self.GetArtifacts(
        os_name=os_name, name_list=artifact_name_list)
    artifact_deps = artifact_deps.union([a.name for a in artifact_objs])

    for artifact in artifact_objs:
        expansions = GetArtifactPathDependencies(artifact)
        if expansions:
            expansion_deps = expansion_deps.union(set(expansions))
            # Find artifacts that provide these expansions and recurse
            # into any we have not already collected.
            new_artifact_names = self.GetArtifactNames(
                os_name=os_name, provides=expansions)
            missing_artifacts = new_artifact_names - artifact_deps
            if missing_artifacts:
                new_artifacts, new_expansions = self.SearchDependencies(
                    os_name,
                    new_artifact_names,
                    existing_artifact_deps=artifact_deps,
                    existing_expansion_deps=expansion_deps)
                artifact_deps = artifact_deps.union(new_artifacts)
                expansion_deps = expansion_deps.union(new_expansions)

    return artifact_deps, expansion_deps
def description(self):
    """Return the short description line for `self.name` from the index.

    Remembers the most recent package-name line and, when the next
    description line belongs to `self.name`, returns the description
    text.  Returns None when no match is found.
    """
    # Initialize so a description line appearing before any name line
    # cannot raise NameError (the original left sbo_name unbound).
    sbo_name = ""
    for line in self.SLACKBUILDS_TXT.splitlines():
        if line.startswith(self.line_name):
            sbo_name = line[17:].strip()
        if line.startswith(self.line_des):
            if sbo_name == self.name:
                return line[31:].strip()
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
    """Adds a Max Pooling layer.

    It is assumed by the wrapper that the pooling is only done per image
    and not in depth or batch.

    Args:
      inputs: a tensor of size [batch_size, height, width, depth].
      kernel_size: a list of length 2: [kernel_height, kernel_width] of the
        pooling kernel over which the op is computed. Can be an int if both
        values are the same.
      stride: a list of length 2: [stride_height, stride_width].
        Can be an int if both strides are the same. Note that presently
        both strides must have the same value.
      padding: the padding method, either 'VALID' or 'SAME'.
      scope: Optional scope for name_scope.

    Returns:
      a tensor representing the results of the pooling operation.

    Raises:
      ValueError: if 'kernel_size' is not a 2-D list
    """
    with tf.name_scope(scope, 'MaxPool', [inputs]):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        # Pool only over the spatial dimensions (height, width).
        return tf.nn.max_pool(inputs,
                              ksize=[1, kernel_h, kernel_w, 1],
                              strides=[1, stride_h, stride_w, 1],
                              padding=padding)
def _display(node, indent, expandattrs, expandvals, output):
    """Core recursive routine to display a Node object."""
    attrs = _displayattrs(node.attrib, expandattrs)
    if node.text is None or not expandvals:
        val = ''
    elif isinstance(node.text, str):
        val = ' %s' % repr(node.text.strip())
    else:
        val = ' %s' % repr(node.text)
    output.write(encode(indent + striptag(node.tag) + attrs + val + '\n'))
    child_indent = indent + '  '
    for child in node:
        _display(child, child_indent, expandattrs, expandvals, output)
def downgrades(src):
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete())
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _ | Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply. |
def _remove_empty_lines(self, lines):
ret = []
for l in lines:
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret | Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed. |
def atlas_node_stop( atlas_state ):
for component in atlas_state.keys():
log.debug("Stopping Atlas component '%s'" % component)
atlas_state[component].ask_join()
atlas_state[component].join()
return True | Stop the atlas node threads |
def select(self, table, cols, execute=True, select_type='SELECT', return_type=list):
select_type = select_type.upper()
assert select_type in SELECT_QUERY_TYPES
statement = '{0} {1} FROM {2}'.format(select_type, join_cols(cols), wrap(table))
if not execute:
return statement
values = self.fetch(statement)
return self._return_rows(table, cols, values, return_type) | Query every row and only certain columns from a table. |
def transfer_project(self, to_namespace, **kwargs):
path = '/projects/%s/transfer' % (self.id,)
self.manager.gitlab.http_put(path,
post_data={"namespace": to_namespace},
**kwargs) | Transfer a project to the given namespace ID
Args:
to_namespace (str): ID or path of the namespace to transfer the
project to
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTransferProjectError: If the project could not be transferred |
def _handle_tag_definesceneandframelabeldata(self):
obj = _make_object("DefineSceneAndFrameLabelData")
obj.SceneCount = self._get_struct_encodedu32()
for i in range(1, obj.SceneCount + 1):
setattr(obj, 'Offset{}'.format(i), self._get_struct_encodedu32())
setattr(obj, 'Name{}'.format(i), self._get_struct_string())
obj.FrameLabelCount = self._get_struct_encodedu32()
for i in range(1, obj.FrameLabelCount + 1):
setattr(obj, 'FrameNum{}'.format(i), self._get_struct_encodedu32())
setattr(obj, 'FrameLabel{}'.format(i), self._get_struct_string())
return obj | Handle the DefineSceneAndFrameLabelData tag. |
def send(self, verb, params=None, source=None, tags=None):
m = RFC1459Message.from_data(verb, params=params, source=source, tags=tags)
self._send_message(m) | Send a generic IRC message to the server.
A message is created using the various parts of the message, then gets
assembled and sent to the server.
Args:
verb (str): Verb, such as PRIVMSG.
params (list of str): Message parameters, defaults to no params.
source (str): Source of the message, defaults to no source.
tags (dict): `Tags <http://ircv3.net/specs/core/message-tags-3.2.html>`_
to send with the message. |
def get_missing_bins(original, trimmed):
original_diag = np.diag(original)
trimmed_diag = np.diag(trimmed)
index = []
m = min(original.shape)
for j in range(min(trimmed.shape)):
k = 0
while original_diag[j + k] != trimmed_diag[j] and k < 2 * m:
k += 1
index.append(k + j)
return np.array(index) | Retrieve indices of a trimmed matrix with respect to the original matrix.
Fairly fast but is only correct if diagonal values are different, which is
always the case in practice. |
def gstd(data, channels=None):
if channels is None:
data_stats = data
else:
data_stats = data[:, channels]
return np.exp(np.std(np.log(data_stats), axis=0)) | Calculate the geometric std. dev. of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric standard deviation of the events in the specified
channels of `data`. |
def initialize_pop(self):
self.toolbox.register("individual", self.generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=self._params['popsize'])
self.assign_fitnesses(self.population)
self._params['model_count'] += len(self.population) | Initialises the population and assigns initial fitnesses. |
def potcar_eatom_list_from_outcar( filename='OUTCAR' ):
with open( filename ) as f:
outcar = f.read()
eatom_re = re.compile( "energy of atom\s+\d+\s+EATOM=\s*([-\d\.]+)" )
eatom = [ float( e ) for e in eatom_re.findall( outcar ) ]
return eatom | Returns a list of EATOM values for the pseudopotentials used.
Args:
filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.
Returns:
(List(Float)): A list of EATOM values, in the order they appear in the OUTCAR. |
def lr_padding(self, terms):
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad | Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If either of them is '', no padding is added on that side. |
def start_logger(self):
level = self.real_level(self.level)
logging.basicConfig(level=level)
self.set_logger(self.name, self.level)
config.dictConfig(self.config)
self.logger = logging.getLogger(self.name) | Enables the root logger and configures extra loggers. |
def run_final_eval(train_session, module_spec, class_count, image_lists,
jpeg_data_tensor, decoded_image_tensor,
resized_image_tensor, bottleneck_tensor):
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(train_session, image_lists,
FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
decoded_image_tensor, resized_image_tensor,
bottleneck_tensor, FLAGS.tfhub_module))
(eval_session, _, bottleneck_input, ground_truth_input, evaluation_step,
prediction) = build_eval_session(module_spec, class_count)
test_accuracy, predictions = eval_session.run(
[evaluation_step, prediction],
feed_dict={
bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth
})
tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
(test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i]:
tf.logging.info('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]])) | Runs a final evaluation on an eval graph using the test data set.
Args:
train_session: Session for the train graph with the tensors below.
module_spec: The hub.ModuleSpec for the image module being used.
class_count: Number of classes
image_lists: OrderedDict of training images for each label.
jpeg_data_tensor: The layer to feed jpeg image data into.
decoded_image_tensor: The output of decoding and resizing the image.
resized_image_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph. |
def regex_extract(arg, pattern, index):
return ops.RegexExtract(arg, pattern, index).to_expr() | Returns specified index, 0 indexed, from string based on regex pattern
given
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string |
def delete_role(query):
role = _query_to_role(query)
if click.confirm(f'Are you sure you want to delete {role!r}?'):
role_manager.delete(role, commit=True)
click.echo(f'Successfully deleted {role!r}')
else:
click.echo('Cancelled.') | Delete a role. |
def makeRandomBinaryTree(leafNodeNumber=None):
while True:
nodeNo = [-1]
def fn():
nodeNo[0] += 1
if random.random() > 0.6:
i = str(nodeNo[0])
return BinaryTree(0.00001 + random.random()*0.8, True, fn(), fn(), i)
else:
return BinaryTree(0.00001 + random.random()*0.8, False, None, None, str(nodeNo[0]))
tree = fn()
def fn2(tree):
if tree.internal:
return fn2(tree.left) + fn2(tree.right)
return 1
if leafNodeNumber is None or fn2(tree) == leafNodeNumber:
return tree | Creates a random binary tree. |
def sorted(self, fsort):
if not self.params:
self.params = dict()
self.params['sort'] = fsort
return self | Allows adding one or more sorts on specific fields. Each sort can be reversed as well. The sort is defined on a per-field level, with the special field name _score used to sort by score. |
def fetch(self, endpoint_name, identifier_input, query_params=None):
endpoint_url = constants.URL_PREFIX + "/" + self._version + "/" + endpoint_name
if query_params is None:
query_params = {}
if len(identifier_input) == 1:
query_params.update(identifier_input[0])
return self._request_client.get(endpoint_url, query_params)
return self._request_client.post(endpoint_url, identifier_input, query_params) | Calls this instance's request_client's post method with the
specified component endpoint
Args:
- endpoint_name (str) - The endpoint to call like "property/value".
- identifier_input - One or more identifiers to request data for. An identifier can
be in one of these forms:
- A list of property identifier dicts:
- A property identifier dict can contain the following keys:
(address, zipcode, unit, city, state, slug, meta).
One of 'address' or 'slug' is required.
Ex: [{"address": "82 County Line Rd",
"zipcode": "72173",
"meta": "some ID"}]
A slug is a URL-safe string that identifies a property.
These are obtained from HouseCanary.
Ex: [{"slug": "123-Example-St-San-Francisco-CA-94105"}]
- A list of dicts representing a block:
- A block identifier dict can contain the following keys:
(block_id, num_bins, property_type, meta).
'block_id' is required.
Ex: [{"block_id": "060750615003005", "meta": "some ID"}]
- A list of dicts representing a zipcode:
Ex: [{"zipcode": "90274", "meta": "some ID"}]
- A list of dicts representing an MSA:
Ex: [{"msa": "41860", "meta": "some ID"}]
The "meta" field is always optional.
Returns:
A Response object, or the output of a custom OutputGenerator
if one was specified in the constructor. |
def rsem_stats_table(self):
headers = OrderedDict()
headers['alignable_percent'] = {
'title': '% Alignable'.format(config.read_count_prefix),
'description': '% Alignable reads'.format(config.read_count_desc),
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
self.general_stats_addcols(self.rsem_mapped_data, headers) | Take the parsed stats from the rsem report and add them to the
basic stats table at the top of the report |
def enable_fullquicklook(self):
self.args.disable_quicklook = False
for p in ['cpu', 'gpu', 'mem', 'memswap']:
setattr(self.args, 'disable_' + p, True) | Enable the full quicklook mode |
def get(self, callback):
derived_path = self.context.request.url
logger.debug('[{log_prefix}]: get.derived_path: {path}'.format(
log_prefix=LOG_PREFIX, path=derived_path))
callback(self.storage.get(self.result_key_for(derived_path))) | Gets an item based on the path. |
def get_probs_for_labels(labels, prediction_results):
probs = []
if 'probability' in prediction_results:
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if v in labels and k.startswith('predicted'):
if k == 'predict':
prob_name = 'probability'
else:
prob_name = 'probability' + k[9:]
probs_one[labels.index(v)] = r[prob_name]
probs.append(probs_one)
return probs
else:
for i, r in prediction_results.iterrows():
probs_one = [0.0] * len(labels)
for k, v in six.iteritems(r):
if k in labels:
probs_one[labels.index(k)] = v
probs.append(probs_one)
return probs | Given ML Workbench prediction results, get probs of each label for each instance.
The prediction results are like:
[
{'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1},
{'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01},
...
]
Each instance is ordered by prob. But in some cases probs are needed for fixed
order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the
results of above is expected to be:
[
[0.8, 0.1, 0.0],
[0.01, 0.0, 0.9],
...
]
Note that the sum of each instance may not be always 1. If model's top_n is set to
none-zero, and is less than number of labels, then prediction results may not contain
probs for all labels.
Args:
labels: a list of labels specifying the order of the labels.
prediction_results: a pandas DataFrame containing prediction results, usually returned
by get_prediction_results() call.
Returns:
A list of list of probs for each class. |
def _connect(self):
try:
db = pymysql.connect(user=self.user, passwd=self.passwd,
host=self.host, port=self.port,
db=self.shdb, use_unicode=True)
return db, db.cursor()
except Exception:
logger.error("Database connection error")
raise | Connect to the MySQL database. |
def parse_disease_associations(path: str, excluded_disease_ids: set):
if os.path.isdir(path) or not os.path.exists(path):
logger.info("Couldn't find the disease associations file. Returning empty list.")
return {}
disease_associations = defaultdict(list)
with open(path) as input_file:
for line in input_file:
target_id, disease_id = line.strip().split(" ")
if disease_id not in excluded_disease_ids:
disease_associations[target_id].append(disease_id)
return disease_associations | Parse the disease-drug target associations file.
:param str path: Path to the disease-drug target associations file.
:param list excluded_disease_ids: Identifiers of the disease for which drug targets are being predicted.
:return: Dictionary of drug target-disease mappings. |
def complete_extra(self, args):
"Completions for the 'extra' command."
if len(args) == 0:
return self._listdir('./')
return self._complete_path(args[-1]) | Completions for the 'extra' command. |
def step1(self, pin):
context = SRPContext(
'Pair-Setup', str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512)
self._session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode()) | First pairing step. |
def get_dict(self, exclude_keys=None, include_keys=None):
d = {}
exclude_keys_list = exclude_keys or []
include_keys_list = include_keys or []
for k in self._get_keys():
if k not in exclude_keys_list and (
k in include_keys_list or not include_keys
):
d[k] = getattr(self, k)
return d | return dictionary of keys and values corresponding to this model's
data - if include_keys is null the function will return all keys
:param exclude_keys: (optional) is a list of columns from model that
should not be returned by this function
:param include_keys: (optional) is a list of columns from model that
should be returned by this function
:return: |
def assert_less(first, second, msg_fmt="{msg}"):
if not first < second:
msg = "{!r} is not less than {!r}".format(first, second)
fail(msg_fmt.format(msg=msg, first=first, second=second)) | Fail if first is not less than second.
>>> assert_less('bar', 'foo')
>>> assert_less(5, 5)
Traceback (most recent call last):
...
AssertionError: 5 is not less than 5
The following msg_fmt arguments are supported:
* msg - the default error message
* first - the first argument
* second - the second argument |
def get_belapi_handle(client, username=None, password=None):
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belapi_db = sys_db.create_database(name=belapi_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belapi_db = client.db(belapi_db_name, username=username, password=password)
else:
belapi_db = client.db(belapi_db_name)
try:
belapi_db.create_collection(belapi_settings_name)
except Exception:
pass
try:
belapi_db.create_collection(belapi_statemgmt_name)
except Exception:
pass
return belapi_db | Get BEL API arango db handle |
def execute_series_lead_lag_timedelta(
op, data, offset, default, aggcontext=None, **kwargs
):
func = operator.add if isinstance(op, ops.Lag) else operator.sub
group_by = aggcontext.group_by
order_by = aggcontext.order_by
parent = aggcontext.parent
parent_df = getattr(parent, 'obj', parent)
indexed_original_df = parent_df.set_index(group_by + order_by)
adjusted_parent_df = parent_df.assign(
**{k: func(parent_df[k], offset) for k in order_by}
)
adjusted_indexed_parent = adjusted_parent_df.set_index(group_by + order_by)
result = adjusted_indexed_parent[getattr(data, 'obj', data).name]
result = result.reindex(indexed_original_df.index)
return post_lead_lag(result, default) | An implementation of shifting a column relative to another one that is
in units of time rather than rows. |
def __get_activator_method(self, method_name):
activator = getattr(self.__module, ACTIVATOR, None)
if activator is None:
activator = getattr(self.__module, ACTIVATOR_LEGACY, None)
if activator is not None:
_logger.warning(
"Bundle %s uses the deprecated '%s' to declare"
" its activator. Use @BundleActivator instead.",
self.__name,
ACTIVATOR_LEGACY,
)
return getattr(activator, method_name, None) | Retrieves the requested method of the activator, or returns None
:param method_name: A method name
:return: A method, or None |
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value | provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required |
def can_join_group(self, project):
if project.class_.is_locked or project.group_max < 2:
return False
u2g = self.fetch_group_assoc(project)
if u2g:
return len(list(u2g.group.users)) < project.group_max
return True | Return whether or not user can join a group on `project`. |
def walkfiles(startdir, regex=None, recurse=True):
for r,_,fs in os.walk(startdir):
if not recurse and startdir != r:
return
for f in fs:
path = op.abspath(op.join(r,f))
if regex and not _is_match(regex, path):
continue
if op.isfile(path):
yield path | Yields the absolute paths of files found within the given start
directory. Can optionally filter paths using a regex pattern. |
def get(self, field_path):
if not self._exists:
return None
nested_data = field_path_module.get_nested_value(field_path, self._data)
return copy.deepcopy(nested_data) | Get a value from the snapshot data.
If the data is nested, for example:
.. code-block:: python
>>> snapshot.to_dict()
{
'top1': {
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
},
'top6': b'\x00\x01 foo',
}
a **field path** can be used to access the nested data. For
example:
.. code-block:: python
>>> snapshot.get('top1')
{
'middle2': {
'bottom3': 20,
'bottom4': 22,
},
'middle5': True,
}
>>> snapshot.get('top1.middle2')
{
'bottom3': 20,
'bottom4': 22,
}
>>> snapshot.get('top1.middle2.bottom3')
20
See :meth:`~.firestore_v1beta1.client.Client.field_path` for
more information on **field paths**.
A copy is returned since the data may contain mutable values,
but the data stored in the snapshot must remain immutable.
Args:
field_path (str): A field path (``.``-delimited list of
field names).
Returns:
Any or None:
(A copy of) the value stored for the ``field_path`` or
None if snapshot document does not exist.
Raises:
KeyError: If the ``field_path`` does not match nested data
in the snapshot. |
def _update_offsets(start_x, spacing, terminations, offsets, length):
return (start_x + spacing[0] * terminations / 2.,
offsets[1] + spacing[1] * 2. + length) | Update the offsets |
def get(object_ids):
worker = global_worker
worker.check_connected()
with profiling.profile("ray.get"):
if worker.mode == LOCAL_MODE:
return object_ids
global last_task_error_raise_time
if isinstance(object_ids, list):
values = worker.get_object(object_ids)
for i, value in enumerate(values):
if isinstance(value, RayError):
last_task_error_raise_time = time.time()
raise value
return values
else:
value = worker.get_object([object_ids])[0]
if isinstance(value, RayError):
last_task_error_raise_time = time.time()
raise value
return value | Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ID is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_ids is a list, then the objects
corresponding to each object in the list will be returned.
Args:
object_ids: Object ID of the object to get or a list of object IDs to
get.
Returns:
A Python object or a list of Python objects.
Raises:
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception. |
def object_isinstance(node, class_or_seq, context=None):
obj_type = object_type(node, context)
if obj_type is util.Uninferable:
return util.Uninferable
return _object_type_is_subclass(obj_type, class_or_seq, context=context) | Check if a node 'isinstance' any node in class_or_seq
:param node: A given node
:param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]]
:rtype: bool
:raises AstroidTypeError: if the given ``classes_or_seq`` are not types |
def send_exit_with_code(cls, sock, code):
encoded_exit_status = cls.encode_int(code)
cls.send_exit(sock, payload=encoded_exit_status) | Send an Exit chunk over the specified socket, containing the specified return code. |
def get_ir_reciprocal_mesh(mesh,
cell,
is_shift=None,
is_time_reversal=True,
symprec=1e-5,
is_dense=False):
_set_no_error()
lattice, positions, numbers, _ = _expand_cell(cell)
if lattice is None:
return None
if is_dense:
dtype = 'uintp'
else:
dtype = 'intc'
grid_mapping_table = np.zeros(np.prod(mesh), dtype=dtype)
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
if is_shift is None:
is_shift = [0, 0, 0]
if spg.ir_reciprocal_mesh(
grid_address,
grid_mapping_table,
np.array(mesh, dtype='intc'),
np.array(is_shift, dtype='intc'),
is_time_reversal * 1,
lattice,
positions,
numbers,
symprec) > 0:
return grid_mapping_table, grid_address
else:
return None | Return k-points mesh and k-point map to the irreducible k-points.
The symmetry is searched from the input cell.
Parameters
----------
mesh : array_like
Uniform sampling mesh numbers.
dtype='intc', shape=(3,)
cell : spglib cell tuple
Crystal structure.
is_shift : array_like, optional
[0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
Default is None which equals to [0, 0, 0].
dtype='intc', shape=(3,)
is_time_reversal : bool, optional
Whether time reversal symmetry is included or not. Default is True.
symprec : float, optional
Symmetry tolerance in distance. Default is 1e-5.
is_dense : bool, optional
grid_mapping_table is returned with dtype='uintp' if True. Otherwise
its dtype='intc'. Default is False.
Returns
-------
grid_mapping_table : ndarray
Grid point mapping table to ir-gird-points.
dtype='intc' or 'uintp', shape=(prod(mesh),)
grid_address : ndarray
Address of all grid points.
dtype='intc', shape=(prod(mesh), 3) |
def reward_battery(self):
if not 'battery' in self.mode:
return
mode = self.mode['battery']
if mode and mode and self.__test_cond(mode):
self.logger.debug('Battery out')
self.player.stats['reward'] += mode['reward']
self.player.game_over = self.player.game_over or mode['terminal'] | Add a battery level reward |
def encode(self, tag):
sequence = str(tag.sequence_n)
if len(sequence) > self._sequence_l:
sequence = sequence[:self._sequence_l]
while len(sequence) < self._sequence_l:
sequence = '0' + sequence
version = str(tag.version)
if len(version) > 2:
version = version[:1] + version[-1:]
while len(version) < 2:
version = '0' + version
year = str(tag.year)[-2:]
sender = tag.sender[:3]
receiver = tag.receiver[:3]
rule = self._header + year + sequence + sender
rule = rule + self._ip_delimiter + receiver + ".V" + version
return rule | Parses a CWR file name from a FileTag object.
The result will be a string following the format CWyynnnnsss_rrr.Vxx,
where the numeric sequence will have the length set on the encoder's
constructor.
:param tag: FileTag to parse
:return: a string file name parsed from the FileTag |
def virtual_interface_create(provider, names, **kwargs):
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs) | Attach private interfaces to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.