def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
        n = filter_size * filter_size * out_filters
        kernel = tf.get_variable(
            "DW", [filter_size, filter_size, in_filters, out_filters],
            tf.float32,
            initializer=tf.random_normal_initializer(
                stddev=np.sqrt(2.0 / n)))
        return tf.nn.conv2d(x, kernel, strides, padding="SAME")
|
Convolution.
|
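The initializer above is He initialization: with fan-out n = k * k * out_filters, weights are drawn with standard deviation sqrt(2/n). A minimal standalone check of that arithmetic (NumPy only; the TF call itself is unchanged):

import numpy as np

filter_size, out_filters = 3, 64
n = filter_size * filter_size * out_filters  # fan-out used above
stddev = np.sqrt(2.0 / n)                    # He-init standard deviation
print(round(float(stddev), 4))               # ~0.0589 for a 3x3x64 kernel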
def get_comment_ancestors(comID, depth=None):
    """
    Return the list of ancestors of the given comment, ordered from
    oldest to newest ("top-down": the direct parent of comID is at the last
    position), up to the given depth.
    :param comID: the ID of the comment for which we want to retrieve ancestors
    :type comID: int
    :param depth: the maximum number of levels up from the given comment we
                  want to retrieve ancestors. None for no limit, 1 for the
                  direct parent only, etc.
    :type depth: int
    :return: the list of ancestors
    :rtype: list
    """
    if depth == 0:
        return []
    res = run_sql(
        """SELECT "in_reply_to_id_cmtRECORDCOMMENT" FROM "cmtRECORDCOMMENT" WHERE id=%s""",
        (comID,))
    if res:
        parent_comID = res[0][0]
        if parent_comID == 0:
            return []
        # Recurse unconditionally so that depth=None means "no limit";
        # only decrement the counter when a depth was given.
        if depth:
            depth -= 1
        parent_ancestors = get_comment_ancestors(parent_comID, depth)
        parent_ancestors.append(parent_comID)
        return parent_ancestors
    else:
        return []
|
Return the list of ancestors of the given comment, ordered from
oldest to newest ("top-down": the direct parent of comID is at the last position),
up to the given depth.
:param comID: the ID of the comment for which we want to retrieve ancestors
:type comID: int
:param depth: the maximum number of levels up from the given comment we
want to retrieve ancestors. None for no limit, 1 for the
direct parent only, etc.
:type depth: int
:return: the list of ancestors
:rtype: list
|
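For comparison, the same top-down walk can be written iteratively, which avoids recursion depth limits on very deep threads. A sketch using the same run_sql helper assumed above:

def get_comment_ancestors_iter(comID, depth=None):
    """Iterative sketch of the recursive walk above (same semantics)."""
    ancestors = []
    while depth is None or depth > 0:
        res = run_sql(
            """SELECT "in_reply_to_id_cmtRECORDCOMMENT" FROM "cmtRECORDCOMMENT" WHERE id=%s""",
            (comID,))
        if not res or res[0][0] == 0:
            break
        comID = res[0][0]
        ancestors.insert(0, comID)  # keep oldest-first ordering
        if depth is not None:
            depth -= 1
    return ancestors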
def run(self):
    '''Start scheduler loop'''
    logger.info("scheduler starting...")
    while not self._quit:
        try:
            time.sleep(self.LOOP_INTERVAL)
            self.run_once()
            self._exceptions = 0
        except KeyboardInterrupt:
            break
        except Exception as e:
            logger.exception(e)
            self._exceptions += 1
            if self._exceptions > self.EXCEPTION_LIMIT:
                break
            continue
    logger.info("scheduler exiting...")
    self._dump_cnt()
|
Start scheduler loop
|
async def sentinel_monitor(self, name, ip, port, quorum):
    "Add a new master to Sentinel to be monitored"
    return await self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
|
Add a new master to Sentinel to be monitored
|
def get_xy_from_linecol(self, line, col, offsets, factors):
    """Get the intermediate coordinates from line & col.
    Intermediate coordinates are actually the instrument's scanning angles.
    """
    loff, coff = offsets
    lfac, cfac = factors
    x__ = (col - coff) / cfac * 2**16
    y__ = (line - loff) / lfac * 2**16
    return x__, y__
|
Get the intermediate coordinates from line & col.
Intermediate coordinates are actually the instrument's scanning angles.
|
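A quick worked example of the conversion. The offsets and factors below are illustrative, not from any real instrument file; with cfac = 2**16 the column term reduces to col - coff:

loff, coff = 1856.0, 1856.0
lfac, cfac = 2.0 ** 16, 2.0 ** 16
col, line = 1956, 1756
x = (col - coff) / cfac * 2 ** 16   # -> 100.0
y = (line - loff) / lfac * 2 ** 16  # -> -100.0
print(x, y)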
def del_repo(repo, **kwargs):
    '''
    Delete a repo from the sources.list / sources.list.d
    If the .list file is in the sources.list.d directory
    and the file that the repo exists in does not contain any other
    repo configuration, the file itself will be deleted.
    The repo passed in must be a fully formed repository definition
    string.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.del_repo "myrepo definition"
    '''
    _check_apt()
    is_ppa = False
    if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
        # This is a PPA definition meaning special handling is needed
        # to derive the name.
        is_ppa = True
        dist = __grains__['lsb_distrib_codename']
        if not HAS_SOFTWAREPROPERTIES:
            _warn_software_properties(repo)
            owner_name, ppa_name = repo[4:].split('/')
            if 'ppa_auth' in kwargs:
                auth_info = '{0}@'.format(kwargs['ppa_auth'])
                repo = LP_PVT_SRC_FORMAT.format(auth_info, dist, owner_name,
                                                ppa_name)
            else:
                repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
        else:
            if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
                repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
            else:
                repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]

    sources = sourceslist.SourcesList()
    repos = [s for s in sources.list if not s.invalid]
    if repos:
        deleted_from = dict()
        try:
            repo_type, \
                repo_architectures, \
                repo_uri, \
                repo_dist, \
                repo_comps = _split_repo_str(repo)
        except SyntaxError:
            raise SaltInvocationError(
                'Error: repo \'{0}\' not a well formatted definition'
                .format(repo)
            )

        for source in repos:
            if (source.type == repo_type
                    and source.architectures == repo_architectures
                    and source.uri == repo_uri
                    and source.dist == repo_dist):
                s_comps = set(source.comps)
                r_comps = set(repo_comps)
                if s_comps.intersection(r_comps):
                    deleted_from[source.file] = 0
                    source.comps = list(s_comps.difference(r_comps))
                    if not source.comps:
                        try:
                            sources.remove(source)
                        except ValueError:
                            pass

            # PPAs are special and can add deb-src where expand_ppa_line
            # doesn't always reflect this. Lets just cleanup here for good
            # measure
            if (is_ppa and repo_type == 'deb' and source.type == 'deb-src' and
                    source.uri == repo_uri and source.dist == repo_dist):
                s_comps = set(source.comps)
                r_comps = set(repo_comps)
                if s_comps.intersection(r_comps):
                    deleted_from[source.file] = 0
                    source.comps = list(s_comps.difference(r_comps))
                    if not source.comps:
                        try:
                            sources.remove(source)
                        except ValueError:
                            pass
            sources.save()

        if deleted_from:
            ret = ''
            for source in sources:
                if source.file in deleted_from:
                    deleted_from[source.file] += 1
            for repo_file, count in six.iteritems(deleted_from):
                msg = 'Repo \'{0}\' has been removed from {1}.\n'
                if count == 0 and 'sources.list.d/' in repo_file:
                    if os.path.isfile(repo_file):
                        msg = ('File {1} containing repo \'{0}\' has been '
                               'removed.')
                        try:
                            os.remove(repo_file)
                        except OSError:
                            pass
                ret += msg.format(repo, repo_file)
            # explicit refresh after a repo is deleted
            refresh_db()
            return ret

    raise CommandExecutionError(
        'Repo {0} doesn\'t exist in the sources.list(s)'.format(repo)
    )
|
Delete a repo from the sources.list / sources.list.d
If the .list file is in the sources.list.d directory
and the file that the repo exists in does not contain any other
repo configuration, the file itself will be deleted.
The repo passed in must be a fully formed repository definition
string.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo "myrepo definition"
|
def get_volumes_for_sdc(self, sdcObj):
    """
    :param sdcObj: SDC object
    :return: list of Volumes attached to SDC
    :rtype: list of ScaleIO Volume objects
    """
    self.conn.connection._check_login()
    all_volumes = []
    response = self.conn.connection._do_get("{}/{}{}/{}".format(
        self.conn.connection._api_url, 'instances/Sdc::', sdcObj.id, 'relationships/Volume')).json()
    for sdc_volume in response:
        all_volumes.append(
            Volume.from_dict(sdc_volume)
        )
    return all_volumes
|
:param sdcObj: SDC object
:return: list of Volumes attached to SDC
:rtype: list of ScaleIO Volume objects
|
def _client_tagged(self, tags):
    '''Ensure that the client name is included in a list of tags. This is
    important for matching builders to the correct client. We exit
    on failure.
    Parameters
    ==========
    tags: a list of tags to look for the client name in
    '''
    # We must match the client to a tag
    name = self.client_name.lower()
    tags = [t.lower() for t in tags]
    if name not in tags:
        bot.error('%s not found in %s, must match!' % (name, tags))
        sys.exit(1)
|
Ensure that the client name is included in a list of tags. This is
important for matching builders to the correct client. We exit
on failure.
Parameters
==========
tags: a list of tags to look for the client name in
|
def _get_phantom_root_catalog(self, cat_name, cat_class):
    """Gets the catalog corresponding to the root of all implementation catalogs."""
    catalog_map = make_catalog_map(cat_name, identifier=PHANTOM_ROOT_IDENTIFIER)
    return cat_class(osid_object_map=catalog_map, runtime=self._runtime, proxy=self._proxy)
|
Gets the catalog corresponding to the root of all implementation catalogs.
|
def _validate_namespace(self, namespace):
    """Validates a namespace, raising a ResponseFailed error if invalid.
    Args:
        namespace (str): The namespace to validate
    Raises:
        ResponseFailed: The namespace was invalid, and a status of
            INVALID_ADDRESS will be sent with the response.
    """
    if self._namespace_regex.fullmatch(namespace) is None:
        LOGGER.debug('Invalid namespace: %s', namespace)
        raise _ResponseFailed(self._status.INVALID_ADDRESS)
|
Validates a namespace, raising a ResponseFailed error if invalid.
Args:
namespace (str): The namespace to validate
Raises:
ResponseFailed: The namespace was invalid, and a status of
INVALID_ADDRESS will be sent with the response.
|
def set_env(envName, envValue):
    """
    Set an environment variable by appending envValue to the current value
    of envName (colon-separated, PATH-style).
    :param envName: name of the environment variable
    :param envValue: value to append
    """
    # Use .get() so a previously unset variable does not raise KeyError.
    os.environ[envName] = os.environ.get(envName, '') + ':' + envValue
|
Set an environment variable by appending envValue to the current value of envName (colon-separated, PATH-style).
:param envName: name of the environment variable
:param envValue: value to append
|
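Typical use is PATH-style appending; a hypothetical call (the directory name is illustrative):

import os
os.environ['PATH'] = '/usr/bin'
set_env('PATH', '/opt/tools/bin')
print(os.environ['PATH'])  # /usr/bin:/opt/tools/bin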
def get_neg_one_task_agent(generators, market, nOffer, maxSteps):
    """ Returns a task-agent tuple whose action is always minus one.
    """
    env = pyreto.discrete.MarketEnvironment(generators, market, nOffer)
    task = pyreto.discrete.ProfitTask(env, maxSteps=maxSteps)
    agent = pyreto.util.NegOneAgent(env.outdim, env.indim)
    return task, agent
|
Returns a task-agent tuple whose action is always minus one.
|
def upload_job_chunk_list(self, upload_job_id, **kwargs):  # noqa: E501
    """List all metadata for uploaded chunks # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True
    >>> thread = api.upload_job_chunk_list(upload_job_id, asynchronous=True)
    >>> result = thread.get()
    :param asynchronous bool
    :param str upload_job_id: Upload job (required)
    :param int limit: How many metadata items for uploaded chunks to retrieve
    :param str order: ASC or DESC
    :param str after: The ID of the item after which to retrieve the next page
    :param str include: A comma-separated list of data fields to return. Currently supported: total_count
:param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>etag</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>id</td> <td>โ</td> <td>โ</td> <td> </td> </tr> <tr> <td>updated_at</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>status</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>hash</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>length</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> </tbody> </table> The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `status__eq=in_progress` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `status__eq=in_progress&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `status__in=in_progress,success`
    :return: UploadChunkInfoPage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.upload_job_chunk_list_with_http_info(upload_job_id, **kwargs)  # noqa: E501
    else:
        (data) = self.upload_job_chunk_list_with_http_info(upload_job_id, **kwargs)  # noqa: E501
        return data
|
List all metadata for uploaded chunks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.upload_job_chunk_list(upload_job_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str upload_job_id: Upload job (required)
:param int limit: How many metadata items for uploaded chunks to retrieve
:param str order: ASC or DESC
:param str after: The ID of the item after which to retrieve the next page
:param str include: A comma-separated list of data fields to return. Currently supported: total_count
:param str filter: URL-encoded query string parameter to filter returned data `?filter={URL-encoded query string}` ###### Filterable fields: The table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>created_at</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>etag</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>id</td> <td>โ</td> <td>โ</td> <td> </td> </tr> <tr> <td>updated_at</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>status</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>hash</td> <td>โ</td> <td>โ</td> </tr> <tr> <td>length</td> <td>โ</td> <td>โ</td> <td>โ</td> </tr> </tbody> </table> The query string is made up of key-value pairs separated by ampersands. For example, this query: `key1=value1&key2=value2&key3=value3` would be URL-encoded as: `?filter=key1__eq%3Dvalue1%26key2__eq%3Dvalue2%26key3__eq%3Dvalue3` **Filtering by properties** `status__eq=in_progress` **Filtering on date-time fields** Date-time fields should be specified in UTC RFC3339 format, `YYYY-MM-DDThh:mm:ss.msZ`. There are three permitted variations: * UTC RFC3339 with milliseconds. Example: `2016-11-30T16:25:12.1234Z` * UTC RFC3339 without milliseconds. Example: `2016-11-30T16:25:12Z` * UTC RFC3339 shortened without milliseconds and punctuation. Example: `20161130T162512Z` Date-time filtering supports three operators: * equality by appending `__eq` to the field name * greater than or equal to by appending `__gte` to the field name * less than or equal to by appending `__lte` to the field name `{field name}[|__eq|__lte|__gte]={UTC RFC3339 date-time}` Time ranges may be specified by including both the `__gte` and `__lte` forms in the filter. For example: `created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering on multiple fields** `status__eq=in_progress&created_at__gte=2016-11-30T16:25:12.1234Z&created_at__lte=2016-12-30T00:00:00Z` **Filtering with filter operators** String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `status__in=in_progress,success`
:return: UploadChunkInfoPage
If the method is called asynchronously,
returns the request thread.
|
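The filter parameter expects the query string itself to be URL-encoded. A small sketch of how such a value could be built with the standard library (field names taken from the table above; note that quote with safe='' also encodes the colons in the timestamp):

from urllib.parse import quote

raw = 'status__eq=in_progress&created_at__gte=2016-11-30T16:25:12.1234Z'
encoded = quote(raw, safe='')  # percent-encode '=' and '&' as in the examples above
print(encoded)
# status__eq%3Din_progress%26created_at__gte%3D2016-11-30T16%3A25%3A12.1234Z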
def get_session():
    """Build the session object."""
    # NOTE(msimonin): We provide only basic support, focused on
    # Chameleon cloud and its rc files
    if os.environ.get("OS_IDENTITY_API_VERSION") == "3":
        logging.info("Creating a v3 Keystone Session")
        auth = v3.Password(
            auth_url=os.environ["OS_AUTH_URL"],
            username=os.environ["OS_USERNAME"],
            password=os.environ["OS_PASSWORD"],
            project_id=os.environ["OS_PROJECT_ID"],
            user_domain_name=os.environ["OS_USER_DOMAIN_NAME"]
        )
    else:
        logging.info("Creating a v2 Keystone Session")
        auth = v2.Password(
            auth_url=os.environ["OS_AUTH_URL"],
            username=os.environ["OS_USERNAME"],
            password=os.environ["OS_PASSWORD"],
            tenant_id=os.environ["OS_TENANT_ID"])
    return session.Session(auth=auth)
|
Build the session object.
|
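The v3 branch above reads its credentials from the environment, as an openrc file would export them. A minimal sketch of the preconditions (all values are illustrative):

import os
# Variables consumed by the v3 branch above; values are illustrative.
os.environ.update({
    'OS_IDENTITY_API_VERSION': '3',
    'OS_AUTH_URL': 'https://keystone.example.org:5000/v3',
    'OS_USERNAME': 'demo',
    'OS_PASSWORD': 'secret',
    'OS_PROJECT_ID': 'abc123',
    'OS_USER_DOMAIN_NAME': 'Default',
})
session = get_session()  # builds a v3 Password session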
def class_statistics(TP, TN, FP, FN, classes, table):
    """
    Return all class statistics.
    :param TP: true positive dict for all classes
    :type TP : dict
    :param TN: true negative dict for all classes
    :type TN : dict
    :param FP: false positive dict for all classes
    :type FP : dict
    :param FN: false negative dict for all classes
    :type FN : dict
    :param classes: classes
    :type classes : list
    :param table: input matrix
    :type table : dict
    :return: result as dict
    """
    TPR = {}
    TNR = {}
    PPV = {}
    NPV = {}
    FNR = {}
    FPR = {}
    FDR = {}
    FOR = {}
    ACC = {}
    F1_SCORE = {}
    MCC = {}
    BM = {}
    MK = {}
    PLR = {}
    NLR = {}
    DOR = {}
    POP = {}
    P = {}
    N = {}
    TOP = {}
    TON = {}
    PRE = {}
    G = {}
    RACC = {}
    F05_Score = {}
    F2_Score = {}
    ERR = {}
    RACCU = {}
    Jaccard_Index = {}
    IS = {}
    CEN = {}
    MCEN = {}
    AUC = {}
    dInd = {}
    sInd = {}
    DP = {}
    Y = {}
    PLRI = {}
    DPI = {}
    AUCI = {}
    GI = {}
    LS = {}
    AM = {}
    BCD = {}
    OP = {}
    IBA = {}
    GM = {}
    for i in TP.keys():
        POP[i] = TP[i] + TN[i] + FP[i] + FN[i]
        P[i] = TP[i] + FN[i]
        N[i] = TN[i] + FP[i]
        TOP[i] = TP[i] + FP[i]
        TON[i] = TN[i] + FN[i]
        TPR[i] = TTPN_calc(TP[i], FN[i])
        TNR[i] = TTPN_calc(TN[i], FP[i])
        PPV[i] = TTPN_calc(TP[i], FP[i])
        NPV[i] = TTPN_calc(TN[i], FN[i])
        FNR[i] = FXR_calc(TPR[i])
        FPR[i] = FXR_calc(TNR[i])
        FDR[i] = FXR_calc(PPV[i])
        FOR[i] = FXR_calc(NPV[i])
        ACC[i] = ACC_calc(TP[i], TN[i], FP[i], FN[i])
        F1_SCORE[i] = F_calc(TP[i], FP[i], FN[i], 1)
        F05_Score[i] = F_calc(TP[i], FP[i], FN[i], 0.5)
        F2_Score[i] = F_calc(TP[i], FP[i], FN[i], 2)
        MCC[i] = MCC_calc(TP[i], TN[i], FP[i], FN[i])
        BM[i] = MK_BM_calc(TPR[i], TNR[i])
        MK[i] = MK_BM_calc(PPV[i], NPV[i])
        PLR[i] = LR_calc(TPR[i], FPR[i])
        NLR[i] = LR_calc(FNR[i], TNR[i])
        DOR[i] = LR_calc(PLR[i], NLR[i])
        PRE[i] = PRE_calc(P[i], POP[i])
        G[i] = G_calc(PPV[i], TPR[i])
        RACC[i] = RACC_calc(TOP[i], P[i], POP[i])
        ERR[i] = ERR_calc(ACC[i])
        RACCU[i] = RACCU_calc(TOP[i], P[i], POP[i])
        Jaccard_Index[i] = jaccard_index_calc(TP[i], TOP[i], P[i])
        IS[i] = IS_calc(TP[i], FP[i], FN[i], POP[i])
        CEN[i] = CEN_calc(classes, table, TOP[i], P[i], i)
        MCEN[i] = CEN_calc(classes, table, TOP[i], P[i], i, True)
        AUC[i] = AUC_calc(TNR[i], TPR[i])
        dInd[i] = dInd_calc(TNR[i], TPR[i])
        sInd[i] = sInd_calc(dInd[i])
        DP[i] = DP_calc(TPR[i], TNR[i])
        Y[i] = BM[i]
        PLRI[i] = PLR_analysis(PLR[i])
        DPI[i] = DP_analysis(DP[i])
        AUCI[i] = AUC_analysis(AUC[i])
        GI[i] = GI_calc(AUC[i])
        LS[i] = lift_calc(PPV[i], PRE[i])
        AM[i] = AM_calc(TOP[i], P[i])
        OP[i] = OP_calc(ACC[i], TPR[i], TNR[i])
        IBA[i] = IBA_calc(TPR[i], TNR[i])
        GM[i] = G_calc(TNR[i], TPR[i])
    for i in TP.keys():
        BCD[i] = BCD_calc(TOP, P, AM[i])
    result = {
        "TPR": TPR,
        "TNR": TNR,
        "PPV": PPV,
        "NPV": NPV,
        "FNR": FNR,
        "FPR": FPR,
        "FDR": FDR,
        "FOR": FOR,
        "ACC": ACC,
        "F1": F1_SCORE,
        "MCC": MCC,
        "BM": BM,
        "MK": MK,
        "PLR": PLR,
        "NLR": NLR,
        "DOR": DOR,
        "TP": TP,
        "TN": TN,
        "FP": FP,
        "FN": FN,
        "POP": POP,
        "P": P,
        "N": N,
        "TOP": TOP,
        "TON": TON,
        "PRE": PRE,
        "G": G,
        "RACC": RACC,
        "F0.5": F05_Score,
        "F2": F2_Score,
        "ERR": ERR,
        "RACCU": RACCU,
        "J": Jaccard_Index,
        "IS": IS,
        "CEN": CEN,
        "MCEN": MCEN,
        "AUC": AUC,
        "sInd": sInd,
        "dInd": dInd,
        "DP": DP,
        "Y": Y,
        "PLRI": PLRI,
        "DPI": DPI,
        "AUCI": AUCI,
        "GI": GI,
        "LS": LS,
        "AM": AM,
        "BCD": BCD,
        "OP": OP,
        "IBA": IBA,
        "GM": GM}
    return result
|
Return all class statistics.
:param TP: true positive dict for all classes
:type TP : dict
:param TN: true negative dict for all classes
:type TN : dict
:param FP: false positive dict for all classes
:type FP : dict
:param FN: false negative dict for all classes
:type FN : dict
:param classes: classes
:type classes : list
:param table: input matrix
:type table : dict
:return: result as dict
|
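The first few rates follow directly from the confusion counts; for one class, TPR = TP/(TP+FN), PPV = TP/(TP+FP), and ACC = (TP+TN)/(TP+TN+FP+FN). A standalone arithmetic check (the counts are made up):

TP, TN, FP, FN = 40, 45, 5, 10           # illustrative counts for one class
TPR = TP / (TP + FN)                     # sensitivity/recall = 0.8
PPV = TP / (TP + FP)                     # precision ~ 0.889
ACC = (TP + TN) / (TP + TN + FP + FN)    # accuracy = 0.85
print(TPR, round(PPV, 3), ACC)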
def set_settings_file_path(self, settings_file_path):
    """Currently, it is an error to change this property on any machine.
    Later this will allow setting a new path for the settings file, with
    automatic relocation of all files (including snapshots and disk images)
    which are inside the base directory. This operation is only allowed
    when there are no pending unsaved settings.
    Setting this property to @c null or to an empty string is forbidden.
    When setting this property, the specified path must be absolute.
    The specified path may not exist, it will be created when necessary.

    in settings_file_path of type str
        New settings file path, will be used to determine the new
        location for the attached media if it is in the same directory or
        below as the original settings file.

    return progress of type :class:`IProgress`
        Progress object to track the operation completion.

    raises :class:`OleErrorNotimpl`
        The operation is not implemented yet.
    """
    if not isinstance(settings_file_path, basestring):
        raise TypeError("settings_file_path can only be an instance of type basestring")
    progress = self._call("setSettingsFilePath",
                          in_p=[settings_file_path])
    progress = IProgress(progress)
    return progress
|
Currently, it is an error to change this property on any machine.
Later this will allow setting a new path for the settings file, with
automatic relocation of all files (including snapshots and disk images)
which are inside the base directory. This operation is only allowed
when there are no pending unsaved settings.
Setting this property to @c null or to an empty string is forbidden.
When setting this property, the specified path must be absolute.
The specified path may not exist, it will be created when necessary.
in settings_file_path of type str
New settings file path, will be used to determine the new
location for the attached media if it is in the same directory or
below as the original settings file.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`OleErrorNotimpl`
The operation is not implemented yet.
|
def define_options(default_conf):
    """
    Define the options from default.conf dynamically
    """
    default = {}
    with open(default_conf, 'rb') as f:
        exec_in(native_str(f.read()), {}, default)
    for name, value in default.iteritems():
        # if the option is already defined by tornado
        # override the value
        # a list of options set by tornado:
        # log_file_num_backups, logging, help,
        # log_to_stderr, log_file_max_size, log_file_prefix
        if name in options:
            setattr(options, name, value)
        # otherwise define the option
        else:
            define(name, value)
|
Define the options from default.conf dynamically
|
def get_station_by_name(self,
                        station_name,
                        num_minutes=None,
                        direction=None,
                        destination=None,
                        stops_at=None):
    """Returns all trains due to serve station `station_name`.
    @param station_name
    @param num_minutes Only trains within this time. Between 5 and 90
    @param direction Filter by direction. Northbound or Southbound
    @param destination Filter by name of the destination stations
    @param stops_at Filter by name of one of the stops
    """
    url = self.api_base_url + 'getStationDataByNameXML'
    params = {
        'StationDesc': station_name
    }
    if num_minutes:
        url = url + '_withNumMins'
        params['NumMins'] = num_minutes
    response = requests.get(
        url, params=params, timeout=10)
    if response.status_code != 200:
        return []
    trains = self._parse_station_data(response.content)
    # Prune also when only `stops_at` is given, so that filter is honored too.
    if direction is not None or destination is not None or stops_at is not None:
        return self._prune_trains(trains,
                                  direction=direction,
                                  destination=destination,
                                  stops_at=stops_at)
    return trains
|
Returns all trains due to serve station `station_name`.
@param station_name
@param num_minutes Only trains within this time. Between 5 and 90
@param direction Filter by direction. Northbound or Southbound
@param destination Filter by name of the destination stations
@param stops_at Filter by name of one of the stops
|
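A hypothetical call against the wrapper above (the client class name and station name are illustrative, not from the source):

api = IrishRailClient()  # hypothetical client exposing the method above
trains = api.get_station_by_name('Dublin Connolly',
                                 num_minutes=30,
                                 direction='Southbound')
for train in trains:
    print(train)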
def set(self):
    """set the event to triggered
    after calling this method, all greenlets waiting on the event will be
    rescheduled, and calling :meth:`wait` will not block until
    :meth:`clear` has been called
    """
    self._is_set = True
    scheduler.state.awoken_from_events.update(self._waiters)
    del self._waiters[:]
|
set the event to triggered
after calling this method, all greenlets waiting on the event will be
rescheduled, and calling :meth:`wait` will not block until
:meth:`clear` has been called
|
def get_experiments(self, workspace_id):
    """Runs an HTTP GET request to retrieve the list of experiments."""
    api_path = self.EXPERIMENTS_URI_FMT.format(workspace_id)
    return self._send_get_req(api_path)
|
Runs an HTTP GET request to retrieve the list of experiments.
|
def remove_interface_router(self, router, body=None):
    """Removes an internal network interface from the specified router."""
    return self.put((self.router_path % router) +
                    "/remove_router_interface", body=body)
|
Removes an internal network interface from the specified router.
|
def index(self, block_start, block_end):
    """
    Entry point for indexing:
    * scan the blockchain from block_start to block_end and make sure we're up-to-date
    * process any newly-arrived zone files and re-index the affected subdomains
    """
    log.debug("BEGIN Processing zonefiles discovered since last re-indexing")
    t1 = time.time()
    self.index_discovered_zonefiles(block_end)
    t2 = time.time()
    log.debug("END Processing zonefiles discovered since last re-indexing ({} seconds)".format(t2 - t1))
|
Entry point for indexing:
* scan the blockchain from block_start to block_end and make sure we're up-to-date
* process any newly-arrived zone files and re-index the affected subdomains
|
def cal_dist_between_2_coord_frame_aligned_boxes(box1_pos, box1_size, box2_pos, box2_size):
    """ Calculate the Euclidean distance between two boxes whose edges are parallel to the coordinate axes.
    Based on the boxes' relative position, the function decides whether a corner-to-corner
    or an edge-to-edge distance needs to be calculated.
    :param tuple box1_pos: x and y position of box 1
    :param tuple box1_size: x and y size of box 1
    :param tuple box2_pos: x and y position of box 2
    :param tuple box2_size: x and y size of box 2
    :return: the distance between the boxes (0. if they overlap)
    """
    box1_x_min, box1_y_min = box1_pos
    box1_x_max, box1_y_max = (box1_pos[0] + box1_size[0], box1_pos[1] + box1_size[1])
    box2_x_min, box2_y_min = box2_pos
    box2_x_max, box2_y_max = (box2_pos[0] + box2_size[0], box2_pos[1] + box2_size[1])
    # 1|2|3  +-> x  => works also for opengl - both boxes described in same coordinates
    # 4|5|6  |         5 is covered by box1
    # 7|8|9  v y
    # - case 1: the lower right corner of box2 is above and to the left of box1's upper left corner
    # - case 5: the boxes are overlapping, so the distance is 0
    # - in cases 1, 3, 7, 9 the respective corners of box1 and box2 give the minimal distance
    # - cases 2, 4, 6, 8 can be covered by a simple calculation along either the x or y axis
    if box2_x_max < box1_x_min and box2_y_max < box1_y_min:  # case 1 -> box2 is fully in sector 1
        distance = sqrt((box1_x_min - box2_x_max)**2 + (box1_y_min - box2_y_max)**2)
    elif box2_x_min > box1_x_max and box2_y_max < box1_y_min:  # case 3 -> box2 is fully in sector 3
        distance = sqrt((box2_x_min - box1_x_max)**2 + (box1_y_min - box2_y_max)**2)
    elif box2_x_max < box1_x_min and box2_y_min > box1_y_max:  # case 7 -> box2 is fully in sector 7
        distance = sqrt((box1_x_min - box2_x_max)**2 + (box2_y_min - box1_y_max)**2)
    elif box2_x_min > box1_x_max and box2_y_min > box1_y_max:  # case 9 -> box2 is fully in sector 9
        distance = sqrt((box2_x_min - box1_x_max)**2 + (box2_y_min - box1_y_max)**2)
    elif box2_y_max < box1_y_min:  # case 2 -> box2 is partly in sector 2 and in 1 or 3
        distance = box1_y_min - box2_y_max
    elif box2_x_max < box1_x_min:  # case 4 -> box2 is partly in sector 4 and in 1 or 7
        distance = box1_x_min - box2_x_max
    elif box2_x_min > box1_x_max:  # case 6 -> box2 is partly in sector 6 and in 3 or 9
        distance = box2_x_min - box1_x_max
    elif box2_y_min > box1_y_max:  # case 8 -> box2 is partly in sector 8 and in 7 or 9
        distance = box2_y_min - box1_y_max
    else:  # case 5: box2 reaches into the area of box1
        distance = 0.
    return distance
|
Calculate the Euclidean distance between two boxes whose edges are parallel to the coordinate axes.
Based on the boxes' relative position, the function decides whether a corner-to-corner
or an edge-to-edge distance needs to be calculated.
:param tuple box1_pos: x and y position of box 1
:param tuple box1_size: x and y size of box 1
:param tuple box2_pos: x and y position of box 2
:param tuple box2_size: x and y size of box 2
:return: the distance between the boxes (0. if they overlap)
|
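A worked example of the sector logic, assuming the function and sqrt are in scope as above: a box2 entirely in sector 9 (to the right of and below box1) reduces to a corner-to-corner distance.

# box1 spans [0, 2] x [0, 2]; box2 spans [5, 6] x [6, 7] -> sector 9
d = cal_dist_between_2_coord_frame_aligned_boxes((0, 0), (2, 2), (5, 6), (1, 1))
print(d)  # sqrt(3**2 + 4**2) = 5.0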
def p_int(self, tree):
    ''' V ::= INTEGER '''
    tree.value = int(tree.attr)
    tree.svalue = tree.attr
|
V ::= INTEGER
|
def get_object(self, id, **args):
    """Fetches the given object from the graph."""
    return self.request("{0}/{1}".format(self.version, id), args)
|
Fetches the given object from the graph.
|
def update_firewall_rule(self, firewall_rule, protocol=None, action=None,
                         name=None, description=None, ip_version=None,
                         source_ip_address=None, destination_ip_address=None,
                         source_port=None, destination_port=None, shared=None,
                         enabled=None):
    '''
    Update a firewall rule
    '''
    body = {}
    if protocol:
        body['protocol'] = protocol
    if action:
        body['action'] = action
    if name:
        body['name'] = name
    if description:
        body['description'] = description
    if ip_version:
        body['ip_version'] = ip_version
    if source_ip_address:
        body['source_ip_address'] = source_ip_address
    if destination_ip_address:
        body['destination_ip_address'] = destination_ip_address
    if source_port:
        body['source_port'] = source_port
    if destination_port:
        body['destination_port'] = destination_port
    # Compare booleans against None so that explicit False values are still sent.
    if shared is not None:
        body['shared'] = shared
    if enabled is not None:
        body['enabled'] = enabled
    return self.network_conn.update_firewall_rule(firewall_rule, body={'firewall_rule': body})
|
Update a firewall rule
|
def multi_label_morphology(image, operation, radius, dilation_mask=None, label_list=None, force=False):
    """
    Morphology on multi label images.
    Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve
    pre-existing labels. The choices of operation are:
    Dilation: dilates all labels sequentially, but does not overwrite original labels.
    This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence
    can still arise if two or more labels dilate into the same space - in this case, the label
    with the lowest intensity is retained. With a mask, dilated labels are multiplied by the
    mask and then added to the original label, thus restricting dilation to the mask region.
    Erosion: Erodes labels independently, equivalent to calling iMath iteratively.
    Closing: Close holes in each label sequentially, but does not overwrite original labels.
    Opening: Opens each label independently, equivalent to calling iMath iteratively.

    Arguments
    ---------
    image : ANTsImage
        Input image should contain only 0 for background and positive integers for labels.
    operation : string
        One of MD, ME, MC, MO, passed to iMath.
    radius : integer
        radius of the morphological operation.
    dilation_mask : ANTsImage
        Optional binary mask to constrain dilation only (eg dilate cortical label into WM).
    label_list : list or tuple or numpy.ndarray
        Optional list of labels, to perform operation upon. Defaults to all unique
        intensities in image.

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2
    >>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2)
    >>> # should see original label regions preserved in dilated version
    >>> # label N should have mean N and 0 variance
    >>> print(ants.label_stats(labels_dilated, labels))
    """
    if (label_list is None) or (len(label_list) == 1):
        label_list = np.sort(np.unique(image[image > 0]))
    if (len(label_list) > 200) and (not force):
        raise ValueError('More than 200 labels... Make sure the image is discrete'
                         ' and call this function again with `force=True` if you'
                         ' really want to do this.')
    image_binary = image.clone()
    image_binary[image_binary > 1] = 1
    # Erosion / opening is simply a case of looping over the input labels
    if (operation == 'ME') or (operation == 'MO'):
        output = image.clone()
        for current_label in label_list:
            output = output.iMath(operation, radius, current_label)
        return output
    if dilation_mask is not None:
        if int(dilation_mask.max()) != 1:
            raise ValueError('Mask is either empty or not binary')
    output = image.clone()
    for current_label in label_list:
        current_label_region = image.threshold_image(current_label, current_label)
        other_labels = output - current_label_region
        clab_binary_morphed = current_label_region.iMath(operation, radius, 1)
        if (operation == 'MD') and (dilation_mask is not None):
            clab_binary_morphed_nooverlap = current_label_region + dilation_mask * clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 2)
        else:
            clab_binary_morphed_nooverlap = clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 1)
        output = output + clab_binary_morphed_nooverlap * current_label
    return output
|
Morphology on multi label images.
Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve
pre-existing labels. The choices of operation are:
Dilation: dilates all labels sequentially, but does not overwrite original labels.
This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence
can still arise if two or more labels dilate into the same space - in this case, the label
with the lowest intensity is retained. With a mask, dilated labels are multiplied by the
mask and then added to the original label, thus restricting dilation to the mask region.
Erosion: Erodes labels independently, equivalent to calling iMath iteratively.
Closing: Close holes in each label sequentially, but does not overwrite original labels.
Opening: Opens each label independently, equivalent to calling iMath iteratively.
Arguments
---------
image : ANTsImage
Input image should contain only 0 for background and positive integers for labels.
operation : string
One of MD, ME, MC, MO, passed to iMath.
radius : integer
radius of the morphological operation.
dilation_mask : ANTsImage
Optional binary mask to constrain dilation only (eg dilate cortical label into WM).
label_list : list or tuple or numpy.ndarray
Optional list of labels, to perform operation upon. Defaults to all unique
intensities in image.
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2
>>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2)
>>> # should see original label regions preserved in dilated version
>>> # label N should have mean N and 0 variance
>>> print(ants.label_stats(labels_dilated, labels))
|
def _compute_missing_deps(self, src_tgt, actual_deps):
    """Computes deps that are used by the compiler but not specified in a BUILD file.

    These deps are bugs waiting to happen: the code may happen to compile because the dep was
    brought in some other way (e.g., by some other root target), but that is obviously fragile.
    Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
    cases aren't as fragile as a completely missing dependency. It's still a good idea to have
    explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
    easy to find and reason about.

    - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
      compiler.

    Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:

    - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
      to map to a target (because its target isn't in the total set of targets in play,
      and we don't want to parse every BUILD file in the workspace just to find it).

    - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
      on dep_tgt but has a transitive dep on it.

    All paths in the input and output are absolute.
    """
    analyzer = self._analyzer

    def must_be_explicit_dep(dep):
        # We don't require explicit deps on the java runtime, so we shouldn't consider that
        # a missing dep.
        return (dep not in analyzer.bootstrap_jar_classfiles
                and not dep.startswith(DistributionLocator.cached().real_home))

    def target_or_java_dep_in_targets(target, targets):
        # We want to check if the target is in the targets collection
        #
        # However, for the special case of scala_library that has a java_sources
        # reference we're ok if that exists in targets even if the scala_library does not.
        if target in targets:
            return True
        elif isinstance(target, ScalaLibrary):
            return any(t in targets for t in target.java_sources)
        else:
            return False

    # Find deps that are actual but not specified.
    missing_file_deps = OrderedSet()  # (src, src).
    missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.
    targets_by_file = analyzer.targets_by_file(self.context.targets())
    buildroot = get_buildroot()
    abs_srcs = [os.path.join(buildroot, src) for src in src_tgt.sources_relative_to_buildroot()]
    for src in abs_srcs:
        for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
            actual_dep_tgts = targets_by_file.get(actual_dep)
            # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
            # to be in our declared deps to be OK.
            if actual_dep_tgts is None:
                missing_file_deps.add((src_tgt, actual_dep))
            elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
                # Obviously intra-target deps are fine.
                canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
                if canonical_actual_dep_tgt not in src_tgt.dependencies:
                    # The canonical dep is the only one a direct dependency makes sense on.
                    missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                        (src, actual_dep))
    return (list(missing_file_deps),
            list(missing_direct_tgt_deps_map.items()))
|
Computes deps that are used by the compiler but not specified in a BUILD file.
These deps are bugs waiting to happen: the code may happen to compile because the dep was
brought in some other way (e.g., by some other root target), but that is obviously fragile.
Note that in practice we're OK with reliance on indirect deps that are only brought in
transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
cases aren't as fragile as a completely missing dependency. It's still a good idea to have
explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
easy to find and reason about.
- actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
compiler.
Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:
- missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
to map to a target (because its target isn't in the total set of targets in play,
and we don't want to parse every BUILD file in the workspace just to find it).
- missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
on dep_tgt but has a transitive dep on it.
All paths in the input and output are absolute.
|
def untranslated_policy(self, default):
    '''Get the policy for untranslated content'''
    return self.generator.settings.get(self.info.get('policy', None),
                                       default)
|
Get the policy for untranslated content
|
def servicegroup_server_enable(sg_name, s_name, s_port, **connection_args):
    '''
    Enable a server:port member of a servicegroup

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort'
    '''
    ret = True
    server = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args)
    if server is None:
        return False
    nitro = _connect(**connection_args)
    if nitro is None:
        return False
    try:
        NSServiceGroup.enable_server(nitro, server)
    except NSNitroError as error:
        log.debug('netscaler module error - NSServiceGroup.enable_server() failed: %s', error)
        ret = False
    _disconnect(nitro)
    return ret
|
Enable a server:port member of a servicegroup
CLI Example:
.. code-block:: bash
salt '*' netscaler.servicegroup_server_enable 'serviceGroupName' 'serverName' 'serverPort'
|
def as_fstring(text):
    """Expansion with a Python f-string. Usually OK, but when a single quote
    appears inside an expression, adding repr will add a backslash to it
    and cause trouble.
    """
    for quote in ('"""', "'''"):
        # although script-format always ends with \n, direct use of this function might
        # have string ending with " or '
        if quote not in text and not text.endswith(quote[0]):
            return 'fr' + quote + text + quote
    # now, we need to look into the structure of f-string
    pieces = split_fstring(text)
    # if no expression contains a single quote, we can
    # safely repr the entire string
    if not any("'" in piece for piece in pieces[1::2]):
        return 'f' + repr(text)
    #
    # unfortunately, this thing has both single and double triple quotes.
    # Because we cannot use backslash inside expressions in an f-string, we
    # have to use a format string now.
    args = []
    for idx in range(len(pieces))[1::2]:
        pos = valid_expr_till(pieces[idx])
        if pos == 0:
            raise SyntaxError(f'invalid expression in {pieces[idx]}')
        args.append(pieces[idx][:pos])
        pieces[idx] = '{' + str(idx // 2) + pieces[idx][pos:] + '}'
    return repr(''.join(pieces)) + '.format(' + ', '.join(args) + ')'
|
Expansion with a Python f-string. Usually OK, but when a single quote
appears inside an expression, adding repr will add a backslash to it
and cause trouble.
|
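The easy path is the first branch: when neither triple quote appears in the text and it does not end with that quote character, the function simply wraps the text as a raw f-string source. A small illustration (the input text is made up; the .format() fallback only triggers when both quote styles are exhausted):

text = 'value is {x + 1}'
print(as_fstring(text))  # fr"""value is {x + 1}"""  (first branch, no quote conflicts)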
def getCitiesDrawingXML(points):
    ''' Build an XML string that contains a square for each city'''
    xml = ""
    for p in points:
        x = str(p.x)
        z = str(p.y)
        xml += '<DrawBlock x="' + x + '" y="7" z="' + z + '" type="beacon"/>'
        xml += '<DrawItem x="' + x + '" y="10" z="' + z + '" type="ender_pearl"/>'
    return xml
|
Build an XML string that contains a square for each city
|
def get_legal_params(self, method):
    '''Given an API name, list all legal parameters using the boto3 service model.'''
    if method not in self.client.meta.method_to_api_mapping:
        # Injected methods. Ignore.
        return []
    api = self.client.meta.method_to_api_mapping[method]
    shape = self.client.meta.service_model.operation_model(api).input_shape
    if shape is None:
        # No params needed for this API.
        return []
    return shape.members.keys()
|
Given an API name, list all legal parameters using the boto3 service model.
|
def wait(self):
    """ Block for user input """
    text = input(
        "Press return for next %d result%s (or type 'all'):"
        % (self.pagesize, plural(self.pagesize))
    )
    if text:
        if text.lower() in ["a", "all"]:
            self._pagesize = 0
        elif text.isdigit():
            self._pagesize = int(text)
|
Block for user input
|
def get_name_cost(db, name):
    """
    Get the cost of a name, given the fully-qualified name.
    Do so by finding the namespace it belongs to (even if the namespace is being imported).
    Return {'amount': ..., 'units': ...} on success
    Return None if the namespace has not been declared
    """
    lastblock = db.lastblock
    namespace_id = get_namespace_from_name(name)
    if namespace_id is None or len(namespace_id) == 0:
        log.debug("No namespace '%s'" % namespace_id)
        return None
    namespace = db.get_namespace(namespace_id)
    if namespace is None:
        # maybe importing?
        log.debug("Namespace '{}' is being revealed".format(namespace_id))
        namespace = db.get_namespace_reveal(namespace_id)
    if namespace is None:
        # no such namespace
        log.debug("No namespace '%s'" % namespace_id)
        return None
    name_fee = price_name(get_name_from_fq_name(name), namespace, lastblock)
    name_fee_units = None
    if namespace['version'] == NAMESPACE_VERSION_PAY_WITH_STACKS:
        name_fee_units = TOKEN_TYPE_STACKS
    else:
        name_fee_units = 'BTC'
    name_fee = int(math.ceil(name_fee))
    log.debug("Cost of '%s' at %s is %s units of %s" % (name, lastblock, name_fee, name_fee_units))
    return {'amount': name_fee, 'units': name_fee_units}
|
Get the cost of a name, given the fully-qualified name.
Do so by finding the namespace it belongs to (even if the namespace is being imported).
Return {'amount': ..., 'units': ...} on success
Return None if the namespace has not been declared
|
def create_role_policy(role_name, policy_name, policy, region=None, key=None,
                       keyid=None, profile=None):
    '''
    Create or modify a role policy.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.create_role_policy myirole mypolicy '{"MyPolicy": "Statement": [{"Action": ["sqs:*"], "Effect": "Allow", "Resource": ["arn:aws:sqs:*:*:*"], "Sid": "MyPolicySqs1"}]}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    _policy = get_role_policy(role_name, policy_name, region, key, keyid, profile)
    mode = 'create'
    if _policy:
        if _policy == policy:
            return True
        mode = 'modify'
    if isinstance(policy, six.string_types):
        policy = salt.utils.json.loads(policy, object_pairs_hook=odict.OrderedDict)
    try:
        _policy = salt.utils.json.dumps(policy)
        conn.put_role_policy(role_name, policy_name, _policy)
        if mode == 'create':
            msg = 'Successfully added policy %s to IAM role %s.'
        else:
            msg = 'Successfully modified policy %s for IAM role %s.'
        log.info(msg, policy_name, role_name)
        return True
    except boto.exception.BotoServerError as e:
        log.error(e)
        log.error('Failed to %s policy %s for IAM role %s.',
                  mode, policy_name, role_name)
        return False
|
Create or modify a role policy.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.create_role_policy myirole mypolicy '{"MyPolicy": "Statement": [{"Action": ["sqs:*"], "Effect": "Allow", "Resource": ["arn:aws:sqs:*:*:*"], "Sid": "MyPolicySqs1"}]}'
|
def paintEvent(self, event):
    """ Reimplemented to paint the background panel.
    """
    painter = QtGui.QStylePainter(self)
    option = QtGui.QStyleOptionFrame()
    option.initFrom(self)
    painter.drawPrimitive(QtGui.QStyle.PE_PanelTipLabel, option)
    painter.end()
    super(CallTipWidget, self).paintEvent(event)
|
Reimplemented to paint the background panel.
|
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop src at fixed location, and (optionally) resize it to size.

    Parameters
    ----------
    src : NDArray
        Input image
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    out = nd.slice(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    if size is not None and (w, h) != size:
        sizes = (h, w, size[1], size[0])
        out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
    return out
|
Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
|
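A sketch of cropping a 100x100 region at (x0, y0) = (10, 20) and resizing it to 50x50, assuming the function is the one exposed as mx.image.fixed_crop (the image path is illustrative):

import mxnet as mx

img = mx.image.imread('input.jpg')  # path is illustrative
patch = mx.image.fixed_crop(img, x0=10, y0=20, w=100, h=100, size=(50, 50))
print(patch.shape)                  # (50, 50, 3)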
def autocomplete():
    """Command and option completion for the main option parser (and options)
    and its subcommands (and options).
    Enable by sourcing one of the completion shell scripts (bash or zsh).
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''
    load_all_commands()
    subcommands = [cmd for cmd, cls in command_dict.items() if not cls.hidden]
    options = []
    # subcommand
    try:
        subcommand_name = [w for w in cwords if w in subcommands][0]
    except IndexError:
        subcommand_name = None
    # subcommand options
    if subcommand_name:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for uninstall command
        if subcommand_name == 'uninstall' and not current.startswith('-'):
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)
        subcommand = command_dict.get(subcommand_name)
        options += [(opt.get_opt_string(), opt.nargs)
                    for opt in subcommand.parser.option_list
                    if opt.help != optparse.SUPPRESS_HELP]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    else:
        # show options of main parser only when necessary
        if current.startswith('-') or current.startswith('--'):
            subcommands += [opt.get_opt_string()
                            for opt in parser.option_list
                            if opt.help != optparse.SUPPRESS_HELP]
        print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
|
Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
|
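The function is driven entirely by environment variables set by the shell completion script. The handshake can be simulated by hand; a sketch assuming a pip-style entry point:

import os
os.environ['PIP_AUTO_COMPLETE'] = '1'
os.environ['COMP_WORDS'] = 'pip ins'  # full command line typed so far
os.environ['COMP_CWORD'] = '1'        # index of the word being completed
# calling autocomplete() now prints candidates such as 'install' and exits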
def write(self, output):
    """Passthrough for pyserial Serial.write().
    Args:
        output (str): Block to write to port
    """
    view_str = output.encode('ascii', 'ignore')
    if len(view_str) > 0:
        self.m_ser.write(view_str)
        self.m_ser.flush()
        self.m_ser.reset_input_buffer()
        time.sleep(self.m_force_wait)
|
Passthrough for pyserial Serial.write().
Args:
output (str): Block to write to port
|
def _parse_urls(self, match):
    '''Parse URLs.'''
    mat = match.group(0)
    # Fix a bug in the regex concerning www...com and www.-foo.com domains
    # TODO fix this in the regex instead of working around it here
    domain = match.group(5)
    if domain[0] in '.-':
        return mat
    # Only allow IANA one letter domains that are actually registered
    if len(domain) == 5 \
            and domain[-4:].lower() in ('.com', '.org', '.net') \
            and not domain.lower() in IANA_ONE_LETTER_DOMAINS:
        return mat
    # Check for urls without http(s)
    pos = mat.find('http')
    if pos != -1:
        pre, url = mat[:pos], mat[pos:]
        full_url = url
    # Find the www and force https://
    else:
        pos = mat.lower().find('www')
        pre, url = mat[:pos], mat[pos:]
        full_url = 'https://%s' % url
    if self._include_spans:
        span = match.span(0)
        # add an offset if pre is e.g. ' '
        span = (span[0] + len(pre), span[1])
        self._urls.append((url, span))
    else:
        self._urls.append(url)
    if self._html:
        return '%s%s' % (pre, self.format_url(full_url,
                                              self._shorten_url(escape(url))))
|
Parse URLs.
|
def via_upnp():
    """ Use SSDP as described by the Philips guide """
    ssdp_list = ssdp_discover("ssdp:all", timeout=5)
    #import pickle
    #with open("ssdp.pickle", "wb") as f:
    #    pickle.dump(ssdp_list, f)
    bridges_from_ssdp = [u for u in ssdp_list if 'IpBridge' in u.server]
    logger.info('SSDP returned %d items with %d Hue bridge(s).',
                len(ssdp_list), len(bridges_from_ssdp))
    # Confirm SSDP gave an accessible bridge device by reading from the returned
    # location. Should look like: http://192.168.0.1:80/description.xml
    found_bridges = {}
    for bridge in bridges_from_ssdp:
        serial, bridge_info = parse_description_xml(bridge.location)
        if serial:
            found_bridges[serial] = bridge_info
    logger.debug('%s', found_bridges)
    if found_bridges:
        return found_bridges
    else:
        raise DiscoveryError('SSDP returned nothing')
|
Use SSDP as described by the Philips guide
|
def create_model(cls, data: dict, fields=None):
    '''
    Creates model instance from data (dict).
    '''
    if fields is None:
        fields = set(cls._fields.keys())
    else:
        if not isinstance(fields, set):
            fields = set(fields)
    new_keys = set(data.keys()) - fields
    if new_keys:
        for new_key in new_keys:
            del data[new_key]
    return cls(data)
|
Creates model instance from data (dict).
|
def get_primitives_paths():
    """Get the list of folders where the primitives will be looked for.

    This list will include the value of any `entry_point` named `jsons_path` published under
    the name `mlprimitives`.

    An example of such an entry point would be::

        entry_points = {
            'mlprimitives': [
                'jsons_path=some_module:SOME_VARIABLE'
            ]
        }

    where the module `some_module` contains a variable such as::

        SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')

    Returns:
        list:
            The list of folders.
    """
    primitives_paths = list()
    entry_points = pkg_resources.iter_entry_points('mlprimitives')
    for entry_point in entry_points:
        if entry_point.name == 'jsons_path':
            path = entry_point.load()
            primitives_paths.append(path)
    return _PRIMITIVES_PATHS + primitives_paths
|
Get the list of folders where the primitives will be looked for.
This list will include the value of any `entry_point` named `jsons_path` published under
the name `mlprimitives`.
An example of such an entry point would be::
entry_points = {
'mlprimitives': [
'jsons_path=some_module:SOME_VARIABLE'
]
}
where the module `some_module` contains a variable such as::
SOME_VARIABLE = os.path.join(os.path.dirname(__file__), 'jsons')
Returns:
list:
The list of folders.
|
def build_beta_part(ruleset, alpha_terminals):
    """
    Given a set of already adapted rules, and a dictionary of
    patterns and alpha_nodes, wire up the beta part of the RETE
    network.
    """
    for rule in ruleset:
        if isinstance(rule[0], OR):
            for subrule in rule[0]:
                wire_rule(rule, alpha_terminals, lhs=subrule)
        else:
            wire_rule(rule, alpha_terminals, lhs=rule)
|
Given a set of already adapted rules, and a dictionary of
patterns and alpha_nodes, wire up the beta part of the RETE
network.
|
def vector_normalize(vector_in, decimals=18):
    """ Generates a unit vector from the input.
    :param vector_in: vector to be normalized
    :type vector_in: list, tuple
    :param decimals: number of decimal places to round the result to
    :type decimals: int
    :return: the normalized vector (i.e. the unit vector)
    :rtype: list
    """
    try:
        if vector_in is None or len(vector_in) == 0:
            raise ValueError("Input vector cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise
    # Calculate magnitude of the vector
    magnitude = vector_magnitude(vector_in)
    # Normalize the vector
    if magnitude > 0:
        vector_out = []
        for vin in vector_in:
            vector_out.append(vin / magnitude)
        # Return the normalized vector, rounded to the requested number of decimal places
        return [float(("{:." + str(decimals) + "f}").format(vout)) for vout in vector_out]
    else:
        raise ValueError("The magnitude of the vector is zero")
|
Generates a unit vector from the input.
:param vector_in: vector to be normalized
:type vector_in: list, tuple
:param decimals: number of decimal places to round the result to
:type decimals: int
:return: the normalized vector (i.e. the unit vector)
:rtype: list
|
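A quick check with the classic 3-4-5 vector, assuming vector_magnitude returns the Euclidean norm:

print(vector_normalize((3.0, 4.0), decimals=2))  # [0.6, 0.8]; the magnitude was 5.0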
def submit(self, pixels, queue=None, debug=False, configfile=None):
    """
    Submit the likelihood job for the given pixel(s).
    """
    # For backwards compatibility
    batch = self.config['scan'].get('batch', self.config['batch'])
    queue = batch['cluster'] if queue is None else queue
    # Need to develop some way to take command line arguments...
    self.batch = ugali.utils.batch.batchFactory(queue, **batch['opts'])
    self.batch.max_jobs = batch.get('max_jobs', 200)
    if np.isscalar(pixels):
        pixels = np.array([pixels])
    outdir = mkdir(self.config['output']['likedir'])
    logdir = mkdir(join(outdir, 'log'))
    subdir = mkdir(join(outdir, 'sub'))
    # Save the current configuration settings; avoid writing the
    # file multiple times if configfile was passed as an argument.
    if configfile is None:
        shutil.copy(self.config.filename, outdir)
        configfile = join(outdir, os.path.basename(self.config.filename))
    lon, lat = pix2ang(self.nside_likelihood, pixels)
    commands = []
    chunk = batch['chunk']
    istart = 0
    logger.info('=== Submit Likelihood ===')
    for ii, pix in enumerate(pixels):
        msg = '  (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'
        msg = msg % (ii + 1, len(pixels), pix, self.nside_likelihood, lon[ii], lat[ii])
        logger.info(msg)
        # Create outfile name
        outfile = self.config.likefile % (pix, self.config['coords']['coordsys'].lower())
        outbase = os.path.basename(outfile)
        jobname = batch['jobname']
        # Submission command
        sub = not os.path.exists(outfile)
        cmd = self.command(outfile, configfile, pix)
        commands.append([ii, cmd, lon[ii], lat[ii], sub])
        if chunk == 0:
            # No chunking
            command = cmd
            submit = sub
            logfile = join(logdir, os.path.splitext(outbase)[0] + '.log')
        elif (len(commands) % chunk == 0) or (ii + 1 == len(pixels)):
            # End of chunk, create submission script
            commands = np.array(commands, dtype=object)
            istart, iend = commands[0][0], commands[-1][0]
            subfile = join(subdir, 'submit_%08i_%08i.sh' % (istart, iend))
            logfile = join(logdir, 'submit_%08i_%08i.log' % (istart, iend))
            command = "sh %s" % subfile
            submit = np.any(commands[:, -1])
            if submit:
                self.write_script(subfile, commands)
        else:
            # Not end of chunk
            continue
        commands = []
        # Actual job submission
        if not submit:
            logger.info(self.skip)
            continue
        else:
            job = self.batch.submit(command, jobname, logfile)
            logger.info("  " + job)
            time.sleep(0.5)
|
Submit the likelihood job for the given pixel(s).
|
def includeme(config):
""" The callable makes it possible to include rpcinterface
in a Pyramid application.
Calling ``config.include(twitcher.rpcinterface)`` will result in this
callable being called.
Arguments:
* ``config``: the ``pyramid.config.Configurator`` object.
"""
settings = config.registry.settings
if asbool(settings.get('twitcher.rpcinterface', True)):
LOGGER.debug('Twitcher XML-RPC Interface enabled.')
# include twitcher config
config.include('twitcher.config')
# using basic auth
config.include('twitcher.basicauth')
# pyramid xml-rpc
# http://docs.pylonsproject.org/projects/pyramid-rpc/en/latest/xmlrpc.html
config.include('pyramid_rpc.xmlrpc')
config.include('twitcher.db')
config.add_xmlrpc_endpoint('api', '/RPC2')
# register xmlrpc methods
config.add_xmlrpc_method(RPCInterface, attr='generate_token', endpoint='api', method='generate_token')
config.add_xmlrpc_method(RPCInterface, attr='revoke_token', endpoint='api', method='revoke_token')
config.add_xmlrpc_method(RPCInterface, attr='revoke_all_tokens', endpoint='api', method='revoke_all_tokens')
config.add_xmlrpc_method(RPCInterface, attr='register_service', endpoint='api', method='register_service')
config.add_xmlrpc_method(RPCInterface, attr='unregister_service', endpoint='api', method='unregister_service')
config.add_xmlrpc_method(RPCInterface, attr='get_service_by_name', endpoint='api', method='get_service_by_name')
config.add_xmlrpc_method(RPCInterface, attr='get_service_by_url', endpoint='api', method='get_service_by_url')
config.add_xmlrpc_method(RPCInterface, attr='clear_services', endpoint='api', method='clear_services')
config.add_xmlrpc_method(RPCInterface, attr='list_services', endpoint='api', method='list_services')
|
The callable makes it possible to include rpcinterface
in a Pyramid application.
Calling ``config.include(twitcher.rpcinterface)`` will result in this
callable being called.
Arguments:
* ``config``: the ``pyramid.config.Configurator`` object.
|
def assignmentComplete():
"""ASSIGNMENT COMPLETE Section 9.1.3"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x29) # 00101001
c = RrCause()
packet = a / b / c
return packet
|
ASSIGNMENT COMPLETE Section 9.1.3
|
def upload_jterator_project_files(self, directory):
'''Uploads the *jterator* project description from files on disk in
YAML format. It expects a ``pipeline.yaml`` file in `directory` and
optionally ``*handles.yaml`` files in a ``handles`` subfolder of
`directory`.
Parameters
----------
directory: str
path to the root folder where files are located
See also
--------
:meth:`tmclient.api.TmClient.upload_jterator_project`
'''
logger.info(
'load jterator project description from directory: %s', directory
)
if not os.path.exists(directory):
        raise OSError('Directory does not exist: {0}'.format(directory))
pipeline_filename = os.path.join(directory, 'pipeline.yaml')
if not os.path.exists(pipeline_filename):
raise OSError(
'Pipeline description file does not exist: {0}'.format(
pipeline_filename
)
)
logger.debug('load pipeline filename: %s', pipeline_filename)
with open(pipeline_filename) as f:
pipeline_description = yaml.safe_load(f.read())
handles_subdirectory = os.path.join(directory, 'handles')
if not os.path.exists(handles_subdirectory):
        logger.warning(
'handles subdirectory does not exist: %s', handles_subdirectory
)
handles_filename_pattern = os.path.join(
handles_subdirectory, '*.handles.yaml'
)
handles_descriptions = dict()
for handles_filename in glob.glob(handles_filename_pattern):
name = os.path.splitext(os.path.splitext(
os.path.basename(handles_filename)
)[0])[0]
logger.debug('load handles file: %s', handles_filename)
with open(handles_filename) as f:
handles_descriptions[name] = yaml.safe_load(f.read())
self.upload_jterator_project(
pipeline_description, handles_descriptions
)
|
Uploads the *jterator* project description from files on disk in
YAML format. It expects a ``pipeline.yaml`` file in `directory` and
optionally ``*handles.yaml`` files in a ``handles`` subfolder of
`directory`.
Parameters
----------
directory: str
path to the root folder where files are located
See also
--------
:meth:`tmclient.api.TmClient.upload_jterator_project`
|
def get_model(name):
"""
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
"""
model = MODELS.get(name.lower(), None)
assert model, "Could not locate model by name '%s'" % name
return model
|
Convert a model's verbose name to the model class. This allows us to
use the models verbose name in steps.
|
def crop(im, r, c, sz):
'''
    Crop a square patch of size sz from image im, starting at row r, column c.
'''
return im[r:r+sz, c:c+sz]
|
Crop a square patch of size sz from image im, starting at row r, column c.
|
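A quick illustration of crop with a NumPy array standing in for an image (numpy is an assumption here; any 2-D sliceable object works):

import numpy as np

im = np.arange(100).reshape(10, 10)
patch = crop(im, r=2, c=3, sz=4)   # rows 2..5, columns 3..6
print(patch.shape)                 # (4, 4)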
def _check_pip_installed():
"""
Invoke `pip --version` and make sure it doesn't error.
Use check_output to capture stdout and stderr
    Invokes pip in the same manner that we plan to in _call_pip().
    Don't bother trying to reuse _call_pip to do this... finicky and not worth
    the effort.
"""
try:
subprocess.check_output(
[sys.executable, "-m", "pip", "--version"], stderr=subprocess.STDOUT
)
return True
except subprocess.CalledProcessError:
return False
|
Invoke `pip --version` and make sure it doesn't error.
Use check_output to capture stdout and stderr
Invokes pip in the same manner that we plan to in _call_pip().
Don't bother trying to reuse _call_pip to do this... finicky and not worth
the effort.
|
def _f_cash_root(x, counts, bkg, model):
"""
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return np.sum(model * (counts / (x * model + bkg) - 1.0))
|
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
|
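Since _f_cash_root is written to be zeroed, a standard bracketing root finder recovers the best-fit amplitude. A sketch with scipy.optimize.brentq on toy arrays (the bracket bounds are assumptions):

import numpy as np
from scipy.optimize import brentq

counts = np.array([12.0, 9.0, 15.0])
bkg = np.array([5.0, 5.0, 5.0])
model = np.array([1.0, 1.0, 1.0])

# Root at x = 7 for these toy inputs: sum(counts) / (x + 5) == 3.
amplitude = brentq(_f_cash_root, 1e-6, 100.0, args=(counts, bkg, model))
print(amplitude)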
def get_parser():
"""Get parser for mpu."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--version',
action='version',
version='mpu {}'.format(mpu.__version__))
subparsers = parser.add_subparsers(help='Python package commands')
package_parser = subparsers.add_parser('package')
mpu.package.cli.get_parser(package_parser)
return parser
|
Get parser for mpu.
|
def _freeze(self, final_text, err=False):
"""Stop spinner, compose last frame and 'freeze' it."""
if not final_text:
final_text = ""
target = self.stderr if err else self.stdout
if target.closed:
target = sys.stderr if err else sys.stdout
text = to_text(final_text)
last_frame = self._compose_out(text, mode="last")
self._last_frame = decode_output(last_frame, target_stream=target)
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
target.write(self._last_frame)
|
Stop spinner, compose last frame and 'freeze' it.
|
def deserialize(cls, serializer, wf_spec, s_state, **kwargs):
"""
Deserializes the trigger using the provided serializer.
"""
return serializer.deserialize_trigger(wf_spec,
s_state,
**kwargs)
|
Deserializes the trigger using the provided serializer.
|
def load_classes(cls, fail_silently=True):
"""Load all the classes for a plugin.
Produces a sequence containing the identifiers and their corresponding
classes for all of the available instances of this plugin.
fail_silently causes the code to simply log warnings if a
plugin cannot import. The goal is to be able to use part of
libraries from an XBlock (and thus have it installed), even if
the overall XBlock cannot be used (e.g. depends on Django in a
non-Django application). There is disagreement about whether
this is a good idea, or whether we should see failures early
(e.g. on startup or first page load), and in what
contexts. Hence, the flag.
"""
all_classes = itertools.chain(
pkg_resources.iter_entry_points(cls.entry_point),
(entry_point for identifier, entry_point in cls.extra_entry_points),
)
for class_ in all_classes:
try:
yield (class_.name, cls._load_class_entry_point(class_))
except Exception: # pylint: disable=broad-except
if fail_silently:
log.warning('Unable to load %s %r', cls.__name__, class_.name, exc_info=True)
else:
raise
|
Load all the classes for a plugin.
Produces a sequence containing the identifiers and their corresponding
classes for all of the available instances of this plugin.
fail_silently causes the code to simply log warnings if a
plugin cannot import. The goal is to be able to use part of
libraries from an XBlock (and thus have it installed), even if
the overall XBlock cannot be used (e.g. depends on Django in a
non-Django application). There is disagreement about whether
this is a good idea, or whether we should see failures early
(e.g. on startup or first page load), and in what
contexts. Hence, the flag.
|
def replace(self, **kwargs):
"""
Return: a new :class:`AsideUsageKeyV2` with ``KEY_FIELDS`` specified in ``kwargs`` replaced
with their corresponding values. Deprecation value is also preserved.
"""
if 'usage_key' in kwargs:
for attr in self.USAGE_KEY_ATTRS:
kwargs.pop(attr, None)
else:
kwargs['usage_key'] = self.usage_key.replace(**{
key: kwargs.pop(key)
for key
in self.USAGE_KEY_ATTRS
if key in kwargs
})
return super(AsideUsageKeyV2, self).replace(**kwargs)
|
Return: a new :class:`AsideUsageKeyV2` with ``KEY_FIELDS`` specified in ``kwargs`` replaced
with their corresponding values. Deprecation value is also preserved.
|
def by_chat_command(prefix=('/',), separator=' ', pass_args=False):
"""
:param prefix:
a list of special characters expected to indicate the head of a command.
:param separator:
a command may be followed by arguments separated by ``separator``.
:type pass_args: bool
:param pass_args:
If ``True``, arguments following a command will be passed to the handler
function.
:return:
a key function that interprets a chat message's text and returns
the embedded command, optionally followed by arguments. If the text is
not preceded by any of the specified ``prefix``, it returns a 1-tuple
    ``(None,)`` as the key. This is to distinguish it from the special
    ``None`` key in the routing table.
"""
return by_command(lambda msg: msg['text'], prefix, separator, pass_args)
|
:param prefix:
a list of special characters expected to indicate the head of a command.
:param separator:
a command may be followed by arguments separated by ``separator``.
:type pass_args: bool
:param pass_args:
If ``True``, arguments following a command will be passed to the handler
function.
:return:
a key function that interprets a chat message's text and returns
the embedded command, optionally followed by arguments. If the text is
not preceded by any of the specified ``prefix``, it returns a 1-tuple
``(None,)`` as the key. This is to distinguish it from the special
``None`` key in the routing table.
|
def transform(self, *axes, verbose=True):
"""Transform the data.
Parameters
----------
axes : strings
Expressions for the new set of axes.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Similar method except for constants
"""
# TODO: ensure that transform does not break data
# create
new = []
newt = "newt" in self.axis_expressions
current = {a.expression: a for a in self._axes}
for expression in axes:
axis = current.get(expression, Axis(self, expression))
new.append(axis)
self._axes = new
# units
for a in self._axes:
if a.units is None:
a.convert(a.variables[0].units)
# finish
self.flush()
self._on_axes_updated()
nownewt = "newt" in self.axis_expressions
if verbose and nownewt and not newt:
print("Look she turned me into a newt")
elif verbose and newt and not nownewt:
print("I got better")
|
Transform the data.
Parameters
----------
axes : strings
Expressions for the new set of axes.
verbose : boolean (optional)
Toggle talkback. Default is True
See Also
--------
set_constants
Similar method except for constants
|
def progress(self, *restrictions, display=True):
"""
report progress of populating the table
:return: remaining, total -- tuples to be populated
"""
todo = self._jobs_to_do(restrictions)
total = len(todo)
remaining = len(todo - self.target)
if display:
print('%-20s' % self.__class__.__name__,
'Completed %d of %d (%2.1f%%) %s' % (
total - remaining, total, 100 - 100 * remaining / (total+1e-12),
datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')), flush=True)
return remaining, total
|
report progress of populating the table
:return: remaining, total -- tuples to be populated
|
def parse_epcr(self):
"""
        Parse the ePCR output file. Populate the dictionary of results. For alleles, find the best result based on the
        number of mismatches before populating the dictionary
"""
# Use the metadata object from the vtyper_object
for sample in self.vtyper_object.metadata:
# Initialise the dictionary
sample[self.analysistype].result_dict = dict()
# Read in the output file
with open(sample[self.analysistype].resultsfile) as epcrresults:
for result in epcrresults:
# Only the lines without a # contain results
if "#" not in result:
# Split on \t
# vtx2a_0_0 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924 - 228 576 2 0 349/100-350
# primer_set: vtx2a_0_0, contig: 2014-SEQ-0121_127_length_1407_cov_50.7797_ID_10924, strand: -,
                    # start: 228, stop: 576, total mismatches: 2, indels: 0,
# amplicon_combo: 349/100-350
primer_set, contig, strand, start, stop, total_mismatches, indels, amplicon_combo = \
result.rstrip().split('\t')
# Set the mismatches to be an int
total_mismatches = int(total_mismatches)
# Set the position of the amplicon on the contig. Ensure that the lower value is first
genome_pos = '{min}-{max}'.format(min=min([int(start), int(stop)]),
max=max([int(start), int(stop)]))
# Extract the gene name from the modified name used when creating the primer file: LMhlyA_0_0
# becomes LMhlyA
gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', primer_set)
gene = gene_re.groups()[0]
# Split the amplicon length from amplicon_combo: 349/100-350 -> 349
amplicon_length = amplicon_combo.split('/')[0]
# Populate the dictionary if the 'total_mismatches' key doesn't exist, or if the current number
# of mismatches is better than the previous 'best' number of mismatches
try:
if total_mismatches < sample[self.analysistype].result_dict[gene]['total_mismatches']:
self.populate_results_dict(sample=sample,
gene=gene,
total_mismatches=total_mismatches,
genome_pos=genome_pos,
amplicon_length=amplicon_length,
contig=contig,
primer_set=primer_set)
except KeyError:
self.populate_results_dict(sample=sample,
gene=gene,
total_mismatches=total_mismatches,
genome_pos=genome_pos,
amplicon_length=amplicon_length,
contig=contig,
primer_set=primer_set)
|
Parse the ePCR output file. Populate the dictionary of results. For alleles, find the best result based on the
number of mismatches before populating the dictionary
|
def interleave(infile_1, infile_2, outfile, suffix1=None, suffix2=None):
    '''Makes an interleaved file from two sequence files. If used, will append suffix1 onto the end
    of every sequence name in infile_1, unless it already ends with suffix1. Similar for suffix2.'''
seq_reader_1 = sequences.file_reader(infile_1)
seq_reader_2 = sequences.file_reader(infile_2)
f_out = utils.open_file_write(outfile)
for seq_1 in seq_reader_1:
try:
seq_2 = next(seq_reader_2)
        except StopIteration:
utils.close(f_out)
raise Error('Error getting mate for sequence', seq_1.id, ' ... cannot continue')
if suffix1 is not None and not seq_1.id.endswith(suffix1):
seq_1.id += suffix1
if suffix2 is not None and not seq_2.id.endswith(suffix2):
seq_2.id += suffix2
print(seq_1, file=f_out)
print(seq_2, file=f_out)
try:
seq_2 = next(seq_reader_2)
    except StopIteration:
seq_2 = None
if seq_2 is not None:
utils.close(f_out)
raise Error('Error getting mate for sequence', seq_2.id, ' ... cannot continue')
utils.close(f_out)
|
Makes an interleaved file from two sequence files. If used, will append suffix1 onto the end
of every sequence name in infile_1, unless it already ends with suffix1. Similar for suffix2.
|
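A typical call (illustrative paths; the suffixes follow the common /1, /2 mate-naming convention):

interleave('reads_1.fastq', 'reads_2.fastq', 'interleaved.fastq',
           suffix1='/1', suffix2='/2')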
def make_middleware(app=None, *args, **kw):
""" Given an app, return that app wrapped in RaptorizeMiddleware """
app = RaptorizeMiddleware(app, *args, **kw)
return app
|
Given an app, return that app wrapped in RaptorizeMiddleware
|
def validate(cls, event_info):
"""Validate that provided event information is valid."""
assert 'routing_key' in event_info
assert isinstance(event_info['routing_key'], six.string_types)
assert 'event_action' in event_info
assert event_info['event_action'] in cls.EVENT_TYPES
assert 'payload' in event_info
payload = event_info['payload']
assert payload['summary']
assert payload['source']
assert payload['severity'] in cls.SEVERITY_TYPES
|
Validate that provided event information is valid.
|
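An event_info dict that would satisfy these assertions might look like the sketch below. EVENT_TYPES and SEVERITY_TYPES are class constants not shown here; the values follow PagerDuty Events API v2 conventions, and the routing key and class name are placeholders:

event_info = {
    'routing_key': 'a-32-character-integration-key',   # placeholder
    'event_action': 'trigger',                         # assumed member of EVENT_TYPES
    'payload': {
        'summary': 'Disk usage above 90% on db-01',
        'source': 'db-01.example.com',
        'severity': 'critical',                        # assumed member of SEVERITY_TYPES
    },
}
SomeEventClass.validate(event_info)  # hypothetical class name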
def transformer_encoder_layers(inputs,
num_layers,
hparams,
attention_type=AttentionType.GLOBAL,
self_attention_bias=None,
q_padding="VALID",
kv_padding="VALID",
name="transformer"):
"""Multi layer transformer encoder."""
x = inputs
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
for layer in range(num_layers):
# attention layers + skip connections
with tf.variable_scope("%s_layer_%d" % (name, layer)):
if attention_type == AttentionType.LOCAL_2D:
y = local_attention_2d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_attention_2d")
elif attention_type == AttentionType.LOCAL_1D:
y = local_attention_1d(common_layers.layer_preprocess(x, hparams),
hparams,
attention_type="local_unmasked",
q_padding=q_padding, kv_padding=kv_padding)
elif attention_type == AttentionType.GLOBAL:
y = full_self_attention(common_layers.layer_preprocess(x, hparams),
self_attention_bias, hparams,
q_padding=q_padding, kv_padding=kv_padding)
x = common_layers.layer_postprocess(x, y, hparams)
# feed-fwd layer + skip connections
y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams)
|
Multi layer transformer encoder.
|
def get_last_modified_date(
self,
bucket: str,
key: str,
) -> datetime:
"""
Retrieves last modified date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the last modified date is being retrieved.
:return: the last modified date
"""
response = self.get_all_metadata(bucket, key)
return response['LastModified']
|
Retrieves last modified date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the last modified date is being retrieved.
:return: the last modified date
|
def drop_collection(request, database_name, collection_name):
"""Drop Collection"""
name = """Retype "%s" to drop the collection""" % (collection_name)
if request.method == 'POST':
form = ConfirmDropForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
if name != collection_name:
                messages.error(request, _('The name did not match. Drop operation aborted'))
return HttpResponseRedirect(
reverse(
'djmongo_drop_collection',
args=(
database_name,
collection_name)))
response = mongodb_drop_collection(database_name, collection_name)
if response:
                errormsg = _("ERROR: %s") % response
messages.error(request, errormsg)
return HttpResponseRedirect(reverse('djmongo_show_dbs'))
else:
messages.success(request, _("The collection was deleted."))
return HttpResponseRedirect(reverse('djmongo_show_dbs'))
else:
# The form is invalid
messages.error(
request, _("Please correct the errors in the form."))
return render(request,
'djmongo/console/generic/bootstrapform.html',
{'form': form, 'name': name})
# This is a GET
context = {'name': name,
'form': ConfirmDropForm(
initial={})}
return render(request, 'djmongo/console/generic/bootstrapform.html',
context)
|
Drop Collection
|
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = OrderedDict()
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i + 1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
elif tokentype == NAME and start[1] == 0:
                name = token
                tokentype, token = g.next()[0:2]
                if tokentype == OP and token == "=":
                    dict[name] = Global(fullmodule, name, fname, _line)
except StopIteration:
pass
f.close()
return dict
|
Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
|
def resolve(cls, propname, objcls=None):
'''
    Resolve the type of a class property. If objcls is not set, assume that
    cls (the class the function is called on) is the class whose property
    type we are trying to resolve.
    :param cls:
    :param objcls:
    :param propname:
    '''
    # object class is not given, so default to cls itself
    if objcls is None:
        objcls = cls
    # look up the type map registered for this class
    typemap = cls._types.get(objcls)
    if typemap is not None:
        # return the registered type, or None if nothing is
        # registered for this property
        return typemap.get(propname, None)
    # no type map exists for this class
    return None
|
Resolve the type of a class property. If objcls is not set, assume that
cls (the class the function is called on) is the class whose property
type we are trying to resolve.
:param cls:
:param objcls:
:param propname:
|
def _determine_filtered_package_requirements(self):
"""
Parse the configuration file for [blacklist]packages
Returns
-------
list of packaging.requirements.Requirement
For all PEP440 package specifiers
"""
filtered_requirements = set()
try:
lines = self.configuration["blacklist"]["packages"]
package_lines = lines.split("\n")
except KeyError:
package_lines = []
for package_line in package_lines:
package_line = package_line.strip()
if not package_line or package_line.startswith("#"):
continue
filtered_requirements.add(Requirement(package_line))
return list(filtered_requirements)
|
Parse the configuration file for [blacklist]packages
Returns
-------
list of packaging.requirements.Requirement
For all PEP440 package specifiers
|
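A sketch of the configuration shape this method parses, with configparser standing in for self.configuration (the section and key names come from the code above):

import configparser
from packaging.requirements import Requirement

cfg = configparser.ConfigParser()
cfg.read_string("""
[blacklist]
packages =
    requests>=2.0
    numpy
""")
lines = cfg['blacklist']['packages'].split('\n')
reqs = [Requirement(l.strip()) for l in lines if l.strip()]
print(sorted(str(r) for r in reqs))  # ['numpy', 'requests>=2.0']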
def create_replication(self, source_db=None, target_db=None,
repl_id=None, **kwargs):
"""
Creates a new replication task.
:param source_db: Database object to replicate from. Can be either a
``CouchDatabase`` or ``CloudantDatabase`` instance.
:param target_db: Database object to replicate to. Can be either a
``CouchDatabase`` or ``CloudantDatabase`` instance.
:param str repl_id: Optional replication id. Generated internally if
not explicitly set.
:param dict user_ctx: Optional user to act as. Composed internally
if not explicitly set.
:param bool create_target: Specifies whether or not to
create the target, if it does not already exist.
:param bool continuous: If set to True then the replication will be
continuous.
:returns: Replication document as a Document instance
"""
if source_db is None:
raise CloudantReplicatorException(101)
if target_db is None:
raise CloudantReplicatorException(102)
data = dict(
_id=repl_id if repl_id else str(uuid.uuid4()),
**kwargs
)
# replication source
data['source'] = {'url': source_db.database_url}
if source_db.admin_party:
pass # no credentials required
elif source_db.client.is_iam_authenticated:
data['source'].update({'auth': {
'iam': {'api_key': source_db.client.r_session.get_api_key}
}})
else:
data['source'].update({'headers': {
'Authorization': source_db.creds['basic_auth']
}})
# replication target
data['target'] = {'url': target_db.database_url}
if target_db.admin_party:
pass # no credentials required
elif target_db.client.is_iam_authenticated:
data['target'].update({'auth': {
'iam': {'api_key': target_db.client.r_session.get_api_key}
}})
else:
data['target'].update({'headers': {
'Authorization': target_db.creds['basic_auth']
}})
# add user context delegation
if not data.get('user_ctx') and self.database.creds and \
self.database.creds.get('user_ctx'):
data['user_ctx'] = self.database.creds['user_ctx']
return self.database.create_document(data, throw_on_exists=True)
|
Creates a new replication task.
:param source_db: Database object to replicate from. Can be either a
``CouchDatabase`` or ``CloudantDatabase`` instance.
:param target_db: Database object to replicate to. Can be either a
``CouchDatabase`` or ``CloudantDatabase`` instance.
:param str repl_id: Optional replication id. Generated internally if
not explicitly set.
:param dict user_ctx: Optional user to act as. Composed internally
if not explicitly set.
:param bool create_target: Specifies whether or not to
create the target, if it does not already exist.
:param bool continuous: If set to True then the replication will be
continuous.
:returns: Replication document as a Document instance
|
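An illustrative call (replicator and client are assumed cloudant-style instances; the database names are placeholders):

repl_doc = replicator.create_replication(
    source_db=client['source-db'],
    target_db=client['target-db'],
    create_target=True,
    continuous=True,
)
print(repl_doc['_id'])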
def stop(self, stopSparkContext=True, stopGraceFully=False):
"""Stop processing streams.
:param stopSparkContext: stop the SparkContext (NOT IMPLEMENTED)
    :param stopGraceFully: stop gracefully (NOT IMPLEMENTED)
"""
while self._on_stop_cb:
cb = self._on_stop_cb.pop()
log.debug('calling on_stop_cb {}'.format(cb))
cb()
IOLoop.current().stop()
StreamingContext._activeContext = None
|
Stop processing streams.
:param stopSparkContext: stop the SparkContext (NOT IMPLEMENTED)
:param stopGraceFully: stop gracefully (NOT IMPLEMENTED)
|
def ComputeFortranSuffixes(suffixes, ppsuffixes):
"""suffixes are fortran source files, and ppsuffixes the ones to be
pre-processed. Both should be sequences, not strings."""
assert len(suffixes) > 0
s = suffixes[0]
sup = s.upper()
upper_suffixes = [_.upper() for _ in suffixes]
if SCons.Util.case_sensitive_suffixes(s, sup):
ppsuffixes.extend(upper_suffixes)
else:
suffixes.extend(upper_suffixes)
|
suffixes are fortran source files, and ppsuffixes the ones to be
pre-processed. Both should be sequences, not strings.
|
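A behavior sketch: both lists are mutated in place, and which one grows depends on whether the platform treats '.f' and '.F' as distinct suffixes (SCons.Util is assumed importable, as in the function above):

suffixes = ['.f', '.for']
ppsuffixes = ['.fpp']
ComputeFortranSuffixes(suffixes, ppsuffixes)
# Case-sensitive system:   ppsuffixes == ['.fpp', '.F', '.FOR']
# Case-insensitive system: suffixes   == ['.f', '.for', '.F', '.FOR']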
def permute(self, qubits: Qubits) -> 'Gate':
"""Permute the order of the qubits"""
vec = self.vec.permute(qubits)
return Gate(vec.tensor, qubits=vec.qubits)
|
Permute the order of the qubits
|
def range_type_to_dtype(range_type: str) -> Optional[tf.DType]:
'''Maps RDDL range types to TensorFlow dtypes.'''
range2dtype = {
'real': tf.float32,
'int': tf.int32,
'bool': tf.bool
}
    # .get() returns None for unknown range types, matching the
    # Optional[tf.DType] return annotation
    return range2dtype.get(range_type)
|
Maps RDDL range types to TensorFlow dtypes.
|
def import_attr(path):
"""
    Given a Python dotted path to a variable in a module,
imports the module and returns the variable in it.
"""
module_path, attr_name = path.rsplit(".", 1)
return getattr(import_module(module_path), attr_name)
|
Given a Python dotted path to a variable in a module,
imports the module and returns the variable in it.
|
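A tiny illustration against the standard library:

dumps = import_attr('json.dumps')
print(dumps({'ok': True}))  # {"ok": true}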
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(EvaluationSummary, self).fix_config(options)
opt = "title"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The title for the output (string)."
opt = "complexity"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to output classifier complexity information (bool)."
opt = "matrix"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to output the classifier confusion matrix (bool)."
return options
|
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
|
def put_summary(self, summary):
"""
Put a `tf.Summary`.
"""
if isinstance(summary, six.binary_type):
summary = tf.Summary.FromString(summary)
assert isinstance(summary, tf.Summary), type(summary)
# TODO other types
for val in summary.value:
if val.WhichOneof('value') == 'simple_value':
val.tag = re.sub('tower[0-9]+/', '', val.tag) # TODO move to subclasses
# TODO This hack is still needed, seem to disappear only when
# compiled from source.
suffix = '-summary' # tensorflow#6150, tensorboard#59
if val.tag.endswith(suffix):
val.tag = val.tag[:-len(suffix)]
self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
self._dispatch(lambda m: m.process_summary(summary))
|
Put a `tf.Summary`.
|
def stations(self, *stns):
"""Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
stns : one or more strings
        One or more station identifiers to request
Returns
-------
self : RadarQuery
Returns self for chaining calls
"""
self._set_query(self.spatial_query, stn=stns)
return self
|
Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
----------
stns : one or more strings
    One or more station identifiers to request
Returns
-------
self : RadarQuery
Returns self for chaining calls
|
def _register_endpoints(self, providers):
"""
Register methods to endpoints
:type providers: list[str]
:rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))]
:param providers: A list of backend providers
:return: A list of endpoint/method pairs
"""
url_map = []
for endp_category in self.endpoints:
for binding, endp in self.endpoints[endp_category].items():
valid_providers = ""
for provider in providers:
valid_providers = "{}|^{}".format(valid_providers, provider)
valid_providers = valid_providers.lstrip("|")
parsed_endp = urlparse(endp)
url_map.append(("(%s)/%s$" % (valid_providers, parsed_endp.path),
functools.partial(self.handle_authn_request, binding_in=binding)))
if self.expose_entityid_endpoint():
parsed_entity_id = urlparse(self.idp.config.entityid)
url_map.append(("^{0}".format(parsed_entity_id.path[1:]),
self._metadata_endpoint))
return url_map
|
Register methods to endpoints
:type providers: list[str]
:rtype: list[(str, ((satosa.context.Context, Any) -> satosa.response.Response, Any))]
:param providers: A list of backend providers
:return: A list of endpoint/method pairs
|
def _ensure_exists(name, path=None):
'''
Raise an exception if the container does not exist
'''
if not exists(name, path=path):
raise CommandExecutionError(
'Container \'{0}\' does not exist'.format(name)
)
|
Raise an exception if the container does not exist
|
def encrypt_key(key, password):
"""Encrypt the password with the public key and return an ASCII representation.
The public key retrieved from the Travis API is loaded as an RSAPublicKey
object using Cryptography's default backend. Then the given password
is encrypted with the encrypt() method of RSAPublicKey. The encrypted
password is then encoded to base64 and decoded into ASCII in order to
convert the bytes object into a string object.
Parameters
----------
key: str
Travis CI public RSA key that requires deserialization
password: str
the password to be encrypted
Returns
-------
encrypted_password: str
the base64 encoded encrypted password decoded as ASCII
Notes
-----
Travis CI uses the PKCS1v15 padding scheme. While PKCS1v15 is secure,
it is outdated and should be replaced with OAEP.
Example:
OAEP(mgf=MGF1(algorithm=SHA256()), algorithm=SHA256(), label=None))
"""
public_key = load_pem_public_key(key.encode(), default_backend())
    # the password must be bytes for RSAPublicKey.encrypt()
    encrypted_password = public_key.encrypt(password.encode('utf-8'), PKCS1v15())
return base64.b64encode(encrypted_password).decode('ascii')
|
Encrypt the password with the public key and return an ASCII representation.
The public key retrieved from the Travis API is loaded as an RSAPublicKey
object using Cryptography's default backend. Then the given password
is encrypted with the encrypt() method of RSAPublicKey. The encrypted
password is then encoded to base64 and decoded into ASCII in order to
convert the bytes object into a string object.
Parameters
----------
key: str
Travis CI public RSA key that requires deserialization
password: str
the password to be encrypted
Returns
-------
encrypted_password: str
the base64 encoded encrypted password decoded as ASCII
Notes
-----
Travis CI uses the PKCS1v15 padding scheme. While PKCS1v15 is secure,
it is outdated and should be replaced with OAEP.
Example:
OAEP(mgf=MGF1(algorithm=SHA256()), algorithm=SHA256(), label=None))
|
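A self-contained sketch: generate a throwaway RSA key with cryptography, serialize its public half to PEM, and run it through encrypt_key (the key size and exponent are conventional choices, not requirements from the source):

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

private_key = rsa.generate_private_key(
    public_exponent=65537, key_size=2048, backend=default_backend()
)
pem = private_key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
).decode('ascii')
print(encrypt_key(pem, 'hunter2'))  # base64-encoded ciphertext string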
def port_channel_vlag_ignore_split(self, **kwargs):
"""Ignore VLAG Split.
Args:
name (str): Port-channel number. (1, 5, etc)
enabled (bool): Is ignore split enabled? (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
        KeyError: if `name` or `enabled` is not specified.
ValueError: if `name` is not a valid value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.port_channel_vlag_ignore_split(
... name='1', enabled=True)
... dev.interface.port_channel_vlag_ignore_split()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
"""
name = str(kwargs.pop('name'))
enabled = bool(kwargs.pop('enabled', True))
callback = kwargs.pop('callback', self._callback)
vlag_ignore_args = dict(name=name)
if not pynos.utilities.valid_interface('port_channel', name):
        raise ValueError("`name` must be a valid port-channel number")
config = getattr(
self._interface,
'interface_port_channel_vlag_ignore_split'
)(**vlag_ignore_args)
if not enabled:
ignore_split = config.find('.//*ignore-split')
ignore_split.set('operation', 'delete')
return callback(config)
|
Ignore VLAG Split.
Args:
name (str): Port-channel number. (1, 5, etc)
enabled (bool): Is ignore split enabled? (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
    KeyError: if `name` or `enabled` is not specified.
ValueError: if `name` is not a valid value.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.port_channel_vlag_ignore_split(
... name='1', enabled=True)
... dev.interface.port_channel_vlag_ignore_split()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
|
def newest(cls, session):
"""Fetches the latest media added to MAL.
:type session: :class:`myanimelist.session.Session`
:param session: A valid MAL session
:rtype: :class:`.Media`
:return: the newest media on MAL
:raises: :class:`.MalformedMediaPageError`
"""
media_type = cls.__name__.lower()
p = session.session.get(u'http://myanimelist.net/' + media_type + '.php?o=9&c[]=a&c[]=d&cv=2&w=1').text
soup = utilities.get_clean_dom(p)
latest_entry = soup.find(u"div", {u"class": u"hoverinfo"})
if not latest_entry:
raise MalformedMediaPageError(0, p, u"No media entries found on recently-added page")
latest_id = int(latest_entry[u'rel'][1:])
return getattr(session, media_type)(latest_id)
|
Fetches the latest media added to MAL.
:type session: :class:`myanimelist.session.Session`
:param session: A valid MAL session
:rtype: :class:`.Media`
:return: the newest media on MAL
:raises: :class:`.MalformedMediaPageError`
|
def _contextualize(contextFactory, contextReceiver):
"""
Invoke a callable with an argument derived from the current execution
context (L{twisted.python.context}), or automatically created if none is
yet present in the current context.
This function, with a better name and documentation, should probably be
somewhere in L{twisted.python.context}. Calling context.get() and
context.call() individually is perilous because you always have to handle
the case where the value you're looking for isn't present; this idiom
forces you to supply some behavior for that case.
@param contextFactory: An object which is both a 0-arg callable and
hashable; used to look up the value in the context, set the value in the
context, and create the value (by being called).
@param contextReceiver: A function that receives the value created or
identified by contextFactory. It is a 1-arg callable object, called with
the result of calling the contextFactory, or retrieving the contextFactory
from the context.
"""
value = context.get(contextFactory, _NOT_SPECIFIED)
if value is not _NOT_SPECIFIED:
return contextReceiver(value)
else:
return context.call({contextFactory: contextFactory()},
_contextualize, contextFactory, contextReceiver)
|
Invoke a callable with an argument derived from the current execution
context (L{twisted.python.context}), or automatically created if none is
yet present in the current context.
This function, with a better name and documentation, should probably be
somewhere in L{twisted.python.context}. Calling context.get() and
context.call() individually is perilous because you always have to handle
the case where the value you're looking for isn't present; this idiom
forces you to supply some behavior for that case.
@param contextFactory: An object which is both a 0-arg callable and
hashable; used to look up the value in the context, set the value in the
context, and create the value (by being called).
@param contextReceiver: A function that receives the value created or
identified by contextFactory. It is a 1-arg callable object, called with
the result of calling the contextFactory, or retrieving the contextFactory
from the context.
|
def delete_agent_cloud(self, agent_cloud_id):
"""DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
response = self._send(http_method='DELETE',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('TaskAgentCloud', response)
|
DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_1.task-agent.models.TaskAgentCloud>`
|
def bench_report(results):
"""Print a report for given benchmark results to the console."""
table = Table(names=['function', 'nest', 'nside', 'size',
'time_healpy', 'time_self', 'ratio'],
dtype=['S20', bool, int, int, float, float, float], masked=True)
for row in results:
table.add_row(row)
table['time_self'].format = '10.7f'
if HEALPY_INSTALLED:
table['ratio'] = table['time_self'] / table['time_healpy']
table['time_healpy'].format = '10.7f'
table['ratio'].format = '7.2f'
table.pprint(max_lines=-1)
|
Print a report for given benchmark results to the console.
|
def download(data_dir):
"""Download census data if it is not already present."""
tf.gfile.MakeDirs(data_dir)
training_file_path = os.path.join(data_dir, TRAINING_FILE)
if not tf.gfile.Exists(training_file_path):
_download_and_clean_file(training_file_path, TRAINING_URL)
eval_file_path = os.path.join(data_dir, EVAL_FILE)
if not tf.gfile.Exists(eval_file_path):
_download_and_clean_file(eval_file_path, EVAL_URL)
|
Download census data if it is not already present.
|
def generate_pdfa(
pdf_pages,
output_file,
compression,
log,
threads=1,
pdf_version='1.5',
pdfa_part='2',
):
"""Generate a PDF/A.
    The pdf_pages, a list of files, will be merged into output_file. One or more
PDF files may be merged. One of the files in this list must be a pdfmark
file that provides Ghostscript with details on how to perform the PDF/A
    conversion. By default we pick PDF/A-2b, but this works for 1 or 3.
compression can be 'jpeg', 'lossless', or an empty string. In 'jpeg',
Ghostscript is instructed to convert color and grayscale images to DCT
(JPEG encoding). In 'lossless' Ghostscript is told to convert images to
Flate (lossless/PNG). If the parameter is omitted Ghostscript is left to
make its own decisions about how to encode images; it appears to use a
heuristic to decide how to encode images. As of Ghostscript 9.25, we
support passthrough JPEG which allows Ghostscript to avoid transcoding
images entirely. (The feature was added in 9.23 but broken, and the 9.24
release of Ghostscript had regressions, so we don't support it until 9.25.)
"""
compression_args = []
if compression == 'jpeg':
compression_args = [
"-dAutoFilterColorImages=false",
"-dColorImageFilter=/DCTEncode",
"-dAutoFilterGrayImages=false",
"-dGrayImageFilter=/DCTEncode",
]
elif compression == 'lossless':
compression_args = [
"-dAutoFilterColorImages=false",
"-dColorImageFilter=/FlateEncode",
"-dAutoFilterGrayImages=false",
"-dGrayImageFilter=/FlateEncode",
]
else:
compression_args = [
"-dAutoFilterColorImages=true",
"-dAutoFilterGrayImages=true",
]
# Older versions of Ghostscript expect a leading slash in
# sColorConversionStrategy, newer ones should not have it. See Ghostscript
# git commit fe1c025d.
strategy = 'RGB' if version() >= '9.19' else '/RGB'
if version() == '9.23':
# 9.23: new feature JPEG passthrough is broken in some cases, best to
# disable it always
# https://bugs.ghostscript.com/show_bug.cgi?id=699216
compression_args.append('-dPassThroughJPEGImages=false')
with NamedTemporaryFile(delete=True) as gs_pdf:
# nb no need to specify ProcessColorModel when ColorConversionStrategy
# is set; see:
# https://bugs.ghostscript.com/show_bug.cgi?id=699392
args_gs = (
[
"gs",
"-dQUIET",
"-dBATCH",
"-dNOPAUSE",
"-dCompatibilityLevel=" + str(pdf_version),
"-dNumRenderingThreads=" + str(threads),
"-sDEVICE=pdfwrite",
"-dAutoRotatePages=/None",
"-sColorConversionStrategy=" + strategy,
]
+ compression_args
+ [
"-dJPEGQ=95",
"-dPDFA=" + pdfa_part,
"-dPDFACompatibilityPolicy=1",
"-sOutputFile=" + gs_pdf.name,
]
)
args_gs.extend(fspath(s) for s in pdf_pages) # Stringify Path objs
log.debug(args_gs)
p = run(args_gs, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
if _gs_error_reported(p.stdout):
log.error(p.stdout)
elif 'overprint mode not set' in p.stdout:
# Unless someone is going to print PDF/A documents on a
# magical sRGB printer I can't see the removal of overprinting
# being a problem....
log.debug(
"Ghostscript had to remove PDF 'overprinting' from the "
"input file to complete PDF/A conversion. "
)
else:
log.debug(p.stdout)
if p.returncode == 0:
# Ghostscript does not change return code when it fails to create
# PDF/A - check PDF/A status elsewhere
copy(gs_pdf.name, fspath(output_file))
else:
log.error('Ghostscript PDF/A rendering failed')
raise SubprocessOutputError()
|
Generate a PDF/A.
The pdf_pages, a list of files, will be merged into output_file. One or more
PDF files may be merged. One of the files in this list must be a pdfmark
file that provides Ghostscript with details on how to perform the PDF/A
conversion. By default we pick PDF/A-2b, but this works for 1 or 3.
compression can be 'jpeg', 'lossless', or an empty string. In 'jpeg',
Ghostscript is instructed to convert color and grayscale images to DCT
(JPEG encoding). In 'lossless' Ghostscript is told to convert images to
Flate (lossless/PNG). If the parameter is omitted Ghostscript is left to
make its own decisions about how to encode images; it appears to use a
heuristic to decide how to encode images. As of Ghostscript 9.25, we
support passthrough JPEG which allows Ghostscript to avoid transcoding
images entirely. (The feature was added in 9.23 but broken, and the 9.24
release of Ghostscript had regressions, so we don't support it until 9.25.)
|
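An illustrative invocation (paths and the logger name are placeholders; one of the listed pages must be the pdfmark file mentioned in the docstring):

import logging

generate_pdfa(
    pdf_pages=['page_0001.pdf', 'pdfa_def.ps'],
    output_file='output_pdfa.pdf',
    compression='jpeg',
    log=logging.getLogger('pdfa-demo'),
    threads=4,
)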
def add_alias(self, name, *alt_names):
"""
Add some duplicate names for a given function. The original function's implementation must already be
registered.
:param name: The name of the function for which an implementation is already present
:param alt_names: Any number of alternate names may be passed as varargs
"""
old_procedure = self.procedures[name]
for alt in alt_names:
new_procedure = copy.deepcopy(old_procedure)
new_procedure.display_name = alt
self.procedures[alt] = new_procedure
|
Add some duplicate names for a given function. The original function's implementation must already be
registered.
:param name: The name of the function for which an implementation is already present
:param alt_names: Any number of alternate names may be passed as varargs
|
def _search(problem, fringe, graph_search=False, depth_limit=None,
node_factory=SearchNode, graph_replace_when_better=False,
viewer=None):
'''
Basic search algorithm, base of all the other search algorithms.
'''
if viewer:
viewer.event('started')
memory = set()
initial_node = node_factory(state=problem.initial_state,
problem=problem)
fringe.append(initial_node)
while fringe:
if viewer:
viewer.event('new_iteration', fringe.sorted())
node = fringe.pop()
if problem.is_goal(node.state):
if viewer:
viewer.event('chosen_node', node, True)
viewer.event('finished', fringe.sorted(), node, 'goal found')
return node
else:
if viewer:
viewer.event('chosen_node', node, False)
memory.add(node.state)
if depth_limit is None or node.depth < depth_limit:
expanded = node.expand()
if viewer:
viewer.event('expanded', [node], [expanded])
for n in expanded:
if graph_search:
others = [x for x in fringe if x.state == n.state]
assert len(others) in (0, 1)
if n.state not in memory and len(others) == 0:
fringe.append(n)
elif graph_replace_when_better and len(others) > 0 and n < others[0]:
fringe.remove(others[0])
fringe.append(n)
else:
fringe.append(n)
if viewer:
viewer.event('finished', fringe.sorted(), None, 'goal not found')
|
Basic search algorithm, base of all the other search algorithms.
|
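A minimal toy exercising the loop's contracts with stand-in problem and node classes (the real Problem/SearchNode interfaces are assumed to come from the surrounding library; a plain list works as a LIFO fringe since no viewer is passed):

class _ToyProblem:
    initial_state = 0
    def is_goal(self, state):
        return state == 3

class _ToyNode:
    def __init__(self, state, problem=None, depth=0):
        self.state, self.problem, self.depth = state, problem, depth
    def expand(self):
        # a single successor: count upwards
        return [_ToyNode(self.state + 1, self.problem, self.depth + 1)]

goal = _search(_ToyProblem(), fringe=[], node_factory=_ToyNode)
print(goal.state)  # 3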
def _process_data(*kwarg_names):
"""Helper function to handle data keyword argument
"""
def _data_decorator(func):
@functools.wraps(func)
def _mark_with_data(*args, **kwargs):
data = kwargs.pop('data', None)
if data is None:
return func(*args, **kwargs)
else:
data_args = [data[i] if hashable(data, i) else i for i in args]
                data_kwargs = {
                    kw: data[kwargs[kw]] if hashable(data, kwargs[kw]) else kwargs[kw]
                    for kw in set(kwarg_names).intersection(list(kwargs.keys()))
                }
                try:
                    # if any of the plots want to use the index_data, they can
                    # use it by referring to this attribute.
                    data_kwargs['index_data'] = data.index
                except AttributeError:
                    pass
kwargs_update = kwargs.copy()
kwargs_update.update(data_kwargs)
return func(*data_args, **kwargs_update)
return _mark_with_data
return _data_decorator
|
Helper function to handle data keyword argument
|
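A sketch of the decorator in use, matplotlib-style: string arguments are looked up in the data mapping when possible (the module's hashable helper is assumed to test key membership in data):

@_process_data('color')
def plot_xy(x, y, color=None):
    print(x, y, color)

table = {'xs': [1, 2, 3], 'ys': [4, 5, 6], 'c': 'red'}
plot_xy('xs', 'ys', color='c', data=table)
# -> [1, 2, 3] [4, 5, 6] red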
def percent_encode_host(url):
""" Convert the host of uri formatted with to_uri()
to have a %-encoded host instead of punycode host
The rest of url should be unchanged
"""
# only continue if punycode encoded
if 'xn--' not in url:
return url
parts = urlsplit(url)
domain = parts.netloc.encode('utf-8')
    try:
        domain = domain.decode('idna')
        if six.PY2:
            domain = domain.encode('utf-8', 'ignore')
    except UnicodeError:
        # likely already encoded, so use as is
        pass
    domain = quote(domain)
return urlunsplit((parts[0], domain, parts[2], parts[3], parts[4]))
|
Convert the host of a URI formatted with to_uri()
to have a %-encoded host instead of a punycode host.
The rest of the URL is unchanged.
|
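A round-trip illustration (the host below is the standard IDN example domain; path and query pass through unchanged):

url = 'http://xn--e1afmkfd.example/path?q=1'
print(percent_encode_host(url))
# -> http://%D0%BF%D1%80%D0%B8%D0%BC%D0%B5%D1%80.example/path?q=1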
def sql(self, sql: str, *qmark_params, **named_params):
"""
:deprecated: use self.statement to execute properly-formatted sql statements
"""
statement = SingleSqlStatement(sql)
return self.statement(statement).execute(*qmark_params, **named_params)
|
:deprecated: use self.statement to execute properly-formatted sql statements
|
def _get_span_name(servicer_context):
"""Generates a span name based off of the gRPC server rpc_request_info"""
method_name = servicer_context._rpc_event.call_details.method[1:]
if isinstance(method_name, bytes):
method_name = method_name.decode('utf-8')
method_name = method_name.replace('/', '.')
return '{}.{}'.format(RECV_PREFIX, method_name)
|
Generates a span name based off of the gRPC server rpc_request_info
|
def api_retrieve(self, api_key=None):
"""
Call the stripe API's retrieve operation for this model.
:param api_key: The api key to use for this request. Defaults to settings.STRIPE_SECRET_KEY.
:type api_key: string
"""
api_key = api_key or self.default_api_key
return self.stripe_class.retrieve(
id=self.id, api_key=api_key, expand=self.expand_fields
)
|
Call the stripe API's retrieve operation for this model.
:param api_key: The api key to use for this request. Defaults to settings.STRIPE_SECRET_KEY.
:type api_key: string
|
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
        for fn in fns:
            fn = _trim_arity(fn)
            def pa(s, l, t, fn=fn):
                # bind fn as a default argument so each registered
                # condition keeps its own callable (avoids the classic
                # late-binding closure bug when several fns are passed)
                if not bool(fn(s, l, t)):
                    raise exc_type(s, l, msg)
            self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
|
Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
|
def get_next_occurrence(tx: ScheduledTransaction) -> date:
    """ Calculates the next occurrence date for a scheduled transaction.
    Mimics the recurrenceNextInstance() function from GnuCash.
    Still not fully complete but handles the main cases I use. """
    # Reference documentation:
    # https://github.com/MisterY/gnucash-portfolio/issues/3

    # Preparing the reference day is an important step before the calculation.
    # It should be either
    # a) the last occurrence date + 1, or
    # b) the recurrence start date - 1,
    # because the ref date is the date from which the due dates are calculated.
    # To include occurrences starting today, we calculate from the day before.
    ref_datum: Datum = Datum()
    if tx.last_occur:
        ref_datum.from_date(tx.last_occur)
        ref_datum.add_days(1)
    else:
        ref_datum.from_date(tx.recurrence.recurrence_period_start)
        ref_datum.subtract_days(1)
    ref_date: datetime = ref_datum.value

    ###########################################################
    # The code below mimics the function
    # recurrenceNextInstance(const Recurrence *r, const GDate *refDate, GDate *nextDate)
    # https://github.com/Gnucash/gnucash/blob/115c0bf4a4afcae4269fe4b9d1e4a73ec7762ec6/libgnucash/engine/Recurrence.c#L172

    start_date: Datum = Datum()
    start_date.from_date(tx.recurrence.recurrence_period_start)
    if ref_date < start_date.value:
        # If the recurrence hasn't even started, the next date is the start date.
        # This should also handle the "once" type in most cases.
        return start_date.value.date()

    # Start at the ref date.
    next_date: Datum = Datum()
    next_date.from_datetime(ref_date)

    # Step 1: move FORWARD one period, passing exactly one occurrence.
    mult: int = tx.recurrence.recurrence_mult
    period: str = tx.recurrence.recurrence_period_type
    wadj = tx.recurrence.recurrence_weekend_adjust
    month_periods = (RecurrencePeriod.YEAR.value, RecurrencePeriod.MONTH.value,
                     RecurrencePeriod.END_OF_MONTH.value)
    # Not all periods from the original file are handled at the moment.
    if period in month_periods:
        if period == RecurrencePeriod.YEAR.value:
            # A yearly recurrence is treated as a 12-month one.
            mult *= 12

        # Weekend adjustment. Also takes care of short months.
        next_weekday = next_date.get_day_name()
        if (wadj == WeekendAdjustment.BACK.value and
                next_weekday in ("Saturday", "Sunday")):
            # "Allows the following Friday-based calculations to proceed if 'next'
            # is between Friday and the target day."
            days_to_subtract = 1 if next_weekday == "Saturday" else 2
            next_date.subtract_days(days_to_subtract)
            # Re-read the weekday so the Friday handling below can fire,
            # as in the mimicked C function.
            next_weekday = next_date.get_day_name()
        if wadj == WeekendAdjustment.BACK.value and next_weekday == "Friday":
            next_date = handle_friday(next_date, period, mult, start_date)

        # Line 274 in Recurrence.c.
        temp_date = next_date.clone()
        if (temp_date.is_end_of_month() or
                (period in (RecurrencePeriod.MONTH.value, RecurrencePeriod.YEAR.value)
                 and next_date.get_day() >= start_date.get_day())):
            next_date.add_months(mult)
        else:
            # One fewer month forward because of the occurrence in this month.
            next_date.add_months(mult - 1)
    elif period == RecurrencePeriod.DAY.value:
        logging.warning("daily not handled")
    else:
        logging.info(f"recurrence not handled: {period}")

    #######################
    # Step 2: "Back up to align to the base phase. To ensure forward
    # progress, we never subtract as much as we added (x % mult < mult)."
    if period in month_periods:
        n_months = (
            12 * (next_date.get_year() - start_date.get_year()) +
            (next_date.get_month() - start_date.get_month())
        )
        # e.g. a quarterly schedule (mult=3) started in January that stepped
        # to May gives n_months=4, so back up 4 % 3 = 1 month, to April.
        next_date.subtract_months(n_months % mult)

        # Pick the day of the month, handling month-end and short months.
        days_in_month = datetimeutils.get_days_in_month(
            next_date.get_year(), next_date.get_month())
        if (period == RecurrencePeriod.END_OF_MONTH.value or
                next_date.get_day() >= days_in_month):
            # Set to the last day of the month.
            next_date.set_day(days_in_month)
        else:
            # Same day of the month as the start.
            next_date.set_day(start_date.get_day())

    # Adjust dates that fall on a weekend.
    if period in month_periods:
        weekday = next_date.get_day_name()
        if weekday in ("Saturday", "Sunday"):
            if wadj == WeekendAdjustment.BACK.value:
                next_date.subtract_days(1 if weekday == "Saturday" else 2)
            elif wadj == WeekendAdjustment.FORWARD.value:
                next_date.add_days(2 if weekday == "Saturday" else 1)

    return next_date.value.date()
|
Calculates the next occurrence date for a scheduled transaction.
Mimics the recurrenceNextInstance() function from GnuCash.
Still not fully complete but handles the main cases I use.
|
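A minimal usage sketch for the function above (the stand-in object below only mirrors the fields the function reads; the values are invented, and the literal "none" stands for any weekend-adjustment value that is neither BACK nor FORWARD):

    from datetime import date
    from types import SimpleNamespace

    # Hand-built stand-in for a ScheduledTransaction: quarterly, starting 2024-01-15.
    recurrence = SimpleNamespace(
        recurrence_period_start=date(2024, 1, 15),
        recurrence_mult=3,                                    # every 3 months
        recurrence_period_type=RecurrencePeriod.MONTH.value,
        recurrence_weekend_adjust="none",                     # assumed: neither BACK nor FORWARD
    )
    tx = SimpleNamespace(last_occur=date(2024, 4, 15), recurrence=recurrence)

    print(get_next_occurrence(tx))  # expected: 2024-07-15 (2024-04-15 + 3 months)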