| code | docstring |
|---|---|
def safe_dump(data, stream=None, **kwargs):
'''
Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly. Ensure that unicode strings are encoded unless
explicitly told not to.
'''
if 'allow_unicode' not in kwargs:
kwargs['allow_unicode'] = True
return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)
|
Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly. Ensure that unicode strings are encoded unless
explicitly told not to.
|
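A minimal usage sketch for the dumper above; it assumes the surrounding module's yaml import and SafeOrderedDumper are available:

from collections import OrderedDict

doc = OrderedDict([('zebra', 1), ('apple', 2)])
# keys are emitted in insertion order rather than sorted: zebra, then apple
print(safe_dump(doc))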
def load(steps, reload=False):
"""
safely load steps in place, excluding those that fail
Args:
steps: the steps to load
"""
# work on collections by default for fewer isinstance() calls per call to load()
if reload:
_STEP_CACHE.clear()
if callable(steps):
steps = steps()
if not isinstance(steps, collections.Iterable):
return load([steps])[0]
loaded = []
for s in steps:
digest = s._digest
if digest in _STEP_CACHE:
loaded.append(_STEP_CACHE[digest])
else:
try:
s.load()
_STEP_CACHE[digest] = s
loaded.append(s)
except Exception:
logging.warning('Error during step load:\n%s' %
util.indent(traceback.format_exc()))
return loaded
|
safely load steps in place, excluding those that fail
Args:
steps: the steps to load
|
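A hypothetical illustration of the caching behaviour, using a toy object that provides the `_digest` attribute and `load()` method the function expects:

class ToyStep(object):
    def __init__(self, digest):
        self._digest = digest
    def load(self):
        print('loading %s' % self._digest)

loaded = load([ToyStep('abc'), ToyStep('abc')])
# 'loading abc' is printed only once; the second step resolves to the
# already-loaded instance stored in _STEP_CACHE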
def resfinderreporter(self):
"""
Custom reports for ResFinder analyses. These reports link the gene(s) found to their resistance phenotypes
"""
# Initialise resistance dictionaries from the notes.txt file
resistance_classes = ResistanceNotes.classes(self.targetpath)
# Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be
# able to have appropriately sized, multi-line cells
workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, '{}.xlsx'.format(self.analysistype)))
# New worksheet to store the data
worksheet = workbook.add_worksheet()
# Add a bold format for header cells. Using a monotype font, size 8
bold = workbook.add_format({'bold': True, 'font_name': 'Courier New', 'font_size': 8})
# Format for data cells. Monotype, size 8, top vertically justified
courier = workbook.add_format({'font_name': 'Courier New', 'font_size': 8})
courier.set_align('top')
# Initialise the position within the worksheet to be (0,0)
row = 0
col = 0
# A dictionary to store the column widths for every header
columnwidth = dict()
extended = False
headers = ['Strain', 'Gene', 'Allele', 'Resistance', 'PercentIdentity', 'PercentCovered', 'Contig', 'Location',
'nt_sequence']
for sample in self.metadata:
# Create an attribute to store the string for the eventual pipeline report
sample[self.analysistype].pipelineresults = list()
sample[self.analysistype].sampledata = list()
try:
blastresults = sample[self.analysistype].blastresults
except AttributeError:
blastresults = 'NA'
# Process the sample only if the script could find targets
if blastresults != 'NA':
for result in sample[self.analysistype].blastresults:
# Set the name to avoid writing out the dictionary[key] multiple times
name = result['subject_id']
# Use the ResistanceNotes gene name extraction method to get the necessary variables
gname, genename, accession, allele = ResistanceNotes.gene_name(name)
# Initialise a list to store all the data for each strain
data = list()
# Determine resistance phenotype of the gene
resistance = ResistanceNotes.resistance(name, resistance_classes)
# Append the necessary values to the data list
data.append(genename)
data.append(allele)
data.append(resistance)
percentid = result['percentidentity']
data.append(percentid)
data.append(result['alignment_fraction'])
data.append(result['query_id'])
data.append('...'.join([str(result['low']), str(result['high'])]))
try:
# Populate the attribute storing the resfinder results
sample[self.analysistype].pipelineresults.append(
'{rgene} ({pid}%) {rclass}'.format(rgene=genename,
pid=percentid,
rclass=resistance))
# Only if the alignment option is selected, for inexact results, add alignments
if self.align and percentid != 100.00:
# Align the protein (and nucleotide) sequences to the reference
self.alignprotein(sample, name)
if not extended:
# Add the appropriate headers
headers.extend(['aa_Identity',
'aa_Alignment',
'aa_SNP_location',
'nt_Alignment',
'nt_SNP_location'
])
extended = True
# Create a FASTA-formatted sequence output of the query sequence
record = SeqRecord(sample[self.analysistype].dnaseq[name],
id='{}_{}'.format(sample.name, name),
description='')
# Add the alignment, and the location of mismatches for both nucleotide and amino
# acid sequences
data.extend([record.format('fasta'),
sample[self.analysistype].aaidentity[name],
sample[self.analysistype].aaalign[name],
sample[self.analysistype].aaindex[name],
sample[self.analysistype].ntalign[name],
sample[self.analysistype].ntindex[name]
])
else:
record = SeqRecord(Seq(result['subject_sequence'], IUPAC.unambiguous_dna),
id='{}_{}'.format(sample.name, name),
description='')
data.append(record.format('fasta'))
if self.align:
# Add '-'s for the empty results, as there are no alignments for exact matches
data.extend(['100', '-', '-', '-', '-'])
# If there are no blast results for the target, add a '-'
except (KeyError, TypeError):
data.append('-')
sample[self.analysistype].sampledata.append(data)
if 'nt_sequence' not in headers:
headers.append('nt_sequence')
# Write the header to the spreadsheet
for header in headers:
worksheet.write(row, col, header, bold)
# Set the column width based on the longest header
try:
columnwidth[col] = len(header) if len(header) > columnwidth[col] else columnwidth[
col]
except KeyError:
columnwidth[col] = len(header)
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Increment the row and reset the column to zero in preparation of writing results
row += 1
col = 0
# Write out the data to the spreadsheet
for sample in self.metadata:
if not sample[self.analysistype].sampledata:
# Increment the row and reset the column to zero in preparation of writing results
row += 1
col = 0
# Leave this row at its default height; there is no multi-line data to display
worksheet.set_row(row)
worksheet.set_column(col, col, columnwidth[col])
for data in sample[self.analysistype].sampledata:
columnwidth[col] = len(sample.name) + 2
worksheet.set_column(col, col, columnwidth[col])
worksheet.write(row, col, sample.name, courier)
col += 1
# List of the number of lines for each result
totallines = list()
for results in data:
# Write the result in the current cell
worksheet.write(row, col, results, courier)
try:
# For multi-line strings, measure only the second line (the first sequence line of a
# FASTA record); using the full string length would make the column far too wide
alignmentcorrect = len(str(results).split('\n')[1])
# Count the number of lines for the data
lines = results.count('\n') if results.count('\n') >= 1 else 1
# Add the number of lines to the list
totallines.append(lines)
except IndexError:
try:
# Counting the length of multi-line strings yields columns that are far too wide, only count
# the length of the string up to the first line break
alignmentcorrect = len(str(results).split('\n')[0])
# Count the number of lines for the data
lines = results.count('\n') if results.count('\n') >= 1 else 1
# Add the number of lines to the list
totallines.append(lines)
# If there are no newline characters, set the width to the length of the string
except AttributeError:
alignmentcorrect = len(str(results))
lines = 1
# Add the number of lines to the list
totallines.append(lines)
# Increase the width of the current column, if necessary
try:
columnwidth[col] = alignmentcorrect if alignmentcorrect > columnwidth[col] else \
columnwidth[col]
except KeyError:
columnwidth[col] = alignmentcorrect
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Set the height of the row based on the maximum number of lines (newline characters) in any cell, 11 units per line
worksheet.set_row(row, max(totallines) * 11)
# Increase the row counter for the next strain's data
row += 1
col = 0
# Close the workbook
workbook.close()
|
Custom reports for ResFinder analyses. These reports link the gene(s) found to their resistance phenotypes
|
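The column-sizing pattern used throughout the method, shown in isolation (xlsxwriter API; the file name and sample rows are placeholders): track the widest value seen per column and apply it with set_column().

import xlsxwriter

workbook = xlsxwriter.Workbook('demo.xlsx')
worksheet = workbook.add_worksheet()
columnwidth = {}
for row, record in enumerate([['Strain1', 'blaTEM-1'], ['S2', 'tet(A)']]):
    for col, value in enumerate(record):
        worksheet.write(row, col, value)
        # widen the column whenever a longer value appears
        columnwidth[col] = max(columnwidth.get(col, 0), len(value))
        worksheet.set_column(col, col, columnwidth[col])
workbook.close()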
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
|
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
|
def get_gatk_annotations(config, include_depth=True, include_baseqranksum=True,
gatk_input=True):
"""Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples
this will undercount on high depth sequencing and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide option to skip it.
"""
broad_runner = broad.runner_from_config(config)
anns = ["MappingQualityRankSumTest", "MappingQualityZero",
"QualByDepth", "ReadPosRankSumTest", "RMSMappingQuality"]
if include_baseqranksum:
anns += ["BaseQualityRankSumTest"]
# Some annotations not working correctly with external datasets and GATK 3
if gatk_input or broad_runner.gatk_type() == "gatk4":
anns += ["FisherStrand"]
if broad_runner.gatk_type() == "gatk4":
anns += ["MappingQuality"]
else:
anns += ["GCContent", "HaplotypeScore", "HomopolymerRun"]
if include_depth:
anns += ["DepthPerAlleleBySample"]
if broad_runner.gatk_type() in ["restricted", "gatk4"]:
anns += ["Coverage"]
else:
anns += ["DepthOfCoverage"]
return anns
|
Retrieve annotations to use for GATK VariantAnnotator.
If include_depth is false, we'll skip annotating DP. Since GATK downsamples
this will undercount on high depth sequencing and the standard outputs
from the original callers may be preferable.
BaseQRankSum can cause issues with some MuTect2 and other runs, so we
provide option to skip it.
|
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
"""Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
"""
with open(client_secrets_file, 'r') as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs)
|
Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
|
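A usage sketch (the secrets file path and scope are placeholders; extra keyword arguments are forwarded to OAuth2Session as documented):

flow = Flow.from_client_secrets_file(
    'client_secrets.json',
    scopes=['https://www.googleapis.com/auth/drive.readonly'])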
def xspec_cosmo(H0=None,q0=None,lambda_0=None):
"""
Define the Cosmology in use within the XSpec models. See Xspec manual for help:
http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html
All parameters can be modified or just a single parameter
:param H0: the Hubble constant
:param q0: the deceleration parameter
:param lambda_0: the cosmological constant
:return: Either None or the current settings (H_0, q_0, lambda_0)
"""
current_settings = _xspec.get_xscosmo()
if (H0 is None) and (q0 is None) and (lambda_0 is None):
return current_settings
else:
# ok, we will see what was changed by the user
user_inputs = [H0, q0, lambda_0]
for i, current_setting in enumerate(current_settings):
if user_inputs[i] is None:
# the user didn't modify this,
# so lets keep what was already set
user_inputs[i] = current_setting
# pass this to xspec
_xspec.set_xscosmo(*user_inputs)
|
Define the Cosmology in use within the XSpec models. See Xspec manual for help:
http://heasarc.nasa.gov/xanadu/xspec/manual/XScosmo.html
All parameters can be modified or just a single parameter
:param H0: the Hubble constant
:param q0: the deceleration parameter
:param lambda_0: the cosmological constant
:return: Either None or the current settings (H_0, q_0, lambda_0)
|
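A short sketch of the read-or-modify pattern described above (it requires a working XSpec installation behind the _xspec module):

h0, q0, lambda0 = xspec_cosmo()  # no arguments: return the current settings
xspec_cosmo(H0=67.4)             # change only H0; q0 and lambda_0 keep their values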
def refresh_token(self, refresh_token):
'''Return the original JSON response from the token endpoint'''
url = 'https://api.youku.com/oauth2/token.json'
data = {'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': refresh_token}
r = requests.post(url, data=data)
check_error(r)
return r.json()
|
Return the original JSON response from the token endpoint
|
def readdir(path):
'''
.. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Dir path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
dirents = ['.', '..']
dirents.extend(os.listdir(path))
return dirents
|
.. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/
|
def process(obj):
"""
Process each block of the merger object.
"""
#merge all static and templates and less files
merged = merge(obj)
#save the full file if name defined
if obj.get('full'):
print('Saving: {} ({:.2f}kB)'.format(obj['full'], len(merged)/1024.0))
_save(obj['full'], merged)
else:
print('Full merged size: {:.2f}kB'.format(len(merged)/1024.0))
#minify js and save to file
if obj.get('jsmin'):
jsMin(merged, obj['jsmin'])
#minify css and save to file
if obj.get('cssmin'):
cssMin(merged, obj['cssmin'])
|
Process each block of the merger object.
|
def connection_lost(self, exc):
"""Stop when connection is lost."""
if exc:
self.log.exception('disconnected due to exception')
else:
self.log.info('disconnected because of close/abort.')
self._closed.set()
|
Stop when connection is lost.
|
def print_status(self, indent="", recurse=False):
"""Print a summary of the job status for each `Link` in this `Chain`"""
print ("%s%30s : %15s : %20s" %
(indent, "Linkname", "Link Status", "Jobs Status"))
for link in self._links.values():
if hasattr(link, 'check_status'):
status_vect = link.check_status(
stream=sys.stdout, no_wait=True, do_print=False)
else:
status_vect = None
key = JobDetails.make_fullkey(link.full_linkname)
link_status = JOB_STATUS_STRINGS[link.check_job_status(key)]
if status_vect is None:
jobs_status = JOB_STATUS_STRINGS[link.check_jobs_status()]
else:
jobs_status = status_vect
print ("%s%30s : %15s : %20s" %
(indent, link.linkname, link_status, jobs_status))
if hasattr(link, 'print_status') and recurse:
print ("---------- %30s -----------" % link.linkname)
link.print_status(indent + " ", recurse=True)
print ("------------------------------------------------")
|
Print a summary of the job status for each `Link` in this `Chain`
|
def revoke_access(src, dst='any', port=None, proto=None):
"""
Revoke access to an address or subnet
:param src: address (e.g. 192.168.1.234) or subnet
(e.g. 192.168.1.0/24).
:param dst: destination of the connection; if the machine has multiple IPs and
connections to only one of them should be accepted, this field
has to be set.
:param port: destination port
:param proto: protocol (tcp or udp)
"""
return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
|
Revoke access to an address or subnet
:param src: address (e.g. 192.168.1.234) or subnet
(e.g. 192.168.1.0/24).
:param dst: destination of the connection; if the machine has multiple IPs and
connections to only one of them should be accepted, this field
has to be set.
:param port: destination port
:param proto: protocol (tcp or udp)
|
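A usage sketch mirroring the parameter descriptions (the addresses are the docstring's own examples; the call delegates to modify_access from the surrounding module):

revoke_access('192.168.1.234')                          # revoke a single host, any destination
revoke_access('192.168.1.0/24', port=443, proto='tcp')  # revoke a subnet on one port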
def cut(list_, index=0):
"""Cut a list by index or arg"""
if isinstance(index, int):
cut_ = lambda x: x[index]
else:
cut_ = lambda x: getattr(x, index)
return list(map(cut_, list_))
|
Extract one field from each element of a list, by integer index or attribute name
|
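Both call styles, illustrated:

cut([(1, 2), (3, 4)], index=1)              # integer index -> [2, 4]

from collections import namedtuple
Point = namedtuple('Point', 'x y')
cut([Point(1, 2), Point(3, 4)], index='y')  # attribute name -> [2, 4]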
async def _build_rr_state_json(self, rr_id: str, timestamp: int) -> (str, int):
"""
Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds)
"""
LOGGER.debug('_Verifier._build_rr_state_json >>> rr_id: %s, timestamp: %s', rr_id, timestamp)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('Verifier._build_rr_state_json <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
rr_json = None
ledger_timestamp = None
get_rr_req_json = await ledger.build_get_revoc_reg_request(self.did, rr_id, timestamp)
resp_json = await self._submit(get_rr_req_json)
resp = json.loads(resp_json)
if resp.get('result', {}).get('data', None) and resp['result']['data'].get('value', None):
# timestamp at or beyond rev reg creation, carry on
try:
(_, rr_json, ledger_timestamp) = await ledger.parse_get_revoc_reg_response(resp_json)
except IndyError: # ledger replied, but there is no such rev reg available
LOGGER.debug('Verifier._build_rr_state_json <!< no rev reg exists on %s', rr_id)
raise AbsentRevReg('No rev reg exists on {}'.format(rr_id))
else:
LOGGER.debug(
'_Verifier._build_rr_state_json <!< Rev reg %s created after asked-for time %s',
rr_id,
timestamp)
raise BadRevStateTime('Rev reg {} created after asked-for time {}'.format(rr_id, timestamp))
rv = (rr_json, ledger_timestamp)
LOGGER.debug('_Verifier._build_rr_state_json <<< %s', rv)
return rv
|
Build rev reg state json at a given requested timestamp.
Return rev reg state json and its transaction time on the distributed ledger,
with upper bound at input timestamp of interest.
Raise AbsentRevReg if no revocation registry exists on input rev reg id,
or BadRevStateTime if requested timestamp predates revocation registry creation.
:param rr_id: rev reg id
:param timestamp: timestamp of interest (epoch seconds)
:return: rev reg state json and ledger timestamp (epoch seconds)
|
def Eps(value=None, loc=None):
"""A rule that accepts no tokens (epsilon) and returns ``value``."""
@llrule(loc, lambda parser: [])
def rule(parser):
return value
return rule
|
A rule that accepts no tokens (epsilon) and returns ``value``.
|
def display(result, stream):
"""
Intelligently print the result (or pass if result is None).
:param result:
:return: None
"""
if result is None:
return
elif isinstance(result, basestring):
pass
elif isinstance(result, collections.Mapping):
result = u'\n'.join(u'%s=%s' % (k, v) for
k, v in result.iteritems() if v is not None)
elif isinstance(result, collections.Iterable):
result = u'\n'.join(unicode(x) for x in result if x is not None)
else:
result = unicode(result)
stream.write(result.encode('utf8'))
stream.write('\n')
|
Intelligently print the result (or pass if result is None).
:param result:
:return: None
|
def _update_pop(self, pop_size):
"""Assigns fitnesses to particles that are within bounds."""
valid_particles = []
invalid_particles = []
for part in self.population:
if any(x > 1 or x < -1 for x in part):
invalid_particles.append(part)
else:
valid_particles.append(part)
self._model_count += len(valid_particles)
for part in valid_particles:
self.update_particle(part)
self.assign_fitnesses(valid_particles)
for part in valid_particles:
if part.fitness > part.best.fitness:
part.best = creator.Particle(part)
part.best.fitness = part.fitness
for part in invalid_particles:
self.update_particle(part)
self.population[:] = valid_particles + invalid_particles
self.population.sort(key=lambda x: x.ident) # shouldn't need to sort?
return
|
Assigns fitnesses to particles that are within bounds.
|
def start(self):
"""Start the connection to a transport."""
connect_thread = threading.Thread(target=self._connect)
connect_thread.start()
|
Start the connection to a transport.
|
def nguHanhNapAm(diaChi, thienCan, xuatBanMenh=False):
"""Sử dụng Ngũ Hành nạp âm để tính Hành của năm.
Args:
diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
Returns:
Trả về chữ viết tắt Hành của năm (K, T, H, O, M)
"""
banMenh = {
"K1": "HẢI TRUNG KIM",
"T1": "GIÁNG HẠ THỦY",
"H1": "TÍCH LỊCH HỎA",
"O1": "BÍCH THƯỢNG THỔ",
"M1": "TANG ÐỐ MỘC",
"T2": "ÐẠI KHÊ THỦY",
"H2": "LƯ TRUNG HỎA",
"O2": "THÀNH ÐẦU THỔ",
"M2": "TÒNG BÁ MỘC",
"K2": "KIM BẠCH KIM",
"H3": "PHÚ ÐĂNG HỎA",
"O3": "SA TRUNG THỔ",
"M3": "ÐẠI LÂM MỘC",
"K3": "BẠCH LẠP KIM",
"T3": "TRƯỜNG LƯU THỦY",
"K4": "SA TRUNG KIM",
"T4": "THIÊN HÀ THỦY",
"H4": "THIÊN THƯỢNG HỎA",
"O4": "LỘ BÀN THỔ",
"M4": "DƯƠNG LIỄU MỘC",
"T5": "TRUYỀN TRUNG THỦY",
"H5": "SƠN HẠ HỎA",
"O5": "ÐẠI TRẠCH THỔ",
"M5": "THẠCH LỰU MỘC",
"K5": "KIẾM PHONG KIM",
"H6": "SƠN ÐẦU HỎA",
"O6": "ỐC THƯỢNG THỔ",
"M6": "BÌNH ÐỊA MỘC",
"K6": "XOA XUYẾN KIM",
"T6": "ÐẠI HẢI THỦY"}
matranNapAm = [
[0, "G", "Ất", "Bính", "Đinh", "Mậu", "Kỷ", "Canh", "Tân", "N", "Q"],
[1, "K1", False, "T1", False, "H1", False, "O1", False, "M1", False],
[2, False, "K1", False, "T1", False, "H1", False, "O1", False, "M1"],
[3, "T2", False, "H2", False, "O2", False, "M2", False, "K2", False],
[4, False, "T2", False, "H2", False, "O2", False, "M2", False, "K2"],
[5, "H3", False, "O3", False, "M3", False, "K3", False, "T3", False],
[6, False, "H3", False, "O3", False, "M3", False, "K3", False, "T3"],
[7, "K4", False, "T4", False, "H4", False, "O4", False, "M4", False],
[8, False, "K4", False, "T4", False, "H4", False, "O4", False, "M4"],
[9, "T5", False, "H5", False, "O5", False, "M5", False, "K5", False],
[10, False, "T5", False, "H5", False, "O5", False, "M5", False, "K5"],
[11, "H6", False, "O6", False, "M6", False, "K6", False, "T6", False],
[12, False, "H6", False, "O6", False, "M6", False, "K6", False, "T6"]
]
try:
nh = matranNapAm[diaChi][thienCan]
if nh[0] in ["K", "M", "T", "H", "O"]:
if xuatBanMenh is True:
return banMenh[nh]
else:
return nh[0]
except (IndexError, TypeError):
raise Exception(nguHanhNapAm.__doc__)
|
Use the nạp âm Five Elements method (Ngũ Hành nạp âm) to determine the element (Hành) of a year.
Args:
diaChi (integer): ordinal of the earthly branch (Tý=1, Sửu=2, ...)
thienCan (integer): ordinal of the heavenly stem (Giáp=1, Ất=2, ...)
Returns:
The abbreviated element of the year (K, T, H, O, M)
|
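Worked example: the year Giáp Tý (diaChi=1, thienCan=1) maps to cell "K1" of the matrix, i.e. HẢI TRUNG KIM, element Kim:

nguHanhNapAm(1, 1)                    # -> 'K'
nguHanhNapAm(1, 1, xuatBanMenh=True)  # -> 'HẢI TRUNG KIM'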
def process_internal_commands(self):
'''This function processes internal commands
'''
with self._main_lock:
self.check_output_redirect()
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
reset_cache = not self._running_thread_ids
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.')
elif is_thread_alive(t):
if reset_cache:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
clear_cached_thread_id(t)
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
self.notify_thread_created(thread_id, t, use_lock=False)
# Compute and notify about threads which are no longer alive.
thread_ids = list(self._running_thread_ids.keys())
for thread_id in thread_ids:
if thread_id not in program_threads_alive:
program_threads_dead.append(thread_id)
for thread_id in program_threads_dead:
self.notify_thread_not_alive(thread_id, use_lock=False)
# Without self._lock_running_thread_ids
if len(program_threads_alive) == 0:
self.finish_debugging_session()
for t in all_threads:
if hasattr(t, 'do_kill_pydev_thread'):
t.do_kill_pydev_thread()
else:
# Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
# acquired at this point as it could lead to a deadlock if some command evaluated tried to
# create a thread and wait for it -- which would try to notify about it getting that lock).
curr_thread_id = get_current_thread_id(threadingCurrentThread())
for thread_id in (curr_thread_id, '*'):
queue = self.get_internal_queue(thread_id)
# some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
pydev_log.verbose("processing internal command ", int_cmd)
int_cmd.do_it(self)
else:
pydev_log.verbose("NOT processing internal command ", int_cmd)
cmds_to_add_back.append(int_cmd)
except _queue.Empty: # @UndefinedVariable
# this is how we exit
for int_cmd in cmds_to_add_back:
queue.put(int_cmd)
|
This function processes internal commands
|
def decode(token, key, algorithms=None, options=None, audience=None,
issuer=None, subject=None, access_token=None):
"""Verifies a JWT string's signature and validates reserved claims.
Args:
token (str): A signed JWS to be verified.
key (str or dict): A key to attempt to verify the payload with. Can be
individual JWK or JWK set.
algorithms (str or list): Valid algorithms that should be used to verify the JWS.
audience (str): The intended audience of the token. If the "aud" claim is
included in the claim set, then the audience must be included and must equal
the provided claim.
issuer (str or iterable): Acceptable value(s) for the issuer of the token.
If the "iss" claim is included in the claim set, then the issuer must be
given and the claim in the token must be among the acceptable values.
subject (str): The subject of the token. If the "sub" claim is
included in the claim set, then the subject must be included and must equal
the provided claim.
access_token (str): An access token string. If the "at_hash" claim is included in the
claim set, then the access_token must be included, and it must match
the "at_hash" claim.
options (dict): A dictionary of options for skipping validation steps.
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
Returns:
dict: The dict representation of the claims set, assuming the signature is valid
and all requested data validation passes.
Raises:
JWTError: If the signature is invalid in any way.
ExpiredSignatureError: If the signature has expired.
JWTClaimsError: If any claim is invalid in any way.
Examples:
>>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
>>> jwt.decode(payload, 'secret', algorithms='HS256')
"""
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
if options:
defaults.update(options)
verify_signature = defaults.get('verify_signature', True)
try:
payload = jws.verify(token, key, algorithms, verify=verify_signature)
except JWSError as e:
raise JWTError(e)
# Needed for at_hash verification
algorithm = jws.get_unverified_header(token)['alg']
try:
claims = json.loads(payload.decode('utf-8'))
except ValueError as e:
raise JWTError('Invalid payload string: %s' % e)
if not isinstance(claims, Mapping):
raise JWTError('Invalid payload string: must be a json object')
_validate_claims(claims, audience=audience, issuer=issuer,
subject=subject, algorithm=algorithm,
access_token=access_token,
options=defaults)
return claims
|
Verifies a JWT string's signature and validates reserved claims.
Args:
token (str): A signed JWS to be verified.
key (str or dict): A key to attempt to verify the payload with. Can be
individual JWK or JWK set.
algorithms (str or list): Valid algorithms that should be used to verify the JWS.
audience (str): The intended audience of the token. If the "aud" claim is
included in the claim set, then the audience must be included and must equal
the provided claim.
issuer (str or iterable): Acceptable value(s) for the issuer of the token.
If the "iss" claim is included in the claim set, then the issuer must be
given and the claim in the token must be among the acceptable values.
subject (str): The subject of the token. If the "sub" claim is
included in the claim set, then the subject must be included and must equal
the provided claim.
access_token (str): An access token string. If the "at_hash" claim is included in the
claim set, then the access_token must be included, and it must match
the "at_hash" claim.
options (dict): A dictionary of options for skipping validation steps.
defaults = {
'verify_signature': True,
'verify_aud': True,
'verify_iat': True,
'verify_exp': True,
'verify_nbf': True,
'verify_iss': True,
'verify_sub': True,
'verify_jti': True,
'verify_at_hash': True,
'require_aud': False,
'require_iat': False,
'require_exp': False,
'require_nbf': False,
'require_iss': False,
'require_sub': False,
'require_jti': False,
'require_at_hash': False,
'leeway': 0,
}
Returns:
dict: The dict representation of the claims set, assuming the signature is valid
and all requested data validation passes.
Raises:
JWTError: If the signature is invalid in any way.
ExpiredSignatureError: If the signature has expired.
JWTClaimsError: If any claim is invalid in any way.
Examples:
>>> payload = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhIjoiYiJ9.jiMyrsmD8AoHWeQgmxZ5yq8z0lXS67_QGs52AzC8Ru8'
>>> jwt.decode(payload, 'secret', algorithms='HS256')
|
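A round-trip sketch using the python-jose API the docstring example comes from:

from jose import jwt

token = jwt.encode({'a': 'b'}, 'secret', algorithm='HS256')
claims = jwt.decode(token, 'secret', algorithms='HS256')
assert claims == {'a': 'b'}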
def same_page(c):
"""Return true if all the components of c are on the same page of the document.
Page numbers are based on the PDF rendering of the document. If a PDF file is
provided, it is used. Otherwise, if only a HTML/XML document is provided, a
PDF is created and then used to determine the page number of a Mention.
:param c: The candidate to evaluate
:rtype: boolean
"""
return all(
[
_to_span(c[i]).sentence.is_visual()
and bbox_from_span(_to_span(c[i])).page
== bbox_from_span(_to_span(c[0])).page
for i in range(len(c))
]
)
|
Return true if all the components of c are on the same page of the document.
Page numbers are based on the PDF rendering of the document. If a PDF file is
provided, it is used. Otherwise, if only a HTML/XML document is provided, a
PDF is created and then used to determine the page number of a Mention.
:param c: The candidate to evaluate
:rtype: boolean
|
def add_component(self, entity: int, component_instance: Any) -> None:
"""Add a new Component instance to an Entity.
Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache()
|
Add a new Component instance to an Entity.
Add a Component instance to an Entity. If a Component of the same type
is already assigned to the Entity, it will be replaced.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
|
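A minimal stand-in showing the two indexes the method maintains (the World class here is hypothetical; the real container tracks more state). It reuses the add_component function above as a method:

from typing import Any   # needed by the add_component signature above

class Position:
    def __init__(self, x: int, y: int) -> None:
        self.x, self.y = x, y

class World:
    def __init__(self) -> None:
        self._components = {}   # component type -> set of entity ids
        self._entities = {}     # entity id -> {component type: instance}
    def clear_cache(self) -> None:
        pass                    # the real class invalidates query caches here
    add_component = add_component

world = World()
world.add_component(1, Position(0, 0))
assert world._components[Position] == {1}
assert isinstance(world._entities[1][Position], Position)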
def fetchone(table, cols="*", where=(), group="", order=(), limit=(), **kwargs):
"""Convenience wrapper for database SELECT and fetch one."""
return select(table, cols, where, group, order, limit, **kwargs).fetchone()
|
Convenience wrapper for database SELECT and fetch one.
|
def policy_map_clss_set_set_dscp_dscp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
po_name_key = ET.SubElement(policy_map, "po-name")
po_name_key.text = kwargs.pop('po_name')
clss = ET.SubElement(policy_map, "class")
cl_name_key = ET.SubElement(clss, "cl-name")
cl_name_key.text = kwargs.pop('cl_name')
set = ET.SubElement(clss, "set")
set_dscp = ET.SubElement(set, "set_dscp")
dscp = ET.SubElement(set_dscp, "dscp")
dscp.text = kwargs.pop('dscp')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def _find_lang(langdict, lang, script, region):
"""Return the entry in the dictionary for the given language information."""
# Check if we should map this to a different locale.
full_locale = _full_locale(lang, script, region)
if (full_locale in _LOCALE_NORMALIZATION_MAP and
_LOCALE_NORMALIZATION_MAP[full_locale] in langdict):
return langdict[_LOCALE_NORMALIZATION_MAP[full_locale]]
# First look for the full locale
if full_locale in langdict:
return langdict[full_locale]
# Then look for lang, script as a combination
if script is not None:
lang_script = "%s_%s" % (lang, script)
if lang_script in langdict:
return langdict[lang_script]
# Next look for lang, region as a combination
if region is not None:
lang_region = "%s_%s" % (lang, region)
if lang_region in langdict:
return langdict[lang_region]
# Fall back to bare language code lookup
if lang in langdict:
return langdict[lang]
# Possibly fall back to english
if _may_fall_back_to_english(lang):
return langdict.get("en", None)
else:
return None
|
Return the entry in the dictionary for the given language information.
|
def get_node_by_path(self, path):
"""Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
"""
if path==".":
return self
elif not isinstance(path, str) or path.lstrip().startswith("."):
logger.warning("%s.get_node_by_path: arg «path»=«%s», not correctly specified." % (self.__class__.__name__, path))
return None
_pathlist = list(filter(None, path.split("/")) ) # remove blank strings
if path.startswith("/"):
_node = self._root
_pathlist.pop(0) # remove rootnode name
else:
_node = self
for _nodename in _pathlist:
_node = _node.get_child_by_name(_nodename)
if _node is None:
logger.warning("%s.get_node_by_path: node«%s», arg `path`=«%s», cannot find node." % (self.__class__.__name__, self.name, path))
return None
return _node
|
Get a node from a node path.
Warning: use of this method assumes that sibling nodes have unique names,
if this is not assured the `get_node_by_coord` method can be used instead.
| Example with absolute node path:
| `node.get_node_by_path('/root.name/child.name/gchild.name')`
| Example with relative node path:
| `node.get_node_by_path('child.name/gchild.name')`
:param path: the absolute node path, or the node path relative
to the current node instance.
:type path: str
:returns: the node corresponding to `path`.
:rtype: Node or None
|
def enable_inheritance(path, objectType, clear=False):
'''
Enable inheritance on an object
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
clear: True will remove non-Inherited ACEs from the ACL
Returns (dict): A dictionary containing the results
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.enable_inheritance c:\temp directory
'''
dc = daclConstants()
objectType = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectType)
return _set_dacl_inheritance(path, objectType, True, None, clear)
|
Enable inheritance on an object
Args:
path: The path to the object
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
clear: True will remove non-Inherited ACEs from the ACL
Returns (dict): A dictionary containing the results
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.enable_inheritance c:\temp directory
|
def print_infos(results):
"""Print informations in standard output
:param ReportResults results: the report result containing all compiled informations
"""
print('transactions: %i' % results.total_transactions)
print('timers: %i' % results.total_timers)
print('errors: %i' % results.total_errors)
print('test start: %s' % results.start_datetime)
print('test finish: %s\n' % results.finish_datetime)
|
Print information to standard output
:param ReportResults results: the report result containing all compiled information
|
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show()
|
make an interactive grid in which users can edit ages
|
def select_serial_number_row(self, serial_number):
"""Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame
"""
sheet = self.table
col = self.db_sheet_cols.id
rows = sheet.loc[:, col] == serial_number
return sheet.loc[rows, :]
|
Select row for identification number serial_number
Args:
serial_number: serial number
Returns:
pandas.DataFrame
|
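The boolean-mask row selection used above, in isolation (the sample frame and column name are placeholders):

import pandas as pd

sheet = pd.DataFrame({'id': [1, 2, 2], 'value': ['a', 'b', 'c']})
rows = sheet.loc[:, 'id'] == 2   # boolean Series marking matching rows
print(sheet.loc[rows, :])        # the two rows whose id equals 2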
def find_library_full_path(name):
"""
Similar to `from ctypes.util import find_library`, but try
to return full path if possible.
"""
from ctypes.util import find_library
if os.name == "posix" and sys.platform == "darwin":
# on Mac, ctypes already returns full path
return find_library(name)
def _use_proc_maps(name):
"""
Find so from /proc/pid/maps
Only works with libraries that has already been loaded.
But this is the most accurate method -- it finds the exact library that's being used.
"""
procmap = os.path.join('/proc', str(os.getpid()), 'maps')
if not os.path.isfile(procmap):
return None
with open(procmap, 'r') as f:
for line in f:
line = line.strip().split(' ')
sofile = line[-1]
basename = os.path.basename(sofile)
if 'lib' + name + '.so' in basename:
if os.path.isfile(sofile):
return os.path.realpath(sofile)
# The following two methods come from https://github.com/python/cpython/blob/master/Lib/ctypes/util.py
def _use_ld(name):
"""
Find so with `ld -lname -Lpath`.
It will search for files in LD_LIBRARY_PATH, but not in ldconfig.
"""
cmd = "ld -t -l{} -o {}".format(name, os.devnull)
ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
for d in ld_lib_path.split(':'):
cmd = cmd + " -L " + d
result, ret = subproc_call(cmd + '|| true')
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
res = re.search(expr, result.decode('utf-8'))
if res:
res = res.group(0)
if not os.path.isfile(res):
return None
return os.path.realpath(res)
def _use_ldconfig(name):
"""
Find so in `ldconfig -p`.
It does not handle LD_LIBRARY_PATH.
"""
with change_env('LC_ALL', 'C'), change_env('LANG', 'C'):
ldconfig, ret = subproc_call("ldconfig -p")
ldconfig = ldconfig.decode('utf-8')
if ret != 0:
return None
expr = r'\s+(lib%s\.[^\s]+)\s+\(.*=>\s+(.*)' % (re.escape(name))
res = re.search(expr, ldconfig)
if not res:
return None
else:
ret = res.group(2)
return os.path.realpath(ret)
if sys.platform.startswith('linux'):
return _use_proc_maps(name) or _use_ld(name) or _use_ldconfig(name) or find_library(name)
return find_library(name)
|
Similar to `from ctypes.util import find_library`, but try
to return full path if possible.
|
def load_include_path(paths):
"""
Scan for and add paths to the include path
"""
for path in paths:
# Verify the path is valid
if not os.path.isdir(path):
continue
# Add path to the system path, to avoid name clashes
# with mysql-connector for example ...
if path not in sys.path:
sys.path.insert(1, path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
load_include_path([fpath])
|
Scan for and add paths to the include path
|
def _wait_for_disk_threads(self, terminate):
# type: (Uploader, bool) -> None
"""Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads
"""
if terminate:
self._upload_terminate = terminate
for thr in self._disk_threads:
thr.join()
|
Wait for disk threads
:param Uploader self: this
:param bool terminate: terminate threads
|
def _ctype_key_value(keys, vals):
"""
Returns ctype arrays for the key-value args, and whether string keys are used.
For internal use only.
"""
if isinstance(keys, (tuple, list)):
assert(len(keys) == len(vals))
c_keys = []
c_vals = []
use_str_keys = None
for key, val in zip(keys, vals):
c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
c_keys += c_key_i
c_vals += c_val_i
use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
else c_array(ctypes.c_int, c_keys)
c_vals_arr = c_array(ctypes.c_void_p, c_vals)
return (c_keys_arr, c_vals_arr, use_str_keys)
assert(isinstance(keys, (int,) + string_types)), \
"unexpected type for keys: " + str(type(keys))
use_str_keys = isinstance(keys, string_types)
if isinstance(vals, NDArray):
c_keys = c_str_array([keys]) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys]))
return (c_keys, c_handle_array([vals]), use_str_keys)
else:
for value in vals:
assert(isinstance(value, NDArray))
c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
return (c_keys, c_handle_array(vals), use_str_keys)
|
Returns ctype arrays for the key-value args, and whether string keys are used.
For internal use only.
|
def submitter(self, f):
"""Decorator to submit a coro-function as NewTask to self.loop with sem control.
Use default_callback frequency of loop."""
f = self._wrap_coro_function_with_sem(f)
@wraps(f)
def wrapped(*args, **kwargs):
return self.submit(f(*args, **kwargs))
return wrapped
|
Decorator to submit a coro-function as NewTask to self.loop with sem control.
Use default_callback frequency of loop.
|
def calculate_size(name, entry_processor):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_data(entry_processor)
return data_size
|
Calculates the request payload size
|
def get_relaxation(self, A_configuration, B_configuration, I):
"""Get the sparse SDP relaxation of a Bell inequality.
:param A_configuration: The definition of measurements of Alice.
:type A_configuration: list of list of int.
:param B_configuration: The definition of measurements of Bob.
:type B_configuration: list of list of int.
:param I: The matrix describing the Bell inequality in the
Collins-Gisin picture.
:type I: list of list of int.
"""
coefficients = collinsgisin_to_faacets(I)
M, ncIndices = get_faacets_moment_matrix(A_configuration,
B_configuration, coefficients)
self.n_vars = M.max() - 1
bs = len(M) # The block size
self.block_struct = [bs]
self.F = lil_matrix((bs**2, self.n_vars + 1))
# Constructing the internal representation of the constraint matrices
# See Section 2.1 in the SDPA manual and also Yalmip's internal
# representation
for i in range(bs):
for j in range(i, bs):
if M[i, j] != 0:
self.F[i*bs+j, abs(M[i, j])-1] = copysign(1, M[i, j])
self.obj_facvar = [0 for _ in range(self.n_vars)]
for i in range(1, len(ncIndices)):
self.obj_facvar[abs(ncIndices[i])-2] += \
copysign(1, ncIndices[i])*coefficients[i]
|
Get the sparse SDP relaxation of a Bell inequality.
:param A_configuration: The definition of measurements of Alice.
:type A_configuration: list of list of int.
:param B_configuration: The definition of measurements of Bob.
:type B_configuration: list of list of int.
:param I: The matrix describing the Bell inequality in the
Collins-Gisin picture.
:type I: list of list of int.
|
def from_veto_def(cls, veto):
"""Define a `DataQualityFlag` from a `VetoDef`
Parameters
----------
veto : :class:`~ligo.lw.lsctables.VetoDef`
veto definition to convert from
"""
name = '%s:%s' % (veto.ifo, veto.name)
try:
name += ':%d' % int(veto.version)
except TypeError:
pass
if veto.end_time == 0:
veto.end_time = +inf
known = Segment(veto.start_time, veto.end_time)
pad = (veto.start_pad, veto.end_pad)
return cls(name=name, known=[known], category=veto.category,
description=veto.comment, padding=pad)
|
Define a `DataQualityFlag` from a `VetoDef`
Parameters
----------
veto : :class:`~ligo.lw.lsctables.VetoDef`
veto definition to convert from
|
def verify(self, **kwargs):
"""
Verifies that an instance of this class adheres to the given
restrictions.
:param kwargs: A set of keyword arguments
:return: True if it verifies OK otherwise False.
"""
super(MetadataStatement, self).verify(**kwargs)
if "signing_keys" in self:
if 'signing_keys_uri' in self:
raise VerificationError(
'You can only have one of "signing_keys" and '
'"signing_keys_uri" in a metadata statement')
else:
# signing_keys MUST be a JWKS
kj = KeyJar()
try:
kj.import_jwks(self['signing_keys'], '')
except Exception:
raise VerificationError('"signing_keys" not a proper JWKS')
if "metadata_statements" in self and "metadata_statement_uris" in self:
s = set(self['metadata_statements'].keys())
t = set(self['metadata_statement_uris'].keys())
if s.intersection(t):
raise VerificationError(
'You should not have the same key in "metadata_statements" '
'and in "metadata_statement_uris"')
return True
|
Verifies that an instance of this class adheres to the given
restrictions.
:param kwargs: A set of keyword arguments
:return: True if it verifies OK otherwise False.
|
def _add_tag_manifest_file(zip_file, dir_name, tag_info_list):
"""Generate the tag manifest file and add it to the zip."""
_add_tag_file(
zip_file, dir_name, tag_info_list, _gen_tag_manifest_file_tup(tag_info_list)
)
|
Generate the tag manifest file and add it to the zip.
|
def prepare_all_data(data_dir, block_pct_tokens_thresh=0.1):
"""
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
"""
gs_blocks_dir = os.path.join(data_dir, GOLD_STANDARD_BLOCKS_DIRNAME)
gs_blocks_filenames = get_filenames(
gs_blocks_dir, full_path=False, match_regex=re.escape(GOLD_STANDARD_BLOCKS_EXT))
gs_blocks_fileroots = (
re.search(r'(.+)' + re.escape(GOLD_STANDARD_BLOCKS_EXT), gs_blocks_filename).group(1)
for gs_blocks_filename in gs_blocks_filenames)
return [prepare_data(data_dir, fileroot, block_pct_tokens_thresh)
for fileroot in gs_blocks_fileroots]
|
Prepare data for all HTML + gold standard blocks examples in ``data_dir``.
Args:
data_dir (str)
block_pct_tokens_thresh (float): must be in [0.0, 1.0]
Returns:
List[Tuple[str, List[float, int, List[str]], List[float, int, List[str]]]]
See Also:
:func:`prepare_data`
|
def register(self, perm_func=None, model=None, allow_staff=None, allow_superuser=None,
allow_anonymous=None, unauthenticated_handler=None, request_types=None, name=None,
replace=False, _return_entry=False):
"""Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``.
"""
allow_staff = _default(allow_staff, self._allow_staff)
allow_superuser = _default(allow_superuser, self._allow_superuser)
allow_anonymous = _default(allow_anonymous, self._allow_anonymous)
unauthenticated_handler = _default(unauthenticated_handler, self._unauthenticated_handler)
request_types = _default(request_types, self._request_types)
if perm_func is None:
return (
lambda perm_func_:
self.register(
perm_func_, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, name, replace, _return_entry)
)
name = _default(name, perm_func.__name__)
if name == 'register':
raise PermissionsError('register cannot be used as a permission name')
elif name in self._registry and not replace:
raise DuplicatePermissionError(name)
view_decorator = self._make_view_decorator(
name, perm_func, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types)
entry = Entry(
name, perm_func, view_decorator, model, allow_staff, allow_superuser, allow_anonymous,
unauthenticated_handler, request_types, set())
self._registry[name] = entry
@wraps(perm_func)
def wrapped_func(user, instance=NO_VALUE):
if user is None:
return False
if not allow_anonymous and user.is_anonymous():
return False
test = lambda: perm_func(user) if instance is NO_VALUE else perm_func(user, instance)
return (
allow_staff and user.is_staff or
allow_superuser and user.is_superuser or
test()
)
register.filter(name, wrapped_func)
log.debug('Registered permission: {0}'.format(name))
return entry if _return_entry else wrapped_func
|
Register permission function & return the original function.
This is typically used as a decorator::
permissions = PermissionsRegistry()
@permissions.register
def can_do_something(user):
...
For internal use only: you can pass ``_return_entry=True`` to
have the registry :class:`.Entry` returned instead of
``perm_func``.
|
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
|
A decorator. It performs memoization ignoring the arguments used to call
the function.
|
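An illustration of the "ignore the arguments" behaviour; _MEMOIZED_NOARGS is the module-level cache the decorator assumes:

_MEMOIZED_NOARGS = {}

@memoized_ignoreargs
def expensive(x):
    print('computing')
    return x

expensive(1)   # prints 'computing' and returns 1
expensive(2)   # no print; still returns 1, because the arguments are ignored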
def calc_rate_susceptibility(self, rate_std=None, params=None):
"""return the time tree estimation of evolutionary rates +/- one
standard deviation from the ML estimate.
Returns
-------
TreeTime.return_code : str
success or failure
"""
params = params or {}
if rate_std is None:
if not (self.clock_model['valid_confidence'] and 'cov' in self.clock_model):
self.logger("ClockTree.calc_rate_susceptibility: need valid standard deviation of the clock rate to estimate dating error.", 1, warn=True)
return ttconf.ERROR
rate_std = np.sqrt(self.clock_model['cov'][0,0])
current_rate = np.abs(self.clock_model['slope'])
upper_rate = self.clock_model['slope'] + rate_std
lower_rate = max(0.1*current_rate, self.clock_model['slope'] - rate_std)
for n in self.tree.find_clades():
if n.up:
n._orig_gamma = n.branch_length_interpolator.gamma
n.branch_length_interpolator.gamma = n._orig_gamma*upper_rate/current_rate
self.logger("###ClockTree.calc_rate_susceptibility: run with upper bound of rate estimate", 1)
self.make_time_tree(**params)
self.logger("###ClockTree.calc_rate_susceptibility: rate: %f, LH:%f"%(upper_rate, self.tree.positional_joint_LH), 2)
for n in self.tree.find_clades():
n.numdate_rate_variation = [(upper_rate, n.numdate)]
if n.up:
n.branch_length_interpolator.gamma = n._orig_gamma*lower_rate/current_rate
self.logger("###ClockTree.calc_rate_susceptibility: run with lower bound of rate estimate", 1)
self.make_time_tree(**params)
self.logger("###ClockTree.calc_rate_susceptibility: rate: %f, LH:%f"%(lower_rate, self.tree.positional_joint_LH), 2)
for n in self.tree.find_clades():
n.numdate_rate_variation.append((lower_rate, n.numdate))
if n.up:
n.branch_length_interpolator.gamma = n._orig_gamma
self.logger("###ClockTree.calc_rate_susceptibility: run with central rate estimate", 1)
self.make_time_tree(**params)
self.logger("###ClockTree.calc_rate_susceptibility: rate: %f, LH:%f"%(current_rate, self.tree.positional_joint_LH), 2)
for n in self.tree.find_clades():
n.numdate_rate_variation.append((current_rate, n.numdate))
n.numdate_rate_variation.sort(key=lambda x:x[1]) # sort estimates for different rates by numdate
return ttconf.SUCCESS
|
return the time tree estimation of evolutionary rates +/- one
standard deviation from the ML estimate.
Returns
-------
TreeTime.return_code : str
success or failure
|
def add_subsegment(self, subsegment):
"""
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter.
"""
super(Segment, self).add_subsegment(subsegment)
self.increment()
|
Add input subsegment as a child subsegment and increment
reference counter and total subsegments counter.
|
def is_in_plane(self, pp, dist_tolerance):
"""
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
"""
return np.abs(np.dot(self.normal_vector, pp) + self._coefficients[3]) <= dist_tolerance
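# Standalone sketch of the same test, assuming a plane described by a unit
# normal `n` and offset `d` (points on the plane satisfy n . p + d == 0),
# which is what `self.normal_vector` and `self._coefficients[3]` hold above:
import numpy as np

def point_in_plane(n, d, pp, dist_tolerance):
    # |n . pp + d| is the distance of pp from the plane when n is a unit vector
    return np.abs(np.dot(n, pp) + d) <= dist_tolerance

n = np.array([0.0, 0.0, 1.0])  # the plane z == 2, i.e. z - 2 == 0
assert point_in_plane(n, -2.0, np.array([5.0, 1.0, 2.001]), 0.01)
assert not point_in_plane(n, -2.0, np.array([0.0, 0.0, 3.0]), 0.01)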
|
Determines if point pp is in the plane within the tolerance dist_tolerance
:param pp: point to be tested
:param dist_tolerance: tolerance on the distance to the plane within which point pp is considered in the plane
:return: True if pp is in the plane, False otherwise
|
def _handle_get_application_request(self, app_id, semver, key, logical_id):
"""
Method that handles the get_application API call to the serverless application repo
    This method records an entry in the `_applications` dictionary because the
    plugin expects one to be present during a later event.
:param string app_id: ApplicationId
:param string semver: SemanticVersion
:param string key: The dictionary key consisting of (ApplicationId, SemanticVersion)
:param string logical_id: the logical_id of this application resource
"""
get_application = (lambda app_id, semver: self._sar_client.get_application(
ApplicationId=self._sanitize_sar_str_param(app_id),
SemanticVersion=self._sanitize_sar_str_param(semver)))
try:
self._sar_service_call(get_application, logical_id, app_id, semver)
self._applications[key] = {'Available'}
except EndpointConnectionError as e:
# No internet connection. Don't break verification, but do show a warning.
warning_message = "{}. Unable to verify access to {}/{}.".format(e, app_id, semver)
logging.warning(warning_message)
self._applications[key] = {'Unable to verify'}
|
Method that handles the get_application API call to the serverless application repo
This method records an entry in the `_applications` dictionary because the
plugin expects one to be present during a later event.
:param string app_id: ApplicationId
:param string semver: SemanticVersion
:param string key: The dictionary key consisting of (ApplicationId, SemanticVersion)
:param string logical_id: the logical_id of this application resource
|
def remove_pickle_problems(obj):
"""doc_loader does not pickle correctly, causing Toil errors, remove from
objects.
"""
if hasattr(obj, "doc_loader"):
obj.doc_loader = None
if hasattr(obj, "embedded_tool"):
obj.embedded_tool = remove_pickle_problems(obj.embedded_tool)
if hasattr(obj, "steps"):
obj.steps = [remove_pickle_problems(s) for s in obj.steps]
return obj
|
doc_loader does not pickle correctly, causing Toil errors; remove it from
objects.
|
def recv_message(self, debug=False):
"""
    Read the socket and receive a message from the server, checking the CRC32.
"""
if debug:
        packet = self.sock.recv(1024)  # debug: dump up to 1 KB of raw socket data
hexdump(packet)
packet_length_data = self.sock.recv(4) # reads how many bytes to read
if len(packet_length_data) < 4:
raise Exception("Nothing in the socket!")
packet_length = struct.unpack("<I", packet_length_data)[0]
packet = self.sock.recv(packet_length - 4) # read the rest of bytes from socket
# check the CRC32
if not crc32(packet_length_data + packet[0:-4]) == struct.unpack('<I', packet[-4:])[0]:
raise Exception("CRC32 was not correct!")
x = struct.unpack("<I", packet[:4])
auth_key_id = packet[4:12]
if auth_key_id == b'\x00\x00\x00\x00\x00\x00\x00\x00':
# No encryption - Plain text
(message_id, message_length) = struct.unpack("<QI", packet[12:24])
data = packet[24:24+message_length]
    elif auth_key_id == self.auth_key_id:
message_key = packet[12:28]
encrypted_data = packet[28:-4]
aes_key, aes_iv = self.aes_calculate(message_key, direction="from server")
decrypted_data = crypt.ige_decrypt(encrypted_data, aes_key, aes_iv)
assert decrypted_data[0:8] == self.server_salt
assert decrypted_data[8:16] == self.session_id
message_id = decrypted_data[16:24]
seq_no = struct.unpack("<I", decrypted_data[24:28])[0]
message_data_length = struct.unpack("<I", decrypted_data[28:32])[0]
data = decrypted_data[32:32+message_data_length]
else:
raise Exception("Got unknown auth_key id")
return data
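# Self-contained sketch of the length + CRC32 framing this method expects,
# assuming the `crc32` used above behaves like zlib.crc32. A frame is:
# <4-byte LE total length> <payload> <4-byte LE crc32 of everything before it>.
import struct
from zlib import crc32 as _crc32

def frame_message(payload):
    head = struct.pack("<I", 4 + len(payload) + 4) + payload
    return head + struct.pack("<I", _crc32(head))

def check_frame(frame):
    assert struct.unpack("<I", frame[:4])[0] == len(frame), "bad length prefix"
    return _crc32(frame[:-4]) == struct.unpack("<I", frame[-4:])[0]

# a plaintext-style body: zero auth_key_id followed by data
assert check_frame(frame_message(b"\x00" * 8 + b"hello"))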
|
Read the socket and receive a message from the server, checking the CRC32.
|
def inverse_kinematics(
self,
target_position_right,
target_orientation_right,
target_position_left,
target_orientation_left,
rest_poses,
):
"""
Helper function to do inverse kinematics for a given target position and
orientation in the PyBullet world frame.
Args:
target_position_{right, left}: A tuple, list, or numpy array of size 3 for position.
target_orientation_{right, left}: A tuple, list, or numpy array of size 4 for
            an orientation quaternion.
rest_poses: A list of size @num_joints to favor ik solutions close by.
Returns:
A list of size @num_joints corresponding to the joint angle solution.
"""
ndof = 48
ik_solution = list(
p.calculateInverseKinematics(
self.ik_robot,
self.effector_right,
target_position_right,
targetOrientation=target_orientation_right,
restPoses=rest_poses[:7],
lowerLimits=self.lower,
upperLimits=self.upper,
jointRanges=self.ranges,
jointDamping=[0.7] * ndof,
)
)
ik_solution2 = list(
p.calculateInverseKinematics(
self.ik_robot,
self.effector_left,
target_position_left,
targetOrientation=target_orientation_left,
restPoses=rest_poses[7:],
lowerLimits=self.lower,
upperLimits=self.upper,
jointRanges=self.ranges,
jointDamping=[0.7] * ndof,
)
)
for i in range(8, 15):
ik_solution[i] = ik_solution2[i]
return ik_solution[1:]
|
Helper function to do inverse kinematics for a given target position and
orientation in the PyBullet world frame.
Args:
target_position_{right, left}: A tuple, list, or numpy array of size 3 for position.
target_orientation_{right, left}: A tuple, list, or numpy array of size 4 for
an orientation quaternion.
rest_poses: A list of size @num_joints to favor ik solutions close by.
Returns:
A list of size @num_joints corresponding to the joint angle solution.
|
def open(self):
'''Open a connection to the database.
If a connection appears to be open already, transactions are committed
and it is closed before proceeding. After establishing the connection,
the searchIndex table is prepared (and dropped if it already exists).
'''
if self.conn is not None:
self.close()
self.conn = sqlite3.connect(self.filename)
self.cursor = self.conn.cursor()
c = self.cursor
c.execute('SELECT name FROM sqlite_master WHERE type="table"')
if (u'searchIndex',) in c:
c.execute('DROP TABLE searchIndex')
if self.verbose:
print('Dropped existing table', file=sys.stderr)
c.executescript(
'''
CREATE TABLE searchIndex
(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);
CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);
'''
)
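# Hedged usage sketch (hypothetical helper, not part of this class): add one
# entry to the searchIndex table prepared by open(). The UNIQUE index on
# (name, type, path) makes INSERT OR IGNORE a natural way to skip duplicates.
def add_entry(cursor, name, type_, path):
    cursor.execute(
        'INSERT OR IGNORE INTO searchIndex (name, type, path) VALUES (?, ?, ?)',
        (name, type_, path))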
|
Open a connection to the database.
If a connection appears to be open already, transactions are committed
and it is closed before proceeding. After establishing the connection,
the searchIndex table is prepared (and dropped if it already exists).
|
def getValidCertifications(self):
""" Returns the certifications fully valid
"""
certs = []
today = date.today()
for c in self.getCertifications():
validfrom = c.getValidFrom() if c else None
validto = c.getValidTo() if validfrom else None
if not validfrom or not validto:
continue
validfrom = validfrom.asdatetime().date()
validto = validto.asdatetime().date()
if (today >= validfrom and today <= validto):
certs.append(c)
return certs
|
Returns the certifications that are currently fully valid
|
def choice_input(options=None, prompt='Press ENTER to continue.',
                 showopts=True, qopt=False):
    """Get input from a list of choices (q to quit)"""
    # Avoid a mutable default argument; None means "no options were given".
    options = [] if options is None else options
    choice = None
if showopts:
prompt = prompt + ' ' + str(options)
if qopt:
prompt = prompt + ' (q to quit)'
while not choice:
try:
choice = string_input(prompt + ' ')
except SyntaxError:
if options == []:
pass
if choice:
if choice in options:
return choice
            elif qopt and choice == 'q':
choice = None
is_sure = string_input('Are you sure you want to quit? ')
if is_sure in ('Y', 'y', 'yes'):
exit('\nThanks for playing. Goodbye.\n')
elif options == []:
return 0
else:
print('Answer must be one of ' + str(options) +
'. Your answer?')
if options:
choice = None
elif options == []:
return 0
else:
print('Answer must be one of ' + str(options) +
'. Your answer?')
|
Get input from a list of choices (q to quit)
|
def vectorizable_features(fcs):
'''Discovers the ordered set of vectorizable features in ``fcs``.
Returns a list of feature names, sorted lexicographically.
Feature names are only included if the corresponding
features are vectorizable (i.e., they are an instance of
:class:`collections.Mapping`).
'''
is_mapping = lambda obj: isinstance(obj, collections.Mapping)
return sorted(set([name for fc in fcs for name in fc if is_mapping(fc[name])]))
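# Usage sketch: only mapping-valued features survive, de-duplicated and sorted.
# (Note: on Python >= 3.3 the Mapping ABC lives in collections.abc; the bare
# collections.Mapping alias used above was removed in Python 3.10.)
fcs = [
    {'bow': {'the': 2, 'cat': 1}, 'title': 'a plain string'},
    {'bow': {'dog': 1}, 'entities': {'cat': 1}},
]
assert vectorizable_features(fcs) == ['bow', 'entities']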
|
Discovers the ordered set of vectorizable features in ``fcs``.
Returns a list of feature names, sorted lexicographically.
Feature names are only included if the corresponding
features are vectorizable (i.e., they are an instance of
:class:`collections.Mapping`).
|
def get_remote_url(path, remote="origin"):
"""
Run git config --get remote.<remote>.url in path.
:param path: Path where git is to be run
:param remote: Remote name
:return: str or None
"""
path = get_path(path)
cmd = ["config", "--get", "remote.%s.url" % remote]
return __run_git(cmd, path)[0]
|
Run git config --get remote.<remote>.url in path.
:param path: Path where git is to be run
:param remote: Remote name
:return: str or None
|
def visit_Assign(self, node):
"""
Implement assignment walker.
Parse class properties defined via the property() function
"""
# [[[cog
# cog.out("print(pcolor('Enter assign visitor', 'magenta'))")
# ]]]
# [[[end]]]
# ###
# Class-level assignment may also be a class attribute that is not
# a managed attribute, record it anyway, no harm in doing so as it
# is not attached to a callable
if self._in_class(node):
element_full_name = self._pop_indent_stack(node, "prop")
code_id = (self._fname, node.lineno)
self._processed_line = node.lineno
self._callables_db[element_full_name] = {
"name": element_full_name,
"type": "prop",
"code_id": code_id,
"last_lineno": None,
}
self._reverse_callables_db[code_id] = element_full_name
# [[[cog
# code = """
# print(
# pcolor(
# 'Visiting property {0} @ {1}'.format(
# element_full_name, code_id[1]
# ),
# 'green'
# )
# )
# """
# cog.out(code)
# ]]]
# [[[end]]]
# Get property actions
self.generic_visit(node)
|
Implement assignment walker.
Parse class properties defined via the property() function
|
def methodcall(obj, method_name, *args, **kwargs):
"""Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
"""
this_engine = distob.engine.eid
args = [obj] + list(args)
prefer_local = kwargs.pop('prefer_local', None)
if prefer_local is None:
if isinstance(obj, Remote):
prefer_local = obj.prefer_local
else:
prefer_local = True
block = kwargs.pop('block', True)
execloc, args, kwargs = _process_args(args, kwargs, prefer_local)
if execloc is this_engine:
r = getattr(args[0], method_name)(*args[1:], **kwargs)
else:
if False and prefer_local:
# result cache disabled until issue mattja/distob#1 is fixed
try:
kwtuple = tuple((k, kwargs[k]) for k in sorted(kwargs.keys()))
key = (args[0], method_name, args, kwtuple)
r = _call_cache[key]
except TypeError as te:
if te.args[0][:10] == 'unhashable':
#print("unhashable. won't be able to cache")
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
else:
raise
except KeyError:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
_call_cache[key] = r.r
else:
r = _uncached_methodcall(execloc, args[0], method_name,
*args[1:], **kwargs)
if block:
return convert_result(r)
else:
return r
|
Call a method of `obj`, either locally or remotely as appropriate.
obj may be an ordinary object, or a Remote object (or Ref or object Id)
If there are multiple remote arguments, they must be on the same engine.
kwargs:
prefer_local (bool, optional): Whether to return cached local results if
available, in preference to returning Remote objects. Default is True.
block (bool, optional): Whether remote calls should be synchronous.
If False, returned results may be AsyncResults and should be converted
by the caller using convert_result() before use. Default is True.
|
def read_namespaced_pod_disruption_budget_status(self, name, namespace, **kwargs):
"""
read status of the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_pod_disruption_budget_status_with_http_info(name, namespace, **kwargs)
return data
|
read status of the specified PodDisruptionBudget
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_disruption_budget_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodDisruptionBudget (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1PodDisruptionBudget
If the method is called asynchronously,
returns the request thread.
|
def A(self):
"""Spectral VAR coefficients.
.. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)}
    \mathrm{e}^{-2\pi i k f}
"""
return fft(np.dstack([np.eye(self.m), -self.b]),
self.nfft * 2 - 1)[:, :, :self.nfft]
|
Spectral VAR coefficients.
.. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)}
\mathrm{e}^{-2\pi i k f}
|
def matches_count(count, options):
"""
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
"""
if options.get("count") is not None:
return count == int(options["count"])
if options.get("maximum") is not None and int(options["maximum"]) < count:
return False
if options.get("minimum") is not None and int(options["minimum"]) > count:
return False
if options.get("between") is not None and count not in options["between"]:
return False
return True
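# Usage sketch of the four supported options; an empty options dict accepts
# any count.
assert matches_count(3, {})
assert matches_count(3, {"count": 3})
assert not matches_count(3, {"maximum": 2})
assert matches_count(3, {"minimum": 2, "maximum": 5})
assert not matches_count(3, {"between": range(4, 7)})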
|
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
|
def accumulate_from_superclasses(cls, propname):
''' Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
        propname (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__``
'''
cachename = "__cached_all" + propname
# we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base
# classes, and the cache must be separate for each class
if cachename not in cls.__dict__:
s = set()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
s.update(base)
setattr(cls, cachename, s)
return cls.__dict__[cachename]
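# Standalone sketch of the same accumulate-and-cache pattern without the
# HasProps dependency: union the named attribute over the MRO once, then stash
# the result in the class's own __dict__ so each class keeps a separate cache.
import inspect

def accumulate(cls, propname):
    cachename = "__cached_all" + propname
    if cachename not in cls.__dict__:
        s = set()
        for c in inspect.getmro(cls):
            s.update(getattr(c, propname, ()))
        setattr(cls, cachename, s)
    return cls.__dict__[cachename]

class Base(object):
    props = {"a"}

class Child(Base):
    props = {"b"}

assert accumulate(Child, "props") == {"a", "b"}
assert accumulate(Base, "props") == {"a"}  # cached separately per class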
|
Traverse the class hierarchy and accumulate the special sets of names
``MetaHasProps`` stores on classes:
Args:
propname (str) : name of the special attribute to collect.
Typically meaningful values are: ``__container_props__``,
``__properties__``, ``__properties_with_refs__``
|
def bin_to_edge_slice(s, n):
"""
Convert a bin slice into a bin edge slice.
"""
s = canonify_slice(s, n)
start = s.start
stop = s.stop
if start > stop:
_stop = start + 1
start = stop + 1
stop = _stop
start = max(start - 1, 0)
step = abs(s.step)
if stop <= 1 or start >= n - 1 or stop == start + 1:
return slice(0, None, min(step, n - 2))
s = slice(start, stop, abs(s.step))
if len(range(*s.indices(n - 1))) < 2:
return slice(start, stop, stop - start - 1)
return s
|
Convert a bin slice into a bin edge slice.
|
def zip_strip_namespace(zip_src, namespace, logger=None):
""" Given a namespace, strips 'namespace__' from all files and filenames
in the zip
"""
namespace_prefix = "{}__".format(namespace)
lightning_namespace = "{}:".format(namespace)
zip_dest = zipfile.ZipFile(io.BytesIO(), "w", zipfile.ZIP_DEFLATED)
for name in zip_src.namelist():
orig_content = zip_src.read(name)
try:
orig_content = orig_content.decode("utf-8")
except UnicodeDecodeError:
# if we cannot decode the content, don't try and replace it.
new_content = orig_content
else:
new_content = orig_content.replace(namespace_prefix, "")
new_content = new_content.replace(lightning_namespace, "c:")
name = name.replace(namespace_prefix, "") # not...sure...this..gets...used
if orig_content != new_content and logger:
logger.info(
" {file_name}: removed {namespace}".format(
file_name=name, namespace=namespace_prefix
)
)
new_content = new_content.encode("utf-8")
zip_dest.writestr(name, new_content)
return zip_dest
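# Usage sketch: build an in-memory source zip and strip the "ns" namespace;
# file contents lose "ns__" and "ns:" becomes the default "c:" prefix.
import io
import zipfile

src_buf = io.BytesIO()
with zipfile.ZipFile(src_buf, "w") as zf:
    zf.writestr("classes/Foo.cls", "public class Foo { ns__Field__c f; }")
zip_dest = zip_strip_namespace(zipfile.ZipFile(src_buf, "r"), "ns")
print(zip_dest.namelist())  # ['classes/Foo.cls']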
|
Given a namespace, strips 'namespace__' from all files and filenames
in the zip
|
def tag(name, tag_name):
"""
Tag the named metric with the given tag.
"""
with LOCK:
# just to check if <name> exists
metric(name)
TAGS.setdefault(tag_name, set()).add(name)
|
Tag the named metric with the given tag.
|
def _delete_fw(self, tenant_id, data):
"""Internal routine called when a FW is deleted. """
LOG.debug("In Delete fw data is %s", data)
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub)
if arg_dict.get('router_id') is None:
LOG.error("Router ID unknown for tenant %s", tenant_id)
return False
if tenant_id not in self.tenant_dict:
self.create_tenant_dict(tenant_id, arg_dict.get('router_id'))
ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'down')
if not ret:
return False
ret = self.send_out_router_port_msg(tenant_id, arg_dict, 'down')
if not ret:
return False
# Usually sending message to queue doesn't fail!!!
router_ret = self.delete_intf_router(tenant_id,
arg_dict.get('tenant_name'),
arg_dict.get('router_id'))
if not router_ret:
LOG.error("Unable to delete router for tenant %s, error case",
tenant_id)
return router_ret
del self.tenant_dict[tenant_id]
return router_ret
|
Internal routine called when a FW is deleted.
|
def enc(data, **kwargs):
'''
Alias to `{box_type}_encrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_encrypt(data, **kwargs)
return sealedbox_encrypt(data, **kwargs)
|
Alias to `{box_type}_encrypt`
box_type: secretbox, sealedbox(default)
|
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'ElfFile\x00', offset=0)
return format_specification
|
Retrieves the format specification.
Returns:
FormatSpecification: format specification.
|
def _load_cell(args, schema):
"""Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed.
"""
name = args['destination']
table = _get_table(name)
if not table:
table = datalab.bigquery.Table(name)
if table.exists():
if args['mode'] == 'create':
raise Exception('%s already exists; use --append or --overwrite' % name)
elif schema:
table.create(json.loads(schema))
elif not args['infer']:
raise Exception(
'Table does not exist, no schema specified in cell and no --infer flag; cannot load')
  # TODO(gram): we should probably try to do the schema inference ourselves as BQ doesn't really seem
# to be able to do it. Alternatively we can drop the --infer argument and force the user
# to use a pre-existing table or supply a JSON schema.
csv_options = datalab.bigquery.CSVOptions(delimiter=args['delimiter'],
skip_leading_rows=args['skip'],
allow_jagged_rows=not args['strict'],
quote=args['quote'])
job = table.load(args['source'],
mode=args['mode'],
source_format=('CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON'),
csv_options=csv_options,
ignore_unknown_values=not args['strict'])
if job.failed:
raise Exception('Load failed: %s' % str(job.fatal_error))
elif job.errors:
raise Exception('Load completed with errors: %s' % str(job.errors))
|
Implements the BigQuery load magic used to load data from GCS to a table.
The supported syntax is:
%bigquery load -S|--source <source> -D|--destination <table> <other_args>
Args:
args: the arguments following '%bigquery load'.
schema: a JSON schema for the destination table.
Returns:
A message about whether the load succeeded or failed.
|
def get_run_states(self) -> List[RunState]:
"""Get a list of RunStates from the ZoneMinder API."""
raw_states = self.get_state('api/states.json')
if not raw_states:
_LOGGER.warning("Could not fetch runstates from ZoneMinder")
return []
run_states = []
for i in raw_states['states']:
raw_state = i['State']
_LOGGER.info("Initializing runstate %s", raw_state['Id'])
run_states.append(RunState(self, raw_state))
return run_states
|
Get a list of RunStates from the ZoneMinder API.
|
def save_hex(hex_file, path):
"""
Given a string representation of a hex file, this function copies it to
the specified path thus causing the device mounted at that point to be
flashed.
If the hex_file is empty it will raise a ValueError.
If the filename at the end of the path does not end in '.hex' it will raise
a ValueError.
"""
if not hex_file:
raise ValueError('Cannot flash an empty .hex file.')
if not path.endswith('.hex'):
raise ValueError('The path to flash must be for a .hex file.')
with open(path, 'wb') as output:
output.write(hex_file.encode('ascii'))
|
Given a string representation of a hex file, this function copies it to
the specified path thus causing the device mounted at that point to be
flashed.
If the hex_file is empty it will raise a ValueError.
If the filename at the end of the path does not end in '.hex' it will raise
a ValueError.
|
def reload_solver(self, constraints=None):
"""
Reloads the solver. Useful when changing solver options.
:param list constraints: A new list of constraints to use in the reloaded solver instead of the current one
"""
if constraints is None:
constraints = self._solver.constraints
self._stored_solver = None
self._solver.add(constraints)
|
Reloads the solver. Useful when changing solver options.
:param list constraints: A new list of constraints to use in the reloaded solver instead of the current one
|
def packet_from_xml_packet(xml_pkt, psml_structure=None):
"""
    Gets a TShark XML packet object or string, and returns a pyshark Packet object.
:param xml_pkt: str or xml object.
:param psml_structure: a list of the fields in each packet summary in the psml data. If given, packets will
be returned as a PacketSummary object.
:return: Packet object.
"""
if not isinstance(xml_pkt, lxml.objectify.ObjectifiedElement):
parser = lxml.objectify.makeparser(huge_tree=True)
xml_pkt = lxml.objectify.fromstring(xml_pkt, parser)
if psml_structure:
return _packet_from_psml_packet(xml_pkt, psml_structure)
return _packet_from_pdml_packet(xml_pkt)
|
Gets a TShark XML packet object or string, and returns a pyshark Packet object.
:param xml_pkt: str or xml object.
:param psml_structure: a list of the fields in each packet summary in the psml data. If given, packets will
be returned as a PacketSummary object.
:return: Packet object.
|
def _create_any_group(self, parent_node, name, type_name, instance=None, constructor=None,
args=None, kwargs=None):
"""Generically creates a new group inferring from the `type_name`."""
if args is None:
args = []
if kwargs is None:
kwargs = {}
full_name = self._make_full_name(parent_node.v_full_name, name)
if instance is None:
if constructor is None:
if type_name == RESULT_GROUP:
constructor = ResultGroup
elif type_name == PARAMETER_GROUP:
constructor = ParameterGroup
elif type_name == CONFIG_GROUP:
constructor = ConfigGroup
elif type_name == DERIVED_PARAMETER_GROUP:
constructor = DerivedParameterGroup
elif type_name == GROUP:
constructor = NNGroupNode
else:
raise RuntimeError('You shall not pass!')
instance = self._root_instance._construct_instance(constructor, full_name,
*args, **kwargs)
else:
instance._rename(full_name)
# Check if someone tries to add a particular standard group to a branch where
# it does not belong:
if type_name == RESULT_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('You cannot add a `%s` type of group under results' %
str(type(instance)))
elif type_name == PARAMETER_GROUP:
if type(instance) in (NNGroupNode,
ResultGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('You cannot add a `%s` type of group under parameters' %
str(type(instance)))
elif type_name == CONFIG_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ResultGroup,
DerivedParameterGroup):
raise TypeError('You cannot add a `%s` type of group under config' %
str(type(instance)))
elif type_name == DERIVED_PARAMETER_GROUP:
if type(instance) in (NNGroupNode,
ParameterGroup,
ConfigGroup,
ResultGroup):
raise TypeError('You cannot add a `%s` type of group under derived '
'parameters' % str(type(instance)))
elif type_name == GROUP:
if type(instance) in (ResultGroup,
ParameterGroup,
ConfigGroup,
DerivedParameterGroup):
raise TypeError('You cannot add a `%s` type of group under other data' %
str(type(instance)))
else:
raise RuntimeError('You shall not pass!')
self._set_details_tree_node(parent_node, name, instance)
instance._nn_interface = self
self._root_instance._all_groups[instance.v_full_name] = instance
self._add_to_nodes_and_leaves(instance)
parent_node._children[name] = instance
parent_node._groups[name] = instance
return instance
|
Generically creates a new group inferring from the `type_name`.
|
def model_code_key_prefix(code_location_key_prefix, model_name, image):
"""Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code
"""
training_job_name = sagemaker.utils.name_from_image(image)
return '/'.join(filter(None, [code_location_key_prefix, model_name or training_job_name]))
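# A sketch of the two join branches: filter(None, ...) drops empty parts, and
# the name derived from the image is only used when model_name is falsy.
assert '/'.join(filter(None, ['prefix', 'my-model'])) == 'prefix/my-model'
assert '/'.join(filter(None, [None, 'my-model'])) == 'my-model'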
|
Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code
|
def parse(readDataInstance):
"""Returns a L{DataDirectory}-like object.
@type readDataInstance: L{ReadData}
@param readDataInstance: L{ReadData} object to read from.
@rtype: L{DataDirectory}
@return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.
@raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
"""
if len(readDataInstance) == consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES * 8:
newDataDirectory = DataDirectory()
for i in range(consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES):
newDataDirectory[i].name.value = dirs[i]
newDataDirectory[i].rva.value = readDataInstance.readDword()
newDataDirectory[i].size.value = readDataInstance.readDword()
else:
raise excep.DirectoryEntriesLengthException("The IMAGE_NUMBEROF_DIRECTORY_ENTRIES does not match with the length of the passed argument.")
return newDataDirectory
|
Returns a L{DataDirectory}-like object.
@type readDataInstance: L{ReadData}
@param readDataInstance: L{ReadData} object to read from.
@rtype: L{DataDirectory}
@return: The L{DataDirectory} object containing L{consts.IMAGE_NUMBEROF_DIRECTORY_ENTRIES} L{Directory} objects.
@raise DirectoryEntriesLengthException: The L{ReadData} instance has an incorrect number of L{Directory} objects.
|
def submit_cookbook(self, cookbook, params=None, _extra_params=None):
    """
    Submit a cookbook.
    """
    # Avoid mutable default arguments.
    params = {} if params is None else params
    _extra_params = {} if _extra_params is None else _extra_params
    self._check_user_parameters(params)
files = {'cookbook': cookbook}
return self._submit(params, files, _extra_params=_extra_params)
|
Submit a cookbook.
|
def get(self, request):
""" Returns a json representing the menu voices
in a format eaten by the js menu.
Raised ImproperlyConfigured exceptions can be viewed
in the browser console
"""
self.app_list = site.get_app_list(request)
self.apps_dict = self.create_app_list_dict()
# no menu provided
items = get_config('MENU')
if not items:
voices = self.get_default_voices()
else:
voices = []
for item in items:
self.add_voice(voices, item)
return JsonResponse(voices, safe=False)
|
Returns a json representing the menu voices
in a format eaten by the js menu.
Raised ImproperlyConfigured exceptions can be viewed
in the browser console
|
def convert_examples_to_features(examples, seq_length, tokenizer):
"""Loads a data file into a list of `InputFeature`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
input_type_ids.append(1)
tokens.append("[SEP]")
input_type_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (example.unique_id))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
features.append(
InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids))
return features
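# Minimal sketch of the token/type-id layout built above for a sentence pair
# (plain strings stand in for real wordpieces):
tokens_a, tokens_b = ["is", "this", "ok"], ["yes"]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
input_type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
assert tokens == ["[CLS]", "is", "this", "ok", "[SEP]", "yes", "[SEP]"]
assert input_type_ids == [0, 0, 0, 0, 0, 1, 1]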
|
Loads a data file into a list of `InputFeature`s.
|
def add_group(self, groupname, statements):
"""
Adds a group
@type groupname: bytes
@type statements: str
"""
msg = OmapiMessage.open(b"group")
msg.message.append(("create", struct.pack("!I", 1)))
msg.obj.append(("name", groupname))
msg.obj.append(("statements", statements))
response = self.query_server(msg)
if response.opcode != OMAPI_OP_UPDATE:
raise OmapiError("add group failed")
|
Adds a group
@type groupname: bytes
@type statements: str
|
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
|
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
|
def user_field(user, field, *args):
"""
Gets or sets (optional) user model fields. No-op if fields do not exist.
"""
if not field:
return
User = get_user_model()
try:
field_meta = User._meta.get_field(field)
max_length = field_meta.max_length
except FieldDoesNotExist:
if not hasattr(user, field):
return
max_length = None
if args:
# Setter
v = args[0]
if v:
v = v[0:max_length]
setattr(user, field, v)
else:
# Getter
return getattr(user, field)
|
Gets or sets (optional) user model fields. No-op if fields do not exist.
|
def set2d(self):
"""
Configures OpenGL to draw in 2D.
Note that wireframe mode is always disabled in 2D-Mode, but can be re-enabled by calling ``glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)``\ .
"""
# Light
glDisable(GL_LIGHTING)
# To avoid accidental wireframe GUIs and fonts
glPolygonMode( GL_FRONT_AND_BACK, GL_FILL)
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
|
Configures OpenGL to draw in 2D.
Note that wireframe mode is always disabled in 2D-Mode, but can be re-enabled by calling ``glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)``\ .
|
def __on_presence(self, data):
"""
Got a presence stanza
"""
room_jid = data['from'].bare
muc_presence = data['muc']
room = muc_presence['room']
nick = muc_presence['nick']
with self.__lock:
try:
# Get room state machine
room_data = self.__rooms[room]
if room_data.nick != nick:
# Not about the room creator
return
except KeyError:
# Unknown room (or not a room)
return
else:
# Clean up, as we got what we wanted
del self.__rooms[room]
if not self.__rooms:
# No more rooms: no need to listen to presence anymore
self.__xmpp.del_event_handler("presence", self.__on_presence)
if data['type'] == 'error':
# Got an error: update the state machine and clean up
self.__safe_errback(room_data, data['error']['condition'],
data['error']['text'])
elif muc_presence['affiliation'] != 'owner':
            # We are not the owner of the room: consider it an error
self.__safe_errback(room_data, 'not-owner',
'We are not the owner of the room')
else:
# Success: we own the room
# Setup room configuration
try:
config = self.__muc.getRoomConfig(room_jid)
except ValueError:
# Can't differentiate IQ errors from a "no configuration"
# result: consider it OK
self.__logger.warning("Can't get the configuration form for "
"XMPP room %s", room_jid)
self.__safe_callback(room_data)
else:
# Prepare our configuration
custom_values = room_data.configuration or {}
# Filter options that are not known from the server
known_fields = config['fields']
to_remove = [key for key in custom_values
if key not in known_fields]
for key in to_remove:
del custom_values[key]
# Send configuration (use a new form to avoid OpenFire to have
# an internal error)
form = self.__xmpp['xep_0004'].make_form("submit")
form['values'] = custom_values
self.__muc.setRoomConfig(room_jid, form)
# Call back the creator
self.__safe_callback(room_data)
|
Got a presence stanza
|
def pack_req(cls, code, pl_ratio_min,
pl_ratio_max, trd_env, acc_id, trd_mkt, conn_id):
"""Convert from user request for trading days to PLS request"""
from futuquant.common.pb.Trd_GetPositionList_pb2 import Request
req = Request()
req.c2s.header.trdEnv = TRD_ENV_MAP[trd_env]
req.c2s.header.accID = acc_id
req.c2s.header.trdMarket = TRD_MKT_MAP[trd_mkt]
if code:
req.c2s.filterConditions.codeList.append(code)
if pl_ratio_min is not None:
req.c2s.filterPLRatioMin = float(pl_ratio_min) / 100.0
if pl_ratio_max is not None:
req.c2s.filterPLRatioMax = float(pl_ratio_max) / 100.0
return pack_pb_req(req, ProtoId.Trd_GetPositionList, conn_id)
|
Convert a user position-list request into a PLS protobuf request
|
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
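# For reference, this matches the standard Wishart log normalizer (a sketch,
# assuming the scale operator represents a factor L with V = L L^T, so that
# df * log|L| equals (df/2) * log|V|):
#
#     log Z = (nu/2) log|V| + (nu p / 2) log 2 + log Gamma_p(nu / 2)
#
# with nu = df, p = dimension, and Gamma_p the multivariate gamma function
# (computed above by self._multi_lgamma).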
|
Computes the log normalizing constant, log(Z).
|
def _index(self, model):
'''
    Elasticsearch multi-type support has been removed.
    Use one index per model unless ``__msearch_index__`` is set.
'''
doc_type = model
if not isinstance(model, str):
doc_type = model.__table__.name
index_name = doc_type
if hasattr(model, "__msearch_index__"):
index_name = model.__msearch_index__
if doc_type not in self._indexs:
self._indexs[doc_type] = Index(self._client, index_name, doc_type)
return self._indexs[doc_type]
|
Elasticsearch multi-type support has been removed.
Use one index per model unless ``__msearch_index__`` is set.
|
def _begin_request(self):
"""
Actually start executing this request.
"""
headers = self.m2req.headers
self._request = HTTPRequest(connection=self,
method=headers.get("METHOD"),
uri=self.m2req.path,
version=headers.get("VERSION"),
headers=headers,
remote_ip=headers.get("x-forwarded-for"))
if len(self.m2req.body) > 0:
self._request.body = self.m2req.body
if self.m2req.is_disconnect():
self.finish()
elif headers.get("x-mongrel2-upload-done", None):
# there has been a file upload.
expected = headers.get("x-mongrel2-upload-start", "BAD")
upload = headers.get("x-mongrel2-upload-done", None)
if expected == upload:
self.request_callback(self._request)
elif headers.get("x-mongrel2-upload-start", None):
# this is just a notification that a file upload has started. Do
# nothing for now!
pass
else:
self.request_callback(self._request)
|
Actually start executing this request.
|
def alpha3(self, code):
"""
Return the ISO 3166-1 three letter country code matching the provided
country code.
If no match is found, returns an empty string.
"""
code = self.alpha2(code)
try:
return self.alt_codes[code][0]
except KeyError:
return ""
|
Return the ISO 3166-1 three letter country code matching the provided
country code.
If no match is found, returns an empty string.
|
def main():
"""
Creates the following structure:
/plugins
__init__.py
        morning.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md
"""
print_head()
puts("Welcome to the will project generator.")
puts("")
if args.config_dist_only:
print("Generating config.py.dist...")
else:
print("\nGenerating will scaffold...")
current_dir = os.getcwd()
plugins_dir = os.path.join(current_dir, "plugins")
templates_dir = os.path.join(current_dir, "templates")
if not args.config_dist_only:
print(" /plugins")
# Set up the directories
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
print(" __init__.py")
# Create the plugins __init__.py
with open(os.path.join(plugins_dir, "__init__.py"), 'w+') as f:
pass
print(" morning.py")
# Create the morning plugin
morning_file_path = os.path.join(plugins_dir, "morning.py")
if not os.path.exists(morning_file_path):
with open(morning_file_path, 'w+') as f:
f.write("""from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class MorningPlugin(WillPlugin):
@respond_to("^good morning")
def good_morning(self, message):
self.reply("oh, g'morning!")
""")
print(" /templates")
if not os.path.exists(templates_dir):
os.makedirs(templates_dir)
print(" blank.html")
# Create the plugins __init__.py
with open(os.path.join(templates_dir, "blank.html"), 'w+') as f:
pass
print(" .gitignore")
# Create .gitignore, or at least add shelf.db
gitignore_path = os.path.join(current_dir, ".gitignore")
if not os.path.exists(gitignore_path):
with open(gitignore_path, 'w+') as f:
f.write("""*.py[cod]
pip-log.txt
shelf.db
""")
else:
append_ignore = False
with open(gitignore_path, "r+") as f:
if "shelf.db" not in f.read():
append_ignore = True
if append_ignore:
with open(gitignore_path, "a") as f:
f.write("\nshelf.db\n")
# Create run_will.py
print(" run_will.py")
run_will_path = os.path.join(current_dir, "run_will.py")
if not os.path.exists(run_will_path):
with open(run_will_path, 'w+') as f:
f.write("""#!/usr/bin/env python
from will.main import WillBot
if __name__ == '__main__':
bot = WillBot()
bot.bootstrap()
""")
# And make it executable
st = os.stat('run_will.py')
os.chmod("run_will.py", st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Create config.py
print(" config.py.dist")
config_path = os.path.join(current_dir, "config.py.dist")
if not os.path.exists(config_path) or ask_user("! config.py.dist exists. Overwrite it?"):
with open(os.path.join(PROJECT_ROOT, "config.py.dist"), "r") as source_f:
source = source_f.read()
if args.backends:
for backend in SERVICE_BACKENDS:
if backend in args.backends:
                    source = _enable_service(backend, source)
                else:
                    source = _disable_service(backend, source)
else:
# Ask user thru cmd line what backends to enable
print("\nWill supports a few different service backends. Let's set up the ones you want:\n")
source = enable_disable_service("Slack", source)
source = enable_disable_service("HipChat", source)
source = enable_disable_service("Rocket.Chat", source)
source = enable_disable_service("Shell", source)
with open(config_path, "w+") as f:
config = source
f.write(config)
if not args.config_dist_only:
print(" requirements.txt")
# Create requirements.txt
requirements_path = os.path.join(current_dir, "requirements.txt")
if not os.path.exists(requirements_path) or ask_user("! requirements.txt exists. Overwrite it?"):
with open(requirements_path, 'w+') as f:
f.write(requirements_txt)
print(" Procfile")
# Create Procfile
        procfile_path = os.path.join(current_dir, "Procfile")
        if not os.path.exists(procfile_path):
            with open(procfile_path, 'w+') as f:
                f.write("web: python run_will.py")
print(" README.md")
# Create the readme
readme_path = os.path.join(current_dir, "README.md")
if not os.path.exists(readme_path):
with open(readme_path, 'w+') as f:
f.write("""
This is our bot, a [will](https://github.com/skoczen/will) bot.
""")
print("\nDone.")
print("\n Your will is now ready to go. Run ./run_will.py to get started!")
else:
print("\nCreated a config.py.dist. Open it up to see what's new!\n")
|
Creates the following structure:
/plugins
__init__.py
morning.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md
|
def push(self, instance, action, success, idxs=_marker):
"""Adds an instance into the pool, to be reindexed on resume
"""
uid = api.get_uid(instance)
info = self.objects.get(uid, {})
idx = [] if idxs is _marker else idxs
info[action] = {'success': success, 'idxs': idx}
self.objects[uid] = info
|
Adds an instance into the pool, to be reindexed on resume
|
def extract_jwt_token(self, token):
"""
Extracts a data dictionary from a jwt token
"""
# Note: we disable exp verification because we will do it ourselves
with InvalidTokenHeader.handle_errors('failed to decode JWT token'):
data = jwt.decode(
token,
self.encode_key,
algorithms=self.allowed_algorithms,
options={'verify_exp': False},
)
self._validate_jwt_data(data, access_type=AccessType.access)
return data
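# Standalone sketch of the underlying PyJWT calls, assuming an HS256 setup;
# 'verify_exp': False skips the library-side expiry check so it can be done
# separately (here by _validate_jwt_data).
import jwt  # PyJWT

secret = "example-secret"
token = jwt.encode({"sub": "user1", "exp": 0}, secret, algorithm="HS256")
data = jwt.decode(token, secret, algorithms=["HS256"],
                  options={"verify_exp": False})
assert data["sub"] == "user1"  # decodes despite the long-expired 'exp' claim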
|
Extracts a data dictionary from a jwt token
|
def modify_virtual(hostname, username, password, name, destination,
pool=None,
address_status=None,
auto_lasthop=None,
bwc_policy=None,
cmp_enabled=None,
connection_limit=None,
dhcp_relay=None,
description=None,
fallback_persistence=None,
flow_eviction_policy=None,
gtm_score=None,
ip_forward=None,
ip_protocol=None,
internal=None,
twelve_forward=None,
last_hop_pool=None,
mask=None,
mirror=None,
nat64=None,
persist=None,
profiles=None,
policies=None,
rate_class=None,
rate_limit=None,
rate_limit_mode=None,
rate_limit_dst=None,
rate_limit_src=None,
rules=None,
related_rules=None,
reject=None,
source=None,
source_address_translation=None,
source_port=None,
virtual_state=None,
traffic_classes=None,
translate_address=None,
translate_port=None,
vlans=None):
'''
    Modify a virtual server. Modifies an existing virtual; only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
        [none | string]
cmp_enabled
[yes | no]
dhcp_relay
        [yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
    last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
    rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary ]
vlan_ids
[ list]
enabled
[ true | false ]
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if __opts__['test']:
return _test_output(ret, 'modify', params={
'hostname': hostname,
'username': username,
'password': password,
'name': name,
'destination': destination,
'pool': pool,
'address_status': address_status,
'auto_lasthop': auto_lasthop,
'bwc_policy': bwc_policy,
'cmp_enabled': cmp_enabled,
'connection_limit': connection_limit,
'dhcp_relay': dhcp_relay,
'description': description,
'fallback_persistence': fallback_persistence,
'flow_eviction_policy': flow_eviction_policy,
'gtm_score': gtm_score,
'ip_forward': ip_forward,
'ip_protocol': ip_protocol,
'internal': internal,
'twelve_forward': twelve_forward,
'last_hop_pool': last_hop_pool,
'mask': mask,
'mirror': mirror,
'nat64': nat64,
'persist': persist,
'profiles': profiles,
'policies': policies,
'rate_class': rate_class,
'rate_limit': rate_limit,
'rate_limit_mode': rate_limit_mode,
'rate_limit_dst': rate_limit_dst,
'rate_limit_src': rate_limit_src,
'rules': rules,
'related_rules': related_rules,
'reject': reject,
'source': source,
'source_address_translation': source_address_translation,
'source_port': source_port,
'virtual_state': virtual_state,
'traffic_classes': traffic_classes,
'translate_address': translate_address,
'translate_port': translate_port,
'vlans': vlans
}
)
existing = __salt__['bigip.list_virtual'](hostname, username, password, name)
# does this virtual exist?
if existing['code'] == 200:
# modify
modified = __salt__['bigip.modify_virtual'](hostname=hostname,
username=username,
password=password,
name=name,
destination=destination,
description=description,
pool=pool,
address_status=address_status,
auto_lasthop=auto_lasthop,
bwc_policy=bwc_policy,
cmp_enabled=cmp_enabled,
connection_limit=connection_limit,
dhcp_relay=dhcp_relay,
fallback_persistence=fallback_persistence,
flow_eviction_policy=flow_eviction_policy,
gtm_score=gtm_score,
ip_forward=ip_forward,
ip_protocol=ip_protocol,
internal=internal,
twelve_forward=twelve_forward,
last_hop_pool=last_hop_pool,
mask=mask,
mirror=mirror,
nat64=nat64,
persist=persist,
profiles=profiles,
policies=policies,
rate_class=rate_class,
rate_limit=rate_limit,
rate_limit_mode=rate_limit_mode,
rate_limit_dst=rate_limit_dst,
rate_limit_src=rate_limit_src,
rules=rules,
related_rules=related_rules,
reject=reject,
source=source,
source_address_translation=source_address_translation,
source_port=source_port,
state=virtual_state,
traffic_classes=traffic_classes,
translate_address=translate_address,
translate_port=translate_port,
vlans=vlans)
#was the modification successful?
if modified['code'] == 200:
#relist it to compare
relisting = __salt__['bigip.list_virtual'](hostname, username, password, name)
if relisting['code'] == 200:
relisting = _strip_key(relisting, 'generation')
existing = _strip_key(existing, 'generation')
ret = _check_for_changes('Virtual', ret, existing, relisting)
else:
ret = _load_result(relisting, ret)
else:
ret = _load_result(modified, ret)
elif existing['code'] == 404:
ret['comment'] = 'A Virtual with this name was not found.'
# else something else was returned
else:
ret = _load_result(existing, ret)
return ret
|
Modify a virtual server. Modifies an existing virtual; only the parameters specified will be enforced.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the virtual to create
destination
[ [virtual_address_name:port] | [ipv4:port] | [ipv6.port] ]
pool
[ [pool_name] | none]
address_status
[yes | no]
auto_lasthop
[default | enabled | disabled ]
bwc_policy
[none | string]
cmp_enabled
[yes | no]
dhcp_relay
[yes | no]
connection_limit
[integer]
description
[string]
state
[disabled | enabled]
fallback_persistence
[none | [profile name] ]
flow_eviction_policy
[none | [eviction policy name] ]
gtm_score
[integer]
ip_forward
[yes | no]
ip_protocol
[any | protocol]
internal
[yes | no]
twelve_forward(12-forward)
[yes | no]
last_hop_pool
[ [pool_name] | none]
mask
{ [ipv4] | [ipv6] }
mirror
{ [disabled | enabled | none] }
nat64
[enabled | disabled]
persist
[list]
profiles
[none | default | list ]
policies
[none | default | list ]
rate_class
[name]
rate_limit
[integer]
rate_limit_mode
[destination | object | object-destination |
object-source | object-source-destination |
source | source-destination]
rate_limit_dst
[integer]
rate_limit_src
[integer]
rules
[none | list ]
related_rules
[none | list ]
reject
[yes | no]
source
{ [ipv4[/prefixlen]] | [ipv6[/prefixlen]] }
source_address_translation
[none | snat:pool_name | lsn | automap | dictionary ]
source_port
[change | preserve | preserve-strict]
state
[enabled | disabled]
traffic_classes
[none | default | list ]
translate_address
[enabled | disabled]
translate_port
[enabled | disabled]
vlans
[none | default | dictionary ]
vlan_ids
[ list]
enabled
[ true | false ]
|
def add_hyperedge(self, tail, head, attr_dict=None, **attr):
"""Adds a hyperedge to the hypergraph, along with any related
attributes of the hyperedge.
This method will automatically add any node from the tail and
head that was not in the hypergraph.
A hyperedge without a "weight" attribute specified will be
assigned the default value of 1.
:param tail: iterable container of references to nodes in the
tail of the hyperedge to be added.
:param head: iterable container of references to nodes in the
head of the hyperedge to be added.
:param attr_dict: dictionary of attributes shared by all
the hyperedges.
:param attr: keyword arguments of attributes of the hyperedge;
attr's values will override attr_dict's values
if both are provided.
:returns: str -- the ID of the hyperedge that was added.
:raises: ValueError -- tail and head arguments cannot both be empty.
Examples:
::
>>> H = DirectedHypergraph()
>>> x = H.add_hyperedge(["A", "B"], ["C", "D"])
        >>> y = H.add_hyperedge(("A", "C"), ("B",), weight=2)
        >>> z = H.add_hyperedge(set(["D"]),
                                set(["A", "C"]),
                                {"color": "red"})
"""
attr_dict = self._combine_attribute_arguments(attr_dict, attr)
# Don't allow both empty tail and head containers (invalid hyperedge)
if not tail and not head:
raise ValueError("tail and head arguments \
cannot both be empty.")
# Use frozensets for tail and head sets to allow for hashable keys
frozen_tail = frozenset(tail)
frozen_head = frozenset(head)
# Initialize a successor dictionary for the tail and head, respectively
if frozen_tail not in self._successors:
self._successors[frozen_tail] = {}
if frozen_head not in self._predecessors:
self._predecessors[frozen_head] = {}
is_new_hyperedge = not self.has_hyperedge(frozen_tail, frozen_head)
if is_new_hyperedge:
# Add tail and head nodes to graph (if not already present)
self.add_nodes(frozen_head)
self.add_nodes(frozen_tail)
# Create new hyperedge name to use as reference for that hyperedge
hyperedge_id = self._assign_next_hyperedge_id()
# Add hyperedge to the forward-star and to the backward-star
# for each node in the tail and head sets, respectively
for node in frozen_tail:
self._forward_star[node].add(hyperedge_id)
for node in frozen_head:
self._backward_star[node].add(hyperedge_id)
# Add the hyperedge as the successors and predecessors
# of the tail set and head set, respectively
self._successors[frozen_tail][frozen_head] = hyperedge_id
self._predecessors[frozen_head][frozen_tail] = hyperedge_id
# Assign some special attributes to this hyperedge. We assign
# a default weight of 1 to the hyperedge. We also store the
# original tail and head sets in order to return them exactly
# as the user passed them into add_hyperedge.
self._hyperedge_attributes[hyperedge_id] = \
{"tail": tail, "__frozen_tail": frozen_tail,
"head": head, "__frozen_head": frozen_head,
"weight": 1}
else:
# If its not a new hyperedge, just get its ID to update attributes
hyperedge_id = self._successors[frozen_tail][frozen_head]
# Set attributes and return hyperedge ID
self._hyperedge_attributes[hyperedge_id].update(attr_dict)
return hyperedge_id
|
Adds a hyperedge to the hypergraph, along with any related
attributes of the hyperedge.
This method will automatically add any node from the tail and
head that was not in the hypergraph.
A hyperedge without a "weight" attribute specified will be
assigned the default value of 1.
:param tail: iterable container of references to nodes in the
tail of the hyperedge to be added.
:param head: iterable container of references to nodes in the
head of the hyperedge to be added.
:param attr_dict: dictionary of attributes of the hyperedge.
:param attr: keyword arguments of attributes of the hyperedge;
attr's values will override attr_dict's values
if both are provided.
:returns: str -- the ID of the hyperedge that was added.
:raises: ValueError -- tail and head arguments cannot both be empty.
Examples:
::
>>> H = DirectedHypergraph()
>>> x = H.add_hyperedge(["A", "B"], ["C", "D"])
>>> y = H.add_hyperedge(("A", "C"), ("B",), weight=2)
>>> z = H.add_hyperedge(set(["D"]),
                        set(["A", "C"]),
                        {"color": "red"})
|
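A minimal usage sketch of add_hyperedge; the import path is an assumption (the
method matches halp's DirectedHypergraph), and only behavior documented in the
docstring above is relied on.

from halp.directed_hypergraph import DirectedHypergraph  # assumed import path

H = DirectedHypergraph()
e1 = H.add_hyperedge(["A", "B"], ["C", "D"])            # gets the default weight of 1
e2 = H.add_hyperedge(["A", "B"], ["C", "D"], weight=3)  # duplicate tail/head pair:
assert e1 == e2  # the existing hyperedge ID is reused and its weight becomes 3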
def lstm_cell(inputs,
state,
num_units,
use_peepholes=False,
cell_clip=0.0,
initializer=None,
num_proj=None,
num_unit_shards=None,
num_proj_shards=None,
reuse=None,
name=None):
"""Full LSTM cell."""
input_shape = common_layers.shape_list(inputs)
cell = tf.nn.rnn_cell.LSTMCell(num_units,
use_peepholes=use_peepholes,
cell_clip=cell_clip,
initializer=initializer,
num_proj=num_proj,
num_unit_shards=num_unit_shards,
num_proj_shards=num_proj_shards,
reuse=reuse,
name=name,
state_is_tuple=False)
if state is None:
state = cell.zero_state(input_shape[0], tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state
|
Full LSTM cell.
|
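A minimal TF 1.x sketch of calling the helper above; it assumes tensor2tensor's
common_layers module is importable (the helper matches tensor2tensor's
lstm_cell).

import tensorflow as tf  # TF 1.x assumed

batch, num_units = 4, 32
inputs = tf.random_normal([batch, num_units])
outputs, state = lstm_cell(inputs, state=None, num_units=num_units, name="lstm")
# With state_is_tuple=False the returned state is the concatenation [c, h],
# so it has shape [batch, 2 * num_units]; outputs has shape [batch, num_units].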
def setAnimated(self, state):
"""
Sets whether or not the popup widget should animate its opacity
when it is shown.
:param state | <bool>
"""
self._animated = state
self.setAttribute(Qt.WA_TranslucentBackground, state)
|
Sets whether or not the popup widget should animate its opacity
when it is shown.
:param state | <bool>
|
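A minimal sketch of toggling the animation; the widget class and import paths
are assumptions (this reads like a projexui-style popup widget on PyQt4) and
are shown only to illustrate the call.

import sys
from PyQt4.QtGui import QApplication
from projexui.widgets.xpopupwidget import XPopupWidget  # assumed import path

app = QApplication(sys.argv)
popup = XPopupWidget()
popup.setAnimated(True)   # fade opacity on show; enables a translucent background
popup.setAnimated(False)  # show at full opacity with an opaque background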
def run(self, loopinfo=None, batch_size=1):
""" Run consumer
"""
logger.info("{}: starting...".format(self.__class__.__name__))
if loopinfo:
while True:
for topic in self.topics:
self.call_kafka(topic, batch_size)
time.sleep(loopinfo.sleep)
else:
for topic in self.topics:
self.call_kafka(topic, batch_size)
|
Run the consumer: process each topic once, or loop forever,
sleeping loopinfo.sleep seconds between rounds, when loopinfo is given.
|
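A minimal sketch of driving run(); MyConsumer is a hypothetical subclass that
provides topics and call_kafka, and loopinfo only needs a .sleep attribute, so
a namedtuple is enough.

from collections import namedtuple

LoopInfo = namedtuple("LoopInfo", ["sleep"])

consumer = MyConsumer(topics=["events", "metrics"])      # hypothetical class
consumer.run(batch_size=10)                              # one pass over every topic
consumer.run(loopinfo=LoopInfo(sleep=5), batch_size=10)  # poll forever, 5 s per round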
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, of length n_samples
Training data; contains the reaction containers.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
cgrs = [~r for r in X]  # condense each reaction into its CGR
condition_structure = defaultdict(set)
for structure, condition in zip(cgrs, groups):
condition_structure[condition].add(structure)
train_data = defaultdict(list)
test_data = []
for n, (structure, condition) in enumerate(zip(cgrs, groups)):
train_data[structure].append(n)
if len(condition_structure[condition]) > 1:
test_data.append(n)
if self.n_splits > len(train_data):
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of transformations: %d."
% (self.n_splits, len(train_data)))
structures_weight = sorted(((x, len(y)) for x, y in train_data.items()), key=lambda x: x[1], reverse=True)
fold_mean_size = len(cgrs) // self.n_splits
if structures_weight[0][1] > fold_mean_size:
warning('You have a transformation group larger than the mean fold size')
for idx in range(self.n_repeats):
train_folds = [[] for _ in range(self.n_splits)]
for structure, structure_length in structures_weight:
if self.shuffle:
check_random_state(self.random_state).shuffle(train_folds)
for fold in train_folds[:-1]:
if len(fold) + structure_length <= fold_mean_size:
fold.extend(train_data[structure])
break
else:
roulette_param = (structure_length - fold_mean_size + len(fold)) / structure_length
if random() > roulette_param:
fold.extend(train_data[structure])
break
else:
train_folds[-1].extend(train_data[structure])
test_folds = [[] for _ in range(self.n_splits)]
for test, train in zip(test_folds, train_folds):
for index in train:
if index in test_data:
test.append(index)
for i in range(self.n_splits):
train_index = []
for fold in train_folds[:i]:
train_index.extend(fold)
for fold in train_folds[i+1:]:
train_index.extend(fold)
test_index = test_folds[i]
yield array(train_index), array(test_index)
|
Generate indices to split data into training and test set.
Parameters
----------
X : array-like, of length n_samples
Training data; contains the reaction containers.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
|
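A standalone sketch of the core idea in split() above: samples sharing a
transformation (group) stay together and whole groups are packed greedily,
largest first, into the first fold with room, spilling into the last fold
otherwise. It deliberately omits the roulette randomization, shuffling, and
repeats; all names are illustrative only.

from collections import defaultdict

def pack_groups_into_folds(groups, n_splits):
    by_group = defaultdict(list)
    for idx, key in enumerate(groups):
        by_group[key].append(idx)
    folds = [[] for _ in range(n_splits)]
    mean_size = len(groups) // n_splits
    # largest groups first, first-fit into any fold with room
    for members in sorted(by_group.values(), key=len, reverse=True):
        for fold in folds[:-1]:
            if len(fold) + len(members) <= mean_size:
                fold.extend(members)
                break
        else:
            folds[-1].extend(members)  # nothing fits: spill into the last fold
    return folds

print(pack_groups_into_folds(["a", "a", "b", "b", "b", "c", "d"], 2))
# e.g. [[2, 3, 4], [0, 1, 5, 6]] -- every group lands in exactly one fold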
def fit_overlays(self, text, start=None, end=None, **kw):
"""
Yield the overlays that fit the range [start, end).
"""
for ovl in text.overlays:
if ovl.match(props=self.props_match, rng=(start, end)):
yield ovl
|
Yield the overlays that fit the range [start, end).
|
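A self-contained sketch of the duck-typed protocol fit_overlays relies on: the
text object exposes .overlays, and each overlay answers match(props=...,
rng=...). Both classes below are hypothetical stand-ins, including the
containment semantics chosen for match.

class Overlay:
    def __init__(self, start, end):
        self.start, self.end = start, end

    def match(self, props=None, rng=(None, None)):
        lo, hi = rng  # assumed semantics: overlay lies entirely inside the range
        return (lo is None or self.start >= lo) and (hi is None or self.end <= hi)

class Text:
    def __init__(self, overlays):
        self.overlays = overlays

text = Text([Overlay(0, 5), Overlay(4, 12)])
# Only Overlay(0, 5) lies entirely inside [0, 10), so a finder whose
# props_match is None would yield just that overlay from fit_overlays(text, 0, 10).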