text stringlengths 78 104k | score float64 0 0.18 |
|---|---|
def havespace(self, scriptname, scriptsize):
    """Ask the server whether there is room for a script of the given size.

    See the MANAGESIEVE specification (RFC 5804), section 2.5.

    :param scriptname: script's name
    :param scriptsize: script's size
    :rtype: boolean
    :return: True if the server answered "OK", False otherwise
    """
    code, data = self.__send_command(
        "HAVESPACE", [scriptname.encode("utf-8"), scriptsize])
    # The response code is either "OK" or an error; map it directly to a
    # bool instead of the redundant if/return True/return False chain.
    return code == "OK"
def report(self, morfs, outfile=None):
    """Generate a Cobertura-compatible XML report for `morfs`.

    `morfs` is a list of modules or filenames.

    `outfile` is a file object to write the XML to; defaults to stdout.

    Returns the total coverage percentage, combining line and branch
    counts over all packages.
    """
    # Initial setup.
    outfile = outfile or sys.stdout

    # Create the DOM that will store the data.
    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd"
        )
    self.xml_out = impl.createDocument(None, "coverage", docType)

    # Write header stuff.
    xcoverage = self.xml_out.documentElement
    xcoverage.setAttribute("version", __version__)
    # Cobertura expects a millisecond timestamp.
    xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
    xcoverage.appendChild(self.xml_out.createComment(
        " Generated by coverage.py: %s " % __url__
        ))
    xpackages = self.xml_out.createElement("packages")
    xcoverage.appendChild(xpackages)

    # Call xml_file for each file in the data; it populates self.packages.
    self.packages = {}
    self.report_files(self.xml_file, morfs)

    # Running totals of line/branch counts over every package.
    lnum_tot, lhits_tot = 0, 0
    bnum_tot, bhits_tot = 0, 0

    # Populate the XML DOM with the package info.
    for pkg_name in sorted(self.packages.keys()):
        pkg_data = self.packages[pkg_name]
        # pkg_data: (class elements by name, line hits, line count,
        # branch hits, branch count)
        class_elts, lhits, lnum, bhits, bnum = pkg_data
        xpackage = self.xml_out.createElement("package")
        xpackages.appendChild(xpackage)
        xclasses = self.xml_out.createElement("classes")
        xpackage.appendChild(xclasses)
        for class_name in sorted(class_elts.keys()):
            xclasses.appendChild(class_elts[class_name])
        # Package names use dots, not path separators.
        xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
        xpackage.setAttribute("line-rate", rate(lhits, lnum))
        xpackage.setAttribute("branch-rate", rate(bhits, bnum))
        xpackage.setAttribute("complexity", "0")

        lnum_tot += lnum
        lhits_tot += lhits
        bnum_tot += bnum
        bhits_tot += bhits

    xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
    xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))

    # Use the DOM to write the output file.
    outfile.write(self.xml_out.toprettyxml())

    # Return the total percentage; 0.0 when there is nothing to measure.
    denom = lnum_tot + bnum_tot
    if denom == 0:
        pct = 0.0
    else:
        pct = 100.0 * (lhits_tot + bhits_tot) / denom
    return pct
def _get(self, url, params=None):
    """Issue an authenticated GET request and process the response.

    Used by every other method. The account credentials (``login`` and
    ``key``) are merged into the query parameters before sending.

    Args:
        url (str): relative path of a specific service (account_info, ...).
        params (:obj:`dict`, optional): parameters to be sent in the GET
            request.

    Returns:
        dict: processed results of the GET response.
    """
    if not params:
        params = {}
    # Credentials are injected into the same dict the caller passed in,
    # matching the original in-place update behavior.
    params['login'] = self.login
    params['key'] = self.key
    payload = requests.get(self.api_url + url, params).json()
    return self._process_response(payload)
def get_item_hrefs(result_collection):
    """Return the list of item hrefs contained in a collection result.

    Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a
    list of item hrefs.

    'result_collection' is a JSON object returned by a previous API call.

    Returns a list, which may be empty if no items were found.
    """
    # Argument error checking.
    assert result_collection is not None

    links = result_collection.get('_links')
    if links is None:
        return []
    items = links.get('items')
    if items is None:
        return []
    return [entry.get('href') for entry in items]
def _GetCompressedStreamTypes(self, mediator, path_spec):
    """Determines if a data stream contains a compressed stream such as: gzip.

    Args:
      mediator (ParserMediator): mediates the interactions between
          parsers and other components, such as storage and abort signals.
      path_spec (dfvfs.PathSpec): path specification of the data stream.

    Returns:
      list[str]: dfVFS compressed stream type indicators found in
          the data stream; empty when analysis fails with an IOError.
    """
    try:
        return analyzer.Analyzer.GetCompressedStreamTypeIndicators(
            path_spec, resolver_context=mediator.resolver_context)
    except IOError as exception:
        # Analysis failure is reported as a warning, not propagated.
        warning_message = (
            'analyzer failed to determine compressed stream type '
            'indicators with error: {0!s}').format(exception)
        mediator.ProduceExtractionWarning(warning_message, path_spec=path_spec)
        return []
def parse_sidebar(self, media_page):
    """Parses the DOM and returns media attributes in the sidebar.

    Each attribute is scraped inside its own try/except block so a single
    malformed section does not abort the whole parse; the exception is
    re-raised only when the session is configured not to suppress parse
    exceptions.

    :type media_page: :class:`bs4.BeautifulSoup`
    :param media_page: MAL media page's DOM

    :rtype: dict
    :return: media attributes.

    :raises: InvalidMediaError, MalformedMediaPageError
    """
    media_info = {}

    # if MAL says the series doesn't exist, raise an InvalidMediaError.
    error_tag = media_page.find(u'div', {'class': 'badresult'})
    if error_tag:
        raise InvalidMediaError(self.id)

    try:
        title_tag = media_page.find(u'div', {'id': 'contentWrapper'}).find(u'h1')
        if not title_tag.find(u'div'):
            # otherwise, raise a MalformedMediaPageError.
            raise MalformedMediaPageError(self.id, media_page, message="Could not find title div")
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        # Strip markup from the title node and keep only its text.
        utilities.extract_tags(title_tag.find_all())
        media_info[u'title'] = title_tag.text.strip()
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    # First cell of the content table holds the sidebar info panel.
    info_panel_first = media_page.find(u'div', {'id': 'content'}).find(u'table').find(u'td')

    try:
        picture_tag = info_panel_first.find(u'img')
        # NOTE(review): .decode('utf-8') on the src attribute implies a
        # Python 2 byte string -- confirm before running under Python 3.
        media_info[u'picture'] = picture_tag.get(u'src').decode('utf-8')
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        # assemble alternative titles for this series.
        media_info[u'alternative_titles'] = {}
        alt_titles_header = info_panel_first.find(u'h2', text=u'Alternative Titles')
        if alt_titles_header:
            # Walk sibling divs until the first non-language node.
            next_tag = alt_titles_header.find_next_sibling(u'div', {'class': 'spaceit_pad'})
            while True:
                if next_tag is None or not next_tag.find(u'span', {'class': 'dark_text'}):
                    # not a language node, break.
                    break
                # get language and remove the node.
                language = next_tag.find(u'span').text[:-1]
                utilities.extract_tags(next_tag.find_all(u'span', {'class': 'dark_text'}))
                names = next_tag.text.strip().split(u', ')
                media_info[u'alternative_titles'][language] = names
                next_tag = next_tag.find_next_sibling(u'div', {'class': 'spaceit_pad'})
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        type_tag = info_panel_first.find(text=u'Type:').parent.parent
        utilities.extract_tags(type_tag.find_all(u'span', {'class': 'dark_text'}))
        media_info[u'type'] = type_tag.text.strip()
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        status_tag = info_panel_first.find(text=u'Status:').parent.parent
        utilities.extract_tags(status_tag.find_all(u'span', {'class': 'dark_text'}))
        media_info[u'status'] = status_tag.text.strip()
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        genres_tag = info_panel_first.find(text=u'Genres:').parent.parent
        utilities.extract_tags(genres_tag.find_all(u'span', {'class': 'dark_text'}))
        media_info[u'genres'] = []
        for genre_link in genres_tag.find_all('a'):
            link_parts = genre_link.get('href').split('[]=')
            # of the form /anime|manga.php?genre[]=1
            genre = self.session.genre(int(link_parts[1])).set({'name': genre_link.text})
            media_info[u'genres'].append(genre)
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        # grab statistics for this media.
        score_tag = info_panel_first.find(text=u'Score:').parent.parent
        # get score and number of users.
        users_node = [x for x in score_tag.find_all(u'small') if u'scored by' in x.text][0]
        num_users = int(users_node.text.split(u'scored by ')[-1].split(u' users')[0])
        utilities.extract_tags(score_tag.find_all())
        # Stored as a (score, number_of_voters) tuple.
        media_info[u'score'] = (decimal.Decimal(score_tag.text.strip()), num_users)
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        # Rank is rendered as e.g. "#1,234" -- strip '#' and commas.
        rank_tag = info_panel_first.find(text=u'Ranked:').parent.parent
        utilities.extract_tags(rank_tag.find_all())
        media_info[u'rank'] = int(rank_tag.text.strip()[1:].replace(u',', ''))
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        popularity_tag = info_panel_first.find(text=u'Popularity:').parent.parent
        utilities.extract_tags(popularity_tag.find_all())
        media_info[u'popularity'] = int(popularity_tag.text.strip()[1:].replace(u',', ''))
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        members_tag = info_panel_first.find(text=u'Members:').parent.parent
        utilities.extract_tags(members_tag.find_all())
        media_info[u'members'] = int(members_tag.text.strip().replace(u',', ''))
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        favorites_tag = info_panel_first.find(text=u'Favorites:').parent.parent
        utilities.extract_tags(favorites_tag.find_all())
        media_info[u'favorites'] = int(favorites_tag.text.strip().replace(u',', ''))
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    try:
        # get popular tags.
        tags_header = media_page.find(u'h2', text=u'Popular Tags')
        tags_tag = tags_header.find_next_sibling(u'span')
        media_info[u'popular_tags'] = {}
        for tag_link in tags_tag.find_all('a'):
            tag = self.session.tag(tag_link.text)
            # Tag tooltip reads like "123 people"; pull out the count.
            num_people = int(re.match(r'(?P<people>[0-9]+) people', tag_link.get('title')).group('people'))
            media_info[u'popular_tags'][tag] = num_people
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    return media_info
def set_mag_offsets_send(self, target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z, force_mavlink1=False):
    '''
    Deprecated. Use MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS instead. Set the
    magnetometer offsets.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    mag_ofs_x        : magnetometer X offset (int16_t)
    mag_ofs_y        : magnetometer Y offset (int16_t)
    mag_ofs_z        : magnetometer Z offset (int16_t)
    '''
    # Encode the message first, then hand it off to the transport.
    message = self.set_mag_offsets_encode(
        target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z)
    return self.send(message, force_mavlink1=force_mavlink1)
def index_exists(self):
    """Check to see if index exists.

    Queries the DataStore search endpoint (via a POST carrying a
    DB-Method: GET header) and returns True when it responds OK.
    """
    url = '/v2/exchange/db/{}/{}/_search'.format(self.domain, self.data_type)
    response = self.tcex.session.post(
        url, headers={'Content-Type': 'application/json', 'DB-Method': 'GET'})
    if response.ok:
        return True
    self.tcex.log.warning('The provided index was not found ({}).'.format(response.text))
    return False
def make_slash_number(self):
    """
    Charset lines have ``\\2`` or ``\\3`` depending on the type of
    partitioning and codon positions requested for our dataset.

    :return: '\\2', '\\3' or the empty string.
    """
    codon_partitioning = self.partitioning in ['by codon position', '1st-2nd, 3rd']

    if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
        return '\\2'
    if codon_partitioning and self.codon_positions in ['ALL', None]:
        return '\\3'
    return ''
def download(self,
             files=None,
             formats=None,
             glob_pattern=None,
             dry_run=None,
             verbose=None,
             silent=None,
             ignore_existing=None,
             checksum=None,
             destdir=None,
             no_directory=None,
             retries=None,
             item_index=None,
             ignore_errors=None,
             on_the_fly=None,
             return_responses=None,
             no_change_timestamp=None,
             params=None):
    """Download files from an item.

    :param files: (optional) Only download files matching given file names.

    :type formats: str
    :param formats: (optional) Only download files matching the given
                    Formats.

    :type glob_pattern: str
    :param glob_pattern: (optional) Only download files matching the given
                         glob pattern.

    :type dry_run: bool
    :param dry_run: (optional) Output download URLs to stdout, don't
                    download anything.

    :type verbose: bool
    :param verbose: (optional) Turn on verbose output.

    :type silent: bool
    :param silent: (optional) Suppress all output.

    :type ignore_existing: bool
    :param ignore_existing: (optional) Skip files that already exist
                            locally.

    :type checksum: bool
    :param checksum: (optional) Skip downloading file based on checksum.

    :type destdir: str
    :param destdir: (optional) The directory to download files to.

    :type no_directory: bool
    :param no_directory: (optional) Download files to current working
                         directory rather than creating an item directory.

    :type retries: int
    :param retries: (optional) The number of times to retry on failed
                    requests.

    :type item_index: int
    :param item_index: (optional) The index of the item for displaying
                       progress in bulk downloads.

    :type ignore_errors: bool
    :param ignore_errors: (optional) Don't fail if a single file fails to
                          download, continue to download other files.

    :type on_the_fly: bool
    :param on_the_fly: (optional) Download on-the-fly files (i.e. derivative EPUB,
                       MOBI, DAISY files).

    :type return_responses: bool
    :param return_responses: (optional) Rather than downloading files to disk, return
                             a list of response objects.

    :type no_change_timestamp: bool
    :param no_change_timestamp: (optional) If True, leave the time stamp as the
                                current time instead of changing it to that given in
                                the original archive.

    :type params: dict
    :param params: (optional) URL parameters to send with
                   download request (e.g. `cnt=0`).

    :rtype: bool
    :returns: True if all files have been downloaded successfully.
    """
    # Normalize all tri-state (None/True/False) flags to plain values.
    dry_run = False if dry_run is None else dry_run
    verbose = False if verbose is None else verbose
    silent = False if silent is None else silent
    ignore_existing = False if ignore_existing is None else ignore_existing
    ignore_errors = False if not ignore_errors else ignore_errors
    checksum = False if checksum is None else checksum
    no_directory = False if no_directory is None else no_directory
    return_responses = False if not return_responses else True
    no_change_timestamp = False if not no_change_timestamp else no_change_timestamp
    params = None if not params else params

    if not dry_run:
        # Progress header: identifier, plus the bulk-download index if any.
        if item_index and verbose is True:
            print('{0} ({1}):'.format(self.identifier, item_index))
        elif item_index and silent is False:
            print('{0} ({1}): '.format(self.identifier, item_index), end='')
        elif item_index is None and verbose is True:
            print('{0}:'.format(self.identifier))
        elif item_index is None and silent is False:
            print(self.identifier, end=': ')
        sys.stdout.flush()

    if self.is_dark is True:
        # Dark (access-restricted) items cannot be downloaded at all.
        msg = 'skipping {0}, item is dark'.format(self.identifier)
        log.warning(msg)
        if verbose:
            print(' ' + msg)
        elif silent is False:
            print(msg)
        return
    elif self.metadata == {}:
        msg = 'skipping {0}, item does not exist.'.format(self.identifier)
        log.warning(msg)
        if verbose:
            print(' ' + msg)
        elif silent is False:
            print(msg)
        return

    # Select the files to download.
    # NOTE(review): `formats` and `glob_pattern` each REPLACE the previous
    # selection rather than narrowing it -- the last filter given wins.
    if files:
        files = self.get_files(files, on_the_fly=on_the_fly)
    else:
        files = self.get_files(on_the_fly=on_the_fly)
    if formats:
        files = self.get_files(formats=formats, on_the_fly=on_the_fly)
    if glob_pattern:
        files = self.get_files(glob_pattern=glob_pattern, on_the_fly=on_the_fly)

    if not files:
        msg = 'skipping {0}, no matching files found.'.format(self.identifier)
        log.info(msg)
        if verbose:
            print(' ' + msg)
        elif silent is False:
            print(msg, end='')

    errors = list()
    responses = list()
    for f in files:
        if no_directory:
            path = f.name
        else:
            path = os.path.join(self.identifier, f.name)
        if dry_run:
            # Dry run: just print the URL, download nothing.
            print(f.url)
            continue
        r = f.download(path, verbose, silent, ignore_existing, checksum, destdir,
                       retries, ignore_errors, None, return_responses,
                       no_change_timestamp, params)
        if return_responses:
            responses.append(r)
        # A False result means this particular file failed.
        if r is False:
            errors.append(f.name)

    if silent is False and verbose is False and dry_run is False:
        if errors:
            print(' - errors')
        else:
            print(' - success')

    if return_responses:
        return responses
    else:
        return errors
def namedb_get_account_history(cur, address, offset=None, count=None):
    """
    Get the history of an account's tokens.

    Rows are returned newest-first (ordered by block_id and vtxindex,
    descending), optionally limited/offset for pagination.
    """
    query = 'SELECT * FROM accounts WHERE address = ? ORDER BY block_id DESC, vtxindex DESC'
    query_args = (address,)

    # LIMIT/OFFSET are appended only when requested, keeping the
    # placeholder list in sync with the SQL text.
    if count is not None:
        query += ' LIMIT ?'
        query_args += (count,)
    if offset is not None:
        query += ' OFFSET ?'
        query_args += (offset,)
    query += ';'

    rows = namedb_query_execute(cur, query, query_args)
    return [dict(row) for row in rows]
def crossJoin(self, other):
    """Returns the cartesian product with another :class:`DataFrame`.

    :param other: Right side of the cartesian product.

    >>> df.select("age", "name").collect()
    [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    >>> df2.select("name", "height").collect()
    [Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
    >>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
    [Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
     Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
    """
    # Delegate to the JVM-side DataFrame and wrap the result.
    return DataFrame(self._jdf.crossJoin(other._jdf), self.sql_ctx)
def bank_account_query(self, number, date, account_type, bank_id):
    """Bank account statement request."""
    # Build the request payload, then send it through the
    # authenticated query channel.
    request = self._bareq(number, date, account_type, bank_id)
    return self.authenticated_query(request)
def analyze_beam_spot(scan_base, combine_n_readouts=1000, chunk_size=10000000, plot_occupancy_hists=False, output_pdf=None, output_file=None):
    ''' Determines the mean x and y beam spot position as a function of time. Therefore the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined
    for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um.

    Parameters
    ----------
    scan_base: list of str
        scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ]
    combine_n_readouts: int
        the number of read outs to combine (e.g. 1000)
    chunk_size: int
        the maximum chunk size used during read; if too big a memory error
        occurs, if too small the analysis takes longer
    plot_occupancy_hists: bool
        if True, also plot an occupancy histogram per combined time slice
    output_pdf: PdfPages
        PdfPages file object, if none the plot is printed to screen
    output_file: str
        optional HDF5 file to append a 'Beamspot' table to

    Returns
    -------
    (time_stamp, x, y): lists of the slice timestamps and the mean beam
    position (in pixel units) for each slice.
    '''
    time_stamp = []
    x = []
    y = []

    for data_file in scan_base:
        with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_hit_file_h5:
            # get data and data pointer
            meta_data_array = in_hit_file_h5.root.meta_data[:]
            hit_table = in_hit_file_h5.root.Hits

            # determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
            parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts])))

            # create a event_numer index (important)
            analysis_utils.index_event_number(hit_table)

            # initialize the analysis and set settings
            analyze_data = AnalyzeRawData()
            analyze_data.create_tot_hist = False
            analyze_data.create_bcid_hist = False
            analyze_data.histogram.set_no_scan_parameter()

            # variables for read speed up
            index = 0  # index where to start the read out, 0 at the beginning, increased during looping
            best_chunk_size = chunk_size

            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
            progress_bar.start()

            # loop over the selected events
            for parameter_index, parameter_range in enumerate(parameter_ranges):
                logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
                analyze_data.reset()  # resets the data of the last analysis

                # loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given
                readout_hit_len = 0  # variable to calculate a optimal chunk size value from the number of hits for speed up
                for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size):
                    analyze_data.analyze_hits(hits)  # analyze the selected hits in chunks
                    readout_hit_len += hits.shape[0]
                    progress_bar.update(index)
                best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size  # to increase the readout speed, estimated the number of hits for one read instruction

                # get and store results: project occupancy onto x and y and
                # take the histogram mean as the beam spot position.
                occupancy_array = analyze_data.histogram.get_occupancy()
                projection_x = np.sum(occupancy_array, axis=0).ravel()
                projection_y = np.sum(occupancy_array, axis=1).ravel()
                x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80)))
                y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336)))
                time_stamp.append(parameter_range[0])
                if plot_occupancy_hists:
                    plotting.plot_occupancy(occupancy_array[:, :, 0], title='Occupancy for events between ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[0])) + ' and ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[1])), filename=output_pdf)
            progress_bar.finish()

    # Pixel pitch: 250 um in x, 50 um in y -- converts pixel index to um.
    plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y], title='Mean beam position', x_label='x [um]', y_label='y [um]', marker_style='-o', filename=output_pdf)

    if output_file:
        with tb.open_file(output_file, mode="a") as out_file_h5:
            # NOTE(review): bare zip() here implies Python 2; under Python 3
            # np.array needs list(zip(...)) -- confirm target interpreter.
            rec_array = np.array(zip(time_stamp, x, y), dtype=[('time_stamp', float), ('x', float), ('y', float)])
            try:
                beam_spot_table = out_file_h5.create_table(out_file_h5.root, name='Beamspot', description=rec_array, title='Beam spot position', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                beam_spot_table[:] = rec_array
            except tb.exceptions.NodeError:
                logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.')
    return time_stamp, x, y
def create_session(docker_image=None,
                   docker_rm=None,
                   echo=False,
                   loglevel='WARNING',
                   nocolor=False,
                   session_type='bash',
                   vagrant_session_name=None,
                   vagrant_image='ubuntu/xenial64',
                   vagrant_gui=False,
                   vagrant_memory='1024',
                   vagrant_num_machines='1',
                   vagrant_provider='virtualbox',
                   vagrant_root_folder=None,
                   vagrant_swapsize='2G',
                   vagrant_version='1.8.6',
                   vagrant_virt_method='virtualbox',
                   vagrant_cpu='1',
                   video=-1,
                   walkthrough=False):
    """Create a distinct ShutIt session.

    Session types:
        bash    - a bash shell is spawned
        vagrant - a Vagrantfile is created and 'vagrant up'ped
    """
    assert session_type in ('bash','docker','vagrant'), shutit_util.print_debug()
    global_object = shutit_global.shutit_global_object
    # A positive video delay implies walkthrough mode.
    if video != -1 and video > 0:
        walkthrough = True
    if session_type in ('bash', 'docker'):
        return global_object.create_session(session_type,
                                            docker_image=docker_image,
                                            rm=docker_rm,
                                            echo=echo,
                                            walkthrough=walkthrough,
                                            walkthrough_wait=video,
                                            nocolor=nocolor,
                                            loglevel=loglevel)
    elif session_type == 'vagrant':
        if vagrant_session_name is None:
            # Generate a unique session name when none was supplied.
            vagrant_session_name = 'shutit' + shutit_util.random_id()
        if isinstance(vagrant_num_machines, int):
            vagrant_num_machines = str(vagrant_num_machines)
        # Must be a string that parses cleanly as an integer.
        assert isinstance(vagrant_num_machines, str)
        assert isinstance(int(vagrant_num_machines), int)
        if vagrant_root_folder is None:
            vagrant_root_folder = shutit_global.shutit_global_object.owd
        return create_session_vagrant(vagrant_session_name,
                                      vagrant_num_machines,
                                      vagrant_image,
                                      vagrant_provider,
                                      vagrant_gui,
                                      vagrant_memory,
                                      vagrant_swapsize,
                                      echo,
                                      walkthrough,
                                      nocolor,
                                      video,
                                      vagrant_version,
                                      vagrant_virt_method,
                                      vagrant_root_folder,
                                      vagrant_cpu,
                                      loglevel)
def get_room_member_profile(self, room_id, user_id, timeout=None):
    """Call get room member profile API.

    https://devdocs.line.me/en/#get-group-room-member-profile

    Gets the user profile of a member of a room that the bot is in.
    This can be the user ID of a user who has not added the bot as a
    friend or has blocked the bot.

    :param str room_id: Room ID
    :param str user_id: User ID
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    :rtype: :py:class:`linebot.models.responses.Profile`
    :return: Profile instance
    """
    endpoint = '/v2/bot/room/{room_id}/member/{user_id}'.format(
        room_id=room_id, user_id=user_id)
    response = self._get(endpoint, timeout=timeout)
    return Profile.new_from_json_dict(response.json)
def get_property_names(self, is_allprop):
    """Return list of supported property names in Clark Notation.

    Note that 'allprop', despite its name, which remains for
    backward-compatibility, does not return every property, but only dead
    properties and the live properties defined in RFC4918.

    This default implementation returns a combination of:

    - Supported standard live properties in the {DAV:} namespace, if the
      related getter method returns not None.
    - {DAV:}lockdiscovery and {DAV:}supportedlock, if a lock manager is
      present
    - If a property manager is present, then a list of dead properties is
      appended

    A resource provider may override this method, to add a list of
    supported custom live property names.
    """
    # Live properties
    names = ["{DAV:}resourcetype"]
    if self.get_creation_date() is not None:
        names.append("{DAV:}creationdate")
    if self.get_content_length() is not None:
        # Only non-collections are expected to report a content length.
        assert not self.is_collection
        names.append("{DAV:}getcontentlength")
    # Remaining standard live properties share the same pattern:
    # advertise the property iff its getter returns a value.
    for prop_name, getter in (
        ("{DAV:}getcontenttype", self.get_content_type),
        ("{DAV:}getlastmodified", self.get_last_modified),
        ("{DAV:}displayname", self.get_display_name),
        ("{DAV:}getetag", self.get_etag),
    ):
        if getter() is not None:
            names.append(prop_name)
    # Locking properties
    if self.provider.lock_manager and not self.prevent_locking():
        names.extend(_lockPropertyNames)
    # Dead properties
    if self.provider.prop_manager:
        refUrl = self.get_ref_url()
        names.extend(
            self.provider.prop_manager.get_properties(refUrl, self.environ)
        )
    return names
def calculate_bbh(blast_results_1, blast_results_2, r_name=None, g_name=None, outdir=''):
    """Calculate the best bidirectional BLAST hits (BBH) and save a dataframe of results.

    Args:
        blast_results_1 (str): BLAST results for reference vs. other genome
        blast_results_2 (str): BLAST results for other vs. reference genome
        r_name: Name of reference genome
        g_name: Name of other genome
        outdir: Directory where BLAST results are stored.

    Returns:
        Path to the CSV file with the Pandas DataFrame of BBH results.
    """
    # TODO: add force_rerun option
    cols = ['gene', 'subject', 'PID', 'alnLength', 'mismatchCount', 'gapOpenCount', 'queryStart', 'queryEnd',
            'subjectStart', 'subjectEnd', 'eVal', 'bitScore']

    # Infer genome names from the "<ref>_vs_<other>_blast.out" file names
    # when not given explicitly.
    if not r_name and not g_name:
        r_name = op.basename(blast_results_1).split('_vs_')[0]
        g_name = op.basename(blast_results_1).split('_vs_')[1].replace('_blast.out', '')
        r_name2 = op.basename(blast_results_2).split('_vs_')[1].replace('_blast.out', '')
        if r_name != r_name2:
            log.warning('{} != {}'.format(r_name, r_name2))

    outfile = op.join(outdir, '{}_vs_{}_bbh.csv'.format(r_name, g_name))
    # Skip the computation when a non-empty result file already exists.
    if op.exists(outfile) and os.stat(outfile).st_size != 0:
        log.debug('{} vs {} BLAST BBHs already found at {}'.format(r_name, g_name, outfile))
        return outfile

    bbh1 = pd.read_csv(blast_results_1, sep='\t', names=cols)
    bbh2 = pd.read_csv(blast_results_2, sep='\t', names=cols)

    log.debug('Finding BBHs for {} vs. {}'.format(r_name, g_name))
    best_hits = []
    for g in bbh1[pd.notnull(bbh1.gene)].gene.unique():
        res = bbh1[bbh1.gene == g]
        if len(res) == 0:
            continue
        # .ix was removed in pandas 1.0; .loc with the label returned by
        # idxmax() is the equivalent lookup.
        best_hit = res.loc[res.PID.idxmax()].copy()
        best_gene = best_hit.subject
        res2 = bbh2[bbh2.gene == best_gene]
        if len(res2) == 0:
            continue
        best_hit2 = res2.loc[res2.PID.idxmax()]
        best_gene2 = best_hit2.subject
        # '<=>' marks a true bidirectional best hit, '->' a one-way hit.
        if g == best_gene2:
            best_hit['BBH'] = '<=>'
        else:
            best_hit['BBH'] = '->'
        best_hits.append(pd.DataFrame(best_hit).transpose())

    # Concatenate once at the end instead of growing a DataFrame per
    # iteration, which is quadratic in the number of hits.
    out = pd.concat(best_hits) if best_hits else pd.DataFrame()
    out.to_csv(outfile)
    log.debug('{} vs {} BLAST BBHs saved at {}'.format(r_name, g_name, outfile))
    return outfile
def fetch_by_client_id(self, client_id):
    """
    Retrieve a client by its identifier.

    :param client_id: Identifier of a client app.
    :return: An instance of :class:`oauth2.Client`.
    :raises: ClientNotFoundError
    """
    registry = self.clients
    if client_id in registry:
        return registry[client_id]
    raise ClientNotFoundError
def hgetall(self, name):
    """
    Returns all the fields and values in the Hash.

    :param name: str the name of the redis key
    :return: Future()
    """
    with self.pipe as pipe:
        future = Future()
        raw = pipe.hgetall(self.redis_key(name))

        def on_execute():
            # Decode field names and values once the pipeline resolves.
            decode_member = self.memberparse.decode
            decode_value = self._value_decode
            decoded = {}
            for field, value in raw.result.items():
                field = decode_member(field)
                decoded[field] = decode_value(field, value)
            future.set(decoded)

        pipe.on_execute(on_execute)
        return future
def _parse_seq_body(line):
"""
Ex:
{()YVPFARKYRPKFFREVIGQEAPVRILKALNcknpskgepcgereiDRGVFPDVRA-LKLLDQASVYGE()}*
MENINNI{()----------FKLILVGDGKFFSSSGEIIFNIWDTKFGGLRDGYYRLTYKNEDDM()}*
Or:
{(HY)ELPWVEKYR...
The sequence fragments in parentheses represent N- or C-terminal flanking
regions that are not part of the alignment block (I think). Most tools don't
include them, but some do, apparently.
"""
line = line.rstrip('*')
if '{()' in line:
head, _rest = line.split('{()', 1)
else:
# Match parens
_rest = line.split('{(', 1)[1]
head, _rest = _rest.split(')', 1)
if '()}' in _rest:
molseq, tail = _rest.split('()}', 1)
else:
# Match parens
molseq, _rest = _rest.split('(', 1)
tail = _rest.split(')}', 1)[0]
return (head, molseq, tail) | 0.002317 |
def pick_and_display_buffer(self, i):
    """
    Pick the i-th buffer from the buffer list and display it.

    When the index is out of range, falls back to the first buffer.
    With only a single buffer nothing happens: the listing is already
    displayed.

    :param i: int
    :return: None
    """
    if len(self.buffers) != 1:
        try:
            self.display_buffer(self.buffers[i])
        except IndexError:
            # i > len: fall back to the first buffer.
            self.display_buffer(self.buffers[0])
def transform(odtfile, debug=False, parsable=False, outputdir=None):
""" Given an ODT file this returns a tuple containing
the cnxml, a dictionary of filename -> data, and a list of errors """
# Store mapping of images extracted from the ODT file (and their bits)
images = {}
# Log of Errors and Warnings generated
# For example, the text produced by XSLT should be:
# {'level':'WARNING',
# 'msg' :'Headings without text between them are not allowed',
# 'id' :'import-auto-id2376'}
# That way we can put a little * near all the cnxml where issues arose
errors = []
zip = zipfile.ZipFile(odtfile, 'r')
content = zip.read('content.xml')
xml = etree.fromstring(content)
def appendLog(xslDoc):
if hasattr(xslDoc, 'error_log'):
for entry in xslDoc.error_log:
# Entries are of the form:
# {'level':'ERROR','id':'id1234','msg':'Descriptive message'}
text = entry.message
try:
dict = json.loads(text)
errors.append(dict)
except ValueError:
errors.append({
u'level':u'CRITICAL',
u'id' :u'(none)',
u'msg' :unicode(text) })
def injectStyles(xml):
# HACK - need to find the object location from the manifest ...
strStyles = zip.read('styles.xml')
parser = etree.XMLParser()
parser.feed(strStyles)
stylesXml = parser.close()
for i, obj in enumerate(STYLES_XPATH(stylesXml)):
xml.append(obj)
return xml
# All MathML is stored in separate files "Object #/content.xml"
# This converter includes the MathML by looking up the file in the zip
def mathIncluder(xml):
for i, obj in enumerate(MATH_XPATH(xml)):
strMathPath = MATH_HREF_XPATH(obj)[0] # Or obj.get('{%s}href' % XLINK_NS)
if strMathPath[0] == '#':
strMathPath = strMathPath[1:]
# Remove leading './' Zip doesn't like it
if strMathPath[0] == '.':
strMathPath = strMathPath[2:]
# HACK - need to find the object location from the manifest ...
strMathPath = os.path.join(strMathPath, 'content.xml')
strMath = zip.read(strMathPath)
#parser = etree.XMLParser(encoding='utf-8')
#parser.feed(strMath)
#math = parser.close()
math = etree.parse(StringIO(strMath)).getroot()
# Replace the reference to the Math with the actual MathML
obj.getparent().replace(obj, math)
return xml
def imagePuller(xml):
for i, obj in enumerate(IMAGE_XPATH(xml)):
strPath = IMAGE_HREF_XPATH(obj)[0]
strName = IMAGE_NAME_XPATH(obj)[0]
fileNeedEnding = ( strName.find('.') == -1 )
if fileNeedEnding:
strName = strName + strPath[strPath.index('.'):]
if strPath[0] == '#':
strPath = strPath[1:]
# Remove leading './' Zip doesn't like it
if strPath[0] == '.':
strPath = strPath[2:]
image = zip.read(strPath)
images[strName] = image
# Later on, an XSL pass will convert the draw:frame to a c:image and
# set the @src correctly
return xml
def drawPuller(xml):
    """Extract each embedded OOo Draw object into its own .odg file, render
    it to PNG via soffice, and stash the PNG bytes in `images`.

    The parent node gets an `ooo_drawing` attribute naming the PNG so a later
    pass can reference it; appending `obj` into the new document removes it
    from `xml`.
    """
    styles = DRAW_STYLES_XPATH(xml)
    empty_odg_dirname = os.path.join(dirname, 'empty_odg_template')
    temp_dirname = tempfile.mkdtemp()
    for i, obj in enumerate(DRAW_XPATH(xml)):
        # Copy everything except content.xml from the empty ODG (OOo Draw) template into a new zipfile
        odg_filename = DRAW_FILENAME_PREFIX + str(i) + '.odg'
        png_filename = DRAW_FILENAME_PREFIX + str(i) + '.png'
        # add PNG filename as attribute to parent node. The good thing is: The child (obj) will get lost! :-)
        parent = obj.getparent()
        parent.attrib['ooo_drawing'] = png_filename
        odg_zip = zipfile.ZipFile(os.path.join(temp_dirname, odg_filename), 'w', zipfile.ZIP_DEFLATED)
        for root, dirs, files in os.walk(empty_odg_dirname):
            for name in files:
                if name not in ('content.xml', 'styles.xml'):  # copy everything inside ZIP except content.xml or styles.xml
                    sourcename = os.path.join(root, name)
                    # http://stackoverflow.com/a/1193171/756056
                    arcname = os.path.join(root[len(empty_odg_dirname):], name)  # Path name inside the ZIP file, empty_odg_template is the root folder
                    odg_zip.write(sourcename, arcname)
        content = etree.parse(os.path.join(empty_odg_dirname, 'content.xml'))
        # Inject content styles in empty OOo Draw content.xml
        content_style_xpath = etree.XPath('/office:document-content/office:automatic-styles', namespaces=NAMESPACES)
        content_styles = content_style_xpath(content)
        for style in styles:
            content_styles[0].append(deepcopy(style))
        # Inject drawing in empty OOo Draw content.xml
        content_page_xpath = etree.XPath('/office:document-content/office:body/office:drawing/draw:page', namespaces=NAMESPACES)
        content_page = content_page_xpath(content)
        content_page[0].append(obj)
        # write modified content.xml
        odg_zip.writestr('content.xml', etree.tostring(content, xml_declaration=True, encoding='UTF-8'))
        # copy styles.xml from odt to odg without modification
        styles_xml = zip.read('styles.xml')
        odg_zip.writestr('styles.xml', styles_xml)
        odg_zip.close()
        # TODO: Better error handling in the future.
        try:
            # convert every odg to png
            command = '/usr/bin/soffice -headless -nologo -nofirststartwizard "macro:///Standard.Module1.SaveAsPNG(%s,%s)"' % (os.path.join(temp_dirname, odg_filename),os.path.join(temp_dirname, png_filename))
            os.system(command)
            # save every image to memory; close the handle instead of leaking it
            with open(os.path.join(temp_dirname, png_filename), 'r') as png_file:
                images[png_filename] = png_file.read()
            if outputdir is not None:
                shutil.copy(os.path.join(temp_dirname, odg_filename), os.path.join(outputdir, odg_filename))
                shutil.copy(os.path.join(temp_dirname, png_filename), os.path.join(outputdir, png_filename))
        except Exception:
            # Best-effort: a failed conversion must not abort the whole
            # pipeline.  `except Exception` (not bare except) so that
            # KeyboardInterrupt/SystemExit still propagate.
            pass
    # delete temporary directory
    shutil.rmtree(temp_dirname)
    return xml
# Reparse after XSL because the RED-escape pass injects arbitrary XML
def redParser(xml):
    """Run the RED-escape pass and reparse its output.

    If the injected markup is not well-formed XML, fall back to the
    red-failed stylesheet, passing the parser's message along.
    """
    xsl = makeXsl('pass1_odt2red-escape.xsl')
    result = xsl(xml)
    appendLog(xsl)
    try:
        xml = etree.fromstring(etree.tostring(result))
    except etree.XMLSyntaxError as e:
        # `except X, e` was Python-2-only syntax; `as` works on 2.6+ and 3.x
        msg = str(e)
        xml = makeXsl('pass1_odt2red-failed.xsl')(xml, message="'%s'" % msg.replace("'", '"'))
        xml = xml.getroot()
    return xml
def replaceSymbols(xml):
    """Serialize the tree, run the symbol replacement table over it, reparse."""
    serialized = symbols.replace(etree.tostring(xml))
    return etree.fromstring(serialized)
# Ordered sequence of transforms; each is a callable taking and returning a tree.
PIPELINE = [
    drawPuller, # gets OOo Draw objects out of odt and generate odg (OOo Draw) files
    replaceSymbols,
    injectStyles, # include the styles.xml file because it contains list numbering info
    makeXsl('pass2_odt-normalize.xsl'), # This needs to be done 2x to fix headings
    makeXsl('pass2_odt-normalize.xsl'), # In the worst case all headings are 9
                                        # and need to be 1. See (testbed) southwood__Lesson_2.doc
    makeXsl('pass2_odt-collapse-spans.xsl'), # Collapse adjacent spans (for RED)
    redParser, # makeXsl('pass1_odt2red-escape.xsl'),
    makeXsl('pass4_odt-headers.xsl'),
    imagePuller, # Need to run before math because both have a <draw:image> (see xpath)
    mathIncluder,
    makeXsl('pass7_odt2cnxml.xsl'),
    makeXsl('pass8_cnxml-cleanup.xsl'),
    makeXsl('pass8.5_cnxml-cleanup.xsl'),
    makeXsl('pass9_id-generation.xsl'),
    makeXsl('pass10_processing-instruction-logger.xsl'),
    ]
# "xml" variable gets replaced during each iteration
passNum = 0
for xslDoc in PIPELINE:
    if debug: errors.append("DEBUG: Starting pass %d" % passNum)
    xml = xslDoc(xml)
    appendLog(xslDoc)
    # Optionally dump the intermediate tree after each pass for debugging
    if outputdir is not None: writeXMLFile(os.path.join(outputdir, 'pass%d.xml' % passNum), xml)
    passNum += 1
# In most cases (EIP) Invalid XML is preferable over valid but Escaped XML
if not parsable:
    xml = (makeXsl('pass11_red-unescape.xsl'))(xml)
return (xml, images, errors)
def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
    """api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]

    kind = ['data', 'images', 'files']

    Stores the built URL in ``self.url`` and the response in ``self.r``.
    :raises ValueError: if `kind` is not one of the supported values
        (previously this path crashed with NameError on the unbound `url`).
    """
    if kind in ("data", "files"):
        url = "{}/{}.{}".format(base_url, kind, fmt)
    elif kind == "images":
        url = "{}/images/{}.{}".format(base_url, size, fmt)
    else:
        raise ValueError(
            "kind must be one of 'data', 'images' or 'files', got {!r}".format(kind))
    self.url = url
    self.r = requests.get(url, params=unquote(urlencode(query)))
def join(self, timeout=None):
    """Blocks until all items in the Queue have been gotten and processed.

    The count of unfinished tasks goes up whenever an item is put on the
    queue, and down whenever a consumer thread calls task_done(); join()
    unblocks once it reaches zero.  If `timeout` is given, raise Timeout
    when the tasks do not finish within that many seconds.
    """
    with self.all_tasks_done:
        if timeout is None:
            # Untimed: wait as long as it takes.
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
            return
        if timeout < 0:
            raise ValueError("'timeout' must be a positive number")
        deadline = _time() + timeout
        while self.unfinished_tasks:
            remaining = deadline - _time()
            if remaining <= 0.0:
                raise Timeout()
            self.all_tasks_done.wait(remaining)
def SRU_Compute_CPU(activation_type, d, bidirectional=False, scale_x=1):
    """CPU version of the core SRU computation.

    Has the same interface as SRU_Compute_GPU() but is a regular Python function
    instead of a torch.autograd.Function because we don't implement backward()
    explicitly.

    activation_type: 0 = identity, 1 = tanh, 2 = relu (see dispatch below).
    d: hidden dimension per direction.
    """
    def sru_compute_cpu(u, x, bias, init=None, mask_h=None):
        # x is (length, batch, dim) for a sequence, otherwise a single step.
        bidir = 2 if bidirectional else 1
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        # k = number of projections per hidden unit (3 or 4; 4 adds a highway proj)
        k = u.size(-1) // d // bidir
        if mask_h is None:
            mask_h = 1
        u = u.view(length, batch, bidir, d, k)
        x_tilde = u[..., 0]
        forget_bias, reset_bias = bias.view(2, bidir, d)
        forget = (u[..., 1] + forget_bias).sigmoid()
        reset = (u[..., 2] + reset_bias).sigmoid()
        if k == 3:
            # No separate highway projection: reuse the (optionally scaled) input.
            x_prime = x.view(length, batch, bidir, d)
            x_prime = x_prime * scale_x if scale_x != 1 else x_prime
        else:
            x_prime = u[..., 3]
        h = Variable(x.data.new(length, batch, bidir, d))
        if init is None:
            c_init = Variable(x.data.new(batch, bidir, d).zero_())
        else:
            c_init = init.view(batch, bidir, d)
        c_final = []
        for di in range(bidir):
            # Direction 0 walks time forward; direction 1 walks it backward.
            if di == 0:
                time_seq = range(length)
            else:
                time_seq = range(length - 1, -1, -1)
            c_prev = c_init[:, di, :]
            for t in time_seq:
                # Fused form of c_t = f*c_{t-1} + (1-f)*x_tilde
                c_t = (c_prev - x_tilde[t, :, di, :]) * forget[t, :, di, :] + x_tilde[t, :, di, :]
                c_prev = c_t
                if activation_type == 0:
                    g_c_t = c_t
                elif activation_type == 1:
                    g_c_t = c_t.tanh()
                elif activation_type == 2:
                    g_c_t = nn.functional.relu(c_t)
                else:
                    assert False, 'Activation type must be 0, 1, or 2, not {}'.format(activation_type)
                # Highway connection gated by `reset`; mask_h applies the dropout mask
                h[t, :, di, :] = (g_c_t * mask_h - x_prime[t, :, di, :]) * reset[t, :, di, :] + x_prime[t, :, di, :]
            c_final.append(c_t)
        return h.view(length, batch, -1), torch.stack(c_final, dim=1).view(batch, -1)
    return sru_compute_cpu
def _log(self, message, stream, color=None, newline=False):
    '''
    Logs the message to the given stream (sys.stdout or sys.stderr).

    When color is defined and the TERM environment variable contains the
    string "color", then the output will be colored.

    :param message: text to write
    :param stream: file-like object to write to
    :param color: ANSI color start sequence, or None for plain output
    :param newline: write an extra trailing newline after the message
    '''
    if color and self.color_term:
        colorend = Logger.COLOR_END
    else:
        color = colorend = ''
    stream.write('{color}{message}{colorend}\n'.format(
        color=color,
        message=message,
        colorend=colorend
    ))
    if newline:
        # Write the extra newline to the SAME stream; the original wrote it
        # to sys.stdout even when logging to stderr, interleaving output.
        stream.write('\n')
    stream.flush()
def CO_ratio(self, ifig, ixaxis='model'):
    """
    plot surface C/O ratio in Figure ifig with x-axis quantity ixaxis

    Parameters
    ----------
    ifig : integer
        Figure number in which to plot
    ixaxis : string
        what quantity is to be on the x-axis, either 'time' or 'model'
        The default is 'model'

    Raises
    ------
    IOError
        If ixaxis is neither 'time' nor 'model'.
    """
    # The docstring always promised a default of 'model'; the signature now
    # actually provides it (backward compatible -- existing calls still work).
    def C_O(model):
        # Number ratio from mass fractions: divide by mass numbers 12 and 16,
        # i.e. (c12/12)/(o16/16) = c12*4 / (o16*3)
        surface_c12 = model.get('surface_c12')
        surface_o16 = model.get('surface_o16')
        return old_div((surface_c12 * 4.), (surface_o16 * 3.))

    if ixaxis == 'time':
        xax = self.get('star_age')
    elif ixaxis == 'model':
        xax = self.get('model_number')
    else:
        raise IOError("ixaxis not recognised")
    pl.figure(ifig)
    pl.plot(xax, C_O(self))
def send_msg_async(self, *, message_type, user_id=None, group_id=None, discuss_id=None, message, auto_escape=False):
    """
    Send a message (asynchronous version)
    ------------

    :param str message_type: message type; one of `private`, `group`, `discuss`,
        for a private chat, a group chat, or a discussion group respectively
    :param int user_id: target QQ user id (required when message_type is `private`)
    :param int group_id: group id (required when message_type is `group`)
    :param int discuss_id: discussion group id, obtained from a reported message
        (required when message_type is `discuss`)
    :param str | list[ dict[ str, unknown ] ] message: the content to send
    :param bool auto_escape: send the content as plain text (i.e. do not parse
        CQ codes); has no effect when `message` is a `list`
    :return: None
    :rtype: None
    """
    return super().__getattr__('send_msg_async') \
        (message_type=message_type, user_id=user_id, group_id=group_id,
         discuss_id=discuss_id, message=message, auto_escape=auto_escape)
def get_datetime(self, timestamp: str, unix=True):
    """Convert a %Y%m%dT%H%M%S.%fZ timestamp to a UNIX timestamp
    or a datetime.datetime object.

    Parameters
    ---------
    timestamp: str
        A timestamp in the %Y%m%dT%H%M%S.%fZ format, usually returned by the
        API in the ``created_time`` field (e.g. 20180718T145906.000Z)
    unix: Optional[bool] = True
        Whether to return a POSIX timestamp (seconds since epoch) or not

    Returns int or datetime.datetime
    """
    parsed = datetime.strptime(timestamp, '%Y%m%dT%H%M%S.%fZ')
    if not unix:
        return parsed
    return int(parsed.timestamp())
def search(self, keyword, children=None, arg=None):
    """Return list of receiver's substmts with `keyword`.

    Searches `children` if given, otherwise self.substmts; when `arg` is
    not None, only statements whose arg matches are returned.
    """
    pool = self.substmts if children is None else children
    matches = []
    for stmt in pool:
        if stmt.keyword != keyword:
            continue
        if arg is not None and stmt.arg != arg:
            continue
        matches.append(stmt)
    return matches
def create_from_item(self, languages, item, fields, trans_instance=None):
    """
    Creates tasks from a model instance "item" (master)
    Used in the api call

    :param languages: language codes to create tasks for
    :param item: the master model instance; must be a TranslatableModel
    :param fields: names of the translatable fields to create tasks from
    :param trans_instance: determines if we are in bulk mode or not.
                           If it has a value we're processing by the signal trigger,
                           if not we're processing either by the api or the mixin
    :return: list of created/updated TransTask objects, or None when `item`
             is not translatable or has no main-language translation
    """
    if not isinstance(item, TranslatableModel):
        return
    self.log('gonna parse fields: {}'.format(fields))
    with self.lock:
        result = []
        if trans_instance:
            # get the values from the instance that is being saved, values not saved yet
            trans = trans_instance
        else:
            # get the values from the db instance
            trans = self.get_translation_from_instance(item, self.main_language)
        if not trans:
            return
        for field in fields:
            self.log('parsing field: {}'.format(field))
            # for every field
            object_field_label = self.get_field_label(trans, field)
            object_field_value = getattr(trans, field)
            # if object_field_value is None or object_field_value == '':
            #     object_field_value = getattr(self.instance, field, '')
            self.log('object_field_value for {} - .{}.'.format(object_field_label, object_field_value))
            # Empty source values produce no translation tasks
            if object_field_value == '' or object_field_value is None:
                continue
            for lang in languages:
                # for every language
                self.log('parsing lang: {}'.format(lang))
                language = TransLanguage.objects.filter(code=lang).get()
                users = self.translators.get(lang, [])
                self.log('gonna parse users')
                for user in users:
                    # for every user we create a task
                    # check if there is already a value for the destinatation lang
                    # when we are in bulk mode, when we are in signal mode
                    # we update the destination task if it exists
                    if self.bulk_mode and self.exists_destination_lang_value(item, field, lang):
                        continue
                    ob_class_name = item.__class__.__name__
                    self.log('creating or updating object_class: {} | object_pk:{} | object_field: {}'.format(
                        ob_class_name,
                        item.pk,
                        field
                    ))
                    app_label = item._meta.app_label
                    model = ob_class_name.lower()
                    ct = ContentType.objects.get_by_natural_key(app_label, model)
                    try:
                        # One task per (content object, field, language, user)
                        task, created = TransTask.objects.get_or_create(
                            content_type=ct,
                            object_class=ob_class_name,
                            object_pk=item.pk,
                            object_field=field,
                            language=language,
                            user=user,
                            defaults={
                                'object_name': '{} - {}'.format(app_label, item._meta.verbose_name),
                                'object_field_label': object_field_label,
                                'object_field_value': object_field_value,
                                'done': False
                            }
                        )
                        if not created:
                            # Existing task: refresh the source value and reopen it
                            self.log('updating')
                            task.date_modification = datetime.now()
                            task.object_field_value = object_field_value
                            task.done = False
                            task.save()
                        result.append(task)
                    except TransTask.MultipleObjectsReturned:
                        # theorically it should not occur but if so delete the repeated tasks
                        tasks = TransTask.objects.filter(
                            content_type=ct,
                            object_class=ob_class_name,
                            object_pk=item.pk,
                            object_field=field,
                            language=language,
                            user=user
                        )
                        # Keep (and refresh) the first duplicate, delete the rest
                        for i, task in enumerate(tasks):
                            if i == 0:
                                task.date_modification = datetime.now()
                                task.object_field_value = object_field_value
                                task.done = False
                                task.save()
                            else:
                                task.delete()
        # we return every task (created or modified)
        return result
def result(filetype="PNG",saveas=None, host=cytoscape_host,port=cytoscape_port):
    """
    Checks the current network.

    Note: works only on localhost

    :param filetype: file type, default="PNG"
    :param saveas: /path/to/non/tmp/file.prefix
    :param host: cytoscape host address, default=cytoscape_host
    :param port: cytoscape port, default=1234

    :returns: an image
    """
    sleep(1)
    def MAKETMP():
        # mkstemp() also OPENS the file; close the descriptor immediately
        # since only the path is used (the original leaked one fd per call).
        fd, tmp_file = tempfile.mkstemp()
        os.close(fd)
        tmp_file = "/tmp/" + tmp_file.split("/")[-1]
        return tmp_file
    outfile = MAKETMP()
    extensions = {"PNG": ".png", "PDF": ".pdf", "CYS": ".cys", "CYJS": ".cyjs"}
    ext = extensions[filetype]
    # Fit the view first so the export captures the whole network.
    response = cytoscape("view", "fit content", host=host, port=port)
    response = cytoscape("view", "export",
                         {"options": filetype,
                          "OutputFile": outfile},
                         host=host, port=port)
    if host != 'localhost':
        # Pull the exported file back from the remote host over SFTP,
        # then remove the remote copy.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host)
        ftp_client = ssh.open_sftp()
        ftp_client.get(outfile + ext, outfile + ext)
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("rm " + outfile + ext)
    img = WImage(filename=outfile + ext)
    if saveas:
        copyfile(outfile + ext, saveas)
    os.remove(outfile + ext)
    return img
def _slug_strip(self, value):
    """
    Clean up a slug by removing slug separator characters that occur at
    the beginning or end of a slug.

    If an alternate separator is used, it will also replace any instances
    of the default '-' separator with the new separator.
    """
    sep_class = '(?:-|%s)' % re.escape(self.separator)
    # Collapse any run of separators into the configured separator ...
    collapsed = re.sub('%s+' % sep_class, self.separator, value)
    # ... then trim separators from both ends.
    return re.sub(r'^%s+|%s+$' % (sep_class, sep_class), '', collapsed)
def ensure_data_exists(self, request, data, error_message=None):
    """
    Ensure that the wrapped API client's response brings us valid data. If not, raise an error and log it.
    """
    if data:
        return
    message = (
        error_message or "Unable to fetch API response from endpoint '{}'.".format(request.get_full_path())
    )
    LOGGER.error(message)
    raise NotFound(message)
def add_data_to_df(self, data: np.array):
    """Build Pandas Dataframe in memory.

    `data` is a flat sequence of bar records laid out as
    [timestamp, high, low, open, close, volume, oi, timestamp, ...];
    rows are appended to ``self._ndf`` sorted by ascending date.
    """
    col_names = ['high_p', 'low_p', 'open_p', 'close_p', 'volume', 'oi']
    data = np.array(data).reshape(-1, len(col_names) + 1)
    df = pd.DataFrame(data=data[:, 1:], index=data[:, 0],
                      columns=col_names)
    df.index = pd.to_datetime(df.index)
    # Sort the dataframe based on ascending dates.
    df.sort_index(ascending=True, inplace=True)
    # Convert dataframe columns to float and ints.
    df[['high_p', 'low_p', 'open_p', 'close_p']] = df[
        ['high_p', 'low_p', 'open_p', 'close_p']].astype(float)
    df[['volume', 'oi']] = df[['volume', 'oi']].astype(int)
    if self._ndf.empty:
        self._ndf = df
    else:
        # DataFrame.append() was deprecated and removed in pandas 2.0;
        # pd.concat is the supported equivalent.
        self._ndf = pd.concat([self._ndf, df])
def FDMT_params(f_min, f_max, maxDT, inttime):
    """
    Summarize DM grid and other parameters.

    Computes the maximum dispersion measure reachable with `maxDT` integer
    time shifts of `inttime` seconds across the band [f_min, f_max]
    (frequencies presumably in GHz, given the 4.1488e-3 dispersion constant
    -- TODO confirm), logs the summary, and returns the derived maxDM.
    """
    maxDM = inttime*maxDT/(4.1488e-3 * (1/f_min**2 - 1/f_max**2))
    logger.info('Freqs from {0}-{1}, MaxDT {2}, Int time {3} => maxDM {4}'.format(f_min, f_max, maxDT, inttime, maxDM))
    # Return the derived value so callers can use it (it was computed and
    # then discarded before).
    return maxDM
def parse_args():
    """Parse command-line arguments for the diagnose script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Diagnose script for checking the current system.')
    # One integer toggle per diagnosable subsystem, all enabled by default.
    for topic in ('python', 'pip', 'mxnet', 'os', 'hardware', 'network'):
        parser.add_argument('--' + topic, default=1, type=int,
                            help='Diagnose {}.'.format(topic))
    parser.add_argument('--region', default='', type=str,
                        help="Additional sites in which region(s) to test. \
                        Specify 'cn' for example to test mirror sites in China.")
    parser.add_argument('--timeout', default=10, type=int,
                        help="Connection test timeout threshold, 0 to disable.")
    return parser.parse_args()
def create(args):
    """Create records.

    Argument:

        args: arguments object
    """
    # for PUT HTTP method
    action = True
    opts = args.__dict__
    if (opts.get('domain') and opts.get('name')
            and opts.get('rtype') and opts.get('content')):
        # for create sub-command
        domain = args.domain
        converter = JSONConverter(domain)
        name, rtype, content, ttl, priority = get_record_params(args)
        record_dict = converter.set_record(name, rtype, content, ttl, priority)
        json = set_json(domain, action, record=record_dict)
    else:
        # for bulk_create sub-command
        domain = args.domain if opts.get('domain') else check_infile(args.infile)
        json = set_json(domain, action, filename=args.infile)
    password = get_password(args)
    token = connect.get_token(args.username, password, args.server)
    processing.create_records(args.server, token, domain, json)
    if args.auto_update_soa == 'True':
        update_soa_serial(args)
def clear_descendants(self, source, clear_source=True):
    """Clear values and nodes calculated from `source`."""
    for node in self.cellgraph.clear_descendants(source, clear_source):
        # Drop the cached value each removed node refers to.
        del node[OBJ].data[node[KEY]]
def tuplify(*args):
    """
    Convert args to a tuple, unless there's one arg and it's a
    function, then acts a decorator.
    """
    if len(args) == 1 and callable(args[0]):
        wrapped = args[0]

        @wraps(wrapped)
        def _tupled(*call_args, **call_kwargs):
            return tuple(wrapped(*call_args, **call_kwargs))

        return _tupled
    return tuple(args)
def to_pascal(arr, is_dp=False):
    """Force data with units either hPa or Pa to be in Pa."""
    # Values already in Pa are well above these magnitude thresholds.
    threshold = 400 if is_dp else 1200
    if np.max(np.abs(arr)) >= threshold:
        return arr
    logging.debug("Conversion applied: hPa -> Pa to array: {}".format(arr))
    return arr*100.
def _match_cubes(ccube_clean, ccube_dirty,
                 bexpcube_clean, bexpcube_dirty,
                 hpx_order):
    """ Match the HEALPIX scheme and order of all the input cubes

    return a dictionary of cubes with the same HEALPIX scheme and order
    """
    def _at_order(cube):
        # Regrade to the requested order (no-op when already there),
        # preserving counts across the regrade.
        if hpx_order == cube.hpx.order:
            return cube
        return cube.ud_grade(hpx_order, preserve_counts=True)

    # The same regrade logic was copy-pasted four times before.
    ccube_clean_at_order = _at_order(ccube_clean)
    ccube_dirty_at_order = _at_order(ccube_dirty)
    bexpcube_clean_at_order = _at_order(bexpcube_clean)
    bexpcube_dirty_at_order = _at_order(bexpcube_dirty)

    # Align the NEST/RING scheme of the other cubes with the original
    # clean counts cube.
    if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
        ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme()
    if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest:
        bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme()
    if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest:
        bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme()

    return dict(ccube_clean=ccube_clean_at_order,
                ccube_dirty=ccube_dirty_at_order,
                bexpcube_clean=bexpcube_clean_at_order,
                bexpcube_dirty=bexpcube_dirty_at_order)
def op_get_consensus_fields( op_name ):
    """
    Get the set of consensus-generating fields for an operation.

    :param op_name: operation name; must be a key of SERIALIZE_FIELDS
    :returns: a copy of the operation's field list (safe for callers to mutate)
    :raises Exception: if the operation is unknown
    """
    # `global` is unnecessary for a read, and `in dict` needs no .keys()
    if op_name not in SERIALIZE_FIELDS:
        raise Exception("No such operation '%s'" % op_name )
    return SERIALIZE_FIELDS[op_name][:]
def carmichael() -> Iterator[int]:
    """Composite numbers n such that a^(n-1) == 1 (mod n) for every a coprime
    to n.

    https://oeis.org/A002997
    """
    for n in composite():
        # Equivalent Fermat-style test: a^n == a (mod n) for every a.
        if all(pow(a, n, n) == a for a in range(2, n)):
            yield n
async def unformat(self):
    """Unformat this block device.

    Issues the handler's async `unformat` call for this node/device and
    stores the handler's response in ``self._data``.
    """
    self._data = await self._handler.unformat(
        system_id=self.node.system_id, id=self.id)
def apply_uncertainty(self, value, source):
    """
    Apply this branchset's uncertainty with value ``value`` to source
    ``source``, if it passes :meth:`filters <filter_source>`.

    This method is not called for uncertainties of types "gmpeModel"
    and "sourceModel".

    :param value:
        The actual uncertainty value of :meth:`sampled <sample>` branch.
        Type depends on uncertainty type.
    :param source:
        The opensha source data object.
    :return:
        0 if the source was not changed, 1 otherwise
    """
    if not self.filter_source(source):
        # source didn't pass the filter
        return 0
    utype = self.uncertainty_type
    if utype in MFD_UNCERTAINTY_TYPES:
        self._apply_uncertainty_to_mfd(source.mfd, value)
        return 1
    if utype in GEOMETRY_UNCERTAINTY_TYPES:
        self._apply_uncertainty_to_geometry(source, value)
        return 1
    raise AssertionError("unknown uncertainty type '%s'"
                         % self.uncertainty_type)
def parse_value(value, allowed_types, name='value'):
    """Coerce an untyped HTTP parameter string into one of several types.

    Each allowed type is tried in a fixed order, least ambiguous first:
    "null" (blank string -> None), then "boolean", "integer", "number",
    "array" (must start with "[" and parse as JSON), "object" (must start
    with "{" and parse as JSON), and finally "string", which matches
    anything.

    :param str value: Parameter value. Example: "1"
    :param list allowed_types: Types that should be attempted. Example:
        ["integer", "null"]
    :param str name: Parameter name. If not specified, "value" is used.
        Example: "campaign_id"
    :returns: a tuple of a type string and coerced value
    :raises: ParseError if the value cannot be coerced to any of the types
    """
    if not isinstance(value, str):
        raise ValueError('value for %r must be a string' % name)
    if isinstance(allowed_types, str):
        allowed_types = [allowed_types]
    # "null" is handled up front: only an empty string qualifies.
    if 'null' in allowed_types and value == '':
        return 'null', None
    # _parser_funcs is ordered from least to most ambiguous; each parser
    # raises TypeError/ValueError or returns None when the value does not
    # fit its type.
    for type_name, coerce in _parser_funcs:
        if type_name not in allowed_types:
            continue
        try:
            coerced = coerce(value)
        except (TypeError, ValueError):
            # Not this type -- keep trying the remaining ones.
            continue
        if coerced is not None:
            return type_name, coerced
    raise ParseError('%s must be a valid type (%s)' %
                     (name, ', '.join(allowed_types)))
def playMovie(self):
    """Play button handler."""
    # Only act when setup has completed (state READY).
    if self.state == self.READY:
        # Create a new thread to listen for RTP packets
        print "Playing Movie"
        threading.Thread(target=self.listenRtp).start()
        # NOTE(review): playEvent is created AFTER the listener thread
        # starts; if listenRtp touches self.playEvent immediately this
        # could race -- confirm ordering is intentional.
        self.playEvent = threading.Event()
        self.playEvent.clear()
        self.sendRtspRequest(self.PLAY)
def get_doc(logger=None, plugin=None, reporthook=None):
    """
    Return URL to documentation. Attempt download if does not exist.

    Parameters
    ----------
    logger : obj or `None`
        Ginga logger.

    plugin : obj or `None`
        Plugin object. If given, URL points to plugin doc directly.
        If this function is called from within plugin class,
        pass ``self`` here.

    reporthook : callable or `None`
        Report hook for ``urlretrieve()``.

    Returns
    -------
    url : str or `None`
        URL to local documentation, if available.
    """
    from ginga.GingaPlugin import GlobalPlugin, LocalPlugin

    if isinstance(plugin, GlobalPlugin):
        plugin_page, plugin_name = 'plugins_global', str(plugin)
    elif isinstance(plugin, LocalPlugin):
        plugin_page, plugin_name = 'plugins_local', str(plugin)
    else:
        plugin_page = plugin_name = None

    try:
        index_html = _download_rtd_zip(reporthook=reporthook)
    # Download failed, use online resource
    except Exception as e:
        url = 'https://ginga.readthedocs.io/en/latest/'
        if plugin_name is not None:
            if toolkit.family.startswith('qt'):
                # This displays plugin docstring.
                url = None
            else:
                # This redirects to online doc.
                url += 'manual/{}/{}.html'.format(plugin_page, plugin_name)
        if logger is not None:
            logger.error(str(e))
    # Use local resource
    else:
        url = 'file:{}'.format(index_html)
        # https://github.com/rtfd/readthedocs.org/issues/2803
        if plugin_name is not None:
            url += '#{}'.format(plugin_name)

    return url
def get_package(self):
    """Get the URL or sandbox to release.

    Resolves ``self.directory`` either as a remote SCM URL or as a local
    sandbox path, validates it, prints the package name/version, and
    commits a dirty sandbox unless ``self.skipcommit`` is set.  Sets
    ``self.scm``, ``self.isremote`` and (for remote) ``self.remoteurl`` /
    ``self.push`` as side effects.
    """
    directory = self.directory
    develop = self.develop
    scmtype = self.scmtype
    self.scm = self.scms.get_scm(scmtype, directory)
    if self.scm.is_valid_url(directory):
        # Remote release: normalize the URL; pushes are implied.
        directory = self.urlparser.abspath(directory)
        self.remoteurl = directory
        self.isremote = self.push = True
    else:
        # Local sandbox: expand the path and validate both sandbox and package.
        directory = abspath(expanduser(directory))
        self.isremote = False
        self.scm.check_valid_sandbox(directory)
        self.setuptools.check_valid_package(directory)
    name, version = self.setuptools.get_package_info(directory, develop)
    print('Releasing', name, version)
    if not self.skipcommit:
        # Commit outstanding changes before releasing.
        if self.scm.is_dirty_sandbox(directory):
            self.scm.commit_sandbox(directory, name, version, self.push)
def can_approve(self, user, **data):
    """
    Only org admins can approve joining an organisation

    :param user: a User
    :param data: data that the user wants to update
    """
    org_admin = user.is_org_admin(self.organisation_id)
    preverifying_reseller = user.is_reseller() and data.get('pre_verified', False)
    raise Return(org_admin or preverifying_reseller)
def format_h4(s, format="text", indents=0):
    """
    Encloses string in format text

    Args, Returns: see format_h1()
    """
    _CHAR = "^"
    # Prefixes are mutually exclusive, so dispatch order is irrelevant.
    if format.startswith("markdown"):
        return ["#### {}".format(s)]
    if format.startswith("text"):
        return format_underline(s, _CHAR, indents)
    if format.startswith("rest"):
        return format_underline(s, _CHAR, 0)
def make_row_dict(row):
    """
    Takes in a DataFrame row (Series),
    and return a dictionary with the row's index as key,
    and the row's values as values.

    {col1_name: col1_value, col2_name: col2_value}
    """
    # Compute the not-null selection once (the original filtered the row
    # twice) and feed zip straight to dict (no intermediate list needed).
    kept = row[row.notnull()]
    return dict(zip(kept.index, kept.values))
def parse_requirements(fname='requirements.txt'):
    """
    Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    TODO:
        perhaps use https://github.com/davidfischer/requirements-parser instead

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    from os.path import dirname, join, exists
    import re
    require_fpath = join(dirname(__file__), fname)

    def _parse_line(line):
        """
        Parse information from a line in a requirements text file
        """
        info = {}
        if line.startswith('-e '):
            # Editable install: the package name follows '#egg='
            info['package'] = line.split('#egg=')[1]
            return info
        # Split the package name from its version specifier.
        op_pat = '(' + '|'.join(['>=', '==', '>']) + ')'
        pieces = [p.strip() for p in re.split(op_pat, line, maxsplit=1)]
        info['package'] = pieces[0]
        if len(pieces) > 1:
            op, remainder = pieces[1:]
            if ';' in remainder:
                # Handle platform specific dependencies
                # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                version, platform_deps = map(str.strip, remainder.split(';'))
                info['platform_deps'] = platform_deps
            else:
                version = remainder  # NOQA
            info['version'] = (op, version)
        return info

    # This breaks on pip install, so check that it exists.
    if not exists(require_fpath):
        return []
    packages = []
    with open(require_fpath, 'r') as f:
        for raw in f.readlines():
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            info = _parse_line(line)
            package = info['package']
            if not sys.version.startswith('3.4'):
                # apparently package_deps are broken in 3.4
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    package += ';' + platform_deps
            packages.append(package)
    return packages
def get_single_hdd_only_candidate_model(
    data,
    minimum_non_zero_hdd,
    minimum_total_hdd,
    beta_hdd_maximum_p_value,
    weights_col,
    balance_point,
):
    """ Return a single candidate hdd-only model for a particular balance
    point.

    Parameters
    ----------
    data : :any:`pandas.DataFrame`
        A DataFrame containing at least the column ``meter_value`` and
        ``hdd_<balance_point>``
        DataFrames of this form can be made using the
        :any:`eemeter.create_caltrack_daily_design_matrix` or
        :any:`eemeter.create_caltrack_billing_design_matrix` methods.
    minimum_non_zero_hdd : :any:`int`
        Minimum allowable number of non-zero heating degree day values.
    minimum_total_hdd : :any:`float`
        Minimum allowable total sum of heating degree day values.
    beta_hdd_maximum_p_value : :any:`float`
        The maximum allowable p-value of the beta hdd parameter.
    weights_col : :any:`str` or None
        The name of the column (if any) in ``data`` to use as weights.
    balance_point : :any:`float`
        The heating balance point for this model.

    Returns
    -------
    candidate_model : :any:`CalTRACKUsagePerDayCandidateModel`
        A single hdd-only candidate model, with any associated warnings.
    """
    model_type = "hdd_only"
    hdd_column = "hdd_%s" % balance_point
    formula = "meter_value ~ %s" % hdd_column

    # Unweighted fits use a scalar weight of 1 for every row.
    if weights_col is None:
        weights = 1
    else:
        weights = data[weights_col]

    period_days = weights

    # Degree-day sufficiency checks; if any fail the fit is not attempted.
    degree_day_warnings = []
    degree_day_warnings.extend(
        get_total_degree_day_too_low_warning(
            model_type,
            balance_point,
            "hdd",
            data[hdd_column],
            period_days,
            minimum_total_hdd,
        )
    )
    degree_day_warnings.extend(
        get_too_few_non_zero_degree_day_warning(
            model_type, balance_point, "hdd", data[hdd_column], minimum_non_zero_hdd
        )
    )

    if len(degree_day_warnings) > 0:
        return CalTRACKUsagePerDayCandidateModel(
            model_type=model_type,
            formula=formula,
            status="NOT ATTEMPTED",
            warnings=degree_day_warnings,
        )

    try:
        model = smf.wls(formula=formula, data=data, weights=weights)
    except Exception as e:
        # NOTE(review): the exception details (e) are discarded here
        return get_fit_failed_candidate_model(model_type, formula)

    result = model.fit()
    r_squared_adj = result.rsquared_adj
    beta_hdd_p_value = result.pvalues[hdd_column]

    # CalTrack 3.3.1.3
    model_params = {
        "intercept": result.params["Intercept"],
        "beta_hdd": result.params[hdd_column],
        "heating_balance_point": balance_point,
    }

    model_warnings = []

    # CalTrack 3.4.3.2
    for parameter in ["intercept", "beta_hdd"]:
        model_warnings.extend(
            get_parameter_negative_warning(model_type, model_params, parameter)
        )
        model_warnings.extend(
            get_parameter_p_value_too_high_warning(
                model_type,
                model_params,
                parameter,
                beta_hdd_p_value,
                beta_hdd_maximum_p_value,
            )
        )

    # Any model warning disqualifies the candidate.
    if len(model_warnings) > 0:
        status = "DISQUALIFIED"
    else:
        status = "QUALIFIED"

    return CalTRACKUsagePerDayCandidateModel(
        model_type=model_type,
        formula=formula,
        status=status,
        warnings=model_warnings,
        model_params=model_params,
        model=model,
        result=result,
        r_squared_adj=r_squared_adj,
    )
def turn_to_angle(self, speed, angle_target_degrees, brake=True, block=True):
    """
    Rotate in place at `speed` until the odometry heading equals
    `angle_target_degrees`, always sweeping through the shorter arc.
    """
    assert self.odometry_thread_id, "odometry_start() must be called to track robot coordinates"
    # Normalize the target heading into [0, 360)
    target = angle_target_degrees
    if target < 0:
        target += 360
    # Current heading from odometry, normalized the same way
    current = math.degrees(self.theta)
    if current < 0:
        current += 360
    # Initial guess: rotate right when the current heading is past the target
    turn_right = current > target
    delta = current - target if turn_right else target - current
    # If the sweep exceeds half a circle, go the other way around instead
    if delta > 180:
        delta = 360 - delta
        turn_right = not turn_right
    log.debug("%s: turn_to_angle %s, current angle %s, delta %s, turn_right %s" %
              (self, target, current, delta, turn_right))
    self.odometry_coordinates_log()
    if turn_right:
        self.turn_right(speed, delta, brake, block)
    else:
        self.turn_left(speed, delta, brake, block)
    self.odometry_coordinates_log()
def check_trigger(self, date_tuple, utc_offset=0):
    """
    Returns boolean indicating if the trigger is active at the given time.
    The date tuple should be in the local time. Unless periodicities are
    used, utc_offset does not need to be specified. If periodicities are
    used, specifically in the hour and minutes fields, it is crucial that
    the utc_offset is specified.

    :param date_tuple: (year, month, day, hour, minute) in local time
    :param utc_offset: hours offset from UTC; only affects ``%`` period
        atoms because those measure elapsed time since the epoch
    :return: True when the cron expression matches the given moment
    """
    year, month, day, hour, mins = date_tuple
    given_date = datetime.date(year, month, day)
    # Reference date from which periodicity deltas are measured.
    zeroday = datetime.date(*self.epoch[:3])
    last_dom = calendar.monthrange(year, month)[-1]
    dom_matched = True
    # In calendar and datetime.date.weekday, Monday = 0
    given_dow = (datetime.date.weekday(given_date) + 1) % 7
    # Day of week on which the current month started.
    first_dow = (given_dow + 1 - day) % 7
    # Figure out how much time has passed from the epoch to the given date
    utc_diff = utc_offset - self.epoch[5]
    mod_delta_yrs = year - self.epoch[0]
    mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12
    mod_delta_day = (given_date - zeroday).days
    mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff
    mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60
    # Makes iterating through like components easier.
    quintuple = zip(
        (mins, hour, day, month, given_dow),
        self.numerical_tab,
        self.string_tab,
        (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon,
         mod_delta_day),
        FIELD_RANGES)
    for value, valid_values, field_str, delta_t, field_type in quintuple:
        # All valid, static values for the fields are stored in sets
        if value in valid_values:
            continue
        # The following for loop implements the logic for context
        # sensitive and epoch sensitive constraints. break statements,
        # which are executed when a match is found, lead to a continue
        # in the outer loop. If there are no matches found, the given date
        # does not match expression constraints, so the function returns
        # False as seen at the end of this for...else... construct.
        for cron_atom in field_str.split(','):
            if cron_atom[0] == '%':
                # '%N' periodicity atom: true every N units since the epoch.
                if not(delta_t % int(cron_atom[1:])):
                    break
            elif '#' in cron_atom:
                D, N = int(cron_atom[0]), int(cron_atom[2])
                # Computes Nth occurence of D day of the week
                if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day:
                    break
            elif cron_atom[-1] == 'W':
                # 'W' atom: the weekday nearest to the given day of month.
                target = min(int(cron_atom[:-1]), last_dom)
                lands_on = (first_dow + target - 1) % 7
                if lands_on == 0:
                    # Shift from Sun. to Mon. unless Mon. is next month
                    if target < last_dom:
                        target += 1
                    else:
                        target -= 2
                elif lands_on == 6:
                    # Shift from Sat. to Fri. unless Fri. in prior month
                    if target > 1:
                        target -= 1
                    else:
                        target += 2
                # Break if the day is correct, and target is a weekday
                if target == day and (first_dow + target) % 7 > 1:
                    break
            elif cron_atom[-1] == 'L':
                # In dom field, L means the last day of the month
                target = last_dom
                if field_type == DAYS_OF_WEEK:
                    # Calculates the last occurence of given day of week
                    desired_dow = int(cron_atom[:-1])
                    target = (((desired_dow - first_dow) % 7) + 29)
                    if target > last_dom:
                        target -= 7
                if target == day:
                    break
        else:
            # See 2010.11.15 of CHANGELOG
            if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*':
                dom_matched = False
                continue
            elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*':
                # If we got here, then days of months validated so it does
                # not matter that days of the week failed.
                return dom_matched
            # None of the expressions matched which means this field fails
            return False
    # Arriving at this point means the date landed within the constraints
    # of all fields; the associated trigger should be fired.
    return True
def validate_version(err, value, source):
    """Validate a manifest version string, reporting problems on `err`.

    :param err: error bundle collecting validation errors and metadata
    :param value: the version string being validated
    :param source: file the value came from ('install.rdf' or a manifest)
    """
    field_name = '<em:version>' if source == 'install.rdf' else 'version'
    err.metadata['version'] = value
    # Enforce the 32-character length cap.
    if len(value) > 32:
        err.error(
            ('metadata_helpers', '_test_version', 'too_long'),
            'The value of {name} is too long'.format(name=field_name),
            'Values supplied for {name} in the {source} file must be 32 '
            'characters or less.'.format(name=field_name, source=source),
            source)
    # Enforce the allowed character set for version strings.
    if not VERSION_PATTERN.match(value):
        err.error(
            ('metadata_helpers', '_test_version', 'invalid_format'),
            'The value of {name} is invalid'.format(name=field_name),
            'The values supplied for version in the {source} file is not a '
            'valid version string. It can only contain letters, numbers, and '
            'the symbols +*.-_.'.format(name=field_name, source=source),
            source)
def address(self, street, city=None, state=None, zipcode=None, **kwargs):
    '''Geocode a single address via the 'address' endpoint.'''
    # Assemble the request fields; unset components stay None.
    payload = {
        'street': street,
        'city': city,
        'state': state,
        'zip': zipcode,
    }
    return self._fetch('address', payload, **kwargs)
def dvc_walk(
    top,
    topdown=True,
    onerror=None,
    followlinks=False,
    ignore_file_handler=None,
):
    """
    Proxy for `os.walk` directory tree generator that applies
    DvcIgnoreFilter to prune ignored directories and files.

    Filtering requires mutating the dirs list in place, which only has an
    effect with top-down traversal, so the filter is built only then.
    """
    dvc_filter = None
    if topdown:
        from dvc.ignore import DvcIgnoreFilter

        dvc_filter = DvcIgnoreFilter(
            top, ignore_file_handler=ignore_file_handler
        )
    walker = os.walk(
        top, topdown=topdown, onerror=onerror, followlinks=followlinks
    )
    for root, dnames, fnames in walker:
        if dvc_filter:
            dnames[:], fnames[:] = dvc_filter(root, dnames, fnames)
        yield root, dnames, fnames
def _find_observable_paths(extra_files=None):
    """Collect every directory the reloader should watch.

    Combines sys.path entries (a file entry contributes its directory),
    the directories of any extra files, and the directories of all loaded
    modules, then reduces them to their common roots.
    """
    watched = set()
    for entry in sys.path:
        if os.path.isfile(entry):
            watched.add(os.path.dirname(os.path.abspath(entry)))
        else:
            watched.add(os.path.abspath(entry))
    for extra in extra_files or ():
        watched.add(os.path.dirname(os.path.abspath(extra)))
    # Snapshot sys.modules since imports may happen concurrently.
    for module in list(sys.modules.values()):
        module_file = getattr(module, "__file__", None)
        if module_file is None:
            continue
        watched.add(os.path.dirname(os.path.abspath(module_file)))
    return _find_common_roots(watched)
def CmdVersion(self):
    """Obtain the version of the device and test transport format.

    Obtains the version of the device and determines whether to use ISO
    7816-4 or the U2f variant. This function should be called at least once
    before CmdAuthenticate or CmdRegister to make sure the object is using
    the proper transport for the device.

    Returns:
      The version of the U2F protocol in use.

    Raises:
      errors.ApduError: when the device reports a non-success status word.
    """
    self.logger.debug('CmdVersion')
    version_request = apdu.CommandApdu(0, apdu.CMD_VERSION, 0x00, 0x00)
    response = self.InternalSendApdu(version_request)
    if not response.IsSuccess():
        raise errors.ApduError(response.sw1, response.sw2)
    return response.body
def priority(self, priority):
    """Set the priority for all threads in this group.

    If setting priority fails on any thread, the priority of all threads
    already changed is restored to its previous value (best-effort) and
    the original exception is re-raised.

    :param priority: the new priority to apply to every thread
    """
    with self.__lock:
        old_priorities = {}
        try:
            for thread in self.__threads:
                # Record the old value before changing it so we can roll back.
                old_priorities[thread] = thread.priority
                thread.priority = priority
        except Exception:
            # Roll back every thread we already touched.
            # (dict.iteritems() was Python-2-only; .items() works on both.)
            for (thread, old_priority) in old_priorities.items():
                try:
                    thread.priority = old_priority
                except Exception:
                    # Best-effort rollback; ignore secondary failures.
                    pass
            raise
        else:
            self.__priority = priority
def parse_keys_and_ranges(i_str, keyfunc, rangefunc):
    '''Parse the :class:`from_kvlayer` input string.
    This accepts two formats.  In the textual format, it accepts any
    number of stream IDs in timestamp-docid format, separated by ``,``
    or ``;``, and processes those as individual stream IDs.  In the
    binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4
    bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` loads
    scans keys `a` through `f` and loads singly key `x`.
    `keyfunc` and `rangefunc` are run as generators and their yields
    are yielded from this function.
    '''
    while i_str:
        m = _STREAM_ID_RE.match(i_str)
        if m:
            # old style text stream_id
            for retval in keyfunc(stream_id_to_kvlayer_key(m.group())):
                yield retval
            i_str = i_str[m.end():]
            # Consume any run of separators before the next token.
            while i_str and ((i_str[0] == ',') or (i_str[0] == ';')):
                i_str = i_str[1:]
            continue
        if len(i_str) == SI_KEY_LENGTH:
            # one key, get it.
            key = parse_si_key(i_str)
            for retval in keyfunc(key):
                yield retval
            return
        # Binary format: a key blob followed by a one-character splitter.
        keya = i_str[:SI_KEY_LENGTH]
        splitc = i_str[SI_KEY_LENGTH]
        if splitc == '<':
            # range: scan everything between keya and keyb inclusive
            keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH]
            i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:]
            keya = parse_si_key(keya)
            keyb = parse_si_key(keyb)
            for retval in rangefunc(keya, keyb):
                yield retval
        elif splitc == ';':
            # keya is single key to load
            keya = parse_si_key(keya)
            for retval in keyfunc(keya):
                yield retval
            # NOTE(review): this advances one byte past the ';' splitter
            # (SI_KEY_LENGTH+1+1) while the range branch advances exactly
            # past its token — verify the extra byte skip is intended.
            i_str = i_str[SI_KEY_LENGTH+1+1:]
        else:
            logger.error('bogus key splitter %s, %r', splitc, i_str)
            return
def send_http_request_with_body_parameters(context, method):
    """
    Behave step helper: send an HTTP request with `method`, building the
    request body from the step's data table of parameters.

    Parameters:
    +-------------+--------------+
    | param_name  | param_value  |
    +=============+==============+
    | param1      | value1       |
    +-------------+--------------+
    | param2      | value2       |
    +-------------+--------------+
    """
    # Ensure the shared HTTP request context exists on the behave context,
    # fill the body from the table, then fire the request (in this order).
    safe_add_http_request_context_to_behave_context(context)
    set_body_parameters(context)
    send_http_request(context, method)
def _group_by(data, criteria):
"""
Group objects in data using a function or a key
"""
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res | 0.002755 |
def remove_child_catalog(self, catalog_id, child_id):
    """Removes a child from a catalog.

    arg:    catalog_id (osid.id.Id): the ``Id`` of a catalog
    arg:    child_id (osid.id.Id): the ``Id`` of the new child
    raise:  NotFound - ``catalog_id`` is not a parent of
            ``child_id``
    raise:  NullArgument - ``catalog_id`` or ``child_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    # Prefer a dedicated catalog session when one is configured;
    # otherwise fall back to the generic hierarchy session.
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(catalog_id=catalog_id, child_id=child_id)
    return self._hierarchy_session.remove_child(id_=catalog_id, child_id=child_id)
def annotate_metadata_action(repo):
    """
    Update metadata with the action history.

    Reads ``.dgit/log.json`` (one JSON record per line) from the repo
    root and stores the parsed records under ``repo.package['actions']``.
    Unparseable lines are skipped; a missing or null ``code`` field is
    replaced with a ``"..."`` placeholder.

    :param repo: repository object exposing ``package`` and ``rootdir``
    """
    package = repo.package
    print("Including history of actions")
    with cd(repo.rootdir):
        filename = ".dgit/log.json"
        if os.path.exists(filename):
            # Close the file deterministically (the old code leaked the handle).
            with open(filename) as fd:
                history = fd.readlines()
            actions = []
            for line in history:
                try:
                    a = json.loads(line)
                    for x in ['code']:
                        if x not in a or a[x] is None:
                            a[x] = "..."
                    actions.append(a)
                except (ValueError, TypeError):
                    # ValueError covers malformed JSON (JSONDecodeError is a
                    # subclass); TypeError covers records that are not dicts.
                    pass
            package['actions'] = actions
def saveAsTextFiles(self, prefix, suffix=None):
    """
    Save each RDD in this DStream as a text file, using string
    representation of elements.

    :param prefix: path prefix for each batch's output directory
    :param suffix: optional suffix appended to each generated path
    """
    def saveAsTextFile(t, rdd):
        # Build a per-batch output path from the batch time.
        path = rddToFileName(prefix, suffix, t)
        try:
            rdd.saveAsTextFile(path)
        except Py4JJavaError as e:
            # after recovered from checkpointing, the foreachRDD may
            # be called twice
            if 'FileAlreadyExistsException' not in str(e):
                raise
    return self.foreachRDD(saveAsTextFile)
def kill(self, dwExitCode = 0):
    """
    Terminates the execution of the process.

    @type  dwExitCode: int
    @param dwExitCode: Exit code for the terminated process (defaults to 0).

    @raise WindowsError: On error an exception is raised.
    """
    # PROCESS_TERMINATE access is required for TerminateProcess.
    hProcess = self.get_handle(win32.PROCESS_TERMINATE)
    win32.TerminateProcess(hProcess, dwExitCode)
def get_file_results(self):
    """Print the result and return the overall count for this file.

    Reports are accumulated in ``_deferred_print`` and emitted here
    sorted by location; optionally the offending source line (with a
    caret under the column) and the check's doc text are shown.
    """
    self._deferred_print.sort()
    for line_number, offset, code, text, doc in self._deferred_print:
        print(self._fmt % {
            'path': self.filename,
            'row': self.line_offset + line_number, 'col': offset + 1,
            'code': code, 'text': text,
        })
        if self._show_source:
            # Guard against reports pointing past the end of the file.
            if line_number > len(self.lines):
                line = ''
            else:
                line = self.lines[line_number - 1]
            print(line.rstrip())
            # Caret marker aligned under the offending column.
            print(re.sub(r'\S', ' ', line[:offset]) + '^')
        if self._show_pep8 and doc:
            print('    ' + doc.strip())
        # stdout is block buffered when not stdout.isatty().
        # line can be broken where buffer boundary since other processes
        # write to same file.
        # flush() after print() to avoid buffer boundary.
        # Typical buffer size is 8192. line written safely when
        # len(line) < 8192.
        sys.stdout.flush()
    return self.file_errors
def on_item_changed(self, item, new_value, row, column):
    """Event for the item change.

    Args:
        emitter (TableWidget): The emitter of the event.
        item (TableItem): The TableItem instance.
        new_value (str): New text content.
        row (int): row index.
        column (int): column index.
    """
    # Bundle the event payload so listeners receive it as one tuple.
    payload = (item, new_value, row, column)
    return payload
def _process_mgi_note_vocevidence_view(self, limit):
    """
    Here we fetch the free text descriptions of the phenotype associations.
    Triples:
    <annot_id> dc:description "description text"
    :param limit: maximum number of data rows to process (None for all;
        ignored in test mode)
    :return: None
    """
    line_counter = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("getting free text descriptions for annotations")
    raw = '/'.join((self.rawdir, 'mgi_note_vocevidence_view'))
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            # Skip the header row.
            if line_counter == 1:
                continue
            (object_key, note) = line
            # In test mode only keep rows whose key is in the test set.
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('notes'):
                    continue
            # object_key == evidence._annotevidence_key
            annotkey = self.idhash['notes'].get(object_key)
            annot_id = self.idhash['annot'].get(annotkey)
            # only add the description for the annotations
            # we have captured through processing
            if annot_id is not None:
                model.addDescription(annot_id, note.strip())
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    return
def check_log_stream_exists(awsclient, log_group_name, log_stream_name):
    """Check whether a log stream exists within a log group.

    :param awsclient: client used by the describe_* helpers
    :param log_group_name: log group name
    :param log_stream_name: log stream name
    :return: True / False
    """
    group = describe_log_group(awsclient, log_group_name)
    # Bail out early when the group itself is missing or mismatched.
    if not group or group['logGroupName'] != log_group_name:
        return False
    stream = describe_log_stream(awsclient, log_group_name, log_stream_name)
    if not stream or stream['logStreamName'] != log_stream_name:
        return False
    return True
def xyY_to_XYZ(cobj, *args, **kwargs):
    """
    Convert from xyY to XYZ.

    A zero xyy_y chromaticity would divide by zero, so that case is
    mapped to black (all-zero XYZ).
    """
    x, y, Y = cobj.xyy_x, cobj.xyy_y, cobj.xyy_Y
    # avoid division by zero
    if y == 0.0:
        xyz_x = xyz_y = xyz_z = 0.0
    else:
        xyz_x = (x * Y) / y
        xyz_y = Y
        xyz_z = ((1.0 - x - y) * xyz_y) / y
    return XYZColor(
        xyz_x, xyz_y, xyz_z, illuminant=cobj.illuminant, observer=cobj.observer)
def get_fresh(self, columns=None):
    """
    Execute the query as a fresh "select" statement.

    :param columns: The columns to get (defaults to ``['*']``)
    :type columns: list
    :return: The result
    :rtype: list
    """
    selected = columns if columns else ['*']
    # Only adopt the columns when none have been set on the builder yet.
    if not self.columns:
        self.columns = selected
    return self._processor.process_select(self, self._run_select())
def construct_blastn_cmdline(
    fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT
):
    """Returns a single blastn command.

    - fname1     - query (fragments) filename; '-fragments' is stripped
                   from its stem when building the output prefix
    - fname2     - subject database filename
    - outdir     - directory for the .blast_tab output
    - blastn_exe - path to BLASTN executable
    """
    query_stem = os.path.splitext(os.path.split(fname1)[-1])[0]
    query_stem = query_stem.replace("-fragments", "")
    subject_stem = os.path.splitext(os.path.split(fname2)[-1])[0]
    prefix = os.path.join(outdir, "%s_vs_%s" % (query_stem, subject_stem))
    template = (
        "{0} -out {1}.blast_tab -query {2} -db {3} "
        "-xdrop_gap_final 150 -dust no -evalue 1e-15 "
        "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch "
        "pident nident qlen slen qstart qend sstart send positive "
        "ppos gaps' -task blastn"
    )
    return template.format(blastn_exe, prefix, fname1, fname2)
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
    """Wrapper method providing access to the SQLScript class's methods and properties.

    :param sql_script: the SQL script to run
    :param split_algo: statement-splitting algorithm name (default 'sql_split')
    :param prep_statements: whether to prepare statements
    :param dump_fails: whether to dump failed statements
    :return: an Execute instance bound to this connection
    """
    # NOTE(review): parameter semantics inferred from names — confirm
    # against the Execute/SQLScript implementation.
    return Execute(sql_script, split_algo, prep_statements, dump_fails, self)
def _open_config_files(self, command_line_args):
    """Tries to parse config file path(s) from within command_line_args.
    Returns a list of opened config files, including files specified on the
    commandline as well as any default_config_files specified in the
    constructor that are present on disk.

    The returned file objects are open; the caller is responsible for
    closing them.

    Args:
        command_line_args: List of all args (already split on spaces)
    """
    # open any default config files
    config_files = [open(f) for files in map(glob.glob, map(os.path.expanduser, self._default_config_files))
                    for f in files]
    # list actions with is_config_file_arg=True. Its possible there is more
    # than one such arg.
    user_config_file_arg_actions = [
        a for a in self._actions if getattr(a, "is_config_file_arg", False)]
    if not user_config_file_arg_actions:
        return config_files
    for action in user_config_file_arg_actions:
        # try to parse out the config file path by using a clean new
        # ArgumentParser that only knows this one arg/action.
        arg_parser = argparse.ArgumentParser(
            prefix_chars=self.prefix_chars,
            add_help=False)
        arg_parser._add_action(action)
        # make parser not exit on error by replacing its error method.
        # Otherwise it sys.exits(..) if, for example, config file
        # is_required=True and user doesn't provide it.
        def error_method(self, message):
            pass
        arg_parser.error = types.MethodType(error_method, arg_parser)
        # check whether the user provided a value
        parsed_arg = arg_parser.parse_known_args(args=command_line_args)
        if not parsed_arg:
            continue
        namespace, _ = parsed_arg
        user_config_file = getattr(namespace, action.dest, None)
        if not user_config_file:
            continue
        # validate the user-provided config file path
        user_config_file = os.path.expanduser(user_config_file)
        if not os.path.isfile(user_config_file):
            self.error('File not found: %s' % user_config_file)
        config_files += [open(user_config_file)]
    return config_files
def lineReceived(self, line):
    """
    A line was received.

    Dispatches on the Assuan response prefix: ``#`` comment lines are
    ignored; the first ``OK`` marks the connection ready and later ones
    complete the oldest pending deferred; ``D`` lines carry
    percent-escaped payload data; ``ERR`` fails the oldest pending
    deferred with an AssuanError.
    """
    if line.startswith(b"#"):  # ignore it
        return
    if line.startswith(b"OK"):
        # if no command issued, then just 'ready'
        if self._ready:
            self._dq.pop(0).callback(self._currentResponse(line))
        else:
            self._ready = True
    if line.startswith(b"D "):
        # Unescape Assuan percent-encoding in the data payload.
        # NOTE(review): %0A (LF) is mapped to \r and %0D (CR) to \n here,
        # which looks swapped relative to their ASCII meanings — confirm
        # against the Assuan spec / peer before changing.
        self._bufferedData.append(line[2:].replace(b"%0A", b"\r")
                                          .replace(b"%0D", b"\n")
                                          .replace(b"%25", b"%"))
    if line.startswith(b"ERR"):
        self._dq.pop(0).errback(AssuanError(line))
def _make_serverproxy_handler(name, command, environment, timeout, absolute_url, port):
    """
    Create a SuperviseAndProxyHandler subclass with given parameters.

    :param name: handler name; also used as the URL prefix (proxy_base)
    :param command: command template (str/list/dict values may contain
        ``{port}`` / ``{base_url}`` placeholders) or a callable producing one
    :param environment: environment template or callable, rendered with the
        same placeholders
    :param timeout: value returned by the handler's get_timeout()
    :param absolute_url: stored on the handler as ``absolute_url``
    :param port: stored on the handler as ``requested_port``
    """
    # FIXME: Set 'name' properly
    class _Proxy(SuperviseAndProxyHandler):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.name = name
            self.proxy_base = name
            self.absolute_url = absolute_url
            self.requested_port = port
        @property
        def process_args(self):
            # Values substituted into the command/environment templates.
            return {
                'port': self.port,
                'base_url': self.base_url,
            }
        def _render_template(self, value):
            # Recursively .format() strings inside lists/dicts.
            args = self.process_args
            if type(value) is str:
                return value.format(**args)
            elif type(value) is list:
                return [self._render_template(v) for v in value]
            elif type(value) is dict:
                return {
                    self._render_template(k): self._render_template(v)
                    for k, v in value.items()
                }
            else:
                raise ValueError('Value of unrecognized type {}'.format(type(value)))
        def get_cmd(self):
            if callable(command):
                return self._render_template(call_with_asked_args(command, self.process_args))
            else:
                return self._render_template(command)
        def get_env(self):
            if callable(environment):
                return self._render_template(call_with_asked_args(environment, self.process_args))
            else:
                return self._render_template(environment)
        def get_timeout(self):
            return timeout
    return _Proxy
def to_seconds(string):
    """
    Converts a human readable time string into seconds.
    Accepts:
    - 's': seconds
    - 'm': minutes
    - 'h': hours
    - 'd': days
    Examples:
    >>> to_seconds('1m30s')
    90
    >>> to_seconds('5m')
    300
    >>> to_seconds('1h')
    3600
    >>> to_seconds('1h30m')
    5400
    >>> to_seconds('3d')
    259200
    >>> to_seconds('42x')
    Traceback (most recent call last):
    ...
    ValueError
    """
    units = {
        's': 1,
        'm': 60,
        'h': 60 * 60,
        'd': 60 * 60 * 24
    }
    match = re.search(r'(?:(?P<d>\d+)d)?(?:(?P<h>\d+)h)?(?:(?P<m>\d+)m)?(?:(?P<s>\d+)s)?', string)
    # All atoms are optional, so the pattern can match the empty string;
    # reject inputs where no unit group actually captured anything.
    if not match or not any(match.groups()):
        raise ValueError
    total = 0
    # dict.iteritems() was Python-2-only; .items() works on both.
    for unit, seconds in units.items():
        if match.group(unit) is not None:
            total += int(match.group(unit)) * seconds
    return total
def _ConvertHeaderToId(header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that
    _ConvertIdToHeader() returns, i.e. ``<prefix+id>``.

    Args:
      header: A string indicating the Content-ID header value.

    Returns:
      The extracted (URL-unquoted) id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    # The header must be wrapped in angle brackets on BOTH sides.  The
    # previous check used `or`, which let half-bracketed values such as
    # '<foo+1' slip through and be silently mis-parsed below.
    if not (header.startswith('<') and header.endswith('>')):
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    if '+' not in header:
        raise exceptions.BatchError(
            'Invalid value for Content-ID: %s' % header)
    # Split on the LAST '+' so ids containing '+' in the prefix still work.
    _, request_id = header[1:-1].rsplit('+', 1)
    return urllib_parse.unquote(request_id)
def p_constant_declaration(p):
    'constant_declaration : STRING EQUALS static_scalar'
    # PLY grammar action (the docstring above IS the grammar rule — do not
    # edit it).  p[1] is the constant name token, p[3] the parsed static
    # scalar; tag the AST node with the source line of the name token.
    p[0] = ast.ConstantDeclaration(p[1], p[3], lineno=p.lineno(1))
def run_in_pod(self, namespace="default"):
    """
    run image inside Kubernetes Pod

    :param namespace: str, name of namespace where pod will be created
    :return: Pod instance
    :raises ConuException: when the Kubernetes API call fails
    """
    core_api = get_core_api()
    # Derive the pod spec from this image's metadata.
    image_data = self.get_metadata()
    pod = Pod.create(image_data)
    try:
        pod_instance = core_api.create_namespaced_pod(namespace=namespace, body=pod)
    except ApiException as e:
        raise ConuException("Exception when calling CoreV1Api->create_namespaced_pod: %s\n" % e)
    logger.info(
        "Starting Pod %s in namespace %s" % (pod_instance.metadata.name, namespace))
    # Wrap the raw API object in conu's Pod abstraction.
    return Pod(name=pod_instance.metadata.name,
               namespace=pod_instance.metadata.namespace,
               spec=pod_instance.spec)
def relative_path(sub_directory='', function_index=1):
    """
    This will return a path relative to the caller's python script.

    :param sub_directory: str (slash-separated) or list of str of the
        relative path components
    :param function_index: int of the number of function calls to go back
        to find the caller whose file provides the base directory
    :return: str of the full path
    """
    frm = inspect.currentframe()
    # Walk back `function_index` frames to reach the caller of interest.
    for i in range(function_index):
        frm = frm.f_back
    # NOTE(review): 'run_code' looks like an interactive-shell exec frame
    # being skipped — confirm which runtime this targets.
    if frm.f_code.co_name == 'run_code':
        frm = frm.f_back
    if not isinstance(sub_directory, list):
        sub_directory = sub_directory.replace('\\','/').split('/')
    # Base directory is the directory of the caller's source file.
    path = os.path.split(frm.f_code.co_filename)[0]
    if sub_directory:
        path = os.path.abspath(os.path.join(path, *sub_directory))
    return path
def threadsafe_call(self, fn, *args, **kwargs):
    """Wrapper around `AsyncSession.threadsafe_call`.

    Schedules ``fn(*args, **kwargs)`` to run on the session, executed
    inside a new greenlet.  Exceptions from the callback are logged via
    ``warn`` rather than propagated.
    """
    def handler():
        try:
            fn(*args, **kwargs)
        except Exception:
            # Never let a callback exception escape into the session loop.
            warn("error caught while excecuting async callback\n%s\n",
                 format_exc())
    def greenlet_wrapper():
        gr = greenlet.greenlet(handler)
        gr.switch()
    self._async_session.threadsafe_call(greenlet_wrapper)
def get_unpacked_response_body(self, requestId, mimetype="application/unknown"):
    '''
    Fetch the response body for `requestId` via Network_getResponseBody()
    and return it unpacked: base64-encoded payloads are decoded to bytes,
    plain payloads are passed through unchanged.
    '''
    response = self.Network_getResponseBody(requestId)
    assert 'result' in response
    result = response['result']
    assert 'base64Encoded' in result
    assert 'body' in result
    is_encoded = result['base64Encoded']
    body = base64.b64decode(result['body']) if is_encoded else result['body']
    self.log.info("Navigate complete. Received %s byte response with type %s.", len(body), mimetype)
    return {'binary' : is_encoded, 'mimetype' : mimetype, 'content' : body}
def send(tag, data=None):
    '''
    Send an event with the given tag and data.
    This is useful for sending events directly to the master from the shell
    with salt-run. It is also quite useful for sending events in orchestration
    states where the ``fire_event`` requisite isn't sufficient because it does
    not support sending custom data with the event.
    Note that event tags will *not* be namespaced like events sent with the
    ``fire_event`` requisite! Whereas events produced from ``fire_event`` are
    prefixed with ``salt/state_result/<jid>/<minion_id>/<name>``, events sent
    using this runner module will have no such prefix. Make sure your reactors
    don't expect a prefix!
    :param tag: the tag to send with the event
    :param data: an optional dictionary of data to send with the event
    :return: the result of ``fire_event`` on the master event bus
    CLI Example:
    .. code-block:: bash
        salt-run event.send my/custom/event '{"foo": "bar"}'
    Orchestration Example:
    .. code-block:: yaml
        # orch/command.sls
        run_a_command:
          salt.function:
            - name: cmd.run
            - tgt: my_minion
            - arg:
              - exit {{ pillar['exit_code'] }}
        send_success_event:
          salt.runner:
            - name: event.send
            - tag: my_event/success
            - data:
                foo: bar
            - require:
              - salt: run_a_command
        send_failure_event:
          salt.runner:
            - name: event.send
            - tag: my_event/failure
            - data:
                baz: qux
            - onfail:
              - salt: run_a_command
    .. code-block:: bash
        salt-run state.orchestrate orch.command pillar='{"exit_code": 0}'
        salt-run state.orchestrate orch.command pillar='{"exit_code": 1}'
    '''
    data = data or {}
    # Publish-only connection to the master event bus (listen=False).
    event = salt.utils.event.get_master_event(__opts__, __opts__['sock_dir'],
                                              listen=False)
    return event.fire_event(data, tag)
def cancel(self, job_ids):
    ''' Cancels the jobs specified by a list of job ids

    Args:
    job_ids : [<job_id> ...]

    Returns :
    [True/False...] : If the cancel operation fails the entire list will be False.
    '''
    # qdel accepts multiple space-separated job ids in one invocation.
    job_id_list = ' '.join(job_ids)
    retcode, stdout, stderr = self.channel.execute_wait("qdel {0}".format(job_id_list), 3)
    rets = None
    if retcode == 0:
        for jid in job_ids:
            self.resources[jid]['status'] = translate_table['E']  # Setting state to exiting
        rets = [True for i in job_ids]
    else:
        # A non-zero exit marks the whole batch as failed.
        rets = [False for i in job_ids]
    return rets
def total_memory(self, image='ubuntu'):
    '''Return the docker machine's total memory (MemTotal) in kB.

    The value is cached on ``self.tot_mem``; on any failure (e.g. the
    image lacks cat or grep) it is set to None instead.
    '''
    command = f'''docker run -t {image} cat /proc/meminfo | grep MemTotal'''
    try:
        output = subprocess.check_output(
            command,
            shell=True,
            stdin=subprocess.DEVNULL)
        # output looks like: "MemTotal:       30208916 kB"
        self.tot_mem = int(output.split()[1])
    except Exception:
        # some system does not have cat or grep
        self.tot_mem = None
    return self.tot_mem
def init_module(remote_credences=None,local_path=None):
    """Connection information: pass remote_credences for remote access OR
    local_path for local access.

    :param remote_credences: mapping with a "DB" section (host, user,
        password, name) used for a remote (psycopg2) connection
    :param local_path: filesystem path of a local (sqlite3) database
    :raises ValueError: when neither mode is provided
    """
    if remote_credences is not None:
        # Configure the remote connection class and psycopg2 placeholders.
        RemoteConnexion.HOST = remote_credences["DB"]["host"]
        RemoteConnexion.USER = remote_credences["DB"]["user"]
        RemoteConnexion.PASSWORD = remote_credences["DB"]["password"]
        RemoteConnexion.NAME = remote_credences["DB"]["name"]
        MonoExecutant.ConnectionClass = RemoteConnexion
        Executant.ConnectionClass = RemoteConnexion
        abstractRequetesSQL.setup_marks("psycopg2")
    elif local_path is not None:
        # Configure the local connection class and sqlite3 placeholders.
        LocalConnexion.PATH = local_path
        MonoExecutant.ConnectionClass = LocalConnexion
        Executant.ConnectionClass = LocalConnexion
        abstractRequetesSQL.setup_marks("sqlite3")
    else:
        raise ValueError("Sql module should be init with one of remote or local mode !")
    logging.info(f"Sql module initialized with {MonoExecutant.ConnectionClass.__name__}")
def save_model(self, request, obj, form, change):
    """
    Sends the email immediately and does not save the model instance.
    """
    def _recipients(raw):
        # Split a comma-separated address string into stripped addresses.
        return [part.strip() for part in raw.split(',')]

    outgoing = message.EmailMessage(
        subject=obj.subject,
        body=obj.body,
        from_email=obj.from_email,
        to=_recipients(obj.to_emails),
        bcc=_recipients(obj.bcc_emails),
        cc=_recipients(obj.cc_emails)
    )
    outgoing.send()
def create_embedded_class(self, method):
    """
    Render the estimator class template around the given method body.

    Returns
    -------
    :return : string
        The rendered class as a string.
    """
    template = self.temp('embedded.class')
    rendered = template.format(
        class_name=self.class_name,
        method_name=self.method_name,
        method=method,
        n_features=self.n_features)
    return rendered
def _get_zeropoint(expnum, ccd, prefix=None, version='p'):
    """
    Retrieve the zeropoint stored in the tags associated with this image.

    @param expnum: Exposure number
    @param ccd: ccd of the exposure
    @param prefix: deprecated and ignored ('fk' and 's' share a zeropoint)
    @param version: which version: p, s, or o ?
    @return: zeropoint
    """
    if prefix is not None:
        # The old code merely instantiated a DeprecationWarning, which is a
        # no-op; actually emit the warning through the warnings machinery.
        import warnings
        warnings.warn(
            "Prefix is no longer used here as the 'fk' and 's' have the same zeropoint.",
            DeprecationWarning)
    key = "zeropoint_{:1s}{:02d}".format(version, int(ccd))
    return get_tag(expnum, key)
def safe_call(self, kwargs, args=None):
    """
    Call the underlying function safely, given a set of keyword
    arguments.  On success the function's return value (likely None) is
    returned.  If the function raises, the string value of the exception
    is returned instead — unless an argparse Namespace with a truthy
    'debug' attribute is supplied, in which case the exception is
    re-raised for debugging.

    :param kwargs: A dictionary of keyword arguments to pass to
                   the underlying function.
    :param args: If provided, this should be a Namespace object
                 with a 'debug' attribute set to a boolean value.
    :returns: The function return value, or the string value of
              the exception raised by the function.
    """
    try:
        return self._func(**kwargs)
    except Exception as exc:
        # In debug mode surface the full traceback to the user.
        if args and getattr(args, 'debug', False):
            raise
        return str(exc)
def get_device_info(self, bigip):
    '''Get device information about a specific BigIP device.

    :param bigip: bigip object --- device to inspect
    :returns: bigip object
    '''
    devices = bigip.tm.cm.devices.get_collection()
    # Exactly one device in the collection should be marked as "self".
    self_devices = [dev for dev in devices if dev.selfDevice == 'true']
    assert len(self_devices) == 1
    return self_devices[0]
def _getTransformation(self):
    """_getTransformation(self) -> PyObject *

    Return this page's transformation matrix, wrapped as a Matrix.
    """
    # Raise if the parent document has been closed/invalidated.
    CheckParent(self)
    val = _fitz.Page__getTransformation(self)
    # Wrap the raw extension return value in the Python Matrix type.
    val = Matrix(val)
    return val
def answers(self, other):
    """In CCP, the payload of a DTO packet is dependent on the cmd field
    of a corresponding CRO packet. Two packets correspond, if there
    ctr field is equal. If answers detect the corresponding CRO, it will
    interpret the payload of a DTO with the correct class. In CCP, there is
    no other way, to determine the class of a DTO payload. Since answers is
    called on sr and sr1, this modification of the original answers
    implementation will give a better user experience. """
    # `other` must be a CRO with a matching ctr, otherwise no match.
    if not hasattr(other, "ctr"):
        return 0
    if self.ctr != other.ctr:
        return 0
    if not hasattr(other, "cmd"):
        return 0
    new_pl_cls = self.get_dto_cls(other.cmd)
    # Re-interpret a still-generic payload now that the CRO's cmd is known;
    # already-specialized payloads are left untouched.
    if self.payload_cls != new_pl_cls and \
            self.payload_cls == DEFAULT_DTO:
        data = bytes(self.load)
        self.remove_payload()
        self.add_payload(new_pl_cls(data))
        self.payload_cls = new_pl_cls
    return 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.