code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def is_square_matrix(mat):
    """Return True when *mat* is two-dimensional with equal side lengths."""
    arr = np.asarray(mat)
    return arr.ndim == 2 and arr.shape[0] == arr.shape[1]
|
Test if an array is a square matrix.
|
def _login(session):
    """Authenticate the session against the UPS website.

    Fetches the login page to harvest the CSRF token, posts the stored
    credentials, and persists the resulting cookies.  Raises ``UPSError``
    on an HTTP 403 or when the response page carries an error message.
    """
    page = session.get(LOGIN_URL, params=_get_params(session.auth.locale))
    token = BeautifulSoup(page.text, HTML_PARSER) \
        .find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR)
    form = {
        'userID': session.auth.username,
        'password': session.auth.password,
        'loginAction': 'X',
        'CSRFToken': token,
        'loc': session.auth.locale,
    }
    reply = session.post(LOGIN_URL, form)
    if reply.status_code == 403:
        raise UPSError('login failure')
    error = BeautifulSoup(reply.text, HTML_PARSER).find(ERROR_FIND_TAG,
                                                        ERROR_FIND_ATTR)
    if error and error.string:
        raise UPSError(error.string.strip())
    _save_cookies(session.cookies, session.auth.cookie_path)
|
Login to UPS.
|
def view_sbo(self):
    """Render the SlackBuilds.org information screen for this package.

    Prints repository header, package URL, description, SlackBuild
    script, sources and requirements, then the interactive single-key
    menu (README, SlackBuild, info, doinst.sh, Download, Build,
    Install, Clear, Quit).
    """
    # Point at the human-readable repository page instead of the raw
    # slackbuilds tree.
    sbo_url = self.sbo_url.replace("/slackbuilds/", "/repository/")
    # With colors disabled the hot-key is marked with parentheses
    # instead of color escapes; fix_sp compensates the extra "(" so the
    # menu columns stay aligned.
    br1, br2, fix_sp = "", "", " "
    if self.meta.use_colors in ["off", "OFF"]:
        br1 = "("
        br2 = ")"
        fix_sp = ""
    print("")   # new line at start
    self.msg.template(78)
    print("| {0}{1}SlackBuilds Repository{2}".format(" " * 28, self.grey,
                                                     self.endc))
    self.msg.template(78)
    # Breadcrumb: Slackware version > repository category > package name
    print("| {0} > {1} > {2}{3}{4}".format(slack_ver(),
                                           sbo_url.split("/")[-3].title(),
                                           self.cyan, self.name, self.endc))
    self.msg.template(78)
    print("| {0}Package url{1}: {2}".format(self.green, self.endc, sbo_url))
    self.msg.template(78)
    print("| {0}Description: {1}{2}".format(self.green,
                                            self.endc, self.sbo_desc))
    print("| {0}SlackBuild: {1}{2}".format(self.green, self.endc,
                                           self.sbo_dwn.split("/")[-1]))
    print("| {0}Sources: {1}{2}".format(
        self.green, self.endc,
        (", ".join([src.split("/")[-1] for src in self.source_dwn]))))
    print("| {0}Requirements: {1}{2}".format(self.yellow,
                                             self.endc,
                                             ", ".join(self.sbo_req)))
    self.msg.template(78)
    # Interactive menu: the highlighted (or parenthesised) letter is the
    # key the user presses to trigger the action.
    print("| {0}R{1}{2}EADME               View the README file".format(
        self.red, self.endc, br2))
    print("| {0}S{1}{2}lackBuild           View the .SlackBuild "
          "file".format(self.red, self.endc, br2))
    print("| In{0}{1}f{2}{3}o{4}                 View the .info "
          "file".format(br1, self.red, self.endc, br2, fix_sp))
    # doinst.sh is optional; only offer it when the package ships one.
    if "doinst.sh" in self.sbo_files.split():
        print("| D{0}{1}o{2}{3}inst.sh{4}           View the doinst.sh "
              "file".format(br1, self.red, self.endc, br2, fix_sp))
    print("| {0}D{1}{2}ownload             Download this package".format(
        self.red, self.endc, br2))
    print("| {0}B{1}{2}uild                Download and build".format(
        self.red, self.endc, br2))
    print("| {0}I{1}{2}nstall              Download/Build/Install".format(
        self.red, self.endc, br2))
    print("| {0}C{1}{2}lear                Clear screen".format(self.red,
                                                                self.endc,
                                                                br2))
    print("| {0}Q{1}{2}uit                 Quit".format(self.red,
                                                        self.endc, br2))
    self.msg.template(78)
|
View slackbuild.org
|
def match(self, path):
    """Return route handler with arguments if path matches this route.

    Arguments:
      path (str): Request path

    Returns:
      tuple or None: A tuple of three items:
        1. Route handler (callable)
        2. Positional arguments (list)
        3. Keyword arguments (dict)
      ``None`` if the route does not match the path.
    """
    found = self._re.search(path)
    if found is None:
        return None
    # Named groups become keyword arguments ...
    named = found.re.groupindex
    kwargs = {name: found.group(index) for name, index in named.items()}
    # ... and every remaining (unnamed) group becomes a positional one,
    # in group-number order.
    named_positions = set(named.values())
    args = [found.group(i)
            for i in range(1, found.re.groups + 1)
            if i not in named_positions]
    return self._callback, args, kwargs
|
Return route handler with arguments if path matches this route.
Arguments:
path (str): Request path
Returns:
tuple or None: A tuple of three items:
1. Route handler (callable)
2. Positional arguments (list)
3. Keyword arguments (dict)
``None`` if the route does not match the path.
|
def del_application(self, application, sync=True):
    """
    Remove an application from this team.

    :param application: the application to be deleted from this team
    :param sync: If sync=True (default) synchronize with the Ariane server
                 immediately. If sync=False, queue the application object
                 to be removed on the next save().
    :return: None
    """
    LOGGER.debug("Team.del_application")
    if not sync:
        # Deferred mode: remember the application; actual removal happens
        # on the next save().
        self.app_2_rm.append(application)
    else:
        if application.id is None:
            # Ensure the application has a server-side id before issuing
            # the update request.
            application.sync()
        if self.id is not None and application.id is not None:
            params = {
                'id': self.id,
                'applicationID': application.id
            }
            args = {'http_operation': 'GET', 'operation_path': 'update/applications/delete', 'parameters': params}
            response = TeamService.requester.call(args)
            if response.rc != 0:
                # Server refused the update; log and leave local state alone.
                LOGGER.warning(
                    'Team.del_application - Problem while updating team ' + self.name +
                    '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
                    " (" + str(response.rc) + ")"
                )
            else:
                # Keep the local id cache and the application object in
                # step with the server state.
                self.app_ids.remove(application.id)
                application.sync()
        else:
            LOGGER.warning(
                'Team.del_application - Problem while updating team ' + self.name + '. Reason: application ' +
                application.name + ' id is None or self.id is None'
            )
|
delete application from this team
:param application: the application to be deleted from this team
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the application object on list to be removed on next save().
:return:
|
def dataframe(self):
    """
    Return a one-row pandas DataFrame holding every statistical property
    of this team, indexed by the team's string abbreviation, such as
    'PURDUE'.
    """
    # The attribute names double as the DataFrame column labels; the
    # order of this tuple fixes the column order of the resulting frame.
    columns = (
        'abbreviation',
        'assist_percentage',
        'assists',
        'away_losses',
        'away_wins',
        'block_percentage',
        'blocks',
        'conference',
        'conference_losses',
        'conference_wins',
        'defensive_rebounds',
        'effective_field_goal_percentage',
        'field_goal_attempts',
        'field_goal_percentage',
        'field_goals',
        'free_throw_attempt_rate',
        'free_throw_attempts',
        'free_throw_percentage',
        'free_throws',
        'free_throws_per_field_goal_attempt',
        'games_played',
        'home_losses',
        'home_wins',
        'losses',
        'minutes_played',
        'name',
        'net_rating',
        'offensive_rating',
        'offensive_rebound_percentage',
        'offensive_rebounds',
        'opp_assist_percentage',
        'opp_assists',
        'opp_block_percentage',
        'opp_blocks',
        'opp_defensive_rebounds',
        'opp_effective_field_goal_percentage',
        'opp_field_goal_attempts',
        'opp_field_goal_percentage',
        'opp_field_goals',
        'opp_free_throw_attempt_rate',
        'opp_free_throw_attempts',
        'opp_free_throw_percentage',
        'opp_free_throws',
        'opp_free_throws_per_field_goal_attempt',
        'opp_offensive_rating',
        'opp_offensive_rebound_percentage',
        'opp_offensive_rebounds',
        'opp_personal_fouls',
        'opp_points',
        'opp_steal_percentage',
        'opp_steals',
        'opp_three_point_attempt_rate',
        'opp_three_point_field_goal_attempts',
        'opp_three_point_field_goal_percentage',
        'opp_three_point_field_goals',
        'opp_two_point_field_goal_attempts',
        'opp_two_point_field_goal_percentage',
        'opp_two_point_field_goals',
        'opp_total_rebound_percentage',
        'opp_total_rebounds',
        'opp_true_shooting_percentage',
        'opp_turnover_percentage',
        'opp_turnovers',
        'pace',
        'personal_fouls',
        'points',
        'simple_rating_system',
        'steal_percentage',
        'steals',
        'strength_of_schedule',
        'three_point_attempt_rate',
        'three_point_field_goal_attempts',
        'three_point_field_goal_percentage',
        'three_point_field_goals',
        'two_point_field_goal_attempts',
        'two_point_field_goal_percentage',
        'two_point_field_goals',
        'total_rebound_percentage',
        'total_rebounds',
        'true_shooting_percentage',
        'turnover_percentage',
        'turnovers',
        'win_percentage',
        'wins',
    )
    row = {field: getattr(self, field) for field in columns}
    return pd.DataFrame([row], index=[self._abbreviation])
|
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string abbreviation of the
team, such as 'PURDUE'.
|
def DictReader(file_obj, columns=None):  # pylint: disable=invalid-name
    """Yield each row of a parquet file as an OrderedDict.

    Nested values are flattened into the top-level dict and referenced
    with '.' notation (e.g. 'foo' -> 'bar' is referenced as 'foo.bar').

    :param file_obj: the file containing parquet data
    :param columns: the columns to include. If None (default), all
        columns are included. Nested values are referenced with "."
        notation.
    """
    footer = _read_footer(file_obj)
    if columns:
        keys = columns
    else:
        # Default to every typed element in the file's schema.
        keys = [element.name for element in footer.schema if element.type]
    for values in reader(file_obj, columns):
        yield OrderedDict(zip(keys, values))
|
Reader for a parquet file object.
This function is a generator returning an OrderedDict for each row
of data in the parquet file. Nested values will be flattened into the
top-level dict and can be referenced with '.' notation (e.g. 'foo' -> 'bar'
is referenced as 'foo.bar')
:param file_obj: the file containing parquet data
:param columns: the columns to include. If None (default), all columns
are included. Nested values are referenced with "." notation
|
def restore_event(self, requestId):
    """Re-create the tracked event for *requestId* if it is missing.

    Useful after the user app had to shut down with pending requests:
    the events being waited on can be rebuilt from persisted requestIds.
    Returns True when a new event was created, False when one was
    already being tracked.
    """
    with self.__requests:
        if requestId in self.__requests:
            return False
        self.__requests[requestId] = RequestEvent(requestId)
        return True
|
restore an event based on the requestId.
For example if the user app had to shutdown with pending requests.
The user can rebuild the Events they were waiting for based on the requestId(s).
|
def on_persist_completed(self, block):
    """
    Called when a block has been persisted to disk. Used as a hook to
    persist notification data.

    Every queued NotifyEvent is written into three prefixed databases
    (per-address, per-block and per-contract), each alongside a running
    little-endian event counter, after which the queues are cleared.

    Args:
        block (neo.Core.Block): the currently persisting block
    """
    if len(self._events_to_write):
        addr_db = self.db.prefixed_db(NotificationPrefix.PREFIX_ADDR)
        block_db = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK)
        contract_db = self.db.prefixed_db(NotificationPrefix.PREFIX_CONTRACT)

        block_write_batch = block_db.write_batch()
        contract_write_batch = contract_db.write_batch()

        block_count = 0
        # NOTE(review): assumes all queued events belong to the same block,
        # since the per-block key prefix is taken from the first event only
        # — confirm against the caller.
        block_bytes = self._events_to_write[0].block_number.to_bytes(4, 'little')

        for evt in self._events_to_write:  # type:NotifyEvent
            # write the event for both or one of the addresses involved in the transfer
            write_both = True
            hash_data = evt.ToByteArray()

            bytes_to = bytes(evt.addr_to.Data)
            bytes_from = bytes(evt.addr_from.Data)
            if bytes_to == bytes_from:
                write_both = False

            total_bytes_to = addr_db.get(bytes_to + NotificationPrefix.PREFIX_COUNT)
            total_bytes_from = addr_db.get(bytes_from + NotificationPrefix.PREFIX_COUNT)

            if not total_bytes_to:
                total_bytes_to = b'\x00'
            if not total_bytes_from:
                # BUGFIX: was b'x\00' (the two bytes 'x', NUL), which made the
                # very first per-address counter decode as 120 instead of 0.
                total_bytes_from = b'\x00'

            addr_to_key = bytes_to + total_bytes_to
            addr_from_key = bytes_from + total_bytes_from

            with addr_db.write_batch() as b:
                b.put(addr_to_key, hash_data)
                if write_both:
                    b.put(addr_from_key, hash_data)

                # bump the per-address counters (stored little-endian)
                total_bytes_to = int.from_bytes(total_bytes_to, 'little') + 1
                total_bytes_from = int.from_bytes(total_bytes_from, 'little') + 1
                new_bytes_to = total_bytes_to.to_bytes(4, 'little')
                new_bytes_from = total_bytes_from.to_bytes(4, 'little')
                b.put(bytes_to + NotificationPrefix.PREFIX_COUNT, new_bytes_to)
                if write_both:
                    b.put(bytes_from + NotificationPrefix.PREFIX_COUNT, new_bytes_from)

            # write the event to the per-block database
            per_block_key = block_bytes + block_count.to_bytes(4, 'little')
            block_write_batch.put(per_block_key, hash_data)
            block_count += 1

            # write the event to the per-contract database
            contract_bytes = bytes(evt.contract_hash.Data)
            count_for_contract = contract_db.get(contract_bytes + NotificationPrefix.PREFIX_COUNT)
            if not count_for_contract:
                count_for_contract = b'\x00'
            contract_event_key = contract_bytes + count_for_contract
            contract_count_int = int.from_bytes(count_for_contract, 'little') + 1
            new_contract_count = contract_count_int.to_bytes(4, 'little')
            contract_write_batch.put(contract_bytes + NotificationPrefix.PREFIX_COUNT, new_contract_count)
            contract_write_batch.put(contract_event_key, hash_data)

        # finish off the per-block write batch and contract write batch
        block_write_batch.write()
        contract_write_batch.write()

        self._events_to_write = []

    if len(self._new_contracts_to_write):
        token_db = self.db.prefixed_db(NotificationPrefix.PREFIX_TOKEN)
        token_write_batch = token_db.write_batch()

        for token_event in self._new_contracts_to_write:
            try:
                hash_data = token_event.ToByteArray()  # used to fail here
                hash_key = token_event.contract.Code.ScriptHash().ToBytes()
                token_write_batch.put(hash_key, hash_data)
            except Exception as e:
                logger.debug(f"Failed to write new contract, reason: {e}")

        token_write_batch.write()
        self._new_contracts_to_write = []
|
Called when a block has been persisted to disk. Used as a hook to persist notification data.
Args:
block (neo.Core.Block): the currently persisting block
|
def create_json_archive(self):
    """Write all received packets, with metadata, to the archive file."""
    packets = self.recv_msgs
    archive_data = {
        "packets": packets,
        "dataset": self.dataset_name,
        "num_packets": len(packets),
        "created": rnow(),
    }
    self.write_to_file(archive_data, self.archive_file)
|
create_json_archive
|
def smart_convert(original, colorkey, pixelalpha):
    """
    Run several tests on a surface to determine the optimal flags and
    pixel format for each tile surface.

    This is done for the best rendering speeds and removes the need to
    convert() the images on your own.

    :param original: the pygame Surface to convert
    :param colorkey: colorkey set by the tiled map, or falsy if none
    :param pixelalpha: truthy when per-pixel alpha should be used
    :return: a converted copy of *original*
    """
    tile_size = original.get_size()
    threshold = 127  # the default

    try:
        # count the number of pixels in the tile that are not transparent
        px = pygame.mask.from_surface(original, threshold).count()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. The fallback itself is deliberate:
        # pygame_sdl2 will fail because the mask module is not included;
        # in that case, just convert_alpha and return it.
        return original.convert_alpha()

    # there are no transparent pixels in the image
    if px == tile_size[0] * tile_size[1]:
        tile = original.convert()

    # there are transparent pixels, and tiled set a colorkey
    elif colorkey:
        tile = original.convert()
        tile.set_colorkey(colorkey, pygame.RLEACCEL)

    # there are transparent pixels, and set for perpixel alpha
    elif pixelalpha:
        tile = original.convert_alpha()

    # there are transparent pixels, and we won't handle them
    else:
        tile = original.convert()

    return tile
|
this method does several tests on a surface to determine the optimal
flags and pixel format for each tile surface.
this is done for the best rendering speeds and removes the need to
convert() the images on your own
|
def run(self, executable: Executable,
        memory_map: Dict[str, List[Union[int, float]]] = None) -> np.ndarray:
    """
    Run a quil executable. If the executable contains declared parameters,
    then a memory map must be provided, which defines the runtime values
    of these parameters.

    :param executable: The program to run. You are responsible for compiling this first.
    :param memory_map: The mapping of declared parameters to their values. The values
        are a list of floats or integers.
    :return: A numpy array of shape (trials, len(ro-register)) that contains 0s and 1s.
    """
    self.qam.load(executable)
    for region_name, values_list in (memory_map or {}).items():
        # TODO gh-658: have write_memory take a list rather than value + offset
        for offset, value in enumerate(values_list):
            self.qam.write_memory(region_name=region_name, offset=offset, value=value)
    finished = self.qam.run().wait()
    return finished.read_memory(region_name='ro')
|
Run a quil executable. If the executable contains declared parameters, then a memory
map must be provided, which defines the runtime values of these parameters.
:param executable: The program to run. You are responsible for compiling this first.
:param memory_map: The mapping of declared parameters to their values. The values
are a list of floats or integers.
:return: A numpy array of shape (trials, len(ro-register)) that contains 0s and 1s.
|
def edit(cls, record, parent=None, uifile='', commit=True):
    """
    Prompts the user to edit the inputted record in a modal dialog.

    :param record | <orb.Table>
           parent | <QWidget>
           uifile | <str> optional .ui file for the editor widget
           commit | <bool> commit the record on accept

    :return <bool> | accepted
    """
    # create the dialog
    dlg = QDialog(parent)
    dlg.setWindowTitle('Edit %s' % record.schema().name())

    # create the widget, honoring a schema-specified widget class override
    cls = record.schema().property('widgetClass', cls)
    widget = cls(dlg)
    if uifile:
        widget.setUiFile(uifile)
    widget.setRecord(record)
    widget.layout().setContentsMargins(0, 0, 0, 0)

    # create buttons
    opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel
    btns = QDialogButtonBox(opts, Qt.Horizontal, dlg)

    # create layout
    layout = QVBoxLayout()
    layout.addWidget(widget)
    layout.addWidget(btns)

    dlg.setLayout(layout)
    dlg.adjustSize()

    # create connections; saving is driven by the widget's saved signal
    btns.rejected.connect(dlg.reject)
    widget.saved.connect(dlg.accept)

    if dlg.exec_():
        if commit:
            result = widget.record().commit()
            if 'errored' in result:
                # BUGFIX: this function has no `self` (first parameter is
                # `cls`), so self.window() raised a NameError here; parent
                # the message box on the dialog instead.
                QMessageBox.information(dlg,
                                        'Error Committing to Database',
                                        result['errored'])
                return False
        return True
    return False
|
Prompts the user to edit the inputed record.
:param record | <orb.Table>
parent | <QWidget>
:return <bool> | accepted
|
def __set_rouge_dir(self, home_dir=None):
    """
    Verify presence of ROUGE-1.5.5.pl under the ROUGE home directory and
    record the binary and data-folder paths.
    """
    if home_dir:
        # An explicit home dir overrides (and is persisted to) settings.
        self._home_dir = home_dir
        self.save_home_dir()
    else:
        self._home_dir = self.__get_rouge_home_dir_from_settings()
    self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
    self.data_dir = os.path.join(self._home_dir, 'data')
    if not os.path.exists(self._bin_path):
        raise Exception(
            "ROUGE binary not found at {}. Please set the "
            "correct path by running pyrouge_set_rouge_path "
            "/path/to/rouge/home.".format(self._bin_path))
|
Verify presence of ROUGE-1.5.5.pl and data folder, and set
those paths.
|
def thumbnail(self, size):
    '''Get the thumbnail filename for a given size'''
    # Guard: only sizes registered in thumbnail_sizes are valid.
    if size not in self.thumbnail_sizes:
        raise ValueError('Unregistered thumbnail size {0}'.format(size))
    return self.thumbnails.get(str(size))
|
Get the thumbnail filename for a given size
|
def getContactItems(self, person):
    """
    Return all L{EmailAddress} instances associated with the given person.

    @type person: L{Person}
    """
    store = person.store
    query = EmailAddress.person == person
    return store.query(EmailAddress, query)
|
Return all L{EmailAddress} instances associated with the given person.
@type person: L{Person}
|
def setColor(self, typeID, color):
    """setColor(string, (integer, integer, integer, integer)) -> None
    Sets the color of this type.
    """
    # message payload: one type byte plus four color component bytes
    self._connection._beginMessage(
        tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_COLOR, typeID,
        1 + 1 + 1 + 1 + 1)
    red, green, blue, alpha = (int(component) for component in color)
    self._connection._string += struct.pack(
        "!BBBBB", tc.TYPE_COLOR, red, green, blue, alpha)
    self._connection._sendExact()
|
setColor(string, (integer, integer, integer, integer)) -> None
Sets the color of this type.
|
def create_db_user(username, password=None, flags=None):
    """Create a database user, optionally setting its password."""
    # default: no createdb, no superuser, no create-role privileges
    opts = flags if flags else u'-D -A -R'
    sudo(u'createuser %s %s' % (opts, username), user=u'postgres')
    if password:
        change_db_user_password(username, password)
|
Create a database user.
|
def search(
self, query, accept_language=None, pragma=None, user_agent=None, client_id=None, client_ip=None, location=None, answer_count=None, country_code=None, count=None, freshness=None, market="en-us", offset=None, promote=None, response_filter=None, safe_search=None, set_lang=None, text_decorations=None, text_format=None, custom_headers=None, raw=False, **operation_config):
"""The Web Search API lets you send a search query to Bing and get back
search results that include links to webpages, images, and more.
:param query: The user's search query term. The term may not be empty.
The term may contain Bing Advanced Operators. For example, to limit
results to a specific domain, use the site: operator.
:type query: str
:param accept_language: A comma-delimited list of one or more
languages to use for user interface strings. The list is in decreasing
order of preference. For additional information, including expected
format, see
[RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
This header and the setLang query parameter are mutually exclusive; do
not specify both. If you set this header, you must also specify the cc
query parameter. Bing will use the first supported language it finds
from the list, and combine that language with the cc parameter value
to determine the market to return results for. If the list does not
include a supported language, Bing will find the closest language and
market that supports the request, and may use an aggregated or default
market for the results instead of a specified one. You should use this
header and the cc query parameter only if you specify multiple
languages; otherwise, you should use the mkt and setLang query
parameters. A user interface string is a string that's used as a label
in a user interface. There are very few user interface strings in the
JSON response objects. Any links in the response objects to Bing.com
properties will apply the specified language.
:type accept_language: str
:param pragma: By default, Bing returns cached content, if available.
To prevent Bing from returning cached content, set the Pragma header
to no-cache (for example, Pragma: no-cache).
:type pragma: str
:param user_agent: The user agent originating the request. Bing uses
the user agent to provide mobile users with an optimized experience.
Although optional, you are strongly encouraged to always specify this
header. The user-agent should be the same string that any commonly
used browser would send. For information about user agents, see [RFC
2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
:type user_agent: str
:param client_id: Bing uses this header to provide users with
consistent behavior across Bing API calls. Bing often flights new
features and improvements, and it uses the client ID as a key for
assigning traffic on different flights. If you do not use the same
client ID for a user across multiple requests, then Bing may assign
the user to multiple conflicting flights. Being assigned to multiple
conflicting flights can lead to an inconsistent user experience. For
example, if the second request has a different flight assignment than
the first, the experience may be unexpected. Also, Bing can use the
client ID to tailor web results to that client ID’s search history,
providing a richer experience for the user. Bing also uses this header
to help improve result rankings by analyzing the activity generated by
a client ID. The relevance improvements help with better quality of
results delivered by Bing APIs and in turn enables higher
click-through rates for the API consumer. IMPORTANT: Although
optional, you should consider this header required. Persisting the
client ID across multiple requests for the same end user and device
combination enables 1) the API consumer to receive a consistent user
experience, and 2) higher click-through rates via better quality of
results from the Bing APIs. Each user that uses your application on
the device must have a unique, Bing generated client ID. If you do not
include this header in the request, Bing generates an ID and returns
it in the X-MSEdge-ClientID response header. The only time that you
should NOT include this header in a request is the first time the user
uses your app on that device. Use the client ID for each Bing API
request that your app makes for this user on the device. Persist the
client ID. To persist the ID in a browser app, use a persistent HTTP
cookie to ensure the ID is used across all sessions. Do not use a
session cookie. For other apps such as mobile apps, use the device's
persistent storage to persist the ID. The next time the user uses your
app on that device, get the client ID that you persisted. Bing
responses may or may not include this header. If the response includes
this header, capture the client ID and use it for all subsequent Bing
requests for the user on that device. If you include the
X-MSEdge-ClientID, you must not include cookies in the request.
:type client_id: str
:param client_ip: The IPv4 or IPv6 address of the client device. The
IP address is used to discover the user's location. Bing uses the
location information to determine safe search behavior. Although
optional, you are encouraged to always specify this header and the
X-Search-Location header. Do not obfuscate the address (for example,
by changing the last octet to 0). Obfuscating the address results in
the location not being anywhere near the device's actual location,
which may result in Bing serving erroneous results.
:type client_ip: str
:param location: A semicolon-delimited list of key/value pairs that
describe the client's geographical location. Bing uses the location
information to determine safe search behavior and to return relevant
local content. Specify the key/value pair as <key>:<value>. The
following are the keys that you use to specify the user's location.
lat (required): The latitude of the client's location, in degrees. The
latitude must be greater than or equal to -90.0 and less than or equal
to +90.0. Negative values indicate southern latitudes and positive
values indicate northern latitudes. long (required): The longitude of
the client's location, in degrees. The longitude must be greater than
or equal to -180.0 and less than or equal to +180.0. Negative values
indicate western longitudes and positive values indicate eastern
longitudes. re (required): The radius, in meters, which specifies the
horizontal accuracy of the coordinates. Pass the value returned by the
device's location service. Typical values might be 22m for GPS/Wi-Fi,
380m for cell tower triangulation, and 18,000m for reverse IP lookup.
ts (optional): The UTC UNIX timestamp of when the client was at the
location. (The UNIX timestamp is the number of seconds since January
1, 1970.) head (optional): The client's relative heading or direction
of travel. Specify the direction of travel as degrees from 0 through
360, counting clockwise relative to true north. Specify this key only
if the sp key is nonzero. sp (optional): The horizontal velocity
(speed), in meters per second, that the client device is traveling.
alt (optional): The altitude of the client device, in meters. are
(optional): The radius, in meters, that specifies the vertical
accuracy of the coordinates. Specify this key only if you specify the
alt key. Although many of the keys are optional, the more information
that you provide, the more accurate the location results are. Although
optional, you are encouraged to always specify the user's geographical
location. Providing the location is especially important if the
client's IP address does not accurately reflect the user's physical
location (for example, if the client uses VPN). For optimal results,
you should include this header and the X-MSEdge-ClientIP header, but
at a minimum, you should include this header.
:type location: str
:param answer_count: The number of answers that you want the response
to include. The answers that Bing returns are based on ranking. For
example, if Bing returns webpages, images, videos, and relatedSearches
for a request and you set this parameter to two (2), the response
includes webpages and images.If you included the responseFilter query
parameter in the same request and set it to webpages and news, the
response would include only webpages.
:type answer_count: int
:param country_code: A 2-character country code of the country where
the results come from. This API supports only the United States
market. If you specify this query parameter, it must be set to us. If
you set this parameter, you must also specify the Accept-Language
header. Bing uses the first supported language it finds from the
languages list, and combine that language with the country code that
you specify to determine the market to return results for. If the
languages list does not include a supported language, Bing finds the
closest language and market that supports the request, or it may use
an aggregated or default market for the results instead of a specified
one. You should use this query parameter and the Accept-Language query
parameter only if you specify multiple languages; otherwise, you
should use the mkt and setLang query parameters. This parameter and
the mkt query parameter are mutually exclusive—do not specify both.
:type country_code: str
:param count: The number of search results to return in the response.
The default is 10 and the maximum value is 50. The actual number
delivered may be less than requested.Use this parameter along with the
offset parameter to page results.For example, if your user interface
displays 10 search results per page, set count to 10 and offset to 0
to get the first page of results. For each subsequent page, increment
offset by 10 (for example, 0, 10, 20). It is possible for multiple
pages to include some overlap in results.
:type count: int
:param freshness: Filter search results by the following age values:
Day—Return webpages that Bing discovered within the last 24 hours.
Week—Return webpages that Bing discovered within the last 7 days.
Month—Return webpages that discovered within the last 30 days. This
filter applies only to webpage results and not to the other results
such as news and images. Possible values include: 'Day', 'Week',
'Month'
:type freshness: str or
~azure.cognitiveservices.search.websearch.models.Freshness
:param market: The market where the results come from. Typically, mkt
is the country where the user is making the request from. However, it
could be a different country if the user is not located in a country
where Bing delivers results. The market must be in the form <language
code>-<country code>. For example, en-US. The string is case
insensitive. If known, you are encouraged to always specify the
market. Specifying the market helps Bing route the request and return
an appropriate and optimal response. If you specify a market that is
not listed in Market Codes, Bing uses a best fit market code based on
an internal mapping that is subject to change. This parameter and the
cc query parameter are mutually exclusive—do not specify both.
:type market: str
:param offset: The zero-based offset that indicates the number of
search results to skip before returning results. The default is 0. The
offset should be less than (totalEstimatedMatches - count). Use this
parameter along with the count parameter to page results. For example,
if your user interface displays 10 search results per page, set count
to 10 and offset to 0 to get the first page of results. For each
subsequent page, increment offset by 10 (for example, 0, 10, 20). it
is possible for multiple pages to include some overlap in results.
:type offset: int
:param promote: A comma-delimited list of answers that you want the
response to include regardless of their ranking. For example, if you
set answerCount) to two (2) so Bing returns the top two ranked
answers, but you also want the response to include news, you'd set
promote to news. If the top ranked answers are webpages, images,
videos, and relatedSearches, the response includes webpages and images
because news is not a ranked answer. But if you set promote to video,
Bing would promote the video answer into the response and return
webpages, images, and videos. The answers that you want to promote do
not count against the answerCount limit. For example, if the ranked
answers are news, images, and videos, and you set answerCount to 1 and
promote to news, the response contains news and images. Or, if the
ranked answers are videos, images, and news, the response contains
videos and news. Possible values are Computation, Images, News,
RelatedSearches, SpellSuggestions, TimeZone, Videos, Webpages. Use
only if you specify answerCount.
:type promote: list[str or
~azure.cognitiveservices.search.websearch.models.AnswerType]
:param response_filter: A comma-delimited list of answers to include
in the response. If you do not specify this parameter, the response
includes all search answers for which there's relevant data. Possible
filter values are Computation, Images, News, RelatedSearches,
SpellSuggestions, TimeZone, Videos, Webpages. Although you may use
this filter to get a single answer, you should instead use the
answer-specific endpoint in order to get richer results. For example,
to receive only images, send the request to one of the Image Search
API endpoints. The RelatedSearches and SpellSuggestions answers do not
support a separate endpoint like the Image Search API does (only the
Web Search API returns them). To include answers that would otherwise
be excluded because of ranking, see the promote query parameter.
:type response_filter: list[str or
~azure.cognitiveservices.search.websearch.models.AnswerType]
:param safe_search: A filter used to filter adult content. Off: Return
webpages with adult text, images, or videos. Moderate: Return webpages
with adult text, but not adult images or videos. Strict: Do not return
webpages with adult text, images, or videos. The default is Moderate.
If the request comes from a market that Bing's adult policy requires
that safeSearch is set to Strict, Bing ignores the safeSearch value
and uses Strict. If you use the site: query operator, there is the
chance that the response may contain adult content regardless of what
the safeSearch query parameter is set to. Use site: only if you are
aware of the content on the site and your scenario supports the
possibility of adult content. Possible values include: 'Off',
'Moderate', 'Strict'
:type safe_search: str or
~azure.cognitiveservices.search.websearch.models.SafeSearch
:param set_lang: The language to use for user interface strings.
Specify the language using the ISO 639-1 2-letter language code. For
example, the language code for English is EN. The default is EN
(English). Although optional, you should always specify the language.
Typically, you set setLang to the same language specified by mkt
unless the user wants the user interface strings displayed in a
different language. This parameter and the Accept-Language header are
mutually exclusive; do not specify both. A user interface string is a
string that's used as a label in a user interface. There are few user
interface strings in the JSON response objects. Also, any links to
Bing.com properties in the response objects apply the specified
language.
:type set_lang: str
:param text_decorations: A Boolean value that determines whether
display strings should contain decoration markers such as hit
highlighting characters. If true, the strings may include markers. The
default is false. To specify whether to use Unicode characters or HTML
tags as the markers, see the textFormat query parameter.
:type text_decorations: bool
:param text_format: The type of markers to use for text decorations
(see the textDecorations query parameter). Possible values are Raw—Use
Unicode characters to mark content that needs special formatting. The
Unicode characters are in the range E000 through E019. For example,
Bing uses E000 and E001 to mark the beginning and end of query terms
for hit highlighting. HTML—Use HTML tags to mark content that needs
special formatting. For example, use <b> tags to highlight query terms
in display strings. The default is Raw. For display strings that
contain escapable HTML characters such as <, >, and &, if textFormat
is set to HTML, Bing escapes the characters as appropriate (for
example, < is escaped to <). Possible values include: 'Raw', 'Html'
:type text_format: str or
~azure.cognitiveservices.search.websearch.models.TextFormat
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SearchResponse or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.search.websearch.models.SearchResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.search.websearch.models.ErrorResponseException>`
"""
# Construct URL
url = self.search.metadata['url']
# Construct parameters
query_parameters = {}
if answer_count is not None:
query_parameters['answerCount'] = self._serialize.query("answer_count", answer_count, 'int')
if country_code is not None:
query_parameters['cc'] = self._serialize.query("country_code", country_code, 'str')
if count is not None:
query_parameters['count'] = self._serialize.query("count", count, 'int')
if freshness is not None:
query_parameters['freshness'] = self._serialize.query("freshness", freshness, 'str')
if market is not None:
query_parameters['mkt'] = self._serialize.query("market", market, 'str')
if offset is not None:
query_parameters['offset'] = self._serialize.query("offset", offset, 'int')
if promote is not None:
query_parameters['promote'] = self._serialize.query("promote", promote, '[str]', div=',')
query_parameters['q'] = self._serialize.query("query", query, 'str')
if response_filter is not None:
query_parameters['responseFilter'] = self._serialize.query("response_filter", response_filter, '[str]', div=',')
if safe_search is not None:
query_parameters['safeSearch'] = self._serialize.query("safe_search", safe_search, 'str')
if set_lang is not None:
query_parameters['setLang'] = self._serialize.query("set_lang", set_lang, 'str')
if text_decorations is not None:
query_parameters['textDecorations'] = self._serialize.query("text_decorations", text_decorations, 'bool')
if text_format is not None:
query_parameters['textFormat'] = self._serialize.query("text_format", text_format, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
header_parameters['X-BingApis-SDK'] = self._serialize.header("self.x_bing_apis_sdk", self.x_bing_apis_sdk, 'str')
if accept_language is not None:
header_parameters['Accept-Language'] = self._serialize.header("accept_language", accept_language, 'str')
if pragma is not None:
header_parameters['Pragma'] = self._serialize.header("pragma", pragma, 'str')
if user_agent is not None:
header_parameters['User-Agent'] = self._serialize.header("user_agent", user_agent, 'str')
if client_id is not None:
header_parameters['X-MSEdge-ClientID'] = self._serialize.header("client_id", client_id, 'str')
if client_ip is not None:
header_parameters['X-MSEdge-ClientIP'] = self._serialize.header("client_ip", client_ip, 'str')
if location is not None:
header_parameters['X-Search-Location'] = self._serialize.header("location", location, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
|
The Web Search API lets you send a search query to Bing and get back
search results that include links to webpages, images, and more.
:param query: The user's search query term. The term may not be empty.
The term may contain Bing Advanced Operators. For example, to limit
results to a specific domain, use the site: operator.
:type query: str
:param accept_language: A comma-delimited list of one or more
languages to use for user interface strings. The list is in decreasing
order of preference. For additional information, including expected
format, see
[RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
This header and the setLang query parameter are mutually exclusive; do
not specify both. If you set this header, you must also specify the cc
query parameter. Bing will use the first supported language it finds
from the list, and combine that language with the cc parameter value
to determine the market to return results for. If the list does not
include a supported language, Bing will find the closest language and
market that supports the request, and may use an aggregated or default
market for the results instead of a specified one. You should use this
header and the cc query parameter only if you specify multiple
languages; otherwise, you should use the mkt and setLang query
parameters. A user interface string is a string that's used as a label
in a user interface. There are very few user interface strings in the
JSON response objects. Any links in the response objects to Bing.com
properties will apply the specified language.
:type accept_language: str
:param pragma: By default, Bing returns cached content, if available.
To prevent Bing from returning cached content, set the Pragma header
to no-cache (for example, Pragma: no-cache).
:type pragma: str
:param user_agent: The user agent originating the request. Bing uses
the user agent to provide mobile users with an optimized experience.
Although optional, you are strongly encouraged to always specify this
header. The user-agent should be the same string that any commonly
used browser would send. For information about user agents, see [RFC
2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
:type user_agent: str
:param client_id: Bing uses this header to provide users with
consistent behavior across Bing API calls. Bing often flights new
features and improvements, and it uses the client ID as a key for
assigning traffic on different flights. If you do not use the same
client ID for a user across multiple requests, then Bing may assign
the user to multiple conflicting flights. Being assigned to multiple
conflicting flights can lead to an inconsistent user experience. For
example, if the second request has a different flight assignment than
the first, the experience may be unexpected. Also, Bing can use the
client ID to tailor web results to that client ID’s search history,
providing a richer experience for the user. Bing also uses this header
to help improve result rankings by analyzing the activity generated by
a client ID. The relevance improvements help with better quality of
results delivered by Bing APIs and in turn enables higher
click-through rates for the API consumer. IMPORTANT: Although
optional, you should consider this header required. Persisting the
client ID across multiple requests for the same end user and device
combination enables 1) the API consumer to receive a consistent user
experience, and 2) higher click-through rates via better quality of
results from the Bing APIs. Each user that uses your application on
the device must have a unique, Bing generated client ID. If you do not
include this header in the request, Bing generates an ID and returns
it in the X-MSEdge-ClientID response header. The only time that you
should NOT include this header in a request is the first time the user
uses your app on that device. Use the client ID for each Bing API
request that your app makes for this user on the device. Persist the
client ID. To persist the ID in a browser app, use a persistent HTTP
cookie to ensure the ID is used across all sessions. Do not use a
session cookie. For other apps such as mobile apps, use the device's
persistent storage to persist the ID. The next time the user uses your
app on that device, get the client ID that you persisted. Bing
responses may or may not include this header. If the response includes
this header, capture the client ID and use it for all subsequent Bing
requests for the user on that device. If you include the
X-MSEdge-ClientID, you must not include cookies in the request.
:type client_id: str
:param client_ip: The IPv4 or IPv6 address of the client device. The
IP address is used to discover the user's location. Bing uses the
location information to determine safe search behavior. Although
optional, you are encouraged to always specify this header and the
X-Search-Location header. Do not obfuscate the address (for example,
by changing the last octet to 0). Obfuscating the address results in
the location not being anywhere near the device's actual location,
which may result in Bing serving erroneous results.
:type client_ip: str
:param location: A semicolon-delimited list of key/value pairs that
describe the client's geographical location. Bing uses the location
information to determine safe search behavior and to return relevant
local content. Specify the key/value pair as <key>:<value>. The
following are the keys that you use to specify the user's location.
lat (required): The latitude of the client's location, in degrees. The
latitude must be greater than or equal to -90.0 and less than or equal
to +90.0. Negative values indicate southern latitudes and positive
values indicate northern latitudes. long (required): The longitude of
the client's location, in degrees. The longitude must be greater than
or equal to -180.0 and less than or equal to +180.0. Negative values
indicate western longitudes and positive values indicate eastern
longitudes. re (required): The radius, in meters, which specifies the
horizontal accuracy of the coordinates. Pass the value returned by the
device's location service. Typical values might be 22m for GPS/Wi-Fi,
380m for cell tower triangulation, and 18,000m for reverse IP lookup.
ts (optional): The UTC UNIX timestamp of when the client was at the
location. (The UNIX timestamp is the number of seconds since January
1, 1970.) head (optional): The client's relative heading or direction
of travel. Specify the direction of travel as degrees from 0 through
360, counting clockwise relative to true north. Specify this key only
if the sp key is nonzero. sp (optional): The horizontal velocity
(speed), in meters per second, that the client device is traveling.
alt (optional): The altitude of the client device, in meters. are
(optional): The radius, in meters, that specifies the vertical
accuracy of the coordinates. Specify this key only if you specify the
alt key. Although many of the keys are optional, the more information
that you provide, the more accurate the location results are. Although
optional, you are encouraged to always specify the user's geographical
location. Providing the location is especially important if the
client's IP address does not accurately reflect the user's physical
location (for example, if the client uses VPN). For optimal results,
you should include this header and the X-MSEdge-ClientIP header, but
at a minimum, you should include this header.
:type location: str
:param answer_count: The number of answers that you want the response
to include. The answers that Bing returns are based on ranking. For
example, if Bing returns webpages, images, videos, and relatedSearches
for a request and you set this parameter to two (2), the response
includes webpages and images.If you included the responseFilter query
parameter in the same request and set it to webpages and news, the
response would include only webpages.
:type answer_count: int
:param country_code: A 2-character country code of the country where
the results come from. This API supports only the United States
market. If you specify this query parameter, it must be set to us. If
you set this parameter, you must also specify the Accept-Language
header. Bing uses the first supported language it finds from the
languages list, and combine that language with the country code that
you specify to determine the market to return results for. If the
languages list does not include a supported language, Bing finds the
closest language and market that supports the request, or it may use
an aggregated or default market for the results instead of a specified
one. You should use this query parameter and the Accept-Language query
parameter only if you specify multiple languages; otherwise, you
should use the mkt and setLang query parameters. This parameter and
the mkt query parameter are mutually exclusive—do not specify both.
:type country_code: str
:param count: The number of search results to return in the response.
The default is 10 and the maximum value is 50. The actual number
delivered may be less than requested.Use this parameter along with the
offset parameter to page results.For example, if your user interface
displays 10 search results per page, set count to 10 and offset to 0
to get the first page of results. For each subsequent page, increment
offset by 10 (for example, 0, 10, 20). It is possible for multiple
pages to include some overlap in results.
:type count: int
:param freshness: Filter search results by the following age values:
Day—Return webpages that Bing discovered within the last 24 hours.
Week—Return webpages that Bing discovered within the last 7 days.
Month—Return webpages that Bing discovered within the last 30 days. This
filter applies only to webpage results and not to the other results
such as news and images. Possible values include: 'Day', 'Week',
'Month'
:type freshness: str or
~azure.cognitiveservices.search.websearch.models.Freshness
:param market: The market where the results come from. Typically, mkt
is the country where the user is making the request from. However, it
could be a different country if the user is not located in a country
where Bing delivers results. The market must be in the form <language
code>-<country code>. For example, en-US. The string is case
insensitive. If known, you are encouraged to always specify the
market. Specifying the market helps Bing route the request and return
an appropriate and optimal response. If you specify a market that is
not listed in Market Codes, Bing uses a best fit market code based on
an internal mapping that is subject to change. This parameter and the
cc query parameter are mutually exclusive—do not specify both.
:type market: str
:param offset: The zero-based offset that indicates the number of
search results to skip before returning results. The default is 0. The
offset should be less than (totalEstimatedMatches - count). Use this
parameter along with the count parameter to page results. For example,
if your user interface displays 10 search results per page, set count
to 10 and offset to 0 to get the first page of results. For each
subsequent page, increment offset by 10 (for example, 0, 10, 20). it
is possible for multiple pages to include some overlap in results.
:type offset: int
:param promote: A comma-delimited list of answers that you want the
response to include regardless of their ranking. For example, if you
set answerCount) to two (2) so Bing returns the top two ranked
answers, but you also want the response to include news, you'd set
promote to news. If the top ranked answers are webpages, images,
videos, and relatedSearches, the response includes webpages and images
because news is not a ranked answer. But if you set promote to video,
Bing would promote the video answer into the response and return
webpages, images, and videos. The answers that you want to promote do
not count against the answerCount limit. For example, if the ranked
answers are news, images, and videos, and you set answerCount to 1 and
promote to news, the response contains news and images. Or, if the
ranked answers are videos, images, and news, the response contains
videos and news. Possible values are Computation, Images, News,
RelatedSearches, SpellSuggestions, TimeZone, Videos, Webpages. Use
only if you specify answerCount.
:type promote: list[str or
~azure.cognitiveservices.search.websearch.models.AnswerType]
:param response_filter: A comma-delimited list of answers to include
in the response. If you do not specify this parameter, the response
includes all search answers for which there's relevant data. Possible
filter values are Computation, Images, News, RelatedSearches,
SpellSuggestions, TimeZone, Videos, Webpages. Although you may use
this filter to get a single answer, you should instead use the
answer-specific endpoint in order to get richer results. For example,
to receive only images, send the request to one of the Image Search
API endpoints. The RelatedSearches and SpellSuggestions answers do not
support a separate endpoint like the Image Search API does (only the
Web Search API returns them). To include answers that would otherwise
be excluded because of ranking, see the promote query parameter.
:type response_filter: list[str or
~azure.cognitiveservices.search.websearch.models.AnswerType]
:param safe_search: A filter used to filter adult content. Off: Return
webpages with adult text, images, or videos. Moderate: Return webpages
with adult text, but not adult images or videos. Strict: Do not return
webpages with adult text, images, or videos. The default is Moderate.
If the request comes from a market that Bing's adult policy requires
that safeSearch is set to Strict, Bing ignores the safeSearch value
and uses Strict. If you use the site: query operator, there is the
chance that the response may contain adult content regardless of what
the safeSearch query parameter is set to. Use site: only if you are
aware of the content on the site and your scenario supports the
possibility of adult content. Possible values include: 'Off',
'Moderate', 'Strict'
:type safe_search: str or
~azure.cognitiveservices.search.websearch.models.SafeSearch
:param set_lang: The language to use for user interface strings.
Specify the language using the ISO 639-1 2-letter language code. For
example, the language code for English is EN. The default is EN
(English). Although optional, you should always specify the language.
Typically, you set setLang to the same language specified by mkt
unless the user wants the user interface strings displayed in a
different language. This parameter and the Accept-Language header are
mutually exclusive; do not specify both. A user interface string is a
string that's used as a label in a user interface. There are few user
interface strings in the JSON response objects. Also, any links to
Bing.com properties in the response objects apply the specified
language.
:type set_lang: str
:param text_decorations: A Boolean value that determines whether
display strings should contain decoration markers such as hit
highlighting characters. If true, the strings may include markers. The
default is false. To specify whether to use Unicode characters or HTML
tags as the markers, see the textFormat query parameter.
:type text_decorations: bool
:param text_format: The type of markers to use for text decorations
(see the textDecorations query parameter). Possible values are Raw—Use
Unicode characters to mark content that needs special formatting. The
Unicode characters are in the range E000 through E019. For example,
Bing uses E000 and E001 to mark the beginning and end of query terms
for hit highlighting. HTML—Use HTML tags to mark content that needs
special formatting. For example, use <b> tags to highlight query terms
in display strings. The default is Raw. For display strings that
contain escapable HTML characters such as <, >, and &, if textFormat
is set to HTML, Bing escapes the characters as appropriate (for
example, < is escaped to &lt;). Possible values include: 'Raw', 'Html'
:type text_format: str or
~azure.cognitiveservices.search.websearch.models.TextFormat
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: SearchResponse or ClientRawResponse if raw=true
:rtype:
~azure.cognitiveservices.search.websearch.models.SearchResponse or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.cognitiveservices.search.websearch.models.ErrorResponseException>`
|
def _encrypt_message(self, msg, nonce, timestamp=None):
    """Encrypt and package a reply message to a WeChat official-account user.

    :param msg: the message to send back to the user, as an XML-format string
    :param nonce: random string; may be self-generated or taken from the
        request URL's nonce parameter
    :param timestamp: timestamp; may be self-generated or taken from the
        request URL's timestamp parameter; if None, the current time is used
    :return: the encrypted reply ready to return to the user — an XML-format
        string containing msg_signature, timestamp, nonce and encrypt
    """
    reply_template = """<xml>
<Encrypt><![CDATA[{encrypt}]]></Encrypt>
<MsgSignature><![CDATA[{signature}]]></MsgSignature>
<TimeStamp>{timestamp}</TimeStamp>
<Nonce><![CDATA[{nonce}]]></Nonce>
</xml>"""
    nonce = to_binary(nonce)
    # fall back to the current time when no timestamp was supplied
    timestamp = to_binary(timestamp) or to_binary(int(time.time()))
    ciphertext = self.__pc.encrypt(to_text(msg), self.__id)
    # compute the secure signature over token, timestamp, nonce and ciphertext
    digest = get_sha1_signature(self.__token, timestamp, nonce, ciphertext)
    return to_text(reply_template.format(
        encrypt=to_text(ciphertext),
        signature=to_text(digest),
        timestamp=to_text(timestamp),
        nonce=to_text(nonce)
    ))
|
将公众号回复用户的消息加密打包
:param msg: 待回复用户的消息,xml格式的字符串
:param nonce: 随机串,可以自己生成,也可以用URL参数的nonce
:param timestamp: 时间戳,可以自己生成,也可以用URL参数的timestamp,如为None则自动用当前时间
:return: 加密后的可以直接回复用户的密文,包括msg_signature, timestamp, nonce, encrypt的xml格式的字符串
|
def getBurstingColumnsStats(self):
    """
    Gets statistics on the Temporal Memory's bursting columns. Used as a metric
    of Temporal Memory's learning performance.
    :return: mean, standard deviation, and max of Temporal Memory's bursting
    columns over time
    """
    traceData = self.tm.mmGetTraceUnpredictedActiveColumns().data
    resetData = self.tm.mmGetTraceResets().data
    # Count bursting (unpredicted active) columns per timestep, skipping
    # timesteps where the sequence was reset so resets don't skew the stats.
    # zip() replaces the original Python-2-only xrange(len(...)) index loop
    # and works on both Python 2 and 3.
    countTrace = [len(cols) for cols, wasReset in zip(traceData, resetData)
                  if not wasReset]
    mean = numpy.mean(countTrace)
    stdDev = numpy.std(countTrace)
    # NOTE: max() raises on an empty trace, matching the original behavior.
    maximum = max(countTrace)
    return mean, stdDev, maximum
|
Gets statistics on the Temporal Memory's bursting columns. Used as a metric
of Temporal Memory's learning performance.
:return: mean, standard deviation, and max of Temporal Memory's bursting
columns over time
|
def _method_response_handler(self, response: Dict[str, Any]):
"""处理200~399段状态码,为对应的响应设置结果.
Parameters:
(response): - 响应的python字典形式数据
Return:
(bool): - 准确地说没有错误就会返回True
"""
code = response.get("CODE")
if code in (200, 300):
self._result_handler(response)
else:
asyncio.ensure_future(self._gen_result_handler(response))
|
处理200~399段状态码,为对应的响应设置结果.
Parameters:
(response): - 响应的python字典形式数据
Return:
(bool): - 准确地说没有错误就会返回True
|
def _brute_force_install_pip(self):
    """A brute force install of pip itself.

    Runs a standalone pip installer script with the virtualenv's own Python
    interpreter, downloading the installer first if it is not already present
    at ``self.pip_installer_fname``. Marks ``self.pip_installed`` True when done.
    """
    # Reuse a previously downloaded installer if it exists on disk.
    if os.path.exists(self.pip_installer_fname):
        logger.debug("Using pip installer from %r", self.pip_installer_fname)
    else:
        logger.debug(
            "Installer for pip not found in %r, downloading it", self.pip_installer_fname)
        self._download_pip_installer()
    logger.debug("Installing PIP manually in the virtualenv")
    # Run the installer with the virtualenv's python so pip lands in the venv.
    python_exe = os.path.join(self.env_bin_path, "python")
    # '-I' is presumably forwarded to pip (--ignore-installed) to force a
    # clean install — TODO confirm against the installer script's interface.
    helpers.logged_exec([python_exe, self.pip_installer_fname, '-I'])
    self.pip_installed = True
|
A brute force install of pip itself.
|
def find_one(self, cls, id):
    """Required functionality."""
    matches = self._find(cls, {"_id": id})
    # An empty result set maps to None; otherwise return the first match.
    return matches[0] if matches else None
|
Required functionality.
|
def from_packed(cls, packed):
    """Unpack diploid genotypes that have been bit-packed into single
    bytes.
    Parameters
    ----------
    packed : ndarray, uint8, shape (n_variants, n_samples)
        Bit-packed diploid genotype array.
    Returns
    -------
    g : GenotypeArray, shape (n_variants, n_samples, 2)
        Genotype array.
    Examples
    --------
    >>> import allel
    >>> import numpy as np
    >>> packed = np.array([[0, 1],
    ...                    [2, 17],
    ...                    [34, 239]], dtype='u1')
    >>> allel.GenotypeArray.from_packed(packed)
    <GenotypeArray shape=(3, 2, 2) dtype=int8>
    0/0 0/1
    0/2 1/1
    2/2 ./.
    """
    # validate the input before handing it to the low-level unpack routine
    arr = np.asarray(packed)
    check_ndim(arr, 2)
    check_dtype(arr, 'u1')
    unpacked = genotype_array_unpack_diploid(memoryview_safe(arr))
    return cls(unpacked)
|
Unpack diploid genotypes that have been bit-packed into single
bytes.
Parameters
----------
packed : ndarray, uint8, shape (n_variants, n_samples)
Bit-packed diploid genotype array.
Returns
-------
g : GenotypeArray, shape (n_variants, n_samples, 2)
Genotype array.
Examples
--------
>>> import allel
>>> import numpy as np
>>> packed = np.array([[0, 1],
... [2, 17],
... [34, 239]], dtype='u1')
>>> allel.GenotypeArray.from_packed(packed)
<GenotypeArray shape=(3, 2, 2) dtype=int8>
0/0 0/1
0/2 1/1
2/2 ./.
|
def radio_status_encode(self, rssi, remrssi, txbuf, noise, remnoise, rxerrors, fixed):
    '''
    Status generated by radio and injected into MAVLink stream.

    Thin factory wrapper: packs the given field values into a
    MAVLink_radio_status_message instance without any validation.

    rssi              : Local signal strength (uint8_t)
    remrssi           : Remote signal strength (uint8_t)
    txbuf             : Remaining free buffer space in percent. (uint8_t)
    noise             : Background noise level (uint8_t)
    remnoise          : Remote background noise level (uint8_t)
    rxerrors          : Receive errors (uint16_t)
    fixed             : Count of error corrected packets (uint16_t)
    '''
    return MAVLink_radio_status_message(rssi, remrssi, txbuf, noise, remnoise, rxerrors, fixed)
|
Status generated by radio and injected into MAVLink stream.
rssi : Local signal strength (uint8_t)
remrssi : Remote signal strength (uint8_t)
txbuf : Remaining free buffer space in percent. (uint8_t)
noise : Background noise level (uint8_t)
remnoise : Remote background noise level (uint8_t)
rxerrors : Receive errors (uint16_t)
fixed : Count of error corrected packets (uint16_t)
|
def wait_until_visible(self, timeout=None):
    """Search element and wait until it is visible

    :param timeout: max time to wait
    :returns: page element instance
    """
    try:
        self.utils.wait_until_element_visible(self, timeout)
    except TimeoutException as exc:
        # Enrich both the log and the raised exception with locator context.
        wait_time = timeout if timeout else self.utils.get_explicitly_wait()
        parent_info = " and parent locator '{}'".format(self.parent) if self.parent else ''
        template = "Page element of type '%s' with locator %s%s not found or is not visible after %s seconds"
        self.logger.error(template, type(self).__name__, self.locator, parent_info, wait_time)
        exc.msg += "\n {}".format(template % (type(self).__name__, self.locator, parent_info, wait_time))
        raise exc
    return self
|
Search element and wait until it is visible
:param timeout: max time to wait
:returns: page element instance
|
def process_rdfgraph(self, rg, ont=None):
    """
    Transform a SKOS terminology expressed in an rdf graph into an Ontology object

    Arguments
    ---------
    rg: rdflib.Graph
        graph object
    ont: Ontology, optional
        ontology to populate; a fresh Ontology is created when omitted

    Returns
    -------
    Ontology
    """
    # TODO: ontology metadata
    if ont is None:
        ont = Ontology()
    subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
    if len(subjs) == 0:
        logging.warning("No ConceptScheme")
    else:
        ont.id = self._uri2id(subjs[0])
    subset_map = {}
    for concept in rg.subjects(RDF.type, SKOS.Concept):
        for s in self._get_schemes(rg, concept):
            subset_map[self._uri2id(s)] = s
    for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
        # Renamed from `id` to avoid shadowing the builtin; the unused
        # `concept_uri` local was removed.
        concept_id = self._uri2id(concept)
        logging.info("ADDING: {}".format(concept_id))
        ont.add_node(concept_id, self._get_label(rg, concept))
        # Only keep definitions in the configured language.
        for defn in rg.objects(concept, SKOS.definition):
            if (defn.language == self.lang):
                td = TextDefinition(concept_id, escape_value(defn.value))
                ont.add_text_definition(td)
        # skos:broader -> is_a parents; skos:related -> typed parents.
        for s in rg.objects(concept, SKOS.broader):
            ont.add_parent(concept_id, self._uri2id(s))
        for s in rg.objects(concept, SKOS.related):
            ont.add_parent(concept_id, self._uri2id(s), self._uri2id(SKOS.related))
        for m in rg.objects(concept, SKOS.exactMatch):
            ont.add_xref(concept_id, self._uri2id(m))
        for m in rg.objects(concept, SKOS.altLabel):
            syn = Synonym(concept_id, val=self._uri2id(m))
            ont.add_synonym(syn)
        for s in self._get_schemes(rg, concept):
            ont.add_to_subset(concept_id, self._uri2id(s))
    return ont
|
Transform a skos terminology expressed in an rdf graph into an Ontology object
Arguments
---------
rg: rdflib.Graph
graph object
Returns
-------
Ontology
|
def plot_macadam(
    ellipse_scaling=10,
    plot_filter_positions=False,
    plot_standard_deviations=False,
    plot_rgb_triangle=True,
    plot_mesh=True,
    n=1,
    xy_to_2d=lambda xy: xy,
    axes_labels=("x", "y"),
):
    """Plot the MacAdam (1942) ellipses over the chromaticity plane.

    See <https://en.wikipedia.org/wiki/MacAdam_ellipse>,
    <https://doi.org/10.1364%2FJOSA.32.000247>.

    :param ellipse_scaling: magnification factor applied to each ellipse
        (forwarded to the plot helper; the raw ellipses are very small).
    :param plot_filter_positions: appears unused in the current body
        (the code that used it is commented out below).
    :param plot_standard_deviations: appears unused in the visible body.
    :param plot_rgb_triangle: forwarded to the ellipse plot helper.
    :param plot_mesh: forwarded to the ellipse plot helper.
    :param n: forwarded to the ellipse plot helper.
    :param xy_to_2d: projection applied to xy coordinates before plotting.
    :param axes_labels: appears unused in the visible body.
    """
    # Load MacAdam's table 3 (per-center ellipse sample data).
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(dir_path, "data/macadam1942/table3.yaml")) as f:
        data = yaml.safe_load(f)

    # if plot_filter_positions:
    #     with open(os.path.join(dir_path, 'data/macadam1942/table1.yaml')) as f:
    #         filters_xyz = yaml.safe_load(f)
    #     filters_xyz = {
    #         key: 100 * numpy.array(value) for key, value in filters_xyz.items()
    #     }
    #     for key, xyz in filters_xyz.items():
    #         x, y = xyz100_to_2d(xyz)
    #         plt.plot(x, y, 'xk')
    #         ax.annotate(key, (x, y))

    # collect the ellipse centers and offsets
    centers = []
    offsets = []
    for datak in data:
        # collect ellipse points
        _, _, _, _, delta_y_delta_x, delta_s = numpy.array(datak["data"]).T
        # Unit direction (1, dy/dx) normalized, scaled by the measured
        # just-noticeable distance delta_s -> offset vectors from the center.
        offset = (
            numpy.array([numpy.ones(delta_y_delta_x.shape[0]), delta_y_delta_x])
            / numpy.sqrt(1 + delta_y_delta_x ** 2)
            * delta_s
        )
        # Skip centers with fewer than two sample points (not enough to
        # define an ellipse).
        if offset.shape[1] < 2:
            continue
        centers.append([datak["x"], datak["y"]])
        # Mirror every offset through the center to get both sides.
        offsets.append(numpy.column_stack([+offset, -offset]))
    centers = numpy.array(centers)
    _plot_ellipse_data(
        centers,
        offsets,
        ellipse_scaling=ellipse_scaling,
        xy_to_2d=xy_to_2d,
        plot_mesh=plot_mesh,
        n=n,
        plot_rgb_triangle=plot_rgb_triangle,
    )
    return
|
See <https://en.wikipedia.org/wiki/MacAdam_ellipse>,
<https://doi.org/10.1364%2FJOSA.32.000247>.
|
def _handle_tag_definetext2(self):
    """Handle the DefineText2 tag."""
    text_obj = _make_object("DefineText2")
    # Same generic parse as DefineText, but with RGBA color records.
    self._generic_definetext_parser(text_obj, self._get_struct_rgba)
    return text_obj
|
Handle the DefineText2 tag.
|
def exit_config_mode(self, exit_config="return", pattern=r">"):
    """Exit configuration mode.

    :param exit_config: command used to leave config mode (Huawei uses "return")
    :param pattern: prompt pattern expected after leaving config mode
    """
    return super(HuaweiBase, self).exit_config_mode(
        exit_config=exit_config,
        pattern=pattern,
    )
|
Exit configuration mode.
|
def classmethod(self, encoding):
    """Function decorator for class methods.

    Registers the decorated Python function as an Objective-C class
    method (via ``self.add_class_method``) using the given Objective-C
    type ``encoding`` for its arguments and return value.
    """
    # Add encodings for hidden self and cmd arguments.
    encoding = ensure_bytes(encoding)
    typecodes = parse_type_encoding(encoding)
    typecodes.insert(1, b'@:')
    encoding = b''.join(typecodes)
    def decorator(f):
        def objc_class_method(objc_cls, objc_cmd, *args):
            # Wrap the raw class pointer and stash the selector so the
            # Python implementation can see which message was sent.
            py_cls = ObjCClass(objc_cls)
            py_cls.objc_cmd = objc_cmd
            args = convert_method_arguments(encoding, args)
            result = f(py_cls, *args)
            # Unwrap ObjC wrapper objects back to raw pointer values
            # before handing the result to the Objective-C runtime.
            if isinstance(result, ObjCClass):
                result = result.ptr.value
            elif isinstance(result, ObjCInstance):
                result = result.ptr.value
            return result
        # Objective-C selectors use ':' where Python names use '_'.
        name = f.__name__.replace('_', ':')
        self.add_class_method(objc_class_method, name, encoding)
        return objc_class_method
    return decorator
|
Function decorator for class methods.
|
def column_signs_(self):
    """
    Return a numpy array with expected signs of features.

    Values are
    * +1 when all known terms which map to the column have positive sign;
    * -1 when all known terms which map to the column have negative sign;
    * ``nan`` when there are both positive and negative known terms
      for this column, or when there is no known term which maps to this
      column.
    """
    if not self._always_positive():
        self.unhasher.recalculate_attributes()
        return self.unhasher.column_signs_
    # All features are known positive; no unhasher work needed.
    return np.ones(self.n_features)
|
Return a numpy array with expected signs of features.
Values are
* +1 when all known terms which map to the column have positive sign;
* -1 when all known terms which map to the column have negative sign;
* ``nan`` when there are both positive and negative known terms
for this column, or when there is no known term which maps to this
column.
|
def get_distance_metres(aLocation1, aLocation2):
    """
    Returns the ground distance in metres between two LocationGlobal objects.

    This method is an approximation, and will not be accurate over large distances and close to the
    earth's poles. It comes from the ArduPilot test code:
    https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
    """
    # Euclidean distance in degrees, converted with the ~1.113195e5
    # metres-per-degree factor used by the ArduPilot tests.
    d_lat = aLocation2.lat - aLocation1.lat
    d_lon = aLocation2.lon - aLocation1.lon
    return math.hypot(d_lat, d_lon) * 1.113195e5
|
Returns the ground distance in metres between two LocationGlobal objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
|
def download(self):
    """Downloads the data associated with this instance

    Return:
        mp3_directory (os.path): The directory into which the associated mp3's were downloaded
    """
    target_dir = self._pre_download()
    # Fan out the per-row downloads via swifter's parallel apply.
    self.data.swifter.apply(
        func=lambda row: self._download(*row, target_dir),
        axis=1,
        raw=True,
    )
    return target_dir
|
Downloads the data associated with this instance
Return:
mp3_directory (os.path): The directory into which the associated mp3's were downloaded
|
def download_task(url, headers, destination, download_type='layer'):
    '''download a file (e.g. an image layer .tar.gz) to a specified destination.

    This task is done by using local versions of the same download functions
    that are used for the client.

    Parameters
    ==========
    url: the url to download the file from
    headers: headers to include with the download request
    destination: the final path the downloaded file is moved to
    download_type: label used in log messages (default "layer")
    '''
    # Update the user what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: download to a unique temporary name next to the destination,
    # then move into place so the destination only ever appears complete.
    # NOTE: tempfile._get_candidate_names() is a private API; consider
    # tempfile.mkstemp() if this breaks on a future Python version.
    file_name = "%s.%s" % (destination,
                           next(tempfile._get_candidate_names()))
    tar_download = download(url, file_name, headers=headers)

    try:
        shutil.move(tar_download, destination)
    except Exception:
        # Fixed misleading message: the failing operation is the move,
        # not an untar.
        msg = "Cannot move layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.error(msg)
        sys.exit(1)
    return destination
|
download an image layer (.tar.gz) to a specified download folder.
This task is done by using local versions of the same download functions
that are used for the client.
core stream/download functions of the parent client.
Parameters
==========
image_id: the shasum id of the layer, already determined to not exist
repo_name: the image name (library/ubuntu) to retrieve
download_folder: download to this folder. If not set, uses temp.
|
def function(self, x, y, sigma0, Rs, center_x=0, center_y=0):
    """
    Lensing potential of the profile.

    :param x: x-coordinate(s)
    :param y: y-coordinate(s)
    :param sigma0: sigma0/sigma_crit
    :param Rs: scale radius
    :param center_x: x-coordinate of the profile center
    :param center_y: y-coordinate of the profile center
    :return: lensing potential at (x, y)
    """
    dx = x - center_x
    dy = y - center_y
    radius = np.sqrt(dx ** 2 + dy ** 2)
    # Clip the radius at the smoothing scale self._s to avoid the
    # central singularity; handle scalars and arrays separately.
    if isinstance(radius, (int, float)):
        radius = max(self._s, radius)
    else:
        radius[radius < self._s] = self._s
    scaled_r = radius / Rs
    return sigma0 * Rs ** 2 * (np.log(scaled_r ** 2 / 4.) + 2 * self._F(scaled_r))
|
lensing potential
:param x:
:param y:
:param sigma0: sigma0/sigma_crit
:param a:
:param s:
:param center_x:
:param center_y:
:return:
|
def add(self, *args, **kwargs):
    """Add Cookie objects by their names, or create new ones under
    specified names.

    Any unnamed arguments are interpreted as existing cookies, and
    are added under the value in their .name attribute. With keyword
    arguments, the key is interpreted as the cookie name and the
    value as the UNENCODED value stored in the cookie.
    """
    # Every cookie (including shadowed duplicates) lands in all_cookies;
    # only the first cookie per name is reachable through the mapping.
    for cookie in args:
        self.all_cookies.append(cookie)
        if cookie.name not in self:
            self[cookie.name] = cookie
    for name, raw_value in kwargs.items():
        cookie = self.cookie_class(name, raw_value)
        self.all_cookies.append(cookie)
        if name not in self:
            self[name] = cookie
|
Add Cookie objects by their names, or create new ones under
specified names.
Any unnamed arguments are interpreted as existing cookies, and
are added under the value in their .name attribute. With keyword
arguments, the key is interpreted as the cookie name and the
value as the UNENCODED value stored in the cookie.
|
def get_ranks(self):
    '''
    Term (or metadata) frequency table for this term-document matrix.

    Returns
    -------
    pd.DataFrame
    '''
    # Metadata frequencies when non-text features are enabled,
    # plain term frequencies otherwise.
    matrix = self._term_doc_matrix
    return (matrix.get_metadata_freq_df()
            if self._use_non_text_features
            else matrix.get_term_freq_df())
|
Returns
-------
pd.DataFrame
|
def _fix_example_namespace(self):
    """Attempts to resolve issues where our samples use
    'http://example.com/' for our example namespace but python-stix uses
    'http://example.com' by removing the former.
    """
    example_prefix = 'example'  # Example ns prefix

    # Only rewrite when the ID namespace alias is the example alias...
    if idgen.get_id_namespace_prefix() != example_prefix:
        return
    # ...and the parsed document actually declared that prefix.
    if example_prefix not in self._input_namespaces:
        return

    self._input_namespaces[example_prefix] = idgen.EXAMPLE_NAMESPACE.name
|
Attempts to resolve issues where our samples use
'http://example.com/' for our example namespace but python-stix uses
'http://example.com' by removing the former.
|
def process_warn_strings(arguments):
    """Process string specifications of enabling/disabling warnings,
    as passed to the --warn option or the SetOption('warn') function.

    An argument to this option should be of the form <warning-class>
    or no-<warning-class>.  The warning class is munged in order
    to get an actual class name from the classes above, which we
    need to pass to the {enable,disable}WarningClass() functions.
    The supplied <warning-class> is split on hyphens, each element
    is capitalized, then smushed back together.  Then the string
    "Warning" is appended to get the class name.

    For example, 'deprecated' will enable the DeprecatedWarning
    class.  'no-dependency' will disable the DependencyWarning class.

    As a special case, --warn=all and --warn=no-all will enable or
    disable (respectively) the base Warning class of all warnings.
    """

    def _capitalize(s):
        # Preserve the SCons camel-casing for the "scons" prefix.
        if s[:5] == "scons":
            return "SCons" + s[5:]
        else:
            return s.capitalize()

    for arg in arguments:
        elems = arg.lower().split('-')
        # A leading "no-" element means the warning should be disabled.
        enable = True
        if elems[0] == 'no':
            enable = False
            del elems[0]

        if len(elems) == 1 and elems[0] == 'all':
            class_name = "Warning"
        else:
            class_name = ''.join(map(_capitalize, elems)) + "Warning"
        try:
            clazz = globals()[class_name]
        except KeyError:
            sys.stderr.write("No warning type: '%s'\n" % arg)
        else:
            if enable:
                enableWarningClass(clazz)
            elif issubclass(clazz, MandatoryDeprecatedWarning):
                # Fixed typo in user-facing message: "mandataory" -> "mandatory".
                fmt = "Can not disable mandatory warning: '%s'\n"
                sys.stderr.write(fmt % arg)
            else:
                suppressWarningClass(clazz)
|
Process string specifications of enabling/disabling warnings,
as passed to the --warn option or the SetOption('warn') function.
An argument to this option should be of the form <warning-class>
or no-<warning-class>. The warning class is munged in order
to get an actual class name from the classes above, which we
need to pass to the {enable,disable}WarningClass() functions.
The supplied <warning-class> is split on hyphens, each element
is capitalized, then smushed back together. Then the string
"Warning" is appended to get the class name.
For example, 'deprecated' will enable the DeprecatedWarning
class. 'no-dependency' will disable the DependencyWarning class.
As a special case, --warn=all and --warn=no-all will enable or
disable (respectively) the base Warning class of all warnings.
|
def add(self, component: Union[Component, Sequence[Component]]) -> None:
    """Add a widget to the grid in the next available cell.

    Searches over columns then rows for available cells.  When no cell
    is free, the widget is appended to the last-used span instead.

    Parameters
    ----------
    components : bowtie._Component
        A Bowtie widget instance.
    """
    try:
        self[Span(*self._available_cell())] = component
    except NoUnusedCellsError:
        # Grid is full -- extend the most recently created span.
        last_span = list(self._spans)[-1]
        self._spans[last_span] += component
|
Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
components : bowtie._Component
A Bowtie widget instance.
|
def flatatt(self, **attr):
    '''Return a string with attributes to add to the tag'''
    # NOTE(review): the **attr keyword arguments are ignored -- the
    # original immediately replaced them with self._attr.  Preserved
    # as-is to keep behavior unchanged.
    classes = self._classes
    data = self._data
    css = self._css
    attr = dict(self._attr) if self._attr else {}
    if classes:
        attr['class'] = ' '.join(classes)
    if css:
        attr['style'] = ' '.join('%s:%s;' % item for item in css.items())
    for key, value in (data or {}).items():
        attr['data-%s' % key] = dump_data_value(value)
    return ''.join(attr_iter(attr)) if attr else ''
|
Return a string with attributes to add to the tag
|
def _process_thread(self, client):
    """Process a single GRR client.

    Launches an ArtifactCollectorFlow on the client for the selected
    artifact list, waits for it, downloads the results, and appends
    (fqdn, downloaded_data) to ``self.state.output``.

    Args:
        client: a GRR client object.
    """
    system_type = client.data.os_info.system
    print('System type: {0:s}'.format(system_type))

    # If the list is supplied by the user via a flag, honor that.
    artifact_list = []
    if self.artifacts:
        print('Artifacts to be collected: {0!s}'.format(self.artifacts))
        artifact_list = self.artifacts
    else:
        # Otherwise fall back to the per-OS defaults from the registry.
        default_artifacts = self.artifact_registry.get(system_type, None)
        if default_artifacts:
            print('Collecting default artifacts for {0:s}: {1:s}'.format(
                system_type, ', '.join(default_artifacts)))
            artifact_list.extend(default_artifacts)

    if self.extra_artifacts:
        print('Throwing in an extra {0!s}'.format(self.extra_artifacts))
        artifact_list.extend(self.extra_artifacts)

    # De-duplicate the combined list before building the flow args.
    artifact_list = list(set(artifact_list))
    if not artifact_list:
        return

    flow_args = flows_pb2.ArtifactCollectorFlowArgs(
        artifact_list=artifact_list,
        use_tsk=self.use_tsk,
        ignore_interpolation_errors=True,
        apply_parsers=False)
    flow_id = self._launch_flow(client, 'ArtifactCollectorFlow', flow_args)
    # Block until the flow completes, then pull down its files.
    self._await_flow(client, flow_id)
    collected_flow_data = self._download_files(client, flow_id)
    if collected_flow_data:
        print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
        fqdn = client.data.os_info.fqdn.lower()
        self.state.output.append((fqdn, collected_flow_data))
|
Process a single GRR client.
Args:
client: a GRR client object.
|
def serialize(self, queryset, **options):
    """
    Serialize a queryset.
    """
    self.options = options

    # Read the supported serializer options up front, with their defaults.
    self.stream = options.get("stream", StringIO())
    self.properties = options.get("properties")
    for name, default in (
        ("primary_key", None),
        ("geometry_field", "geom"),
        ("use_natural_keys", False),
        ("bbox", None),
        ("bbox_auto", None),
        ("srid", GEOJSON_DEFAULT_SRID),
        ("crs", True),
    ):
        setattr(self, name, options.get(name, default))

    self.start_serialization()
    # Dispatch on the flavour of collection we were handed.
    if ValuesQuerySet is not None and isinstance(queryset, ValuesQuerySet):
        self.serialize_values_queryset(queryset)
    elif isinstance(queryset, list):
        self.serialize_object_list(queryset)
    elif isinstance(queryset, QuerySet):
        self.serialize_queryset(queryset)
    self.end_serialization()
    return self.getvalue()
|
Serialize a queryset.
|
def in_file(self, filename: str) -> Iterator[FunctionDesc]:
    """
    Returns an iterator over all of the functions definitions that are
    contained within a given file.  Yields nothing for unknown files.
    """
    for description in self.__filename_to_functions.get(filename, []):
        yield description
|
Returns an iterator over all of the functions definitions that are
contained within a given file.
|
def _write_response(self, response):
    """Write the response back to the client.

    Writes the status line, then headers (adding Content-Length and Date),
    a blank separator line, and finally the body if one is present.

    Arguments:
    response -- the dictionary containing the response.
    """
    # Status line, e.g. "HTTP/1.1 200 OK"; `responses` maps code -> phrase.
    status = '{} {} {}\r\n'.format(response['version'],
                                   response['code'],
                                   responses[response['code']])
    self.logger.debug("Responding status: '%s'", status.strip())
    self._write_transport(status)

    # Fill in Content-Length when a body exists and the caller did not
    # set it explicitly; always stamp the Date header (RFC 1123 style).
    if 'body' in response and 'Content-Length' not in response['headers']:
        response['headers']['Content-Length'] = len(response['body'])
    response['headers']['Date'] = datetime.utcnow().strftime(
        "%a, %d %b %Y %H:%M:%S +0000")
    for (header, content) in response['headers'].items():
        self.logger.debug("Sending header: '%s: %s'", header, content)
        self._write_transport('{}: {}\r\n'.format(header, content))

    # Blank line terminates the header section; body (if any) follows.
    self._write_transport('\r\n')
    if 'body' in response:
        self._write_transport(response['body'])
|
Write the response back to the client
Arguments:
response -- the dictionary containing the response.
|
def set_python(self, value):
    """Expect list of record instances, convert to a SortedDict for internal representation"""
    # Single records are wrapped in a list for non-multiselect fields.
    if not self.multiselect and value and not isinstance(value, list):
        value = [value]
    records = SortedDict()
    for record in (value or []):
        self.validate_value(record)
        records[record.id] = record
    return_value = self._set(records)
    # Refresh the raw record payload from the server representation.
    self.record._raw['values'][self.id] = self.get_swimlane()
    return return_value
|
Expect list of record instances, convert to a SortedDict for internal representation
|
def qteRemoveMode(self, mode: str):
    """
    Remove ``mode`` and associated label.

    If ``mode`` does not exist then nothing happens and the method
    returns **False**, otherwise **True**.

    |Args|

    * ``mode`` (**str**): name of the mode (and its label) to remove.

    |Returns|

    * **bool**: **True** if the item was removed and **False** if there
      was an error (most likely ``mode`` does not exist).

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    # Search through the list for ``mode``.  (The unused enumerate index
    # from the original was dropped; removing inside the loop is safe
    # because we return immediately afterwards.)
    for item in self._qteModeList:
        if item[0] == mode:
            # Remove the record and delete the label widget.
            self._qteModeList.remove(item)
            item[2].hide()
            item[2].deleteLater()
            self._qteUpdateLabelWidths()
            return True
    return False
|
Remove ``mode`` and associated label.
If ``mode`` does not exist then nothing happens and the method
returns **False**, otherwise **True**.
|Args|
* ``mode`` (**str**): name of the mode (and its label) to remove.
|Returns|
* **bool**: **True** if the item was removed and **False** if there
was an error (most likely ``mode`` does not exist).
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
|
def send_emission(self):
    """
    Pop the first queued emission (if any) and invoke it.
    """
    if self._emit_queue.empty():
        return
    # get() removes the callable from the queue; invoke it immediately.
    self._emit_queue.get()()
|
emit and remove the first emission in the queue
|
def check_tx_with_confirmations(self, tx_hash: str, confirmations: int) -> bool:
    """
    Check tx hash and make sure it has the confirmations required

    :param tx_hash: Hash of the tx
    :param confirmations: Minimum number of confirmations required
    :return: True if tx was mined with the number of confirmations required, False otherwise
    """
    receipt = self.w3.eth.getTransactionReceipt(tx_hash)
    # No receipt, or a receipt without a block number (how Parity reports
    # pending txs), means the tx has not been mined yet.
    if not receipt or receipt['blockNumber'] is None:
        return False
    return (self.w3.eth.blockNumber - receipt['blockNumber']) >= confirmations
|
Check tx hash and make sure it has the confirmations required
:param w3: Web3 instance
:param tx_hash: Hash of the tx
:param confirmations: Minimum number of confirmations required
:return: True if tx was mined with the number of confirmations required, False otherwise
|
def pad_length(s):
    """
    Appends characters to the end of the string to increase the string length per
    IBM Globalization Design Guideline A3: UI Expansion.

    https://www-01.ibm.com/software/globalization/guidelines/a3.html

    :param s: String to pad.
    :returns: Padded string.
    """
    # Fixed: the empty string previously fell through every length bracket
    # and raised NameError on the undefined target_length.
    if not s:
        return s

    padding_chars = [
        u'\ufe4e',  # ﹎: CENTRELINE LOW LINE
        u'\u040d',  # Ѝ: CYRILLIC CAPITAL LETTER I WITH GRAVE
        u'\u05d0',  # א: HEBREW LETTER ALEF
        u'\u01c6',  # dž: LATIN SMALL LETTER DZ WITH CARON
        u'\u1f8f',  # ᾏ: GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
        u'\u2167',  # Ⅷ: ROMAN NUMERAL EIGHT
        u'\u3234',  # ㈴: PARENTHESIZED IDEOGRAPH NAME
        u'\u32f9',  # ㋹: CIRCLED KATAKANA RE
        u'\ud4db',  # 퓛: HANGUL SYLLABLE PWILH
        u'\ufe8f',  # ﺏ: ARABIC LETTER BEH ISOLATED FORM
        u'\U0001D7D8',  # 𝟘: MATHEMATICAL DOUBLE-STRUCK DIGIT ZERO
        u'\U0001F6A6',  # 🚦: VERTICAL TRAFFIC LIGHT
    ]
    padding_generator = itertools.cycle(padding_chars)

    # Expansion factors by source-string length bracket (IBM guideline A3).
    # The builtin range replaces six.moves.range; membership tests behave
    # identically.
    target_lengths = {
        range(1, 11): 3,
        range(11, 21): 2,
        range(21, 31): 1.8,
        range(31, 51): 1.6,
        range(51, 71): 1.4,
    }
    if len(s) > 70:
        target_length = int(math.ceil(len(s) * 1.3))
    else:
        for bracket, factor in target_lengths.items():
            if len(s) in bracket:
                target_length = int(math.ceil(len(s) * factor))
                break

    diff = target_length - len(s)
    pad = u"".join([next(padding_generator) for _ in range(diff)])
    return s + pad
|
Appends characters to the end of the string to increase the string length per
IBM Globalization Design Guideline A3: UI Expansion.
https://www-01.ibm.com/software/globalization/guidelines/a3.html
:param s: String to pad.
:returns: Padded string.
|
def entries(self):
    """A list of :class:`PasswordEntry` objects."""
    timer = Timer()
    logger.info("Scanning %s ..", format_path(self.directory))
    listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0")
    passwords = [
        # os.path.normpath() strips the leading `./' prefixes that `find'
        # adds because it searches the working directory.
        PasswordEntry(name=os.path.normpath(base), store=self)
        for base, ext in (os.path.splitext(fn) for fn in split(listing, "\0"))
        if ext == ".gpg"
    ]
    logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer)
    return natsort(passwords, key=lambda entry: entry.name)
|
A list of :class:`PasswordEntry` objects.
|
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions.

    fun -- name of the salt function to look up, e.g. ``test.ping``.
    Maps minion id -> decoded full return of that minion's most recent
    invocation of ``fun``.
    '''
    with _get_serv(ret=None, commit=True) as cur:
        # For every (fun, id) pair, the subquery selects the highest jid,
        # i.e. the most recent invocation per minion; the join then pulls
        # the matching full return rows for the requested function.
        sql = '''SELECT s.id,s.jid, s.full_ret
                FROM `salt_returns` s
                JOIN ( SELECT MAX(`jid`) as jid
                from `salt_returns` GROUP BY fun, id) max
                ON s.jid = max.jid
                WHERE s.fun = %s
                '''
        cur.execute(sql, (fun,))
        data = cur.fetchall()
        ret = {}
        if data:
            # full_ret is stored as JSON text; decode it per minion.
            for minion, _, full_ret in data:
                ret[minion] = salt.utils.json.loads(full_ret)
        return ret
|
Return a dict of the last function called for all minions
|
def set(self, key, value, *, section=DataStoreDocumentSection.Data):
    """ Store a value under the specified key in the given section of the document.

    This method stores a value into the specified section of the workflow data store
    document. Any existing value is overridden. Before storing a value, any linked
    GridFS document under the specified key is deleted.

    Args:
        key (str): The key pointing to the value that should be stored/updated.
                   It supports MongoDB's dot notation for nested fields.
        value: The value that should be stored/updated.
        section (DataStoreDocumentSection): The section of the document in which
                                            the value should be stored.

    Returns:
        bool: ``True`` if the value could be set/updated, otherwise ``False``.
    """
    key_notation = '.'.join([section, key])
    try:
        # Drop any GridFS blob currently linked under this key before it
        # is overwritten, so the stored file does not leak.
        self._delete_gridfs_data(self._data_from_dotnotation(key_notation,
                                                             default=None))
    except KeyError:
        # The key does not exist yet -- nothing to clean up.
        logger.info('Adding new field {} to the data store'.format(key_notation))

    result = self._collection.update_one(
        {"_id": ObjectId(self._workflow_id)},
        {
            "$set": {
                key_notation: self._encode_value(value)
            },
            "$currentDate": {"lastModified": True}
        }
    )
    return result.modified_count == 1
|
Store a value under the specified key in the given section of the document.
This method stores a value into the specified section of the workflow data store
document. Any existing value is overridden. Before storing a value, any linked
GridFS document under the specified key is deleted.
Args:
key (str): The key pointing to the value that should be stored/updated.
It supports MongoDB's dot notation for nested fields.
value: The value that should be stored/updated.
section (DataStoreDocumentSection): The section from which the data should
be retrieved.
Returns:
bool: ``True`` if the value could be set/updated, otherwise ``False``.
|
def has_column(self, table, column):
    """
    Determine if the given table has a given column.

    The comparison is case-insensitive.

    :param table: The table
    :type table: str

    :type column: str

    :rtype: bool
    """
    existing = [name.lower() for name in self.get_column_listing(table)]
    return column.lower() in existing
|
Determine if the given table has a given column.
:param table: The table
:type table: str
:type column: str
:rtype: bool
|
def encode_ulid(value: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.

    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
              bytes specifically and is not meant for arbitrary encoding.

    :param value: Bytes to encode
    :type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the value is not 16 bytes
    """
    length = len(value)
    if length != 16:
        raise ValueError('Expects 16 bytes for timestamp + randomness; got {}'.format(length))

    # Treat the 16 bytes as one 128-bit big-endian integer and emit 26
    # Base32 characters of 5 bits each, most significant bits first (the
    # first character therefore encodes only the top 3 bits).  This is
    # bit-for-bit equivalent to the unrolled per-byte masking version.
    number = int.from_bytes(bytes(value), byteorder='big')
    return ''.join(ENCODING[(number >> shift) & 0x1F]
                   for shift in range(125, -1, -5))
|
Encode the given buffer to a :class:`~str` using Base32 encoding.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param value: Bytes to encode
:type value: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the value is not 16 bytes
|
def addRnaQuantificationSet(self):
    """
    Adds an rnaQuantificationSet into this repo.

    Reads the CLI arguments stored on ``self._args`` (dataset name, file
    path, optional set name, reference set name, attributes JSON),
    populates the set from the file, and inserts it into the repository.

    Raises:
        exceptions.RepoManagerException: if no reference set name was given.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    # Default the set name to the file name when none was given.
    if self._args.name is None:
        name = getNameFromPath(self._args.filePath)
    else:
        name = self._args.name
    rnaQuantificationSet = rna_quantification.SqliteRnaQuantificationSet(
        dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # A reference set is mandatory for RNA quantification data.
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    rnaQuantificationSet.setReferenceSet(referenceSet)
    rnaQuantificationSet.populateFromFile(self._args.filePath)
    # Attributes arrive as a JSON string on the command line.
    rnaQuantificationSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(
        self._repo.insertRnaQuantificationSet, rnaQuantificationSet)
|
Adds an rnaQuantificationSet into this repo
|
def EXTRA_LOGGING(self):
    """
    List of modules to log, each with its own logging level.

    Example configuration::

        [Logs]
        EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO

    :returns: list of ``[module, level]`` pairs, e.g.
        ``[['oscar.paypal', 'DEBUG'], ['django.db', 'INFO']]``.
    """
    input_text = get('EXTRA_LOGGING', '')
    if input_text:
        # "mod1:LEVEL, mod2:LEVEL" -> [['mod1', 'LEVEL'], ['mod2', 'LEVEL']]
        # (the redundant pre-split from the original was removed).
        modules = [entry.split(':') for entry in input_text.split(',')]
    else:
        modules = []
    return modules
|
lista modulos con los distintos niveles a logear y su
nivel de debug
Por ejemplo:
[Logs]
EXTRA_LOGGING = oscar.paypal:DEBUG, django.db:INFO
|
def update(self, allow_partial=True, force=False, **kwargs):
    """Updates record and returns True if record is complete after update, else False.

    If ``kwargs`` are given the record is re-initialised from them and the
    result reflects whether it is now complete.  Otherwise the record is
    filled from the cache (unless ``force``) or looked up by type and id.
    """
    if kwargs:
        self.__init__(partial=allow_partial, force=force, **kwargs)
        return not self._partial

    # Single cache lookup (the original did a .get() followed by a
    # second [] access on the same hash).
    cached = None if force else CACHE.get(hash(self))
    if cached:
        for field in self._SIMPLE_FIELDS | self._COMPLEX_FIELDS:
            setattr(self, field, getattr(cached, field))
        self._partial = False
        logging.info(f'Loading {str(self)} from cache')
        return True

    resp_dict = element_lookup_by_id(self.type, self.id)
    self.__init__(partial=False, **resp_dict)
    return True
|
Updates record and returns True if record is complete after update, else False.
|
def order(self, *args):
    """Return a new Query with additional sort order(s) applied.

    Each argument is either a model Property (ordered ascending) or an
    already-built datastore_query.Order.  Any orders already on this
    query are preserved and the new ones are appended after them.

    Raises:
        TypeError: if an argument is neither a Property nor an Order.
    """
    # q.order(Employee.name, -Employee.age)
    if not args:
        return self
    orders = []
    o = self.orders
    if o:
        orders.append(o)  # keep the pre-existing order(s) first
    for arg in args:
        if isinstance(arg, model.Property):
            orders.append(datastore_query.PropertyOrder(arg._name, _ASC))
        elif isinstance(arg, datastore_query.Order):
            orders.append(arg)
        else:
            raise TypeError('order() expects a Property or query Order; '
                            'received %r' % arg)
    # Collapse: none -> None, one -> itself, many -> CompositeOrder.
    if not orders:
        orders = None
    elif len(orders) == 1:
        orders = orders[0]
    else:
        orders = datastore_query.CompositeOrder(orders)
    # Build a new immutable query carrying over every other attribute.
    return self.__class__(kind=self.kind, ancestor=self.ancestor,
                          filters=self.filters, orders=orders,
                          app=self.app, namespace=self.namespace,
                          default_options=self.default_options,
                          projection=self.projection, group_by=self.group_by)
|
Return a new Query with additional sort order(s) applied.
|
def signature(self, value):
    """Compute the keyed HMAC of ``value``.

    :type value: any
    :rtype: HMAC
    """
    hmac_obj = HMAC(self.key, self.digest, backend=settings.CRYPTOGRAPHY_BACKEND)
    hmac_obj.update(force_bytes(value))
    return hmac_obj
|
:type value: any
:rtype: HMAC
|
def find_packages_parents_requirements_dists(pkg_names, working_set=None):
    """
    Leverages the `find_packages_requirements_dists` but strip out the
    distributions that matches pkg_names.
    """
    excluded = set(pkg_names)
    return [
        dist
        for dist in find_packages_requirements_dists(pkg_names, working_set)
        if dist.project_name not in excluded
    ]
|
Leverages the `find_packages_requirements_dists` but strip out the
distributions that matches pkg_names.
|
def split_token(output):
    """
    Split an output into token tuple, real output tuple.

    Collects the leading run of flag tokens (validating their ordering
    constraints) and returns them separately from the remaining output.

    :param output: a value or tuple possibly prefixed with flag tokens
    :return: (set of flag tokens, tuple of remaining output elements)
    :raises ValueError: on misordered or duplicate flags, or when output
        data follows a flag that does not allow data.
    """
    output = ensure_tuple(output)
    flags, i, len_output, data_allowed = set(), 0, len(output), True
    # Consume flags from the front, enforcing first/last ordering rules.
    while i < len_output and isflag(output[i]):
        if output[i].must_be_first and i:
            raise ValueError("{} flag must be first.".format(output[i]))
        if i and output[i - 1].must_be_last:
            raise ValueError("{} flag must be last.".format(output[i - 1]))
        if output[i] in flags:
            raise ValueError("Duplicate flag {}.".format(output[i]))
        flags.add(output[i])
        # A single data-forbidding flag poisons the whole output.
        data_allowed &= output[i].allows_data
        i += 1
    output = output[i:]
    if not data_allowed and len(output):
        raise ValueError("Output data provided after a flag that does not allow data.")
    return flags, output
|
Split an output into token tuple, real output tuple.
:param output:
:return: tuple, tuple
|
def failure_count(self):
    """
    Amount of failed test cases in this list.

    :return: integer
    """
    # Sum a generator instead of materialising a list; the index produced
    # by the original enumerate() was never used.
    return sum(1 for result in self.data if result.failure)
|
Amount of failed test cases in this list.
:return: integer
|
def save_neighbour_info(self, cache_dir, mask=None, **kwargs):
    """Cache resampler's index arrays if there is a cache dir.

    No-op when `cache_dir` is falsy. The cache is keyed — both the on-disk
    filename and the in-memory ``self._index_caches`` entry — by the mask's
    ``name`` attribute, or ``None`` when no mask is given.
    """
    if cache_dir:
        mask_name = getattr(mask, 'name', None)
        filename = self._create_cache_filename(
            cache_dir, mask=mask_name, **kwargs)
        LOG.info('Saving kd_tree neighbour info to %s', filename)
        cache = self._read_resampler_attrs()
        # update the cache in place with persisted dask arrays
        self._apply_cached_indexes(cache, persist=True)
        self._index_caches[mask_name] = cache
        # Persist the arrays to disk alongside the in-memory cache.
        np.savez(filename, **cache)
|
Cache resampler's index arrays if there is a cache dir.
|
def list_user_access(self, user):
    """
    Returns a list of all database names for which the specified user
    has access rights.

    :param user: a user object or user name
    :returns: list of CloudDatabaseDatabase objects
    :raises NoSuchDatabaseUser: if the user does not exist
    """
    user = utils.get_name(user)
    uri = "/%s/%s/databases" % (self.uri_base, user)
    try:
        resp, resp_body = self.api.method_get(uri)
    except exc.NotFound:
        # Translate the generic 404 into a domain-specific error.
        # (The exception was previously bound to an unused local `e`.)
        raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
    dbs = resp_body.get("databases", {})
    return [CloudDatabaseDatabase(self, db) for db in dbs]
|
Returns a list of all database names for which the specified user
has access rights.
|
def _align_header(header, alignment, width, visible_width, is_multiline=False,
                  width_fn=None):
    "Pad string header to width chars given known visible_width of the header."
    if is_multiline:
        # Align each visual line independently, then stitch them back.
        lines = re.split(_multiline_codes, header)
        return "\n".join(
            _align_header(line, alignment, width, width_fn(line))
            for line in lines)
    # Widen the target by the number of invisible characters so that the
    # visible portion ends up at the requested width.
    width += len(header) - visible_width
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    if not alignment:
        return "{0}".format(header)
    return _padleft(width, header)
|
Pad string header to width chars given known visible_width of the header.
|
def save(self, path="speech"):
    """Save data in file.

    Args:
        path (optional): A path to save file. Defaults to "speech".
            File extension is optional. Absolute path is allowed.

    Returns:
        The path to the saved file.

    Raises:
        RuntimeError: If there is no downloaded data to save.
    """
    if self._data is None:
        # RuntimeError instead of the overly generic bare Exception;
        # existing `except Exception` callers still catch it.
        raise RuntimeError("There's nothing to save")
    # Append the audio-format extension unless the caller already gave it.
    extension = "." + self.__params["format"]
    if os.path.splitext(path)[1] != extension:
        path += extension
    with open(path, "wb") as f:
        for chunk in self._data:
            f.write(chunk)
    return path
|
Save data in file.
Args:
path (optional): A path to save file. Defaults to "speech".
File extension is optional. Absolute path is allowed.
Returns:
The path to the saved file.
|
def _collect_masters_map(self, response):
    '''
    Collect masters map from the network.

    Receives UDP datagrams on ``self._socket`` until a receive fails
    (e.g. times out) or returns empty data, grouping each datagram in
    ``response`` (modified in place) under the sender's address.

    :param response: dict to populate, mapping addr -> [datagram, ...]
    :return: None
    '''
    while True:
        try:
            # 0x400: read up to 1 KiB per datagram.
            data, addr = self._socket.recvfrom(0x400)
            if data:
                if addr not in response:
                    response[addr] = []
                response[addr].append(data)
            else:
                break
        except Exception as err:
            # Any receive error (typically a timeout) ends collection;
            # only report it if nothing at all was collected.
            if not response:
                self.log.error('Discovery master collection failure: %s', err)
            break
|
Collect masters map from the network.
:return:
|
def makeParameterTable(filename, params):
    '''
    Makes the parameter table for the paper, saving it to a tex file in the tables folder.
    Also makes two partial parameter tables for the slides.

    Parameters
    ----------
    filename : str
        Name of the file in which to save output (in the tables directory).
        Suffix .tex is automatically added.
    params :
        Object containing the parameter values.

    Returns
    -------
    None
    '''
    # Calibrated macroeconomic parameters
    macro_panel = "\multicolumn{3}{c}{\\textbf{Macroeconomic Parameters} } \n"
    macro_panel += "\\\\ $\\kapShare$ & " + "{:.2f}".format(params.CapShare) + " & Capital's Share of Income \n"
    macro_panel += "\\\\ $\\daleth$ & " + "{:.2f}".format(params.DeprFacAnn) + "^{1/4} & Depreciation Factor \n"
    macro_panel += "\\\\ $\sigma_{\Theta}^{2}$ & "+ "{:.5f}".format(params.TranShkAggVar) +" & Variance Aggregate Transitory Shocks \n"
    macro_panel += "\\\\ $\sigma_{\Psi}^{2}$ & "+ "{:.5f}".format(params.PermShkAggVar) +" & Variance Aggregate Permanent Shocks \n"
    # Steady state values
    SS_panel = "\multicolumn{3}{c}{ \\textbf{Steady State of Perfect Foresight DSGE Model} } \\ \n"
    SS_panel += "\\\\ \multicolumn{3}{c}{ $(\\sigma_{\\Psi}=\\sigma_{\\Theta}=\\sigma_{\\psi}=\\sigma_{\\theta}=\wp=\\PDies=0$, $\\Phi_t = 1)$} \\ \n"
    SS_panel += "\\\\ $\\breve{K}/\\breve{K}^{\\kapShare}$ & " + "{:.1f}".format(params.KYratioSS) + " & SS Capital to Output Ratio \n"
    SS_panel += "\\\\ $\\breve{K}$ & " + "{:.2f}".format(params.KSS) + " & SS Capital to Labor Productivity Ratio ($=12^{1/(1-\\kapShare)}$) \n"
    SS_panel += "\\\\ $\\breve{\\Wage}$ & " + "{:.2f}".format(params.wRteSS) + " & SS Wage Rate ($=(1-\\kapShare)\\breve{K}^{\\kapShare}$) \n"
    SS_panel += "\\\\ $\\breve{\\mathsf{r}}$ & " + "{:.2f}".format(params.rFreeSS) + " & SS Interest Rate ($=\\kapShare \\breve{K}^{\\kapShare-1}$) \n"
    SS_panel += "\\\\ $\\breve{\\Rprod}$ & " + "{:.3f}".format(params.RfreeSS) + "& SS Between-Period Return Factor ($=\\daleth + \\breve{\\mathsf{r}}$) \n"
    # Calibrated preference parameters
    pref_panel = "\multicolumn{3}{c}{ \\textbf{Preference Parameters} } \n"
    pref_panel += "\\\\ $\\rho$ & "+ "{:.0f}".format(params.CRRA) +". & Coefficient of Relative Risk Aversion \n"
    pref_panel += "\\\\ $\\beta_{SOE}$ & " + "{:.3f}".format(params.DiscFacSOE) +" & SOE Discount Factor \n" #($=0.99 \\cdot \\PLives / (\\breve{\\mathcal{R}} \\Ex [\\pmb{\\psi}^{-\CRRA}])$)\n"
    pref_panel += "\\\\ $\\beta_{DSGE}$ & " + "{:.3f}".format(params.DiscFacDSGE) +" & HA-DSGE Discount Factor ($=\\breve{\\Rprod}^{-1}$) \n"
    pref_panel += "\\\\ $\Pi$ & " + "{:.2f}".format(params.UpdatePrb) +" & Probability of Updating Expectations (if Sticky) \n"
    # Idiosyncratic shock parameters
    idio_panel = "\multicolumn{3}{c}{ \\textbf{Idiosyncratic Shock Parameters} } \n"
    idio_panel += "\\\\ $\sigma_{\\theta}^{2}$ & " + "{:.3f}".format(params.TranShkVar) +" & Variance Idiosyncratic Tran Shocks (=$4 \\times$ Annual) \n"
    idio_panel += "\\\\ $\sigma_{\psi}^{2}$ &" + "{:.3f}".format(params.PermShkVar) +" & Variance Idiosyncratic Perm Shocks (=$\\frac{1}{4} \\times$ Annual) \n"
    idio_panel += "\\\\ $\wp$ & " + "{:.3f}".format(params.UnempPrb) +" & Probability of Unemployment Spell \n"
    idio_panel += "\\\\ $\PDies$ & " + "{:.3f}".format(params.DiePrb) +" & Probability of Mortality \n"
    # Make full parameter table for paper
    paper_output = "\provideboolean{Slides} \setboolean{Slides}{false} \n"
    paper_output += "\\begin{minipage}{\\textwidth}\n"
    paper_output += " \\begin{table}\n"
    paper_output += " \\caption{Calibration}\label{table:calibration}\n"
    paper_output += "\\begin{tabular}{cd{5}l} \n"
    paper_output += "\\\\ \\toprule \n"
    paper_output += macro_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += SS_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += pref_panel
    paper_output += "\\\\ \\midrule \n"
    paper_output += idio_panel
    paper_output += "\\\\ \\bottomrule \n"
    paper_output += "\end{tabular}\n"
    paper_output += "\end{table}\n"
    paper_output += "\end{minipage}\n"
    paper_output += "\ifthenelse{\\boolean{StandAlone}}{\end{document}}{} \n"
    # The `with` statement closes the file; the explicit f.close() calls
    # previously inside these blocks were redundant and have been removed.
    with open(tables_dir + filename + '.tex','w') as f:
        f.write(paper_output)
    # Make two partial parameter tables for the slides
    slides1_output = "\\begin{center}\label{table:calibration1} \n"
    slides1_output += "\\begin{tabular}{cd{5}l} \n"
    slides1_output += "\\\\ \\toprule \n"
    slides1_output += macro_panel
    slides1_output += "\\\\ \\midrule \n"
    slides1_output += SS_panel
    slides1_output += "\\\\ \\bottomrule \n"
    slides1_output += "\end{tabular} \n"
    slides1_output += "\end{center} \n"
    with open(tables_dir + filename + '_1.tex','w') as f:
        f.write(slides1_output)
    slides2_output = "\\begin{center}\label{table:calibration2} \n"
    slides2_output += "\\begin{tabular}{cd{5}l} \n"
    slides2_output += "\\\\ \\toprule \n"
    slides2_output += pref_panel
    slides2_output += "\\\\ \\midrule \n"
    slides2_output += idio_panel
    slides2_output += "\\\\ \\bottomrule \n"
    slides2_output += "\end{tabular} \n"
    slides2_output += "\end{center} \n"
    with open(tables_dir + filename + '_2.tex','w') as f:
        f.write(slides2_output)
|
Makes the parameter table for the paper, saving it to a tex file in the tables folder.
Also makes two partial parameter tables for the slides.
Parameters
----------
filename : str
Name of the file in which to save output (in the tables directory).
Suffix .tex is automatically added.
params :
Object containing the parameter values.
Returns
-------
None
|
def do_bash_complete(cli, prog_name):
    """Do the completion for bash

    Parameters
    ----------
    cli : click.Command
        The main click Command of the program
    prog_name : str
        The program name on the command line

    Returns
    -------
    bool
        True if the completion was successful, False otherwise
    """
    comp_words = os.environ['COMP_WORDS']
    # shlex chokes on an unterminated quote; fall back to the lenient
    # splitter and remember that we are inside a quoted word.
    try:
        cwords = shlex.split(comp_words)
        quoted = False
    except ValueError:  # No closing quotation
        cwords = split_args(comp_words)
        quoted = True
    cword = int(os.environ['COMP_CWORD'])
    args = cwords[1:cword]
    try:
        incomplete = cwords[cword]
    except IndexError:
        incomplete = ''
    choices = get_choices(cli, prog_name, args, incomplete)
    if quoted:
        completions = (opt for opt, _ in choices)
    else:
        # Backslash-escape shell metacharacters in unquoted completions.
        completions = (re.sub(r"""([\s\\"'()])""", r'\\\1', opt)
                       for opt, _ in choices)
    echo('\t'.join(completions), nl=False)
    return True
|
Do the completion for bash
Parameters
----------
cli : click.Command
The main click Command of the program
prog_name : str
The program name on the command line
Returns
-------
bool
True if the completion was successful, False otherwise
|
def set(self, stype, sid, fields):
    """
    Send a request to the API to modify something in the database if logged in.

    :param str stype: What are we modifying? One of: votelist, vnlist, wishlist
    :param int sid: The ID that we're modifying.
    :param dict fields: A dictionary of the fields and their values
    :raises ServerError: Raises a ServerError if an error is returned
    :return bool: True if successful, error otherwise
    """
    if stype not in ['votelist', 'vnlist', 'wishlist']:
        raise SyntaxError("{} is not a valid type for set. Should be one of: votelist, vnlist or wishlist.".format(stype))
    # BUG FIX: the original formatted the builtin ``id`` function instead of
    # the ``sid`` parameter, sending "<built-in function id>" to the API.
    command = "{} {} {}".format(stype, sid, ujson.dumps(fields))
    data = self.connection.send_command('set', command)
    if 'id' in data:
        # Any response carrying an 'id' key is an error payload.
        raise ServerError(data['msg'], data['id'])
    else:
        return True
|
Send a request to the API to modify something in the database if logged in.
:param str stype: What are we modifying? One of: votelist, vnlist, wishlist
:param int sid: The ID that we're modifying.
:param dict fields: A dictionary of the fields and their values
:raises ServerError: Raises a ServerError if an error is returned
:return bool: True if successful, error otherwise
|
def to_prettytable(df):
    """Convert DataFrame into ``PrettyTable``.

    :param df: a pandas DataFrame
    :return: a ``PrettyTable`` with the frame's columns and rows
    """
    pt = PrettyTable()
    pt.field_names = df.columns
    # ``DataFrame.iteritems`` was removed in pandas 2.0; ``items`` is the
    # long-standing equivalent (same (label, Series) pairs).
    for row in zip(*(values for _, values in df.items())):
        pt.add_row(row)
    return pt
|
Convert DataFrame into ``PrettyTable``.
|
def swap(tokens, maxdist=2):
    """Yield variants of *tokens* produced by permuting a sliding window of
    at most ``maxdist`` adjacent items (a subset of all permutations)."""
    assert maxdist >= 2
    seq = list(tokens)
    window = min(maxdist, len(seq))
    for start in range(len(seq) - 1):
        original = tuple(seq[start:start + window])
        for perm in permutations(seq[start:start + window]):
            if perm == original:
                continue  # skip the identity permutation
            yield seq[:start] + list(perm) + seq[start + window:]
        # A window covering the whole sequence already produced everything.
        if window == len(seq):
            break
|
Perform a swap operation on a sequence of tokens, exhaustively swapping all tokens up to the maximum specified distance. This is a subset of all permutations.
|
def implied_local_space(*, arg_index=None, keys=None):
    """Return a simplification that converts the positional argument
    `arg_index` from (str, int) to a subclass of :class:`.LocalSpace`, as well
    as any keyword argument with one of the given keys.

    The exact type of the resulting Hilbert space is determined by
    the `default_hs_cls` argument of :func:`init_algebra`.

    In many cases, we have :func:`implied_local_space` (in ``create``) in
    addition to a conversion in ``__init__``, so
    that :func:`match_replace` etc can rely on the relevant arguments being a
    :class:`HilbertSpace` instance.
    """
    from qnet.algebra.core.hilbert_space_algebra import (
        HilbertSpace, LocalSpace)

    def args_to_local_space(cls, args, kwargs):
        """Convert (str, int) of selected args to :class:`.LocalSpace`"""
        if isinstance(args[arg_index], LocalSpace):
            new_args = args
        else:
            if isinstance(args[arg_index], (int, str)):
                try:
                    # Prefer the class-specific default Hilbert space type.
                    hs = cls._default_hs_cls(args[arg_index])
                except AttributeError:
                    hs = LocalSpace(args[arg_index])
            else:
                hs = args[arg_index]
            assert isinstance(hs, HilbertSpace)
            new_args = (tuple(args[:arg_index]) + (hs,) +
                        tuple(args[arg_index + 1:]))
        return new_args, kwargs

    def kwargs_to_local_space(cls, args, kwargs):
        """Convert (str, int) of selected kwargs to LocalSpace"""
        if all([isinstance(kwargs[key], LocalSpace) for key in keys]):
            new_kwargs = kwargs
        else:
            new_kwargs = {}
            for key, val in kwargs.items():
                if key in keys:
                    if isinstance(val, (int, str)):
                        try:
                            val = cls._default_hs_cls(val)
                        except AttributeError:
                            val = LocalSpace(val)
                    assert isinstance(val, HilbertSpace)
                new_kwargs[key] = val
        return args, new_kwargs

    def to_local_space(cls, args, kwargs):
        """Convert (str, int) of selected args and kwargs to LocalSpace"""
        # BUG FIX: the helpers have signature (cls, args, kwargs); they were
        # previously called as (args, kwargs, arg_index) / (args, kwargs,
        # keys), shifting every argument into the wrong parameter.
        new_args, __ = args_to_local_space(cls, args, kwargs)
        __, new_kwargs = kwargs_to_local_space(cls, args, kwargs)
        return new_args, new_kwargs

    if (arg_index is not None) and (keys is None):
        return args_to_local_space
    elif (arg_index is None) and (keys is not None):
        return kwargs_to_local_space
    elif (arg_index is not None) and (keys is not None):
        return to_local_space
    else:
        raise ValueError("must give at least one of arg_index and keys")
|
Return a simplification that converts the positional argument
`arg_index` from (str, int) to a subclass of :class:`.LocalSpace`, as well
as any keyword argument with one of the given keys.
The exact type of the resulting Hilbert space is determined by
the `default_hs_cls` argument of :func:`init_algebra`.
In many cases, we have :func:`implied_local_space` (in ``create``) in
addition to a conversion in ``__init__``, so
that :func:`match_replace` etc can rely on the relevant arguments being a
:class:`HilbertSpace` instance.
|
def cancel(self, consumer_tag):
    """Cancel a channel by consumer tag."""
    # Only issue the cancel if the channel still has a live connection.
    if self.channel.connection:
        self.channel.basic_cancel(consumer_tag)
|
Cancel a channel by consumer tag.
|
def update_dependency(self, tile, depinfo, destdir=None):
    """Attempt to install or update a dependency to the latest version.

    Walks the configured (priority, rule) list; for each rule that matches
    the dependency, checks whether the already-unpacked copy (if any) is up
    to date, removing and re-resolving it when it is not.

    Args:
        tile (IOTile): An IOTile object describing the tile that has the dependency
        depinfo (dict): a dictionary from tile.dependencies specifying the dependency
        destdir (string): An optional folder into which to unpack the dependency

    Returns:
        string: a string indicating the outcome. Possible values are:
            "already installed"
            "installed"
            "updated"
            "not found"
    """
    if destdir is None:
        # Default unpack location inside the tile's build tree.
        destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])
    # has_version tracks the current on-disk state; had_version remembers
    # whether a copy existed initially (distinguishes installed/updated).
    has_version = False
    had_version = False
    if os.path.exists(destdir):
        has_version = True
        had_version = True
    for priority, rule in self.rules:
        if not self._check_rule(rule, depinfo):
            continue
        resolver = self._find_resolver(rule)
        if has_version:
            deptile = IOTile(destdir)
            # If the dependency is not up to date, don't do anything
            depstatus = self._check_dep(depinfo, deptile, resolver)
            if depstatus is False:
                # Stale copy: remove it and fall through to re-resolve.
                shutil.rmtree(destdir)
                has_version = False
            else:
                continue
        # Now try to resolve this dependency with the latest version
        result = resolver.resolve(depinfo, destdir)
        if not result['found'] and result.get('stop', False):
            # Resolver asked us to stop searching entirely.
            return 'not found'
        if not result['found']:
            continue
        # Record which resolver produced this copy for later checks.
        settings = {
            'resolver': resolver.__class__.__name__,
            'factory_args': rule[2]
        }
        if 'settings' in result:
            settings['settings'] = result['settings']
        self._save_depsettings(destdir, settings)
        if had_version:
            return "updated"
        return "installed"
    if has_version:
        return "already installed"
    return "not found"
|
Attempt to install or update a dependency to the latest version.
Args:
tile (IOTile): An IOTile object describing the tile that has the dependency
depinfo (dict): a dictionary from tile.dependencies specifying the dependency
destdir (string): An optional folder into which to unpack the dependency
Returns:
string: a string indicating the outcome. Possible values are:
"already installed"
"installed"
"updated"
"not found"
|
def set_parameter(name, parameter, value, path=None):
    '''
    Set the value of a cgroup parameter for a container.

    Returns None if the container does not exist, False if the
    ``lxc-cgroup`` command failed, and True on success.

    path
        path to the container parent directory
        default: /var/lib/lxc (system)

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' lxc.set_parameter name parameter value
    '''
    if not exists(name, path=path):
        # Unknown container: None, distinguishable from command failure.
        return None
    cmd = 'lxc-cgroup'
    if path:
        # NOTE(review): `pipes.quote` is deprecated (removed in Python
        # 3.13); `shlex.quote` is the drop-in replacement — confirm the
        # minimum supported Python before switching.
        cmd += ' -P {0}'.format(pipes.quote(path))
    cmd += ' -n {0} {1} {2}'.format(name, parameter, value)
    ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    if ret['retcode'] != 0:
        return False
    else:
        return True
|
Set the value of a cgroup parameter for a container.
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.set_parameter name parameter value
|
def get_group_category(self, category):
    """
    Get a single group category.

    :calls: `GET /api/v1/group_categories/:group_category_id \
    <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_

    :param category: The object or ID of the category.
    :type category: :class:`canvasapi.group.GroupCategory` or int

    :rtype: :class:`canvasapi.group.GroupCategory`
    """
    # Accept either a GroupCategory instance or a raw id.
    category_id = obj_or_id(category, "category", (GroupCategory,))
    endpoint = 'group_categories/{}'.format(category_id)
    response = self.__requester.request('GET', endpoint)
    return GroupCategory(self.__requester, response.json())
|
Get a single group category.
:calls: `GET /api/v1/group_categories/:group_category_id \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_
:param category: The object or ID of the category.
:type category: :class:`canvasapi.group.GroupCategory` or int
:rtype: :class:`canvasapi.group.GroupCategory`
|
def sharedInterfaces():
    """
    This attribute is the public interface for code which wishes to discover
    the list of interfaces allowed by this Share. It is a list of
    Interface objects.
    """
    def get(self):
        if not self.sharedInterfaceNames:
            return ()
        if self.sharedInterfaceNames == ALL_IMPLEMENTED_DB:
            # Sentinel meaning "every interface the shared item implements".
            return tuple(implementedBy(self.sharedItem.__class__))
        else:
            return tuple(map(namedAny, self.sharedInterfaceNames.split(u',')))
    def set(self, newValue):
        # BUG FIX: the setter previously assigned ``sharedAttributeNames``,
        # leaving ``sharedInterfaceNames`` (which the getter reads) untouched,
        # so assignments to this property were silently ignored.
        self.sharedInterfaceNames = _interfacesToNames(newValue)
    return get, set
|
This attribute is the public interface for code which wishes to discover
the list of interfaces allowed by this Share. It is a list of
Interface objects.
|
def lower(self):
    """Lower bound"""
    parent = super(FlipableFluxBounds, self)
    # A flipped reaction swaps and negates its bounds in this view.
    if self._reaction in self._view._flipped:
        return -parent.upper
    return parent.lower
|
Lower bound
|
def match_tracks(self, set_a, set_b, closest_matches=False):
    """
    Find the optimal set of matching assignments between set a and set b. This function supports optimal 1:1
    matching using the Munkres method and matching from every object in set a to the closest object in set b.
    In this situation set b accepts multiple matches from set a.

    Args:
        set_a: first collection of tracks (rows of the cost matrix)
        set_b: second collection of tracks (columns of the cost matrix)
        closest_matches: if True, greedily match each feasible set_a object
            to its minimum-cost set_b object (set_b objects may be matched
            repeatedly); otherwise use optimal 1:1 Munkres assignment.

    Returns:
        list of (index_into_set_a, index_into_set_b) assignment tuples
    """
    # Costs are scaled by 100; entries >= 100 mark infeasible pairings.
    costs = self.track_cost_matrix(set_a, set_b) * 100
    min_row_costs = costs.min(axis=1)
    min_col_costs = costs.min(axis=0)
    # Keep only rows/columns with at least one feasible partner.
    good_rows = np.where(min_row_costs < 100)[0]
    good_cols = np.where(min_col_costs < 100)[0]
    assignments = []
    if len(good_rows) > 0 and len(good_cols) > 0:
        if closest_matches:
            # For each good row, index (within good_cols) of its cheapest column.
            b_matches = costs[np.meshgrid(good_rows, good_cols, indexing='ij')].argmin(axis=1)
            a_matches = np.arange(b_matches.size)
            initial_assignments = [(good_rows[a_matches[x]], good_cols[b_matches[x]])
                                   for x in range(b_matches.size)]
        else:
            # Optimal 1:1 assignment on the feasible submatrix.
            munk = Munkres()
            initial_assignments = munk.compute(costs[np.meshgrid(good_rows, good_cols, indexing='ij')].tolist())
            initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
        # Drop any assignment that landed on an infeasible pairing.
        for a in initial_assignments:
            if costs[a[0], a[1]] < 100:
                assignments.append(a)
    return assignments
|
Find the optimal set of matching assignments between set a and set b. This function supports optimal 1:1
matching using the Munkres method and matching from every object in set a to the closest object in set b.
In this situation set b accepts multiple matches from set a.
Args:
set_a:
set_b:
closest_matches:
Returns:
|
def build_polygons(self, polygons):
    """Construct polygons from a heterogeneous collection.

    Each entry of ``polygons`` may be either:

    - a dict of polygon attributes (e.g. stroke/fill options plus a
      ``path`` of {'lat': ..., 'lng': ...} points), passed straight to
      ``add_polygon``; or
    - a list/tuple of 2-item (lat, lng) point pairs, converted into a
      path dict via ``build_polygon_dict`` and then added.

    A falsy ``polygons`` is a no-op; a non-list/tuple value raises
    AttributeError, as does any point that is not exactly two coordinates.
    """
    if not polygons:
        return
    if not isinstance(polygons, (list, tuple)):
        raise AttributeError('A list or tuple of polylines is required')
    for entry in polygons:
        if isinstance(entry, dict):
            self.add_polygon(**entry)
        elif isinstance(entry, (tuple, list)):
            path = []
            for point in entry:
                if len(point) != 2:
                    raise AttributeError('A point needs two coordinates')
                lat, lng = point
                path.append({'lat': lat, 'lng': lng})
            self.add_polygon(**self.build_polygon_dict(path))
|
Process data to construct polygons
This method is built from the assumption that the polygons parameter
is a list of:
list of lists or tuples : a list of path points, each one
indicating the point coordinates --
[lat,lng], [lat, lng], (lat, lng), ...
tuple of lists or tuples : a tuple of path points, each one
indicating the point coordinates -- (lat,lng), [lat, lng],
(lat, lng), ...
dicts: a dictionary with polylines attributes
So, for instance, we have this general scenario as a input parameter:
polygon = {
'stroke_color': '#0AB0DE',
'stroke_opacity': 1.0,
'stroke_weight': 3,
'fill_color': '#FFABCD',
'fill_opacity': 0.5,
'path': [{'lat': 33.678, 'lng': -116.243},
{'lat': 33.679, 'lng': -116.244},
{'lat': 33.680, 'lng': -116.250},
{'lat': 33.681, 'lng': -116.239},
{'lat': 33.678, 'lng': -116.243}]
}
path1 = [(33.665, -116.235), (33.666, -116.256),
(33.667, -116.250), (33.668, -116.229)]
path2 = ((33.659, -116.243), (33.660, -116.244),
(33.649, -116.250), (33.644, -116.239))
path3 = ([33.688, -116.243], [33.680, -116.244],
[33.682, -116.250], [33.690, -116.239])
path4 = [[33.690, -116.243], [33.691, -116.244],
[33.692, -116.250], [33.693, -116.239]]
polygons = [polygon, path1, path2, path3, path4]
|
def untrigger(queue, trigger=_c.FSQ_TRIGGER):
    '''Uninstalls the trigger for the specified queue -- if a queue has no
       trigger, this function is a no-op.'''
    trigger_path = fsq_path.trigger(queue, trigger=trigger)
    _queue_ok(os.path.dirname(trigger_path))
    try:
        os.unlink(trigger_path)
    # ``except (OSError, IOError, ), e:`` is Python-2-only syntax (a
    # SyntaxError on Python 3); ``as e`` works on Python 2.6+ and 3.
    except (OSError, IOError) as e:
        # A missing trigger file means "no trigger installed": a no-op.
        if e.errno != errno.ENOENT:
            raise FSQConfigError(e.errno, wrap_io_os_err(e))
|
Uninstalls the trigger for the specified queue -- if a queue has no
trigger, this function is a no-op.
|
def assert_200(response, max_len=500):
    """ Check that a HTTP response returned 200. """
    if response.status_code != 200:
        # Include pretty-printed headers and a truncated body for context.
        headers = json.dumps(dict(response.headers), indent=2)
        body = response.content[:max_len]
        raise ValueError(
            "Response was {}, not 200:\n{}\n{}".format(
                response.status_code, headers, body))
|
Check that a HTTP response returned 200.
|
def read_pure_water_absorption_from_file(self, file_name):
    """Read the pure water absorption from a csv formatted file

    :param file_name: filename and path of the csv file
    """
    lg.info('Reading water absorption from file')
    try:
        self.a_water = self._read_iop_from_file(file_name)
    except Exception:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
        # catch only ordinary errors. Behaviour is still best-effort:
        # log (with traceback) and continue.
        lg.exception('Problem reading file :: ' + file_name)
|
Read the pure water absorption from a csv formatted file
:param file_name: filename and path of the csv file
|
def encoded_to_array(encoded):
    """
    Turn a dictionary with base64 encoded strings back into a numpy array.

    Parameters
    ------------
    encoded : dict
      Has keys:
        dtype: string of dtype
        shape: int tuple of shape
        base64: base64 encoded string of flat array
        binary: decode result coming from numpy.tostring

    Returns
    ----------
    array: numpy array

    Raises
    ----------
    ValueError: if input is neither a dict with a 'base64'/'binary' key
        nor a sequence convertible to an array
    """
    if not isinstance(encoded, dict):
        # Plain sequences pass straight through as arrays.
        if is_sequence(encoded):
            return np.asanyarray(encoded)
        raise ValueError('Unable to extract numpy array from input')
    encoded = decode_keys(encoded)
    dtype = np.dtype(encoded['dtype'])
    if 'base64' in encoded:
        array = np.frombuffer(base64.b64decode(encoded['base64']),
                              dtype)
    elif 'binary' in encoded:
        array = np.frombuffer(encoded['binary'],
                              dtype=dtype)
    else:
        # Previously this fell through to an UnboundLocalError on `array`;
        # fail with an explicit, catchable error instead.
        raise ValueError("encoded dict requires a 'base64' or 'binary' key")
    if 'shape' in encoded:
        array = array.reshape(encoded['shape'])
    return array
|
Turn a dictionary with base64 encoded strings back into a numpy array.
Parameters
------------
encoded : dict
Has keys:
dtype: string of dtype
shape: int tuple of shape
base64: base64 encoded string of flat array
binary: decode result coming from numpy.tostring
Returns
----------
array: numpy array
|
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
    """
    Simple 1D classification example using a heavy side gp transformation

    :param seed: seed value for data generation (default is 4).
    :type seed: int
    :param max_iters: total optimizer iteration budget (split over 5 rounds).
    :param optimize: whether to optimize the model hyperparameters.
    :param plot: whether to plot the latent function and the classifier fit.
    :returns: the fitted GPy GP classification model.
    """
    try:import pods
    except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
    data = pods.datasets.toy_linear_1d_classification(seed=seed)
    Y = data['Y'][:, 0:1]
    # Map labels from {-1, 1} to {0, 1} for the Bernoulli likelihood.
    Y[Y.flatten() == -1] = 0
    # Model definition
    kernel = GPy.kern.RBF(1)
    likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
    ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
    m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood, inference_method=ep, name='gp_classification_heaviside')
    #m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
    # Optimize
    if optimize:
        # Parameters optimization:
        # Run the optimizer in 5 shorter rounds of max_iters/5 each.
        for _ in range(5):
            m.optimize(max_iters=int(max_iters/5))
        print(m)
    # Plot
    if plot:
        from matplotlib import pyplot as plt
        # Top axis: latent function; bottom axis: classifier output.
        fig, axes = plt.subplots(2, 1)
        m.plot_f(ax=axes[0])
        m.plot(ax=axes[1])
    print(m)
    return m
|
Simple 1D classification example using a heavy side gp transformation
:param seed: seed value for data generation (default is 4).
:type seed: int
|
def RABC(self):
    """Return the projected turning-point values (A, B, C) as a
    formatted '(A,B,C)' string, each extrapolated from raw_data."""
    # Each value linearly extrapolates raw_data[i] against raw_data[i-3].
    a, b, c = (self.raw_data[i] * 2 - self.raw_data[i - 3]
               for i in (-3, -2, -1))
    return '(%.2f,%.2f,%.2f)' % (a, b, c)
|
Return ABC
轉折點 ABC
|
def complete(self, uio, dropped=False):
    """Query for all missing information in the transaction.

    Interactively prompts (via *uio*) for any endpoint list ('src'/'dst')
    that is not yet set, collecting (account, amount) pairs until the
    amounts sum to the transaction total. Source amounts are negated.

    :param uio: user-interaction object providing show/text/decimal prompts
    :param dropped: when True, also complete transactions marked dropped
    """
    if self.dropped and not dropped:
        # do nothing for dropped xn, unless specifically told to
        return
    for end in ['src', 'dst']:
        if getattr(self, end):
            continue  # we have this information
        uio.show('\nEnter ' + end + ' for transaction:')
        uio.show('')
        uio.show(self.summary())
        try:
            endpoints = []
            remaining = self.amount
            # Keep prompting until the entered amounts cover the total.
            while remaining:
                account = uio.text(' Enter account', None)
                amount = uio.decimal(
                    ' Enter amount',
                    default=remaining,
                    lower=0,
                    upper=remaining
                )
                endpoints.append(Endpoint(account, amount))
                remaining = self.amount \
                    - sum(map(lambda x: x.amount, endpoints))
        except ui.RejectWarning:
            # bail out
            sys.exit("bye!")
        # flip amounts if it was a src outcome
        if end == 'src':
            # NOTE(review): on Python 3 ``map`` is lazy, so this stores a
            # map object rather than a list -- confirm downstream consumers
            # iterate it at most once.
            endpoints = map(
                lambda x: Endpoint(x.account, -x.amount),
                endpoints
            )
        # set endpoints
        setattr(self, end, endpoints)
|
Query for all missing information in the transaction
|
def get_finder(import_path):
    """Resolve *import_path* to a process-finder instance.

    The dotted path must name a subclass of ``BaseProcessesFinder``;
    otherwise ``ImproperlyConfigured`` is raised.
    """
    finder_cls = import_string(import_path)
    if issubclass(finder_cls, BaseProcessesFinder):
        return finder_cls()
    raise ImproperlyConfigured(
        'Finder "{}" is not a subclass of "{}"'.format(
            finder_cls, BaseProcessesFinder))
|
Get a process finder.
|
def as_dict(self):
    """
    Json-serializable dict representation of CompletePhononDos.
    """
    result = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "structure": self.structure.as_dict(),
        "frequencies": list(self.frequencies),
        "densities": list(self.densities),
        "pdos": [],
    }
    if self.pdos:
        # One projected-DOS list per site, in structure order.
        result["pdos"] = [list(self.pdos[site]) for site in self.structure]
    return result
|
Json-serializable dict representation of CompletePhononDos.
|
def to_fmt(self) -> str:
    """
    Provide a useful representation of the register.
    """
    # NOTE(review): despite the ``-> str`` annotation this returns the
    # ``fmt`` block object itself, presumably rendered to text by the
    # caller (e.g. via str()) -- confirm against the ``fmt`` module.
    infos = fmt.end(";\n", [])
    s = fmt.sep(', ', [])
    # List all state ids in a stable (sorted) order.
    for ids in sorted(self.states.keys()):
        s.lsdata.append(str(ids))
    infos.lsdata.append(fmt.block('(', ')', [s]))
    infos.lsdata.append("events:" + repr(self.events))
    infos.lsdata.append(
        "named_events:" + repr(list(self.named_events.keys()))
    )
    infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
    return infos
|
Provide a useful representation of the register.
|
def create_simple_tear_sheet(returns,
                             positions=None,
                             transactions=None,
                             benchmark_rets=None,
                             slippage=None,
                             estimate_intraday='infer',
                             live_start_date=None,
                             turnover_denom='AGB',
                             header_rows=None):
    """
    Simpler version of create_full_tear_sheet; generates summary performance
    statistics and important plots as a single image.

    - Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,
        exposure, top 10 holdings, total holdings, long/short holdings,
        daily turnover, transaction time distribution.
    - Never accept market_data input (market_data = None)
    - Never accept sector_mappings input (sector_mappings = None)
    - Never perform bootstrap analysis (bootstrap = False)
    - Never hide positions on top 10 holdings plot (hide_positions = False)
    - Always use default cone_std (cone_std = (1.0, 1.5, 2.0))

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - Time series with decimal returns.
         - Example:
            2015-07-16    -0.012143
            2015-07-17    0.045350
            2015-07-20    0.030957
            2015-07-21    0.004902
    positions : pd.DataFrame, optional
        Daily net position values.
         - Time series of dollar amount invested in each position and cash.
         - Days where stocks are not held can be represented by 0 or NaN.
         - Non-working capital is labelled 'cash'
         - Example:
            index         'AAPL'         'MSFT'          cash
            2004-01-09    13939.3800     -14012.9930     711.5585
            2004-01-12    14492.6300     -14624.8700     27.1821
            2004-01-13    -13853.2800    13653.6400      -43.6375
    transactions : pd.DataFrame, optional
        Executed trade volumes and fill prices.
        - One row per trade.
        - Trades on different names that occur at the
          same time will have identical indices.
        - Example:
            index                  amount   price    symbol
            2004-01-09 12:18:01    483      324.12   'AAPL'
            2004-01-09 12:18:01    122      83.10    'MSFT'
            2004-01-13 14:12:23    -75      340.43   'AAPL'
    benchmark_rets : pd.Series, optional
        Daily returns of the benchmark, noncumulative.
    slippage : int/float, optional
        Basis points of slippage to apply to returns before generating
        tearsheet stats and plots.
        If a value is provided, slippage parameter sweep
        plots will be generated from the unadjusted returns.
        Transactions and positions must also be passed.
        - See txn.adjust_returns_for_slippage for more details.
    estimate_intraday : boolean or 'infer', optional
        Whether to treat the strategy as intraday and estimate positions
        from transactions — see utils.check_intraday for details.
    live_start_date : datetime, optional
        The point in time when the strategy began live trading,
        after its backtest period. This datetime should be normalized.
    turnover_denom : str, optional
        Either AGB or portfolio_value, default AGB.
        - See full explanation in txn.get_turnover.
    header_rows : dict or OrderedDict, optional
        Extra rows to display at the top of the perf stats table.
    """
    # Detect/convert an intraday strategy if requested; positions may be
    # re-estimated from transactions.
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    # Slippage adjustment requires transactions (and positions).
    if (slippage is not None) and (transactions is not None):
        returns = txn.adjust_returns_for_slippage(returns, positions,
                                                  transactions, slippage)

    # Count the vertical grid rows needed for the requested sections.
    always_sections = 4
    positions_sections = 4 if positions is not None else 0
    transactions_sections = 2 if transactions is not None else 0
    live_sections = 1 if live_start_date is not None else 0
    benchmark_sections = 1 if benchmark_rets is not None else 0

    vertical_sections = sum([
        always_sections,
        positions_sections,
        transactions_sections,
        live_sections,
        benchmark_sections,
    ])
    if live_start_date is not None:
        live_start_date = ep.utils.get_utc_timestamp(live_start_date)

    # Summary performance statistics table.
    plotting.show_perf_stats(returns,
                             benchmark_rets,
                             positions=positions,
                             transactions=transactions,
                             turnover_denom=turnover_denom,
                             live_start_date=live_start_date,
                             header_rows=header_rows)

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)

    # Cumulative returns spans the first two grid rows; `i` tracks the next
    # free row as optional sections are appended below.
    ax_rolling_returns = plt.subplot(gs[:2, :])

    i = 2
    if benchmark_rets is not None:
        ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
        i += 1
    ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1
    ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
    i += 1

    plotting.plot_rolling_returns(returns,
                                  factor_returns=benchmark_rets,
                                  live_start_date=live_start_date,
                                  cone_std=(1.0, 1.5, 2.0),
                                  ax=ax_rolling_returns)
    ax_rolling_returns.set_title('Cumulative returns')

    if benchmark_rets is not None:
        plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)

    plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)

    plotting.plot_drawdown_underwater(returns, ax=ax_underwater)

    if positions is not None:
        # Plot simple positions tear sheet
        ax_exposures = plt.subplot(gs[i, :])
        i += 1
        ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)
        i += 1
        ax_long_short_holdings = plt.subplot(gs[i, :])
        i += 1

        positions_alloc = pos.get_percent_alloc(positions)

        plotting.plot_exposures(returns, positions, ax=ax_exposures)

        plotting.show_and_plot_top_positions(returns,
                                             positions_alloc,
                                             show_and_plot=0,
                                             hide_positions=False,
                                             ax=ax_top_positions)

        plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

        plotting.plot_long_short_holdings(returns, positions_alloc,
                                          ax=ax_long_short_holdings)

    if transactions is not None:
        # Plot simple transactions tear sheet
        ax_turnover = plt.subplot(gs[i, :])
        i += 1
        ax_txn_timings = plt.subplot(gs[i, :])
        i += 1

        plotting.plot_turnover(returns,
                               transactions,
                               positions,
                               ax=ax_turnover)

        plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)

    # Shared-x axes hide tick labels by default; force them visible on all.
    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)
|
Simpler version of create_full_tear_sheet; generates summary performance
statistics and important plots as a single image.
- Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,
exposure, top 10 holdings, total holdings, long/short holdings,
daily turnover, transaction time distribution.
- Never accept market_data input (market_data = None)
- Never accept sector_mappings input (sector_mappings = None)
- Never perform bootstrap analysis (bootstrap = False)
- Never hide posistions on top 10 holdings plot (hide_positions = False)
- Always use default cone_std (cone_std = (1.0, 1.5, 2.0))
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- Time series with decimal returns.
- Example:
2015-07-16 -0.012143
2015-07-17 0.045350
2015-07-20 0.030957
2015-07-21 0.004902
positions : pd.DataFrame, optional
Daily net position values.
- Time series of dollar amount invested in each position and cash.
- Days where stocks are not held can be represented by 0 or NaN.
- Non-working capital is labelled 'cash'
- Example:
index 'AAPL' 'MSFT' cash
2004-01-09 13939.3800 -14012.9930 711.5585
2004-01-12 14492.6300 -14624.8700 27.1821
2004-01-13 -13853.2800 13653.6400 -43.6375
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- One row per trade.
- Trades on different names that occur at the
same time will have identical indicies.
- Example:
index amount price symbol
2004-01-09 12:18:01 483 324.12 'AAPL'
2004-01-09 12:18:01 122 83.10 'MSFT'
2004-01-13 14:12:23 -75 340.43 'AAPL'
benchmark_rets : pd.Series, optional
Daily returns of the benchmark, noncumulative.
slippage : int/float, optional
Basis points of slippage to apply to returns before generating
tearsheet stats and plots.
If a value is provided, slippage parameter sweep
plots will be generated from the unadjusted returns.
Transactions and positions must also be passed.
- See txn.adjust_returns_for_slippage for more details.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period. This datetime should be normalized.
turnover_denom : str, optional
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
set_context : boolean, optional
If True, set default plotting style context.
|
def check_support_user_port(cls, hw_info_ex):
    """
    Checks whether the module supports a user I/O port.
    :param HardwareInfoEx hw_info_ex:
        Extended hardware information structure (see method :meth:`get_hardware_info`).
    :return: True when the module supports a user I/O port, otherwise False.
    :rtype: bool
    """
    # The BASIC and RESERVED1 product variants never carry a user port.
    pid = hw_info_ex.m_dwProductCode & PRODCODE_MASK_PID
    if pid == ProductCode.PRODCODE_PID_BASIC:
        return False
    if pid == ProductCode.PRODCODE_PID_RESERVED1:
        return False
    # Otherwise the firmware must be at least version 2.16.
    return cls.check_version_is_equal_or_higher(hw_info_ex.m_dwFwVersionEx, 2, 16)
|
Checks whether the module supports a user I/O port.
:param HardwareInfoEx hw_info_ex:
Extended hardware information structure (see method :meth:`get_hardware_info`).
:return: True when the module supports a user I/O port, otherwise False.
:rtype: bool
|
async def send_photo(self, path, entity):
    """Sends the file located at path to the desired entity as a photo"""
    callback = self.upload_progress_callback
    await self.send_file(entity, path, progress_callback=callback)
    print('Photo sent!')
|
Sends the file located at path to the desired entity as a photo
|
def solve_limited(self, assumptions=[]):
    """
    Solve internal formula using given budgets for conflicts and
    propagations.

    :param assumptions: list of assumption literals for this call
    :return: solver status, or ``None`` when no solver object exists
    """
    if self.maplesat:
        if self.use_timer:
            # time.clock() was removed in Python 3.8; process_time()
            # provides the equivalent CPU-time measurement.
            start_time = time.process_time()

        # Temporarily restore the default SIGINT handler so the C-level
        # solver call can be interrupted with Ctrl-C.
        def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)

        self.status = pysolvers.maplechrono_solve_lim(self.maplesat, assumptions)

        # Reinstall the handler that was active before the call.
        signal.signal(signal.SIGINT, def_sigint_handler)

        if self.use_timer:
            self.call_time = time.process_time() - start_time
            self.accu_time += self.call_time

        return self.status
|
Solve internal formula using given budgets for conflicts and
propagations.
|
def remove_properties(self):
    """
    Removes the property layer (if exists) of the object (in memory)
    """
    layer = self.features_layer
    if layer is not None:
        layer.remove_properties()

    header = self.header
    if header is not None:
        header.remove_lp('features')
|
Removes the property layer (if exists) of the object (in memory)
|
def quaternion_from_axis_rotation(angle, axis):
    """Return quaternion for rotation about given axis.

    Args:
        angle (float): Angle in radians.
        axis (str): Axis for rotation

    Returns:
        Quaternion: Quaternion for axis rotation.

    Raises:
        ValueError: Invalid input axis.
    """
    # Map axis label to its component slot in the (w, x, y, z) vector.
    component_index = {'x': 1, 'y': 2, 'z': 3}.get(axis)
    if component_index is None:
        raise ValueError('Invalid axis input.')
    half_angle = angle / 2.0
    components = np.zeros(4, dtype=float)
    components[component_index] = math.sin(half_angle)
    components[0] = math.cos(half_angle)
    return Quaternion(components)
|
Return quaternion for rotation about given axis.
Args:
angle (float): Angle in radians.
axis (str): Axis for rotation
Returns:
Quaternion: Quaternion for axis rotation.
Raises:
ValueError: Invalid input axis.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.