docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
def set(self, time):
    """Set time in seconds since Epoch.

    Args:
        time (:obj:`float`): time in seconds since Epoch (see time.time())
    Returns:
        None
    """
    self._time = time
    whole = int(time)
    self._pb.sec = whole
    # Fractional part is stored separately as nanoseconds.
    self._pb.nsec = int((time - whole) * 10 ** 9)
async def deregister(self, check):
    """Deregister a local check.

    The agent will take care of deregistering the check from the Catalog.

    Parameters:
        check (ObjectID): Check ID
    Returns:
        bool: ``True`` on success
    """
    ident = extract_attr(check, keys=["CheckID", "ID"])
    resp = await self._api.get("/v1/agent/check/deregister", ident)
    return resp.status == 200
async def read(self, query, *, dc=None, watch=None, consistency=None):
    """Fetch an existing prepared query.

    Parameters:
        query (ObjectID): Query ID
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
        watch (Blocking): Do a blocking query
        consistency (Consistency): Force consistency
    Returns:
        the prepared query definition
    """
    ident = extract_attr(query, keys=["ID"])
    resp = await self._api.get(
        "/v1/query", ident,
        params={"dc": dc}, watch=watch, consistency=consistency)
    # The endpoint responds with a one-element list; unwrap it.
    return resp.body[0]
async def update(self, query, *, dc=None):
    """Update an existing prepared query.

    Parameters:
        query (Object): Query definition
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Returns:
        bool: ``True`` on success
    """
    ident = extract_attr(query, keys=["ID"])
    resp = await self._api.put(
        "/v1/query", ident, params={"dc": dc}, data=query)
    return resp.status == 200
async def delete(self, query, *, dc=None):
    """Delete an existing prepared query.

    Parameters:
        query (ObjectID): Query ID
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Returns:
        bool: ``True`` on success
    """
    ident = extract_attr(query, keys=["ID"])
    resp = await self._api.delete(
        "/v1/query", ident, params={"dc": dc})
    return resp.status == 200
Fetches existing prepared query
Parameters:
query (ObjectID): Query ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consisten... | async def explain(self, query, *, dc=None, consistency=None):
query_id = extract_attr(query, keys=["ID"])
path = "/v1/query/%s/explain" % query_id
response = await self._api.get(path, consistency=consistency, params={
"dc": dc})
result = response.body
return ... | 875,425 |
Add multiple command line flags
Arguments:
flags (:obj:`list` of :obj:`tuple`): List of flags
in tuples (name, flag_type, description, (optional) default)
Raises:
TypeError: Provided wrong arguments or arguments of wrong types, method will raise TypeError | def add_multiple(self, flags):
if not isinstance(flags, list):
raise TypeError("Expected list of flags, got object of type{}".format(type(flags)))
for flag in flags:
if isinstance(flag, Flag):
self.add_item(flag)
elif isinstance(flag, tuple):
... | 875,445 |
Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid | def validate(data):
text = data.get('text')
if not isinstance(text, _string_types) or len(text) == 0:
raise ValueError('text field is required and should not be empty')
if 'markdown' in data and not type(data['markdown']) is bool:
raise ValueError('markdown field should be bool')
... | 875,563 |
def send(url, data):
    """Send an incoming message.

    Args:
        url(str): the incoming hook url
        data(dict): the sending data
    Returns:
        requests.Response
    """
    # Raises ValueError via validate() before any network I/O happens.
    validate(data)
    response = requests.post(url, json=data)
    return response
def send(self, message):
    """Send a RTMMessage.

    Should be called after starting the loop.

    Args:
        message(RTMMessage): the sending message
    Raises:
        WebSocketConnectionClosedException: if the loop is closed
    """
    # Assign a call id only when the caller has not set one already.
    if "call_id" not in message:
        message["call_id"] = self.gen_call_id()
    payload = message.to_json()
    self._ws.send(payload)
def get_message(self, block=False, timeout=None):
    """Remove and return a RTMMessage from self._inbox.

    Args:
        block(bool): if True block until a RTMMessage is available,
            else it will return None when self._inbox is empty
        timeout(int): it blocks at most timeout seconds
    Returns:
        RTMMessage if self._inbox is not empty, else None
    """
    try:
        return self._inbox.get(block=block, timeout=timeout)
    except Exception:
        # Broad on purpose: an empty inbox (queue.Empty) yields None.
        return None
def get_error(self, block=False, timeout=None):
    """Remove and return an error from self._errors.

    Args:
        block(bool): if True block until an error is available,
            else it will return None when self._errors is empty
        timeout(int): it blocks at most timeout seconds
    Returns:
        error if self._errors is not empty, else None
    """
    try:
        return self._errors.get(block=block, timeout=timeout)
    except Exception:
        # Broad on purpose: an empty queue (queue.Empty) yields None.
        return None
async def deregister(self, service):
    """Deregister a local service.

    The deregister endpoint is used to remove a service from the local
    agent. The agent will take care of deregistering the service with
    the Catalog.

    Parameters:
        service (ObjectID): Service ID
    Returns:
        bool: ``True`` on success
    """
    ident = extract_attr(service, keys=["ServiceID", "ID"])
    resp = await self._api.get("/v1/agent/service/deregister", ident)
    return resp.status == 200
async def enable(self, service, *, reason=None):
    """Resume normal operation for a service.

    Parameters:
        service (ObjectID): Service ID
        reason (str): Text string explaining the reason for placing the
            service into normal mode.
    Returns:
        bool: ``True`` on success
    """
    # Leaving maintenance mode is maintenance(enable=False).
    result = await self.maintenance(service, False, reason=reason)
    return result
Enters maintenance mode / Resumes normal operation for service
Parameters:
service (ObjectID): Service ID
enable (bool): Enter or exit maintenance mode
reason (str): Text string explaining the reason for placing the
service into normal mode.
... | async def maintenance(self, service, enable, *, reason=None):
service_id = extract_attr(service, keys=["ServiceID", "ID"])
response = await self._api.put(
"/v1/agent/service/maintenance", service_id,
params={"enable": enable, "reason": reason})
return response.st... | 875,876 |
def __init__(self, client_id, client_secret, redirect_uri,
             scope, cache_path):
    """Create a MercedesMeOAuth object.

    Parameters:
        - client_id - the client id of your app
        - client_secret - the client secret of your app
        - redirect_uri - the redirect URI of your app
        - scope - the desired scope of the request
        - cache_path - token cache location (assumed; confirm against usage)
    """
    self.client_id = client_id
    self.client_secret = client_secret
    self.redirect_uri = redirect_uri
    self.scope = scope
    self.cache_path = cache_path
Gets the access token for the app given the code
Parameters:
- code - the response code | def get_access_token(self, code):
payload = {'redirect_uri': self.redirect_uri,
'code': code,
'grant_type': 'authorization_code'}
headers = self._make_authorization_headers()
response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
... | 875,926 |
def info(self, user_id):
    """Get user information by user id.

    Args:
        user_id(int): the id of user
    Returns:
        User
    Throws:
        RTMServiceError when request failed
    """
    resource = 'v1/user.info?user_id={}'.format(user_id)
    resp = self._rtm_client.get(resource)
    if resp.is_fail():
        raise RTMServiceError('Failed to get user information', resp)
    return resp.data['result']
def info(self, channel_id):
    """Get channel information by channel id.

    Args:
        channel_id(int): the id of channel
    Returns:
        Channel
    Throws:
        RTMServiceError when request failed
    """
    resp = self._rtm_client.get(
        'v1/channel.info?channel_id={}'.format(channel_id))
    if resp.is_fail():
        raise RTMServiceError("Failed to get channel information", resp)
    return resp.data['result']
Prepare check for catalog endpoint
Parameters:
data (Object or ObjectID): Check ID or check definition
Returns:
Tuple[str, dict]: where first is ID and second is check definition | def prepare_check(data):
if not data:
return None, {}
if isinstance(data, str):
return data, {}
result = {}
if "ID" in data:
result["CheckID"] = data["ID"]
for k in ("Node", "CheckID", "Name", "Notes", "Status", "ServiceID"):
if k in data:
result[k]... | 876,629 |
Replys a text message
Args:
text(str): message content
Returns:
RTMMessage | def reply(self, text):
data = {'text': text, 'vchannel_id': self['vchannel_id']}
if self.is_p2p():
data['type'] = RTMMessageType.P2PMessage
data['to_uid'] = self['uid']
else:
data['type'] = RTMMessageType.ChannelMessage
data['channel_id'] ... | 876,695 |
def refer(self, text):
    """Refer to the current message while replying with a new message.

    Args:
        text(str): message content
    Returns:
        RTMMessage
    """
    message = self.reply(text)
    # Link the reply back to this message's key.
    message['refer_key'] = self['key']
    return message
Does the request job
Args:
resource(str): resource uri(relative path)
method(str): HTTP method
params(dict): uri queries
data(dict): HTTP body(form)
json(dict): HTTP body(json)
headers(dict): HTTP headers
Returns:
RTMR... | def do(self,
resource,
method,
params=None,
data=None,
json=None,
headers=None):
uri = "{0}/{1}".format(self._api_base, resource)
if not params:
params = {}
params.update({'token': self._token})
req =... | 876,928 |
Searches across Google Image Search with the specified image query and
downloads the specified count of images
Arguments:
imageQuery {[str]} -- [Image Search Query]
Keyword Arguments:
imageCount {[int]} -- [Count of images that need to be downloaded]
destina... | def extract_images(self, imageQuery, imageCount=100, destinationFolder='./', threadCount=4):
# Initialize the chrome driver
self._initialize_chrome_driver()
# Initialize the image download parameters
self._imageQuery = imageQuery
self._imageCount = imageCount
s... | 877,365 |
Downloads an image file from the given image URL
Arguments:
imageURL {[str]} -- [Image URL] | def _download_image(self, imageURL):
# If the required count of images have been download,
# refrain from downloading the remainder of the images
if(self._imageCounter >= self._imageCount):
return
try:
imageResponse = requests.get(imageURL)
... | 877,371 |
def update(self, *args, **kwargs):
    """Update ConfigMap from mapping/iterable.

    If the key exists the entry is updated, else it is added.

    Args:
        *args: variable length argument list. A valid argument is a two item
            tuple/list. The first item is the key and the second is the value.
        **kwargs: Arbitrary keyword arguments; each pair is stored as-is.
    """
    for pair in args:
        key, value = pair
        self[key] = value
    for key in kwargs:
        self[key] = kwargs[key]
async def disable(self, reason=None):
    """Enter maintenance mode.

    Parameters:
        reason (str): Reason of disabling
    Returns:
        bool: ``True`` on success
    """
    # "enable": True turns *maintenance mode* on, i.e. disables the agent.
    resp = await self._api.put(
        "/v1/agent/maintenance",
        params={"enable": True, "reason": reason})
    return resp.status == 200
async def enable(self, reason=None):
    """Resume normal operation.

    Parameters:
        reason (str): Reason of enabling
    Returns:
        bool: ``True`` on success
    """
    # "enable": False turns maintenance mode off, restoring normal operation.
    resp = await self._api.put(
        "/v1/agent/maintenance",
        params={"enable": False, "reason": reason})
    return resp.status == 200
async def join(self, address, *, wan=None):
    """Trigger the local agent to join a node.

    This endpoint is used to instruct the agent to attempt to connect to
    a given address.

    Parameters:
        address (str): Address of node
        wan (bool): Attempt to join using the WAN pool
    Returns:
        bool: ``True`` on success
    """
    resp = await self._api.get(
        "/v1/agent/join", address, params={"wan": wan})
    return resp.status == 200
Metric data
Args:
value (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float`
or :obj:`basestring` or :obj:`bytes`)
Returns:
value
Raises:
:obj:`TypeError` | def data(self):
if self._data_type == int:
if self._pb.HasField("int64_data"):
return self._pb.int64_data
if self._pb.HasField("int32_data"):
return self._pb.int32_data
if self._pb.HasField("uint64_data"):
return self._... | 878,078 |
async def destroy(self, token):
    """Destroy a given token.

    Parameters:
        token (ObjectID): Token ID
    Returns:
        bool: ``True`` on success
    """
    ident = extract_attr(token, keys=["ID"])
    resp = await self._api.put("/v1/acl/destroy", ident)
    # NOTE(review): unlike sibling endpoints this returns the response
    # body rather than ``status == 200`` — confirm it matches the docstring.
    return resp.body
Draw molecule structure image.
Args:
canvas: draw.drawable.Drawable
mol: model.graphmol.Compound | def draw(canvas, mol):
mol.require("ScaleAndCenter")
mlb = mol.size2d[2]
if not mol.atom_count():
return
bond_type_fn = {
1: {
0: single_bond,
1: wedged_single,
2: dashed_wedged_single,
3: wave_single,
}, 2: {
0: cw... | 878,381 |
def _query(self, url, xpath):
    """Base query for a url and xpath.

    Args:
        url (str): URL to search
        xpath (str): xpath to search (may be ``None``)
    """
    q = self.session.query(CachedRequest)
    q = q.filter(CachedRequest.url == url)
    return q.filter(CachedRequest.xpath == xpath)
Clear cache
Args:
url (str): If given, clear specific item only. Otherwise remove the DB file.
xpath (str): xpath to search (may be ``None``) | def clear(self, url=None, xpath=None):
if url is not None:
query = self._query(url, xpath)
if query.count() > 0:
query.delete()
self.session.commit()
else:
raise KeyError("Cannot clear URL, not in cache: " + str(url) + ... | 878,419 |
def has(self, url, xpath=None):
    """Check if a URL (and xpath) exists in the cache.

    If the DB has not been initialized yet, returns ``False`` for any URL.

    Args:
        url (str): URL to look up
        xpath (str): xpath to search (may be ``None``)
    Returns:
        bool: whether a matching cache entry exists
    """
    if path.exists(self.db_path):
        return self._query(url, xpath).count() > 0
    return False
def get_timestamp(self, url, xpath=None):
    """Get time stamp of cached query result.

    If the DB has not yet been initialized or url/xpath has not been
    queried yet, return None.

    Args:
        url (str): URL to look up
        xpath (str): xpath to search (may be ``None``)
    Returns:
        the ``queried_on`` timestamp of the cached entry, or None
    """
    if not path.exists(self.db_path):
        return None
    # Single round trip instead of count()+one(); one_or_none() preserves
    # the original semantics: None when absent, the row when unique, and
    # MultipleResultsFound when more than one row matches.
    row = self._query(url, xpath).one_or_none()
    return row.queried_on if row is not None else None
def mols_to_file(mols, path):
    """Save molecules to the SDFile format file.

    Args:
        mols: list of molecule objects
        path: file path to save
    """
    text = mols_to_text(mols)
    with open(path, 'w') as output:
        output.write(text)
def listen(self):
    """Start the client listener to listen for server responses.

    Args:
        None
    Returns:
        None
    """
    # Lazy %-style args defer string construction to the logging framework.
    logger.info("Listening on port %s", self.listener.listen_port)
    self.listener.listen()
Processes messages that have been delivered from the transport
protocol.
Args:
data (dict): A dictionary containing the packet data to resend.
Returns:
None
Examples:
>>> data
{'method': 'REGISTER', 'address': ('192.168.0.20', 40080)} | def retransmit(self, data):
# Handle retransmitting REGISTER requests if we don't hear back from
# the server.
if data["method"] == "REGISTER":
if not self.registered and self.register_retries < self.max_retries:
logger.debug("<%s> Timeout exceeded. " % str(... | 878,452 |
This function will send a register packet to the discovered Neteria
server.
Args:
address (tuple): A tuple of the (address, port) to send the register
request to.
retry (boolean): Whether or not we want to reset the current number
of registration retries to 0... | def register(self, address, retry=True):
logger.debug("<%s> Sending REGISTER request to: %s" % (str(self.cuuid),
str(address)))
if not self.listener.listening:
logger.warning("Neteria client is not listening.")
... | 878,455 |
This method handles event legality check messages from the server.
Args:
message (dict): The unserialized legality dictionary received from
the server.
Returns:
None
Examples:
>>> message | def legal_check(self, message):
# If the event was legal, remove it from our event buffer
if message["method"] == "LEGAL":
logger.debug("<%s> <euuid:%s> Event LEGAL" % (str(self.cuuid),
message["euuid"]))
logger... | 878,457 |
def y_score(estimator, X):
    """Score examples from a new matrix X.

    Args:
        estimator: an sklearn estimator object
        X: design matrix with the same features that the estimator was trained on
    Returns:
        a vector of scores of the same length as X

    Note that estimator.predict_proba is preferred but when unavailable
    (e.g. SVM without probability=True) decision_function is used.
    """
    # Narrow the try to the attribute lookup so an AttributeError raised
    # *inside* predict_proba (a genuine bug) is not silently masked by the
    # fallback. Estimators that cannot produce probabilities (e.g. SVC with
    # probability=False) raise AttributeError on attribute access itself.
    try:
        predict_proba = estimator.predict_proba
    except AttributeError:
        return estimator.decision_function(X)
    return predict_proba(X)[:, 1]
Locu Venue Details API Call Wrapper
Args:
list of ids : ids of a particular venues to get insights about. Can process up to 5 ids | def get_details(self, ids):
if isinstance(ids, list):
if len(ids) > 5:
ids = ids[:5]
id_param = ';'.join(ids) + '/'
else:
ids = str(ids)
id_param = ids + '/'
header, content = self._http_request(id_param)
resp = js... | 878,506 |
Query all entities of a specific type, with their attributes
Args:
type_to_query (str): type of entity to query
client: DB client to perform query with
Returns:
pandas.DataFrame: table of entities, with attributes as columns | def query_with_attributes(type_to_query, client):
session = client.create_session()
# query all data
query = session.query(Attribute.name,
Attribute.value,
Entity.id) \
.join(Entity) \
.filter(Entity.ty... | 878,529 |
Convert a GeoJSON polygon feature to a numpy array
Args:
feature (pygeoj.Feature): polygon feature to draw
shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
lat_idx (func): function converting a latitude to the (fractional) row index in the map
lon_idx (func... | def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
import matplotlib
# specify 'agg' renderer, Mac renderer does not support what we want to do below
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
# we can only do p... | 878,552 |
Load data table from tsv file, from default location
Args:
key_filter (str): additional filter for key column - regex matching
key values to include; None for no filter
header_preproc (func): function to apply to column headers to extract year numbers (as strings)
... | def load(self, key_filter=None, header_preproc=None):
# read file, keep all values as strings
df = pd.read_csv(self.input_file,
sep='\t',
dtype=object)
if key_filter is not None:
# filter on key column (first column)
... | 878,557 |
Apply function to each step object in the index
Args:
fn: function to apply. If a list then each function is applied
pairwise: whether to apply the function to pairs of steps
symmetric, diagonal, block: passed to apply_pairwise when pairwise=True
kwargs: a keyword arguments to pass ... | def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):
search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]
functions = util.make_list(fn)
search = list(product(functions, util.dict_product(kwargs)))
results = []
for fn, kw in ... | 878,608 |
Helper function for pairwise apply.
Args:
steps: an ordered collection of steps
function: function to apply, first two positional arguments are steps
symmetric: whether function is symmetric in the two steps
diagonal: whether to apply on the diagonal
block: apply only when th... | def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs):
steps = self.index
r = pd.DataFrame(index=steps, columns=steps)
for i, s1 in enumerate(steps):
j = range(i+1 if symmetric else len(steps))
if not diagonal:
j.remove(i)
other = s... | 878,609 |
def _print_unhashable(df, columns=None):
    """Replace unhashable values in a DataFrame with their string repr.

    Args:
        df: DataFrame
        columns: columns to replace, if necessary. Default None replaces
            all columns.
    Returns:
        the (mutated) DataFrame
    """
    for c in df.columns if columns is None else columns:
        if df.dtypes[c] == object:
            try:
                # Probe hashability; TypeError means at least one value
                # in the column is unhashable.
                df[c].apply(hash)
            except TypeError:
                # .ix was removed in pandas 1.0; reindex reproduces the
                # label-based alignment back onto the full index.
                df[c] = df[c].dropna().apply(pformat).reindex(df.index)
    return df
def extract_class(jar, name):
    """Extract a LinkableClass from a jar.

    Args:
        jar: An open ZipFile instance.
        name: A string containing the binary name of a class.
    Raises:
        KeyError: The class does not exist in the jar.
    """
    with jar.open(name) as entry:
        unpacked = javatools.unpack_class(entry)
    return LinkableClass(unpacked)
Creates a package-list URL and a link base from a docroot element.
Args:
app: the global app object
root: the docroot element [string or dictionary] | def normalize_docroot(app, root):
srcdir = app.env.srcdir
default_version = app.config.javalink_default_version
if isinstance(root, basestring):
(url, base) = _parse_docroot_str(srcdir, root)
return {'root': url, 'base': base, 'version': default_version}
else:
normalized =... | 878,747 |
Helper function: parse attribute data from a wiki html doc
Args:
doc (document parsed with lxml.html): parsed wiki page
Returns:
dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``;
only the first hyperlink listed in each attribute val... | def get_attribute_data(doc):
attributes = dict()
for attribute_node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
# label node
node = attribute_node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
label = " ".join(node.itertext()).strip()
# value node
nod... | 878,779 |
Rotate and return an image according to its Exif information.
ROTATION_NEEDED = {
1: 0,
2: 0 (Mirrored),
3: 180,
4: 180 (Mirrored),
5: -90 (Mirrored),
6: -90,
7: 90 (Mirrored),
8: 90,
}
Args:
image (PIL.Image.Image): PIL image to rota... | def autorotate(image, orientation=None):
orientation_value = orientation if orientation else \
image._getexif().get(EXIF_KEYS.get('Orientation'))
if orientation_value is None:
raise ImDirectException("No orientation available in Exif "
"tag or given explicitl... | 878,801 |
Modifies the Exif tag if rotation has been performed.
0th, 1st
--------
ImageWidth = 256
ImageLength = 257
XResolution = 282
YResolution = 283
TileWidth = 322
TileLength = 323
Exif
----
PixelXDimension = 40962
PixelYDimension = 40963
Args:
exif (dict): The ... | def update_exif_for_rotated_image(exif):
orientation_value = exif.get('0th', ).get(
piexif.ImageIFD.Orientation, exif.get('1st', ).get(
piexif.ImageIFD.Orientation, None))
if orientation_value is not None:
# Update orientation.
exif['0th'][piexif.ImageIFD.Orientation] =... | 878,802 |
def monkey_patch(enabled=True):
    """Monkey-patch the PIL.Image.open method.

    Args:
        enabled (bool): If the monkey patch should be activated or
            deactivated.
    """
    # Swap in the auto-rotating opener, or restore the original PIL one.
    Image.open = imdirect_open if enabled else pil_open
def save_with_exif_info(img, *args, **kwargs):
    """Save an image using PIL, preserving the exif information.

    Args:
        img (PIL.Image.Image):
        *args: The arguments for the `save` method of the Image class.
        **kwargs: The keywords for the `save` method of the Image class.
    """
    # Prefer an explicitly passed exif blob; fall back to the one
    # recorded on the image itself.
    exif = kwargs.pop('exif', img.info.get('exif'))
    img.save(*args, exif=exif, **kwargs)
def scale(p, factor, o=(0, 0)):
    """Scale a point about an origin.

    Args:
        p: point (x, y)
        factor: scaling factor
        o: origin (x, y)
    """
    vx, vy = vector(o, p)
    return translate((vx * factor, vy * factor), o)
def unit(v, lg=1):
    """Return a vector parallel to ``v`` with length ``lg``.

    Args:
        v: vector (x, y)
        lg: length
    Raises:
        ValueError: Null vector was given
    """
    length = distance((0, 0), v)
    try:
        # A zero-length vector makes the division blow up below.
        return scale(v, lg / length)
    except ZeroDivisionError:
        raise ValueError("Null vector was given")
def rotate(p, rad, o=(0, 0)):
    """Rotate a point around an origin.

    Args:
        p: point (x, y)
        rad: angle(radian)
        o: origin (x, y)
    """
    vx, vy = vector(o, p)
    # Standard 2D rotation matrix applied to the offset vector.
    rotated = (vx * cos(rad) - vy * sin(rad),
               vx * sin(rad) + vy * cos(rad))
    return translate(rotated, o)
def cross_product(p1, p2, o=(0, 0)):
    """Return the 2D cross product of vectors o->p1 and o->p2.

    Args:
        p1, p2: point (x, y)
        o: origin
    """
    x1, y1 = vector(o, p1)
    x2, y2 = vector(o, p2)
    return x1 * y2 - y1 * x2
def dot_product(p1, p2, o=(0, 0)):
    """Return the dot product of vectors o->p1 and o->p2.

    Args:
        p1, p2: point (x, y)
        o: origin
    """
    x1, y1 = vector(o, p1)
    x2, y2 = vector(o, p2)
    return x1 * x2 + y1 * y2
def interior_angle(p1, p2, o=(0, 0)):
    """Return the interior angle of two vectors (0 <= θ <= pi).

    Args:
        p1, p2: point (x, y)
        o: origin
    Raises:
        ValueError: p1 or p2 is overlapped with origin
    """
    v1 = vector(o, p1)
    v2 = vector(o, p2)
    denom = distance(o, p1) * distance(o, p2)
    try:
        # Zero denominator means one of the points coincides with the origin.
        return acos(dot_product(v1, v2) / denom)
    except ZeroDivisionError:
        raise ValueError("p1 or p2 is overlapped with origin")
def m_seg(p1, p2, rad, dist):
    """Move a segment by a distance.

    Args:
        p1, p2: point(x, y)
        rad: relative direction angle(radian)
        dist: distance
    Return:
        translated segment(p1, p2)
    """
    # Offset vector: segment direction rotated by rad, scaled to dist.
    offset = unit(rotate(vector(p1, p2), rad), dist)
    return translate(p1, offset), translate(p2, offset)
trim segment
Args:
p1, p2: point(x, y)
t: scaling factor (1 - trimed segment / original segment)
align: 1: trim p2, 2: trim p1, 0: both side
Return:
trimmed segment(p1, p2) | def t_seg(p1, p2, t, align=0):
v = vector(p1, p2)
result = {
1: lambda a, b: (a, translate(b, scale(v, -t))),
2: lambda a, b: (translate(a, scale(v, t)), b),
0: lambda a, b: (translate(a, scale(v, t / 2)),
translate(b, scale(v, -t / 2)))
}
return res... | 878,899 |
def p_seg(p1, p2, cw, interval, trim=0, align=0):
    """Return a segment parallel to (p1, p2).

    Args:
        p1, p2: point(x, y)
        cw: m_seg rad True: -π/2, False: π/2
        interval: m_seg dist
        trim: t_seg trim
        align: t_seg align
    """
    directions = {True: pi / -2, False: pi / 2}
    q1, q2 = m_seg(p1, p2, directions[cw], interval)
    return t_seg(q1, q2, trim, align)
Evaluate whether vertices are in clockwise order.
Args:
vertices: list of vertices (x, y) in polygon.
Returns:
True: clockwise, False: counter-clockwise
Raises:
ValueError: the polygon is complex or overlapped. | def is_clockwise(vertices):
it = iterator.consecutive(cycle(vertices), 3)
clockwise = 0
counter = 0
for _ in range(len(vertices)):
p0, p1, p2 = next(it)
cross = cross_product(p1, p2, p0)
int_angle = interior_angle(p0, p2, p1) # raises ValueError
if cross < 0:
... | 878,901 |
Convenient wrapper around functions that should exit or raise an exception
Example:
assert "Can't create folder" in verify_abort(ensure_folder, "/dev/null/not-there")
Args:
func (callable): Function to execute
*args: Args to pass to 'func'
**kwargs: Named args to pass to 'func'... | def verify_abort(func, *args, **kwargs):
expected_exception = kwargs.pop("expected_exception", runez.system.AbortException)
with CaptureOutput() as logged:
try:
value = func(*args, **kwargs)
assert False, "%s did not raise, but returned %s" % (func, value)
except ex... | 878,909 |
def save(self, path):
    """Save svg as a file(.svg).

    Args:
        path (str): destination to save file
    """
    markup = self.contents()
    with open(path, 'w') as out:
        out.write(markup)
def build(cls, local_scheduler=True, **task_params):
    """Instantiate the task and build it with luigi.

    Args:
        local_scheduler (bool): use a local scheduler (True, default)
            or a remote scheduler
        task_params: parameters to pass to task for instantiation
    """
    task = cls(**task_params)
    luigi.build([task], local_scheduler=local_scheduler)
def close_session(self, commit=True):
    """Commit and close the DB session associated with this task.

    No error is raised if no session is open.

    Args:
        commit (bool): commit session before closing (default=True)
    """
    session = self._session
    if session is None:
        return
    if commit:
        session.commit()
    session.close()
    self._session = None
def add_hydrogen(self, num):
    """Add hydrogens.

    Args:
        num (int): number of hydrogens
    """
    self.H_count = num
    # N-H and O-H groups are treated as hydrogen-bond donors.
    self.H_donor = 1 if num > 0 and self.symbol in ("N", "O") else 0
Chemical formula HTML
Args:
reversed (bool): reversed text for leftmost atom groups | def formula_html(self, reversed_=False):
if self.H_count == 1:
text = "H"
elif self.H_count > 1:
text = "H<sub>{}</sub>".format(self.H_count)
else:
text = ""
seq = [self.symbol, text, self.charge_sign_html()]
if reversed_:
... | 879,031 |
if mol is exactly same structure as the query, return True
Args:
mol: Compound
query: Compound | def equal(mol, query, largest_only=True, ignore_hydrogen=True):
m = molutil.clone(mol)
q = molutil.clone(query)
if largest_only:
m = molutil.largest_graph(m)
q = molutil.largest_graph(q)
if ignore_hydrogen:
m = molutil.make_Hs_implicit(m)
q = molutil.make_Hs_implicit... | 879,036 |
if mol is a substructure of the query, return True
Args:
mol: Compound
query: Compound
largest_only: compare only largest graph molecule | def substructure(mol, query, largest_only=True, ignore_hydrogen=True):
def subset_filter(cnt1, cnt2):
diff = cnt2
diff.subtract(cnt1)
if any(v < 0 for v in diff.values()):
return True
if not (len(mol) and len(query)):
return False # two blank molecules are not ... | 879,037 |
Instantiate a client object
A client can be configured either from a parameters dictionary ``params`` or directly
from an :mod:`sqlalchemy` connection string ``connection_string``. Exactly one of the two
must be provided.
Args:
params (dict): database configuration, as defi... | def __init__(self, params=None, connection_string=None):
if params is None and connection_string is None:
raise RuntimeError("Please provide either 'params' or 'connection_string'")
if params is not None and connection_string is not None:
raise RuntimeError("Please pro... | 879,079 |
Store the password for a database connection using :mod:`keyring`
Use the ``user`` field as the user name and ``<host>:<driver>`` as service name.
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
password (str): password to store | def store_password(params, password):
user_name = params['user']
service_name = params['host'] + ':' + params['driver']
keyring.set_password(service_name=service_name,
username=user_name,
password=password) | 879,080 |
Get the password for a database connection from :mod:`keyring`
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
Returns:
str: password | def _get_password(params):
user_name = params['user']
service_name = params['host'] + ':' + params['driver']
return keyring.get_password(service_name=service_name,
username=user_name) | 879,081 |
Get a database connection string
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
hide_password (bool): if True, the password is hidden in the returned string
(use this for logging purposes).
Returns:
str: connection st... | def get_connection_string(params, hide_password=True):
connection_string = params['driver'] + '://'
user = params.get('user', None)
password = params.get('password', None)
host = params.get('host', None)
port = params.get('port', None)
database = params.get('dat... | 879,082 |
Initialize the class.
Arguments:
api_key -- your Gemini API key
secret_key -- your Gemini API secret key for signatures
live -- use the live API? otherwise, use the sandbox (default False) | def __init__(self, api_key='', secret_key='', live=False):
self.api_key = api_key
self.secret_key = secret_key
if live:
self.base_url = self.live_url | 879,087 |
Send a request to get the public order book, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
limit_bids -- limit the number of bids returned (default 0)
limit_asks -- limit the number of asks returned (default 0) | def book(self, symbol='btcusd', limit_bids=0, limit_asks=0):
url = self.base_url + '/v1/book/' + symbol
params = {
'limit_bids': limit_bids,
'limit_asks': limit_asks
}
return requests.get(url, params) | 879,089 |
Send a request to get all public trades, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
since -- only return trades after this unix timestamp (default 0)
limit_trades -- maximum number of trades to return (default 50).
include_breaks -- whether to d... | def trades(self, symbol='btcusd', since=0, limit_trades=50,
include_breaks=0):
url = self.base_url + '/v1/trades/' + symbol
params = {
'since': since,
'limit_trades': limit_trades,
'include_breaks': include_breaks
}
return requ... | 879,090 |
Send a request for auction history info, return the response.
Arguments:
symbol -- currency symbol (default 'btcusd')
since -- only return auction events after this timestamp (default 0)
limit_auction_results -- maximum number of auction events to return
... | def auction_history(self, symbol='btcusd', since=0,
limit_auction_results=50, include_indicative=1):
url = self.base_url + '/v1/auction/' + symbol + '/history'
params = {
'since': since,
'limit_auction_results': limit_auction_results,
... | 879,092 |
Send a request to place an order, return the response.
Arguments:
amount -- quoted decimal amount of BTC to purchase
price -- quoted decimal amount of USD to spend per BTC
side -- 'buy' or 'sell'
client_order_id -- an optional client-specified order id (default None)
sym... | def new_order(self, amount, price, side, client_order_id=None,
symbol='btcusd', type='exchange limit', options=None):
request = '/v1/order/new'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'... | 879,093 |
Send a request to cancel an order, return the response.
Arguments:
order_id - the order id to cancel | def cancel_order(self, order_id):
request = '/v1/order/cancel'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'order_id': order_id
}
return requests.post(url, headers=self.prepare(params)) | 879,094 |
Prepare, return the required HTTP headers.
Base 64 encode the parameters, sign it with the secret key,
create the HTTP headers, return the whole payload.
Arguments:
params -- a dictionary of parameters | def prepare(self, params):
jsonparams = json.dumps(params)
payload = base64.b64encode(jsonparams.encode())
signature = hmac.new(self.secret_key.encode(), payload,
hashlib.sha384).hexdigest()
return {'X-GEMINI-APIKEY': self.api_key,
'... | 879,098 |
Follow the a graph to find the nodes connected to a given node.
Args:
id: the id of the starting node
edges: a pandas DataFrame of edges. Each row is an edge with two columns containing
the ids of the vertices.
directed: If True, edges are directed from first column to second col... | def follow(id, edges, directed=False, _visited=None):
if _visited is None:
_visited = set()
_visited.add(id)
for row in edges[edges.ix[:, 0] == id].values:
if(row[1] not in _visited):
follow(row[1], edges, directed, _visited)
if not directed:
for row in edges[e... | 879,121 |
Return connected components from graph determined by edges matrix
Args:
edges: DataFrame of (undirected) edges.
vertices: set of vertices in graph. Defaults to union of all vertices in edges.
Returns:
set of connected components, each of which is a set of vertices. | def get_components(edges, vertices=None):
if vertices is None:
vertices = set(chain(edges.ix[:, 0], edges.ix[:, 1]))
visited = set()
components = []
for id in vertices:
if id not in visited:
c = follow(id, edges)
visited.update(c)
components.app... | 879,122 |
safely load steps in place, excluding those that fail
Args:
steps: the steps to load | def load(steps, reload=False):
# work on collections by default for fewer isinstance() calls per call to load()
if reload:
_STEP_CACHE.clear()
if callable(steps):
steps = steps()
if not isinstance(steps, collections.Iterable):
return load([steps])[0]
loaded = []
f... | 879,209 |
Merges results to form arguments to run(). There are two cases for each result:
- dictionary: dictionaries get merged and passed as keyword arguments
- list: lists get concatenated to positional arguments
- Arguments: kwargs gets merged and args gets appended
- else: concatenated and... | def merge_results(inputs, arguments=None):
if arguments is None:
arguments = Arguments()
args = arguments.args
kwargs = arguments.kwargs
for i in inputs:
# without a mapping we handle two cases
# when the result is a dict merge it with a glo... | 879,211 |
Run this step, recursively running or loading inputs.
Used in bin/run_step.py which is run by drake.
Args:
inputs: collection of steps that should be loaded
output: step that should be dumped after it is run
load_targets (boolean): load all steps which are targets.
... | def execute(self, inputs=None, output=None, load_targets=False):
if self == output:
if os.path.exists(self._dump_dirname):
shutil.rmtree(self._dump_dirname)
if os.path.exists(self._target_filename):
os.remove(self._target_filename)
os.... | 879,217 |
Searches the tree for a step
Args:
value: The value to search for. If value is a string then the search looks for
a step of that name. If the value is a type, it looks for a step
of that type.
Returns: The first step found via a depth-first search. | def get_input(self, value, _search=None):
if _search is None:
if isinstance(value, string_types):
_search = lambda s: s.name # noqa: E731
elif isinstance(value, type):
_search = type
for i in self.inputs:
step = i.get_input(v... | 879,218 |
Fetches the row-aggregated input columns for this ColumnFunction.
Args:
aggregator (Aggregator)
Returns:
pd.DataFrame: The dataframe has columns with names self.names
that were created by this ColumnFunction,
and is indexed by the index that was ... | def apply_and_name(self, aggregator):
reduced_df = self._apply(aggregator)
if len(self.names) != len(reduced_df.columns):
raise IndexError("ColumnFunction creates more columns than it has names for.")
reduced_df.columns = self.names
return reduced_df | 879,252 |
This function gets called by ColumnFunction._apply(). After a ColumnFunction
has been passed to Aggregator's constructor, the ColumnFunction can use this function
to request the populated, aggregated columns that correspond to its ColumnReductions.
Args:
column_reduction (list[Colum... | def get_reduced(self, column_reductions):
for cr in column_reductions:
if cr not in self.column_reductions:
raise ValueError("Column reduction %r is not known to this Aggregator!" % cr)
return self.reduced_df[column_reductions] | 879,255 |
Performs a groupby of the unique Columns by index, as constructed from self.df.
Args:
index (str, or pd.Index): Index or column name of self.df.
Returns:
pd.DataFrame: A dataframe, aggregated by index, that contains the result
of the various ColumnFunctions, and... | def aggregate(self, index):
# deal with index as a string vs index as a index/MultiIndex
if isinstance(index, string_types):
col_df_grouped = self.col_df.groupby(self.df[index])
else:
self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
... | 879,256 |
Parse molfile part into molecule object
Args:
lines (list): lines of molfile part
Raises:
ValueError: Symbol not defined in periodictable.yaml
(Polymer expression not supported yet) | def molecule(lines):
count_line = lines[3]
num_atoms = int(count_line[0:3])
num_bonds = int(count_line[3:6])
# chiral_flag = int(count_line[12:15]) # Not used
# num_prop = int(count_line[30:33]) # "No longer supported"
compound = Compound()
compound.graph._node = atoms(lines[4: num_at... | 879,284 |
Yields molecules generated from CTAB text
Args:
lines (iterable): CTAB text lines
no_halt (boolean):
True: shows warning messages for invalid format and go on.
False: throws an exception for it and stop parsing.
assign_descriptors (boolean):
if True, defa... | def mol_supplier(lines, no_halt, assign_descriptors):
def sdf_block(lns):
mol = []
opt = []
is_mol = True
for line in lns:
if line.startswith("$$$$"):
yield mol[:], opt[:]
is_mol = True
mol.clear()
opt.c... | 879,285 |
Python 2/3 friendly decoding of output.
Args:
value (str | unicode | bytes | None): The value to decode.
strip (bool): If True, `strip()` the returned string. (Default value = False)
Returns:
str: Decoded value, if applicable. | def decode(value, strip=False):
if value is None:
return None
if isinstance(value, bytes) and not isinstance(value, unicode):
value = value.decode("utf-8")
if strip:
return unicode(value).strip()
return unicode(value) | 879,379 |
Conveniently set one or more fields at a time.
Args:
*args: Optionally set from other objects, available fields from the passed object are used in order
**kwargs: Set from given key/value pairs (only names defined in __slots__ are used) | def set(self, *args, **kwargs):
if args:
for arg in args:
if arg is not None:
for name in self.__slots__:
self._set(name, getattr(arg, name, UNSET))
for name in kwargs:
self._set(name, kwargs.get(name, UNSET)) | 879,384 |
Load step from yaml file
Args:
filename: a target or step.yaml filename | def load(filename):
yaml_filename = os.path.join(os.path.dirname(filename), 'step.yaml')
with open(yaml_filename) as f:
return yaml.load(f) | 879,456 |
connect atom group (for SMILES parser)
May requires recalculation of 2D coordinate for drawing
Args:
mol: graphmol.Compound()
the original object will be copied.
bond: Bond object to be connected.
the original will not be copied so be careful.
... | def add_molecule(self, mol, bond=None, base=None, target=None):
ai = self.available_idx()
mapping = {n: n + ai - 1 for n, _ in mol.atoms_iter()}
relabeled = nx.relabel_nodes(mol.graph, mapping) # copy=True
self.graph.add_nodes_from(relabeled.nodes(data=True))
self.graph... | 879,661 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.