code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .metrics import bbox_iou
from .tal import bbox2dist
class VarifocalLoss(nn.Module):
    """Varifocal loss by Zhang et al. (https://arxiv.org/abs/2008.13367).

    Weights the BCE term so positives are scaled by their target quality
    score and negatives by a focal-style factor of the predicted score.
    """

    def __init__(self):
        """Initialize the module (holds no learnable parameters)."""
        super().__init__()

    def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0):
        """Return the summed varifocal loss between logits and target scores."""
        focal_weight = gt_score * label + alpha * (1 - label) * pred_score.sigmoid().pow(gamma)
        # BCE is evaluated in full precision even under AMP for numerical stability.
        with torch.cuda.amp.autocast(enabled=False):
            bce = F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none')
        return (bce * focal_weight).sum()
class BboxLoss(nn.Module):
    """CIoU regression loss with an optional Distribution Focal Loss term."""

    def __init__(self, reg_max, use_dfl=False):
        """reg_max: number of DFL bins minus one; use_dfl: enable the DFL term."""
        super().__init__()
        self.reg_max = reg_max
        self.use_dfl = use_dfl

    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
        """Return (iou_loss, dfl_loss), each normalized by target_scores_sum."""
        # Per-foreground-anchor weight = summed class score of the assigned target.
        weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
        iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
        if not self.use_dfl:
            return loss_iou, torch.tensor(0.0).to(pred_dist.device)
        target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
        dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
        return loss_iou, dfl.sum() / target_scores_sum

    @staticmethod
    def _df_loss(pred_dist, target):
        """Distribution Focal Loss from Generalized Focal Loss
        (https://ieeexplore.ieee.org/document/9792391).

        Each continuous target is interpolated between its two neighbouring
        integer bins; returns the weighted sum of both cross-entropy terms,
        averaged over the last axis.
        """
        left = target.long()           # lower integer bin
        right = left + 1               # upper integer bin
        w_left = right - target        # weight toward the lower bin
        w_right = 1 - w_left           # weight toward the upper bin
        ce_left = F.cross_entropy(pred_dist, left.view(-1), reduction='none').view(left.shape)
        ce_right = F.cross_entropy(pred_dist, right.view(-1), reduction='none').view(left.shape)
        return (ce_left * w_left + ce_right * w_right).mean(-1, keepdim=True)
from copy import deepcopy
import numpy as np
import torch
from ultralytics.yolo.utils import LOGGER, colorstr
from ultralytics.yolo.utils.torch_utils import profile
def check_train_batch_size(model, imgsz=640, amp=True):
    """Return the optimal YOLOv5 training batch size for `model` at image size
    `imgsz`, probing CUDA memory under (optional) AMP autocast."""
    with torch.cuda.amp.autocast(amp):
        probe_model = deepcopy(model).train()  # profile a training-mode copy, not the caller's model
        return autobatch(probe_model, imgsz)  # compute optimal batch size
def autobatch(model, imgsz=640, fraction=0.7, batch_size=16):
    """Automatically estimate the best YOLOv5 batch size that uses `fraction`
    of the available CUDA memory.

    Args:
        model (nn.Module): model to profile (already on its target device).
        imgsz (int): square image size used for the profiling passes.
        fraction (float): fraction of free CUDA memory to target.
        batch_size (int): fallback batch size when estimation is impossible.

    Returns:
        int: estimated optimal batch size, or `batch_size` on any failure.

    Usage:
        import torch
        model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
        print(autobatch(model))
    """
    # Check device
    prefix = colorstr('AutoBatch: ')
    LOGGER.info(f'{prefix}Computing optimal batch size for imgsz={imgsz}')
    device = next(model.parameters()).device  # get model device
    if device.type == 'cpu':
        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
        return batch_size
    if torch.backends.cudnn.benchmark:
        # benchmark mode re-tunes kernels per shape, invalidating the memory profile
        LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')

    # Profile memory usage at increasing batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)

        # Fit memory(batch) with a first-degree polynomial and solve for the
        # target memory. Pair sizes with results BEFORE filtering failures so
        # the x and y vectors stay aligned even if a middle size fails.
        xy = [(b, res[2]) for b, res in zip(batch_sizes, results) if res]  # (batch, GiB) pairs
        p = np.polyfit([x for x, _ in xy], [y for _, y in xy], deg=1)
        b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
        if None in results:  # some sizes failed
            i = results.index(None)  # first fail index
            if b >= batch_sizes[i]:  # estimate lies above the failure point
                b = batch_sizes[max(i - 1, 0)]  # select prior safe point
        if b < 1 or b > 1024:  # b outside of safe range
            b = batch_size
            LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.')

        fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
        LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
        return b
    except Exception as e:
        LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.')
        return batch_size
# Trainer callbacks ----------------------------------------------------------------------------------------------------
# Default no-op hooks. Integrations (ClearML, Comet, HUB, TensorBoard) append
# their own callables to these hook names via add_integration_callbacks().


def on_pretrain_routine_start(trainer):
    """Called before the pretraining routine starts."""
    pass


def on_pretrain_routine_end(trainer):
    """Called after the pretraining routine ends."""
    pass


def on_train_start(trainer):
    """Called when training starts."""
    pass


def on_train_epoch_start(trainer):
    """Called at the start of each training epoch."""
    pass


def on_train_batch_start(trainer):
    """Called at the start of each training batch."""
    pass


def optimizer_step(trainer):
    """Called when the optimizer takes a step."""
    pass


def on_before_zero_grad(trainer):
    """Called before gradients are zeroed."""
    pass


def on_train_batch_end(trainer):
    """Called at the end of each training batch."""
    pass


def on_train_epoch_end(trainer):
    """Called at the end of each training epoch."""
    pass


def on_fit_epoch_end(trainer):
    """Called at the end of each fit epoch (train + val)."""
    pass


def on_model_save(trainer):
    """Called when a model checkpoint is saved."""
    pass


def on_train_end(trainer):
    """Called when training ends."""
    pass


def on_params_update(trainer):
    """Called when model parameters are updated."""
    pass


def teardown(trainer):
    """Called during trainer teardown."""
    pass


# Validator callbacks --------------------------------------------------------------------------------------------------


def on_val_start(validator):
    """Called when validation starts."""
    pass


def on_val_batch_start(validator):
    """Called at the start of each validation batch."""
    pass


def on_val_batch_end(validator):
    """Called at the end of each validation batch."""
    pass


def on_val_end(validator):
    """Called when validation ends."""
    pass


# Predictor callbacks --------------------------------------------------------------------------------------------------


def on_predict_start(predictor):
    """Called when prediction starts."""
    pass


def on_predict_batch_start(predictor):
    """Called at the start of each prediction batch."""
    pass


def on_predict_batch_end(predictor):
    """Called at the end of each prediction batch."""
    pass


def on_predict_postprocess_end(predictor):
    """Called after prediction postprocessing completes."""
    pass


def on_predict_end(predictor):
    """Called when prediction ends."""
    pass


# Exporter callbacks ---------------------------------------------------------------------------------------------------


def on_export_start(exporter):
    """Called when model export starts."""
    pass


def on_export_end(exporter):
    """Called when model export ends."""
    pass
# Default callback registry: maps each hook name to the list of callables run
# at that point of the trainer/validator/predictor/exporter lifecycle.
default_callbacks = {
    # Run in trainer
    'on_pretrain_routine_start': [on_pretrain_routine_start],
    'on_pretrain_routine_end': [on_pretrain_routine_end],
    'on_train_start': [on_train_start],
    'on_train_epoch_start': [on_train_epoch_start],
    'on_train_batch_start': [on_train_batch_start],
    'optimizer_step': [optimizer_step],
    'on_before_zero_grad': [on_before_zero_grad],
    'on_train_batch_end': [on_train_batch_end],
    'on_train_epoch_end': [on_train_epoch_end],
    'on_fit_epoch_end': [on_fit_epoch_end],  # fit = train + val
    'on_model_save': [on_model_save],
    'on_train_end': [on_train_end],
    'on_params_update': [on_params_update],
    'teardown': [teardown],

    # Run in validator
    'on_val_start': [on_val_start],
    'on_val_batch_start': [on_val_batch_start],
    'on_val_batch_end': [on_val_batch_end],
    'on_val_end': [on_val_end],

    # Run in predictor
    'on_predict_start': [on_predict_start],
    'on_predict_batch_start': [on_predict_batch_start],
    'on_predict_postprocess_end': [on_predict_postprocess_end],
    'on_predict_batch_end': [on_predict_batch_end],
    'on_predict_end': [on_predict_end],

    # Run in exporter
    'on_export_start': [on_export_start],
    'on_export_end': [on_export_end]}
def add_integration_callbacks(instance):
    """Register all third-party integration callbacks on `instance.callbacks`,
    skipping any callback that is already registered for its hook."""
    from .clearml import callbacks as clearml_callbacks
    from .comet import callbacks as comet_callbacks
    from .hub import callbacks as hub_callbacks
    from .tensorboard import callbacks as tb_callbacks

    integrations = (clearml_callbacks, comet_callbacks, hub_callbacks, tb_callbacks)
    for callback_map in integrations:
        for event, func in callback_map.items():
            hooks = instance.callbacks[event]
            if func not in hooks:  # prevent duplicate callbacks addition
                hooks.append(func)
import os
import platform
import shutil
import sys
import threading
import time
from pathlib import Path
from random import random
import requests
from tqdm import tqdm
from ultralytics.yolo.utils import (DEFAULT_CFG_DICT, ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING,
TQDM_BAR_FORMAT, TryExcept, __version__, colorstr, emojis, get_git_origin_url,
is_colab, is_git_dir, is_pip_package)
PREFIX = colorstr('Ultralytics HUB: ')  # coloured prefix prepended to all HUB log messages
HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.'
# API root; overridable for testing/staging via the ULTRALYTICS_HUB_API env var
HUB_API_ROOT = os.environ.get('ULTRALYTICS_HUB_API', 'https://api.ultralytics.com')
def check_dataset_disk_space(url='https://ultralytics.com/assets/coco128.zip', sf=2.0):
    """Check that the dataset at `url` fits on disk with safety factor `sf`,
    i.e. require 2 GB free if the url size is 1 GB with sf=2.0.

    Args:
        url (str): dataset download URL (size read from the Content-Length header).
        sf (float): safety factor multiplied onto the dataset size.

    Returns:
        bool: True if sufficient free disk space is available, else False.
    """
    gib = 1 << 30  # bytes per GiB
    data = int(requests.head(url).headers['Content-Length']) / gib  # dataset size (GiB)
    total, used, free = (x / gib for x in shutil.disk_usage('/'))  # disk usage (GiB)
    LOGGER.info(f'{PREFIX}{data:.3f} GB dataset, {free:.1f}/{total:.1f} GB free disk space')
    if data * sf < free:
        return True  # sufficient space
    LOGGER.warning(f'{PREFIX}WARNING: Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, '
                   f'training cancelled ❌. Please free {data * sf - free:.1f} GB additional disk space and try again.')
    return False  # insufficient space
def request_with_credentials(url: str) -> any:
    """ Make an ajax request with cookies attached

    Runs a browser-side fetch() inside Google Colab so the user's existing
    session cookies are sent with the request (credentials: 'include').
    Only works in a Colab environment; raises OSError elsewhere.
    NOTE(review): the `any` return annotation is the builtin, not typing.Any —
    presumably typing.Any was intended; verify before relying on it.
    """
    if not is_colab():
        raise OSError('request_with_credentials() must run in a Colab environment')
    from google.colab import output  # noqa
    from IPython import display  # noqa
    # Inject JS that POSTs from the browser context and stores the resulting
    # promise in window._hub_tmp; rejects after a 5 s authentication timeout.
    display.display(
        display.Javascript("""
            window._hub_tmp = new Promise((resolve, reject) => {
                const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
                fetch("%s", {
                    method: 'POST',
                    credentials: 'include'
                })
                    .then((response) => resolve(response.json()))
                    .then((json) => {
                        clearTimeout(timeout);
                    }).catch((err) => {
                        clearTimeout(timeout);
                        reject(err);
                    });
            });
            """ % url))
    # Block until the promise settles and return its resolved JSON value.
    return output.eval_js('_hub_tmp')
def split_key(key=''):
    """
    Verify and split an 'api_key[sep]model_id' string, where sep is one of '.' or '_'.

    Args:
        key (str): The model key to split. If not provided, the user will be prompted to enter it.

    Returns:
        Tuple[str, str]: A tuple containing the API key and model ID.

    Raises:
        AssertionError: If no valid separator is found or either part is empty.
    """
    import getpass
    error_string = emojis(f'{PREFIX}Invalid API key ⚠️\n')  # shared failure message
    if not key:
        key = getpass.getpass('Enter model key: ')
    sep = '_' if '_' in key else '.' if '.' in key else None  # detect separator
    assert sep, error_string
    api_key, model_id = key.split(sep)
    assert len(api_key) and len(model_id), error_string
    return api_key, model_id
def requests_with_progress(method, url, **kwargs):
    """
    Make an HTTP request using the specified method and URL, with an optional progress bar.

    Args:
        method (str): The HTTP method to use (e.g. 'GET', 'POST').
        url (str): The URL to send the request to.
        progress (bool, optional): Whether to display a progress bar. Defaults to False.
        **kwargs: Additional keyword arguments passed to `requests.request`.

    Returns:
        requests.Response: The response from the HTTP request.
    """
    progress = kwargs.pop('progress', False)
    if not progress:
        return requests.request(method, url, **kwargs)
    # Stream the body so the bar can advance per downloaded chunk.
    response = requests.request(method, url, stream=True, **kwargs)
    total = int(response.headers.get('content-length', 0))  # total size in bytes
    with tqdm(total=total, unit='B', unit_scale=True, unit_divisor=1024, bar_format=TQDM_BAR_FORMAT) as pbar:
        for chunk in response.iter_content(chunk_size=1024):
            pbar.update(len(chunk))
    return response
def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, progress=False, **kwargs):
    """
    Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout.

    Args:
        method (str): The HTTP method to use for the request. Choices are 'post' and 'get'.
        url (str): The URL to make the request to.
        retry (int, optional): Number of retries to attempt before giving up. Default is 3.
        timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30.
        thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True.
        code (int, optional): An identifier for the request, used for logging purposes. Default is -1.
        verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True.
        progress (bool, optional): Whether to show a progress bar during the request. Default is False.
        **kwargs: Keyword arguments to be passed to the requests function specified in method.

    Returns:
        requests.Response: The HTTP response object. If the request is executed in a separate thread, returns None.
    """
    retry_codes = (408, 500)  # retry only these codes

    @TryExcept(verbose=verbose)
    def func(func_method, func_url, **func_kwargs):
        # Inner worker so the whole retry loop can run in a daemon thread.
        r = None  # response
        t0 = time.time()  # initial time for timer
        for i in range(retry + 1):
            if (time.time() - t0) > timeout:
                break  # overall deadline exceeded; return last response (may be None)
            r = requests_with_progress(func_method, func_url, **func_kwargs)  # i.e. get(url, data, json, files)
            if r.status_code == 200:
                break
            # Extract a human-readable message from the (failed) response body.
            try:
                m = r.json().get('message', 'No JSON message.')
            except AttributeError:
                m = 'Unable to read JSON.'
            if i == 0:  # only log / early-return decisions on the first attempt
                if r.status_code in retry_codes:
                    m += f' Retrying {retry}x for {timeout}s.' if retry else ''
                elif r.status_code == 429:  # rate limit
                    h = r.headers  # response headers
                    m = f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " \
                        f"Please retry after {h['Retry-After']}s."
                if verbose:
                    LOGGER.warning(f'{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})')
                if r.status_code not in retry_codes:
                    return r  # non-retryable failure: give up immediately
            time.sleep(2 ** i)  # exponential standoff
        return r

    args = method, url
    kwargs['progress'] = progress
    if thread:
        # Fire-and-forget: caller gets None; daemon thread won't block interpreter exit.
        threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start()
    else:
        return func(*args, **kwargs)
class Traces:
    """Collects and rate-limits anonymous usage traces sent to the HUB API."""

    def __init__(self):
        """
        Initialize Traces for error tracking and reporting if tests are not currently running.
        """
        self.rate_limit = 3.0  # rate limit (seconds)
        self.t = 0.0  # rate limit timer (seconds)
        # Static environment metadata sent with every trace.
        self.metadata = {
            'sys_argv_name': Path(sys.argv[0]).name,
            'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other',
            'python': platform.python_version(),
            'release': __version__,
            'environment': ENVIRONMENT}
        # Only enabled for opted-in, rank-0, online, non-test official installs.
        self.enabled = \
            SETTINGS['sync'] and \
            RANK in {-1, 0} and \
            not TESTS_RUNNING and \
            ONLINE and \
            (is_pip_package() or get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git')

    def __call__(self, cfg, all_keys=False, traces_sample_rate=1.0):
        """
        Sync traces data if enabled in the global settings

        Args:
            cfg (IterableSimpleNamespace): Configuration for the task and mode.
            all_keys (bool): Sync all items, not just non-default values.
            traces_sample_rate (float): Fraction of traces captured from 0.0 to 1.0
        """
        t = time.time()  # current time
        if self.enabled and random() < traces_sample_rate and (t - self.t) > self.rate_limit:
            self.t = t  # reset rate limit timer
            cfg = vars(cfg)  # convert type from IterableSimpleNamespace to dict
            if not all_keys:  # filter cfg down to non-default values
                include_keys = {'task', 'mode'}  # always include
                # Path-like string values are reduced to their basename to avoid leaking paths.
                cfg = {
                    k: (v.split(os.sep)[-1] if isinstance(v, str) and os.sep in v else v)
                    for k, v in cfg.items() if v != DEFAULT_CFG_DICT.get(k, None) or k in include_keys}
            trace = {'uuid': SETTINGS['uuid'], 'cfg': cfg, 'metadata': self.metadata}

            # Send a request to the HUB API to sync analytics (fire-and-forget, no retries)
            smart_request('post', f'{HUB_API_ROOT}/v1/usage/anonymous', json=trace, code=3, retry=0, verbose=False)
# Run below code on hub/utils init -------------------------------------------------------------------------------------
traces = Traces()  # module-level singleton used by callbacks to sync anonymous usage traces
import requests
from ultralytics.hub.auth import Auth
from ultralytics.hub.session import HUBTrainingSession
from ultralytics.hub.utils import PREFIX, split_key
from ultralytics.yolo.engine.exporter import EXPORT_FORMATS_LIST
from ultralytics.yolo.engine.model import YOLO
from ultralytics.yolo.utils import LOGGER, emojis
# Define all export formats: engine formats plus HUB-specific TFLite/CoreML bundles
EXPORT_FORMATS_HUB = EXPORT_FORMATS_LIST + ['ultralytics_tflite', 'ultralytics_coreml']
def start(key=''):
    """
    Start training models with Ultralytics HUB. Usage: from ultralytics.hub import start; start('API_KEY')

    Args:
        key (str): 'api_key[sep]model_id' string; if empty or unauthenticated,
            the user is prompted interactively for an API key.
    """
    auth = Auth(key)
    if not auth.get_state():
        # No valid auth state yet -> prompt for an API key (also yields the model ID)
        model_id = request_api_key(auth)
    else:
        _, model_id = split_key(key)

    if not model_id:
        raise ConnectionError(emojis('Connecting with global API key is not currently supported. ❌'))

    session = HUBTrainingSession(model_id=model_id, auth=auth)
    session.check_disk_space()
    model = YOLO(model=session.model_file, session=session)
    model.train(**session.train_args)
def request_api_key(auth, max_attempts=3):
    """
    Prompt the user to input their API key. Returns the model ID on success.

    Args:
        auth (Auth): auth object whose `api_key` attribute is set from user input.
        max_attempts (int): number of login attempts before giving up.

    Raises:
        ConnectionError: if authentication fails after `max_attempts` tries.
    """
    import getpass
    for attempts in range(max_attempts):
        LOGGER.info(f'{PREFIX}Login. Attempt {attempts + 1} of {max_attempts}')
        input_key = getpass.getpass('Enter your Ultralytics HUB API key:\n')
        auth.api_key, model_id = split_key(input_key)
        if auth.authenticate():
            LOGGER.info(f'{PREFIX}Authenticated ✅')
            return model_id
        LOGGER.warning(f'{PREFIX}Invalid API key ⚠️\n')
    raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌'))
def reset_model(key=''):
    """Reset a trained HUB model (given by 'api_key[sep]model_id') to an untrained state."""
    api_key, model_id = split_key(key)
    r = requests.post('https://api.ultralytics.com/model-reset', json={'apiKey': api_key, 'modelId': model_id})
    if r.status_code != 200:
        LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}')
        return
    LOGGER.info(f'{PREFIX}Model reset successfully')
def export_model(key='', format='torchscript'):
    """Start a HUB export of the model identified by `key` to the given `format`.

    Raises:
        AssertionError: if `format` is unsupported or the API call fails.
    """
    assert format in EXPORT_FORMATS_HUB, f"Unsupported export format '{format}', valid formats are {EXPORT_FORMATS_HUB}"
    api_key, model_id = split_key(key)
    r = requests.post('https://api.ultralytics.com/export',
                      json={
                          'apiKey': api_key,
                          'modelId': model_id,
                          'format': format})
    assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}'
    LOGGER.info(f'{PREFIX}{format} export started ✅')
def get_export(key='', format='torchscript'):
    """Fetch an exported-model dictionary (including its download URL) from HUB."""
    assert format in EXPORT_FORMATS_HUB, f"Unsupported export format '{format}', valid formats are {EXPORT_FORMATS_HUB}"
    api_key, model_id = split_key(key)
    payload = {'apiKey': api_key, 'modelId': model_id, 'format': format}
    r = requests.post('https://api.ultralytics.com/get-export', json=payload)
    assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}'
    return r.json()
if __name__ == '__main__':
    start()
import copy
import cv2
import matplotlib.pyplot as plt
import numpy as np
from ultralytics.yolo.utils import LOGGER
class GMC:
    """Global (camera) Motion Compensation for multi-object tracking.

    Estimates a 2x3 affine warp between consecutive frames using one of:
    'orb', 'sift', 'ecc', 'sparseOptFlow', 'file' (precomputed), or 'none'.
    """

    def __init__(self, method='sparseOptFlow', downscale=2, verbose=None):
        """
        Args:
            method (str): motion-compensation method to use.
            downscale (int): factor by which frames are downscaled before processing.
            verbose: for 'file'/'files' mode, a (seqName, ablation) pair locating
                the precomputed GMC results file.
        """
        super().__init__()

        self.method = method
        self.downscale = max(1, int(downscale))

        if self.method == 'orb':
            self.detector = cv2.FastFeatureDetector_create(20)
            self.extractor = cv2.ORB_create()
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
        elif self.method == 'sift':
            self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
            self.matcher = cv2.BFMatcher(cv2.NORM_L2)
        elif self.method == 'ecc':
            number_of_iterations = 5000
            termination_eps = 1e-6
            self.warp_mode = cv2.MOTION_EUCLIDEAN
            self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
        elif self.method == 'sparseOptFlow':
            self.feature_params = dict(maxCorners=1000,
                                       qualityLevel=0.01,
                                       minDistance=1,
                                       blockSize=3,
                                       useHarrisDetector=False,
                                       k=0.04)
        elif self.method in ['file', 'files']:
            seqName = verbose[0]
            ablation = verbose[1]
            if ablation:
                filePath = r'tracker/GMC_files/MOT17_ablation'
            else:
                filePath = r'tracker/GMC_files/MOTChallenge'
            # Strip detector suffixes so all MOT17 variants share one GMC file.
            if '-FRCNN' in seqName:
                seqName = seqName[:-6]
            elif '-DPM' in seqName or '-SDP' in seqName:
                seqName = seqName[:-4]
            self.gmcFile = open(f'{filePath}/GMC-{seqName}.txt')
            if self.gmcFile is None:
                raise ValueError(f'Error: Unable to open GMC file in directory:{filePath}')
        elif self.method in ['none', 'None']:
            self.method = 'none'
        else:
            raise ValueError(f'Error: Unknown CMC method:{method}')

        # Per-sequence state carried between consecutive frames.
        self.prevFrame = None
        self.prevKeyPoints = None
        self.prevDescriptors = None
        self.initializedFirstFrame = False

    def apply(self, raw_frame, detections=None):
        """Dispatch to the configured method; returns a 2x3 warp matrix."""
        if self.method in ['orb', 'sift']:
            return self.applyFeaures(raw_frame, detections)
        elif self.method == 'ecc':
            return self.applyEcc(raw_frame, detections)
        elif self.method == 'sparseOptFlow':
            return self.applySparseOptFlow(raw_frame, detections)
        elif self.method == 'file':
            return self.applyFile(raw_frame, detections)
        elif self.method == 'none':
            return np.eye(2, 3)
        else:
            return np.eye(2, 3)

    def applyEcc(self, raw_frame, detections=None):
        """Estimate a Euclidean warp with cv2.findTransformECC; identity on failure."""
        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3, dtype=np.float32)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.initializedFirstFrame = True
            return H

        # Run the ECC algorithm; the result is stored in H.
        # NOTE(review): self.prevFrame is never updated after the first frame here,
        # so ECC always aligns against frame 0 — looks unintentional but is kept
        # to preserve existing behavior; confirm against the tracker's usage.
        try:
            (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
        except Exception as e:
            LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}')

        return H

    def applyFeaures(self, raw_frame, detections=None):
        """Estimate a partial-affine warp from matched ORB/SIFT keypoints.

        Detection boxes (if given) are masked out so foreground objects do not
        contaminate the camera-motion estimate.
        """
        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image (TODO: consider using pyramids)
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
            width = width // self.downscale
            height = height // self.downscale

        # Find keypoints inside a 2% border-exclusion mask, excluding detections.
        mask = np.zeros_like(frame)
        mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255
        if detections is not None:
            for det in detections:
                tlbr = (det[:4] / self.downscale).astype(np.int_)
                mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0

        keypoints = self.detector.detect(frame, mask)

        # Compute the descriptors
        keypoints, descriptors = self.extractor.compute(frame, keypoints)

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)
            self.initializedFirstFrame = True
            return H

        # Match descriptors
        knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)

        # Filter matches: Lowe ratio test plus a maximum spatial displacement
        # of 25% of the frame size per axis.
        matches = []
        spatialDistances = []
        maxSpatialDistance = 0.25 * np.array([width, height])

        # Handle empty matches case
        if len(knnMatches) == 0:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.prevDescriptors = copy.copy(descriptors)
            return H

        for m, n in knnMatches:
            if m.distance < 0.9 * n.distance:
                prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
                currKeyPointLocation = keypoints[m.trainIdx].pt
                spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],
                                   prevKeyPointLocation[1] - currKeyPointLocation[1])
                if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \
                        (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):
                    spatialDistances.append(spatialDistance)
                    matches.append(m)

        # Keep matches whose displacement lies within 2.5 sigma of the mean.
        meanSpatialDistances = np.mean(spatialDistances, 0)
        stdSpatialDistances = np.std(spatialDistances, 0)
        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances

        goodMatches = []
        prevPoints = []
        currPoints = []
        for i in range(len(matches)):
            if inliers[i, 0] and inliers[i, 1]:
                goodMatches.append(matches[i])
                prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                currPoints.append(keypoints[matches[i].trainIdx].pt)

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix (needs more than 4 equally-sized point sets)
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale: scale translation back to full resolution
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning('WARNING: not enough matching points')

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)
        self.prevDescriptors = copy.copy(descriptors)

        return H

    def applySparseOptFlow(self, raw_frame, detections=None):
        """Estimate a partial-affine warp from sparse Lucas-Kanade optical flow."""
        # Initialize
        height, width, _ = raw_frame.shape
        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
        H = np.eye(2, 3)

        # Downscale image
        if self.downscale > 1.0:
            frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))

        # Find the keypoints
        keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)

        # Handle first frame
        if not self.initializedFirstFrame:
            self.prevFrame = frame.copy()
            self.prevKeyPoints = copy.copy(keypoints)
            self.initializedFirstFrame = True
            return H

        # Find correspondences
        matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)

        # Keep only successfully tracked correspondences
        prevPoints = []
        currPoints = []
        for i in range(len(status)):
            if status[i]:
                prevPoints.append(self.prevKeyPoints[i])
                currPoints.append(matchedKeypoints[i])

        prevPoints = np.array(prevPoints)
        currPoints = np.array(currPoints)

        # Find rigid matrix (needs more than 4 equally-sized point sets)
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)

            # Handle downscale: scale translation back to full resolution
            if self.downscale > 1.0:
                H[0, 2] *= self.downscale
                H[1, 2] *= self.downscale
        else:
            LOGGER.warning('WARNING: not enough matching points')

        # Store to next iteration
        self.prevFrame = frame.copy()
        self.prevKeyPoints = copy.copy(keypoints)

        return H

    def applyFile(self, raw_frame, detections=None):
        """Read the next precomputed 2x3 warp from the open GMC results file."""
        line = self.gmcFile.readline()
        tokens = line.split('\t')  # columns: frame_id, then the 6 warp entries
        H = np.eye(2, 3, dtype=np.float_)
        H[0, 0] = float(tokens[1])
        H[0, 1] = float(tokens[2])
        H[0, 2] = float(tokens[3])
        H[1, 0] = float(tokens[4])
        H[1, 1] = float(tokens[5])
        H[1, 2] = float(tokens[6])

        return H
import numpy as np
import scipy.linalg
# Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9)
# Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold.
# Keys are the measurement dimensionality N; values are the 95% gating distances.
chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919}
class KalmanFilterXYAH:
"""
For bytetrack
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, a, h, vx, vy, va, vh
contains the bounding box center position (x, y), aspect ratio a, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, a, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, a, h) with center position (x, y),
aspect ratio a, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2,
2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous
time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2,
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5,
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
# mean = np.dot(self._motion_mat, mean)
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1,
self._std_weight_position * mean[3]]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
    def multi_predict(self, mean, covariance):
        """Run Kalman filter prediction step (Vectorized version).
        Parameters
        ----------
        mean : ndarray
            The Nx8 dimensional mean matrix of the object states at the previous
            time step.
        covariance : ndarray
            The Nx8x8 dimensional covariance matrices of the object states at the
            previous time step.
        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.
        """
        # Per-track process noise, scaled by each track's height (column 3).
        std_pos = [
            self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3],
            1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]]
        std_vel = [
            self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3],
            1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]]
        # np.r_ stacks to shape (8, N); transpose to (N, 8) so each row holds
        # the noise std deviations for one track.
        sqr = np.square(np.r_[std_pos, std_vel]).T
        # One 8x8 diagonal process-noise matrix per track.
        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)
        # Batched x' = x F^T for all tracks at once.
        mean = np.dot(mean, self._motion_mat.T)
        # Batched P' = F P F^T + Q over the leading track axis.
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov
        return mean, covariance
    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.
        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, a, h), where (x, y)
            is the center position, a the aspect ratio, and h the height of the
            bounding box.
        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.
        """
        projected_mean, projected_cov = self.project(mean, covariance)
        # Kalman gain K = P H^T S^-1, computed via a Cholesky factorization of
        # the projected covariance S instead of an explicit matrix inverse.
        chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve((chol_factor, lower),
                                             np.dot(covariance, self._update_mat.T).T,
                                             check_finite=False).T
        # Innovation: measurement minus the prediction in measurement space.
        innovation = measurement - projected_mean
        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance
    def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'):
        """Compute gating distance between state distribution and measurements.
        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.
        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, a, h) where (x, y) is the bounding box center
            position, a the aspect ratio, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.
        metric : str
            'gaussian' for squared Euclidean distance, 'maha' for squared
            Mahalanobis distance.
        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        # Compare in measurement space.
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]
        d = measurements - mean
        if metric == 'gaussian':
            return np.sum(d * d, axis=1)
        elif metric == 'maha':
            # Solve L z = d with S = L L^T; then ||z||^2 equals the squared
            # Mahalanobis distance d^T S^-1 d.
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
            return np.sum(z * z, axis=0)  # square maha
        else:
            raise ValueError('invalid distance metric')
class KalmanFilterXYWH:
    """
    For BoT-SORT
    A simple Kalman filter for tracking bounding boxes in image space.
    The 8-dimensional state space
        x, y, w, h, vx, vy, vw, vh
    contains the bounding box center position (x, y), width w, height h,
    and their respective velocities.
    Object motion follows a constant velocity model. The bounding box location
    (x, y, w, h) is taken as direct observation of the state space (linear
    observation model).
    """

    def __init__(self):
        ndim, dt = 4, 1.
        # Create Kalman filter model matrices.
        # Constant-velocity motion model: each position component picks up
        # dt * its corresponding velocity component.
        self._motion_mat = np.eye(2 * ndim, 2 * ndim)
        for i in range(ndim):
            self._motion_mat[i, ndim + i] = dt
        # Observation model selects (x, y, w, h) from the 8-dim state.
        self._update_mat = np.eye(ndim, 2 * ndim)
        # Motion and observation uncertainty are chosen relative to the current
        # state estimate. These weights control the amount of uncertainty in
        # the model. This is a bit hacky.
        self._std_weight_position = 1. / 20
        self._std_weight_velocity = 1. / 160

    def initiate(self, measurement):
        """Create track from unassociated measurement.
        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, w, h) with center position (x, y),
            width w, and height h.
        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.
        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]
        # Initial uncertainty scales with the measured width (index 2) and
        # height (index 3).
        std = [
            2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance

    def predict(self, mean, covariance):
        """Run Kalman filter prediction step.
        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.
        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.
        """
        # Process noise scales with current width/height.
        std_pos = [
            self._std_weight_position * mean[2], self._std_weight_position * mean[3],
            self._std_weight_position * mean[2], self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
        # x' = x F^T (equivalent to F x for a 1-D state vector).
        mean = np.dot(mean, self._motion_mat.T)
        # P' = F P F^T + Q
        covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
        return mean, covariance

    def project(self, mean, covariance):
        """Project state distribution to measurement space.
        Parameters
        ----------
        mean : ndarray
            The state's mean vector (8 dimensional array).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        Returns
        -------
        (ndarray, ndarray)
            Returns the projected mean and covariance matrix of the given state
            estimate.
        """
        # Measurement noise, proportional to current width/height.
        std = [
            self._std_weight_position * mean[2], self._std_weight_position * mean[3],
            self._std_weight_position * mean[2], self._std_weight_position * mean[3]]
        innovation_cov = np.diag(np.square(std))
        mean = np.dot(self._update_mat, mean)
        covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
        return mean, covariance + innovation_cov

    def multi_predict(self, mean, covariance):
        """Run Kalman filter prediction step (Vectorized version).
        Parameters
        ----------
        mean : ndarray
            The Nx8 dimensional mean matrix of the object states at the previous
            time step.
        covariance : ndarray
            The Nx8x8 dimensional covariance matrices of the object states at the
            previous time step.
        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.
        """
        # Per-track process noise, scaled by each track's width/height.
        std_pos = [
            self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3],
            self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]]
        std_vel = [
            self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3],
            self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]]
        # np.r_ stacks to (8, N); transpose to (N, 8): one noise row per track.
        sqr = np.square(np.r_[std_pos, std_vel]).T
        # One 8x8 diagonal process-noise matrix per track.
        motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
        motion_cov = np.asarray(motion_cov)
        # Batched x' = x F^T for all tracks at once.
        mean = np.dot(mean, self._motion_mat.T)
        # Batched P' = F P F^T + Q over the leading track axis.
        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
        covariance = np.dot(left, self._motion_mat.T) + motion_cov
        return mean, covariance

    def update(self, mean, covariance, measurement):
        """Run Kalman filter correction step.
        Parameters
        ----------
        mean : ndarray
            The predicted state's mean vector (8 dimensional).
        covariance : ndarray
            The state's covariance matrix (8x8 dimensional).
        measurement : ndarray
            The 4 dimensional measurement vector (x, y, w, h), where (x, y)
            is the center position, w the width, and h the height of the
            bounding box.
        Returns
        -------
        (ndarray, ndarray)
            Returns the measurement-corrected state distribution.
        """
        projected_mean, projected_cov = self.project(mean, covariance)
        # Kalman gain K = P H^T S^-1 via Cholesky factorization of S rather
        # than an explicit matrix inverse.
        chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
        kalman_gain = scipy.linalg.cho_solve((chol_factor, lower),
                                             np.dot(covariance, self._update_mat.T).T,
                                             check_finite=False).T
        innovation = measurement - projected_mean
        new_mean = mean + np.dot(innovation, kalman_gain.T)
        new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
        return new_mean, new_covariance

    def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'):
        """Compute gating distance between state distribution and measurements.
        A suitable distance threshold can be obtained from `chi2inv95`. If
        `only_position` is False, the chi-square distribution has 4 degrees of
        freedom, otherwise 2.
        Parameters
        ----------
        mean : ndarray
            Mean vector over the state distribution (8 dimensional).
        covariance : ndarray
            Covariance of the state distribution (8x8 dimensional).
        measurements : ndarray
            An Nx4 dimensional matrix of N measurements, each in
            format (x, y, w, h) where (x, y) is the bounding box center
            position, w the width, and h the height.
        only_position : Optional[bool]
            If True, distance computation is done with respect to the bounding
            box center position only.
        metric : str
            'gaussian' for squared Euclidean distance, 'maha' for squared
            Mahalanobis distance.
        Returns
        -------
        ndarray
            Returns an array of length N, where the i-th element contains the
            squared Mahalanobis distance between (mean, covariance) and
            `measurements[i]`.
        """
        mean, covariance = self.project(mean, covariance)
        if only_position:
            mean, covariance = mean[:2], covariance[:2, :2]
            measurements = measurements[:, :2]
        d = measurements - mean
        if metric == 'gaussian':
            return np.sum(d * d, axis=1)
        elif metric == 'maha':
            # Solve L z = d with S = L L^T; ||z||^2 is the squared
            # Mahalanobis distance.
            cholesky_factor = np.linalg.cholesky(covariance)
            z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
            return np.sum(z * z, axis=0)  # square maha
        else:
            raise ValueError('invalid distance metric')
Roaring Bitmap in Cython
========================
A roaring bitmap is an efficient compressed data structure to store a set
of integers. A roaring bitmap stores a set of 32-bit integers in a series of
arrays and bitmaps, whichever takes the least space (which is always
``2 ** 16`` bits or less).
This data structure is useful for storing a large number of integers, e.g., for
an inverted index used by search engines and databases. In particular, it is
possible to quickly compute the intersection of a series of sets, which can be
used to implement a query as the conjunction of subqueries.
This implementation is based on the Java and C implementations at
https://github.com/lemire/RoaringBitmap
and https://github.com/lemire/CRoaring
Additional features of this implementation:
- Inverted list representation: blocks that are mostly full are stored
compactly as an array of non-members (instead of as an array of members or a
fixed-size bitmap).
- Collections of immutable roaring bitmaps can be efficiently serialized with
``mmap`` in a single file.
Missing features w.r.t. CRoaring:
- Run-length encoded blocks
- Various AVX2 / SSE optimizations
See also PyRoaringBitmap, a Python wrapper of CRoaring:
https://github.com/Ezibenroc/PyRoaringBitMap
License, requirements
---------------------
The code is licensed under GNU GPL v2, or any later version at your option.
- Python 2.7+/3.3+ http://www.python.org (headers required, e.g. python-dev package)
- Cython 0.20+ http://www.cython.org
Installation, usage
-------------------
::
$ git clone https://github.com/andreasvc/roaringbitmap.git
$ cd roaringbitmap
$ make
(or ``make py2`` for Python 2)
A ``RoaringBitmap()`` can be used as a replacement for a normal (mutable)
Python set containing (unsigned) 32-bit integers:
.. code-block:: python
>>> from roaringbitmap import RoaringBitmap
>>> RoaringBitmap(range(10)) & RoaringBitmap(range(5, 15))
RoaringBitmap({5, 6, 7, 8, 9})
``ImmutableRoaringBitmap`` is an immutable variant (analogous to ``frozenset``)
which is stored compactly as a contiguous block of memory.
A sequence of immutable RoaringBitmaps can be stored in a single file and
accessed efficiently with ``mmap``, without needing to copy or deserialize:
.. code-block:: python
>>> from roaringbitmap import MultiRoaringBitmap
>>> mrb = MultiRoaringBitmap([range(n, n + 5) for n in range(10)], filename='index')
>>> mrb = MultiRoaringBitmap.fromfile('index')
>>> mrb[5]
ImmutableRoaringBitmap({5, 6, 7, 8, 9})
For API documentation cf. http://roaringbitmap.readthedocs.io
Benchmarks
----------
Output of ``$ make bench``::
small sparse set
100 runs with sets of 200 random elements n s.t. 0 <= n < 40000
set() RoaringBitmap() ratio
init 0.000834 0.00138 0.603
initsort 0.00085 0.000394 2.16
and 0.00102 8.49e-05 12.1
or 0.00171 0.000169 10.1
xor 0.00152 0.000213 7.11
sub 0.000934 0.000197 4.74
iand 1.29e-05 2.97e-06 4.35
ior 9.7e-06 3.26e-06 2.98
ixor 8.98e-06 3.43e-06 2.62
isub 6.83e-06 3.3e-06 2.07
eq 0.000438 1.17e-05 37.6
neq 6.37e-06 7.81e-06 0.816
jaccard 0.0029 0.000126 23.1
medium load factor
100 runs with sets of 59392 random elements n s.t. 0 <= n < 118784
set() RoaringBitmap() ratio
init 0.564 0.324 1.74
initsort 0.696 0.273 2.55
and 0.613 0.000418 1466
or 0.976 0.000292 3344
xor 0.955 0.000294 3250
sub 0.346 0.000316 1092
iand 0.00658 1.14e-05 575
ior 0.00594 1.08e-05 548
ixor 0.00434 1.12e-05 385
isub 0.00431 1.09e-05 397
eq 0.0991 0.000116 851
neq 9.62e-06 1.29e-05 0.743
jaccard 1.62 0.00025 6476
dense set / high load factor
100 runs with sets of 39800 random elements n s.t. 0 <= n < 40000
set() RoaringBitmap() ratio
init 0.33 0.0775 4.26
initsort 0.352 0.148 2.38
and 0.24 0.000223 1078
or 0.45 0.000165 2734
xor 0.404 0.000161 2514
sub 0.169 0.000173 973
iand 0.00287 6.02e-06 477
ior 0.00179 6.34e-06 282
ixor 0.00195 5.53e-06 353
isub 0.0017 6.35e-06 267
eq 0.0486 4.65e-05 1045
neq 1.01e-05 1.13e-05 0.888
jaccard 0.722 0.000118 6136
See https://github.com/Ezibenroc/roaring_analysis/ for a performance comparison
of PyRoaringBitmap and this library.
References
----------
- http://roaringbitmap.org/
- Chambi, S., Lemire, D., Kaser, O., & Godin, R. (2016). Better bitmap
performance with Roaring bitmaps. Software: practice and experience, 46(5),
pp. 709-719. http://arxiv.org/abs/1402.6407
- The idea of using the inverted list representation is based on
https://issues.apache.org/jira/browse/LUCENE-5983
| /roaringbitmap-0.7.2.tar.gz/roaringbitmap-0.7.2/README.rst | 0.890312 | 0.81772 | README.rst | pypi |
import socket
import os
import re
import sys
import logging
import atexit
from typing import Optional, List
from roast.xexpect import Xexpect
from roast.utils import get_base_name, get_original_path, is_file
log = logging.getLogger(__name__)
class Xsdb(Xexpect):
    """Interface to the Xilinx System Debugger (xsdb) command line.

    Spawns ``rdi_xsdb`` through an :class:`Xexpect` terminal session and wraps
    common debugger operations: hw_server setup/connection, target selection,
    memory read/write, elf/data/pdi download and device programming.
    """

    def __init__(
        self,
        config,
        hostname: str = socket.gethostname(),
        hwserver: Optional[str] = None,
        setup_hwserver: bool = False,
        port: str = "3121",
        prompt: str = "xsdb%",
    ):
        """
        Args:
            config: Configuration mapping; must provide ``vitisPath`` and,
                for :meth:`device_program`, ``imagesDir``.
            hostname: Host on which to spawn the session (default: this host).
            hwserver: Remote hw_server host to connect to, if any.
            setup_hwserver: When True, start a local hw_server instead of
                connecting to an existing one.
            port: hw_server TCP port.
            prompt: Expected xsdb prompt.
        """
        self.config = config
        self.hostname = hostname
        self.hwserver = hwserver
        self.setup_hwserver = setup_hwserver
        self.port = port
        self.init_prompt = prompt
        super().__init__(log, hostname=self.hostname, non_interactive=False)
        # Ensure the debugger session is torn down at interpreter exit.
        atexit.register(self.exit)
        self._setup()

    def _setup(self):
        """Export the Vitis environment, launch xsdb and connect or start hw_server."""
        # Init commands for xsdb to run in non-interactive mode
        vitisPath = self.config["vitisPath"]
        cmd_list = [
            f"export TCL_LIBRARY={vitisPath}/tps/tcl/tcl8.5",
            f"export RDI_DATADIR={vitisPath}/data",
            f"source {vitisPath}/settings64.sh",
            f"export HDI_APPROOT={vitisPath}",
            f"export LD_LIBRARY_PATH={vitisPath}/lib/lnx64.o",
        ]
        self.runcmd_list(cmd_list)
        self.prompt = self.init_prompt
        # Invoke RDI XSDB for non-interactive mode
        self.runcmd(f"{vitisPath}/bin/unwrapped/lnx64.o/rdi_xsdb")
        # Enable silent mode for non-interactive mode
        self.runcmd("configparams silent-mode 1")
        if self.setup_hwserver:
            self.hw_server_setup()  # setup hw_server when defined
        else:
            self.connect()  # Connect to hwserver instance

    def connect(self):
        """Connect to a hw_server (remote when ``hwserver`` is set, else local)."""
        connect_cmd = "connect "
        if self.hwserver is not None:
            connect_cmd += f"-host {self.hwserver} -port {self.port}"
        f_msgs = ["Connection refused"]
        self.runcmd(connect_cmd, expected_failures=f_msgs, expected=r"tcfchan\#")

    def hw_server_setup(self):
        """Start a hw_server on this host and wait for its ready banner."""
        f_msgs = [
            "child process exited abnormally",
            # Raw string: \d is a regex digit class, not a string escape.
            r'Device hw_server command: "hw_server" .*? exited with status [-+]?[1-9]\d*',
        ]
        expected = f"INFO: To connect to this hw_server instance use url: TCP:{self.hostname}:3121"
        self.runcmd(
            "hw_server",
            f_msgs,
            expected,
            wait_for_prompt=False,
            err_msg="hw_server setup failed!",
        )

    def alive(self) -> bool:
        """Return True if the xsdb session still responds to input."""
        expected = [self.init_prompt, self.hostname]
        if self.runcmd("\r\n", expected=expected, wait_for_prompt=False) == 0:
            return True
        return False

    def disconnect(self):
        """Disconnect from the current hw_server."""
        self.runcmd("disconnect")

    def con(self):
        """Resume (continue) execution on the active target."""
        self.runcmd("con")

    def stop(self):
        """Suspend (stop) execution on the active target."""
        self.runcmd("stop")

    def read(
        self, address: str, offset: int = 1, args: str = "-value -force"
    ) -> List[str]:
        """This Function is to read values from memory
        till offset and returns list of values
        Parameters:
            address - memory location to read value from
            offset - number of words to read; by default set to 1
            args - extra flags passed to ``mrd``
        """
        f_msgs = ["Memory read error"]
        self.runcmd(f"mrd {args} {address} {offset}", expected_failures=f_msgs)
        reg_val = self.terminal.before
        # strip() is equivalent to the old lstrip().rstrip() chain.
        return reg_val.strip().split(" ")

    # write, write to list
    def write(
        self, addr_value: dict, num_words: Optional[str] = None, args: str = ""
    ) -> None:
        """This Function takes dictionary, writes values to memory addresses
        Parameters:
            addr_value: Address, value dictionary
            num_words: optional word count appended to the ``mwr`` command
            args: extra flags passed to ``mwr``
        """
        f_msgs = [
            'Invalid target. Use "targets" command to select a target',
            "instead",
            "Memory write error",
        ]
        for addr, value in addr_value.items():
            if num_words is None:
                self.runcmd(f"mwr {args} {addr} {value}", expected_failures=f_msgs)
            else:
                self.runcmd(
                    f"mwr {args} {addr} {value} {num_words}", expected_failures=f_msgs
                )

    def memorymap(self, addr, size, flags="") -> None:
        """This Function takes address, size and flag values and does memory mapping
        Parameters:
            addr - memory location
            size - size
            flags - flag to be set
        """
        f_msgs = [
            'Invalid target. Use "targets" command to select a target',
            "instead",
            "Memory write error",
        ]
        self.runcmd(
            f"memmap -addr {addr} -size {size} -flags {flags}",
            expected_failures=f_msgs,
        )

    def mask_write(self, *address_values, args="") -> None:
        """This Function takes address values to perform mask write
        Parameters:
            address_values: address/mask/value strings in xsdb order
            args: unused, kept for interface compatibility
        """
        f_msgs = [
            'Invalid target. Use "targets" command to select a target',
            "instead",
            "Memory write error",
        ]
        # Join with single spaces; the old manual concatenation emitted a
        # doubled space after the command name.
        data = " ".join(address_values)
        self.runcmd(f"mask_write {data}", expected_failures=f_msgs)

    def get_proc(self):
        pass

    def set_proc(self, proc: str) -> None:
        """Select the debug target whose name matches *proc* (or an alias)."""
        # Map proc instances with simple keys
        proc_dict = {
            "versal": "Versal *",
            "a72_0": "Cortex-A72*#0",
            "a72_1": "Cortex-A72*#1",
            "a53_0": "Cortex-A53*#0",
            "a53_1": "Cortex-A53*#1",
            "r5_0": "Cortex-R5*#0",
            "r5_1": "Cortex-R5*#1",
            "a9_0": "*Cortex-A9*#0",
            "MB_PSM": "MicroBlaze PSM",
            "MB_PPU": "MicroBlaze PPU",
            "MB": "MicroBlaze*#0",
            "DPC": "DPC",
        }
        cmd = "targets -set -nocase -filter {name =~ "
        if proc in proc_dict:
            cmd += f'"{proc_dict[proc]}"' + "}"
        else:
            # Unknown alias: treat *proc* as a literal target name pattern.
            cmd += f'"{proc}"' + "}"
        f_msgs = ["no targets found"]
        self.runcmd(cmd, expected_failures=f_msgs)

    def rst_proc(self):
        """Reset the selected processor and clear its registers."""
        f_msgs = ["Invalid reset type", "Cannot reset"]
        self.runcmd("rst -proc -clear-registers", expected_failures=f_msgs)

    def rst_cores(self):
        """Reset all cores of the selected target group."""
        f_msgs = ["Invalid reset type", "Cannot reset"]
        self.runcmd("rst -cores", expected_failures=f_msgs)

    def run_tcl(
        self, tcl_file: str, expected: Optional[List[str]] = None, timeout: int = 400
    ):
        """Source *tcl_file* in xsdb and wait for *expected* (default 'SUCCESS').

        The default is applied inside the body to avoid a mutable default
        argument.
        """
        if expected is None:
            expected = ["SUCCESS"]
        tcl_file = get_original_path(tcl_file)
        self.runcmd(f"source {tcl_file}", expected=expected, timeout=timeout)

    def load_data(self, data_file: str, addr: str, timeout: int = 200) -> None:
        """Download a raw data file to memory at *addr*."""
        data_file = get_original_path(data_file)
        # assumes addr is a decimal string/number; int() rejects '0x..'
        # strings -- TODO confirm against callers.
        addr = hex(int(addr))
        f_msgs = [
            f"Failed to download {data_file}",
            "no such file or directory",
            "expected integer but got",
            "no such variable",
            "Memory write error",
        ]
        self.runcmd(
            f"dow -data -force {data_file} {addr}",
            expected_failures=f_msgs,
            timeout=timeout,
        )

    def load_elf(self, elf_file: str, timeout: int = 200) -> None:
        """Download an ELF executable to the selected target."""
        elf_file = get_original_path(elf_file)
        f_msgs = [
            f"Failed to download {elf_file}",
            "no such file or directory",
            "Memory write error",
        ]
        self.runcmd(f"dow -force {elf_file}", expected_failures=f_msgs, timeout=timeout)

    def device_program(self, pdi_file: str = None) -> None:
        """Program the device with a PDI (defaults to ``<imagesDir>/boot.pdi``).

        Raises:
            Exception: if programming fails; when possible, the PLM error
                status extracted from a register dump is raised instead of the
                raw expect failure.
        """
        if pdi_file is None:
            pdi_file = os.path.join(self.config["imagesDir"], "boot.pdi")
        pdi_file = get_original_path(pdi_file)
        assert is_file(
            filepath=pdi_file, silent_discard=False
        ), f"File does not exist: {pdi_file}"
        f_msgs = [
            "Configuration timed out waiting for DONE",
            "No supported device found",
            "PLM Error",
            "no such file or directory",
            "PLM stalled during programming",
        ]
        try:
            self.runcmd(f"device program {pdi_file}", expected_failures=f_msgs)
        except Exception as e:
            # On failure, dump registers at 0xf2019000 via the DPC target and
            # surface the PLM error status if one is reported.
            self.set_proc("DPC")
            self.runcmd("mrd -bin 0xf2019000 1000", expected="Register Dump")
            err_match_obj = re.search(
                "PLM Error Status: .*", self.output(), re.IGNORECASE
            )
            if err_match_obj:
                log.error(err_match_obj.group())
                raise Exception(err_match_obj.group())
            else:
                log.error(e)
                raise Exception(e) from None

    def fpga(self, bit_file, timeout=200) -> None:
        """This method is used to load bit stream in to target
        Parameters:
            bit_file : Path to bit file
        """
        bit_file = get_original_path(bit_file)
        f_msgs = [
            f"Failed to download {bit_file}",
            "no such file or directory",
            "bit stream is not compatible",
        ]
        self.runcmd(f"fpga -f {bit_file}", expected_failures=f_msgs)

    def exit(self):
        """Stop a locally started hw_server (if any) and quit the xsdb session."""
        if self.setup_hwserver:
            self.sendcontrol("c")
        if self.alive():
            self.runcmd(
                "exit", expected=self.hostname, wait_for_prompt=False, timeout=5
            )
import re
from math import ceil
class SerialFlash:
    """Driver for U-Boot ``sf`` (serial NOR flash) commands on a console."""

    # TODO
    # Make use of flash pattern and get all attributes dynamically from flash
    FLASH_PATTERN = re.compile(
        r"SF: Detected (.*?) with page size (.*?), erase \
size (.*?), total (.*?)$"
    )
    # 512 Kb max erase size in general flashes
    ERASE_SIZE = 512 * 1024

    def __init__(self, console):
        self.console = console
        self._probe()

    def _probe(self, cs=0, hz=0, mode=0):
        """Probe the serial flash so subsequent sf commands can use it.

        Args:
            cs(int, optional): chipselect
            hz(int, optional): Frequency in Hertz
            mode(int, optional): Mode bit
        """
        # Flush the prompt before probing.
        self.console.runcmd("\r\n")
        self.console.runcmd(f"sf probe {cs} {hz} {mode}", expected="SF: Detected")

    def read(self, addr, length, offset=0):
        """Read *length* bytes from flash *offset* into memory at *addr*.

        Args:
            addr(int): Memory address to load read data
            length(int): length of bytes to be read
            offset(int, optional): offset of flash from where to read data
        """
        command = f"sf read {addr} {offset} {length}"
        self.console.runcmd(command, expected="OK")

    def write(self, addr, length, offset=0):
        """Write *length* bytes from memory at *addr* to flash at *offset*.

        Args:
            addr(int): Memory address from where to write data
            length(int): length of bytes to be write
            offset(int, optional): offset of flash to where write data
        """
        command = f"sf write {addr} {offset} {length}"
        self.console.runcmd(command, expected="Written: OK", timeout=900)

    def erase(self, length, offset=0):
        """Erase *length* bytes (hex string) starting at flash *offset*.

        Args:
            length(str): hex string of bytes to be erased
            offset(int, optional): offset of flash from where erase start

        Note:
            Flashes erase whole blocks at a time, so the size is rounded up
            to the nearest multiple of ``ERASE_SIZE``.
        """
        blocks = ceil(int(length, 16) / self.ERASE_SIZE)
        length = hex(blocks * self.ERASE_SIZE)
        self.console.runcmd(f"sf erase {offset} {length}", expected="OK")

    def update(self, addr, length, offset=0):
        """Erase and rewrite *length* bytes from memory at *addr* to flash *offset*.

        Args:
            addr(int): Memory address from where to write data
            length(int): length of bytes to be write
            offset(int, optional): offset of flash to where write data
        """
        self.console.runcmd(f"sf update {addr} {offset} {length}")
class Mmc:
def __init__(self, console, instance=0):
self.console = console
self.instance = instance
self._setup()
def _setup(self):
self.console.runcmd(f"mmc dev {self.instance}", expected="OK")
def read(self, addr, length, offset=0):
""" Function to read 'length' of bytes starting from 'offset' to \
memory at 'addr'
Args:
addr(int): Memory address to load read data
length(int): length of bytes to be read
offset(int, optional): offset of mmc from where to read data
"""
self.console.runcmd(f"mmc read {addr} {offset} {length}", expected="OK")
def write(self, addr, length, offset=0):
""" Function to write 'length' of bytes from memory at 'addr' to \
mmc at 'offset'
Args:
addr(int): Memory address from where to write data
length(int): length of bytes to be write
offset(int, optional): offset of mmc to where write data
"""
self.console.runcmd(f"mmc write {addr} {offset} {length}", expected="OK")
def erase(self, length, offset=0):
"""Function to erase 'length' of bytes from flash 'offset'
Args:
length(int): length of bytes to be erased
offset(int, optional): offset of flash from where erase start
"""
self.console.runcmd(f"mmc erase {offset} {length}", expected="OK")
def switch(self, instance):
"""Function to switch instance of mmc device'
Args:
instance(int): instance to which we need to switch
"""
self.console.runcmd(f"mmc dev {instance}", expected="OK")
def hwpartition(self):
pass
def setdsr(self):
pass
class Fat:
    """Driver for U-Boot FAT filesystem operations on an mmc device.

    On construction, checks that the device is FAT32-formatted and, if not,
    formats it by running an xsdb tcl script (``fat_formatter``).
    """

    # Fixed destination filename used by fatwrite.
    BIN_FILE = "BOOT.BIN"

    def __init__(self, config, console, xsdb, bootmode, instance=0):
        self.config = config
        self.console = console
        self.xsdb = xsdb
        self.instance = instance
        self.bootmode = bootmode
        self._setup()

    def _setup(self):
        """ Check the flash whether its formatted to FAT32 or Not and format \
            it if its not Currently there are no commands available in uboot \
            to format Flash So using a tcl file to do that
        """
        try:
            self.console.runcmd(
                f"fatinfo mmc {self.instance}", expected="Filesystem: FAT32"
            )
        except:
            # Not FAT32 (or fatinfo failed): format via the xsdb tcl helper,
            # passing the boot mode as the script argument.
            self.xsdb.runcmd(f"set argv [list {self.bootmode} ]")
            self.xsdb.run_tcl(self.config["fat_formatter"])
            self.console.expect(expected="Successfully ran", wait_for_prompt=False)

    def write(self, addr, length, offset=0):
        """ Function to write 'length' of bytes from memory at 'addr' to \
            mmc at 'offset'
        Args:
            addr(int): Memory address from where to write data
            length(int): length of bytes to be write
            offset(int, optional): offset of mmc to where write data
        """
        # NOTE(review): the backslash continuation inside this f-string keeps
        # the next line's leading whitespace inside the command; U-Boot appears
        # to tolerate the extra spaces -- confirm before reformatting.
        self.console.runcmd(
            f"fatwrite mmc {self.instance} {addr} \
            {self.BIN_FILE} {length}",
            expected="bytes written",
        )

    def erase(self, length, offset=0):
        """Dummy function to sync with other flash classes"""
        pass

    def createdir(self):
        pass
from roast.providers.randomizer import Randomizer
from roast.utils import read_json
from box import Box
class BifProvider(Randomizer):
    """Class that contains bif file randomizer.

    Randomly reorders the top-level blocks of a bif description (and the
    components inside shuffled blocks), honoring per-block constraints:
    ``<name>.locked`` blocks keep their original position first,
    ``<name>.required`` blocks are always included, and
    ``<name>.dependents`` lists blocks that must immediately follow their
    parent block.
    """

    def __init__(self, randomize=True, *args, **kwargs) -> None:
        """Initialize attributes lazily.

        :param randomize: When False, :meth:`shuffle_sections` returns the
            input unchanged.
        :param args: Arguments.
        :param kwargs: Keyword arguments.
        """
        super().__init__(randomize=randomize, *args, **kwargs)

    class Meta:
        # Provider name used by the Randomizer registry.
        name = "bif"

    def shuffle_sections(self, bif, constraints):
        """Return a randomized tuple of bif blocks subject to *constraints*.

        ``bif`` is an indexable sequence of blocks; each block exposes
        ``header.name`` and a mutable ``components`` list.
        """
        if self.randomize:
            bif_random = tuple()
            # Map original index -> block name; blocks_mod tracks the blocks
            # not yet consumed (entries are deleted as blocks are placed).
            blocks = {i: block.header.name for i, block in enumerate(bif)}
            blocks_mod = blocks.copy()
            # add locked blocks
            # Locked blocks are emitted first, in original order, each
            # immediately followed by its declared dependents.
            for b_index, name in blocks.items():
                if constraints.get(f"{name}.locked", False):
                    block = bif[b_index]
                    if bif_random:
                        bif_random = bif_random + (block,)
                    else:
                        bif_random = (block,)
                    del blocks_mod[b_index]
                    dependents = constraints.get(f"{name}.dependents", [])
                    for dependent in dependents:
                        # Reverse lookup: find the index whose name matches.
                        d_index = list(blocks.keys())[
                            list(blocks.values()).index(dependent)
                        ]
                        bif_random = bif_random + (bif[d_index],)
                        del blocks_mod[d_index]
            blocks = blocks_mod.copy()
            # shuffle blocks
            # Randomize the order of the remaining (unlocked) blocks.
            keys = list(blocks_mod.items())
            self.random.shuffle(keys)
            blocks_mod = dict(keys)
            # add required blocks
            # Required blocks are always included (components shuffled),
            # each followed by its dependents (components shuffled too).
            for b_index, name in blocks.items():
                if constraints.get(f"{name}.required", False):
                    block = bif[b_index]
                    self.random.shuffle(block.components)
                    if bif_random:
                        bif_random = bif_random + (block,)
                    else:
                        bif_random = (block,)
                    del blocks_mod[b_index]
                    dependents = constraints.get(f"{name}.dependents", [])
                    for dependent in dependents:
                        d_index = list(blocks.keys())[
                            list(blocks.values()).index(dependent)
                        ]
                        block = bif[d_index]
                        self.random.shuffle(block.components)
                        bif_random = bif_random + (block,)
                        del blocks_mod[d_index]
            blocks = blocks_mod.copy()
            # randomly select remaining blocks
            # Each leftover block is included with 50% probability; excluded
            # blocks are simply dropped from the result.
            for b_index, name in blocks.items():
                if self.boolean():
                    block = bif[b_index]
                    self.random.shuffle(block.components)
                    if bif_random:
                        bif_random = bif_random + (block,)
                    else:
                        bif_random = (block,)
                    del blocks_mod[b_index]
                    dependents = constraints.get(f"{name}.dependents", [])
                    for dependent in dependents:
                        d_index = list(blocks.keys())[
                            list(blocks.values()).index(dependent)
                        ]
                        block = bif[d_index]
                        self.random.shuffle(block.components)
                        bif_random = bif_random + (block,)
                        del blocks_mod[d_index]
                else:
                    del blocks_mod[b_index]
            return bif_random
        else:
            return bif
<h3>Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License</h3>
<p>By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.</p>
<p id="s1">
<strong>Section 1 - Definitions.</strong>
</p>
<ol type="a">
<li id="s1a">
<strong>Adapted Material</strong>
means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
</li>
<li id="s1b">
<strong>Adapter's License</strong>
means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
</li>
<li id="s1c">
<strong>BY-NC-SA Compatible License</strong>
means a license listed at <a href="//creativecommons.org/compatiblelicenses">creativecommons.org/compatiblelicenses</a>
, approved by Creative Commons as essentially the equivalent of this Public License.
</li>
<li id="s1d">
<strong>Copyright and Similar Rights</strong>
means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section <a href="#s2b">2(b)(1)-(2)</a>
are not Copyright and Similar Rights.
</li>
<li id="s1e">
<strong>Effective Technological Measures</strong>
means those measures that, in the absence of proper authority, may not
be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar
international agreements.
</li>
<li id="s1f">
<strong>Exceptions and Limitations</strong>
means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
</li>
<li id="s1g">
<strong>License Elements</strong>
means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike.
</li>
<li id="s1h">
<strong>Licensed Material</strong>
means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
</li>
<li id="s1i">
<strong>Licensed Rights</strong>
means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
</li>
<li id="s1j">
<strong>Licensor</strong>
means the individual(s) or entity(ies) (= jvherck on GitHub) granting rights under this Public License.
</li>
<li id="s1k">
<strong>NonCommercial</strong>
means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
</li>
<li id="s1l">
<strong>Share</strong>
means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
</li>
<li id="s1m">
<strong>Sui Generis Database Rights</strong>
means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
</li>
<li id="s1n">
<strong>You</strong>
means the individual or entity exercising the Licensed Rights under this Public License. <strong>Your</strong>
has a corresponding meaning.
</li>
</ol>
<p id="s2">
<strong>Section 2 - Scope.</strong>
</p>
<ol type="a">
<li id="s2a">
<strong>License grant</strong>.
<ol>
<li id="s2a1">
Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
<ol type="A">
<li id="s2a1A">reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and</li>
<li id="s2a1B">produce, reproduce, and Share Adapted Material for NonCommercial purposes only.</li>
</ol>
<li id="s2a2">
<span style="text-decoration: underline;">Exceptions and Limitations</span>
. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
</li>
<li id="s2a3">
<span style="text-decoration: underline;">Term</span>
. The term of this Public License is specified in Section <a href="#s6a">6(a)</a>
.
</li>
<li id="s2a4">
<span style="text-decoration: underline;">Media and formats; technical modifications allowed</span>
. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section <a href="#s2a4">2(a)(4)</a>
never produces Adapted Material.
</li>
<li id="s2a5">
<span style="text-decoration: underline;">Downstream recipients</span>.
<div class="para">
<ol type="A">
<li id="s2a5A">
<span style="text-decoration: underline;">Offer from the Licensor - Licensed Material</span>
. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
</li>
<li id="s2a5B">
<span style="text-decoration: underline;">Additional offer from the Licensor - Adapted Material</span>
. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply.
</li>
<li id="s2a5C">
<span style="text-decoration: underline;">No downstream restrictions</span>
. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
</li>
</ol>
</div>
<li id="s2a6">
<span style="text-decoration: underline;">No endorsement</span>
. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section <a href="#s3a1Ai">3(a)(1)(A)(i)</a>
.
</li>
</ol>
<li id="s2b">
<p>
<strong>Other rights</strong>
.
</p>
<ol>
<li id="s2b1">Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.</li>
<li id="s2b2">Patent and trademark rights are not licensed under this Public License.</li>
<li id="s2b3">To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.</li>
</ol>
</li>
</ol>
<p id="s3">
<strong>Section 3 - License Conditions.</strong>
</p>
<p>Your exercise of the Licensed Rights is expressly made subject to the following conditions.</p>
<ol type="a">
<li id="s3a">
<p>
<strong>Attribution</strong>
.
</p>
<ol>
<li id="s3a1">
<p>If You Share the Licensed Material (including in modified form), You must:</p>
<ol type="A">
<li id="s3a1A">
retain the following if it is supplied by the Licensor with the Licensed Material:
<ol type="i">
<li id="s3a1Ai">identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);</li>
<li id="s3a1Aii">a copyright notice;</li>
<li id="s3a1Aiii">a notice that refers to this Public License; </li>
<li id="s3a1Aiv">a notice that refers to the disclaimer of warranties;</li>
<li id="s3a1Av">a URI or hyperlink to the Licensed Material to the extent reasonably practicable;</li>
</ol>
<li id="s3a1B">indicate if You modified the Licensed Material and retain an indication of any previous modifications; and</li>
<li id="s3a1C">indicate the Licensed Material is licensed under this Public License,
and include the text of, or the URI or hyperlink to, this Public
License.</li>
</ol>
</li>
<li id="s3a2">
You may satisfy the conditions in Section <a href="#s3a1">3(a)(1)</a>
in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
</li>
<li id="s3a3">
If requested by the Licensor, You must remove any of the information required by Section <a href="#s3a1A">3(a)(1)(A)</a>
to the extent reasonably practicable.
</li>
</ol>
</li>
<li id="s3b">
<strong>ShareAlike</strong>.
<p>
In addition to the conditions in Section <a href="#s3a">3(a)</a>
, if You Share Adapted Material You produce, the following conditions also apply.
</p>
<ol>
<li id="s3b1">The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License.</li>
<li id="s3b2">You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.</li>
<li id="s3b3">You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.</li>
</ol>
</li>
</ol>
<p id="s4">
<strong>Section 4 - Sui Generis Database Rights.</strong>
</p>
<p>Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:</p>
<ol type="a">
<li id="s4a">
for the avoidance of doubt, Section <a href="#s2a1">2(a)(1)</a>
grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only;
</li>
<li id="s4b">
if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section <a href="#s3b">3(b)</a>
; and
</li>
<li id="s4c">
You must comply with the conditions in Section <a href="#s3a">3(a)</a>
if You Share all or a substantial portion of the contents of the database.
</li>
</ol>
For the avoidance of doubt, this Section <a href="#s4">4</a>
supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
<p id="s5">
<strong>Section 5 - Disclaimer of Warranties and Limitation of Liability.</strong>
</p>
<ol style="font-weight: bold;" type="a">
<li id="s5a">
<strong>Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.</strong>
</li>
<li id="s5b">
<strong>To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.</strong>
</li>
</ol>
<ol start="3" type="a">
<li id="s5c">The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.</li>
</ol>
<p id="s6">
<strong>Section 6 - Term and Termination.</strong>
</p>
<ol type="a">
<li id="s6a">This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.</li>
<li id="s6b">
<p>
Where Your right to use the Licensed Material has terminated under Section <a href="#s6a">6(a)</a>
, it reinstates:
</p>
<ol>
<li id="s6b1">automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or</li>
<li id="s6b2">upon express reinstatement by the Licensor.</li>
</ol>
For the avoidance of doubt, this Section <a href="#s6b">6(b)</a>
does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
</li>
<li id="s6c">For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.</li>
<li id="s6d">
Sections <a href="#s1">1</a>
, <a href="#s5">5</a>
, <a href="#s6">6</a>
, <a href="#s7">7</a>
, and <a href="#s8">8</a>
survive termination of this Public License.
</li>
</ol>
<p id="s7">
<strong>Section 7 - Other Terms and Conditions.</strong>
</p>
<ol type="a">
<li id="s7a">The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.</li>
<li id="s7b">Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.</li>
</ol>
<p id="s8">
<strong>Section 8 - Interpretation.</strong>
</p>
<ol type="a">
<li id="s8a">For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.</li>
<li id="s8b">To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.</li>
<li id="s8c">No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.</li>
<li id="s8d">Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.</li>
</ol> | /roastedbyai-1.1.0.tar.gz/roastedbyai-1.1.0/LICENSE.md | 0.777258 | 0.794744 | LICENSE.md | pypi |
import geojson
import geomet.wkt as wkt
import geomet.wkb as wkb
import roax.schema as s
class _Object(s.type):
    """Base class for all GeoJSON objects.

    Wraps a roax dict schema describing the object's ``__geo_interface__``
    representation and provides JSON, WKT (string) and WKB (binary)
    encodings. ``python_type`` is the geojson class this schema maps to.
    """

    def __init__(
        self,
        *,
        python_type,
        content_type="application/json",
        props={},  # NOTE(review): mutable default; only read/spread below, never mutated
        required=set(),
        additional=False,
        **kwargs,
    ):
        super().__init__(python_type=python_type, content_type=content_type, **kwargs)
        # The "type" member must equal the concrete subclass name (GeoJSON
        # type tag); "bbox" is an optional list of ≥4 floats.
        self.schema = s.dict(
            {
                "type": s.str(enum={self.__class__.__name__}),
                "bbox": s.list(items=s.float(), min_items=4),
                **props,
            },
            required={"type"}.union(required),
            additional=additional,
        )

    def validate(self, value):
        """Validate a geojson object against the wrapped dict schema."""
        super().validate(value)
        return self.schema.validate(value.__geo_interface__)

    @property
    def json_schema(self):
        # Delegate JSON-schema generation to the wrapped dict schema.
        return self.schema.json_schema

    def json_encode(self, value):
        """Encode *value* as a JSON-compatible dict (its __geo_interface__)."""
        self.validate(value)
        return value.__geo_interface__

    def json_decode(self, value):
        """Decode a JSON dict into an instance of ``python_type``."""
        self.schema.validate(value)  # validate first to get nicer error messages
        return self.python_type.to_instance(value)

    def str_encode(self, value):
        """Encode *value* as a WKT (well-known text) string."""
        return wkt.dumps(self.json_encode(value))

    def str_decode(self, value):
        """Decode a WKT string; raises SchemaError on any parse/validation failure."""
        try:
            return self.json_decode(self.schema.strip(wkt.loads(value)))
        except Exception as e:
            raise s.SchemaError(
                f"invalid WKT representation of {self.__class__.__name__}"
            ) from e

    def bin_encode(self, value):
        """Encode *value* as WKB (well-known binary) bytes."""
        return wkb.dumps(self.json_encode(value))

    def bin_decode(self, value):
        """Decode WKB bytes; raises SchemaError on any parse/validation failure."""
        try:
            return self.json_decode(self.schema.strip(wkb.loads(value)))
        except Exception as e:
            raise s.SchemaError(
                f"invalid WKB representation of {self.__class__.__name__}"
            ) from e
class _Geometry(_Object):
    """Common base for concrete geometry schema types.

    Adds a mandatory ``coordinates`` property to the base GeoJSON object
    schema.
    """

    def __init__(self, python_type, coordinates_schema, **kwargs):
        props = {"coordinates": coordinates_schema}
        super().__init__(
            python_type=python_type, props=props, required={"coordinates"}, **kwargs
        )
class Point(_Geometry):
    """Schema for a single geographical point."""

    def __init__(self, **kwargs):
        coordinates = _PointCoordinates()
        super().__init__(geojson.Point, coordinates, **kwargs)
class _PointCoordinates(s.list):
    """Schema for a ``[longitude, latitude]`` coordinate pair.

    Validates that the value is exactly two floats within the valid WGS84
    ranges. (The original error messages contained mojibake "โค" where the
    "≤" character was intended.)
    """

    def __init__(self, **kwargs):
        super().__init__(items=s.float(), min_items=2, max_items=2, **kwargs)

    def validate(self, value):
        """Validate the pair; raise SchemaError for out-of-range values."""
        super().validate(value)
        # GeoJSON coordinate order: value[0] is longitude, value[1] is latitude.
        if not -180.0 <= value[0] <= 180.0:
            raise s.SchemaError("invalid longitude; must be -180.0 ≤ longitude ≤ 180.0")
        if not -90.0 <= value[1] <= 90.0:
            raise s.SchemaError("invalid latitude; must be -90.0 ≤ latitude ≤ 90.0")
class LineString(_Geometry):
    """Schema for a connected sequence of points."""

    def __init__(self, **kwargs):
        coordinates = _LineStringCoordinates()
        super().__init__(geojson.LineString, coordinates, **kwargs)
class _LineStringCoordinates(s.list):
    """Coordinates of a line string: a list of point coordinate pairs."""

    def __init__(self, **kwargs):
        point = _PointCoordinates()
        super().__init__(items=point, **kwargs)
class Polygon(_Geometry):
    """
    A linear ring and zero or more interior linear rings.

    Parameters:
    • min_rings: Minimum number of linear rings.
    • max_rings: Maximum number of linear rings.

    (The original docstring bullets and error messages contained mojibake
    "โข"/"โฅ" where "•"/"≥" were intended.)
    """

    def __init__(self, min_rings=1, max_rings=None, **kwargs):
        if min_rings < 1:
            raise ValueError("min_rings must be ≥ 1")
        if max_rings is not None and max_rings < min_rings:
            raise ValueError("max_rings must be ≥ min_rings")
        super().__init__(
            geojson.Polygon,
            _PolygonCoordinates(min_items=min_rings, max_items=max_rings),
            **kwargs,
        )
class _PolygonCoordinates(s.list):
    """Coordinates of a polygon: a list of linear rings."""

    def __init__(self, **kwargs):
        ring = _LinearRingCoordinates()
        super().__init__(items=ring, **kwargs)
class _LinearRingCoordinates(s.list):
    """Coordinates of a closed linear ring (≥ 4 points, first == last)."""

    def __init__(self, **kwargs):
        super().__init__(items=_PointCoordinates(), min_items=4, **kwargs)

    def validate(self, value):
        """Validate the ring and enforce the GeoJSON closure rule."""
        super().validate(value)
        first, last = value[0], value[-1]
        if first != last:
            raise s.SchemaError(
                "last point in linear ring must be the same as the first point"
            )
class MultiPoint(_Geometry):
    """Schema for a collection of points."""

    def __init__(self, **kwargs):
        coordinates = _MultiPointCoordinates()
        super().__init__(geojson.MultiPoint, coordinates, **kwargs)
class _MultiPointCoordinates(s.list):
    """Coordinates of a multi-point: a list of point coordinate pairs."""

    def __init__(self, **kwargs):
        point = _PointCoordinates()
        super().__init__(items=point, **kwargs)
class MultiLineString(_Geometry):
    """Schema for a collection of line strings."""

    def __init__(self, **kwargs):
        coordinates = _MultiLineStringCoordinates()
        super().__init__(geojson.MultiLineString, coordinates, **kwargs)
class _MultiLineStringCoordinates(s.list):
    """Coordinates of a multi-line-string: a list of line string coordinates."""

    def __init__(self, **kwargs):
        line = _LineStringCoordinates()
        super().__init__(items=line, **kwargs)
class MultiPolygon(_Geometry):
    """Schema for a collection of polygons."""

    def __init__(self, **kwargs):
        coordinates = _MultiPolygonCoordinates()
        super().__init__(geojson.MultiPolygon, coordinates, **kwargs)
class _MultiPolygonCoordinates(s.list):
    """Coordinates of a multi-polygon: a list of polygon coordinates."""

    def __init__(self, **kwargs):
        polygon = _PolygonCoordinates()
        super().__init__(items=polygon, **kwargs)
class GeometryCollection(_Object):
    """Schema for a collection of geometries."""

    def __init__(self, **kwargs):
        geometries = s.list(Geometry().schema)
        super().__init__(
            python_type=geojson.GeometryCollection,
            props={"geometries": geometries},
            required={"geometries"},
            **kwargs,
        )
class Geometry(s.one_of):
    """One of: Point, MultiPoint, LineString, MultiLineString, Polygon, MultiPolygon."""

    def __init__(self, **kwargs):
        members = {
            Point(),
            MultiPoint(),
            LineString(),
            MultiLineString(),
            Polygon(),
            MultiPolygon(),
        }
        super().__init__(members, **kwargs)
        # Expose a one_of over the member dict schemas so this type composes
        # with plain-dict validation like the other _Object subclasses.
        self.schema = s.one_of([sch.schema for sch in self.schemas], **kwargs)
class Feature(_Object):
    """Schema for a spatially bounded thing (GeoJSON Feature)."""

    def __init__(self, **kwargs):
        # "geometry" and "properties" are mandatory but nullable per GeoJSON;
        # "id" may be a string or a number.
        feature_props = {
            "geometry": Geometry(nullable=True).schema,
            "properties": s.dict(props={}, additional=True, nullable=True),
            "id": s.one_of({s.str(), s.int(), s.float()}),
        }
        super().__init__(
            python_type=geojson.Feature,
            props=feature_props,
            required={"geometry", "properties"},
            **kwargs,
        )
class FeatureCollection(_Object):
    """Schema for a collection of features."""

    def __init__(self, props={}, required=set(), **kwargs):
        # Callers may extend the schema with extra properties; "features"
        # is always present and mandatory.
        all_props = {"features": s.list(Feature().schema)}
        all_props.update(props)
        super().__init__(
            python_type=geojson.FeatureCollection,
            props=all_props,
            required={"features"}.union(set(required)),
            **kwargs,
        )
from flowserv.model.parameter.numeric import NUMERIC_TYPES
class ResultTable(object):
    """Result table for database queries. Maintains a list of result rows.
    Provides functionality to format rows for printing.
    """

    def __init__(self, headline, types):
        """Initialize the list of column names and the list of column types.

        Column type identifier are the same as those used for template
        parameter declarations. Both lists are expected to be of same length.

        Parameters
        ----------
        headline: list(string)
            List of column names
        types: list(string)
            List of column type identifier
        """
        # The header is always the first row in the table.
        self.rows = list([headline])
        self.types = types

    def add(self, row):
        """Add a row to the table. The length of the row is expected to be the
        same as the length of the table headline. That is, the row contains one
        value per column in the table.

        Parameters
        ----------
        row: list(string)
            List of column values
        """
        self.rows.append(row)

    def format(self):
        """Format the table rows in tabular format. Each row is a list of
        string values. All rows are expected to have the same length. The first
        row is the header that contains the column names.

        Returns
        -------
        list(string)
        """
        # Determine the longest value for each column.
        column_size = [0] * len(self.rows[0])
        for row in self.rows:
            for col in range(len(column_size)):
                vallen = len('{}'.format(row[col]))
                if vallen > column_size[col]:
                    column_size[col] = vallen
        # Format all rows. The separator line below the header is sized from
        # column_size; the original code used `len(row)` here, i.e. the loop
        # variable leaked from the width computation (the *last* data row).
        result = list([format_row(self.rows[0], column_size, self.types)])
        line = '-' * column_size[0]
        for i in range(1, len(column_size)):
            line += '-|-'
            line += '-' * column_size[i]
        result.append(line)
        for row in self.rows[1:]:
            result.append(format_row(row, column_size, self.types))
        return result
# ------------------------------------------------------------------------------
# Helper Methods
# ------------------------------------------------------------------------------
def align(type_id):
    """Return the format-spec alignment flag for a column type. Numeric
    types are right aligned ('>'); all other types are left aligned ('<').

    Parameters
    ----------
    type_id: string
        Type identifier (from set of valid type identifier in parameter
        declarations)

    Returns
    -------
    string
    """
    return '>' if type_id in NUMERIC_TYPES else '<'
def format_row(row, column_size, types):
    """Format a single table row. Each cell is padded to the width of its
    column and aligned according to its column type, with ' | ' between
    cells.

    Parameters
    ----------
    row: list(string)
        List of cell values in a table row
    column_size: list(int)
        List of column widths
    types: list(string)
        List of column type identifier

    Returns
    -------
    string
    """
    cells = [
        '{val: {align}{width}}'.format(val=val, align=align(type_id), width=width)
        for val, width, type_id in zip(row, column_size, types)
    ]
    return ' | '.join(cells)
def save_file(response, filename):
    """Stream the body of an HTTP response to a local file at *filename*.

    The response content is consumed in 128-byte chunks so large downloads
    are never held in memory at once. Based on:
    https://www.techcoil.com/blog/how-to-download-a-file-via-http-post-and-http-get-with-python-3-requests-library/
    """
    with open(filename, 'wb') as fout:
        for block in response.iter_content(chunk_size=128):
            fout.write(block)
from flask import Blueprint, jsonify, make_response, request, send_file
from flowserv.error import UnknownParameterError
from robflask.api.util import ACCESS_TOKEN, jsonbody
import flowserv.util as util
import flowserv.view.run as labels
import robflask.config as config
import robflask.error as err
bp = Blueprint('runs', __name__, url_prefix=config.API_PATH())
@bp.route('/groups/<string:group_id>/runs', methods=['GET'])
def list_runs(group_id):
    """Get a listing of all runs for a given submission. The user has to be a
    submission member in order to be authorized to list the runs.

    Parameters
    ----------
    group_id: string
        Unique submission (workflow group) identifier

    Returns
    -------
    flask.response_class
    """
    # Get the access token first to raise an error immediately if no token is
    # present (to avoid unnecessarily instantiating the service API).
    from robflask.service import service
    with service(access_token=ACCESS_TOKEN(request)) as api:
        # Authentication of the user from the expected api_token in the header
        # will fail if no token is given or if the user is not logged in.
        r = api.runs().list_runs(group_id=group_id)
        return make_response(jsonify(r), 200)
@bp.route('/groups/<string:group_id>/runs', methods=['POST'])
def start_run(group_id):
    """Start a new run. Expects argument values for each mandatory benchmark
    parameter in the request body. The user has to be a submission member in
    order to be authorized to start new submission runs.

    Parameters
    ----------
    group_id: string
        Unique submission (workflow group) identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequestError
    """
    # Get the access token first to raise an error immediately if no token is
    # present (to avoid unnecessarily instantiating the service API).
    token = ACCESS_TOKEN(request)
    # Verify that the request contains a valid Json object that contains a
    # optional list of workflow arguments.
    obj = jsonbody(request, optional=[labels.RUN_ARGUMENTS])
    args = obj[labels.RUN_ARGUMENTS] if labels.RUN_ARGUMENTS in obj else dict()
    from robflask.service import service
    with service(access_token=token) as api:
        # Authentication of the user from the expected api_token in the header
        # will fail if no token is given or if the user is not logged in.
        try:
            r = api.runs().start_run(group_id=group_id, arguments=args)
        except UnknownParameterError as ex:
            # Convert unknown parameter errors into invalid request errors
            # to avoid sending a 404 response
            raise err.InvalidRequestError(str(ex))
        return make_response(jsonify(r), 201)
@bp.route('/runs/<string:run_id>', methods=['GET'])
def get_run(run_id):
    """Get handle for a given run. The user has to be a member of the run
    submission in order to be authorized to access the run.

    Parameters
    ----------
    run_id: string
        Unique run identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    flowserv.error.UnauthorizedAccessError
    flowserv.error.UnknownWorkflowGroupError
    """
    # Get the access token first to raise an error immediately if no token is
    # present (to avoid unnecessarily instantiating the service API).
    from robflask.service import service
    with service(access_token=ACCESS_TOKEN(request)) as api:
        # Authentication of the user from the expected api_token in the header
        # will fail if no token is given or if the user is not logged in.
        r = api.runs().get_run(run_id=run_id)
        return make_response(jsonify(r), 200)
@bp.route('/runs/<string:run_id>', methods=['DELETE'])
def delete_run(run_id):
    """Delete the run with the given identifier. The user has to be a member of
    the run submission in order to be authorized to delete the run.

    Parameters
    ----------
    run_id: string
        Unique run identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    flowserv.error.UnauthorizedAccessError
    flowserv.error.UnknownWorkflowGroupError
    """
    # Get the access token first to raise an error immediately if no token is
    # present (to avoid unnecessarily instantiating the service API).
    from robflask.service import service
    with service(access_token=ACCESS_TOKEN(request)) as api:
        # Authentication of the user from the expected api_token in the header
        # will fail if no token is given or if the user is not logged in.
        api.runs().delete_run(run_id=run_id)
        # 204 No Content: deletion succeeded, empty response body.
        return make_response(jsonify(dict()), 204)
@bp.route('/runs/<string:run_id>', methods=['PUT'])
def cancel_run(run_id):
    """Cancel the run with the given identifier. The user has to be a member
    of the run submission in order to be authorized to cancel the run. An
    optional cancellation reason may be given in the request body.

    Parameters
    ----------
    run_id: string
        Unique run identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    flowserv.error.UnauthorizedAccessError
    flowserv.error.UnknownWorkflowGroupError
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    # If a Json body is present it has to contain the mandatory element
    # 'reason'.
    reason = None
    if request.json:
        try:
            doc = util.validate_doc(request.json, mandatory=['reason'])
            reason = doc['reason']
        except ValueError as ex:
            raise err.InvalidRequestError(str(ex))
    from robflask.service import service
    with service(access_token=access_token) as api:
        run_handle = api.runs().cancel_run(run_id=run_id, reason=reason)
    return make_response(jsonify(run_handle), 200)
@bp.route('/runs/<string:run_id>/downloads/archive')
def download_result_archive(run_id):
    """Send a gzipped tar archive containing all result files that were
    generated by a given workflow run.

    NOTE: At this point, the user is not authenticated for file downloads to
    allow download in the GUI via browser redirect.

    Parameters
    ----------
    run_id: string
        Unique run identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnknownWorkflowGroupError
    """
    from robflask.service import service
    with service() as api:
        archive_buffer = api.runs().get_result_archive(run_id=run_id)
        return send_file(
            archive_buffer.open(),
            as_attachment=True,
            attachment_filename='run.tar.gz',
            mimetype='application/gzip'
        )
@bp.route('/runs/<string:run_id>/downloads/files/<string:file_id>')
def download_result_file(run_id, file_id):
    """Download a resource file that was generated by a successful workflow
    run. The user has to be a member of the submission in order to be allowed
    to access files.

    NOTE: At this point, the user is not authenticated for file downloads to
    allow download in the GUI via browser redirect.

    Parameters
    ----------
    run_id: string
        Unique run identifier
    file_id: string
        Unique resource file identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    flowserv.error.UnauthorizedAccessError
    flowserv.error.UnknownWorkflowGroupError
    """
    # Fix: removed a leftover debugging print statement that wrote the
    # request parameters to stdout on every download.
    from robflask.service import service
    with service() as api:
        fh = api.runs().get_result_file(run_id=run_id, file_id=file_id)
        return send_file(
            fh.open(),
            as_attachment=True,
            attachment_filename=fh.name,
            mimetype=fh.mime_type
        )
from flask import Blueprint, jsonify, make_response, request, send_file
from werkzeug.utils import secure_filename
from flowserv.model.files.base import FlaskFile
from robflask.api.util import ACCESS_TOKEN
import robflask.config as config
import robflask.error as err
# Blueprint collecting all routes for uploaded submission files. All routes
# are mounted under the API path that is defined in the configuration.
bp = Blueprint('uploads', __name__, url_prefix=config.API_PATH())
@bp.route('/uploads/<string:group_id>/files', methods=['GET'])
def list_files(group_id):
    """List all files that have been uploaded for a given submission. The
    user has to be a member of the submission in order to be allowed to list
    its files.

    Parameters
    ----------
    group_id: string
        Unique submission identifier

    Returns
    -------
    flask.response_class
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated. Authentication fails inside the context
    # if the user is not logged in.
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        listing = api.uploads().list_uploaded_files(group_id=group_id)
    return make_response(jsonify(listing), 200)
@bp.route('/uploads/<string:group_id>/files', methods=['POST'])
def upload_file(group_id):
    """Upload a new file as part of a given submission. The user has to be a
    member of the submission in order to be allowed to upload files.

    Parameters
    ----------
    group_id: string
        Unique submission identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequestError
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    # The upload request has to contain a file object.
    if not request.files or 'file' not in request.files:
        raise err.InvalidRequestError('no file request')
    file = request.files['file']
    # A browser may submit an empty part without a filename.
    if file.filename == '':
        raise err.InvalidRequestError('empty file name')
    upload_name = secure_filename(file.filename)
    from robflask.service import service
    with service(access_token=access_token) as api:
        # Authentication of the user from the expected api_token in the
        # header will fail if the user is not logged in.
        doc = api.uploads().upload_file(
            group_id=group_id,
            file=FlaskFile(file),
            name=upload_name
        )
    return make_response(jsonify(doc), 201)
@bp.route(
    '/uploads/<string:group_id>/files/<string:file_id>',
    methods=['GET']
)
def download_file(group_id, file_id):
    """Download a file that was previously uploaded for a submission.

    NOTE: At this point, the user is not authenticated for file downloads to
    allow download in the GUI via browser redirect.

    Parameters
    ----------
    group_id: string
        Unique submission identifier
    file_id: string
        Unique file identifier

    Returns
    -------
    flask.response_class
    """
    from robflask.service import service
    with service() as api:
        fh = api.uploads().get_uploaded_file_handle(
            group_id=group_id,
            file_id=file_id
        )
        return send_file(
            fh.open(),
            as_attachment=True,
            attachment_filename=fh.name,
            mimetype=fh.mime_type
        )
@bp.route(
    '/uploads/<string:group_id>/files/<string:file_id>',
    methods=['DELETE']
)
def delete_file(group_id, file_id):
    """Delete a file that was previously uploaded for a submission. The user
    has to be a member of the submission in order to be allowed to delete
    files.

    Parameters
    ----------
    group_id: string
        Unique submission identifier
    file_id: string
        Unique file identifier

    Returns
    -------
    flask.response_class
    """
    # Authentication inside the service context fails if no access token is
    # given or if the user is not logged in.
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        api.uploads().delete_file(group_id=group_id, file_id=file_id)
    return make_response(jsonify(dict()), 204)
from flask import Blueprint, jsonify, make_response, request, send_file
from flowserv.model.template.schema import SortColumn
from robflask.api.util import ACCESS_TOKEN
import robflask.config as config
# Blueprint collecting all benchmark (workflow) routes. All routes are
# mounted under the API path that is defined in the configuration.
bp = Blueprint('workflows', __name__, url_prefix=config.API_PATH())
@bp.route('/workflows', methods=['GET'])
def list_benchmarks():
    """Get a listing of all available benchmarks. The listing is public and
    does not require the user to be authenticated.

    Returns
    -------
    flask.response_class
    """
    from robflask.service import service
    with service() as api:
        listing = api.workflows().list_workflows()
    return make_response(jsonify(listing), 200)
@bp.route('/workflows/<string:workflow_id>', methods=['GET'])
def get_benchmark(workflow_id):
    """Get the handle for a given benchmark. Benchmarks are available to
    everyone, independent of whether they are currently authenticated or
    not.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier

    Returns
    -------
    flask.response_class
    """
    # The access token is optional here; no error is raised if it is absent.
    access_token = ACCESS_TOKEN(request, raise_error=False)
    from robflask.service import service
    with service(access_token=access_token) as api:
        handle = api.workflows().get_workflow(workflow_id=workflow_id)
    return make_response(jsonify(handle), 200)
@bp.route('/workflows/<string:workflow_id>/leaderboard', methods=['GET'])
def get_leaderboard(workflow_id):
    """Get leader board for a given benchmark. Benchmarks and their results
    are available to everyone, independent of whether they are authenticated
    or not.

    The optional query parameter 'orderBy' contains a comma-separated list
    of column names, each optionally suffixed with the sort order (':asc'
    or ':desc'). The optional 'includeAll' flag is forwarded to the ranking
    service.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier

    Returns
    -------
    flask.response_class
    """
    order_by = request.args.get('orderBy')
    if order_by is not None:
        sort_columns = list()
        for col in order_by.split(','):
            sort_desc = None
            pos = col.find(':')
            if pos > -1:
                # Bug fix: previously only the ':asc' suffix was recognized;
                # a ':desc' suffix was left attached to the column name and
                # ended up as part of the SortColumn identifier.
                suffix = col[pos + 1:].lower()
                if suffix == 'asc':
                    sort_desc = False
                    col = col[:pos]
                elif suffix == 'desc':
                    sort_desc = True
                    col = col[:pos]
            sort_columns.append(SortColumn(col, sort_desc=sort_desc))
    else:
        sort_columns = None
    # The includeAll argument is a flag. If the argument is given without a
    # value the default is True. Otherwise, we expect a string that is equal
    # to 'true' (case-insensitive).
    include_all = request.args.get('includeAll')
    if include_all is not None:
        if include_all == '':
            include_all = True
        else:
            include_all = include_all.lower() == 'true'
    # Get serialization of the result ranking.
    from robflask.service import service
    with service() as api:
        ranking = api.workflows().get_ranking(
            workflow_id,
            order_by=sort_columns,
            include_all=include_all
        )
    return make_response(jsonify(ranking), 200)
@bp.route('/workflows/<string:workflow_id>/downloads/archive')
def download_benchmark_archive(workflow_id):
    """Send a gzipped tar archive containing all current resource files for
    a benchmark that were created during post-processing.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier

    Returns
    -------
    flask.response_class
    """
    from robflask.service import service
    with service() as api:
        fh = api.workflows().get_result_archive(workflow_id)
        return send_file(
            fh.open(),
            as_attachment=True,
            attachment_filename='results.tar.gz',
            mimetype='application/gzip'
        )
@bp.route('/workflows/<string:workflow_id>/downloads/files/<string:file_id>')
def get_benchmark_resource(workflow_id, file_id):
    """Download the current resource file for a benchmark resource that was
    created during post-processing.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier
    file_id: string
        Unique resource file identifier

    Returns
    -------
    flask.response_class
    """
    from robflask.service import service
    with service() as api:
        fh = api.workflows().get_result_file(
            workflow_id=workflow_id,
            file_id=file_id
        )
        return send_file(
            fh.open(),
            as_attachment=True,
            attachment_filename=fh.name,
            mimetype=fh.mime_type
        )
from flask import Blueprint, jsonify, make_response, request
from robflask.api.util import ACCESS_TOKEN, jsonbody
import flowserv.view.user as labels
import robflask.config as config
# Blueprint collecting all user management routes. All routes are mounted
# under the API path that is defined in the configuration.
bp = Blueprint('users', __name__, url_prefix=config.API_PATH())
@bp.route('/users', methods=['GET'])
def list_users():
    """Get a listing of all registered users. Only users that are registered
    and currently logged in are allowed to query the database. The optional
    'query' argument is passed through to the user service.

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        listing = api.users().list_users(query=request.args.get('query'))
    return make_response(jsonify(listing), 200)
@bp.route('/users/activate', methods=['POST'])
def activate_user():
    """Activate a newly registered user based on their unique user
    identifier that is given in the request body.

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequest
    """
    # The request body has to be a valid Json object that contains the user
    # identifier.
    doc = jsonbody(request, mandatory=[labels.USER_ID])
    # Activate the user in the database and return the serialized user
    # handle.
    from robflask.service import service
    with service() as api:
        user_handle = api.users().activate_user(user_id=doc[labels.USER_ID])
    return make_response(jsonify(user_handle), 200)
@bp.route('/users/login', methods=['POST'])
def login_user():
    """Authenticate a user based on the credentials in the request body and
    return the access token that the user will use in subsequent requests.

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequest
    """
    # The request body has to be a valid Json object with user name and
    # password.
    doc = jsonbody(request, mandatory=[labels.USER_NAME, labels.USER_PASSWORD])
    from robflask.service import service
    with service() as api:
        result = api.users().login_user(
            username=doc[labels.USER_NAME],
            password=doc[labels.USER_PASSWORD]
        )
    return make_response(jsonify(result), 200)
@bp.route('/users/logout', methods=['POST'])
def logout_user():
    """Logout the user that is identified by the access token in the request
    header.

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    """
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service() as api:
        result = api.users().logout_user(api_key=access_token)
    return make_response(jsonify(result), 200)
@bp.route('/users/register', methods=['POST'])
def register_user():
    """Create a new user. Raises an InvalidRequest error if the request body
    does not contain a Json object with a user name and password for the new
    user. If the request body contains the optional verify flag and the flag
    is set to False, the user will be activated immediately.

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequest
    """
    doc = jsonbody(
        request,
        mandatory=[labels.USER_NAME, labels.USER_PASSWORD],
        optional=[labels.VERIFY_USER]
    )
    # By default a newly registered user has to be verified before they can
    # log in.
    verify = bool(doc.get(labels.VERIFY_USER, True))
    # Register the user in the database and return the serialized user
    # handle.
    from robflask.service import service
    with service() as api:
        user_handle = api.users().register_user(
            username=doc[labels.USER_NAME],
            password=doc[labels.USER_PASSWORD],
            verify=verify
        )
    return make_response(jsonify(user_handle), 201)
@bp.route('/users/password/request', methods=['POST'])
def request_password_reset():
    """Request a password reset for a given user. The request body has to
    contain the name of the user whose password is being reset.

    Returns
    -------
    flask.response_class
    """
    doc = jsonbody(request, mandatory=[labels.USER_NAME])
    from robflask.service import service
    with service() as api:
        result = api.users().request_password_reset(
            username=doc[labels.USER_NAME]
        )
    return make_response(jsonify(result), 200)
@bp.route('/users/password/reset', methods=['POST'])
def reset_password():
    """Reset the password for a user. The user is identified by the request
    identifier in the request body. This identifier is expected to have been
    generated by a preceding password reset request.

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.ConstraintViolationError
    """
    doc = jsonbody(
        request,
        mandatory=[labels.REQUEST_ID, labels.USER_PASSWORD]
    )
    # Reset the password for the user that is identified by the request
    # identifier.
    from robflask.service import service
    with service() as api:
        result = api.users().reset_password(
            request_id=doc[labels.REQUEST_ID],
            password=doc[labels.USER_PASSWORD]
        )
    return make_response(jsonify(result), 200)
@bp.route('/users/whoami', methods=['GET'])
def whoami_user():
    """Get information about the user that is associated with the provided
    access token.

    Returns
    -------
    flask.response_class

    Raises
    ------
    flowserv.error.UnauthenticatedAccessError
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service() as api:
        user_handle = api.users().whoami_user(api_key=access_token)
    return make_response(jsonify(user_handle), 200)
from flask import Blueprint, jsonify, make_response, request
from flowserv.error import UnknownUserError
from robflask.api.util import ACCESS_TOKEN, jsonbody
import flowserv.view.group as labels
import robflask.config as config
import robflask.error as err
# Blueprint collecting all submission (workflow group) routes. All routes
# are mounted under the API path that is defined in the configuration.
bp = Blueprint('submissions', __name__, url_prefix=config.API_PATH())
@bp.route('/workflows/<string:workflow_id>/groups', methods=['POST'])
def create_submission(workflow_id):
    """Create a new submission for a given benchmark. The user has to be
    authenticated in order to be able to create a new submission.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier

    Returns
    -------
    flask.response_class

    Raises
    ------
    robflask.error.InvalidRequestError
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    # The request body has to contain the submission name and may contain an
    # optional list of member identifier.
    doc = jsonbody(
        request,
        mandatory=[labels.GROUP_NAME],
        optional=[labels.GROUP_MEMBERS]
    )
    group_name = doc[labels.GROUP_NAME]
    group_members = doc.get(labels.GROUP_MEMBERS)
    if group_members is not None and not isinstance(group_members, list):
        raise err.InvalidRequestError(
            '{} not a list'.format(labels.GROUP_MEMBERS)
        )
    from robflask.service import service
    with service(access_token=access_token) as api:
        try:
            handle = api.groups().create_group(
                workflow_id=workflow_id,
                name=group_name,
                members=group_members
            )
        except UnknownUserError as ex:
            # Change the error type from unknown object to invalid request
            # if a user in the member list is unknown.
            raise err.InvalidRequestError(str(ex))
    return make_response(jsonify(handle), 201)
@bp.route('/groups/<string:group_id>', methods=['DELETE'])
def delete_submission(group_id):
    """Delete the submission with the given identifier. The user has to be a
    submission member in order to be authorized to delete the submission.

    Parameters
    ----------
    group_id: string
        Unique submission identifier

    Returns
    -------
    flask.response_class
    """
    # Authentication inside the service context fails if no access token is
    # given or if the user is not logged in.
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        api.groups().delete_group(group_id=group_id)
    return make_response(jsonify(dict()), 204)
@bp.route('/groups/<string:group_id>', methods=['GET'])
def get_submission(group_id):
    """Get the handle for the submission with the given identifier. The user
    has to be authenticated in order to access a submission.

    Parameters
    ----------
    group_id: string
        Unique submission identifier

    Returns
    -------
    flask.response_class
    """
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        handle = api.groups().get_group(group_id=group_id)
    return make_response(jsonify(handle), 200)
@bp.route('/workflows/<string:workflow_id>/groups', methods=['GET'])
def list_submission(workflow_id):
    """Get a list of all submissions for a given benchmark. The user has to
    be authenticated in order to be able to access the submission list.

    Parameters
    ----------
    workflow_id: string
        Unique benchmark identifier

    Returns
    -------
    flask.response_class
    """
    access_token = ACCESS_TOKEN(request)
    from robflask.service import service
    with service(access_token=access_token) as api:
        listing = api.groups().list_groups(workflow_id=workflow_id)
    return make_response(jsonify(listing), 200)
@bp.route('/groups/<string:group_id>', methods=['PUT'])
def update_submission(group_id):
    """Update the submission with the given identifier. The request body may
    contain a modified submission name and/or a modified list of submission
    members.

    Parameters
    ----------
    group_id: string
        Unique submission identifier

    Returns
    -------
    flask.response_class
    """
    # Read the access token first so that a missing token raises before the
    # service API is instantiated.
    access_token = ACCESS_TOKEN(request)
    # The request body may contain an optional submission name and/or a list
    # of member identifier.
    doc = jsonbody(
        request,
        mandatory=[],
        optional=[labels.GROUP_NAME, labels.GROUP_MEMBERS]
    )
    from robflask.service import service
    with service(access_token=access_token) as api:
        handle = api.groups().update_group(
            group_id=group_id,
            name=doc.get(labels.GROUP_NAME),
            members=doc.get(labels.GROUP_MEMBERS)
        )
    return make_response(jsonify(handle), 200)
[](http://travis-ci.org/taoenator/robber.py)
[](https://coveralls.io/github/taoenator/robber.py?branch=master)
[](https://codeclimate.com/github/vesln/robber.py)
# robber.py - BDD / TDD assertion library for Python.
## Synopsis
In order to use `robber`, you need to import `expect`
from the module:
```python
from robber import expect
```
That's all. You are good to go.
### Assertions
#### eq/==
Asserts that actual is equal (==) to expected:
```python
expect(1).to.eq(1)
expect([1, 2]).to.eq([1, 2])
```
Also:
```python
expect(1) == 1
```
#### ne/!=
Asserts that actual is not equal (!=) to expected:
```python
expect(1).to.ne(2)
expect(1).to != 2
expect(1) != 2
```
#### equal
Asserts that the target is identical (is) to the expected:
```python
expect(1).to.equal(1)
```
#### true
Asserts that the target is True:
```python
expect(True).to.be.true()
```
#### false
Asserts that the target is False:
```python
expect(False).to.be.false()
```
#### instanceof
Asserts that the target is an instance of expected:
```python
expect(obj).to.be.instanceof(Klass)
```
#### match
Asserts that the target can be matched by a regular expression:
```python
expect('foo').to.match(r'foo')
```
#### respond_to
Asserts that the target responds to a method:
```python
expect(obj).to.respond_to('method')
```
#### truthy
Asserts that the target is truthy:
```python
expect(['test']).to.be.truthy()
```
#### falsy
Asserts that the target is falsy:
```python
expect([]).to.be.falsy()
```
#### length
Asserts that the target has a length of expected:
```python
expect([1, 2]).to.have.length(2)
expect('str').to.have.length(3)
```
#### empty
Asserts that the target is empty:
```python
expect([]).to.be.empty()
expect('').to.be.empty()
```
#### string
Asserts that the target is a string:
```python
expect('str').to.be.a.string()
```
#### integer
Asserts that the target is an integer:
```python
expect('str').to.be.an.integer()
```
#### float
Asserts that the target is floating point number:
```python
expect(1.0).to.be.a.float()
```
#### list
Asserts that the target is a list:
```python
expect([1, 2]).to.be.a.list()
```
#### dict
Asserts that the target is a dictionary:
```python
expect({}).to.be.a.dict()
```
#### tuple
Asserts that the target is a tuple:
```python
expect((1, 2)).to.be.a.tuple()
```
#### none
Asserts that the target is None:
```python
expect(None).to.be.none()
```
#### above
Asserts that the target is above expected:
```python
expect(2).to.be.above(1)
```
#### below
Asserts that the target is below expected:
```python
expect(1).to.be.below(2)
```
#### within
Asserts that the target is within expected:
```python
expect(2).to.be.within(0, 2)
```
#### contain
Asserts that the target contains an element, or a key:
```python
expect([1,2,3]).to.contain(1, 2, 3)
expect({'foo': 'bar', 'foo1': 'bar1'}).to.contain('foo', 'foo1')
```
#### exclude
Asserts that the target does not contain an element, or a key:
```python
expect({'foo': 'bar'}).to.exclude('baz')
```
#### throw
Asserts that the target throws an exception (or its subclass)
```python
expect(lambda: raise_exception(...)).to.throw(Exception)
expect(lambda: raise_exception(...)).to.throw(ParentException)
expect(any_callable).to.throw(Exception)
expect(any_callable).to.throw(ParentException)
```
#### throw_exactly
Asserts that the target throws exactly an exception (not its subclass)
```python
expect(lambda: raise_exception(...)).to.throw_exactly(Exception)
expect(any_callable).to.throw_exactly(Exception)
```
#### called
Asserts that a mock has been called
```python
expect(mock).to.be.called()
```
#### called_once
Asserts that a mock has been called exactly one time
```python
expect(mock).to.be.called_once()
```
#### callable
Asserts that a object is callable
```python
expect(object).to.be.callable()
```
#### called_with
Asserts that a mock has been called with params
```python
expect(mock).to.be.called_with(*args, **kwargs)
```
#### called_once_with
Asserts that a mock has been called once with params
```python
expect(mock).to.be.called_once_with(*args, **kwargs)
```
#### ever_called_with
Asserts that a mock has ever been called with params.
The call does not need to be the most recent one (the same as `assert_any_call`).
```python
expect(mock).to.have.been.ever_called_with(*args, **kwargs)
expect(mock).to.have.any_call(*args, **kwargs)
```
### Language chains
In order to write more readable assertions, there are a few
built-in language chains that you can use:
#### Positive chains
- to
- be
- been
- a
- an
- have
#### Negative chains
- not_to
For example, the following two lines are functionally equivalent:
```python
expect(1.0).to.be.a.float()
expect(1.0).float()
```
### Expectation chaining
In the spirit of more readable assertions, and to eliminate redundant
evaluations of the same expression, you can chain multiple expectations.
For example, the following two lines are functionally equivalent.
The first example evaluates the expression '1 + 1' only once:
```python
expect(1 + 1).to.be.an.integer().to.be.within(1, 3)
expect(1 + 1).to.be.an.integer()
expect(1 + 1).to.be.within(1, 3)
```
### Custom assertions
Writing custom assertion is as easy as extending a base
matcher class and adding the method `matches` for matching
and the property `explanation` for the error notice:
```python
class Chain(Base):
def matches(self):
expectation = self.actual(None)
chain = getattr(expectation, self.expected)
return expectation is chain
@property
def explanation(self):
return Explanation(self.actual, self.is_negative, 'have chain', self.expected)
expect.register('chain', Chain)
```
After you register the new matcher, you can use it as expected:
```python
expect(obj).to.have.chain('be')
```
### Custom error messages
If you want to have custom explanations, for
assertion or group of assertions, you can simply do:
```python
from robber import CustomExplanation
with CustomExplanation('Something went wrong'):
expect(1).to.eq(2)
```
## Installation
```bash
$ pip install robber
```
## Requirements
- Python 2.6, 2.7, 3.5 or 3.6
- pip
- nose (for testing)
## Tests
```bash
$ nosetests tests/
```
## License
MIT License
| /robber-1.1.4.tar.gz/robber-1.1.4/README.md | 0.83752 | 0.982288 | README.md | pypi |
# **ROBEL**: **Ro**botics **Be**nchmarks for **L**earning
ROBEL is an open-source platform of cost-effective robots and associated
reinforcement learning environments for benchmarking reinforcement learning in
the real world. It provides Gym-compliant environments that easily run in both
simulation (for rapid prototyping) and on real hardware. ROBEL robots are robust
and scalable - they have facilitated over 14000 hours (as of Aug'19) of
real-world training with various learning-based methods. Benchmarks using
several learning-based methods are provided for ease of comparison and
extensibility. Refer to [ROBEL's webpage](http://roboticsbenchmarks.org) for
full details.
## Robot Platforms
|  |
:----------------------:
| ROBEL robots: DโKitty (left) and DโClaw (middle and right) |
ROBEL introduces two robots that are modular, extensible, easy to build, and are
lower cost compared to many existing robotics research platforms:
1. **D'Claw** is a nine degree of freedom manipulation platform capable of
performing dexterous manipulation.
2. **D'Kitty** is a twelve degree of freedom quadruped capable of agile
locomotion.
## Features
1. **Gym Compliant** -- ROBEL environments are fully [Gym]-compliant and can be
used with any reinforcement learning library that interfaces with Gym
environments.
2. **Simulated backends** -- ROBEL also includes simulated equivalents of the
introduced benchmarks to facilitate prototyping and debugging needs.
Simulation backend is provided by [MuJoCo].
3. **Hardware interface** -- ROBEL is built using Dynamixel motors and
communicates with the hardware device through the [DynamixelSDK].
4. **External tracking support** -- For D'Kitty environments, external tracking
is supported through [OpenVR] tracking.
5. **Open-source design** -- The hardware design and build instructions are
fully open-sourced and are available for anyone to build their own robots.
[Gym]: https://gym.openai.com
[MuJoCo]: http://www.mujoco.org
[DynamixelSDK]: https://github.com/ROBOTIS-GIT/DynamixelSDK
[OpenVR]: https://github.com/ValveSoftware/openvr
# Getting started
## 1. Hardware assembly
Please refer to the
[Hardware Guide](http://roboticsbenchmarks.org/getting-started) for getting
started with the ROBEL hardware platforms.
## 2. Software installation
#### 2.1 MuJoCo
Download MuJoCo Pro 2.00 from the
[MuJoCo website](https://www.roboti.us/index.html). You should extract this
to `~/.mujoco/mujoco200`. Ensure your MuJoCo license key is placed at
`~/.mujoco/mjkey.txt`.
Add the following line to your `~/.bashrc` (or equivalent) in order for
`mujoco_py` to install properly:
```bash
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.mujoco/mujoco200/bin
```
Run `source ~/.bashrc` afterwards.
#### 2.2 ROBEL
ROBEL requires Python 3.5 or higher. You can install ROBEL by running:
``` bash
pip install robel
```
We recommend doing this in a `virtualenv` or a Conda environment to avoid
interfering with system dependencies or existing packages.
Alternatively, you can install directly from the repository by running:
```bash
git clone --recurse-submodules https://github.com/google-research/robel.git
pip install -e robel/
```
To run on hardware, additionally install the [DynamixelSDK] Python API:
```bash
pip install git+https://github.com/ROBOTIS-GIT/DynamixelSDK.git#subdirectory=python
```
#### 2.3 Example Usage
```python
import robel
import gym
# Create a simulation environment for the D'Claw turn task.
env = gym.make('DClawTurnFixed-v0')
# Create a hardware environment for the D'Claw turn task.
# `device_path` refers to the device port of the Dynamixel USB device.
# e.g. '/dev/ttyUSB0' for Linux, '/dev/tty.usbserial-*' for Mac OS.
env = gym.make('DClawTurnFixed-v0', device_path='/dev/ttyUSB0')
# Reset the environent and perform a random action.
env.reset()
env.step(env.action_space.sample())
```
Not specifying the `device_path` i.e. `env = gym.make('DClawTurnFixed-v0')`
creates the simulated equivalent of the above hardware environment. The
simulated and hardware environments have the same interface.
To interactively render a simulation environment, run:
```bash
python -m robel.scripts.rollout -e DClawTurnFixed-v0 --render
# Also try this with other tasks such as DKittyWalkFixed-v0
```
## Benchmark Tasks
### D'Claw
| Task | Description |
| --------- | ------------------------------------ |
| **Pose** | Match a set of joint positions. |
| **Turn** | Turn an object to a specified angle. |
| **Screw** | Continuously rotate an object. |
### D'Kitty
| Task | Description |
| ---------- | --------------------------------------------- |
| **Stand** | Stand upright. |
| **Orient** | Align heading with a target. |
| **Walk** | Walk to a target location. |
## Contributing
We designed ROBEL to be an easily extensible platform for new robots, tasks, and
benchmarks. See [`CONTRIBUTING.md`](CONTRIBUTING.md) for a guide
on how to contribute.
## Citation
```
@misc{ahn2019robel,
title={ROBEL: Robotics Benchmarks for Learning with Low-Cost Robots},
author={Michael Ahn and Henry Zhu and Kristian Hartikainen and Hugo Ponte and Abhishek Gupta and Sergey Levine and Vikash Kumar},
year={2019},
eprint={1909.11639},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
```
## Disclaimer
This is not an official Google product.
| /robel-0.1.2.tar.gz/robel-0.1.2/README.md | 0.760651 | 0.964355 | README.md | pypi |
.. image:: https://travis-ci.com/theochem/roberto.svg?branch=master
:target: https://travis-ci.com/theochem/roberto
.. image:: https://anaconda.org/theochem/roberto/badges/version.svg
:target: https://anaconda.org/theochem/roberto
.. image:: https://codecov.io/gh/theochem/roberto/branch/master/graph/badge.svg
:target: https://codecov.io/gh/theochem/roberto
.. image:: https://img.shields.io/pypi/v/roberto.svg
:target: https://pypi.org/project/roberto
.. image:: https://img.shields.io/pypi/pyversions/roberto.svg
:target: https://pypi.org/project/roberto
.. image:: https://img.shields.io/github/release/theochem/roberto.svg
:target: https://github.com/theochem/roberto/releases
Roberto is a collection of configurable development work flows. Its goal is to
facilitate the development and quality assurance of some packages in the
theochem organization on Github.
With a relatively simple configuration file (``.roberto.yaml``), the command
``rob`` will take the following steps:
1. Optionally install miniconda (and a MacOSX SDK on OSX).
2. Make a conda or venv environment for development and testing
3. Install dependencies (for the package being developed and for all
development tools).
4. Build the software in-place, i.e. without installing it.
5. Run Linters (optionally showing only messages related to your changes).
6. Run unit and other tests
7. Build the documentation
When you run ``rob robot``, a few additional steps will be performed, which are
not done by default because they are slow and have a low risk of failing:
8. Upload the documentation. (disabled by default)
9. Make source and binary release packages.
10. Deploy the releases. (disabled by default)
(A few minor steps were omitted for clarity.) These steps should work on your
local computer in the same way as on a continuous integration system like
Github Actions, making it easy to prepare a pull request locally. It is also
possible to just run a subset of these tasks, which is often needed when working
on the code. Several steps will also reuse previous results (e.g. conda or venv
environment) if these are already present, to speed up Roberto.
The preparation tasks (1-3) are somewhat hard-coded but they are clever enough
to install a decent development environment with the correct requirements for
the remaining tasks. These remaining tasks (4-10) are configurable and can be
changed to work for Python and/or CMake projects.
Installation
============
Python 3 (>=3.7) must be installed. Other dependencies will be pulled in with
the instructions below.
Roberto can be installed with conda:
.. code-block:: bash
conda install theochem::roberto
It can also be installed with pip. One of the following is fine, whichever you
prefer:
.. code-block:: bash
pip install roberto
pip install roberto --user
python3 -m pip install roberto
python3 -m pip install roberto --user
On some platforms, you may have to adapt your ``${PATH}`` variable before you
can run ``rob``.
Usage
=====
By default, Roberto will use venv, unless it is configured to use conda. While
conda is more powerful than venv, it is also a lot slower and requires more
storage than venv.
When using conda, be aware that Roberto will install miniconda, by default in
``~/miniconda3``, if not present yet. You can modify this directory by setting
the environment variable ``ROBERTO_CONDA_BASE_PATH`` or by putting the following
in your global Roberto configuration file ``~/.roberto.yaml``:
.. code-block:: yaml
conda:
base_path: <your/preferred/location>
E.g. you can use this to avoid interference with an existing miniconda install.
Roberto will also make new conda or venv environments for the development of
every package, with relatively long names. For example, when Roberto is executed
in its own source tree, the conda environment would be
``roberto-dev-python-3.7``.
To use Roberto, just run ``rob`` in the root of the source tree, where also the
project's ``.roberto.yaml`` is located. Use ``rob --help`` to get a list of
tasks if you are interested in replicating just a part of the CI process. If
your ``${PATH}`` variable is not set correctly, you can also run Roberto as
``python3 -m roberto`` instead of ``rob``.
It is a good practice to run ``rob`` before every ``git commit`` to make sure
the committed code is clean and working.
When using the cardboardlint tool and when you are working in a development
branch, cardboardlint will only show linter messages for lines of code that you
have changed. If you would like to see all messages, run Roberto as
``ROBERTO_ABSOLUTE=1 rob``.
More details, e.g. on how to configure Roberto, can be found in the
documentation: https://theochem.github.io/roberto
Development
===========
If you have questions or ideas, just open an issue on Github. Practical
information on how to contribute can be found in
`CONTRIBUTING.rst <CONTRIBUTING.rst>`_.
Roberto is intentionally a small code base, so one can easily understand how
it works by reading the source code. Roberto makes extensive use of `invoke
<http://pyinvoke.org>`_ to avoid having to write a lot of boiler-plate code.
| /roberto-2.0.4.tar.gz/roberto-2.0.4/README.rst | 0.650467 | 0.70557 | README.rst | pypi |
import os
import sys
from typing import List, Tuple
import cmocean
import matplotlib as mpl
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
from matplotlib.colors import LogNorm, Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
class FigureHelper:
    """Context manager that applies shared style (font, colormap) to a figure.

    On exit it re-applies the bundled Roboto font to every axis of the
    figure, optionally adds a legend, and optionally saves the figure.
    """

    # Colormap shared by every plot in this module, and the fill color
    # used for the marginal histograms in `historgram2d`.
    cmap = cmocean.cm.dense_r
    marg_hist_color = "#7960c7"

    def __init__(self, fig_kwargs, add_legend=False, path=None):
        """
        :param fig_kwargs: Keyword arguments forwarded to ``plt.figure``.
        :param add_legend: If True, draw a legend (with the custom font) on exit.
        :param path: If given, save the figure to this file path on exit.
        """
        self.fig_kwargs = fig_kwargs
        font_loc = os.path.join(os.path.dirname(__file__), "fonts/Roboto-Regular.ttf")
        self.prop = fm.FontProperties(fname=font_loc)
        self.add_legend = add_legend
        self.path = path

    def __enter__(self):
        self.figure = plt.figure(**self.fig_kwargs)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Re-apply the bundled font to all tick labels and axis labels,
        # since individual plotting calls may have reset them.
        axes = self.figure.get_axes()
        for ax in axes:
            for label in ax.get_xticklabels() + ax.get_yticklabels():
                label.set_fontproperties(self.prop)
            ax.set_xlabel(ax.get_xlabel(), font_properties=self.prop)
            ax.set_ylabel(ax.get_ylabel(), font_properties=self.prop)
        if self.add_legend:
            plt.legend(prop=self.prop)
        if self.path:
            # BUG FIX: plt.savefig takes the output file name as the first
            # positional argument (`fname`); the previous `path=self.path`
            # keyword raised a TypeError and no file was ever written.
            plt.savefig(self.path, dpi=600, bbox_inches="tight")

    @staticmethod
    def get_color_list(n):
        """Return ``n`` evenly spaced colors drawn from the shared colormap."""
        return FigureHelper.cmap(np.linspace(0, 1, n + 1))[:-1]

    @staticmethod
    def negative_helper(x, pos):
        """Tick formatter that left-pads non-negative values so that columns
        of tick labels stay aligned with negative ones."""
        x = str(x)
        pad = "" if x.startswith("-") else " "
        return "{}{}".format(pad, x)
def line_plot(
    x: np.ndarray,
    ys: List[np.ndarray],
    labels: List[str],
    x_label: str = "X",
    y_label: str = "Y",
    figsize: Tuple[int, int] = None,
    out_dir: str = None,
):
    """Makes a line plot.

    Draws one line per entry in ``ys`` against the shared abscissa ``x``,
    coloring each series from the module-wide colormap.

    Args:
        x: Shared x values for every series.
        ys: One array of y values per line; each is plotted against ``x``.
        labels: Legend label for each series (zipped with ``ys``).
        x_label: Label for the x axis.
        y_label: Label for the y axis.
        figsize: Figure size in inches, forwarded to ``plt.figure``.
        out_dir: Forwarded to FigureHelper as the save target; despite the
            name it looks like a full file path, not a directory — TODO confirm.
    """
    fig_kwargs = {"figsize": figsize, "tight_layout": True}
    with FigureHelper(fig_kwargs, path=out_dir) as fig_helper:
        fig = fig_helper.figure
        ax = fig.add_subplot(111)
        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        # One color per series, sampled evenly from the shared colormap.
        colors = FigureHelper.get_color_list(len(ys))
        for y, c, l in zip(ys, colors, labels):
            ax.plot(x, y, color=c, label=l)
def imshow(
    arr: np.ndarray,
    x_label: str = "X",
    y_label: str = "Y",
    out_dir: str = None,
    cmap: str = None,
    origin: str = "lower",
    cbar_loc: str = "right",
    cbar_orientation: str = "vertical",
    cbar_label: str = "N",
    vmin: float = None,
    vmax: float = None,
    figsize: Tuple[int, int] = None,
    log: bool = False,
    scale_bar_size_lbl_loc: Tuple[float, str, int] = None,
):
    """Graphs a single array with colorbars and labels.

    Args:
        arr: 2-D array to render.
        x_label: Label for the x axis (ignored when a scale bar is drawn).
        y_label: Label for the y axis (ignored when a scale bar is drawn).
        out_dir: Forwarded to FigureHelper as the save target; appears to be
            a full file path, not a directory — TODO confirm.
        cmap: Colormap override; falls back to the module-wide colormap.
        origin: Passed to ``Axes.imshow`` ('lower' or 'upper').
        cbar_loc: Side on which the colorbar axis is appended.
        cbar_orientation: 'vertical' or 'horizontal'.
        cbar_label: Label for the colorbar axis.
        vmin: Lower color limit (folded into the norm).
        vmax: Upper color limit (folded into the norm).
        figsize: Figure size in inches, forwarded to ``plt.figure``.
        log: If True, color values on a logarithmic scale.
        scale_bar_size_lbl_loc: Optional (size, label, loc) triple for an
            AnchoredSizeBar; when given, axes are hidden instead of labeled.
    """
    fig_kwargs = {"figsize": figsize, "tight_layout": True}
    with FigureHelper(fig_kwargs, path=out_dir) as fig_helper:
        fig = fig_helper.figure
        # Fold vmin/vmax into the norm: passing them alongside an explicit
        # `norm` raises a ValueError on recent matplotlib releases.
        if log:
            color_norm = LogNorm(vmin=vmin, vmax=vmax)
        else:
            color_norm = Normalize(vmin=vmin, vmax=vmax)
        # image
        ax = fig.add_subplot(111)
        im = ax.imshow(
            arr,
            cmap=cmap if cmap else FigureHelper.cmap,
            origin=origin,
            interpolation="none",
            norm=color_norm,
        )
        # (removed a leftover debug `print(fig.__dict__)` here)
        if scale_bar_size_lbl_loc:
            size_bar = AnchoredSizeBar(
                ax.transData,
                scale_bar_size_lbl_loc[0],
                scale_bar_size_lbl_loc[1],
                scale_bar_size_lbl_loc[2],
                fontproperties=fig_helper.prop,
            )
            ax.add_artist(size_bar)
            ax.axis("off")
        else:
            ax.set_xlabel(x_label)
            ax.set_ylabel(y_label)
        # color bar
        divider = make_axes_locatable(ax)
        cax = divider.append_axes(cbar_loc, size="2%", pad=0.05)
        cb = fig.colorbar(im, cax=cax, orientation=cbar_orientation)
        # Custom formatter keeps positive and negative ticks aligned.
        fmt = mpl.ticker.FuncFormatter(FigureHelper.negative_helper)
        if cbar_orientation == "vertical":
            cb.ax.yaxis.major.formatter = fmt
            cb.ax.set_ylabel(cbar_label)
        if cbar_orientation == "horizontal":
            cb.ax.xaxis.major.formatter = fmt
            cb.ax.set_xlabel(cbar_label)
def historgram2d(
    x1: np.ndarray,
    x2: np.ndarray,
    out_dir: str = None,
    cmap: str = None,
    xbins: int = 100,
    ybins: int = 100,
    x_label: str = "X",
    y_label: str = "Y",
    joint_color_log: bool = False,
    marg_x_log: bool = False,
    marg_y_log: bool = False,
    figsize: Tuple[int, int] = None,
    flip_joint_x: bool = False,
    flip_joint_y: bool = False,
):
    """Makes a 2d histogram.

    Draws a joint 2-D histogram of (x1, x2) with a colorbar, plus marginal
    1-D histograms of x1 (top) and x2 (right) that share the joint axes.

    NOTE(review): the public name is misspelled ("historgram2d"); kept as-is
    because callers depend on it.

    Args:
        x1: Samples for the horizontal axis.
        x2: Samples for the vertical axis.
        out_dir: Forwarded to FigureHelper as the save target; appears to be
            a full file path, not a directory — TODO confirm.
        cmap: Currently unused by the joint plot (the shared colormap is
            always used) — TODO confirm intent.
        xbins: Bin count along x (joint and top marginal).
        ybins: Bin count along y (joint and right marginal).
        x_label: Label for the joint x axis.
        y_label: Label for the joint y axis.
        joint_color_log: If True, color the joint counts on a log scale.
        marg_x_log: If True, log-scale the top marginal's count axis.
        marg_y_log: If True, log-scale the right marginal's count axis.
        figsize: Figure size in inches, forwarded to ``plt.figure``.
        flip_joint_x: If True, reverse the x axis direction.
        flip_joint_y: If True, reverse the y axis direction.
    """
    fig_kwargs = {"figsize": figsize, "tight_layout": True}
    with FigureHelper(fig_kwargs, path=out_dir) as fig_helper:
        fig = fig_helper.figure
        # 4x4 grid: joint plot occupies the lower-left 3x3 cells.
        grid = GridSpec(4, 4)
        # joint histogram ======================================================
        ax_joint = fig.add_subplot(grid[1:4, 0:3])
        ax_joint.set_xlabel(x_label)
        ax_joint.set_ylabel(y_label)
        color_norm = LogNorm() if joint_color_log else Normalize()
        joint_hist = ax_joint.hist2d(
            x1, x2, bins=(xbins, ybins), norm=color_norm, cmap=FigureHelper.cmap
        )
        divider = make_axes_locatable(ax_joint)
        cax = divider.append_axes("right", size="2%", pad=0.05)
        # hist2d returns (counts, xedges, yedges, QuadMesh); the mesh drives
        # the colorbar.
        cb = fig.colorbar(joint_hist[-1], cax=cax, orientation="vertical")
        # ======================================================================
        # marginal x histogram =================================================
        ax_marg_x = fig.add_subplot(grid[0, 0:3], sharex=ax_joint)
        ax_marg_x.set_ylabel("N")
        n, bins, patches = ax_marg_x.hist(
            x1, bins=xbins, color=FigureHelper.marg_hist_color, log=marg_x_log
        )
        # Reversed limits flip the axis direction.
        if flip_joint_x:
            ax_marg_x.set_xlim([x1.max(), x1.min()])
        else:
            ax_marg_x.set_xlim([x1.min(), x1.max()])
        # ======================================================================
        # marginal y histogram =================================================
        ax_marg_y = fig.add_subplot(grid[1:4, 3], sharey=ax_joint)
        ax_marg_y.set_xlabel("N")
        n, bins, patches = ax_marg_y.hist(
            x2,
            orientation="horizontal",
            bins=ybins,
            color=FigureHelper.marg_hist_color,
            log=marg_y_log,
        )
        if flip_joint_y:
            ax_marg_y.set_ylim([x2.max(), x2.min()])
        else:
            ax_marg_y.set_ylim([x2.min(), x2.max()])
        # ======================================================================
from datetime import datetime, timedelta
from typing import Dict, List
from robot.reservations import AlreadyCheckedInError, Reservation
class Robin:
    """Automates robin-powered desk reservations and check-ins for a set of users."""

    def __init__(self, users_info: List):
        # Each entry is expected to expose `seat_id` and `reserver_id`
        # attributes (see Reservation) — TODO confirm against callers.
        self.users_info = users_info

    def daterange(self, start_date: datetime, end_date: datetime):
        """Yield each date from start_date (inclusive) to end_date (exclusive)."""
        for n in range(int((end_date - start_date).days)):
            yield start_date + timedelta(n)

    def reserve(self, start: datetime, end: datetime = None) -> Dict:
        """Reserve each user's seat for every weekday in [start, end).

        :param start: First date to reserve (inclusive).
        :param end: End of the range (exclusive). Defaults to the day after
            ``start`` so a bare ``reserve(day)`` books a single day
            (previously ``end=None`` crashed inside daterange).
        :returns: Nested mapping reserver_id -> {"YYYY-MM-DD": True}.
        """
        if end is None:
            end = start + timedelta(days=1)
        # there will always be only one seat_id for now
        results = {}
        for user_info in self.users_info:
            seat_id, reserver_id = user_info.seat_id, user_info.reserver_id
            results[reserver_id] = {}
            for current in self.daterange(start, end):
                # weekday() > 4 means Saturday (5) or Sunday (6).
                if current.weekday() > 4:
                    print(f"skip weekends {current.strftime('%Y-%m-%d')}")
                    continue
                print(
                    f"make a reservation seat_id: {seat_id} date: {current.strftime('%Y-%m-%d')}")
                reservation = Reservation(user_info, current)
                # A real id means the day is already reserved; skip it.
                if "-1" != reservation._get_id():
                    continue
                reservation_id = reservation._reserve()
                if "-1" == reservation_id:
                    # The booking window is closed past this date; no later
                    # date can succeed either, so stop iterating.
                    print(f"reservation {reservation_id} too far in the future. stop going through dates")
                    break
                print(f"reservation successful with id {reservation_id}")
                results[reserver_id][current.strftime("%Y-%m-%d")] = True
        return results

    def check_in(self, current: datetime) -> Dict:
        """Confirm the reservation on ``current`` for every configured user.

        :param current: The date whose reservations should be checked in.
        :returns: Mapping reserver_id -> True on success, False when the
            reservation was already checked in. Users with no reservation
            are omitted.
        """
        results = {}
        for user_info in self.users_info:
            reserver_id = user_info.reserver_id
            try:
                reservation = Reservation(user_info, current)
                reservation_id = reservation._get_id()
                if "-1" == reservation_id:
                    print(f"no reservation to confirm for {reserver_id}")
                    continue
                reservation._check_in(reservation_id)
                print(f"check in successful for {reservation_id}")
                results[reserver_id] = True
            except AlreadyCheckedInError as e:
                print(f"error whilst checking in: {e}")
                results[reserver_id] = False
        return results
import robin_stocks.helper as helper
import robin_stocks.urls as urls
import robin_stocks.stocks as stocks
def get_top_movers_sp500(direction, info=None):
    """Returns a list of the top S&P500 movers up or down for the day.

    :param direction: The direction of movement, either 'up' or 'down'.
    :type direction: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries (keys: instrument_url, symbol,
        updated_at, price_movement, description), or a list of values for
        the single key named by ``info``.
    """
    try:
        direction = direction.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    if direction not in ('up', 'down'):
        print('Error: direction must be "up" or "down"', file=helper.get_output())
        return [None]
    payload = {'direction': direction}
    data = helper.request_get(urls.movers_sp500(), 'pagination', payload)
    return helper.filter_data(data, info)
def get_top_100(info=None):
    """Returns quote data for the Top 100 stocks on Robinhood.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of quote dictionaries (keys: ask_price, ask_size,
        bid_price, bid_size, last_trade_price,
        last_extended_hours_trade_price, previous_close,
        adjusted_previous_close, previous_close_date, symbol,
        trading_halted, has_traded, last_trade_price_source, updated_at,
        instrument), or a list of values for the single key named by ``info``.
    """
    # Resolve the tag's instrument URLs into ticker symbols, then quote them.
    tag_data = helper.request_get(urls.get_100_most_popular(), 'regular')
    instrument_urls = helper.filter_data(tag_data, 'instruments')
    tickers = [stocks.get_symbol_by_url(u) for u in instrument_urls]
    quote_data = stocks.get_quotes(tickers)
    return helper.filter_data(quote_data, info)
def get_top_movers(info=None):
    """Returns quote data for the Top 20 movers on Robinhood.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of quote dictionaries (keys: ask_price, ask_size,
        bid_price, bid_size, last_trade_price,
        last_extended_hours_trade_price, previous_close,
        adjusted_previous_close, previous_close_date, symbol,
        trading_halted, has_traded, last_trade_price_source, updated_at,
        instrument), or a list of values for the single key named by ``info``.
    """
    # Resolve the tag's instrument URLs into ticker symbols, then quote them.
    tag_data = helper.request_get(urls.movers_top(), 'regular')
    instrument_urls = helper.filter_data(tag_data, 'instruments')
    tickers = [stocks.get_symbol_by_url(u) for u in instrument_urls]
    quote_data = stocks.get_quotes(tickers)
    return helper.filter_data(quote_data, info)
def get_all_stocks_from_market_tag(tag, info=None):
    """Returns all the stock quote information that matches a tag category.

    :param tag: The category to filter for. Examples include
        'biopharmaceutical', 'upcoming-earnings', 'most-popular-under-25',
        and 'technology'.
    :type tag: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of quote dictionaries (keys: ask_price, ask_size,
        bid_price, bid_size, last_trade_price,
        last_extended_hours_trade_price, previous_close,
        adjusted_previous_close, previous_close_date, symbol,
        trading_halted, has_traded, last_trade_price_source, updated_at,
        instrument), or a list of values for the single key named by ``info``.
        Returns ``[None]`` for an unknown tag.
    """
    tag_data = helper.request_get(urls.market_category(tag), 'regular')
    instrument_urls = helper.filter_data(tag_data, 'instruments')
    # An unknown tag yields no instruments at all.
    if not instrument_urls:
        print('ERROR: "{}" is not a valid tag'.format(tag), file=helper.get_output())
        return [None]
    tickers = [stocks.get_symbol_by_url(u) for u in instrument_urls]
    quote_data = stocks.get_quotes(tickers)
    return helper.filter_data(quote_data, info)
def get_markets(info=None):
    """Returns a list of available markets.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries (keys: url, todays_hours, mic,
        operating_mic, acronym, name, city, country, timezone, website),
        or a list of values for the single key named by ``info``.
    """
    market_data = helper.request_get(urls.markets(), 'pagination')
    return helper.filter_data(market_data, info)
def get_market_today_hours(market, info=None):
    """Returns the opening and closing hours of a specific market for today. Also will tell you
    if the market is open on that date.
    :param market: The 'mic' value for the market. Can be found using get_markets().
    :type market: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the specific market. If info parameter is provided, \
    the string value for the corresponding key will be provided.
    :Dictionary Keys: * date
                      * is_open
                      * opens_at
                      * closes_at
                      * extended_opens_at
                      * extended_closes_at
                      * previous_open_hours
                      * next_open_hours
    :raises Exception: If ``market`` does not match any known 'mic' value.
    """
    # Look the market up by its 'mic' code to find its hours endpoint.
    markets = get_markets()
    result = next((x for x in markets if x['mic'] == market), None)
    if not result:
        raise Exception('Not a valid market name. Check get_markets() for a list of market information.')
    url = result['todays_hours']
    data = helper.request_get(url, 'regular')
    return(helper.filter_data(data, info))
def get_market_next_open_hours(market, info=None):
    """Returns the opening and closing hours for the next open trading day after today. Also will
    tell you if the market is open on that date.
    :param market: The 'mic' value for the market. Can be found using get_markets().
    :type market: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the specific market. If info parameter is provided, \
    the string value for the corresponding key will be provided.
    :Dictionary Keys: * date
                      * is_open
                      * opens_at
                      * closes_at
                      * extended_opens_at
                      * extended_closes_at
                      * previous_open_hours
                      * next_open_hours
    """
    # Today's hours payload already carries a link to the next open day.
    url = get_market_today_hours(market, info='next_open_hours')
    data = helper.request_get(url, 'regular')
    return(helper.filter_data(data, info))
def get_market_next_open_hours_after_date(market, date, info=None):
    """Returns the opening and closing hours for the next open trading day after a date that is
    specified. Also will tell you if the market is open on that date.
    :param market: The 'mic' value for the market. Can be found using get_markets().
    :type market: str
    :param date: The date you want to find the next available trading day after. format is YYYY-MM-DD.
    :type date: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the specific market. If info parameter is provided, \
    the string value for the corresponding key will be provided.
    :Dictionary Keys: * date
                      * is_open
                      * opens_at
                      * closes_at
                      * extended_opens_at
                      * extended_closes_at
                      * previous_open_hours
                      * next_open_hours
    """
    # The hours payload for `date` links to the next open day after it.
    url = get_market_hours(market, date, info='next_open_hours')
    data = helper.request_get(url, 'regular')
    return(helper.filter_data(data, info))
def get_market_hours(market, date, info=None):
    """Returns the opening and closing hours of a specific market on a specific date. Also will
    tell you if the market is open on that date. Can be used with past or future dates.
    :param market: The 'mic' value for the market. Can be found using get_markets().
    :type market: str
    :param date: The date you want to get information for. format is YYYY-MM-DD.
    :type date: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the specific market. If info parameter is provided, \
    the string value for the corresponding key will be provided.
    :Dictionary Keys: * date
                      * is_open
                      * opens_at
                      * closes_at
                      * extended_opens_at
                      * extended_closes_at
                      * previous_open_hours
                      * next_open_hours
    """
    url = urls.market_hours(market, date)
    data = helper.request_get(url, 'regular')
    return(helper.filter_data(data, info))
def get_currency_pairs(info=None):
    """Returns currency pairs.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries (keys: asset_currency, display_only,
        id, max_order_size, min_order_size, min_order_price_increment,
        min_order_quantity_increment, name, quote_currency, symbol,
        tradability), or a list of values for the single key named by
        ``info``.
    """
    pair_data = helper.request_get(urls.currency(), 'results')
    return helper.filter_data(pair_data, info)
from robin_stocks.helper import id_for_chain, id_for_stock
# Login
def login_url():
    """OAuth2 token endpoint used to log in."""
    return 'https://api.robinhood.com/oauth2/token/'


def challenge_url(challenge_id):
    """Endpoint for responding to a login challenge."""
    return 'https://api.robinhood.com/challenge/{0}/respond/'.format(challenge_id)
# Profiles
def account_profile():
    """Brokerage accounts endpoint."""
    return 'https://api.robinhood.com/accounts/'


def basic_profile():
    """Basic user information endpoint."""
    return 'https://api.robinhood.com/user/basic_info/'


def investment_profile():
    """Investment profile endpoint."""
    return 'https://api.robinhood.com/user/investment_profile/'


def portfolio_profile():
    """Portfolios endpoint."""
    return 'https://api.robinhood.com/portfolios/'


def security_profile():
    """Additional (security) user information endpoint."""
    return 'https://api.robinhood.com/user/additional_info/'


def user_profile():
    """Root user endpoint."""
    return 'https://api.robinhood.com/user/'


def portfolis_historicals(account_number):
    """Portfolio historicals endpoint for one account.

    NOTE(review): the name is misspelled ("portfolis"); kept because
    callers depend on it.
    """
    return 'https://api.robinhood.com/portfolios/historicals/{0}/'.format(account_number)
# Stocks
def earnings():
    """Earnings market-data endpoint."""
    return 'https://api.robinhood.com/marketdata/earnings/'


def events():
    """Options events endpoint."""
    return 'https://api.robinhood.com/options/events/'


def fundamentals():
    """Stock fundamentals endpoint."""
    return 'https://api.robinhood.com/fundamentals/'


def historicals():
    """Historical quotes endpoint."""
    return 'https://api.robinhood.com/quotes/historicals/'


def instruments():
    """Instruments endpoint."""
    return 'https://api.robinhood.com/instruments/'


def news(symbol):
    """News endpoint for one ticker (note the trailing '?')."""
    return 'https://api.robinhood.com/midlands/news/{0}/?'.format(symbol)


def popularity(symbol):
    """Popularity endpoint; resolves the ticker to its instrument id."""
    return 'https://api.robinhood.com/instruments/{0}/popularity/'.format(id_for_stock(symbol))


def quotes():
    """Quotes endpoint."""
    return 'https://api.robinhood.com/quotes/'


def ratings(symbol):
    """Analyst ratings endpoint; resolves the ticker to its instrument id."""
    return 'https://api.robinhood.com/midlands/ratings/{0}/'.format(id_for_stock(symbol))


def splits(symbol):
    """Stock splits endpoint; resolves the ticker to its instrument id."""
    return 'https://api.robinhood.com/instruments/{0}/splits/'.format(id_for_stock(symbol))
# account
def phoenix():
    """Unified account overview endpoint."""
    return 'https://phoenix.robinhood.com/accounts/unified'


def positions():
    """Stock positions endpoint."""
    return 'https://api.robinhood.com/positions/'


def banktransfers(direction=None):
    """ACH transfers endpoint; 'received' selects incoming transfers."""
    if direction == 'received':
        return 'https://api.robinhood.com/ach/received/transfers/'
    return 'https://api.robinhood.com/ach/transfers/'


def cardtransactions():
    """Debit-card transaction history endpoint."""
    return 'https://minerva.robinhood.com/history/transactions/'


def daytrades(account):
    """Recent day-trades endpoint for one account."""
    return 'https://api.robinhood.com/accounts/{0}/recent_day_trades/'.format(account)


def dividends():
    """Dividends endpoint."""
    return 'https://api.robinhood.com/dividends/'


def documents():
    """Documents endpoint."""
    return 'https://api.robinhood.com/documents/'


def withdrawl(bank_id):
    """ACH relationship endpoint for one linked bank.

    NOTE(review): the name is misspelled ("withdrawl"); kept because
    callers depend on it.
    """
    return "https://api.robinhood.com/ach/relationships/{}/".format(bank_id)


def linked(id=None, unlink=False):
    """Linked-bank relationships endpoint; `unlink` selects the unlink action."""
    if unlink:
        return 'https://api.robinhood.com/ach/relationships/{0}/unlink/'.format(id)
    if id:
        return 'https://api.robinhood.com/ach/relationships/{0}/'.format(id)
    return 'https://api.robinhood.com/ach/relationships/'


def margin():
    """Margin calls endpoint."""
    return 'https://api.robinhood.com/margin/calls/'


def margininterest():
    """Margin interest charges endpoint."""
    return 'https://api.robinhood.com/cash_journal/margin_interest_charges/'


def notifications(tracker=False):
    """Notification endpoints; `tracker` selects the notification tracker."""
    if tracker:
        return 'https://api.robinhood.com/midlands/notifications/notification_tracker/'
    return 'https://api.robinhood.com/notifications/devices/'


def referral():
    """Referral endpoint."""
    return 'https://api.robinhood.com/midlands/referral/'


def stockloan():
    """Stock-loan payments endpoint."""
    return 'https://api.robinhood.com/stock_loan/payments/'


def subscription():
    """Subscription fees endpoint."""
    return 'https://api.robinhood.com/subscription/subscription_fees/'


def wiretransfers():
    """Wire transfers endpoint (no trailing slash in the upstream API)."""
    return 'https://api.robinhood.com/wire/transfers'


def watchlists(name=None, add=False):
    """Watchlist endpoints; a name selects the items endpoint.

    NOTE(review): the `add` parameter is currently unused — confirm intent.
    """
    if name:
        return 'https://api.robinhood.com/midlands/lists/items/'
    return 'https://api.robinhood.com/midlands/lists/default/'
# markets
def currency():
    """Crypto currency-pairs endpoint."""
    return 'https://nummus.robinhood.com/currency_pairs/'


def markets():
    """Markets listing endpoint."""
    return 'https://api.robinhood.com/markets/'


def market_hours(market, date):
    """Hours endpoint for one market ('mic') on one YYYY-MM-DD date."""
    return 'https://api.robinhood.com/markets/{}/hours/{}/'.format(market, date)


def movers_sp500():
    """S&P 500 movers endpoint."""
    return 'https://api.robinhood.com/midlands/movers/sp500/'


def get_100_most_popular():
    """Tag endpoint for the 100 most popular stocks."""
    return 'https://api.robinhood.com/midlands/tags/tag/100-most-popular/'


def movers_top():
    """Tag endpoint for top movers."""
    return 'https://api.robinhood.com/midlands/tags/tag/top-movers/'


def market_category(category):
    """Tag endpoint for an arbitrary category."""
    return 'https://api.robinhood.com/midlands/tags/tag/{}/'.format(category)
# options
def aggregate():
    """Aggregated option positions endpoint."""
    return 'https://api.robinhood.com/options/aggregate_positions/'


def chains(symbol):
    """Option chain endpoint; resolves the ticker to its chain id."""
    return 'https://api.robinhood.com/options/chains/{0}/'.format(id_for_chain(symbol))


def option_historicals(id):
    """Historical option market data endpoint for one instrument id."""
    return 'https://api.robinhood.com/marketdata/options/historicals/{0}/'.format(id)


def option_instruments(id=None):
    """Option instruments endpoint; an id selects a single instrument."""
    if id:
        return 'https://api.robinhood.com/options/instruments/{0}/'.format(id)
    return 'https://api.robinhood.com/options/instruments/'


def option_orders(orderID=None):
    """Option orders endpoint; an id selects a single order."""
    if orderID:
        return 'https://api.robinhood.com/options/orders/{0}/'.format(orderID)
    return 'https://api.robinhood.com/options/orders/'


def option_positions():
    """Option positions endpoint."""
    return 'https://api.robinhood.com/options/positions/'


def marketdata_options():
    """Option market data endpoint."""
    return 'https://api.robinhood.com/marketdata/options/'
# pricebook
def marketdata_quotes(id):
    """Market-data quote endpoint for one instrument id."""
    return 'https://api.robinhood.com/marketdata/quotes/{0}/'.format(id)


def marketdata_pricebook(id):
    """Level-II pricebook snapshot endpoint for one instrument id."""
    return 'https://api.robinhood.com/marketdata/pricebook/snapshots/{0}/'.format(id)
# crypto
def order_crypto():
    """Crypto order placement endpoint."""
    return 'https://nummus.robinhood.com/orders/'


def crypto_account():
    """Crypto accounts endpoint."""
    return 'https://nummus.robinhood.com/accounts/'


def crypto_currency_pairs():
    """Crypto currency-pairs endpoint."""
    return 'https://nummus.robinhood.com/currency_pairs/'


def crypto_quote(id):
    """Forex quote endpoint for one pair id."""
    return 'https://api.robinhood.com/marketdata/forex/quotes/{0}/'.format(id)


def crypto_holdings():
    """Crypto holdings endpoint."""
    return 'https://nummus.robinhood.com/holdings/'


def crypto_historical(id):
    """Forex historicals endpoint for one pair id."""
    return 'https://api.robinhood.com/marketdata/forex/historicals/{0}/'.format(id)


def crypto_orders(orderID=None):
    """Crypto orders endpoint; an id selects a single order."""
    if orderID:
        return 'https://nummus.robinhood.com/orders/{0}/'.format(orderID)
    return 'https://nummus.robinhood.com/orders/'


def crypto_cancel(id):
    """Crypto order cancel endpoint for one order id."""
    return 'https://nummus.robinhood.com/orders/{0}/cancel/'.format(id)
# orders
def cancel(url):
    """Stock order cancel endpoint.

    NOTE(review): the parameter is named `url` but is interpolated as an
    order id — confirm against callers.
    """
    return 'https://api.robinhood.com/orders/{0}/cancel/'.format(url)


def option_cancel(id):
    """Option order cancel endpoint for one order id."""
    return 'https://api.robinhood.com/options/orders/{0}/cancel/'.format(id)


def orders(orderID=None):
    """Stock orders endpoint; an id selects a single order."""
    if orderID:
        return 'https://api.robinhood.com/orders/{0}/'.format(orderID)
    return 'https://api.robinhood.com/orders/'
from functools import wraps
import requests
from robin_stocks.globals import LOGGED_IN, SESSION, OUTPUT
def set_login_state(logged_in):
    """Record whether the user is currently authenticated."""
    global LOGGED_IN
    LOGGED_IN = logged_in


def set_output(output):
    """Point the module-wide output stream at *output*."""
    global OUTPUT
    OUTPUT = output


def get_output():
    """Return the stream informational messages are printed to."""
    # Reading a module global needs no `global` declaration.
    return OUTPUT
def login_required(func):
    """Decorator that rejects calls to *func* unless a login has been recorded."""
    @wraps(func)
    def login_wrapper(*args, **kwargs):
        # LOGGED_IN is module-level state toggled by set_login_state().
        if not LOGGED_IN:
            raise Exception('{} can only be called when logged in'.format(func.__name__))
        return func(*args, **kwargs)
    return login_wrapper
def convert_none_to_string(func):
    """Decorator that maps a falsy result of *func* to the empty string.

    NOTE(review): despite the name, every falsy value (None, 0, "", [])
    is replaced with "" — confirm callers only rely on the None case.
    """
    @wraps(func)
    def string_wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        return result if result else ""
    return string_wrapper
def id_for_stock(symbol):
    """Takes a stock ticker and returns the instrument id associated with the stock.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stock's instrument id, or None
        when ``symbol`` is not a string.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    data = request_get('https://api.robinhood.com/instruments/',
                       'indexzero', {'symbol': symbol})
    return filter_data(data, 'id')
def id_for_chain(symbol):
    """Takes a stock ticker and returns the chain id associated with a stock's options.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stock's option chain id, or None
        when ``symbol`` is not a string or the lookup fails.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    data = request_get('https://api.robinhood.com/instruments/',
                       'indexzero', {'symbol': symbol})
    # Propagate falsy lookup results (None / empty) unchanged.
    return data['tradable_chain_id'] if data else data
def id_for_group(symbol):
    """Takes a stock ticker and returns the id associated with the group.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stock's group id.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    chain_url = 'https://api.robinhood.com/options/chains/{0}/'.format(
        id_for_chain(symbol))
    data = request_get(chain_url)
    return data['underlying_instruments'][0]['id']
def id_for_option(symbol, expirationDate, strike, optionType):
    """Returns the id associated with a specific option order.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :param expirationDate: The expiration date as YYYY-MM-DD.
    :type expirationDate: str
    :param strike: The strike price.
    :type strike: str
    :param optionType: Either call or put.
    :type optionType: str
    :returns: A string that represents the stock's option id, or None when
        no active instrument matches.
    """
    symbol = symbol.upper()
    payload = {
        'chain_id': id_for_chain(symbol),
        'expiration_dates': expirationDate,
        'strike_price': strike,
        'type': optionType,
        'state': 'active'
    }
    data = request_get('https://api.robinhood.com/options/instruments/',
                       'pagination', payload)
    # The API can return other expirations; keep exact matches only.
    matches = [item for item in data if item["expiration_date"] == expirationDate]
    if not matches:
        print('Getting the option ID failed. Perhaps the expiration date is wrong format, or the strike price is wrong.', file=get_output())
        return None
    return matches[0]['id']
def round_price(price):
    """Round a price to the precision that Robinhood accepts.

    Sub-penny prices keep six decimal places, sub-dollar prices keep four,
    and prices of a dollar or more keep two.

    :param price: The input price to round.
    :type price: float or int
    :returns: The rounded price as a float.
    """
    value = float(price)
    if value <= 1e-2:
        digits = 6
    elif value < 1e0:
        digits = 4
    else:
        digits = 2
    return round(value, digits)
def filter_data(data, info):
    """Takes the data and extracts the value for the keyword that matches info.

    :param data: The data returned by request_get.
    :type data: dict or list
    :param info: The keyword to filter from the data.
    :type info: str
    :returns: A list or string with the values that correspond to the info keyword. \
    None/[None] failure sentinels pass through as None/[] respectively.
    """
    # `is None` and isinstance() replace `== None` / `type(x) == list`;
    # isinstance also accepts dict/list subclasses such as OrderedDict.
    if data is None:
        return(data)
    elif data == [None]:
        return([])
    elif isinstance(data, list):
        if len(data) == 0:
            return([])
        # Use the first element as the representative schema for key lookups.
        compareDict = data[0]
        noneType = []
    elif isinstance(data, dict):
        compareDict = data
        noneType = None
    if info is not None:
        if info in compareDict and isinstance(data, list):
            return([x[info] for x in data])
        elif info in compareDict and isinstance(data, dict):
            return(data[info])
        else:
            print(error_argument_not_key_in_dictionary(info), file=get_output())
            return(noneType)
    else:
        return(data)
def inputs_to_set(inputSymbols):
    """Takes in the parameters passed to *args and puts them in a set and a list.
    The set will make sure there are no duplicates, and then the list will keep
    the original order of the input.

    :param inputSymbols: A list, dict, or tuple of stock tickers.
    :type inputSymbols: list or dict or tuple or str
    :returns: A list of strings that have been capitalized and stripped of white space.
    """
    ordered = []
    seen = set()

    def remember(ticker):
        # Normalize, then record only first occurrences to keep input order.
        ticker = ticker.upper().strip()
        if ticker not in seen:
            seen.add(ticker)
            ordered.append(ticker)

    if type(inputSymbols) is str:
        remember(inputSymbols)
    elif type(inputSymbols) in (list, tuple, set):
        for entry in inputSymbols:
            # Silently drop anything that is not a plain string.
            if type(entry) is str:
                remember(entry)
    return(ordered)
def request_document(url, payload=None):
    """Using a document url, makes a get request and returns the session data.

    :param url: The url to send a get request to.
    :type url: str
    :returns: Returns the raw session.get() response rather than its json() \
    form, or None when the server replies with an HTTP error status.
    """
    try:
        response = SESSION.get(url, params=payload)
        response.raise_for_status()
    except requests.exceptions.HTTPError as message:
        print(message, file=get_output())
        return(None)
    else:
        return(response)
def request_get(url, dataType='regular', payload=None, jsonify_data=True):
    """For a given url and payload, makes a get request and returns the data.

    :param url: The url to send a get request to.
    :type url: str
    :param dataType: Determines how to filter the data. 'regular' returns the unfiltered data. \
    'results' will return data['results']. 'pagination' will return data['results'] and append it with any \
    data that is in data['next']. 'indexzero' will return data['results'][0].
    :type dataType: Optional[str]
    :param payload: Dictionary of parameters to pass to the url. Will append the requests url as url/?key1=value1&key2=value2.
    :type payload: Optional[dict]
    :param jsonify_data: If this is true, will return requests.get().json(), otherwise will return response from requests.get().
    :type jsonify_data: bool
    :returns: Returns the data from the get request. If jsonify_data=True and requests returns an http code other than <200> \
    then either '[None]' or 'None' will be returned based on what the dataType parameter was set as.
    """
    # The failure sentinel depends on the requested filtering mode: list-shaped
    # modes return [None], everything else returns None.
    if (dataType == 'results' or dataType == 'pagination'):
        data = [None]
    else:
        data = None
    res = None
    if jsonify_data:
        try:
            res = SESSION.get(url, params=payload)
            res.raise_for_status()
            data = res.json()
        except (requests.exceptions.HTTPError, AttributeError) as message:
            print(message, file=get_output())
            return(data)
    else:
        res = SESSION.get(url, params=payload)
        return(res)
    # Only continue to filter data if jsonify_data=True, and Session.get returned status code <200>.
    if (dataType == 'results'):
        try:
            data = data['results']
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return([None])
    elif (dataType == 'pagination'):
        counter = 2
        nextData = data
        try:
            data = data['results']
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return([None])
        if nextData['next']:
            print('Found Additional pages.', file=get_output())
        while nextData['next']:
            try:
                res = SESSION.get(nextData['next'])
                res.raise_for_status()
                nextData = res.json()
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate; page fetch stays best-effort.
                print('Additional pages exist but could not be loaded.', file=get_output())
                return(data)
            print('Loading page '+str(counter)+' ...', file=get_output())
            counter += 1
            for item in nextData['results']:
                data.append(item)
    elif (dataType == 'indexzero'):
        try:
            data = data['results'][0]
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return(None)
        except IndexError:
            # Empty results list: the query matched nothing.
            return(None)
    return(data)
def request_post(url, payload=None, timeout=16, json=False, jsonify_data=True):
    """For a given url and payload, makes a post request and returns the response. Allows for responses other than 200.

    :param url: The url to send a post request to.
    :type url: str
    :param payload: Dictionary of parameters to pass to the url as url/?key1=value1&key2=value2.
    :type payload: Optional[dict]
    :param timeout: The time for the post to wait for a response. Should be slightly greater than multiples of 3.
    :type timeout: Optional[int]
    :param json: This will set the 'content-type' parameter of the session header to 'application/json'
    :type json: bool
    :param jsonify_data: If this is true, will return requests.post().json(), otherwise will return response from requests.post().
    :type jsonify_data: bool
    :returns: Returns the data from the post request.
    """
    data = None
    res = None
    try:
        if json:
            # Switch the shared session header to a JSON body for this call
            # only; restore the form-encoded default right after the POST.
            update_session('Content-Type', 'application/json')
            res = SESSION.post(url, json=payload, timeout=timeout)
            update_session(
                'Content-Type', 'application/x-www-form-urlencoded; charset=utf-8')
        else:
            res = SESSION.post(url, data=payload, timeout=timeout)
        # NOTE(review): .json() raises when the body is not JSON; that (and any
        # network error) is swallowed below, leaving data as None (best-effort).
        data = res.json()
    except Exception as message:
        print("Error in request_post: {0}".format(message), file=get_output())
    # Either return response <200,401,etc.> or the data that is returned from requests.
    if jsonify_data:
        return(data)
    else:
        return(res)
def request_delete(url):
    """For a given url and payload, makes a delete request and returns the response.

    :param url: The url to send a delete request to.
    :type url: str
    :returns: Returns the response from the delete request, or None on failure.
    """
    data = None
    try:
        response = SESSION.delete(url)
        response.raise_for_status()
        data = response
    except Exception as message:
        # Best-effort: report the problem and fall through to return None.
        print("Error in request_delete: {0}".format(message), file=get_output())
    return(data)
def update_session(key, value):
    """Updates the session header used by the requests library.

    :param key: The key value to update or add to session header.
    :type key: str
    :param value: The value that corresponds to the key.
    :type value: str
    :returns: None. Updates the session header with a value.
    """
    # Mutates the module-level SESSION in place, so the change applies to
    # every subsequent request made through it.
    SESSION.headers[key] = value
def error_argument_not_key_in_dictionary(keyword):
    """Build the standard error message for a keyword missing from a dictionary."""
    return f'Error: The keyword "{keyword}" is not a key in the dictionary.'
def error_ticker_does_not_exist(ticker):
    """Build the standard warning message for an invalid stock ticker."""
    return f'Warning: "{ticker}" is not a valid stock ticker. It is being ignored'
def error_must_be_nonzero(keyword):
    """Build the standard error message for a parameter that must be a positive integer."""
    return f'Error: The input parameter "{keyword}" must be an integer larger than zero and non-negative'
import robin_stocks.helper as helper
import robin_stocks.urls as urls
@helper.login_required
def load_crypto_profile(info=None):
    """Gets the information associated with the crypto account.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: [dict] A dictionary of account key/value pairs (id, user_id, \
    status, status_reason_code, apex_account_number, rhs_account_number, \
    created_at, updated_at), or the single value matching *info* when given.
    """
    account_data = helper.request_get(urls.crypto_account(), 'indexzero')
    return helper.filter_data(account_data, info)
@helper.login_required
def get_crypto_positions(info=None):
    """Returns crypto positions for the account.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per position (account_id, \
    currency, quantity, quantity_available, quantity_held_for_buy/sell, \
    cost_basis, timestamps). When *info* is given, a list of just that key's \
    values is returned.
    """
    holdings = helper.request_get(urls.crypto_holdings(), 'pagination')
    return helper.filter_data(holdings, info)
def get_crypto_currency_pairs(info=None):
    """Gets a list of all the crypto currencies that you can trade.

    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per tradable pair (id, symbol, \
    name, asset_currency, quote_currency, min/max order sizes, tradability). \
    When *info* is given, a list of just that key's values is returned.
    """
    pairs = helper.request_get(urls.crypto_currency_pairs(), 'results')
    return helper.filter_data(pairs, info)
def get_crypto_info(symbol, info=None):
    """Gets information about a crypto currency.

    :param symbol: The crypto ticker.
    :type symbol: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict] A dictionary of key/value pairs for the ticker (id, \
    symbol, name, min/max order sizes, tradability, ...), the single value \
    for *info* when given, or None when the symbol is not found.
    """
    url = urls.crypto_currency_pairs()
    data = helper.request_get(url, 'results')
    # request_get returns [None] on failure; guard each entry so the
    # subscript below can't raise on a None item.
    if data:
        matches = [x for x in data
                   if x and x['asset_currency']['code'] == symbol]
    else:
        matches = []
    data = matches[0] if matches else None
    return helper.filter_data(data, info)
@helper.login_required
def get_crypto_quote(symbol, info=None):
    """Gets information about a crypto including low price, high price, and open price.

    :param symbol: The crypto ticker.
    :type symbol: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict] A dictionary with quote fields (ask_price, bid_price, \
    mark_price, high_price, low_price, open_price, volume, symbol, id), or \
    the value for *info* when given. Returns None for an unknown symbol.
    """
    id = get_crypto_info(symbol, info='id')
    # An unknown symbol yields no id; bail out instead of requesting a bogus url.
    if not id:
        return None
    url = urls.crypto_quote(id)
    data = helper.request_get(url)
    return(helper.filter_data(data, info))
@helper.login_required
def get_crypto_quote_from_id(id, info=None):
    """Gets quote information for a crypto using its id instead of the crypto ticker.

    :param id: The id of a crypto.
    :type id: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict] A dictionary with quote fields (ask_price, bid_price, \
    mark_price, high_price, low_price, open_price, volume, symbol, id), or \
    the value for *info* when given.
    """
    quote_data = helper.request_get(urls.crypto_quote(id))
    return helper.filter_data(quote_data, info)
@helper.login_required
def get_crypto_historicals(symbol, interval='hour', span='week', bounds='24_7', info=None):
    """Gets historical information about a crypto including open price, close price, high price, and low price.

    :param symbol: The crypto ticker.
    :type symbol: str
    :param interval: The time between data points. Can be '15second', '5minute', '10minute', 'hour', 'day', or 'week'. Default is 'hour'.
    :type interval: str
    :param span: The entire time frame to collect data points. Can be 'hour', 'day', 'week', 'month', '3month', 'year', or '5year'. Default is 'week'
    :type span: str
    :param bounds: The times of day to collect data points. 'Regular' is 6 hours a day, 'trading' is 9 hours a day, \
    'extended' is 16 hours a day, '24_7' is 24 hours a day. Default is '24_7'
    :type bounds: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per time point (begins_at, \
    open/close/high/low price, volume, session, interpolated, symbol), or \
    [None] when the arguments are invalid or the request fails.
    """
    interval_check = ['15second', '5minute', '10minute', 'hour', 'day', 'week']
    span_check = ['hour', 'day', 'week', 'month', '3month', 'year', '5year']
    bounds_check = ['24_7', 'extended', 'regular', 'trading']
    if interval not in interval_check:
        print(
            'ERROR: Interval must be "15second","5minute","10minute","hour","day",or "week"', file=helper.get_output())
        return([None])
    if span not in span_check:
        print('ERROR: Span must be "hour","day","week","month","3month","year",or "5year"', file=helper.get_output())
        return([None])
    if bounds not in bounds_check:
        print('ERROR: Bounds must be "24_7","extended","regular",or "trading"', file=helper.get_output())
        return([None])
    if (bounds == 'extended' or bounds == 'trading') and span != 'day':
        print('ERROR: extended and trading bounds can only be used with a span of "day"', file=helper.get_output())
        return([None])
    symbol = helper.inputs_to_set(symbol)
    id = get_crypto_info(symbol[0], info='id')
    url = urls.crypto_historical(id)
    payload = {'interval': interval,
               'span': span,
               'bounds': bounds}
    data = helper.request_get(url, 'regular', payload)
    # A failed request returns None; keep the [None] error convention used
    # above instead of crashing on the subscripts below.
    if not data or 'data_points' not in data:
        return([None])
    histData = []
    cryptoSymbol = data['symbol']
    # Tag every data point with its ticker before flattening.
    for subitem in data['data_points']:
        subitem['symbol'] = cryptoSymbol
        histData.append(subitem)
    return(helper.filter_data(histData, info))
import robin_stocks.helper as helper
import robin_stocks.urls as urls
def get_quotes(inputSymbols, info=None):
    """Takes any number of stock tickers and returns information pertaining to its price.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of quote dictionaries (ask/bid price and size, \
    last_trade_price, last_extended_hours_trade_price, previous_close, \
    symbol, updated_at, instrument, ...). When *info* is given, a list of \
    just that key's values is returned.
    """
    symbols = helper.inputs_to_set(inputSymbols)
    payload = {'symbols': ','.join(symbols)}
    data = helper.request_get(urls.quotes(), 'results', payload)
    if (data == None or data == [None]):
        return data
    # A None entry means the matching ticker was rejected by the API.
    for bad_index in (i for i, item in enumerate(data) if item is None):
        print(helper.error_ticker_does_not_exist(symbols[bad_index]), file=helper.get_output())
    valid_quotes = [item for item in data if item is not None]
    return helper.filter_data(valid_quotes, info)
def get_fundamentals(inputSymbols, info=None):
    """Takes any number of stock tickers and returns fundamental information
    about the stock such as what sector it is in, a description of the company, dividend yield, and market cap.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per ticker (open, high, low, \
    volume, market_cap, pe_ratio, pb_ratio, dividend_yield, description, \
    sector, industry, ceo, num_employees, year_founded, symbol, ...). When \
    *info* is given, a list of just that key's values is returned.
    """
    symbols = helper.inputs_to_set(inputSymbols)
    payload = {'symbols': ','.join(symbols)}
    data = helper.request_get(urls.fundamentals(), 'results', payload)
    if (data == None or data == [None]):
        return data
    # The API omits the ticker from each record, so tag it back on; a None
    # record means the corresponding ticker was rejected.
    for ticker, record in zip(symbols, data):
        if record is None:
            print(helper.error_ticker_does_not_exist(ticker), file=helper.get_output())
        else:
            record['symbol'] = ticker
    valid_records = [record for record in data if record is not None]
    return helper.filter_data(valid_records, info)
def get_instruments_by_symbols(inputSymbols, info=None):
    """Takes any number of stock tickers and returns information held by the market
    such as ticker name, bloomberg id, and listing date.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of instrument dictionaries (id, url, symbol, name, \
    tradability, margin ratios, list_date, type, ...). When *info* is given, \
    a list of just that key's values is returned. Unknown tickers are \
    reported and skipped.
    """
    url = urls.instruments()
    instruments = []
    for ticker in helper.inputs_to_set(inputSymbols):
        match = helper.request_get(url, 'indexzero', {'symbol': ticker})
        if match:
            instruments.append(match)
        else:
            print(helper.error_ticker_does_not_exist(ticker), file=helper.get_output())
    return helper.filter_data(instruments, info)
def get_instrument_by_url(url, info=None):
    """Takes a single url for the stock. Should be located at ``https://api.robinhood.com/instruments/<id>`` where <id> is the
    id of the stock.

    :param url: The url of the stock. Can be found in several locations including \
    in the dictionary returned from get_instruments_by_symbols(inputSymbols,info=None)
    :type url: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict or str] The instrument dictionary for that url (id, \
    symbol, name, tradability, margin ratios, list_date, type, ...), or the \
    single value matching *info* when it is provided.
    """
    instrument = helper.request_get(url, 'regular')
    return helper.filter_data(instrument, info)
def get_latest_price(inputSymbols, priceType=None, includeExtendedHours=True):
    """Takes any number of stock tickers and returns the latest price of each one as a string.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param priceType: Can either be 'ask_price' or 'bid_price'. If this parameter is set, then includeExtendedHours is ignored.
    :type priceType: str
    :param includeExtendedHours: Leave as True if you want to get extendedhours price if available. \
    False if you only want regular hours price, even after hours.
    :type includeExtendedHours: bool
    :returns: [list] A list of prices as strings. Entries are None for tickers \
    whose quote could not be retrieved; an empty list is returned when the \
    whole quote request failed.
    """
    symbols = helper.inputs_to_set(inputSymbols)
    quote = get_quotes(symbols)
    # get_quotes returns None when the whole request failed; previously this
    # crashed with a TypeError when iterated.
    if quote is None:
        return []
    prices = []
    for item in quote:
        if item:
            if priceType == 'ask_price':
                prices.append(item['ask_price'])
            elif priceType == 'bid_price':
                prices.append(item['bid_price'])
            else:
                if priceType:
                    print('WARNING: priceType should be "ask_price" or "bid_price". You entered "{0}"'.format(priceType), file=helper.get_output())
                # Prefer the extended-hours price unless the caller opted out
                # or none is available.
                if item['last_extended_hours_trade_price'] is None or not includeExtendedHours:
                    prices.append(item['last_trade_price'])
                else:
                    prices.append(item['last_extended_hours_trade_price'])
        else:
            prices.append(None)
    return(prices)
@helper.convert_none_to_string
def get_name_by_symbol(symbol):
    """Returns the name of a stock from the stock ticker.

    :param symbol: The ticker of the stock as a string.
    :type symbol: str
    :returns: [str] Returns the simple name of the stock. If the simple name does not exist then returns the full name.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    url = urls.instruments()
    payload = {'symbol': symbol}
    data = helper.request_get(url, 'indexzero', payload)
    if not data:
        return(None)
    # Prefer the short display name; fall back to the legal name when absent.
    # (Local renamed from `filter`, which shadowed the builtin.)
    name = helper.filter_data(data, info='simple_name')
    if not name:
        name = helper.filter_data(data, info='name')
    return(name)
@helper.convert_none_to_string
def get_name_by_url(url):
    """Returns the name of a stock from the instrument url. Should be located at ``https://api.robinhood.com/instruments/<id>``
    where <id> is the id of the stock.

    :param url: The url of the stock as a string.
    :type url: str
    :returns: [str] Returns the simple name of the stock. If the simple name does not exist then returns the full name.
    """
    data = helper.request_get(url)
    if not data:
        return(None)
    # Prefer the short display name; fall back to the legal name when absent.
    # (Local renamed from `filter`, which shadowed the builtin.)
    name = helper.filter_data(data, info='simple_name')
    if not name:
        name = helper.filter_data(data, info='name')
    return(name)
@helper.convert_none_to_string
def get_symbol_by_url(url):
    """Returns the symbol of a stock from the instrument url. Should be located at ``https://api.robinhood.com/instruments/<id>``
    where <id> is the id of the stock.

    :param url: The url of the stock as a string.
    :type url: str
    :returns: [str] Returns the ticker symbol of the stock.
    """
    instrument = helper.request_get(url)
    return helper.filter_data(instrument, info='symbol')
@helper.convert_none_to_string
def get_ratings(symbol, info=None):
    """Returns the ratings for a stock, including the number of buy, hold, and sell ratings.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to contain a dictionary of values that correspond to the key that matches info. \
    Possible values are summary, ratings, and instrument_id
    :type info: Optional[str]
    :returns: [dict] A dictionary with keys summary (dict), ratings (list of \
    dicts), instrument_id (str), and ratings_published_at (str); or the value \
    matching *info* when it is provided.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    data = helper.request_get(urls.ratings(symbol))
    if not data:
        return(data)
    # With no ratings present the raw payload is returned unfiltered,
    # matching the original behavior.
    if len(data['ratings']) == 0:
        return(data)
    # Re-encode each rating's text to UTF-8 bytes, as callers expect.
    for rating_entry in data['ratings']:
        rating_entry['text'] = rating_entry['text'].encode('UTF-8')
    return helper.filter_data(data, info)
def get_events(symbol, info=None):
    """Returns the events related to a stock that the user owns. For example, if you owned options for USO and that stock \
    underwent a stock split resulting in you owning shares of newly created USO1, then that event will be returned when calling \
    get_events('uso1')

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: [list] A list of event dictionaries (account, type, state, \
    direction, event_date, quantity, equity_components, total_cash_amount, \
    underlying_price, timestamps, ...). When *info* is given, a list of just \
    that key's values is returned.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    instrument_payload = {'equity_instrument_id': helper.id_for_stock(symbol)}
    data = helper.request_get(urls.events(), 'results', instrument_payload)
    return helper.filter_data(data, info)
def get_earnings(symbol, info=None):
    """Returns the earnings for the different financial quarters.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per quarter (symbol, \
    instrument, year, quarter, eps, report, call). When *info* is given, \
    a list of just that key's values is returned.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    data = helper.request_get(urls.earnings(), 'results', {'symbol': symbol})
    return helper.filter_data(data, info)
def get_news(symbol, info=None):
    """Returns news stories for a stock.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: [list] A list of story dictionaries (title, author, source, \
    summary, preview_text, published_at, url, uuid, related_instruments, \
    ...). When *info* is given, a list of just that key's values is returned.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    stories = helper.request_get(urls.news(symbol), 'results')
    return helper.filter_data(stories, info)
def get_splits(symbol, info=None):
    """Returns the date, divisor, and multiplier for when a stock split occurred.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to get a specific value. Possible options are \
    url, instrument, execution_date, divisor, and multiplier.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries (url, instrument, execution_date, \
    multiplier, divisor). When *info* is given, a list of just that key's \
    values is returned.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    splits = helper.request_get(urls.splits(symbol), 'results')
    return helper.filter_data(splits, info)
def find_instrument_data(query):
    """Will search for stocks that contain the query keyword and return the instrument data.

    :param query: The keyword to search for.
    :type query: str
    :returns: [list] A list of instrument dictionaries (id, url, symbol, name, \
    tradability, margin ratios, list_date, type, ...) for every stock matching \
    the query, or [None] when the search fails or finds nothing.
    """
    url = urls.instruments()
    payload = {'query': query}
    data = helper.request_get(url, 'pagination', payload)
    # request_get can hand back None or the [None] failure sentinel; treat
    # either like no hits (previously None crashed len() and [None] was
    # reported as "Found 1 results").
    if not data or data == [None]:
        print('No results found for that keyword', file=helper.get_output())
        return([None])
    print('Found ' + str(len(data)) + ' results', file=helper.get_output())
    return(data)
def get_stock_historicals(inputSymbols, interval='hour', span='week', bounds='regular', info=None):
    """Represents the historical data for a stock.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param interval: Interval to retrieve data for. Values are '5minute', '10minute', 'hour', 'day', 'week'. Default is 'hour'.
    :type interval: Optional[str]
    :param span: Sets the range of the data to be either 'day', 'week', 'month', '3month', 'year', or '5year'. Default is 'week'.
    :type span: Optional[str]
    :param bounds: Represents if graph will include extended trading hours or just regular trading hours. Values are 'extended', 'trading', or 'regular'. Default is 'regular'
    :type bounds: Optional[str]
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries, one per time point (begins_at, \
    open/close/high/low price, volume, session, interpolated, symbol). Data \
    for multiple tickers is concatenated one after another.
    """
    valid_intervals = ['5minute', '10minute', 'hour', 'day', 'week']
    valid_spans = ['day', 'week', 'month', '3month', 'year', '5year']
    valid_bounds = ['extended', 'regular', 'trading']
    if interval not in valid_intervals:
        print(
            'ERROR: Interval must be "5minute","10minute","hour","day",or "week"', file=helper.get_output())
        return([None])
    if span not in valid_spans:
        print('ERROR: Span must be "day","week","month","3month","year",or "5year"', file=helper.get_output())
        return([None])
    if bounds not in valid_bounds:
        print('ERROR: Bounds must be "extended","regular",or "trading"', file=helper.get_output())
        return([None])
    if bounds in ('extended', 'trading') and span != 'day':
        print('ERROR: extended and trading bounds can only be used with a span of "day"', file=helper.get_output())
        return([None])
    symbols = helper.inputs_to_set(inputSymbols)
    payload = {'symbols': ','.join(symbols),
               'interval': interval,
               'span': span,
               'bounds': bounds}
    data = helper.request_get(urls.historicals(), 'results', payload)
    if (data == None or data == [None]):
        return data
    combined = []
    for count, entry in enumerate(data):
        if len(entry['historicals']) == 0:
            print(helper.error_ticker_does_not_exist(symbols[count]), file=helper.get_output())
            continue
        # Tag every data point with its ticker before flattening.
        for point in entry['historicals']:
            point['symbol'] = entry['symbol']
            combined.append(point)
    return helper.filter_data(combined, info)
def get_stock_quote_by_id(stock_id, info=None):
    """Represents basic stock quote information
    :param stock_id: robinhood stock id
    :type stock_id: str
    :param info: Will filter the results to get a specific value from the quote dictionary.
    :type info: Optional[str]
    :return: [dict] If the info parameter is provided, then the function will extract the value of the key \
    that matches the info parameter. Otherwise, the whole dictionary is returned.
    :Dictionary Keys: * ask_price
                      * ask_size
                      * bid_price
                      * bid_size
                      * last_trade_price
                      * last_extended_hours_trade_price
                      * previous_close
                      * adjusted_previous_close
                      * previous_close_date
                      * symbol
                      * trading_halted
                      * has_traded
                      * last_trade_price_source
                      * updated_at
                      * instrument
    """
    # Fetch the quote for this instrument id, then apply the optional key filter.
    quote_url = urls.marketdata_quotes(stock_id)
    quote_data = helper.request_get(quote_url)
    return helper.filter_data(quote_data, info)
def get_stock_quote_by_symbol(symbol, info=None):
    """Represents basic stock quote information
    :param symbol: The stock ticker to look up.
    :type symbol: str
    :param info: Will filter the results to get a specific value from the quote dictionary.
    :type info: Optional[str]
    :return: [dict] If the info parameter is provided, then the function will extract the value of the key \
    that matches the info parameter. Otherwise, the whole dictionary is returned.
    :Dictionary Keys: * ask_price
                      * ask_size
                      * bid_price
                      * bid_size
                      * last_trade_price
                      * last_extended_hours_trade_price
                      * previous_close
                      * adjusted_previous_close
                      * previous_close_date
                      * symbol
                      * trading_halted
                      * has_traded
                      * last_trade_price_source
                      * updated_at
                      * instrument
    """
    # Resolve the ticker to Robinhood's internal id, and forward the info
    # filter — previously `info` was accepted but silently dropped, so key
    # filtering never worked through this wrapper.
    return get_stock_quote_by_id(helper.id_for_stock(symbol), info)
def get_pricebook_by_id(stock_id, info=None):
    """Represents Level II Market Data provided for Gold subscribers
    :param stock_id: robinhood stock id
    :type stock_id: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :return: Returns a dictionary of asks and bids.
    """
    # Pull the Level II pricebook for this instrument id, then apply the
    # optional key filter.
    pricebook_url = urls.marketdata_pricebook(stock_id)
    pricebook = helper.request_get(pricebook_url)
    return helper.filter_data(pricebook, info)
def get_pricebook_by_symbol(symbol, info=None):
    """Represents Level II Market Data provided for Gold subscribers
    :param symbol: The stock ticker.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :return: Returns a dictionary of asks and bids.
    """
    # Resolve the ticker to an instrument id and forward the info filter —
    # previously `info` was accepted but never passed along.
    return get_pricebook_by_id(helper.id_for_stock(symbol), info)
import getpass
import os
import pickle
import random
import robin_stocks.helper as helper
import robin_stocks.urls as urls
def generate_device_token():
"""This function will generate a token used when loggin on.
:returns: A string representing the token.
"""
rands = []
for i in range(0, 16):
r = random.random()
rand = 4294967296.0 * r
rands.append((int(rand) >> ((3 & i) << 3)) & 255)
hexa = []
for i in range(0, 256):
hexa.append(str(hex(i+256)).lstrip("0x").rstrip("L")[1:])
id = ""
for i in range(0, 16):
id += hexa[rands[i]]
if (i == 3) or (i == 5) or (i == 7) or (i == 9):
id += "-"
return(id)
def respond_to_challenge(challenge_id, sms_code):
    """Submit a verification code for a pending login challenge.
    :param challenge_id: The challenge id.
    :type challenge_id: str
    :param sms_code: The sms code.
    :type sms_code: str
    :returns: The response from requests.
    """
    # POST the user's code back to the challenge endpoint for this id.
    challenge_endpoint = urls.challenge_url(challenge_id)
    return helper.request_post(challenge_endpoint, {'response': sms_code})
def login(username=None, password=None, expiresIn=86400, scope='internal', by_sms=True, store_session=True, mfa_code=None):
    """This function will effectively log the user into robinhood by getting an
    authentication token and saving it to the session header. By default, it
    will store the authentication token in a pickle file and load that value
    on subsequent logins.
    :param username: The username for your robinhood account, usually your email.
        Not required if credentials are already cached and valid.
    :type username: Optional[str]
    :param password: The password for your robinhood account. Not required if
        credentials are already cached and valid.
    :type password: Optional[str]
    :param expiresIn: The time until your login session expires. This is in seconds.
    :type expiresIn: Optional[int]
    :param scope: Specifies the scope of the authentication.
    :type scope: Optional[str]
    :param by_sms: Specifies whether to send an email(False) or an sms(True)
    :type by_sms: Optional[boolean]
    :param store_session: Specifies whether to save the log in authorization
        for future log ins.
    :type store_session: Optional[boolean]
    :param mfa_code: MFA token if enabled.
    :type mfa_code: Optional[str]
    :returns: A dictionary with log in information. The 'access_token' keyword contains the access token, and the 'detail' keyword \
    contains information on whether the access token was generated or loaded from pickle file.
    """
    device_token = generate_device_token()
    # Cached credentials live at ~/.tokens/robinhood.pickle.
    home_dir = os.path.expanduser("~")
    data_dir = os.path.join(home_dir, ".tokens")
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    creds_file = "robinhood.pickle"
    pickle_path = os.path.join(data_dir, creds_file)
    # Challenge type is used if not logging in with two-factor authentication.
    if by_sms:
        challenge_type = "sms"
    else:
        challenge_type = "email"
    url = urls.login_url()
    payload = {
        'client_id': 'c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS',
        'expires_in': expiresIn,
        'grant_type': 'password',
        'password': password,
        'scope': scope,
        'username': username,
        'challenge_type': challenge_type,
        'device_token': device_token
    }
    if mfa_code:
        payload['mfa_code'] = mfa_code
    # If authentication has been stored in pickle file then load it. Stops login server from being pinged so much.
    if os.path.isfile(pickle_path):
        # If store_session has been set to false then delete the pickle file, otherwise try to load it.
        # Loading pickle file will fail if the access_token has expired.
        if store_session:
            try:
                with open(pickle_path, 'rb') as f:
                    pickle_data = pickle.load(f)
                access_token = pickle_data['access_token']
                token_type = pickle_data['token_type']
                refresh_token = pickle_data['refresh_token']
                # Set device_token to be the original device token when first logged in.
                pickle_device_token = pickle_data['device_token']
                payload['device_token'] = pickle_device_token
                # Set login status to True in order to try and get account info.
                helper.set_login_state(True)
                helper.update_session(
                    'Authorization', '{0} {1}'.format(token_type, access_token))
                # Try to load account profile to check that authorization token is still valid.
                res = helper.request_get(
                    urls.portfolio_profile(), 'regular', payload, jsonify_data=False)
                # Raises exception if response code is not 200.
                res.raise_for_status()
                return({'access_token': access_token, 'token_type': token_type,
                        'expires_in': expiresIn, 'scope': scope, 'detail': 'logged in using authentication in {0}'.format(creds_file),
                        'backup_code': None, 'refresh_token': refresh_token})
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are no longer swallowed. Any other failure (corrupt pickle,
                # expired token, HTTP error) falls through to a normal login.
                print(
                    "ERROR: There was an issue loading pickle file. Authentication may be expired - logging in normally.", file=helper.get_output())
                helper.set_login_state(False)
                helper.update_session('Authorization', None)
        else:
            os.remove(pickle_path)
    # Try to log in normally.
    if not username:
        username = input("Robinhood username: ")
        payload['username'] = username
    if not password:
        password = getpass.getpass("Robinhood password: ")
        payload['password'] = password
    data = helper.request_post(url, payload)
    # Handle case where mfa or challenge is required.
    if data:
        if 'mfa_required' in data:
            # Keep prompting until Robinhood accepts an MFA code.
            mfa_token = input("Please type in the MFA code: ")
            payload['mfa_code'] = mfa_token
            res = helper.request_post(url, payload, jsonify_data=False)
            while (res.status_code != 200):
                mfa_token = input(
                    "That MFA code was not correct. Please type in another MFA code: ")
                payload['mfa_code'] = mfa_token
                res = helper.request_post(url, payload, jsonify_data=False)
            data = res.json()
        elif 'challenge' in data:
            # SMS/email challenge flow: retry while attempts remain, then
            # attach the challenge id to the session and re-post the login.
            challenge_id = data['challenge']['id']
            sms_code = input('Enter Robinhood code for validation: ')
            res = respond_to_challenge(challenge_id, sms_code)
            while 'challenge' in res and res['challenge']['remaining_attempts'] > 0:
                sms_code = input('That code was not correct. {0} tries remaining. Please type in another code: '.format(
                    res['challenge']['remaining_attempts']))
                res = respond_to_challenge(challenge_id, sms_code)
            helper.update_session(
                'X-ROBINHOOD-CHALLENGE-RESPONSE-ID', challenge_id)
            data = helper.request_post(url, payload)
        # Update Session data with authorization or raise exception with the information present in data.
        if 'access_token' in data:
            token = '{0} {1}'.format(data['token_type'], data['access_token'])
            helper.update_session('Authorization', token)
            helper.set_login_state(True)
            data['detail'] = "logged in with brand new authentication code."
            if store_session:
                with open(pickle_path, 'wb') as f:
                    pickle.dump({'token_type': data['token_type'],
                                 'access_token': data['access_token'],
                                 'refresh_token': data['refresh_token'],
                                 'device_token': device_token}, f)
        else:
            raise Exception(data['detail'])
    else:
        raise Exception('Error: Trouble connecting to robinhood API. Check internet connection.')
    return(data)
@helper.login_required
def logout():
    """Clear the cached login state and strip the Authorization header.
    :returns: None
    """
    # Mark the session as logged out, then remove the bearer token so
    # subsequent requests are unauthenticated.
    helper.set_login_state(False)
    helper.update_session('Authorization', None)
from uuid import uuid4
import robin_stocks.crypto as crypto
import robin_stocks.helper as helper
import robin_stocks.profiles as profiles
import robin_stocks.stocks as stocks
import robin_stocks.urls as urls
@helper.login_required
def get_all_stock_orders(info=None):
    """Fetch every stock order ever placed on the account.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    # Walk every page of the paginated orders endpoint before filtering.
    all_orders = helper.request_get(urls.orders(), 'pagination')
    return helper.filter_data(all_orders, info)
@helper.login_required
def get_all_option_orders(info=None):
    """Fetch every option order ever placed on the account.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each option order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    # Walk every page of the paginated option-orders endpoint before filtering.
    all_orders = helper.request_get(urls.option_orders(), 'pagination')
    return helper.filter_data(all_orders, info)
@helper.login_required
def get_all_crypto_orders(info=None):
    """Fetch every crypto order ever placed on the account.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each crypto order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    # Walk every page of the paginated crypto-orders endpoint before filtering.
    all_orders = helper.request_get(urls.crypto_orders(), 'pagination')
    return helper.filter_data(all_orders, info)
@helper.login_required
def get_all_open_stock_orders(info=None):
    """Fetch only the stock orders that are currently open.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    all_orders = helper.request_get(urls.orders(), 'pagination')
    # A stock order is still open while its 'cancel' URL is non-null.
    open_orders = [order for order in all_orders if order['cancel'] is not None]
    return helper.filter_data(open_orders, info)
@helper.login_required
def get_all_open_option_orders(info=None):
    """Fetch only the option orders that are currently open.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    all_orders = helper.request_get(urls.option_orders(), 'pagination')
    # An option order is still open while its 'cancel_url' is non-null.
    open_orders = [order for order in all_orders if order['cancel_url'] is not None]
    return helper.filter_data(open_orders, info)
@helper.login_required
def get_all_open_crypto_orders(info=None):
    """Returns a list of all the crypto orders that are currently open.
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for each crypto order. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    url = urls.crypto_orders()
    data = helper.request_get(url, 'pagination')
    # Open orders are the ones whose 'cancel_url' has not been cleared.
    data = [item for item in data if item['cancel_url'] is not None]
    return(helper.filter_data(data, info))
@helper.login_required
def get_stock_order_info(orderID):
    """Look up a single stock order by its id.
    :param orderID: The ID associated with the order. Can be found using get_all_stock_orders(info=None).
    :type orderID: str
    :returns: Returns a list of dictionaries of key/value pairs for the order.
    """
    return helper.request_get(urls.orders(orderID))
@helper.login_required
def get_option_order_info(order_id):
    """Look up a single option order by its id.
    :param order_id: The ID associated with the option order.
    :type order_id: str
    :returns: Returns a list of dictionaries of key/value pairs for the order.
    """
    return helper.request_get(urls.option_orders(order_id))
@helper.login_required
def get_crypto_order_info(order_id):
    """Look up a single crypto order by its id.
    :param order_id: The ID associated with the crypto order.
    :type order_id: str
    :returns: Returns a list of dictionaries of key/value pairs for the order.
    """
    return helper.request_get(urls.crypto_orders(order_id))
@helper.login_required
def find_stock_orders(**arguments):
    """Return the stock orders whose fields match every supplied keyword.
    :param arguments: Variable length of keyword arguments. EX. find_orders(symbol='FB',cancel=None,quantity=1)
    :type arguments: str
    :returns: Returns a list of orders.
    """
    all_orders = helper.request_get(urls.orders(), 'pagination')
    # No filters supplied: hand back everything.
    if (len(arguments) == 0):
        return(all_orders)
    # Normalize order quantities so integer keyword filters compare as strings.
    for item in all_orders:
        item['quantity'] = str(int(float(item['quantity'])))
    # Orders reference instruments by URL, so translate a 'symbol' filter
    # into the matching 'instrument' URL filter.
    if 'symbol' in arguments:
        arguments['instrument'] = stocks.get_instruments_by_symbols(
            arguments['symbol'], info='url')[0]
        del arguments['symbol']
    if 'quantity' in arguments:
        arguments['quantity'] = str(arguments['quantity'])
    matches = []
    for item in all_orders:
        for key, value in arguments.items():
            if key not in item:
                print(helper.error_argument_not_key_in_dictionary(key), file=helper.get_output())
                return([None])
            if value != item[key]:
                break
        else:
            # Loop completed without a mismatch: every filter matched.
            matches.append(item)
    return(matches)
@helper.login_required
def cancel_stock_order(orderID):
    """Cancel a single stock order.
    :param orderID: The ID associated with the order. Can be found using get_all_stock_orders(info=None).
    :type orderID: str
    :returns: Returns the order information for the order that was cancelled.
    """
    response = helper.request_post(urls.cancel(orderID))
    # Only report success when the API actually returned data.
    if response:
        print('Order '+str(orderID)+' cancelled', file=helper.get_output())
    return response
@helper.login_required
def cancel_option_order(orderID):
    """Cancel a single option order.
    :param orderID: The ID associated with the order. Can be found using get_all_option_orders(info=None).
    :type orderID: str
    :returns: Returns the order information for the order that was cancelled.
    """
    response = helper.request_post(urls.option_cancel(orderID))
    # Only report success when the API actually returned data.
    if response:
        print('Order '+str(orderID)+' cancelled', file=helper.get_output())
    return response
@helper.login_required
def cancel_crypto_order(orderID):
    """Cancel a single crypto order.
    :param orderID: The ID associated with the order. Can be found using get_all_crypto_orders(info=None).
    :type orderID: str
    :returns: Returns the order information for the order that was cancelled.
    """
    response = helper.request_post(urls.crypto_cancel(orderID))
    # Only report success when the API actually returned data.
    if response:
        print('Order '+str(orderID)+' cancelled', file=helper.get_output())
    return response
@helper.login_required
def cancel_all_stock_orders():
    """Cancel every open stock order on the account.
    :returns: The list of orders that were cancelled.
    """
    all_orders = helper.request_get(urls.orders(), 'pagination')
    # Open orders expose a non-null 'cancel' URL; POST to each to cancel.
    open_orders = [order for order in all_orders if order['cancel'] is not None]
    for open_order in open_orders:
        helper.request_post(open_order['cancel'])
    print('All Stock Orders Cancelled', file=helper.get_output())
    return open_orders
@helper.login_required
def cancel_all_option_orders():
    """Cancel every open option order on the account.
    :returns: Returns the order information for the orders that were cancelled.
    """
    all_orders = helper.request_get(urls.option_orders(), 'pagination')
    # Open orders expose a non-null 'cancel_url'; POST to each to cancel.
    open_orders = [order for order in all_orders if order['cancel_url'] is not None]
    for open_order in open_orders:
        helper.request_post(open_order['cancel_url'])
    print('All Option Orders Cancelled', file=helper.get_output())
    return open_orders
@helper.login_required
def cancel_all_crypto_orders():
    """Cancel every open crypto order on the account.
    :returns: Returns the order information for the orders that were cancelled.
    """
    all_orders = helper.request_get(urls.crypto_orders(), 'pagination')
    # Open orders expose a non-null 'cancel_url'; POST to each to cancel.
    open_orders = [order for order in all_orders if order['cancel_url'] is not None]
    for open_order in open_orders:
        helper.request_post(open_order['cancel_url'])
    print('All Crypto Orders Cancelled', file=helper.get_output())
    return open_orders
@helper.login_required
def order_buy_market(symbol, quantity, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place an immediate market buy for whole shares.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A market order carries neither a limit price nor a stop price.
    return order(symbol, quantity, "buy", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_fractional_by_quantity(symbol, quantity, timeInForce='gfd', extendedHours=False, jsonify=True):
    """Place an immediate market buy for a fractional share count.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support
    limit, stop, or stop loss orders for fractional trades.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The amount of the fractional shares you want to buy.
    :type quantity: float
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Fractional buys are plain market orders: no limit price, no stop price.
    return order(symbol, quantity, "buy", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_fractional_by_price(symbol, amountInDollars, timeInForce='gfd', extendedHours=False, jsonify=True):
    """Place an immediate market buy for a dollar amount of fractional shares.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support
    limit, stop, or stop loss orders for fractional trades.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the fractional shares you want to buy.
    :type amountInDollars: float
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Robinhood enforces a $1.00 minimum for fractional trades.
    if amountInDollars < 1:
        print("ERROR: Fractional share price should meet minimum 1.00.", file=helper.get_output())
        return None
    # Convert the dollar amount into a rounded fractional share count using
    # the current ask; if no price is available the quantity falls to 0.
    ask = next(iter(stocks.get_latest_price(symbol, 'ask_price', extendedHours)), 0.00)
    if ask == 0.00:
        share_count = 0
    else:
        share_count = helper.round_price(amountInDollars/float(ask))
    return order(symbol, share_count, "buy", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_limit(symbol, quantity, limitPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a limit buy that executes once the limit price is reached.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param limitPrice: The price to trigger the buy order.
    :type limitPrice: float
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A limit order supplies a limit price but no stop price.
    return order(symbol, quantity, "buy", limitPrice, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_stop_loss(symbol, quantity, stopPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a stop buy that becomes a market order once the stop price is reached.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param stopPrice: The price to trigger the market order.
    :type stopPrice: float
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A stop-loss order supplies a stop price but no limit price.
    return order(symbol, quantity, "buy", None, stopPrice, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_stop_limit(symbol, quantity, limitPrice, stopPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a stop buy that becomes a limit order once the stop price is reached.
    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param limitPrice: The price to trigger the market order.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A stop-limit order carries both a limit price and a stop price.
    return order(symbol, quantity, "buy", limitPrice, stopPrice, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_buy_trailing_stop(symbol, quantity, trailAmount, trailType='percentage', timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a trailing-stop buy that becomes a market order once the trailing stop price is reached.
    :param symbol: The stock ticker of the stock to buy.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param trailAmount: How much to trail by; a percentage or a dollar value depending on trailType.
    :type trailAmount: float
    :param trailType: Either "amount" or "percentage".
    :type trailType: str
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Trailing-stop orders go through a dedicated helper that manages the trail.
    return order_trailing_stop(symbol, quantity, "buy", trailAmount, trailType, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_market(symbol, quantity, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place an immediate market sell for whole shares.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A market order carries neither a limit price nor a stop price.
    return order(symbol, quantity, "sell", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_fractional_by_quantity(symbol, quantity, timeInForce='gfd', priceType='bid_price', extendedHours=False, jsonify=True):
    """Place an immediate market sell for a fractional share count.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support
    limit, stop, or stop loss orders for fractional trades.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The amount of the fractional shares you want to sell.
    :type quantity: float
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # NOTE(review): priceType is accepted for interface compatibility but is
    # not used by the underlying order call.
    return order(symbol, quantity, "sell", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_fractional_by_price(symbol, amountInDollars, timeInForce='gfd', extendedHours=False, jsonify=True):
    """Place an immediate market sell for a dollar amount of fractional shares.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support
    limit, stop, or stop loss orders for fractional trades.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the fractional shares you want to sell.
    :type amountInDollars: float
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Robinhood enforces a $1.00 minimum for fractional trades.
    if amountInDollars < 1:
        print("ERROR: Fractional share price should meet minimum 1.00.", file=helper.get_output())
        return None
    # Convert the dollar amount into a rounded fractional share count using
    # the current bid; if no price is available the quantity falls to 0.
    bid = next(iter(stocks.get_latest_price(symbol, 'bid_price', extendedHours)), 0.00)
    if bid == 0.00:
        share_count = 0
    else:
        share_count = helper.round_price(amountInDollars/float(bid))
    return order(symbol, share_count, "sell", None, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_limit(symbol, quantity, limitPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a limit sell that executes once the limit price is reached.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param limitPrice: The price to trigger the sell order.
    :type limitPrice: float
    :param timeInForce: 'gtc' = good until cancelled, 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary with order details such as the order id, the state of the order \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # A limit order supplies a limit price but no stop price.
    return order(symbol, quantity, "sell", limitPrice, None, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_stop_loss(symbol, quantity, stopPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Place a stop-loss sell: once stopPrice is reached, a market sell order is submitted.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of shares to sell.
    :type quantity: int
    :param stopPrice: The price that triggers the market order.
    :type stopPrice: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the sale, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # No limit price -- the generic order() helper builds a stop-triggered market order.
    return order(symbol, quantity, "sell",
                 limitPrice=None, stopPrice=stopPrice,
                 timeInForce=timeInForce, extendedHours=extendedHours,
                 jsonify=jsonify)
@helper.login_required
def order_sell_stop_limit(symbol, quantity, limitPrice, stopPrice, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a stop order to be turned into a limit order once a certain stop price is reached.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param limitPrice: The limit price for the limit order created once stopPrice is reached.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    return order(symbol, quantity, "sell", limitPrice, stopPrice, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_sell_trailing_stop(symbol, quantity, trailAmount, trailType='percentage', timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a trailing stop sell order to be turned into a market order when the trailing stop price is reached.
    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param trailAmount: how much to trail by; could be percentage or dollar value depending on trailType
    :type trailAmount: float
    :param trailType: could be "amount" or "percentage"
    :type trailType: str
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    return order_trailing_stop(symbol, quantity, "sell", trailAmount, trailType, timeInForce, extendedHours, jsonify)
@helper.login_required
def order_trailing_stop(symbol, quantity, side, trailAmount, trailType='percentage', timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a trailing stop order to be turned into a market order when the trailing stop price is reached.
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of stocks to trade.
    :type quantity: int
    :param side: buy or sell
    :type side: str
    :param trailAmount: how much to trail by; could be percentage or dollar value depending on trailType
    :type trailAmount: float
    :param trailType: could be "amount" or "percentage"
    :type trailType: str
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase or sale of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
        trailAmount = float(trailAmount)
    except AttributeError as message:
        # Match the rest of the module: route error output through helper.get_output().
        print(message, file=helper.get_output())
        return None
    # BUGFIX: get_latest_price's second positional argument is priceType, not
    # extendedHours (see the calls in order() and the fractional-sell helper) --
    # pass extendedHours in its own slot and leave priceType unset.
    stock_price = helper.round_price(stocks.get_latest_price(symbol, None, extendedHours)[0])
    # find stop price based on whether trailType is "amount" or "percentage" and whether its buy or sell
    percentage = 0
    try:
        if trailType == 'amount':
            margin = trailAmount
        else:
            margin = stock_price * trailAmount * 0.01
            percentage = trailAmount
    except Exception as e:
        print('ERROR: {}'.format(e), file=helper.get_output())
        return None
    # Trailing above the price for buys, below it for sells.
    stopPrice = stock_price + margin if side == "buy" else stock_price - margin
    stopPrice = helper.round_price(stopPrice)
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'instrument': stocks.get_instruments_by_symbols(symbol, info='url')[0],
        'symbol': symbol,
        'quantity': quantity,
        'ref_id': str(uuid4()),
        'type': 'market',
        'stop_price': stopPrice,
        'time_in_force': timeInForce,
        'trigger': 'stop',
        'side': side,
        'extended_hours': extendedHours
    }
    if side == "buy":
        # price should be greater than stopPrice, adding a 5% threshold
        payload['price'] = helper.round_price(stopPrice * 1.05)
    if trailType == 'amount':
        payload['trailing_peg'] = {'type': 'price', 'price': {'amount': trailAmount, 'currency_code': 'USD'}}
    else:
        payload['trailing_peg'] = {'type': 'percentage', 'percentage': str(percentage)}
    url = urls.orders()
    data = helper.request_post(url, payload, json=True, jsonify_data=jsonify)
    return (data)
@helper.login_required
def order(symbol, quantity, side, limitPrice=None, stopPrice=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """A generic order function.
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of stocks to trade.
    :type quantity: int
    :param side: Either 'buy' or 'sell'
    :type side: str
    :param limitPrice: The limit price for a limit or stop-limit order. If omitted, a market-style order is placed.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit or market order.
    :type stopPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: str
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase or selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    orderType = "market"
    trigger = "immediate"
    # Buys are quoted against the ask, sells against the bid.
    if side == "buy":
        priceType = "ask_price"
    else:
        priceType = "bid_price"
    if limitPrice and stopPrice:
        # Stop-limit: a limit order is placed once the stop price is reached.
        price = helper.round_price(limitPrice)
        stopPrice = helper.round_price(stopPrice)
        orderType = "limit"
        trigger = "stop"
    elif limitPrice:
        # Plain limit order.
        price = helper.round_price(limitPrice)
        orderType = "limit"
    elif stopPrice:
        # Stop (market) order; buys require a price field, sells leave it unset.
        stopPrice = helper.round_price(stopPrice)
        if side == "buy":
            price = stopPrice
        else:
            price = None
        trigger = "stop"
    else:
        # Market order priced at the latest quote (falls back to 0.00 if no quote returned).
        price = helper.round_price(next(iter(stocks.get_latest_price(symbol, priceType, extendedHours)), 0.00))
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'instrument': stocks.get_instruments_by_symbols(symbol, info='url')[0],
        'symbol': symbol,
        'price': price,
        'quantity': quantity,
        'ref_id': str(uuid4()),
        'type': orderType,
        'stop_price': stopPrice,
        'time_in_force': timeInForce,
        'trigger': trigger,
        'side': side,
        'extended_hours': extendedHours
    }
    url = urls.orders()
    # NOTE(review): unlike the option and trailing-stop endpoints this call does not
    # pass json=True -- confirm whether the orders endpoint expects form-encoded data.
    data = helper.request_post(url, payload, jsonify_data=jsonify)
    return data
@helper.login_required
def order_option_credit_spread(price, symbol, quantity, spread, timeInForce='gtc', jsonify=True):
    """Place a limit order for an option credit spread.
    :param price: The limit price to trigger a sell of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.
    :type spread: dict
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the option trade, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Credit spreads are just the generic spread order with direction fixed to "credit".
    return order_option_spread("credit", price, symbol, quantity, spread,
                               timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_option_debit_spread(price, symbol, quantity, spread, timeInForce='gtc', jsonify=True):
    """Place a limit order for an option debit spread.
    :param price: The limit price to trigger a sell of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.
    :type spread: dict
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the option trade, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Debit spreads are just the generic spread order with direction fixed to "debit".
    return order_option_spread("debit", price, symbol, quantity, spread,
                               timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_option_spread(direction, price, symbol, quantity, spread, timeInForce='gtc', jsonify=True):
    """Submits a limit order for an option spread. i.e. place a debit / credit spread
    :param direction: Can be "credit" or "debit".
    :type direction: str
    :param price: The limit price to trigger a trade of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to trade.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.\n
        - ratio_quantity: (optional) The ratio quantity of the leg; defaults to 1.
    :type spread: dict
    :param timeInForce: Changes how long the order will be in effect for.
    'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the trading of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    legs = []
    for each in spread:
        # Resolve each leg's option contract id from its expiration/strike/type.
        optionID = helper.id_for_option(symbol,
                                        each['expirationDate'],
                                        each['strike'],
                                        each['optionType'])
        # Allow an optional per-leg 'ratio_quantity'; the default of 1 preserves
        # the previous hard-coded behavior for callers that omit the key.
        legs.append({'position_effect': each['effect'],
                     'side': each['action'],
                     'ratio_quantity': each.get('ratio_quantity', 1),
                     'option': urls.option_instruments(optionID)})
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'direction': direction,
        'time_in_force': timeInForce,
        'legs': legs,
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    url = urls.option_orders()
    data = helper.request_post(url, payload, json=True, jsonify_data=jsonify)
    return data
@helper.login_required
def order_buy_option_limit(positionEffect, creditOrDebit, price, symbol, quantity, expirationDate, strike, optionType='both', timeInForce='gtc', jsonify=True):
    """Place a limit order to buy an option, e.g. open a long call or a long put.
    :param positionEffect: Either 'open' for a buy to open effect or 'close' for a buy to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param price: The limit price to trigger a buy of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to buy.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'
    :type optionType: str
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the option purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    # Resolve the option contract id and build the single buy leg.
    option_id = helper.id_for_option(symbol, expirationDate, strike, optionType)
    buy_leg = {
        'position_effect': positionEffect,
        'side': 'buy',
        'ratio_quantity': 1,
        'option': urls.option_instruments(option_id),
    }
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [buy_leg],
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    return helper.request_post(urls.option_orders(), payload, json=True, jsonify_data=jsonify)
@helper.login_required
def order_buy_option_stop_limit(positionEffect, creditOrDebit, limitPrice, stopPrice, symbol, quantity, expirationDate, strike, optionType='both', timeInForce='gtc', jsonify=True):
    """Place a stop order that converts into a limit buy of an option once the stop price is reached.
    :param positionEffect: Either 'open' for a buy to open effect or 'close' for a buy to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param limitPrice: The limit price to trigger a buy of the option.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to buy.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'
    :type optionType: str
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the option purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    # Resolve the option contract id and build the single buy leg.
    option_id = helper.id_for_option(symbol, expirationDate, strike, optionType)
    buy_leg = {
        'position_effect': positionEffect,
        'side': 'buy',
        'ratio_quantity': 1,
        'option': urls.option_instruments(option_id),
    }
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [buy_leg],
        'type': 'limit',
        'trigger': 'stop',
        'price': limitPrice,
        'stop_price': stopPrice,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    return helper.request_post(urls.option_orders(), payload, json=True, jsonify_data=jsonify)
@helper.login_required
def order_sell_option_stop_limit(positionEffect, creditOrDebit, limitPrice, stopPrice, symbol, quantity, expirationDate, strike, optionType='both', timeInForce='gtc', jsonify=True):
    """Submits a stop order to be turned into a limit order once a certain stop price is reached.
    :param positionEffect: Either 'open' for a sell to open effect or 'close' for a sell to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param limitPrice: The limit price to trigger a sell of the option.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'
    :type optionType: str
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    # BUGFIX: this was the only order function missing @helper.login_required;
    # added for consistency so the request is authenticated like its siblings.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    optionID = helper.id_for_option(symbol, expirationDate, strike, optionType)
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [
            {'position_effect': positionEffect, 'side': 'sell',
             'ratio_quantity': 1, 'option': urls.option_instruments(optionID)},
        ],
        'type': 'limit',
        'trigger': 'stop',
        'price': limitPrice,
        'stop_price': stopPrice,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    url = urls.option_orders()
    data = helper.request_post(url, payload, json=True, jsonify_data=jsonify)
    return data
@helper.login_required
def order_sell_option_limit(positionEffect, creditOrDebit, price, symbol, quantity, expirationDate, strike, optionType='both', timeInForce='gtc', jsonify=True):
    """Place a limit order to sell an option, e.g. open a short call or a short put.
    :param positionEffect: Either 'open' for a sell to open effect or 'close' for a sell to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param price: The limit price to trigger a sell of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'
    :type optionType: str
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the option sale, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return None
    # Resolve the option contract id and build the single sell leg.
    option_id = helper.id_for_option(symbol, expirationDate, strike, optionType)
    sell_leg = {
        'position_effect': positionEffect,
        'side': 'sell',
        'ratio_quantity': 1,
        'option': urls.option_instruments(option_id),
    }
    payload = {
        'account': profiles.load_account_profile(info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [sell_leg],
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    return helper.request_post(urls.option_orders(), payload, json=True, jsonify_data=jsonify)
@helper.login_required
def order_buy_crypto_by_price(symbol, amountInDollars, timeInForce='gtc', jsonify=True):
    """Market-buy a crypto by specifying the dollar amount to spend.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to buy.
    :type amountInDollars: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # amountIn="price" tells order_crypto to interpret the amount as dollars.
    return order_crypto(symbol, "buy", amountInDollars,
                        amountIn="price", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_buy_crypto_by_quantity(symbol, quantity, timeInForce='gtc', jsonify=True):
    """Market-buy a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to buy.
    :type quantity: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # amountIn="quantity" tells order_crypto to interpret the amount as shares.
    return order_crypto(symbol, "buy", quantity,
                        amountIn="quantity", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_buy_crypto_limit(symbol, quantity, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-buy a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to buy.
    :type quantity: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Passing a limitPrice makes order_crypto submit a limit rather than a market order.
    return order_crypto(symbol, "buy", quantity,
                        amountIn="quantity", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_buy_crypto_limit_by_price(symbol, amountInDollars, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-buy a crypto by specifying the dollar amount to spend.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to buy.
    :type amountInDollars: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the purchase, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Dollar-denominated limit buy: amountIn="price" plus an explicit limitPrice.
    return order_crypto(symbol, "buy", amountInDollars,
                        amountIn="price", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_sell_crypto_by_price(symbol, amountInDollars, timeInForce='gtc', jsonify=True):
    """Market-sell a crypto by specifying the dollar amount to sell.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to sell.
    :type amountInDollars: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the sale, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # amountIn="price" tells order_crypto to interpret the amount as dollars.
    return order_crypto(symbol, "sell", amountInDollars,
                        amountIn="price", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_sell_crypto_by_quantity(symbol, quantity, timeInForce='gtc', jsonify=True):
    """Submits a market order for a crypto by specifying the decimal amount of shares to sell.
    Good for share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to sell.
    :type quantity: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of crypto, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    return order_crypto(symbol, "sell", quantity, "quantity", None, timeInForce, jsonify)
@helper.login_required
def order_sell_crypto_limit(symbol, quantity, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-sell a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to sell.
    :type quantity: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: How long the order stays active: 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, return the raw request object (status code and headers) instead of a dict.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the sale, including the order id, the order state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Passing a limitPrice makes order_crypto submit a limit rather than a market order.
    return order_crypto(symbol, "sell", quantity,
                        amountIn="quantity", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@helper.login_required
def order_sell_crypto_limit_by_price(symbol, amountInDollars, limitPrice, timeInForce='gtc', jsonify=True):
    """Submits a limit order for a crypto by specifying the decimal price to sell.
    Good for share fractions up to 8 decimal places.
    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to sell.
    :type amountInDollars: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of crypto, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    return order_crypto(symbol, "sell", amountInDollars, "price", limitPrice, timeInForce, jsonify)
@helper.login_required
def order_crypto(symbol, side, quantityOrPrice, amountIn="quantity", limitPrice=None, timeInForce="gtc", jsonify=True):
    """Submits an order for a crypto.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param side: Either 'buy' or 'sell'
    :type side: str
    :param quantityOrPrice: Either the decimal price of shares to trade or the decimal quantity of shares.
    :type quantityOrPrice: float
    :param amountIn: If left default value of 'quantity', order will attempt to trade cryptos by the amount of crypto \
        you want to trade. If changed to 'price', order will attempt to trade cryptos by the price you want to buy or sell.
    :type amountIn: Optional[str]
    :param limitPrice: The price to trigger the order. If provided, a limit order is placed instead of a market order.
    :type limitPrice: Optional[float]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[bool]
    :returns: Dictionary that contains information regarding the trading of crypto, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        # symbol was not a string; report and bail out.
        print(message, file=helper.get_output())
        return None
    crypto_info = crypto.get_crypto_info(symbol)
    if crypto_info['display_only']:
        print("WARNING: The dictionary returned by crypto.get_crypto_info() for this crypto has key 'display_only' set to True. May not be able to trade this crypto.", file=helper.get_output())

    orderType = "market"
    # Price the order off the side of the book you would hit: buys use the ask, sells use the bid.
    if side == "buy":
        priceType = "ask_price"
    else:
        priceType = "bid_price"

    if limitPrice:
        price = limitPrice
        orderType = "limit"
    else:
        price = helper.round_price(crypto.get_crypto_quote_from_id(crypto_info['id'], info=priceType))

    if amountIn == "quantity":
        quantity = quantityOrPrice
    else:
        # Convert a dollar amount into a share quantity at the chosen price.
        quantity = helper.round_price(quantityOrPrice/price)

    payload = {
        'account_id': crypto.load_crypto_profile(info="id"),
        'currency_pair_id': crypto_info['id'],
        'price': price,
        'quantity': quantity,
        'ref_id': str(uuid4()),  # client-side idempotency token for the order
        'side': side,
        'time_in_force': timeInForce,
        'type': orderType
    }

    url = urls.order_crypto()
    data = helper.request_post(url, payload, json=True, jsonify_data=jsonify)
    return(data)
import robin_stocks.helper as helper
import robin_stocks.urls as urls
@helper.login_required
def load_account_profile(info=None):
    """Fetch the account profile, which includes day trading information and
    cash held by Robinhood.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: url, portfolio_cash, can_downgrade_to_cash, user, account_number, \
    type, created_at, updated_at, deactivated, deposit_halted, only_position_closing_trades, \
    buying_power, cash_available_for_withdrawal, cash, cash_held_for_orders, uncleared_deposits, \
    sma, sma_held_for_orders, unsettled_funds, unsettled_debit, crypto_buying_power, \
    max_ach_early_access_amount, cash_balances, margin_balances, sweep_enabled, \
    instant_eligibility, option_level, is_pinnacle_account, rhs_account_number, state, \
    active_subscription_id, locked, permanently_deactivated, received_ach_debit_locked, \
    drip_enabled, eligible_for_fractionals, eligible_for_drip, eligible_for_cash_management, \
    cash_management_enabled, option_trading_on_expiration_enabled, cash_held_for_options_collateral, \
    fractional_position_closing_only, user_id, rhs_stock_loan_consent_status

    """
    profile_data = helper.request_get(urls.account_profile(), 'indexzero')
    return helper.filter_data(profile_data, info)
@helper.login_required
def load_basic_profile(info=None):
    """Fetch the personal profile: phone number, city, marital status,
    date of birth, and similar personal details.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: user, address, city, state, zipcode, phone_number, marital_status, \
    date_of_birth, citizenship, country_of_residence, number_dependents, signup_as_rhs, \
    tax_id_ssn, updated_at

    """
    profile_data = helper.request_get(urls.basic_profile())
    return helper.filter_data(profile_data, info)
@helper.login_required
def load_investment_profile(info=None):
    """Fetch the investment profile — the answers to the questionnaire filled
    out when the account was created.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: user, total_net_worth, annual_income, source_of_funds, \
    investment_objective, investment_experience, liquid_net_worth, risk_tolerance, \
    tax_bracket, time_horizon, liquidity_needs, investment_experience_collected, \
    suitability_verified, option_trading_experience, professional_trader, \
    understand_option_spreads, interested_in_options, updated_at

    """
    profile_data = helper.request_get(urls.investment_profile())
    return helper.filter_data(profile_data, info)
@helper.login_required
def load_portfolio_profile(info=None):
    """Fetch the portfolio profile: withdrawable amount, market value of the
    account, excess margin, and related equity figures.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: url, account, start_date, market_value, equity, \
    extended_hours_market_value, extended_hours_equity, extended_hours_portfolio_equity, \
    last_core_market_value, last_core_equity, last_core_portfolio_equity, excess_margin, \
    excess_maintenance, excess_margin_with_uncleared_deposits, \
    excess_maintenance_with_uncleared_deposits, equity_previous_close, \
    portfolio_equity_previous_close, adjusted_equity_previous_close, \
    adjusted_portfolio_equity_previous_close, withdrawable_amount, \
    unwithdrawable_deposits, unwithdrawable_grants

    """
    profile_data = helper.request_get(urls.portfolio_profile(), 'indexzero')
    return helper.filter_data(profile_data, info)
@helper.login_required
def load_security_profile(info=None):
    """Fetch the security profile associated with the account.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: user, object_to_disclosure, sweep_consent, control_person, \
    control_person_security_symbol, security_affiliated_employee, \
    security_affiliated_firm_relationship, security_affiliated_firm_name, \
    security_affiliated_person_name, security_affiliated_address, \
    security_affiliated_address_subject, security_affiliated_requires_duplicates, \
    stock_loan_consent_status, agreed_to_rhs, agreed_to_rhs_margin, \
    rhs_stock_loan_consent_status, updated_at

    """
    profile_data = helper.request_get(urls.security_profile())
    return helper.filter_data(profile_data, info)
@helper.login_required
def load_user_profile(info=None):
    """Fetch the user profile: username, email, and links to the urls of
    the other profiles.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. When ``info`` is given, only the \
    string value of the matching key is returned.

    :Dictionary Keys: url, id, id_info, username, email, email_verified, first_name, \
    last_name, origin, profile_name, created_at

    """
    profile_data = helper.request_get(urls.user_profile())
    return helper.filter_data(profile_data, info)
import sys
import robin_stocks.helper as helper
import robin_stocks.urls as urls
def spinning_cursor():
    """Endlessly yield the characters of a spinning-cursor animation."""
    while True:
        yield from '|/-\\'


# Shared generator used by write_spinner() to advance the animation one frame per call.
spinner = spinning_cursor()
def write_spinner():
    """Draw one frame of a spinning cursor so the user knows market data is loading.

    Only writes when output is going to stdout; after printing, backspaces over
    the message so the next frame overwrites it in place.
    """
    if helper.get_output() == sys.stdout:
        label = 'Loading Market Data '
        sys.stdout.write(label)
        sys.stdout.write(next(spinner))
        sys.stdout.flush()
        sys.stdout.write('\b' * (len(label) + 1))
@helper.login_required
def get_aggregate_positions(info=None):
    """Collapses all option orders for a stock into a single dictionary.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per order. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    """
    response = helper.request_get(urls.aggregate(), 'pagination')
    return helper.filter_data(response, info)
@helper.login_required
def get_market_options(info=None):
    """Returns a list of all options.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per option. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    """
    response = helper.request_get(urls.option_orders(), 'pagination')
    return helper.filter_data(response, info)
@helper.login_required
def get_all_option_positions(info=None):
    """Returns every option position ever held for the account.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per option. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    """
    response = helper.request_get(urls.option_positions(), 'pagination')
    return helper.filter_data(response, info)
@helper.login_required
def get_open_option_positions(info=None):
    """Returns all currently open option positions for the account.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per option. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    """
    # nonzero=True restricts the endpoint to positions that are still open.
    response = helper.request_get(urls.option_positions(), 'pagination', {'nonzero': 'True'})
    return helper.filter_data(response, info)
def get_chains(symbol, info=None):
    """Returns the chain information of an option.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the option. When ``info`` is given, \
    the value of the matching key instead.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        # symbol was not a string; report and bail out.
        print(message, file=helper.get_output())
        return None

    response = helper.request_get(urls.chains(symbol))
    return helper.filter_data(response, info)
@helper.login_required
def find_tradable_options(symbol, expirationDate=None, strikePrice=None, optionType=None, info=None):
    """Returns a list of all available options for a stock.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the strike price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or left blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all calls of the stock. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    url = urls.option_instruments()
    # Look up the chain id once instead of twice (the original called id_for_chain
    # both for the validity check and for the payload).
    chain_id = helper.id_for_chain(symbol)
    if not chain_id:
        print("Symbol {} is not valid for finding options.".format(symbol), file=helper.get_output())
        return [None]

    payload = {'chain_id': chain_id,
               'chain_symbol': symbol,
               'state': 'active'}
    # Only include optional filters the caller actually supplied.
    if expirationDate:
        payload['expiration_dates'] = expirationDate
    if strikePrice:
        payload['strike_price'] = strikePrice
    if optionType:
        payload['type'] = optionType

    data = helper.request_get(url, 'pagination', payload)
    return(helper.filter_data(data, info))
@helper.login_required
def find_options_by_expiration(inputSymbols, expirationDate, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for all matching options. When \
    ``info`` is given, a list of strings holding the value of the matching key instead.

    """
    try:
        symbols = helper.inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    matches = []
    for ticker in symbols:
        candidates = find_tradable_options(ticker, expirationDate, None, optionType, None)
        # Keep only contracts whose expiration actually matches the requested date.
        selected = [opt for opt in candidates if opt.get("expiration_date") == expirationDate]
        # Enrich each contract with its live market data.
        for opt in selected:
            quote = get_option_market_data_by_id(opt['id'])
            if quote:
                opt.update(quote[0])
            write_spinner()
        matches.extend(selected)

    return helper.filter_data(matches, info)
@helper.login_required
def find_options_by_strike(inputSymbols, strikePrice, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param strikePrice: Represents the strike price to filter for.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for all matching options. When \
    ``info`` is given, a list of strings holding the value of the matching key instead.

    """
    try:
        symbols = helper.inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    matches = []
    for ticker in symbols:
        selected = find_tradable_options(ticker, None, strikePrice, optionType, None)
        # Enrich each contract with its live market data.
        for opt in selected:
            quote = get_option_market_data_by_id(opt['id'])
            if quote:
                opt.update(quote[0])
            write_spinner()
        matches.extend(selected)

    return helper.filter_data(matches, info)
@helper.login_required
def find_options_by_expiration_and_strike(inputSymbols, expirationDate, strikePrice, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the strike price to filter for.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for all matching options. When \
    ``info`` is given, a list of strings holding the value of the matching key instead.

    """
    try:
        symbols = helper.inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    matches = []
    for ticker in symbols:
        candidates = find_tradable_options(ticker, expirationDate, strikePrice, optionType, None)
        # Keep only contracts whose expiration actually matches the requested date.
        selected = [opt for opt in candidates if opt.get("expiration_date") == expirationDate]
        # Enrich each contract with its live market data.
        for opt in selected:
            quote = get_option_market_data_by_id(opt['id'])
            if quote:
                opt.update(quote[0])
            write_spinner()
        matches.extend(selected)

    return helper.filter_data(matches, info)
@helper.login_required
def find_options_by_specific_profitability(inputSymbols, expirationDate=None, strikePrice=None, optionType=None, typeProfit="chance_of_profit_short", profitFloor=0.0, profitCeiling=1.0, info=None):
    """Returns a list of option market data for several stock tickers that match a range of profitability.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD. Leave as None to get all available dates.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option. Leave as None to get all available strike prices.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param typeProfit: Will either be "chance_of_profit_short" or "chance_of_profit_long".
    :type typeProfit: str
    :param profitFloor: The lower percentage on scale 0 to 1.
    :type profitFloor: int
    :param profitCeiling: The higher percentage on scale 0 to 1.
    :type profitCeiling: int
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
    If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.

    """
    symbols = helper.inputs_to_set(inputSymbols)
    data = []

    if (typeProfit != "chance_of_profit_short" and typeProfit != "chance_of_profit_long"):
        print("Invalid string for 'typeProfit'. Defaulting to 'chance_of_profit_short'.", file=helper.get_output())
        typeProfit = "chance_of_profit_short"

    for symbol in symbols:
        tempData = find_tradable_options(symbol, expirationDate, strikePrice, optionType, info=None)
        for option in tempData:
            if expirationDate and option.get("expiration_date") != expirationDate:
                continue

            market_data = get_option_market_data_by_id(option['id'])

            if len(market_data):
                option.update(market_data[0])
                write_spinner()

            # Keep options whose profitability figure falls inside [profitFloor, profitCeiling].
            # Narrowed from a bare `except: pass`: only a missing key or an unparsable/None
            # value should skip the option — anything else is a real bug and should surface.
            try:
                floatValue = float(option[typeProfit])
            except (KeyError, TypeError, ValueError):
                continue
            if profitFloor <= floatValue <= profitCeiling:
                data.append(option)

    return(helper.filter_data(data, info))
@helper.login_required
def get_option_market_data_by_id(id, info=None):
    """Returns the option market data for a stock, including the greeks,
    open interest, change of profit, and adjusted mark price.

    :param id: The id of the stock.
    :type id: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the stock. When ``info`` is given, \
    the value of the matching key is extracted instead.

    """
    instrument = get_option_instrument_data_by_id(id)
    payload = {
        "instruments" : instrument['url']
    }
    data = helper.request_get(urls.marketdata_options(), 'results', payload)

    if not data:
        # No market data came back; substitute a dictionary with every expected
        # key present but empty, so downstream .update()/filtering still works.
        fallback_keys = [
            'adjusted_mark_price', 'ask_price', 'ask_size', 'bid_price', 'bid_size',
            'break_even_price', 'high_price', 'instrument', 'last_trade_price',
            'last_trade_size', 'low_price', 'mark_price', 'open_interest',
            'previous_close_date', 'previous_close_price', 'volume',
            'chance_of_profit_long', 'chance_of_profit_short', 'delta', 'gamma',
            'implied_volatility', 'rho', 'theta', 'vega',
            'high_fill_rate_buy_price', 'high_fill_rate_sell_price',
            'low_fill_rate_buy_price', 'low_fill_rate_sell_price',
        ]
        data = dict.fromkeys(fallback_keys, '')

    return helper.filter_data(data, info)
@helper.login_required
def get_option_market_data(inputSymbols, expirationDate, strikePrice, optionType, info=None):
    """Returns the option market data for the stock option, including the greeks,
    open interest, change of profit, and adjusted mark price.

    :param inputSymbols: The ticker of the stock.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the stock. When ``info`` is given, \
    the value of the matching key is extracted instead.

    """
    try:
        symbols = helper.inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    results = []
    for ticker in symbols:
        contract_id = helper.id_for_option(ticker, expirationDate, strikePrice, optionType)
        results.append(get_option_market_data_by_id(contract_id))

    return helper.filter_data(results, info)
def get_option_instrument_data_by_id(id, info=None):
    """Returns the option instrument information.

    :param id: The id of the stock.
    :type id: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the stock. When ``info`` is given, \
    the value of the matching key is extracted instead.

    """
    response = helper.request_get(urls.option_instruments(id))
    return helper.filter_data(response, info)
def get_option_instrument_data(symbol, expirationDate, strikePrice, optionType, info=None):
    """Returns the option instrument data for the stock option.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the stock. When ``info`` is given, \
    the value of the matching key is extracted instead.

    """
    try:
        symbol = symbol.upper().strip()
        optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    contract_id = helper.id_for_option(symbol, expirationDate, strikePrice, optionType)
    response = helper.request_get(urls.option_instruments(contract_id))
    return helper.filter_data(response, info)
def get_option_historicals(symbol, expirationDate, strikePrice, optionType, interval='hour', span='week', bounds='regular', info=None):
    """Returns the data that is used to make the graphs.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param interval: Interval to retrieve data for. Values are '5minute', '10minute', 'hour', 'day', 'week'. Default is 'hour'.
    :type interval: Optional[str]
    :param span: Sets the range of the data to be either 'day', 'week', 'year', or '5year'. Default is 'week'.
    :type span: Optional[str]
    :param bounds: Represents if graph will include extended trading hours or just regular trading hours. Values are 'regular', 'trading', and 'extended'. \
    regular hours are 6 hours long, trading hours are 9 hours long, and extended hours are 16 hours long. Default is 'regular'
    :type bounds: Optional[str]
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: Returns a list that contains a list for each symbol. \
    Each list contains a dictionary where each dictionary is for a different time.

    """
    try:
        symbol = symbol.upper().strip()
        optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=helper.get_output())
        return [None]

    # Validate the graph parameters before making any network request.
    interval_check = ['5minute', '10minute', 'hour', 'day', 'week']
    span_check = ['day', 'week', 'year', '5year']
    bounds_check = ['extended', 'regular', 'trading']
    if interval not in interval_check:
        print(
            'ERROR: Interval must be "5minute","10minute","hour","day",or "week"', file=helper.get_output())
        return([None])
    if span not in span_check:
        print('ERROR: Span must be "day", "week", "year", or "5year"', file=helper.get_output())
        return([None])
    if bounds not in bounds_check:
        print('ERROR: Bounds must be "extended","regular",or "trading"', file=helper.get_output())
        return([None])

    optionID = helper.id_for_option(symbol, expirationDate, strikePrice, optionType)

    url = urls.option_historicals(optionID)
    payload = {'span': span,
               'interval': interval,
               'bounds': bounds}
    data = helper.request_get(url, 'regular', payload)
    # Use identity comparison for None (PEP 8) instead of `data == None`.
    if data is None or data == [None]:
        return data

    # Tag each data point with its symbol so callers can tell series apart.
    histData = []
    for subitem in data['data_points']:
        subitem['symbol'] = symbol
        histData.append(subitem)

    return(helper.filter_data(histData, info))
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
from robin_stocks.robinhood.stocks import *
def get_top_movers_sp500(direction, info=None):
    """Returns a list of the top S&P500 movers up or down for the day.

    :param direction: The direction of movement either 'up' or 'down'
    :type direction: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per mover. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    :Dictionary Keys: instrument_url, symbol, updated_at, price_movement, description

    """
    try:
        direction = direction.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None

    if direction not in ('up', 'down'):
        print('Error: direction must be "up" or "down"', file=get_output())
        return [None]

    data = request_get(movers_sp500_url(), 'pagination', {'direction': direction})
    return filter_data(data, info)
def get_top_100(info=None):
    """Returns a list of the Top 100 stocks on Robin Hood.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per mover. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    :Dictionary Keys: ask_price, ask_size, bid_price, bid_size, last_trade_price, \
    last_extended_hours_trade_price, previous_close, adjusted_previous_close, \
    previous_close_date, symbol, trading_halted, has_traded, last_trade_price_source, \
    updated_at, instrument

    """
    # The endpoint returns instrument URLs; resolve them to tickers, then quote them.
    raw = request_get(get_100_most_popular_url(), 'regular')
    instrument_urls = filter_data(raw, 'instruments')
    symbols = [get_symbol_by_url(u) for u in instrument_urls]
    quotes = get_quotes(symbols)
    return filter_data(quotes, info)
def get_top_movers(info=None):
    """Returns a list of the Top 20 movers on Robin Hood.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per mover. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    :Dictionary Keys: ask_price, ask_size, bid_price, bid_size, last_trade_price, \
    last_extended_hours_trade_price, previous_close, adjusted_previous_close, \
    previous_close_date, symbol, trading_halted, has_traded, last_trade_price_source, \
    updated_at, instrument

    """
    # The endpoint returns instrument URLs; resolve them to tickers, then quote them.
    raw = request_get(movers_top_url(), 'regular')
    instrument_urls = filter_data(raw, 'instruments')
    symbols = [get_symbol_by_url(u) for u in instrument_urls]
    quotes = get_quotes(symbols)
    return filter_data(quotes, info)
def get_all_stocks_from_market_tag(tag, info=None):
    """Returns all the stock quote information that matches a tag category.

    :param tag: The category to filter for. Examples include 'biopharmaceutical', 'upcoming-earnings', 'most-popular-under-25', and 'technology'.
    :type tag: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per mover. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    :Dictionary Keys: ask_price, ask_size, bid_price, bid_size, last_trade_price, \
    last_extended_hours_trade_price, previous_close, adjusted_previous_close, \
    previous_close_date, symbol, trading_halted, has_traded, last_trade_price_source, \
    updated_at, instrument

    """
    raw = request_get(market_category_url(tag), 'regular')
    instrument_urls = filter_data(raw, 'instruments')
    # An empty instrument list means the tag was not recognized by the API.
    if not instrument_urls:
        print('ERROR: "{}" is not a valid tag'.format(tag), file=get_output())
        return [None]

    symbols = [get_symbol_by_url(u) for u in instrument_urls]
    quotes = get_quotes(symbols)
    return filter_data(quotes, info)
def get_markets(info=None):
    """Returns a list of available markets.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries, one per market. When ``info`` is given, a list of \
    strings holding the value of the matching key instead.

    :Dictionary Keys: url, todays_hours, mic, operating_mic, acronym, name, city, \
    country, timezone, website

    """
    data = request_get(markets_url(), 'pagination')
    return filter_data(data, info)
def get_market_today_hours(market, info=None):
    """Return today's opening and closing hours for one market, plus whether it is open.

    :param market: The market's 'mic' identifier (see :func:`get_markets`).
    :type market: str
    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary with keys date, is_open, opens_at, closes_at,
        extended_opens_at, extended_closes_at, previous_open_hours, and
        next_open_hours, or the single value for *info* when it is provided.
    :raises Exception: If *market* does not match any known 'mic' value.
    """
    match = next((entry for entry in get_markets() if entry['mic'] == market), None)
    if not match:
        raise Exception('Not a valid market name. Check get_markets() for a list of market information.')
    hours = request_get(match['todays_hours'], 'regular')
    return filter_data(hours, info)
def get_market_next_open_hours(market, info=None):
    """Return the hours for the next open trading day after today for one market.

    :param market: The market's 'mic' identifier (see :func:`get_markets`).
    :type market: str
    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary with keys date, is_open, opens_at, closes_at,
        extended_opens_at, extended_closes_at, previous_open_hours, and
        next_open_hours, or the single value for *info* when it is provided.
    """
    # Today's payload already links to the next open session.
    next_hours_url = get_market_today_hours(market, info='next_open_hours')
    hours = request_get(next_hours_url, 'regular')
    return filter_data(hours, info)
def get_market_next_open_hours_after_date(market, date, info=None):
    """Return the hours for the next open trading day after a given date for one market.

    :param market: The market's 'mic' identifier (see :func:`get_markets`).
    :type market: str
    :param date: Find the next available trading day after this date (YYYY-MM-DD).
    :type date: str
    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary with keys date, is_open, opens_at, closes_at,
        extended_opens_at, extended_closes_at, previous_open_hours, and
        next_open_hours, or the single value for *info* when it is provided.
    """
    # The hours payload for the given date links to the next open session.
    next_hours_url = get_market_hours(market, date, info='next_open_hours')
    hours = request_get(next_hours_url, 'regular')
    return filter_data(hours, info)
def get_market_hours(market, date, info=None):
    """Return one market's hours on a specific date; past and future dates both work.

    :param market: The market's 'mic' identifier (see :func:`get_markets`).
    :type market: str
    :param date: The date to look up, formatted YYYY-MM-DD.
    :type date: str
    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary with keys date, is_open, opens_at, closes_at,
        extended_opens_at, extended_closes_at, previous_open_hours, and
        next_open_hours, or the single value for *info* when it is provided.
    """
    hours = request_get(market_hours_url(market, date), 'regular')
    return filter_data(hours, info)
def get_currency_pairs(info=None):
    """Return the currency pairs available for trading.

    :param info: Optional key name; when given, only the matching values are returned.
    :type info: Optional[str]
    :returns: A list of currency-pair dictionaries (keys: asset_currency,
        display_only, id, max_order_size, min_order_size,
        min_order_price_increment, min_order_quantity_increment, name,
        quote_currency, symbol, tradability), or a list of the values for
        *info* when it is provided.
    """
    pair_data = request_get(currency_url(), 'results')
    return filter_data(pair_data, info)
from robin_stocks.robinhood.helper import id_for_chain, id_for_stock
# Login
def login_url():
    """URL for the OAuth2 token exchange."""
    return "https://api.robinhood.com/oauth2/token/"

def challenge_url(challenge_id):
    """URL for responding to a login challenge."""
    return f"https://api.robinhood.com/challenge/{challenge_id}/respond/"
# Profiles
def account_profile_url(account_number=None):
    """URL for account data; a specific account when account_number is given."""
    base = "https://api.robinhood.com/accounts/"
    return base + account_number if account_number else base

def basic_profile_url():
    return "https://api.robinhood.com/user/basic_info/"

def investment_profile_url():
    return "https://api.robinhood.com/user/investment_profile/"

def portfolio_profile_url():
    return "https://api.robinhood.com/portfolios/"

def security_profile_url():
    return "https://api.robinhood.com/user/additional_info/"

def user_profile_url():
    return "https://api.robinhood.com/user/"

def portfolis_historicals_url(account_number):
    # NOTE: the name keeps its historical misspelling ("portfolis") so existing
    # callers keep working.
    return f"https://api.robinhood.com/portfolios/historicals/{account_number}/"
# Stocks
def earnings_url():
    return "https://api.robinhood.com/marketdata/earnings/"

def events_url():
    return "https://api.robinhood.com/options/events/"

def fundamentals_url():
    return "https://api.robinhood.com/fundamentals/"

def historicals_url():
    return "https://api.robinhood.com/quotes/historicals/"

def instruments_url():
    return "https://api.robinhood.com/instruments/"

def news_url(symbol):
    return f"https://api.robinhood.com/midlands/news/{symbol}/?"

def popularity_url(symbol):
    # Resolves the ticker to its instrument id before building the URL.
    return f"https://api.robinhood.com/instruments/{id_for_stock(symbol)}/popularity/"

def quotes_url():
    return "https://api.robinhood.com/quotes/"

def ratings_url(symbol):
    return f"https://api.robinhood.com/midlands/ratings/{id_for_stock(symbol)}/"

def splits_url(symbol):
    return f"https://api.robinhood.com/instruments/{id_for_stock(symbol)}/splits/"
# account
def phoenix_url():
    return "https://phoenix.robinhood.com/accounts/unified"

def positions_url(account_number=None):
    if account_number:
        return "https://api.robinhood.com/positions/?account_number=" + account_number
    return "https://api.robinhood.com/positions/"

def banktransfers_url(direction=None):
    if direction == 'received':
        return "https://api.robinhood.com/ach/received/transfers/"
    return "https://api.robinhood.com/ach/transfers/"

def cardtransactions_url():
    return "https://minerva.robinhood.com/history/transactions/"

def daytrades_url(account):
    return f"https://api.robinhood.com/accounts/{account}/recent_day_trades/"

def dividends_url():
    return "https://api.robinhood.com/dividends/"

def documents_url():
    return "https://api.robinhood.com/documents/"

def withdrawl_url(bank_id):
    # NOTE: the name keeps its historical misspelling ("withdrawl") so existing
    # callers keep working.
    return f"https://api.robinhood.com/ach/relationships/{bank_id}/"

def linked_url(id=None, unlink=False):
    if unlink:
        return f"https://api.robinhood.com/ach/relationships/{id}/unlink/"
    if id:
        return f"https://api.robinhood.com/ach/relationships/{id}/"
    return "https://api.robinhood.com/ach/relationships/"

def margin_url():
    return "https://api.robinhood.com/margin/calls/"

def margininterest_url():
    return "https://api.robinhood.com/cash_journal/margin_interest_charges/"

def notifications_url(tracker=False):
    if tracker:
        return "https://api.robinhood.com/midlands/notifications/notification_tracker/"
    return "https://api.robinhood.com/notifications/devices/"

def referral_url():
    return "https://api.robinhood.com/midlands/referral/"

def stockloan_url():
    return "https://api.robinhood.com/stock_loan/payments/"

def subscription_url():
    return "https://api.robinhood.com/subscription/subscription_fees/"

def wiretransfers_url():
    return "https://api.robinhood.com/wire/transfers"

def watchlists_url(name=None, add=False):
    if name:
        return "https://api.robinhood.com/midlands/lists/items/"
    return "https://api.robinhood.com/midlands/lists/default/"
# markets
def currency_url():
    return "https://nummus.robinhood.com/currency_pairs/"

def markets_url():
    return "https://api.robinhood.com/markets/"

def market_hours_url(market, date):
    return f"https://api.robinhood.com/markets/{market}/hours/{date}/"

def movers_sp500_url():
    return "https://api.robinhood.com/midlands/movers/sp500/"

def get_100_most_popular_url():
    return "https://api.robinhood.com/midlands/tags/tag/100-most-popular/"

def movers_top_url():
    return "https://api.robinhood.com/midlands/tags/tag/top-movers/"

def market_category_url(category):
    return f"https://api.robinhood.com/midlands/tags/tag/{category}/"
# options
def aggregate_url():
    return "https://api.robinhood.com/options/aggregate_positions/"

def chains_url(symbol):
    # Resolves the ticker to its option-chain id before building the URL.
    return f"https://api.robinhood.com/options/chains/{id_for_chain(symbol)}/"

def option_historicals_url(id):
    return f"https://api.robinhood.com/marketdata/options/historicals/{id}/"

def option_instruments_url(id=None):
    if id:
        return f"https://api.robinhood.com/options/instruments/{id}/"
    return "https://api.robinhood.com/options/instruments/"

def option_orders_url(orderID=None, account_number=None):
    url = "https://api.robinhood.com/options/orders/"
    if orderID:
        url += f"{orderID}/"
    if account_number:
        url += "?account_numbers=" + account_number
    return url

def option_positions_url(account_number):
    if account_number:
        return "https://api.robinhood.com/options/positions/?account_numbers=" + account_number
    return "https://api.robinhood.com/options/positions/"

def marketdata_options_url():
    return "https://api.robinhood.com/marketdata/options/"
# pricebook
def marketdata_quotes_url(id):
    return f"https://api.robinhood.com/marketdata/quotes/{id}/"

def marketdata_pricebook_url(id):
    return f"https://api.robinhood.com/marketdata/pricebook/snapshots/{id}/"
# crypto
def order_crypto_url():
    return "https://nummus.robinhood.com/orders/"

def crypto_account_url():
    return "https://nummus.robinhood.com/accounts/"

def crypto_currency_pairs_url():
    return "https://nummus.robinhood.com/currency_pairs/"

def crypto_quote_url(id):
    return f"https://api.robinhood.com/marketdata/forex/quotes/{id}/"

def crypto_holdings_url():
    return "https://nummus.robinhood.com/holdings/"

def crypto_historical_url(id):
    return f"https://api.robinhood.com/marketdata/forex/historicals/{id}/"

def crypto_orders_url(orderID=None):
    if orderID:
        return f"https://nummus.robinhood.com/orders/{orderID}/"
    return "https://nummus.robinhood.com/orders/"

def crypto_cancel_url(id):
    return f"https://nummus.robinhood.com/orders/{id}/cancel/"
# orders
def cancel_url(url):
    return f"https://api.robinhood.com/orders/{url}/cancel/"

def option_cancel_url(id):
    return f"https://api.robinhood.com/options/orders/{id}/cancel/"

def orders_url(orderID=None, account_number=None):
    url = "https://api.robinhood.com/orders/"
    if orderID:
        url += f"{orderID}/"
    if account_number:
        url += "?account_numbers=" + account_number
    return url
from functools import wraps
import requests
from robin_stocks.robinhood.globals import LOGGED_IN, OUTPUT, SESSION
def set_login_state(logged_in):
    """Sets the login state.

    :param logged_in: True once authentication succeeds, False after logout.
    :type logged_in: bool
    """
    global LOGGED_IN
    LOGGED_IN = logged_in
def set_output(output):
    """Sets the global output stream used for status and error messages.

    :param output: A file-like object (e.g. sys.stdout or an open file handle).
    """
    global OUTPUT
    OUTPUT = output
def get_output():
    """Gets the current global output stream.

    :returns: The file-like object that print-style messages are written to.
    """
    global OUTPUT
    return OUTPUT
def login_required(func):
    """Decorator marking functions that require an authenticated session.

    :param func: The function that must only run while logged in.
    :returns: A wrapper that raises Exception when called while logged out.
    """
    @wraps(func)
    def require_login(*args, **kwargs):
        global LOGGED_IN
        if not LOGGED_IN:
            raise Exception('{} can only be called when logged in'.format(func.__name__))
        return func(*args, **kwargs)
    return require_login
def convert_none_to_string(func):
    """A decorator for converting a None return value into a blank string.

    :param func: The wrapped function whose None results become "".
    :returns: A wrapper returning func's result, or "" when that result is None.
    """
    @wraps(func)
    def string_wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # Bug fix: the original tested truthiness, which also converted
        # legitimate falsy results (0, False, empty containers) to "".
        # Only None should be replaced, per the decorator's purpose.
        if result is None:
            return ""
        return result
    return string_wrapper
def id_for_stock(symbol):
    """Takes a stock ticker and returns the instrument id associated with the stock.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stocks instrument id, or None on bad input.
    """
    try:
        cleaned = symbol.upper().strip()
    except AttributeError as message:
        # symbol was not a string; report and bail out.
        print(message, file=get_output())
        return None
    instrument = request_get('https://api.robinhood.com/instruments/',
                             'indexzero', {'symbol': cleaned})
    return filter_data(instrument, 'id')
def id_for_chain(symbol):
    """Takes a stock ticker and returns the chain id associated with a stocks option.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stocks options chain id, or None on failure.
    """
    try:
        cleaned = symbol.upper().strip()
    except AttributeError as message:
        # symbol was not a string; report and bail out.
        print(message, file=get_output())
        return None
    instrument = request_get('https://api.robinhood.com/instruments/',
                             'indexzero', {'symbol': cleaned})
    # request_get returns None on failure; pass that straight through.
    return instrument['tradable_chain_id'] if instrument else instrument
def id_for_group(symbol):
    """Takes a stock ticker and returns the id associated with the group.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :returns: A string that represents the stocks group id, or None on failure.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return(None)
    url = 'https://api.robinhood.com/options/chains/{0}/'.format(
        id_for_chain(symbol))
    data = request_get(url)
    # Bug fix: request_get returns None when the request fails; the original
    # then raised TypeError on the subscript below instead of returning None.
    if not data:
        return(None)
    return(data['underlying_instruments'][0]['id'])
def id_for_option(symbol, expirationDate, strike, optionType):
    """Returns the id associated with a specific option order.

    :param symbol: The symbol to get the id for.
    :type symbol: str
    :param expirationDate: The expiration date as YYYY-MM-DD
    :type expirationDate: str
    :param strike: The strike price.
    :type strike: str
    :param optionType: Either call or put.
    :type optionType: str
    :returns: A string that represents the stocks option id, or None when no match is found.
    """
    # Consistency fix: strip whitespace like id_for_stock/id_for_chain do.
    symbol = symbol.upper().strip()
    chain_id = id_for_chain(symbol)
    payload = {
        'chain_id': chain_id,
        'expiration_dates': expirationDate,
        'strike_price': strike,
        'type': optionType,
        'state': 'active'
    }
    url = 'https://api.robinhood.com/options/instruments/'
    data = request_get(url, 'pagination', payload)
    # Robustness fix: a failed request yields [None]; skip non-dict entries so
    # the comprehension cannot raise TypeError.
    listOfOptions = [item for item in data if item and item["expiration_date"] == expirationDate]
    if (len(listOfOptions) == 0):
        print('Getting the option ID failed. Perhaps the expiration date is wrong format, or the strike price is wrong.', file=get_output())
        return(None)
    return(listOfOptions[0]['id'])
def round_price(price):
    """Round *price* to the precision Robinhood accepts for its magnitude.

    :param price: The input price to round.
    :type price: float or int
    :returns: The rounded price as a float.
    """
    value = float(price)
    if value <= 1e-2:
        digits = 6   # sub-penny prices keep six decimal places
    elif value < 1e0:
        digits = 4   # prices under a dollar keep four
    else:
        digits = 2   # everything else rounds to whole cents
    return round(value, digits)
def filter_data(data, info):
    """Takes the data and extracts the value for the keyword that matches info.

    :param data: The data returned by request_get.
    :type data: dict or list
    :param info: The keyword to filter from the data, or None to return data unchanged.
    :type info: Optional[str]
    :returns: A list or string with the values that correspond to the info keyword.
    """
    # Idiom fixes: `is None` instead of `== None`, isinstance instead of
    # comparing type() objects. Behavior is unchanged.
    if data is None:
        return None
    if data == [None]:
        # The request-failure sentinel for list-shaped responses.
        return []
    if isinstance(data, list):
        if not data:
            return []
        compare_dict = data[0]   # representative element used for the key check
        none_type = []           # failure value matching the input shape
    elif isinstance(data, dict):
        compare_dict = data
        none_type = None
    if info is None:
        return data
    if info in compare_dict:
        if isinstance(data, list):
            return [entry[info] for entry in data]
        return data[info]
    print(error_argument_not_key_in_dictionary(info), file=get_output())
    return none_type
def inputs_to_set(inputSymbols):
    """Takes the parameters passed to *args and normalizes them into a list.

    A set tracks what has been seen so duplicates are dropped, while the list
    preserves the original input order.

    :param inputSymbols: A single ticker or a collection of stock tickers.
    :type inputSymbols: list or dict or tuple or str
    :returns: A list of strings that have been capitalized and stripped of white space.
    """
    symbols_list = []
    seen = set()

    def add_symbol(symbol):
        symbol = symbol.upper().strip()
        if symbol not in seen:
            seen.add(symbol)
            symbols_list.append(symbol)

    # Idiom fix: isinstance instead of chained `type(...) is` comparisons.
    if isinstance(inputSymbols, str):
        add_symbol(inputSymbols)
    elif isinstance(inputSymbols, (list, tuple, set)):
        for item in inputSymbols:
            # Silently skip non-string entries, as before.
            if isinstance(item, str):
                add_symbol(item)
    return symbols_list
def request_document(url, payload=None):
    """Fetch *url* and return the raw response object rather than parsed JSON.

    :param url: The url to send a get request to.
    :type url: str
    :param payload: Optional query parameters for the request.
    :type payload: Optional[dict]
    :returns: The requests response object, or None when the request fails.
    """
    try:
        response = SESSION.get(url, params=payload)
        response.raise_for_status()
    except requests.exceptions.HTTPError as message:
        print(message, file=get_output())
        return None
    return response
def request_get(url, dataType='regular', payload=None, jsonify_data=True):
    """For a given url and payload, makes a get request and returns the data.

    :param url: The url to send a get request to.
    :type url: str
    :param dataType: Determines how to filter the data. 'regular' returns the unfiltered data. \
'results' will return data['results']. 'pagination' will return data['results'] and append it with any \
data that is in data['next']. 'indexzero' will return data['results'][0].
    :type dataType: Optional[str]
    :param payload: Dictionary of parameters to pass to the url. Will append the requests url as url/?key1=value1&key2=value2.
    :type payload: Optional[dict]
    :param jsonify_data: If this is true, will return requests.get().json(), otherwise will return the raw response.
    :type jsonify_data: bool
    :returns: Returns the data from the get request. If jsonify_data=True and requests returns an http code other than <200> \
then either '[None]' or 'None' will be returned based on what the dataType parameter was set as.
    """
    # The failure sentinel matches the shape of the requested data.
    if (dataType == 'results' or dataType == 'pagination'):
        data = [None]
    else:
        data = None
    res = None
    if jsonify_data:
        try:
            res = SESSION.get(url, params=payload)
            res.raise_for_status()
            data = res.json()
        except (requests.exceptions.HTTPError, AttributeError) as message:
            print(message, file=get_output())
            return(data)
    else:
        return SESSION.get(url, params=payload)
    # Only continue to filter data if jsonify_data=True, and Session.get returned status code <200>.
    if (dataType == 'results'):
        try:
            data = data['results']
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return([None])
    elif (dataType == 'pagination'):
        counter = 2
        nextData = data
        try:
            data = data['results']
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return([None])
        if nextData['next']:
            print('Found Additional pages.', file=get_output())
        while nextData['next']:
            try:
                res = SESSION.get(nextData['next'])
                res.raise_for_status()
                nextData = res.json()
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Best-effort behavior is kept:
                # return whatever pages loaded successfully.
                print('Additional pages exist but could not be loaded.', file=get_output())
                return(data)
            print('Loading page '+str(counter)+' ...', file=get_output())
            counter += 1
            for item in nextData['results']:
                data.append(item)
    elif (dataType == 'indexzero'):
        try:
            data = data['results'][0]
        except KeyError as message:
            print("{0} is not a key in the dictionary".format(message), file=get_output())
            return(None)
        except IndexError:
            # Empty results list: no matching entry exists.
            return(None)
    return(data)
def request_post(url, payload=None, timeout=16, json=False, jsonify_data=True):
    """For a given url and payload, makes a post request and returns the response. Allows for responses other than 200.
    :param url: The url to send a post request to.
    :type url: str
    :param payload: Dictionary of parameters to pass to the url as url/?key1=value1&key2=value2.
    :type payload: Optional[dict]
    :param timeout: The time for the post to wait for a response. Should be slightly greater than multiples of 3.
    :type timeout: Optional[int]
    :param json: This will set the 'content-type' parameter of the session header to 'application/json'
    :type json: bool
    :param jsonify_data: If this is true, will return requests.post().json(), otherwise will return response from requests.post().
    :type jsonify_data: bool
    :returns: Returns the data from the post request.
    """
    data = None
    res = None
    try:
        if json:
            # Temporarily switch the shared session to JSON encoding, then
            # restore the form-encoded default so later requests are unaffected.
            update_session('Content-Type', 'application/json')
            res = SESSION.post(url, json=payload, timeout=timeout)
            update_session(
                'Content-Type', 'application/x-www-form-urlencoded; charset=utf-8')
        else:
            res = SESSION.post(url, data=payload, timeout=timeout)
        # Codes outside this allow-list (which deliberately includes common
        # redirect and client-error codes so their bodies can be inspected)
        # are treated as failures.
        if res.status_code not in [200, 201, 202, 204, 301, 302, 303, 304, 307, 400, 401, 402, 403]:
            raise Exception("Received "+ str(res.status_code))
        data = res.json()
    except Exception as message:
        # On any failure `data` stays None; the error is reported, not raised.
        print("Error in request_post: {0}".format(message), file=get_output())
    if jsonify_data:
        return(data)
    else:
        return(res)
def request_delete(url):
    """Send a delete request to *url* and return the response.

    :param url: The url to send a delete request to.
    :type url: str
    :returns: The response object on success, otherwise None.
    """
    try:
        response = SESSION.delete(url)
        response.raise_for_status()
        return response
    except Exception as message:
        print("Error in request_delete: {0}".format(message), file=get_output())
        return None
def update_session(key, value):
    """Updates the session header used by the requests library.
    :param key: The key value to update or add to session header.
    :type key: str
    :param value: The value that corresponds to the key.
    :type value: str
    :returns: None. Updates the session header with a value.
    """
    # Mutates the shared module-level session, so the change applies to every
    # subsequent request made through SESSION.
    SESSION.headers[key] = value
def error_argument_not_key_in_dictionary(keyword):
    """Message for filtering on a key the payload does not contain."""
    return 'Error: The keyword "{0}" is not a key in the dictionary.'.format(keyword)

def error_ticker_does_not_exist(ticker):
    """Message for a ticker symbol that Robinhood does not recognize."""
    return 'Warning: "{0}" is not a valid stock ticker. It is being ignored'.format(ticker)

def error_must_be_nonzero(keyword):
    """Message for a numeric parameter that must be a positive integer."""
    return 'Error: The input parameter "{0}" must be an integer larger than zero and non-negative'.format(keyword)
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
@login_required
def load_crypto_profile(info=None):
    """Return the profile information for the crypto account.

    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary of account data (keys: apex_account_number,
        created_at, id, rhs_account_number, status, status_reason_code,
        updated_at, user_id), or the single value for *info* when provided.
    """
    profile = request_get(crypto_account_url(), 'indexzero')
    return filter_data(profile, info)
@login_required
def get_crypto_positions(info=None):
    """Return the crypto positions held by the account.

    :param info: Optional key name; when given, only the matching values are returned.
    :type info: Optional[str]
    :returns: A list of position dictionaries (keys: account_id, cost_basis,
        created_at, currency, id, quantity, quantity_available,
        quantity_held_for_buy, quantity_held_for_sell, updated_at), or a list
        of the values for *info* when it is provided.
    """
    holdings = request_get(crypto_holdings_url(), 'pagination')
    return filter_data(holdings, info)
def get_crypto_currency_pairs(info=None):
    """Return every crypto currency pair that can be traded.

    :param info: Optional key name; when given, only the matching values are returned.
    :type info: Optional[str]
    :returns: A list of pair dictionaries (keys: asset_currency, display_only,
        id, max_order_size, min_order_size, min_order_price_increment,
        min_order_quantity_increment, name, quote_currency, symbol,
        tradability), or a list of the values for *info* when it is provided.
    """
    pairs = request_get(crypto_currency_pairs_url(), 'results')
    return filter_data(pairs, info)
def get_crypto_info(symbol, info=None):
    """Gets information about a crypto currency.

    :param symbol: The crypto ticker.
    :type symbol: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict] A dictionary of pair data (keys: asset_currency,
        display_only, id, max_order_size, min_order_size,
        min_order_price_increment, min_order_quantity_increment, name,
        quote_currency, symbol, tradability), the value for *info* when given,
        or None when the symbol is unknown or the request failed.
    """
    url = crypto_currency_pairs_url()
    data = request_get(url, 'results')
    # Robustness fix: a failed request yields [None]; the original comprehension
    # then raised TypeError subscripting None. Skip falsy entries so failure
    # degrades to "not found" (None) instead.
    match = next((x for x in data if x and x['asset_currency']['code'] == symbol), None)
    return(filter_data(match, info))
# Maps crypto ticker -> Robinhood id so repeat lookups skip the network.
SYMBOL_TO_ID_CACHE = {}
def get_crypto_id(symbol):
    """Gets the Robinhood ID of the given cryptocurrency used to make trades.
    This function uses an in-memory cache of the IDs to save a network round-trip when possible.

    :param symbol: The crypto ticker.
    :type symbol: str
    :returns: [str] The symbol's Robinhood ID.
    """
    try:
        return SYMBOL_TO_ID_CACHE[symbol]
    except KeyError:
        pass
    crypto_id = get_crypto_info(symbol, 'id')
    if crypto_id:
        # Only cache successful lookups so transient failures can be retried.
        SYMBOL_TO_ID_CACHE[symbol] = crypto_id
    return crypto_id
@login_required
def get_crypto_quote(symbol, info=None):
    """Gets information about a crypto including low price, high price, and open price

    :param symbol: The crypto ticker.
    :type symbol: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [dict] A dictionary of quote data (keys: ask_price, bid_price,
        high_price, id, low_price, mark_price, open_price, symbol, volume),
        or the value for *info* when it is provided.
    """
    # Consistency/performance fix: use get_crypto_id so repeated quote calls
    # hit the in-memory SYMBOL_TO_ID_CACHE instead of re-downloading the full
    # currency-pair list every time. The resolved id is identical.
    id = get_crypto_id(symbol)
    url = crypto_quote_url(id)
    data = request_get(url)
    return(filter_data(data, info))
@login_required
def get_crypto_quote_from_id(id, info=None):
    """Return quote data for a crypto identified by its Robinhood id.

    :param id: The id of a crypto.
    :type id: str
    :param info: Optional key name; when given, only that value is returned.
    :type info: Optional[str]
    :returns: A dictionary of quote data (keys: ask_price, bid_price,
        high_price, id, low_price, mark_price, open_price, symbol, volume),
        or the value for *info* when it is provided.
    """
    quote = request_get(crypto_quote_url(id))
    return filter_data(quote, info)
@login_required
def get_crypto_historicals(symbol, interval='hour', span='week', bounds='24_7', info=None):
    """Gets historical information about a crypto including open price, close price, high price, and low price.

    :param symbol: The crypto ticker.
    :type symbol: str
    :param interval: The time between data points. Can be '15second', '5minute', '10minute', 'hour', 'day', or 'week'. Default is 'hour'.
    :type interval: str
    :param span: The entire time frame to collect data points. Can be 'hour', 'day', 'week', 'month', '3month', 'year', or '5year'. Default is 'week'
    :type span: str
    :param bounds: The times of day to collect data points. 'regular' is 6 hours a day, 'trading' is 9 hours a day, \
'extended' is 16 hours a day, '24_7' is 24 hours a day. Default is '24_7'
    :type bounds: str
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] A list of dictionaries (keys: begins_at, open_price,
        close_price, high_price, low_price, volume, session, interpolated,
        symbol), or the filtered values when *info* is provided. Returns [None]
        on invalid arguments or a failed request.
    """
    interval_check = ['15second', '5minute', '10minute', 'hour', 'day', 'week']
    span_check = ['hour', 'day', 'week', 'month', '3month', 'year', '5year']
    bounds_check = ['24_7', 'extended', 'regular', 'trading']
    if interval not in interval_check:
        print(
            'ERROR: Interval must be "15second","5minute","10minute","hour","day",or "week"', file=get_output())
        return([None])
    if span not in span_check:
        print('ERROR: Span must be "hour","day","week","month","3month","year",or "5year"', file=get_output())
        return([None])
    if bounds not in bounds_check:
        print('ERROR: Bounds must be "24_7","extended","regular",or "trading"', file=get_output())
        return([None])
    if (bounds == 'extended' or bounds == 'trading') and span != 'day':
        print('ERROR: extended and trading bounds can only be used with a span of "day"', file=get_output())
        return([None])
    symbol = inputs_to_set(symbol)
    # Renamed from `id` to avoid shadowing the builtin.
    pair_id = get_crypto_info(symbol[0], info='id')
    url = crypto_historical_url(pair_id)
    payload = {'interval': interval,
               'span': span,
               'bounds': bounds}
    data = request_get(url, 'regular', payload)
    # Robustness fix: request_get returns None on failure; the original then
    # raised TypeError on data['symbol']. Keep the [None] error contract.
    if not data:
        return([None])
    histData = []
    cryptoSymbol = data['symbol']
    for subitem in data['data_points']:
        subitem['symbol'] = cryptoSymbol
        histData.append(subitem)
    return(filter_data(histData, info))
from functools import lru_cache as cache
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
def get_quotes(inputSymbols, info=None):
    """Takes any number of stock tickers and returns information pertaining to its price.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: Optional key name; when given, only the matching values are returned.
    :type info: Optional[str]
    :returns: A list of quote dictionaries (keys: ask_price, ask_size,
        bid_price, bid_size, last_trade_price, last_extended_hours_trade_price,
        previous_close, adjusted_previous_close, previous_close_date, symbol,
        trading_halted, has_traded, last_trade_price_source, updated_at,
        instrument), or a list of the values for *info* when it is provided.
    """
    symbols = inputs_to_set(inputSymbols)
    payload = {'symbols': ','.join(symbols)}
    data = request_get(quotes_url(), 'results', payload)
    if data is None or data == [None]:
        # Request failed outright; propagate the sentinel unchanged.
        return data
    # The API returns None entries for unknown tickers; warn, then drop them.
    for position, quote in enumerate(data):
        if quote is None:
            print(error_ticker_does_not_exist(symbols[position]), file=get_output())
    valid_quotes = [quote for quote in data if quote is not None]
    return filter_data(valid_quotes, info)
def get_fundamentals(inputSymbols, info=None):
    """Takes any number of stock tickers and returns fundamental information
    about the stock such as what sector it is in, a description of the company, dividend yield, and market cap.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \
    Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info. \
    Dictionary keys include open, high, low, volume, average_volume_2_weeks, average_volume, \
    high_52_weeks, dividend_yield, float, low_52_weeks, market_cap, pb_ratio, pe_ratio, \
    shares_outstanding, description, instrument, ceo, headquarters_city, headquarters_state, \
    sector, industry, num_employees, year_founded, and symbol.

    """
    symbols = inputs_to_set(inputSymbols)
    url = fundamentals_url()
    payload = {'symbols': ','.join(symbols)}
    data = request_get(url, 'results', payload)
    # request_get signals failure with either None or [None]; use identity
    # comparison for None (was ``data == None``).
    if data is None or data == [None]:
        return data
    for count, item in enumerate(data):
        if item is None:
            print(error_ticker_does_not_exist(symbols[count]), file=get_output())
        else:
            # The fundamentals payload does not echo the ticker, so attach it.
            item['symbol'] = symbols[count]
    data = [item for item in data if item is not None]
    return(filter_data(data, info))
def get_instruments_by_symbols(inputSymbols, info=None):
    """Return market instrument data (id, bloomberg id, listing date,
    tradability flags, etc.) for any number of stock tickers.

    :param inputSymbols: A single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :returns: [list] A list of instrument dictionaries (keys include id, url, \
    quote, fundamentals, splits, state, market, simple_name, name, tradeable, \
    tradability, symbol, bloomberg_unique, margin_initial_ratio, \
    maintenance_ratio, country, day_trade_ratio, list_date, min_tick_size, \
    type, tradable_chain_id, rhs_tradability, fractional_tradability, \
    default_collar_fraction), or a list of values for the key named by info.

    """
    url = instruments_url()
    results = []
    # Query one ticker at a time; a falsy response means the ticker is unknown.
    for ticker in inputs_to_set(inputSymbols):
        entry = request_get(url, 'indexzero', {'symbol': ticker})
        if entry:
            results.append(entry)
        else:
            print(error_ticker_does_not_exist(ticker), file=get_output())
    return filter_data(results, info)
def get_instrument_by_url(url, info=None):
    """Return the instrument data for a single instrument url, located at
    ``https://api.robinhood.com/instruments/<id>`` where <id> is the id of the stock.

    :param url: The url of the stock. Can be found in several locations, including \
    the dictionaries returned from get_instruments_by_symbols().
    :type url: str
    :param info: If given, return only the value of this key.
    :type info: Optional[str]
    :returns: [dict or str] The instrument dictionary (keys include id, url, quote, \
    fundamentals, splits, state, market, simple_name, name, tradeable, tradability, \
    symbol, bloomberg_unique, margin_initial_ratio, maintenance_ratio, country, \
    day_trade_ratio, list_date, min_tick_size, type, tradable_chain_id, \
    rhs_tradability, fractional_tradability, default_collar_fraction), \
    or the value of the key named by info.

    """
    instrument = request_get(url, 'regular')
    return filter_data(instrument, info)
def get_latest_price(inputSymbols, priceType=None, includeExtendedHours=True):
    """Return the latest price of each requested ticker as a string.

    :param inputSymbols: A single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param priceType: Either 'ask_price' or 'bid_price'. When set, \
    includeExtendedHours is ignored.
    :type priceType: str
    :param includeExtendedHours: True to prefer the extended-hours price when \
    one exists; False to always use the regular-hours price.
    :type includeExtendedHours: bool
    :returns: [list] A list of prices as strings (None for unknown tickers).

    """
    quotes = get_quotes(inputs_to_set(inputSymbols))
    prices = []
    for quote in quotes:
        # Unknown tickers come back as None entries.
        if not quote:
            prices.append(None)
            continue
        if priceType == 'ask_price':
            prices.append(quote['ask_price'])
        elif priceType == 'bid_price':
            prices.append(quote['bid_price'])
        else:
            if priceType:
                print('WARNING: priceType should be "ask_price" or "bid_price". You entered "{0}"'.format(priceType), file=get_output())
            extended = quote['last_extended_hours_trade_price']
            if extended is None or not includeExtendedHours:
                prices.append(quote['last_trade_price'])
            else:
                prices.append(extended)
    return prices
@cache
@convert_none_to_string
def get_name_by_symbol(symbol):
    """Returns the name of a stock from the stock ticker.

    :param symbol: The ticker of the stock as a string.
    :type symbol: str
    :returns: [str] Returns the simple name of the stock. If the simple name does not exist then returns the full name.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    url = instruments_url()
    payload = {'symbol': symbol}
    data = request_get(url, 'indexzero', payload)
    if not data:
        return(None)
    # Prefer the short display name; fall back to the full legal name when
    # 'simple_name' is missing or empty. Local renamed from ``filter`` so it
    # no longer shadows the builtin.
    name = filter_data(data, info='simple_name')
    if not name or name == "":
        name = filter_data(data, info='name')
    return(name)
@cache
@convert_none_to_string
def get_name_by_url(url):
    """Returns the name of a stock from the instrument url. Should be located at ``https://api.robinhood.com/instruments/<id>``
    where <id> is the id of the stock.

    :param url: The url of the stock as a string.
    :type url: str
    :returns: [str] Returns the simple name of the stock. If the simple name does not exist then returns the full name.

    """
    data = request_get(url)
    if not data:
        return(None)
    # Prefer the short display name; fall back to the full legal name when
    # 'simple_name' is missing or empty. Local renamed from ``filter`` so it
    # no longer shadows the builtin.
    name = filter_data(data, info='simple_name')
    if not name or name == "":
        name = filter_data(data, info='name')
    return(name)
@cache
@convert_none_to_string
def get_symbol_by_url(url):
    """Return the ticker symbol of a stock from its instrument url, located at
    ``https://api.robinhood.com/instruments/<id>`` where <id> is the id of the stock.

    :param url: The url of the stock as a string.
    :type url: str
    :returns: [str] The ticker symbol of the stock.

    """
    instrument = request_get(url)
    return filter_data(instrument, info='symbol')
@convert_none_to_string
def get_ratings(symbol, info=None):
    """Return analyst ratings for a stock, including buy/hold/sell counts
    and the individual rating texts.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: If given, filter the results to the value matching this key. \
    Possible values are summary (dict), ratings (list of dicts), \
    instrument_id (str), and ratings_published_at (str).
    :type info: Optional[str]
    :returns: [dict] The full ratings dictionary, or the value matching info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    rating_data = request_get(ratings_url(symbol))
    if not rating_data:
        return rating_data
    if len(rating_data['ratings']) == 0:
        return rating_data
    # Encode each rating's text as UTF-8 bytes (matches historical behavior).
    for entry in rating_data['ratings']:
        entry['text'] = entry['text'].encode('UTF-8')
    return filter_data(rating_data, info)
def get_events(symbol, info=None):
    """Return corporate events for a stock the user owns. For example, if you
    owned options for USO and it underwent a stock split resulting in shares
    of newly created USO1, that event is returned by get_events('uso1').

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: If given, extract the value of this key from each event.
    :type info: Optional[str]
    :returns: [list] Event dictionaries (keys include account, cash_component, \
    chain_id, created_at, direction, equity_components, event_date, id, option, \
    position, quantity, state, total_cash_amount, type, underlying_price, \
    updated_at), or the values matching info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    url = events_url()
    payload = {'equity_instrument_id': id_for_stock(symbol)}
    results = request_get(url, 'results', payload)
    return filter_data(results, info)
def get_earnings(symbol, info=None):
    """Return the earnings reports for the different financial quarters.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: If given, extract the value of this key from each entry.
    :type info: Optional[str]
    :returns: [list] Earnings dictionaries (keys include symbol, instrument, \
    year, quarter, eps, report, call), or the values matching info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    payload = {'symbol': symbol}
    results = request_get(earnings_url(), 'results', payload)
    return filter_data(results, info)
def get_news(symbol, info=None):
    """Return news stories for a stock.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: If given, extract the value of this key from each story.
    :type info: Optional[str]
    :returns: [list] Story dictionaries (keys include api_source, author, \
    num_clicks, preview_image_url, published_at, relay_url, source, summary, \
    title, updated_at, url, uuid, related_instruments, preview_text, \
    currency_id), or the values matching info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    results = request_get(news_url(symbol), 'results')
    return filter_data(results, info)
def get_splits(symbol, info=None):
    """Return the date, divisor, and multiplier for each time a stock split occurred.

    :param symbol: The stock ticker.
    :type symbol: str
    :param info: If given, extract the value of this key. Possible options are \
    url, instrument, execution_date, divisor, and multiplier.
    :type info: Optional[str]
    :returns: [list] Split dictionaries, or the values matching info.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    results = request_get(splits_url(symbol), 'results')
    return filter_data(results, info)
def find_instrument_data(query):
    """Search for stocks whose instrument data contains the query keyword.

    :param query: The keyword to search for.
    :type query: str
    :returns: [list] A list of instrument dictionaries for each matching stock \
    (keys include id, url, quote, fundamentals, splits, state, market, \
    simple_name, name, tradeable, tradability, symbol, bloomberg_unique, \
    margin_initial_ratio, maintenance_ratio, country, day_trade_ratio, \
    list_date, min_tick_size, type, tradable_chain_id, rhs_tradability, \
    fractional_tradability, default_collar_fraction), or [None] when nothing matches.

    """
    url = instruments_url()
    payload = {'query': query}
    data = request_get(url, 'pagination', payload)
    # Guard against a failed request (data is None) as well as an empty
    # result set; the original ``len(data)`` crashed on None.
    if not data:
        print('No results found for that keyword', file=get_output())
        return([None])
    else:
        print('Found ' + str(len(data)) + ' results', file=get_output())
        return(data)
def get_stock_historicals(inputSymbols, interval='hour', span='week', bounds='regular', info=None):
    """Represents the historical data for a stock.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param interval: Interval to retrieve data for. Values are '5minute', '10minute', 'hour', 'day', 'week'. Default is 'hour'.
    :type interval: Optional[str]
    :param span: Sets the range of the data to be either 'day', 'week', 'month', '3month', 'year', or '5year'. Default is 'week'.
    :type span: Optional[str]
    :param bounds: Represents if graph will include extended trading hours or just regular trading hours. Values are 'extended', 'trading', or 'regular'. Default is 'regular'
    :type bounds: Optional[str]
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: [list] Returns a list of dictionaries where each dictionary is for a different time. If multiple stocks are provided \
    the historical data is listed one after another. Dictionary keys are begins_at, open_price, close_price, \
    high_price, low_price, volume, session, interpolated, and symbol.

    """
    # Validate every argument up front and bail out with [None] on bad input.
    interval_check = ['5minute', '10minute', 'hour', 'day', 'week']
    span_check = ['day', 'week', 'month', '3month', 'year', '5year']
    bounds_check = ['extended', 'regular', 'trading']
    if interval not in interval_check:
        print(
            'ERROR: Interval must be "5minute","10minute","hour","day",or "week"', file=get_output())
        return([None])
    if span not in span_check:
        print('ERROR: Span must be "day","week","month","3month","year",or "5year"', file=get_output())
        return([None])
    if bounds not in bounds_check:
        print('ERROR: Bounds must be "extended","regular",or "trading"', file=get_output())
        return([None])
    if (bounds == 'extended' or bounds == 'trading') and span != 'day':
        print('ERROR: extended and trading bounds can only be used with a span of "day"', file=get_output())
        return([None])
    symbols = inputs_to_set(inputSymbols)
    url = historicals_url()
    payload = {'symbols': ','.join(symbols),
               'interval': interval,
               'span': span,
               'bounds': bounds}
    data = request_get(url, 'results', payload)
    # request_get signals failure with either None or [None]; use identity
    # comparison for None (was ``data == None``).
    if data is None or data == [None]:
        return data
    histData = []
    for count, item in enumerate(data):
        # An empty 'historicals' list means the ticker does not exist.
        if (len(item['historicals']) == 0):
            print(error_ticker_does_not_exist(symbols[count]), file=get_output())
            continue
        stockSymbol = item['symbol']
        # Tag each data point with its ticker so multi-symbol results stay distinguishable.
        for subitem in item['historicals']:
            subitem['symbol'] = stockSymbol
            histData.append(subitem)
    return(filter_data(histData, info))
def get_stock_quote_by_id(stock_id, info=None):
    """Return basic stock quote information for a robinhood stock id.

    :param stock_id: robinhood stock id
    :type stock_id: str
    :param info: If given, extract the value of this key from the quote.
    :type info: Optional[str]
    :return: [dict] The quote dictionary (keys include ask_price, ask_size, \
    bid_price, bid_size, last_trade_price, last_extended_hours_trade_price, \
    previous_close, adjusted_previous_close, previous_close_date, symbol, \
    trading_halted, has_traded, last_trade_price_source, updated_at, \
    instrument), or the value matching info.

    """
    quote = request_get(marketdata_quotes_url(stock_id))
    return filter_data(quote, info)
def get_stock_quote_by_symbol(symbol, info=None):
    """Return basic stock quote information for a ticker symbol.

    :param symbol: the stock ticker
    :type symbol: str
    :param info: If given, extract the value of this key from the quote.
    :type info: Optional[str]
    :return: [dict] The quote dictionary (keys include ask_price, ask_size, \
    bid_price, bid_size, last_trade_price, last_extended_hours_trade_price, \
    previous_close, adjusted_previous_close, previous_close_date, symbol, \
    trading_halted, has_traded, last_trade_price_source, updated_at, \
    instrument), or the value matching info.

    """
    # Fix: the ``info`` argument was previously dropped instead of being
    # forwarded to get_stock_quote_by_id.
    return get_stock_quote_by_id(id_for_stock(symbol), info)
def get_pricebook_by_id(stock_id, info=None):
    """Return Level II market data (Gold subscribers only) for a stock id.

    :param stock_id: robinhood stock id
    :type stock_id: str
    :param info: If given, extract the value of this key from the pricebook.
    :type info: Optional[str]
    :return: A dictionary of asks and bids.

    """
    pricebook = request_get(marketdata_pricebook_url(stock_id))
    return filter_data(pricebook, info)
def get_pricebook_by_symbol(symbol, info=None):
    """Return Level II market data (Gold subscribers only) for a ticker symbol.

    :param symbol: the stock ticker
    :type symbol: str
    :param info: If given, extract the value of this key from the pricebook.
    :type info: Optional[str]
    :return: A dictionary of asks and bids.

    """
    # Fix: the ``info`` argument was previously dropped instead of being
    # forwarded to get_pricebook_by_id.
    return get_pricebook_by_id(id_for_stock(symbol), info)
import getpass
import os
import pickle
import random
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
def generate_device_token():
    """Generate a UUID-style device token used when logging on.

    :returns: A string of 16 random bytes rendered as lowercase hex in the
        8-4-4-4-12 grouping (36 characters total).

    """
    # Draw 16 pseudo-random bytes, consuming random.random() exactly as before
    # so seeded output is unchanged.
    octets = []
    for position in range(16):
        scaled = 4294967296.0 * random.random()
        octets.append((int(scaled) >> ((3 & position) << 3)) & 255)
    # Render as two-digit hex, inserting dashes after bytes 3, 5, 7, and 9 to
    # produce the 8-4-4-4-12 layout.
    token = ""
    for position, octet in enumerate(octets):
        token += "{:02x}".format(octet)
        if position in (3, 5, 7, 9):
            token += "-"
    return token
def respond_to_challenge(challenge_id, sms_code):
    """Post the sms verification code to the challenge url.

    :param challenge_id: The challenge id.
    :type challenge_id: str
    :param sms_code: The sms code.
    :type sms_code: str
    :returns: The response from requests.

    """
    return request_post(challenge_url(challenge_id), {'response': sms_code})
def login(username=None, password=None, expiresIn=86400, scope='internal', by_sms=True, store_session=True, mfa_code=None, pickle_name=""):
    """This function will effectively log the user into robinhood by getting an
    authentication token and saving it to the session header. By default, it
    will store the authentication token in a pickle file and load that value
    on subsequent logins.

    :param username: The username for your robinhood account, usually your email.
        Not required if credentials are already cached and valid.
    :type username: Optional[str]
    :param password: The password for your robinhood account. Not required if
        credentials are already cached and valid.
    :type password: Optional[str]
    :param expiresIn: The time until your login session expires. This is in seconds.
    :type expiresIn: Optional[int]
    :param scope: Specifies the scope of the authentication.
    :type scope: Optional[str]
    :param by_sms: Specifies whether to send an email(False) or an sms(True)
    :type by_sms: Optional[boolean]
    :param store_session: Specifies whether to save the log in authorization
        for future log ins.
    :type store_session: Optional[boolean]
    :param mfa_code: MFA token if enabled.
    :type mfa_code: Optional[str]
    :param pickle_name: Allows users to name Pickle token file in order to switch
        between different accounts without having to re-login every time.
    :returns: A dictionary with log in information. The 'access_token' keyword contains the access token, and the 'detail' keyword \
    contains information on whether the access token was generated or loaded from pickle file.

    """
    device_token = generate_device_token()
    home_dir = os.path.expanduser("~")
    data_dir = os.path.join(home_dir, ".tokens")
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    creds_file = "robinhood" + pickle_name + ".pickle"
    pickle_path = os.path.join(data_dir, creds_file)
    # Challenge type is used if not logging in with two-factor authentication.
    if by_sms:
        challenge_type = "sms"
    else:
        challenge_type = "email"
    url = login_url()
    payload = {
        'client_id': 'c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS',
        'expires_in': expiresIn,
        'grant_type': 'password',
        'password': password,
        'scope': scope,
        'username': username,
        'challenge_type': challenge_type,
        'device_token': device_token
    }
    if mfa_code:
        payload['mfa_code'] = mfa_code
    # If authentication has been stored in pickle file then load it. Stops login server from being pinged so much.
    if os.path.isfile(pickle_path):
        # If store_session has been set to false then delete the pickle file, otherwise try to load it.
        # Loading pickle file will fail if the access_token has expired.
        if store_session:
            try:
                with open(pickle_path, 'rb') as f:
                    pickle_data = pickle.load(f)
                access_token = pickle_data['access_token']
                token_type = pickle_data['token_type']
                refresh_token = pickle_data['refresh_token']
                # Set device_token to be the original device token when first logged in.
                pickle_device_token = pickle_data['device_token']
                payload['device_token'] = pickle_device_token
                # Set login status to True in order to try and get account info.
                set_login_state(True)
                update_session(
                    'Authorization', '{0} {1}'.format(token_type, access_token))
                # Try to load account profile to check that authorization token is still valid.
                res = request_get(
                    positions_url(), 'pagination', {'nonzero': 'true'}, jsonify_data=False)
                # Raises exception if response code is not 200.
                res.raise_for_status()
                return({'access_token': access_token, 'token_type': token_type,
                        'expires_in': expiresIn, 'scope': scope, 'detail': 'logged in using authentication in {0}'.format(creds_file),
                        'backup_code': None, 'refresh_token': refresh_token})
            except Exception:
                # Fix: was a bare ``except:`` which also swallowed SystemExit
                # and KeyboardInterrupt; fall back to a normal login on any
                # loading/validation failure instead.
                print(
                    "ERROR: There was an issue loading pickle file. Authentication may be expired - logging in normally.", file=get_output())
                set_login_state(False)
                update_session('Authorization', None)
        else:
            os.remove(pickle_path)
    # Try to log in normally.
    if not username:
        username = input("Robinhood username: ")
        payload['username'] = username
    if not password:
        password = getpass.getpass("Robinhood password: ")
        payload['password'] = password
    data = request_post(url, payload)
    # Handle case where mfa or challenge is required.
    if data:
        if 'mfa_required' in data:
            mfa_token = input("Please type in the MFA code: ")
            payload['mfa_code'] = mfa_token
            res = request_post(url, payload, jsonify_data=False)
            while (res.status_code != 200):
                mfa_token = input(
                    "That MFA code was not correct. Please type in another MFA code: ")
                payload['mfa_code'] = mfa_token
                res = request_post(url, payload, jsonify_data=False)
            data = res.json()
        elif 'challenge' in data:
            challenge_id = data['challenge']['id']
            sms_code = input('Enter Robinhood code for validation: ')
            res = respond_to_challenge(challenge_id, sms_code)
            while 'challenge' in res and res['challenge']['remaining_attempts'] > 0:
                sms_code = input('That code was not correct. {0} tries remaining. Please type in another code: '.format(
                    res['challenge']['remaining_attempts']))
                res = respond_to_challenge(challenge_id, sms_code)
            update_session(
                'X-ROBINHOOD-CHALLENGE-RESPONSE-ID', challenge_id)
            data = request_post(url, payload)
        # Update Session data with authorization or raise exception with the information present in data.
        if 'access_token' in data:
            token = '{0} {1}'.format(data['token_type'], data['access_token'])
            update_session('Authorization', token)
            set_login_state(True)
            data['detail'] = "logged in with brand new authentication code."
            if store_session:
                with open(pickle_path, 'wb') as f:
                    pickle.dump({'token_type': data['token_type'],
                                 'access_token': data['access_token'],
                                 'refresh_token': data['refresh_token'],
                                 'device_token': payload['device_token']}, f)
        else:
            raise Exception(data['detail'])
    else:
        raise Exception('Error: Trouble connecting to robinhood API. Check internet connection.')
    return(data)
@login_required
def logout():
    """Removes authorization from the session header.

    :returns: None

    """
    # Mark the session as logged out, then drop the auth header.
    set_login_state(False)
    update_session('Authorization', None)
from uuid import uuid4
from robin_stocks.robinhood.crypto import *
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.profiles import *
from robin_stocks.robinhood.stocks import *
from robin_stocks.robinhood.urls import *
@login_required
def get_all_stock_orders(info=None):
    """Return every stock order that has been processed for the account.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :returns: A list of order dictionaries, or a list of values for the key \
    named by info.

    """
    order_data = request_get(orders_url(), 'pagination')
    return filter_data(order_data, info)
@login_required
def get_all_option_orders(info=None):
    """Return every option order that has been processed for the account.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :returns: A list of option-order dictionaries, or a list of values for \
    the key named by info.

    """
    order_data = request_get(option_orders_url(), 'pagination')
    return filter_data(order_data, info)
@login_required
def get_all_crypto_orders(info=None):
    """Return every crypto order that has been processed for the account.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :returns: A list of crypto-order dictionaries, or a list of values for \
    the key named by info.

    """
    order_data = request_get(crypto_orders_url(), 'pagination')
    return filter_data(order_data, info)
@login_required
def get_all_open_stock_orders(info=None, account_number=None):
    """Return every stock order that is currently open.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :returns: A list of open-order dictionaries, or a list of values for the \
    key named by info.

    """
    order_data = request_get(orders_url(account_number=account_number), 'pagination')
    # An order is still open while its 'cancel' url is populated.
    open_orders = [order for order in order_data if order['cancel'] is not None]
    return filter_data(open_orders, info)
@login_required
def get_all_open_option_orders(info=None, account_number=None):
    """Return every option order that is currently open.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :returns: A list of open option-order dictionaries, or a list of values \
    for the key named by info.

    """
    order_data = request_get(option_orders_url(account_number=account_number), 'pagination')
    # An order is still open while its 'cancel_url' is populated.
    open_orders = [order for order in order_data if order['cancel_url'] is not None]
    return filter_data(open_orders, info)
@login_required
def get_all_open_crypto_orders(info=None):
    """Return every crypto order that is currently open.

    :param info: If given, filter the results down to the values of this key.
    :type info: Optional[str]
    :returns: A list of open crypto-order dictionaries, or a list of values \
    for the key named by info.

    """
    order_data = request_get(crypto_orders_url(), 'pagination')
    # An order is still open while its 'cancel_url' is populated.
    open_orders = [order for order in order_data if order['cancel_url'] is not None]
    return filter_data(open_orders, info)
@login_required
def get_stock_order_info(orderID):
    """Return the information for a single stock order.

    :param orderID: The ID associated with the order. Can be found using get_all_stock_orders(info=None).
    :type orderID: str
    :returns: A dictionary of key/value pairs for the order.

    """
    return request_get(orders_url(orderID))
@login_required
def get_option_order_info(order_id):
    """Return the information for a single option order.

    :param order_id: The ID associated with the option order.
    :type order_id: str
    :returns: A dictionary of key/value pairs for the order.

    """
    return request_get(option_orders_url(order_id))
@login_required
def get_crypto_order_info(order_id):
    """Return the information for a single crypto order.

    :param order_id: The ID associated with the crypto order.
    :type order_id: str
    :returns: A dictionary of key/value pairs for the order.

    """
    return request_get(crypto_orders_url(order_id))
@login_required
def find_stock_orders(**arguments):
    """Return the stock orders whose fields match every keyword parameter.

    :param arguments: Variable length of keyword arguments. EX. find_orders(symbol='FB',cancel=None,quantity=1)
    :type arguments: str
    :returns: A list of matching orders.

    """
    order_data = request_get(orders_url(), 'pagination')
    if not arguments:
        return order_data
    # Normalize quantities to canonical float-string form so user-supplied
    # ints/floats compare equal to the API's strings.
    for order in order_data:
        order['quantity'] = str(float(order['quantity']))
        order['cumulative_quantity'] = str(float(order['cumulative_quantity']))
    # Orders reference instruments by url, not ticker, so translate 'symbol'.
    if 'symbol' in arguments:
        arguments['instrument'] = get_instruments_by_symbols(
            arguments['symbol'], info='url')[0]
        del arguments['symbol']
    if 'quantity' in arguments:
        arguments['quantity'] = str(arguments['quantity'])
    matches = []
    for order in order_data:
        matched = True
        for key, value in arguments.items():
            if key not in order:
                print(error_argument_not_key_in_dictionary(key), file=get_output())
                return([None])
            if order[key] != value:
                matched = False
                break
        if matched:
            matches.append(order)
    return matches
@login_required
def cancel_stock_order(orderID):
    """Cancel a specific stock order.

    :param orderID: The ID associated with the order. Can be found using get_all_stock_orders(info=None).
    :type orderID: str
    :returns: The order information for the order that was cancelled.

    """
    response = request_post(cancel_url(orderID))
    if response:
        print('Order '+str(orderID)+' cancelled', file=get_output())
    return response
@login_required
def cancel_option_order(orderID):
    """Cancel a specific option order.

    :param orderID: The ID associated with the order. Can be found using get_all_option_orders(info=None).
    :type orderID: str
    :returns: The order information for the order that was cancelled.

    """
    response = request_post(option_cancel_url(orderID))
    if response:
        print('Order '+str(orderID)+' cancelled', file=get_output())
    return response
@login_required
def cancel_crypto_order(orderID):
    """Cancel a specific crypto order.

    :param orderID: The ID associated with the order. Can be found using get_all_crypto_orders(info=None).
    :type orderID: str
    :returns: The order information for the order that was cancelled.

    """
    response = request_post(crypto_cancel_url(orderID))
    if response:
        print('Order '+str(orderID)+' cancelled', file=get_output())
    return response
@login_required
def cancel_all_stock_orders():
    """Cancel all open stock orders.

    :returns: The list of orders that were cancelled.

    """
    # Only orders with a populated 'cancel' url are still open.
    open_orders = [order for order in request_get(orders_url(), 'pagination')
                   if order['cancel'] is not None]
    for order in open_orders:
        request_post(order['cancel'])
    print('All Stock Orders Cancelled', file=get_output())
    return open_orders
@login_required
def cancel_all_option_orders():
    """Cancel every open option order on the account.

    :returns: Returns the order information for the orders that were cancelled.

    """
    # Only orders that still expose a cancel endpoint can be cancelled.
    open_orders = [entry for entry in request_get(option_orders_url(), 'pagination')
                   if entry['cancel_url'] is not None]
    for entry in open_orders:
        request_post(entry['cancel_url'])
    print('All Option Orders Cancelled', file=get_output())
    return open_orders
@login_required
def cancel_all_crypto_orders():
    """Cancel every open crypto order on the account.

    :returns: Returns the order information for the orders that were cancelled.

    """
    # Only orders that still expose a cancel endpoint can be cancelled.
    open_orders = [entry for entry in request_get(crypto_orders_url(), 'pagination')
                   if entry['cancel_url'] is not None]
    for entry in open_orders:
        request_post(entry['cancel_url'])
    print('All Crypto Orders Cancelled', file=get_output())
    return open_orders
@login_required
def order_buy_market(symbol, quantity, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a market order to be executed immediately.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call passed account_number into order()'s
    # limitPrice slot; forward by keyword so every value binds correctly.
    return order(symbol, quantity, "buy",
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_buy_fractional_by_quantity(symbol, quantity, account_number=None, timeInForce='gfd', extendedHours=False, jsonify=True):
    """Submits a market order to be executed immediately for fractional shares by specifying the amount that you want to trade.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support placing limit, stop, or stop loss orders
    for fractional trades.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The amount of the fractional shares you want to buy.
    :type quantity: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call passed account_number into order()'s
    # limitPrice slot; forward by keyword so every value binds correctly.
    return order(symbol, quantity, "buy",
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_buy_fractional_by_price(symbol, amountInDollars, account_number=None, timeInForce='gfd', extendedHours=False, jsonify=True, market_hours='regular_hours'):
    """Submits a market order to be executed immediately for fractional shares by specifying the amount in dollars that you want to trade.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support placing limit, stop, or stop loss orders
    for fractional trades.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the fractional shares you want to buy.
    :type amountInDollars: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :param market_hours: Which trading session to use: 'regular_hours' or 'all_day_hours'.
    :type market_hours: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity, or None if the request could not be built.

    """
    if amountInDollars < 1:
        print("ERROR: Fractional share price should meet minimum 1.00.", file=get_output())
        return None
    # Turn the dollar amount into a decimal number of shares using the ask.
    price = next(iter(get_latest_price(symbol, 'ask_price', extendedHours)), 0.00)
    if price == 0.00:
        # FIX: previously a 0-share order was submitted when no price came
        # back; bail out explicitly instead.
        print("ERROR: Could not retrieve a price for " + str(symbol) + ".", file=get_output())
        return None
    fractional_shares = round_price(amountInDollars / float(price))
    return order(symbol, fractional_shares, "buy",
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify,
                 market_hours=market_hours)
@login_required
def order_buy_limit(symbol, quantity, limitPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a limit order to be executed once a certain price is reached.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param limitPrice: The price to trigger the buy order.
    :type limitPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call bound account_number to limitPrice and
    # limitPrice to stopPrice in order(); forward by keyword instead.
    return order(symbol, quantity, "buy",
                 limitPrice=limitPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_buy_stop_loss(symbol, quantity, stopPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a stop order to be turned into a market order once a certain stop price is reached.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param stopPrice: The price to trigger the market order.
    :type stopPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call bound account_number to limitPrice and
    # stopPrice to account_number in order(); forward by keyword instead.
    return order(symbol, quantity, "buy",
                 stopPrice=stopPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_buy_stop_limit(symbol, quantity, limitPrice, stopPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a stop order to be turned into a limit order once a certain stop price is reached.

    :param symbol: The stock ticker of the stock to purchase.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param limitPrice: The price to trigger the market order.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call shifted account_number/limitPrice/
    # stopPrice each one slot over in order(); forward by keyword instead.
    return order(symbol, quantity, "buy",
                 limitPrice=limitPrice,
                 stopPrice=stopPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_buy_trailing_stop(symbol, quantity, trailAmount, trailType='percentage', timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a trailing stop buy order to be turned into a market order when the trailing stop price is reached.

    :param symbol: The stock ticker of the stock to buy.
    :type symbol: str
    :param quantity: The number of stocks to buy.
    :type quantity: int
    :param trailAmount: How much to trail by; could be percentage or dollar value depending on trailType.
    :type trailAmount: float
    :param trailType: Could be "amount" or "percentage".
    :type trailType: str
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the purchase of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call pushed timeInForce into
    # order_trailing_stop()'s account_number slot (shifting the remaining
    # arguments); forward by keyword instead.
    return order_trailing_stop(symbol, quantity, "buy", trailAmount,
                               trailType=trailType,
                               timeInForce=timeInForce,
                               extendedHours=extendedHours,
                               jsonify=jsonify)
@login_required
def order_sell_market(symbol, quantity, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a market order to be executed immediately.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call passed account_number into order()'s
    # limitPrice slot; forward by keyword so every value binds correctly.
    return order(symbol, quantity, "sell",
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_sell_fractional_by_quantity(symbol, quantity, account_number=None, timeInForce='gfd', priceType='bid_price', extendedHours=False, jsonify=True, market_hours='regular_hours'):
    """Submits a market order to sell a fractional quantity of shares immediately.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support placing limit, stop, or stop loss orders
    for fractional trades.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The amount of the fractional shares you want to sell.
    :type quantity: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param priceType: NOTE(review): accepted but currently unused by this function — confirm before removing.
    :type priceType: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :param market_hours: Which trading session to use: 'regular_hours' or 'all_day_hours'.
    :type market_hours: Optional[str]
    :returns: Dictionary that contains information regarding the sale of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # Forward by keyword for readability; bindings match the original
    # positional call exactly.
    return order(symbol, quantity, "sell",
                 limitPrice=None,
                 stopPrice=None,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify,
                 market_hours=market_hours)
@login_required
def order_sell_fractional_by_price(symbol, amountInDollars, account_number=None, timeInForce='gfd', extendedHours=False, jsonify=True):
    """Submits a market order to sell fractional shares worth a given dollar amount immediately.
    Good for share fractions up to 6 decimal places. Robinhood does not currently support placing limit, stop, or stop loss orders
    for fractional trades.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the fractional shares you want to sell.
    :type amountInDollars: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the sale of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity, or None if the request could not be built.

    """
    if amountInDollars < 1:
        print("ERROR: Fractional share price should meet minimum 1.00.", file=get_output())
        return None
    # Turn the dollar amount into a decimal number of shares using the bid.
    price = next(iter(get_latest_price(symbol, 'bid_price', extendedHours)), 0.00)
    if price == 0.00:
        # FIX: previously a 0-share order was submitted when no price came
        # back; bail out explicitly instead.
        print("ERROR: Could not retrieve a price for " + str(symbol) + ".", file=get_output())
        return None
    fractional_shares = round_price(amountInDollars / float(price))
    # FIX: the previous positional call passed account_number into order()'s
    # limitPrice slot; forward by keyword so every value binds correctly.
    return order(symbol, fractional_shares, "sell",
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_sell_limit(symbol, quantity, limitPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a limit order to be executed once a certain price is reached.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param limitPrice: The price to trigger the sell order.
    :type limitPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call bound account_number to limitPrice and
    # limitPrice to stopPrice in order(); forward by keyword instead.
    return order(symbol, quantity, "sell",
                 limitPrice=limitPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_sell_stop_loss(symbol, quantity, stopPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a stop order to be turned into a market order once a certain stop price is reached.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param stopPrice: The price to trigger the market order.
    :type stopPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call bound account_number to limitPrice and
    # stopPrice to account_number in order(); forward by keyword instead.
    return order(symbol, quantity, "sell",
                 stopPrice=stopPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_sell_stop_limit(symbol, quantity, limitPrice, stopPrice, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a stop order to be turned into a limit order once a certain stop price is reached.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param limitPrice: The price to trigger the market order.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call shifted account_number/limitPrice/
    # stopPrice each one slot over in order(); forward by keyword instead.
    return order(symbol, quantity, "sell",
                 limitPrice=limitPrice,
                 stopPrice=stopPrice,
                 account_number=account_number,
                 timeInForce=timeInForce,
                 extendedHours=extendedHours,
                 jsonify=jsonify)
@login_required
def order_sell_trailing_stop(symbol, quantity, trailAmount, trailType='percentage', timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a trailing stop sell order to be turned into a market order when the trailing stop price is reached.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param trailAmount: How much to trail by; could be percentage or dollar value depending on trailType.
    :type trailAmount: float
    :param trailType: Could be "amount" or "percentage".
    :type trailType: str
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # FIX: the previous positional call pushed timeInForce into
    # order_trailing_stop()'s account_number slot (shifting the remaining
    # arguments); forward by keyword instead.
    return order_trailing_stop(symbol, quantity, "sell", trailAmount,
                               trailType=trailType,
                               timeInForce=timeInForce,
                               extendedHours=extendedHours,
                               jsonify=jsonify)
@login_required
def order_trailing_stop(symbol, quantity, side, trailAmount, trailType='percentage', account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True):
    """Submits a trailing stop order to be turned into a market order when the trailing stop price is reached.

    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of stocks to trade.
    :type quantity: int
    :param side: Either 'buy' or 'sell'.
    :type side: str
    :param trailAmount: How much to trail by; could be percentage or dollar value depending on trailType.
    :type trailAmount: float
    :param trailType: Could be "amount" or "percentage".
    :type trailType: str
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: Optional[str]
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the order, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    try:
        symbol = symbol.upper().strip()
        trailAmount = float(trailAmount)
    except AttributeError as message:
        print(message)
        return None
    # FIX: extendedHours was previously passed as get_latest_price()'s second
    # (priceType) argument; pass None for priceType and extendedHours in its
    # own slot, matching the 3-argument calls used elsewhere in this module.
    stock_price = round_price(get_latest_price(symbol, None, extendedHours)[0])
    # Find the stop price from the trail: a flat dollar amount, or a
    # percentage of the current stock price.
    percentage = 0
    try:
        if trailType == 'amount':
            margin = trailAmount
        else:
            margin = stock_price * trailAmount * 0.01
            percentage = trailAmount
    except Exception as e:
        print('ERROR: {}'.format(e))
        return None
    # Buys trail above the market, sells trail below it.
    stopPrice = stock_price + margin if side == "buy" else stock_price - margin
    stopPrice = round_price(stopPrice)
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'instrument': get_instruments_by_symbols(symbol, info='url')[0],
        'symbol': symbol,
        'quantity': quantity,
        'ref_id': str(uuid4()),
        'type': 'market',
        'stop_price': stopPrice,
        'time_in_force': timeInForce,
        'trigger': 'stop',
        'side': side,
        'extended_hours': extendedHours
    }
    if side == "buy":
        # price should be greater than stopPrice, adding a 5% threshold
        payload['price'] = round_price(stopPrice * 1.05)
    if trailType == 'amount':
        payload['trailing_peg'] = {'type': 'price', 'price': {'amount': trailAmount, 'currency_code': 'USD'}}
    else:
        payload['trailing_peg'] = {'type': 'percentage', 'percentage': str(percentage)}
    url = orders_url()
    data = request_post(url, payload, json=True, jsonify_data=jsonify)
    return (data)
@login_required
def order(symbol, quantity, side, limitPrice=None, stopPrice=None, account_number=None, timeInForce='gtc', extendedHours=False, jsonify=True, market_hours='regular_hours'):
    """A generic order function.

    :param symbol: The stock ticker of the stock to sell.
    :type symbol: str
    :param quantity: The number of stocks to sell.
    :type quantity: int
    :param side: Either 'buy' or 'sell'
    :type side: str
    :param limitPrice: The price to trigger the market order.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit or market order.
    :type stopPrice: float
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day.
    :type timeInForce: str
    :param extendedHours: Premium users only. Allows trading during extended hours. Should be true or false.
    :type extendedHours: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :param market_hours: Which trading session to use: 'regular_hours' or 'all_day_hours'.
    :type market_hours: Optional[str]
    :returns: Dictionary that contains information regarding the purchase or selling of stocks, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    # Default to a plain market order; refined below based on which of
    # limitPrice/stopPrice were supplied.
    orderType = "market"
    trigger = "immediate"
    # Buys are priced against the ask, sells against the bid.
    if side == "buy":
        priceType = "ask_price"
    else:
        priceType = "bid_price"
    if limitPrice and stopPrice:
        # Stop-limit: a limit order that activates once stopPrice trades.
        price = round_price(limitPrice)
        stopPrice = round_price(stopPrice)
        orderType = "limit"
        trigger = "stop"
    elif limitPrice:
        # Plain limit order.
        price = round_price(limitPrice)
        orderType = "limit"
    elif stopPrice:
        # Stop (market) order; for buys the stop price doubles as the price.
        stopPrice = round_price(stopPrice)
        if side == "buy":
            price = stopPrice
        else:
            price = None
        trigger = "stop"
    else:
        # Market order: snapshot the latest quote as the reference price
        # (0.00 when no quote is returned).
        price = round_price(next(iter(get_latest_price(symbol, priceType, extendedHours)), 0.00))
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'instrument': get_instruments_by_symbols(symbol, info='url')[0],
        'symbol': symbol,
        'price': price,
        'quantity': quantity,
        'ref_id': str(uuid4()),
        'type': orderType,
        'stop_price': stopPrice,
        'time_in_force': timeInForce,
        'trigger': trigger,
        'side': side,
        'market_hours': market_hours,  # choices are ['regular_hours', 'all_day_hours']
        'extended_hours': extendedHours,
        'order_form_version': 4
    }
    # adjust market orders
    if orderType == 'market':
        # Market orders carry no stop price or extended-hours flag.
        del payload['stop_price']
        del payload['extended_hours']
    if market_hours == 'regular_hours':
        if side == "buy":
            # NOTE(review): regular-hours buys are always sent as a limit
            # order with a 5% preset collar, even when orderType was not
            # 'market' — confirm this is intentional.
            payload['preset_percent_limit'] = "0.05"
            payload['type'] = 'limit'
        # regular market sell
        elif orderType == 'market' and side == 'sell':
            del payload['price']
    elif market_hours == 'all_day_hours':
        payload['type'] = 'limit'
        payload['quantity']=int(payload['quantity'])  # round to integer instead of fractional
    url = orders_url()
    data = request_post(url, payload, jsonify_data=jsonify)
    return(data)
@login_required
def order_option_credit_spread(price, symbol, quantity, spread, timeInForce='gtc', account_number=None, jsonify=True):
    """Submits a limit order for an option credit spread.

    :param price: The limit price to trigger a sell of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.
    :type spread: dict
    :param timeInForce: Changes how long the order will be in effect for. \
    'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the trading of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # Thin wrapper: a credit spread is just an option spread with the
    # 'credit' direction.
    return order_option_spread("credit", price, symbol, quantity, spread,
                               account_number=account_number,
                               timeInForce=timeInForce,
                               jsonify=jsonify)
@login_required
def order_option_debit_spread(price, symbol, quantity, spread, timeInForce='gtc', account_number=None, jsonify=True):
    """Submits a limit order for an option debit spread.

    :param price: The limit price to trigger a sell of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.
    :type spread: dict
    :param timeInForce: Changes how long the order will be in effect for. \
    'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the trading of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    # Thin wrapper: a debit spread is just an option spread with the
    # 'debit' direction.
    return order_option_spread("debit", price, symbol, quantity, spread,
                               account_number=account_number,
                               timeInForce=timeInForce,
                               jsonify=jsonify)
@login_required
def order_option_spread(direction, price, symbol, quantity, spread, account_number=None, timeInForce='gtc', jsonify=True):
    """Submits a limit order for an option spread, i.e. place a debit / credit spread.

    :param direction: Can be "credit" or "debit".
    :type direction: str
    :param price: The limit price to trigger a trade of the option.
    :type price: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to trade.
    :type quantity: int
    :param spread: A dictionary of spread options with the following keys: \n
        - expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.\n
        - strike: The strike price of the option.\n
        - optionType: This should be 'call' or 'put'.\n
        - effect: This should be 'open' or 'close'.\n
        - action: This should be 'buy' or 'sell'.
    :type spread: dict
    :param account_number: The Robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. \
    'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the trading of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.

    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    # Resolve each spread leg to its option instrument URL.
    legs = []
    for leg in spread:
        option_id = id_for_option(symbol,
                                  leg['expirationDate'],
                                  leg['strike'],
                                  leg['optionType'])
        legs.append({
            'position_effect': leg['effect'],
            'side': leg['action'],
            'ratio_quantity': 1,
            'option': option_instruments_url(option_id),
        })
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'direction': direction,
        'time_in_force': timeInForce,
        'legs': legs,
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    return request_post(option_orders_url(), payload, json=True, jsonify_data=jsonify)
@login_required
def order_buy_option_limit(positionEffect, creditOrDebit, price, symbol, quantity, expirationDate, strike, optionType='both', account_number=None, timeInForce='gtc', jsonify=True):
    """Place a buy-side limit order for a single option leg (long call or long put).

    :param positionEffect: Either 'open' for a buy to open effect or 'close' for a buy to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param price: The limit price at which to buy the option.
    :type price: float
    :param symbol: The stock ticker of the underlying stock.
    :type symbol: str
    :param quantity: The number of option contracts to buy.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'.
    :type optionType: str
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: How long the order stays in effect. 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    optionID = id_for_option(symbol, expirationDate, strike, optionType)
    leg = {'position_effect': positionEffect,
           'side': 'buy',
           'ratio_quantity': 1,
           'option': option_instruments_url(optionID)}
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [leg],
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        # Unique client-side id; lets the server de-duplicate resubmissions.
        'ref_id': str(uuid4()),
    }
    data = request_post(option_orders_url(), payload, json=True, jsonify_data=jsonify)
    return data
@login_required
def order_buy_option_stop_limit(positionEffect, creditOrDebit, limitPrice, stopPrice, symbol, quantity, expirationDate, strike, optionType='both', account_number=None, timeInForce='gtc', jsonify=True):
    """Place a buy-side stop-limit order: becomes a limit order once the stop price is reached.

    :param positionEffect: Either 'open' for a buy to open effect or 'close' for a buy to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param limitPrice: The limit price at which to buy the option.
    :type limitPrice: float
    :param stopPrice: The price that triggers the limit order.
    :type stopPrice: float
    :param symbol: The stock ticker of the underlying stock.
    :type symbol: str
    :param quantity: The number of option contracts to buy.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'.
    :type optionType: str
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: How long the order stays in effect. 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    optionID = id_for_option(symbol, expirationDate, strike, optionType)
    leg = {'position_effect': positionEffect,
           'side': 'buy',
           'ratio_quantity': 1,
           'option': option_instruments_url(optionID)}
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [leg],
        'type': 'limit',
        # 'stop' trigger: the limit order activates only at stop_price.
        'trigger': 'stop',
        'price': limitPrice,
        'stop_price': stopPrice,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        'ref_id': str(uuid4()),
    }
    data = request_post(option_orders_url(), payload, json=True, jsonify_data=jsonify)
    return data
@login_required
def order_sell_option_stop_limit(positionEffect, creditOrDebit, limitPrice, stopPrice, symbol, quantity, expirationDate, strike, optionType='both', account_number=None, timeInForce='gtc', jsonify=True):
    """Submits a sell-side stop order to be turned into a limit order once a certain stop price is reached.

    :param positionEffect: Either 'open' for a sell to open effect or 'close' for a sell to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param limitPrice: The limit price to trigger a sell of the option.
    :type limitPrice: float
    :param stopPrice: The price to trigger the limit order.
    :type stopPrice: float
    :param symbol: The stock ticker of the stock to trade.
    :type symbol: str
    :param quantity: The number of options to sell.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'
    :type optionType: str
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled. \
    'gfd' = good for the day. 'ioc' = immediate or cancel. 'opg' execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the selling of options, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    optionID = id_for_option(symbol, expirationDate, strike, optionType)
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [
            {'position_effect': positionEffect, 'side': 'sell',
             'ratio_quantity': 1, 'option': option_instruments_url(optionID)},
        ],
        'type': 'limit',
        'trigger': 'stop',
        'price': limitPrice,
        'stop_price': stopPrice,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        # Unique client-side id; lets the server de-duplicate resubmissions.
        'ref_id': str(uuid4()),
    }
    url = option_orders_url()
    data = request_post(url, payload, json=True, jsonify_data=jsonify)
    return(data)
@login_required
def order_sell_option_limit(positionEffect, creditOrDebit, price, symbol, quantity, expirationDate, strike, optionType='both', account_number=None, timeInForce='gtc', jsonify=True):
    """Place a sell-side limit order for a single option leg (short call or short put).

    :param positionEffect: Either 'open' for a sell to open effect or 'close' for a sell to close effect.
    :type positionEffect: str
    :param creditOrDebit: Either 'debit' or 'credit'.
    :type creditOrDebit: str
    :param price: The limit price at which to sell the option.
    :type price: float
    :param symbol: The stock ticker of the underlying stock.
    :type symbol: str
    :param quantity: The number of option contracts to sell.
    :type quantity: int
    :param expirationDate: The expiration date of the option in 'YYYY-MM-DD' format.
    :type expirationDate: str
    :param strike: The strike price of the option.
    :type strike: float
    :param optionType: This should be 'call' or 'put'.
    :type optionType: str
    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param timeInForce: How long the order stays in effect. 'gtc' = good until cancelled, \
    'gfd' = good for the day, 'ioc' = immediate or cancel, 'opg' = execute at opening.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    optionID = id_for_option(symbol, expirationDate, strike, optionType)
    leg = {'position_effect': positionEffect,
           'side': 'sell',
           'ratio_quantity': 1,
           'option': option_instruments_url(optionID)}
    payload = {
        'account': load_account_profile(account_number=account_number, info='url'),
        'direction': creditOrDebit,
        'time_in_force': timeInForce,
        'legs': [leg],
        'type': 'limit',
        'trigger': 'immediate',
        'price': price,
        'quantity': quantity,
        'override_day_trade_checks': False,
        'override_dtbp_checks': False,
        # Unique client-side id; lets the server de-duplicate resubmissions.
        'ref_id': str(uuid4()),
    }
    data = request_post(option_orders_url(), payload, json=True, jsonify_data=jsonify)
    return data
@login_required
def order_buy_crypto_by_price(symbol, amountInDollars, timeInForce='gtc', jsonify=True):
    """Market-buy a crypto by specifying the dollar amount to spend.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to buy.
    :type amountInDollars: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point as a dollar-denominated buy.
    return order_crypto(symbol, side="buy", quantityOrPrice=amountInDollars,
                        amountIn="price", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_buy_crypto_by_quantity(symbol, quantity, timeInForce='gtc', jsonify=True):
    """Market-buy a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to buy.
    :type quantity: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point as a quantity-denominated buy.
    return order_crypto(symbol, side="buy", quantityOrPrice=quantity,
                        amountIn="quantity", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_buy_crypto_limit(symbol, quantity, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-buy a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to buy.
    :type quantity: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point; a non-None limitPrice makes it a limit order.
    return order_crypto(symbol, side="buy", quantityOrPrice=quantity,
                        amountIn="quantity", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_buy_crypto_limit_by_price(symbol, amountInDollars, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-buy a crypto by specifying the dollar amount to spend.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to buy.
    :type amountInDollars: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point; a non-None limitPrice makes it a limit order.
    return order_crypto(symbol, side="buy", quantityOrPrice=amountInDollars,
                        amountIn="price", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_sell_crypto_by_price(symbol, amountInDollars, timeInForce='gtc', jsonify=True):
    """Market-sell a crypto by specifying the dollar amount to liquidate.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to sell.
    :type amountInDollars: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point as a dollar-denominated sell.
    return order_crypto(symbol, side="sell", quantityOrPrice=amountInDollars,
                        amountIn="price", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_sell_crypto_by_quantity(symbol, quantity, timeInForce='gtc', jsonify=True):
    """Market-sell a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to sell.
    :type quantity: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point as a quantity-denominated sell.
    return order_crypto(symbol, side="sell", quantityOrPrice=quantity,
                        amountIn="quantity", limitPrice=None,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_sell_crypto_limit(symbol, quantity, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-sell a crypto by specifying the decimal number of shares.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param quantity: The decimal amount of shares to sell.
    :type quantity: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point; a non-None limitPrice makes it a limit order.
    return order_crypto(symbol, side="sell", quantityOrPrice=quantity,
                        amountIn="quantity", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_sell_crypto_limit_by_price(symbol, amountInDollars, limitPrice, timeInForce='gtc', jsonify=True):
    """Limit-sell a crypto by specifying the dollar amount to liquidate.
    Supports share fractions up to 8 decimal places.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param amountInDollars: The amount in dollars of the crypto you want to sell.
    :type amountInDollars: float
    :param limitPrice: The limit price to set for the crypto.
    :type limitPrice: float
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary describing the submitted order: the order id, its state \
    (queued, confirmed, filled, failed, canceled, etc.), the price, and the quantity.
    """
    # Delegate to the generic crypto order entry point; a non-None limitPrice makes it a limit order.
    return order_crypto(symbol, side="sell", quantityOrPrice=amountInDollars,
                        amountIn="price", limitPrice=limitPrice,
                        timeInForce=timeInForce, jsonify=jsonify)
@login_required
def order_crypto(symbol, side, quantityOrPrice, amountIn="quantity", limitPrice=None, timeInForce="gtc", jsonify=True):
    """Submits an order for a crypto.

    :param symbol: The crypto ticker of the crypto to trade.
    :type symbol: str
    :param side: Either 'buy' or 'sell'
    :type side: str
    :param quantityOrPrice: Either the decimal price of shares to trade or the decimal quantity of shares.
    :type quantityOrPrice: float
    :param amountIn: If left default value of 'quantity', order will attempt to trade cryptos by the amount of crypto \
    you want to trade. If changed to 'price', order will attempt to trade cryptos by the price you want to buy or sell.
    :type amountIn: Optional[str]
    :param limitPrice: The price to trigger the market order.
    :type limitPrice: Optional[float]
    :param timeInForce: Changes how long the order will be in effect for. 'gtc' = good until cancelled.
    :type timeInForce: Optional[str]
    :param jsonify: If set to False, function will return the request object which contains status code and headers.
    :type jsonify: Optional[str]
    :returns: Dictionary that contains information regarding the trade of crypto, \
    such as the order id, the state of order (queued, confirmed, filled, failed, canceled, etc.), \
    the price, and the quantity. Returns None on invalid input or when no price is available.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    crypto_id = get_crypto_id(symbol)
    orderType = "market"
    # Buys execute against the ask, sells against the bid.
    if side == "buy":
        priceType = "ask_price"
    else:
        priceType = "bid_price"
    if limitPrice:
        price = limitPrice
        orderType = "limit"
    else:
        price = round_price(get_crypto_quote_from_id(crypto_id, info=priceType))
    if amountIn == "quantity":
        quantity = quantityOrPrice
    else:
        # Guard: converting dollars to shares needs a usable quote price;
        # previously a failed quote (None or 0) raised TypeError/ZeroDivisionError here.
        if not price:
            print("ERROR: could not determine a price for {}.".format(symbol), file=get_output())
            return None
        quantity = round_price(quantityOrPrice / price)
    payload = {
        'account_id': load_crypto_profile(info="id"),
        'currency_pair_id': crypto_id,
        'price': price,
        'quantity': quantity,
        # Unique client-side id; makes server-side retry de-duplication possible.
        'ref_id': str(uuid4()),
        'side': side,
        'time_in_force': timeInForce,
        'type': orderType
    }
    url = order_crypto_url()
    # Retrying is safe because 'ref_id' guards us from duplicate orders.
    data = None
    attempts = 3
    while attempts > 0:
        data = request_post(url, payload, json=True, jsonify_data=jsonify)
        if data is not None:
            break
        attempts -= 1
    return data
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
@login_required
def load_account_profile(account_number=None, info=None):
    """Gets the information associated with the account's profile, including day
    trading information and cash being held by Robinhood.

    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * url
                      * portfolio_cash
                      * can_downgrade_to_cash
                      * user
                      * account_number
                      * type
                      * created_at
                      * updated_at
                      * deactivated
                      * deposit_halted
                      * only_position_closing_trades
                      * buying_power
                      * cash_available_for_withdrawal
                      * cash
                      * cash_held_for_orders
                      * uncleared_deposits
                      * sma
                      * sma_held_for_orders
                      * unsettled_funds
                      * unsettled_debit
                      * crypto_buying_power
                      * max_ach_early_access_amount
                      * cash_balances
                      * margin_balances
                      * sweep_enabled
                      * instant_eligibility
                      * option_level
                      * is_pinnacle_account
                      * rhs_account_number
                      * state
                      * active_subscription_id
                      * locked
                      * permanently_deactivated
                      * received_ach_debit_locked
                      * drip_enabled
                      * eligible_for_fractionals
                      * eligible_for_drip
                      * eligible_for_cash_management
                      * cash_management_enabled
                      * option_trading_on_expiration_enabled
                      * cash_held_for_options_collateral
                      * fractional_position_closing_only
                      * user_id
                      * rhs_stock_loan_consent_status
    """
    url = account_profile_url(account_number)
    if account_number is None:
        # No explicit account: query the listing endpoint and take the first
        # entry ('indexzero' — presumably selects result index 0; confirm in request_get).
        data = request_get(url, 'indexzero')
    else:
        data = request_get(url)
    return filter_data(data, info)
@login_required
def load_basic_profile(info=None):
    """Gets the information associated with the personal profile, such as phone
    number, city, marital status, and date of birth.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * user
                      * address
                      * city
                      * state
                      * zipcode
                      * phone_number
                      * marital_status
                      * date_of_birth
                      * citizenship
                      * country_of_residence
                      * number_dependents
                      * signup_as_rhs
                      * tax_id_ssn
                      * updated_at
    """
    return filter_data(request_get(basic_profile_url()), info)
@login_required
def load_investment_profile(info=None):
    """Gets the information associated with the investment profile — the answers
    to the questionnaire you filled out when you made your profile.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * user
                      * total_net_worth
                      * annual_income
                      * source_of_funds
                      * investment_objective
                      * investment_experience
                      * liquid_net_worth
                      * risk_tolerance
                      * tax_bracket
                      * time_horizon
                      * liquidity_needs
                      * investment_experience_collected
                      * suitability_verified
                      * option_trading_experience
                      * professional_trader
                      * understand_option_spreads
                      * interested_in_options
                      * updated_at
    """
    return filter_data(request_get(investment_profile_url()), info)
@login_required
def load_portfolio_profile(info=None):
    """Gets the information associated with the portfolio's profile, such as
    withdrawable amount, market value of account, and excess margin.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * url
                      * account
                      * start_date
                      * market_value
                      * equity
                      * extended_hours_market_value
                      * extended_hours_equity
                      * extended_hours_portfolio_equity
                      * last_core_market_value
                      * last_core_equity
                      * last_core_portfolio_equity
                      * excess_margin
                      * excess_maintenance
                      * excess_margin_with_uncleared_deposits
                      * excess_maintenance_with_uncleared_deposits
                      * equity_previous_close
                      * portfolio_equity_previous_close
                      * adjusted_equity_previous_close
                      * adjusted_portfolio_equity_previous_close
                      * withdrawable_amount
                      * unwithdrawable_deposits
                      * unwithdrawable_grants
    """
    # 'indexzero' asks request_get for the first entry of the listing response.
    return filter_data(request_get(portfolio_profile_url(), 'indexzero'), info)
@login_required
def load_security_profile(info=None):
    """Gets the information associated with the security profile.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * user
                      * object_to_disclosure
                      * sweep_consent
                      * control_person
                      * control_person_security_symbol
                      * security_affiliated_employee
                      * security_affiliated_firm_relationship
                      * security_affiliated_firm_name
                      * security_affiliated_person_name
                      * security_affiliated_address
                      * security_affiliated_address_subject
                      * security_affiliated_requires_duplicates
                      * stock_loan_consent_status
                      * agreed_to_rhs
                      * agreed_to_rhs_margin
                      * rhs_stock_loan_consent_status
                      * updated_at
    """
    return filter_data(request_get(security_profile_url()), info)
@login_required
def load_user_profile(info=None):
    """Gets the information associated with the user profile, such as username,
    email, and links to the urls for other profiles.

    :param info: The name of the key whose value is to be returned from the function.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs. If a string is passed in to the \
    info parameter, the function instead returns the value of the key whose name \
    matches the info parameter.
    :Dictionary Keys: * url
                      * id
                      * id_info
                      * username
                      * email
                      * email_verified
                      * first_name
                      * last_name
                      * origin
                      * profile_name
                      * created_at
    """
    return filter_data(request_get(user_profile_url()), info)
import sys
from robin_stocks.robinhood.helper import *
from robin_stocks.robinhood.urls import *
def spinning_cursor():
    """ Generator that endlessly yields the frames of a spinning cursor: | / - \\ """
    frames = '|/-\\'
    position = 0
    while True:
        yield frames[position]
        position = (position + 1) % len(frames)
# Shared module-level generator so successive calls continue the animation.
spinner = spinning_cursor()
def write_spinner():
    """ Draw one frame of a spinning cursor on stdout to show market data is loading. """
    # Only animate when output actually goes to the terminal's stdout.
    if get_output() != sys.stdout:
        return
    label = 'Loading Market Data '
    sys.stdout.write(label)
    sys.stdout.write(next(spinner))
    sys.stdout.flush()
    # Backspace over everything written so the next frame overdraws in place.
    sys.stdout.write('\b' * (len(label) + 1))
@login_required
def get_aggregate_positions(info=None):
    """Collapses all option orders for a stock into a single dictionary.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for each order. If info is \
    provided, a list of strings holding the matching values is returned instead.
    """
    data = request_get(aggregate_url(), 'pagination')
    return filter_data(data, info)
@login_required
def get_aggregate_open_positions(info=None):
    """Collapses all open option positions for a stock into a single dictionary.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for each order. If info is \
    provided, a list of strings holding the matching values is returned instead.
    """
    # 'nonzero' restricts the aggregate endpoint to open (non-closed) positions.
    data = request_get(aggregate_url(), 'pagination', {'nonzero': 'True'})
    return filter_data(data, info)
@login_required
def get_market_options(info=None):
    """Returns a list of all options.

    NOTE(review): this queries option_orders_url(), so the payload appears to be
    option *orders* rather than options — confirm against the API.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for each option. If info is \
    provided, a list of strings holding the matching values is returned instead.
    """
    data = request_get(option_orders_url(), 'pagination')
    return filter_data(data, info)
@login_required
def get_all_option_positions(info=None):
    """Returns all option positions ever held for the account.

    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for each option. If info is \
    provided, a list of strings holding the matching values is returned instead.
    """
    data = request_get(option_positions_url(), 'pagination')
    return filter_data(data, info)
@login_required
def get_open_option_positions(account_number=None, info=None):
    """Returns all open option positions for the account.

    :param account_number: the robinhood account number.
    :type account_number: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A list of dictionaries of key/value pairs for each option. If info is \
    provided, a list of strings holding the matching values is returned instead.
    """
    url = option_positions_url(account_number=account_number)
    # 'nonzero' restricts the endpoint to positions that are still open.
    data = request_get(url, 'pagination', {'nonzero': 'True'})
    return filter_data(data, info)
def get_chains(symbol, info=None):
    """Returns the chain information of an option.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: A dictionary of key/value pairs for the option. If info is provided, \
    a list of strings holding the matching values is returned instead.
    """
    # Normalize the ticker; a non-string argument surfaces here as AttributeError.
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return None
    return filter_data(request_get(chains_url(symbol)), info)
@login_required
def find_tradable_options(symbol, expirationDate=None, strikePrice=None, optionType=None, info=None):
    """Returns a list of all available options for a stock.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the strike price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or left blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all calls of the stock. If info parameter is provided, \
    a list of strings is returned where the strings are the value of the key that matches info.
    """
    try:
        symbol = symbol.upper().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    url = option_instruments_url()
    # Look up the chain id once; previously id_for_chain() was called twice
    # (validation and payload), doubling the network/cache work.
    chain_id = id_for_chain(symbol)
    if not chain_id:
        print("Symbol {} is not valid for finding options.".format(symbol), file=get_output())
        return [None]
    payload = {'chain_id': chain_id,
               'chain_symbol': symbol,
               'state': 'active'}
    # Optional server-side filters.
    if expirationDate:
        payload['expiration_dates'] = expirationDate
    if strikePrice:
        payload['strike_price'] = strikePrice
    if optionType:
        payload['type'] = optionType
    data = request_get(url, 'pagination', payload)
    return(filter_data(data, info))
@login_required
def find_options_by_expiration(inputSymbols, expirationDate, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
    """
    try:
        symbols = inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    data = []
    for symbol in symbols:
        allOptions = find_tradable_options(symbol, expirationDate, None, optionType, None)
        # find_tradable_options returns [None] on failure (e.g. a bad symbol);
        # drop falsy entries before calling .get() so an invalid symbol cannot
        # raise AttributeError here.
        filteredOptions = [item for item in allOptions
                           if item and item.get("expiration_date") == expirationDate]
        for item in filteredOptions:
            marketData = get_option_market_data_by_id(item['id'])
            if marketData:
                item.update(marketData[0])
            write_spinner()
        data.extend(filteredOptions)
    return(filter_data(data, info))
@login_required
def find_options_by_strike(inputSymbols, strikePrice, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param strikePrice: Represents the strike price to filter for.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
    """
    try:
        symbols = inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    data = []
    for symbol in symbols:
        allOptions = find_tradable_options(symbol, None, strikePrice, optionType, None)
        # find_tradable_options returns [None] on failure; filter out falsy
        # entries so item['id'] below cannot raise TypeError on None.
        filteredOptions = [item for item in allOptions if item]
        for item in filteredOptions:
            marketData = get_option_market_data_by_id(item['id'])
            if marketData:
                item.update(marketData[0])
            write_spinner()
        data.extend(filteredOptions)
    return(filter_data(data, info))
@login_required
def find_options_by_expiration_and_strike(inputSymbols, expirationDate, strikePrice, optionType=None, info=None):
    """Returns a list of all the option orders that match the search parameters.

    :param inputSymbols: The ticker of either a single stock or a list of stocks.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the strike price to filter for.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all options of the stock that match the search parameters. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
    """
    try:
        symbols = inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    data = []
    for symbol in symbols:
        allOptions = find_tradable_options(symbol, expirationDate, strikePrice, optionType, None)
        # find_tradable_options returns [None] on failure; drop falsy entries
        # before calling .get() so an invalid symbol cannot raise AttributeError.
        filteredOptions = [item for item in allOptions
                           if item and item.get("expiration_date") == expirationDate]
        for item in filteredOptions:
            marketData = get_option_market_data_by_id(item['id'])
            if marketData:
                item.update(marketData[0])
            write_spinner()
        data.extend(filteredOptions)
    return filter_data(data, info)
@login_required
def find_options_by_specific_profitability(inputSymbols, expirationDate=None, strikePrice=None, optionType=None, typeProfit="chance_of_profit_short", profitFloor=0.0, profitCeiling=1.0, info=None):
    """Returns a list of option market data for several stock tickers that match a range of profitability.

    :param inputSymbols: May be a single stock ticker or a list of stock tickers.
    :type inputSymbols: str or list
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD. Leave as None to get all available dates.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option. Leave as None to get all available strike prices.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put' or leave blank to get both.
    :type optionType: Optional[str]
    :param typeProfit: Will either be "chance_of_profit_short" or "chance_of_profit_long".
    :type typeProfit: str
    :param profitFloor: The lower percentage on scale 0 to 1.
    :type profitFloor: int
    :param profitCeiling: The higher percentage on scale 0 to 1.
    :type profitCeiling: int
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries of key/value pairs for all stock option market data. \
If info parameter is provided, a list of strings is returned where the strings are the value of the key that matches info.
    """
    # Guard the ticker parsing like the sibling find_options_* functions do,
    # so a non-string/None input prints the error instead of raising.
    try:
        symbols = inputs_to_set(inputSymbols)
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    data = []
    if (typeProfit != "chance_of_profit_short" and typeProfit != "chance_of_profit_long"):
        print("Invalid string for 'typeProfit'. Defaulting to 'chance_of_profit_short'.", file=get_output())
        typeProfit = "chance_of_profit_short"
    for symbol in symbols:
        tempData = find_tradable_options(symbol, expirationDate, strikePrice, optionType, info=None)
        for option in tempData:
            # find_tradable_options returns [None] on failure - skip falsy entries.
            if not option:
                continue
            if expirationDate and option.get("expiration_date") != expirationDate:
                continue
            market_data = get_option_market_data_by_id(option['id'])
            # get_option_market_data_by_id may return None on a server error;
            # the old len(market_data) check raised TypeError in that case.
            if market_data:
                option.update(market_data[0])
            write_spinner()
            try:
                floatValue = float(option[typeProfit])
                if (profitFloor <= floatValue <= profitCeiling):
                    data.append(option)
            except (KeyError, TypeError, ValueError):
                # Option lacks the profit key or its value is not numeric - skip it.
                pass
    return(filter_data(data, info))
@login_required
def get_option_market_data_by_id(id, info=None):
    """Returns the option market data for a stock, including the greeks,
    open interest, change of profit, and adjusted mark price.

    :param id: The id of the stock.
    :type id: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the stock, or None \
if the instrument lookup failed. If info parameter is provided, the value of \
the key that matches info is extracted.
    """
    instrument = get_option_instrument_data_by_id(id)
    if instrument is None:
        # The instrument endpoint can fail (e.g. a 503 Service Unavailable).
        # Propagate None so the caller can detect the failure; do NOT fabricate
        # empty data, which would only mask the problem.
        return None
    payload = {"instruments": instrument['url']}
    data = request_get(marketdata_options_url(), 'results', payload)
    return filter_data(data, info)
@login_required
def get_option_market_data(inputSymbols, expirationDate, strikePrice, optionType, info=None):
    """Returns the option market data for the stock option, including the greeks,
    open interest, change of profit, and adjusted mark price.

    :param inputSymbols: The ticker of the stock.
    :type inputSymbols: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a list with one market-data entry per input symbol. \
If info parameter is provided, the value of the key that matches info is extracted.
    """
    try:
        symbols = inputs_to_set(inputSymbols)
        if optionType:
            optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    # Resolve each ticker to its option id, then fetch that option's market data.
    results = []
    for ticker in symbols:
        option_id = id_for_option(ticker, expirationDate, strikePrice, optionType)
        results.append(get_option_market_data_by_id(option_id))
    return filter_data(results, info)
def get_option_instrument_data_by_id(id, info=None):
    """Returns the option instrument information.

    :param id: The id of the stock.
    :type id: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
    """
    # Single GET against the instrument endpoint, then apply the info filter.
    response = request_get(option_instruments_url(id))
    return filter_data(response, info)
def get_option_instrument_data(symbol, expirationDate, strikePrice, optionType, info=None):
    """Returns the option instrument data for the stock option.

    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param info: Will filter the results to get a specific value.
    :type info: Optional[str]
    :returns: Returns a dictionary of key/value pairs for the stock. \
If info parameter is provided, the value of the key that matches info is extracted.
    """
    # Normalize inputs; non-string arguments surface here as AttributeError.
    try:
        symbol = symbol.upper().strip()
        optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    option_id = id_for_option(symbol, expirationDate, strikePrice, optionType)
    response = request_get(option_instruments_url(option_id))
    return filter_data(response, info)
def get_option_historicals(symbol, expirationDate, strikePrice, optionType, interval='hour', span='week', bounds='regular', info=None):
    """Returns the historical price data used to make the option graphs.
    :param symbol: The ticker of the stock.
    :type symbol: str
    :param expirationDate: Represents the expiration date in the format YYYY-MM-DD.
    :type expirationDate: str
    :param strikePrice: Represents the price of the option.
    :type strikePrice: str
    :param optionType: Can be either 'call' or 'put'.
    :type optionType: str
    :param interval: Interval to retrieve data for. Values are '5minute', '10minute', 'hour', 'day', 'week'. Default is 'hour'.
    :type interval: Optional[str]
    :param span: Sets the range of the data to be either 'day', 'week', 'year', or '5year'. Default is 'week'.
    :type span: Optional[str]
    :param bounds: Represents if graph will include extended trading hours or just regular trading hours. Values are 'regular', 'trading', and 'extended'. \
    regular hours are 6 hours long, trading hours are 9 hours long, and extended hours are 16 hours long. Default is 'regular'
    :type bounds: Optional[str]
    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: Returns a list of dictionaries, one per data point, each tagged with the symbol. \
    Returns [None] for invalid interval/span/bounds and None or [None] when the request fails.
    """
    # Normalize inputs; a non-string symbol/optionType raises AttributeError here.
    try:
        symbol = symbol.upper().strip()
        optionType = optionType.lower().strip()
    except AttributeError as message:
        print(message, file=get_output())
        return [None]
    # Whitelists of values accepted by the historicals endpoint.
    interval_check = ['5minute', '10minute', 'hour', 'day', 'week']
    span_check = ['day', 'week', 'year', '5year']
    bounds_check = ['extended', 'regular', 'trading']
    if interval not in interval_check:
        print(
            'ERROR: Interval must be "5minute","10minute","hour","day",or "week"', file=get_output())
        return([None])
    if span not in span_check:
        print('ERROR: Span must be "day", "week", "year", or "5year"', file=get_output())
        return([None])
    if bounds not in bounds_check:
        print('ERROR: Bounds must be "extended","regular",or "trading"', file=get_output())
        return([None])
    optionID = id_for_option(symbol, expirationDate, strikePrice, optionType)
    url = option_historicals_url(optionID)
    payload = {'span': span,
               'interval': interval,
               'bounds': bounds}
    data = request_get(url, 'regular', payload)
    # request_get signals failure with None or [None]; pass it through unchanged.
    if (data == None or data == [None]):
        return data
    # Flatten the response: tag each data point with the symbol it belongs to.
    histData = []
    for subitem in data['data_points']:
        subitem['symbol'] = symbol
        histData.append(subitem)
    return(filter_data(histData, info)) | /robin_stocks-3.0.6-py3-none-any.whl/robin_stocks/robinhood/options.py | 0.660282 | 0.416085 | options.py | pypi |
from robin_stocks.tda.helper import format_inputs, login_required, request_get
from robin_stocks.tda.urls import URLS
@login_required
@format_inputs
def get_hours_for_markets(markets, date, jsonify=None):
    """ Gets market hours for various markets.

    :param markets: The markets for which you're requesting market hours, comma-separated. \
    Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
    :type markets: str
    :param date: The date for which market hours information is requested. Valid ISO-8601 formats are : \
    yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
    :type date: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Both query parameters are required by this endpoint.
    query = {"markets": markets, "date": date}
    return request_get(URLS.markets(), query, jsonify)
@login_required
@format_inputs
def get_hours_for_market(market, date, jsonify=None):
    """ Gets market hours for a specific market.

    :param market: The market for which you're requesting market hours, comma-separated. \
    Valid markets are EQUITY, OPTION, FUTURE, BOND, or FOREX.
    :type market: str
    :param date: The date for which market hours information is requested. Valid ISO-8601 formats are : \
    yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
    :type date: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # The market itself is part of the URL; only the date goes in the query.
    query = {"date": date}
    return request_get(URLS.market(market), query, jsonify)
@login_required
@format_inputs
def get_movers(market, direction, change, jsonify=None):
    """ Gets the top movers for a specific index.

    :param market: The index for which you're requesting movers. \
    Valid markets are $DJI, $COMPX, or $SPX.X.
    :type market: str
    :param direction: To return movers with the specified directions of "up" or "down".
    :type direction: str
    :param change: To return movers with the specified change types of "percent" or "value".
    :type change: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    url = URLS.movers(market)
    # direction and change are query parameters; the index is part of the URL.
    payload = {
        "direction": direction,
        "change": change
    }
    data, error = request_get(url, payload, jsonify)
    return data, error | /robin_stocks-3.0.6-py3-none-any.whl/robin_stocks/tda/markets.py | 0.883638 | 0.457743 | markets.py | pypi |
from robin_stocks.tda.helper import format_inputs, login_required, request_get
from robin_stocks.tda.urls import URLS
@login_required
@format_inputs
def get_accounts(options=None, jsonify=None):
    """ Gets all accounts associated with your API keys.

    :param options: Balances displayed by default, additional fields can be added here by adding positions or orders\
    As a comma separated list. Example:"positions,orders"
    :type options: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Only send a "fields" query parameter when extra fields were requested.
    query = {"fields": options} if options else None
    return request_get(URLS.accounts(), query, jsonify)
@login_required
@format_inputs
def get_account(id, options=None, jsonify=None):
    """ Get account information for a specific account.

    :param id: The account id.
    :type id: str
    :param options: Balances displayed by default, additional fields can be added here by adding positions or orders\
    As a comma separated list. Example:"positions,orders"
    :type options: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Only send a "fields" query parameter when extra fields were requested.
    query = {"fields": options} if options else None
    return request_get(URLS.account(id), query, jsonify)
@login_required
@format_inputs
def get_transactions(id, type_value=None, symbol=None, start_date=None, end_date=None, jsonify=None):
    """ Gets transactions for a specific account.

    :param id: The account id.
    :type id: str
    :param type_value: Only transactions with the specified type will be returned. ALL, TRADE, \
    BUY_ONLY, SELL_ONLY, CASH_IN_OR_CASH_OUT, CHECKING, DIVIDEND, INTEREST, OTHER, ADVISOR_FEES
    :type type_value: Optional[str]
    :param symbol: Only transactions with the specified symbol will be returned.
    :type symbol: Optional[str]
    :param start_date: Only transactions after the Start Date will be returned. \
    Note: The maximum date range is one year. Valid ISO-8601 formats are :yyyy-MM-dd.
    :type start_date: Optional[str]
    :param end_date: Only transactions before the End Date will be returned. \
    Note: The maximum date range is one year. Valid ISO-8601 formats are :yyyy-MM-dd.
    :type end_date: Optional[str]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Build the query from only the filters the caller supplied.
    payload = {}
    optional_filters = (("type", type_value), ("symbol", symbol),
                        ("startDate", start_date), ("endDate", end_date))
    for key, value in optional_filters:
        if value:
            payload[key] = value
    return request_get(URLS.transactions(id), payload, jsonify)
@login_required
@format_inputs
def get_transaction(account_id, transaction_id, jsonify=None):
    """ Gets a specific transaction for a specific account.

    :param account_id: The account id.
    :type account_id: str
    :param transaction_id: The transaction id.
    :type transaction_id: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Both ids are encoded in the URL; no query parameters are needed.
    url = URLS.transaction(account_id, transaction_id)
    data, error = request_get(url, None, jsonify)
    return data, error | /robin_stocks-3.0.6-py3-none-any.whl/robin_stocks/tda/accounts.py | 0.902856 | 0.325494 | accounts.py | pypi |
from robin_stocks.tda.helper import format_inputs, login_required, request_get
from robin_stocks.tda.urls import URLS
@login_required
@format_inputs
def get_quote(ticker, jsonify=None):
    """ Gets quote information for a single stock.

    :param ticker: The ticker of the stock.
    :type ticker: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # The ticker is part of the URL; no query parameters are needed.
    return request_get(URLS.quote(ticker), None, jsonify)
@login_required
@format_inputs
def get_quotes(tickers, jsonify=None):
    """ Gets quote information for multiple stocks. The stock string should be comma separated with no spaces.

    :param tickers: The string list of stock tickers.
    :type tickers: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # All tickers go into a single comma-separated "symbol" query parameter.
    query = {"symbol": tickers}
    return request_get(URLS.quotes(), query, jsonify)
@login_required
@format_inputs
def get_price_history(ticker, period_type, frequency_type, frequency,
                      period=None, start_date=None, end_date=None, needExtendedHoursData=True, jsonify=None):
    """ Gets the price history of a stock.

    :param ticker: The stock ticker.
    :type ticker: str
    :param period_type: The type of period to show. Valid values are day, month, year, or ytd (year to date). Default is day.
    :type period_type: str
    :param frequency_type: The type of frequency with which a new candle is formed. \
    Valid frequencyTypes by period_type (defaults marked with an asterisk):\n
        * day: minute*
        * month: daily, weekly*
        * year: daily, weekly, monthly*
        * ytd: daily, weekly*
    :type frequency_type: str
    :param frequency: The number of the frequencyType to be included in each candle. \
    Valid frequencies by frequencyType (defaults marked with an asterisk):\n
        * minute: 1*, 5, 10, 15, 30
        * daily: 1*
        * weekly: 1*
        * monthly: 1*
    :type frequency: str
    :param period: The number of periods to show. Valid periods by periodType (defaults marked with an asterisk):\n
        * day: 1, 2, 3, 4, 5, 10*
        * month: 1*, 2, 3, 6
        * year: 1*, 2, 3, 5, 10, 15, 20
        * ytd: 1*
    :type period: Optional[str]
    :param start_date: Start date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided.
    :type start_date: Optional[str]
    :param end_date: End date as milliseconds since epoch. If startDate and endDate are provided, period should not be provided. Default is previous trading day.
    :type end_date: Optional[str]
    :param needExtendedHoursData: true to return extended hours data, false for regular market hours only. Default is true.
    :type needExtendedHoursData: Optional[str]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # "period" and an explicit date range are mutually exclusive on this endpoint.
    if period and (start_date or end_date):
        raise ValueError(
            "If start_date and end_date are provided, period should not be provided.")
    payload = {
        "periodType": period_type,
        "frequencyType": frequency_type,
        "frequency": frequency,
        "needExtendedHoursData": needExtendedHoursData
    }
    # Attach only the optional parameters the caller supplied.
    for key, value in (("period", period), ("startDate", start_date), ("endDate", end_date)):
        if value:
            payload[key] = value
    return request_get(URLS.price_history(ticker), payload, jsonify)
@login_required
@format_inputs
def search_instruments(ticker_string, projection, jsonify=None):
    """ Gets a list of all the instruments data for tickers that match a search string.

    :param ticker_string: Value to pass to the search. See projection description for more information.
    :type ticker_string: str
    :param projection: The type of request:\n
        * symbol-search: Retrieve instrument data of a specific symbol or cusip
        * symbol-regex: Retrieve instrument data for all symbols matching regex. Example: symbol=XYZ.* will return all symbols beginning with XYZ
        * desc-search: Retrieve instrument data for instruments whose description contains the word supplied. Example: symbol=FakeCompany will return all instruments with FakeCompany in the description.
        * desc-regex: Search description with full regex support. Example: symbol=XYZ.[A-C] returns all instruments whose descriptions contain a word beginning with XYZ followed by a character A through C.
        * fundamental: Returns fundamental data for a single instrument specified by exact symbol.
    :type projection: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # The search value and search mode are both query parameters.
    query = {"symbol": ticker_string, "projection": projection}
    return request_get(URLS.instruments(), query, jsonify)
@login_required
@format_inputs
def get_instrument(cusip, jsonify=None):
    """ Gets instrument data for a specific stock.

    :param cusip: The CUSIP for a stock.
    :type cusip: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # The CUSIP is part of the URL; no query parameters are needed.
    return request_get(URLS.instrument(cusip), None, jsonify)
@login_required
@format_inputs
def get_option_chains(ticker, contract_type="ALL", strike_count="10", include_quotes="FALSE", strategy="SINGLE", interval=None, strike_price=None,
                      range_value="ALL", from_date=None, to_date=None, volatility=None, underlying_price=None, interest_rate=None,
                      days_to_expiration=None, exp_month="ALL", option_type="ALL", jsonify=None):
    """ Gets the option chain for a specific stock.

    :param ticker: The stock ticker.
    :type ticker: str
    :param contract_type: Type of contracts to return in the chain. Can be CALL, PUT, or ALL. Default is ALL.
    :type contract_type: Optional[str]
    :param strike_count: The number of strikes to return above and below the at-the-money price.
    :type strike_count: Optional[str]
    :param include_quotes: Include quotes for options in the option chain. Can be TRUE or FALSE. Default is FALSE.
    :type include_quotes: Optional[str]
    :param strategy: Passing a value returns a Strategy Chain. Possible values are SINGLE, ANALYTICAL (allows use of the volatility, \
    underlyingPrice, interestRate, and daysToExpiration params to calculate theoretical values), COVERED, VERTICAL, CALENDAR, \
    STRANGLE, STRADDLE, BUTTERFLY, CONDOR, DIAGONAL, COLLAR, or ROLL. Default is SINGLE.
    :type strategy: Optional[str]
    :param interval: Strike interval for spread strategy chains (see strategy param).
    :type interval: Optional[str]
    :param strike_price: Provide a strike price to return options only at that strike price.
    :type strike_price: Optional[str]
    :param range_value: Returns options for the given range. Default is ALL. Possible values are:\n
        * ITM: In-the-money
        * NTM: Near-the-money
        * OTM: Out-of-the-money
        * SAK: Strikes Above Market
        * SBK: Strikes Below Market
        * SNK: Strikes Near Market
        * ALL: All Strikes
    :type range_value: Optional[str]
    :param from_date: Only return expirations after this date. For strategies, expiration refers to the \
    nearest term expiration in the strategy. Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
    :type from_date: Optional[str]
    :param to_date: Only return expirations before this date. For strategies, expiration refers to the \
    nearest term expiration in the strategy. Valid ISO-8601 formats are: yyyy-MM-dd and yyyy-MM-dd'T'HH:mm:ssz.
    :type to_date: Optional[str]
    :param volatility: Volatility to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
    :type volatility: Optional[str]
    :param underlying_price: Underlying price to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
    :type underlying_price: Optional[str]
    :param interest_rate: Interest rate to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
    :type interest_rate: Optional[str]
    :param days_to_expiration: Days to expiration to use in calculations. Applies only to ANALYTICAL strategy chains (see strategy param).
    :type days_to_expiration: Optional[str]
    :param exp_month: Return only options expiring in the specified month. Month is given in the three character format. Example: JAN. Default is ALL.
    :type exp_month: Optional[str]
    :param option_type: Type of contracts to return. Default is ALL. Possible values are:\n
        * S: Standard contracts
        * NS: Non-standard contracts
        * ALL: All contracts
    :type option_type: Optional[str]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    url = URLS.option_chains()
    # Parameters with API-side defaults are always sent.
    payload = {
        "symbol": ticker,
        "contractType": contract_type,
        "strikeCount": strike_count,
        "includeQuotes": include_quotes,
        "strategy": strategy,
        "range": range_value,
        "expMonth": exp_month,
        "optionType": option_type
    }
    # Purely optional parameters are only attached when supplied by the caller.
    if interval:
        payload["interval"] = interval
    if strike_price:
        payload["strike"] = strike_price
    if from_date:
        payload["fromDate"] = from_date
    if to_date:
        payload["toDate"] = to_date
    if volatility:
        payload["volatility"] = volatility
    if underlying_price:
        payload["underlyingPrice"] = underlying_price
    if interest_rate:
        payload["interestRate"] = interest_rate
    if days_to_expiration:
        payload["daysToExpiration"] = days_to_expiration
    data, error = request_get(url, payload, jsonify)
    return data, error | /robin_stocks-3.0.6-py3-none-any.whl/robin_stocks/tda/stocks.py | 0.888105 | 0.628379 | stocks.py | pypi |
from robin_stocks.tda.helper import (format_inputs, login_required,
request_delete, request_get,
request_headers)
from robin_stocks.tda.urls import URLS
@login_required
@format_inputs
def place_order(account_id, order_payload, jsonify=None):
    """ Place an order for a given account.

    :param account_id: The account id.
    :type account_id: str
    :param order_payload: A dictionary of key value pairs for the infromation you want to send to order.
    :type order_payload: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests reponse object \
    or a dictionary parsed using the JSON format and the second entry is an error string or \
    None if there was not an error.

    """
    # Orders are submitted through request_headers rather than request_get.
    return request_headers(URLS.orders(account_id), order_payload, jsonify)
@login_required
@format_inputs
def cancel_order(account_id, order_id, jsonify=None):
    """ Cancel a specific order placed on a given account.

    :param account_id: The account id.
    :type account_id: str
    :param order_id: The order id.
    :type order_id: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred.

    """
    cancel_url = URLS.order(account_id, order_id)
    return request_delete(cancel_url, jsonify)
@login_required
@format_inputs
def get_order(account_id, order_id, jsonify=None):
    """ Retrieve the details of a single order on a given account.

    :param account_id: The account id.
    :type account_id: str
    :param order_id: The order id.
    :type order_id: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred.

    """
    order_url = URLS.order(account_id, order_id)
    return request_get(order_url, None, jsonify)
@login_required
@format_inputs
def get_orders_for_account(account_id, max_results=None, from_time=None, to_time=None, status=None, jsonify=None):
    """ Retrieve every order on a given account, optionally filtered.

    :param account_id: The account id.
    :type account_id: Optional[str]
    :param max_results: The max number of orders to retrieve.
    :type max_results: Optional[str]
    :param from_time: Specifies that no orders entered before this time should be returned. Valid ISO-8601 formats are : \
    yyyy-MM-dd. Date must be within 60 days from today's date. 'toEnteredTime' must also be set.
    :type from_time: Optional[str]
    :param to_time: Specifies that no orders entered after this time should be returned. Valid ISO-8601 formats are : \
    yyyy-MM-dd. 'fromEnteredTime' must also be set.
    :type to_time: Optional[str]
    :param status: Specifies that only orders of this status should be returned. Possible values are \
    AWAITING_PARENT_ORDER, AWAITING_CONDITION, AWAITING_MANUAL_REVIEW, ACCEPTED, AWAITING_UR_OUT, PENDING_ACTIVATION, QUEUED \
    WORKING, REJECTED, PENDING_CANCEL, CANCELED, PENDING_REPLACE, REPLACED, FILLED, EXPIRED
    :type status: Optional[str]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred.

    """
    orders_url = URLS.orders(account_id)
    # Only forward filters the caller actually supplied (truthy values).
    optional_filters = {
        "maxResults": max_results,
        "fromEnteredTime": from_time,
        "toEnteredTime": to_time,
        "status": status,
    }
    query = {name: value for name, value in optional_filters.items() if value}
    return request_get(orders_url, query, jsonify)
from robin_stocks.gemini.authentication import generate_signature
from robin_stocks.gemini.helper import (format_inputs, login_required,
request_get, request_post)
from robin_stocks.gemini.urls import URLS
@format_inputs
def get_pubticker(ticker, jsonify=None):
    """ Fetch the public ticker snapshot for a crypto pair.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * bid - The highest bid currently available
                      * ask - The lowest ask currently available
                      * last - The price of the last executed trade
                      * volume - Information about the 24 hour volume on the exchange

    """
    return request_get(URLS.pubticker(ticker), None, jsonify)
@format_inputs
def get_ticker(ticker, jsonify=None):
    """ Fetch recent trading activity (24h OHLC and book top) for a crypto pair.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * symbol - BTCUSD etc.
                      * open - Open price from 24 hours ago
                      * high - High price from 24 hours ago
                      * low - Low price from 24 hours ago
                      * close - Close price (most recent trade)
                      * changes - Hourly prices descending for past 24 hours
                      * bid - Current best bid
                      * ask - Current best offer

    """
    return request_get(URLS.ticker(ticker), None, jsonify)
@format_inputs
def get_symbols(jsonify=None):
    """ List every crypto ticker available on the exchange.

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    list of strings, and whose second entry is an error string, or None \
    when no error occurred.

    """
    return request_get(URLS.symbols(), None, jsonify)
@format_inputs
def get_symbol_details(ticker, jsonify=None):
    """ Fetch detailed exchange metadata for a crypto pair.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * symbol - BTCUSD etc.
                      * base_currency - CCY1 or the top currency. (ie BTC in BTCUSD)
                      * quote_currency - CCY2 or the quote currency. (ie USD in BTCUSD)
                      * tick_size - The number of decimal places in the quote_currency
                      * quote_increment - The number of decimal places in the base_currency
                      * min_order_size - The minimum order size in base_currency units.
                      * status - Status of the current order book. Can be open, closed, cancel_only, post_only, limit_only.

    """
    return request_get(URLS.symbol_details(ticker), None, jsonify)
@login_required
@format_inputs
def get_notional_volume(jsonify=None):
    """ Fetch the account's fee schedule and 30-day notional volume.

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * date - UTC date in yyyy-MM-dd format
                      * last_updated_ms - Unix timestamp in millisecond of the last update
                      * web_maker_fee_bps - Integer value representing the maker fee for all symbols in basis point for web orders
                      * web_taker_fee_bps - Integer value representing the taker fee for all symbols in basis point for web orders
                      * web_auction_fee_bps - Integer value representing the auction fee for all symbols in basis point for web orders
                      * api_maker_fee_bps - Integer value representing the maker fee for all symbols in basis point for API orders
                      * api_taker_fee_bps - Integer value representing the taker fee for all symbols in basis point for API orders
                      * api_auction_fee_bps - Integer value representing the auction fee for all symbols in basis point for API orders
                      * fix_maker_fee_bps - Integer value representing the maker fee for all symbols in basis point for FIX orders
                      * fix_taker_fee_bps - Integer value representing the taker fee for all symbols in basis point for FIX orders
                      * fix_auction_fee_bps - Integer value representing the auction fee for all symbols in basis point for FIX orders
                      * block_maker_fee_bps - Integer value representing the maker fee for all symbols in basis point for block orders
                      * block_taker_fee_bps - Integer value representing the taker fee for all symbols in basis point for block orders
                      * notional_30d_volume - Maker plus taker trading volume for the past 30 days, including auction volume
                      * notional_1d_volume - A list of 1 day notional volume for the past 30 days

    """
    endpoint = URLS.notional_volume()
    # Private Gemini endpoints require a signed POST whose body names the endpoint.
    signed_payload = {"request": URLS.get_endpoint(endpoint)}
    generate_signature(signed_payload)
    return request_post(endpoint, signed_payload, jsonify)
@login_required
@format_inputs
def get_trade_volume(jsonify=None):
    """ Fetch per-symbol trade volume. The response is an array of up to 30 days
    of trade volume for each symbol.

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * symbol - The symbol.
                      * base_currency - quantity is denominated in this currency.
                      * notional_currency - price is denominated as the amount of notional currency per one unit of base currency. Notional values are denominated in this currency.
                      * data_date - UTC date in yyyy-MM-dd format.
                      * total_volume_base - Total trade volume for this day.
                      * maker_buy_sell_ratio - Maker buy/sell ratio is the proportion of maker base volume on trades where the account was on the buy side versus all maker trades. If there is no maker base volume on the buy side, then this value is 0.
                      * buy_maker_base - Quantity for this day where the account was a maker on the buy side of the trade.
                      * buy_maker_notional - Notional value for this day where the account was a maker on the buy side of the trade.
                      * buy_maker_count - Number of trades for this day where the account was a maker on the buy side of the trade.
                      * sell_maker_base - Quantity for this day where the account was a maker on the sell side of the trade.
                      * sell_maker_notional - Notional value for this day where the account was a maker on the sell side of the trade.
                      * sell_maker_count - Number of trades for this day where the account was a maker on the sell side of the trade.
                      * buy_taker_base- Quantity for this day where the account was a taker on the buy side of the trade.
                      * buy_taker_notional - Notional value for this day where the account was a taker on the buy side of the trade.
                      * buy_taker_count - Number of trades for this day where the account was a taker on the buy side of the trade.
                      * sell_taker_base - Quantity for this day where the account was a taker on the sell side of the trade.
                      * sell_taker_notional - Notional value for this day where the account was a taker on the sell side of the trade.
                      * sell_taker_count - Number of trades for this day where the account was a taker on the sell side of the trade.

    """
    endpoint = URLS.trade_volume()
    # Private Gemini endpoints require a signed POST whose body names the endpoint.
    signed_payload = {"request": URLS.get_endpoint(endpoint)}
    generate_signature(signed_payload)
    return request_post(endpoint, signed_payload, jsonify)
def get_price(ticker, side):
    """ Look up the current bid or ask price for a crypto pair.

    :param ticker: The ticker of the crypto.
    :type ticker: str
    :param side: Either 'buy' or 'sell'.
    :type side: str
    :returns: Returns the bid or ask price as a string.

    """
    quote, _ = get_pubticker(ticker, jsonify=True)
    # A buyer pays the lowest ask; any other side falls back to the highest bid.
    return quote["ask"] if side == "buy" else quote["bid"]
from robin_stocks.gemini.authentication import generate_signature
from robin_stocks.gemini.helper import (format_inputs, login_required,
request_post)
from robin_stocks.gemini.urls import URLS
@login_required
@format_inputs
def get_account_detail(jsonify=None):
    """ Fetch profile details for the account attached to your API key.

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * account - Contains information on the requested account
                          -- accountName - The name of the account provided upon creation. Will default to Primary
                          -- shortName - Nickname of the specific account (will take the name given, remove all symbols, replace all " " with "-" and make letters lowercase)
                          -- type - The type of account. Will return either exchange or custody
                          -- created - The timestamp of account creation, displayed as number of milliseconds since 1970-01-01 UTC. This will be transmitted as a JSON number
                      * users - Contains an array of JSON objects with user information for the requested account
                          -- name - Full legal name of the user
                          -- lastSignIn - Timestamp of the last sign for the user. Formatted as yyyy-MM-dd'T'HH:mm:ss.SSS'Z'
                          -- status - Returns user status. Will inform of active users or otherwise not active
                          -- countryCode - 2 Letter country code indicating residence of user.
                          -- isVerified - Returns verification status of user.
                      * memo_reference_code - Returns wire memo reference code for linked bank account.

    """
    endpoint = URLS.account_detail()
    # Private Gemini endpoints require a signed POST whose body names the endpoint.
    signed_payload = {"request": URLS.get_endpoint(endpoint)}
    generate_signature(signed_payload)
    return request_post(endpoint, signed_payload, jsonify)
@login_required
@format_inputs
def check_available_balances(jsonify=None):
    """ List the available balance in every currency.

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    list of JSON-parsed dictionaries, and whose second entry is an error string, \
    or None when no error occurred. \
    The keys for the dictionaries are listed below.
    :Dictionary Keys: * currency - The currency code.
                      * amount - The current balance
                      * available - The amount that is available to trade
                      * availableForWithdrawal - The amount that is available to withdraw
                      * type - "exchange"

    """
    endpoint = URLS.available_balances()
    # Private Gemini endpoints require a signed POST whose body names the endpoint.
    signed_payload = {"request": URLS.get_endpoint(endpoint)}
    generate_signature(signed_payload)
    return request_post(endpoint, signed_payload, jsonify)
@login_required
@format_inputs
def check_notional_balances(jsonify=None):
    """ Gets a list of balances in every currency, including their notional values.

    (The docstring previously said "available balances", copied from
    check_available_balances; this endpoint returns notional balances.)

    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: Returns a tuple where the first entry in the tuple is a requests response object \
    or a list of dictionaries parsed using the JSON format and the second entry is an error string or \
    None if there was not an error. \
    The keys for the dictionaries are listed below.
    :Dictionary Keys: * currency - The currency code.
                      * amount - The current balance
                      * amountNotional - Amount, in notional
                      * available - The amount that is available to trade
                      * availableNotional - Available, in notional
                      * availableForWithdrawal - The amount that is available to withdraw
                      * availableForWithdrawalNotional - AvailableForWithdrawal, in notional

    """
    url = URLS.notional_balances()
    # Private Gemini endpoints require a signed POST whose body names the endpoint.
    payload = {
        "request": URLS.get_endpoint(url)
    }
    generate_signature(payload)
    data, err = request_post(url, payload, jsonify)
    return data, err
@login_required
@format_inputs
def check_transfers(timestamp=None, limit_transfers=10, show_completed_deposit_advances=False, jsonify=None):
    """ List transfers (deposits and withdrawals) on the account.

    :param timestamp: Only return transfers on or after this timestamp. If not present, will show the most recent transfers.
    :type timestamp: Optional[str]
    :param limit_transfers: The maximum number of transfers to return. Default is 10, max is 50.
    :type limit_transfers: Optional[int]
    :param show_completed_deposit_advances: Whether to display completed deposit advances. False by default. Must be set True to activate.
    :type show_completed_deposit_advances: Optional[bool]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    list of JSON-parsed dictionaries, and whose second entry is an error string, \
    or None when no error occurred. \
    The keys for the dictionaries are listed below.
    :Dictionary Keys: * type - Transfer type. Deposit or Withdrawal.
                      * status - Transfer status. Advanced or Complete.
                      * timestampms - The time that the trade was executed in milliseconds
                      * eid - Transfer event id
                      * advanceEid - Deposit advance event id
                      * currency - Currency code
                      * amount - The transfer amount
                      * method - Optional. When currency is a fiat currency, the method field will attempt to supply ACH, Wire, or SEN. If the transfer is an internal transfer between subaccounts the method field will return Internal.
                      * txHash - Optional. When currency is a cryptocurrency, supplies the transaction hash when available.
                      * outputIdx - Optional. When currency is a cryptocurrency, supplies the output index in the transaction when available.
                      * destination - Optional. When currency is a cryptocurrency, supplies the destination address when available.
                      * purpose - Optional. Administrative field used to supply a reason for certain types of advances.

    """
    endpoint = URLS.transfers()
    request_body = {
        "request": URLS.get_endpoint(endpoint),
        "show_completed_deposit_advances": show_completed_deposit_advances,
    }
    # The timestamp filter is only sent when the caller supplies one.
    if timestamp:
        request_body["timestamp"] = timestamp
    generate_signature(request_body)
    return request_post(endpoint, request_body, jsonify)
@login_required
@format_inputs
def get_deposit_addresses(network, timestamp=None, jsonify=None):
    """ List deposit addresses for a given network.

    :param network: network can be bitcoin, ethereum, bitcoincash, litecoin, zcash, filecoin.
    :type network: str
    :param timestamp: Only returns addresses created on or after this timestamp.
    :type timestamp: Optional[str]
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    list of JSON-parsed dictionaries, and whose second entry is an error string, \
    or None when no error occurred. \
    The keys for the dictionaries are listed below.
    :Dictionary Keys: * address - String representation of the new cryptocurrency address.
                      * timestamp - Creation date of the address.
                      * label - Optional. if you provided a label when creating the address, it will be echoed back here.

    """
    endpoint = URLS.deposit_addresses(network)
    request_body = {"request": URLS.get_endpoint(endpoint)}
    # The timestamp filter is only sent when the caller supplies one.
    if timestamp:
        request_body["timestamp"] = timestamp
    generate_signature(request_body)
    return request_post(endpoint, request_body, jsonify)
@login_required
@format_inputs
def get_approved_addresses(network, jsonify=None):
    """ View the Approved Address list for a given network.

    :param network: The network of the approved address. Network can be bitcoin, ethereum, bitcoincash, litecoin, zcash, or filecoin
    :type network: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * approvedAddresses - Array of approved addresses on both the account and group level.
                          -- network - The network of the approved address. Network can be bitcoin, ethereum, bitcoincash, litecoin, zcash, or filecoin
                          -- scope - Will return the scope of the address as either "account" or "group"
                          -- label - The label assigned to the address
                          -- status - The status of the address that will return as "active", "pending-time" or "pending-mua". The remaining time is exactly 7 days after the initial request. "pending-mua" is for multi-user accounts and will require another administator or fund manager on the account to approve the address.
                          -- createdAt - UTC timestamp in millisecond of when the address was created.
                          -- address - The address on the approved address list.

    """
    endpoint = URLS.approved_addresses(network)
    signed_payload = {"request": URLS.get_endpoint(endpoint)}
    generate_signature(signed_payload)
    return request_post(endpoint, signed_payload, jsonify)
@login_required
@format_inputs
def withdraw_crypto_funds(currency_code, address, amount, jsonify=None):
    """ Withdraw cryptocurrency funds to an approved address.

    Before you can withdraw cryptocurrency funds to an approved address, you need three things:

    1. You must have an approved address list for your account
    2. The address you want to withdraw funds to needs to already be on that approved address list
    3. An API key with the Fund Manager role added

    :param currency_code: the three-letter currency code of a supported crypto-currency, e.g. btc or eth.
    :type currency_code: str
    :param address: Standard string format of cryptocurrency address.
    :type address: str
    :param amount: Quoted decimal amount to withdraw.
    :type amount: str
    :param jsonify: If set to false, will return the raw response object. \
    If set to True, will return a dictionary parsed using the JSON format.
    :type jsonify: Optional[str]
    :returns: A tuple whose first entry is either a requests response object or a \
    JSON-parsed dictionary, and whose second entry is an error string, or None \
    when no error occurred. \
    The keys for the dictionary are listed below.
    :Dictionary Keys: * address - Standard string format of the withdrawal destination address.
                      * amount - The withdrawal amount.
                      * txHash - Standard string format of the transaction hash of the withdrawal transaction. Only shown for ETH and GUSD withdrawals.
                      * withdrawalID - A unique ID for the withdrawal. Only shown for BTC, ZEC, LTC and BCH withdrawals.
                      * message - A human-readable English string describing the withdrawal. Only shown for BTC, ZEC, LTC and BCH withdrawals.

    """
    endpoint = URLS.withdrawl_crypto(currency_code)
    request_body = {
        "request": URLS.get_endpoint(endpoint),
        "address": address,
        "amount": amount,
    }
    generate_signature(request_body)
    return request_post(endpoint, request_body, jsonify)
from bip32keys.bip32keys import Bip32Keys, decode_hex, encode_hex
import base58check
from hashlib import sha256
class Bip32NetworkKeys(Bip32Keys):
    """ BIP32 key pair that can round-trip private keys through WIF
    (Wallet Import Format) for either mainnet or testnet. """

    # WIF version byte prepended to a private key before base58check encoding.
    private_key_magic_bytes = {'mainnet': '80', 'testnet': 'ef'}

    def __init__(self, init_params, mainnet=True):
        # Accept either a WIF string or the regular Bip32Keys init params
        # (entropy string / hex private key).
        if 'wif' in init_params:
            self.wif = init_params['wif']
            super().__init__({'private_key': self.wif_to_private_key(init_params['wif'])})
        else:
            super().__init__(init_params)
            self.wif = Bip32NetworkKeys.private_key_to_wif(self.get_private_key(), mainnet)

    def get_wif(self):
        """ Return the private key encoded in Wallet Import Format. """
        return self.wif

    @staticmethod
    def wif_to_private_key(wif):
        """ Decode a WIF string to a bare hex private key. """
        output = base58check.b58decode(wif)
        # Drop the leading network byte and the trailing 5 bytes: 5th from the
        # end is the 0x01 compressed-key marker, last 4 are the checksum.
        output = output[1:-5]
        return encode_hex(output)[0].decode()

    @staticmethod
    def private_key_to_wif(private_key, mainnet=True):
        """ Encode a hex private key as a compressed-key WIF string. """
        output = Bip32NetworkKeys._validate_private_key_for_wif(private_key, mainnet)
        extended_private_key = output
        output = decode_hex(output)[0]
        # Checksum = first 4 bytes of double SHA-256 over the extended key.
        checksum = sha256(sha256(output).digest()).hexdigest()[:8]
        output = extended_private_key + checksum
        output = decode_hex(output)[0]
        output = base58check.b58encode(output)
        return output.decode()

    @staticmethod
    def _validate_private_key_for_wif(private_key, mainnet):
        """ Normalize a hex private key to the extended WIF form
        <magic byte> + <key> + '01' (compressed marker).

        :raises Exception: if the key length is unsupported, or a 66-char key
            carries neither the expected magic byte nor the '01' suffix.
        """
        mb = Bip32NetworkKeys._get_private_key_magic_byte(mainnet)
        if len(private_key) == 64:
            return mb + private_key + '01'  # \x01 - compressed wif
        elif len(private_key) == 66:
            if private_key[0:2] == mb:
                return private_key + '01'
            elif private_key[-2:] == '01':
                return mb + private_key
            # FIX: previously this case fell through and returned None, which
            # later surfaced as an opaque TypeError inside decode_hex. Fail
            # loudly with a clear message instead.
            raise Exception('Bad private key format')
        elif len(private_key) == 68:
            # NOTE(review): a fully-extended key is accepted without checking
            # its magic byte against the requested network - confirm intended.
            return private_key
        else:
            raise Exception('Bad private key length')

    @staticmethod
    def _get_private_key_magic_byte(mainnet):
        """ Return the WIF version byte for the selected network. """
        if mainnet:
            return Bip32NetworkKeys.private_key_magic_bytes['mainnet']
        else:
            return Bip32NetworkKeys.private_key_magic_bytes['testnet']
if __name__ == '__main__':
    # Smoke-test demo: load a testnet WIF and print every derived representation.
    demo = Bip32NetworkKeys({'wif': 'cRfsTP6CHgP53vvMGgdvE8tLBMs1Xdo2MQwMJtEqYTnKjWiCJrjC'}, mainnet=False)
    for label, value in (('public key: ', demo.get_public_key()),
                         ('private key: ', demo.get_private_key()),
                         ('uncompressed public key: ', demo.get_uncompressed_public_key()),
                         ('wif: ', demo.get_wif())):
        print(label, value)
    print('private key to wif: ', Bip32NetworkKeys.private_key_to_wif(
        '7a6be1df9cc5d88edce5443ef0fce246123295dd82afae9a57986543272157cc', mainnet=False))
import codecs
from hashlib import sha256
from ecdsa import SigningKey, VerifyingKey, ellipticcurve
from ecdsa.ellipticcurve import Point, CurveFp
import ecdsa
from bip32utils import BIP32Key
import logging
# Module-level hex codec helpers. Each returns a (converted, length_consumed)
# tuple, so call sites index [0] for the payload:
#   decode_hex(hex_str)[0] -> bytes, encode_hex(raw_bytes)[0] -> hex bytes.
decode_hex = codecs.getdecoder("hex_codec")
encode_hex = codecs.getencoder("hex_codec")
class Bip32Keys:
    """ Secp256k1 key pair built from BIP32 entropy or a raw private key.
    Keys are exposed as hex strings. Provides compressed/uncompressed
    public-key conversion, ECDSA signing/verification and ECDH shared-key
    generation. """
    def __init__(self, init_params):
        # init_params: either an entropy string, or a dict containing
        # 'entropy' or 'private_key' (hex string).
        # NOTE(review): a dict with neither key silently produces an
        # uninitialized instance - confirm whether this should raise.
        if isinstance(init_params, str):
            self.init_from_entropy(init_params)
        elif isinstance(init_params, dict):
            if 'entropy' in init_params:
                self.init_from_entropy(init_params['entropy'])
            elif 'private_key' in init_params:
                self.init_from_private_key(init_params['private_key'])
        else:
            raise NotImplementedError()
    def init_from_entropy(self, entropy):
        """ Derive the key pair deterministically from an entropy string via BIP32. """
        entropy = entropy.encode()
        key = BIP32Key.fromEntropy(entropy, public=False)
        self.private_key = key.PrivateKey()  # raw bytes
        self.public_key = key.PublicKey()  # compressed form, raw bytes
        # Cache the uncompressed form (0x04 prefix + X + Y) as raw bytes.
        self.uncompressed_public_key = decode_hex(Bip32Keys.to_uncompressed_public_key(
            self.get_public_key()
        ))[0]
    def init_from_private_key(self, private_key):
        """ Rebuild the key pair from a hex-encoded private key. """
        sk = SigningKey.from_string(string=decode_hex(private_key)[0], curve=ecdsa.SECP256k1, hashfunc=sha256)
        vk = sk.get_verifying_key()
        self.private_key = sk.to_string()
        self.public_key = decode_hex(Bip32Keys.to_compressed_public_key(encode_hex(vk.to_string())[0].decode()))[0]
        self.uncompressed_public_key = b'\x04' + vk.to_string()
    def get_public_key(self):
        """ Return the compressed public key as a hex string. """
        return encode_hex(self.public_key)[0].decode()
    def get_private_key(self):
        """ Return the private key as a hex string. """
        return encode_hex(self.private_key)[0].decode()
    def get_uncompressed_public_key(self):
        """ Return the uncompressed ('04'-prefixed) public key as a hex string. """
        return encode_hex(self.uncompressed_public_key)[0].decode()
    def sign_msg(self, message):
        """ Sign *message* with this pair's private key; returns DER signature hex. """
        return Bip32Keys.sign_message(message, self.get_private_key())
    def verify_msg(self, message, signature):
        """ Verify *signature* over *message* with this pair's public key. """
        return Bip32Keys.verify_message(message, signature, self.get_uncompressed_public_key())
    @staticmethod
    def to_uncompressed_public_key(public_key):
        """ Expand a compressed ('02'/'03'-prefixed) public key hex string to
        the uncompressed form by recovering Y from the curve equation
        y^2 = x^3 + 7 (mod p). Already-uncompressed input is passed through,
        gaining the '04' prefix if it was missing. """
        if len(public_key) == 130:
            return public_key
        elif len(public_key) == 128:
            return '04' + public_key
        # secp256k1 field prime.
        p_hex = 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F'
        p = int(p_hex, 16)
        x_hex = public_key[2:66]
        x = int(x_hex, 16)
        prefix = public_key[0:2]
        y_square = (pow(x, 3, p) + 7) % p
        # p % 4 == 3, so pow(v, (p + 1) // 4, p) yields a modular square root.
        y_square_square_root = pow(y_square, (p + 1) // 4, p)
        # The prefix encodes Y's parity ('02' even, '03' odd); take the other
        # root (p - r) when the computed root's parity does not match.
        if (prefix == "02" and y_square_square_root & 1) or (prefix == "03" and not y_square_square_root & 1):
            y = (-y_square_square_root) % p
        else:
            y = y_square_square_root
        computed_y_hex = format(y, '064x')
        computed_uncompressed_key = "04" + x_hex + computed_y_hex
        return computed_uncompressed_key
    @staticmethod
    def to_compressed_public_key(public_key):
        """ Compress a public key hex string to the '02'/'03'-prefixed form
        chosen by the parity of Y. Already-compressed input is returned as is.
        NOTE(review): any other length falls through and returns None -
        confirm whether this should raise instead. """
        if len(public_key) == 66:
            return public_key
        # Parity test only depends on the final hex digit of Y, so the slice
        # works for both '04'-prefixed (130) and bare (128) input.
        y_hex = public_key[64:]
        if int(y_hex, 16) & 1:
            prefix = '03'
        else:
            prefix = '02'
        if len(public_key) == 130:
            return prefix + public_key[2:66]
        elif len(public_key) == 128:
            return prefix + public_key[:64]
    @staticmethod
    def sign_message(message, private_key):
        """ Sign a text message with a hex private key and return the
        DER-encoded ECDSA signature as a hex string. Extended keys (network
        byte / compression suffix) are normalized first. """
        priv_key = Bip32Keys._validate_private_key_for_signature(private_key)
        message = message.encode()
        sk = SigningKey.from_string(curve=ecdsa.SECP256k1, string=decode_hex(priv_key)[0], hashfunc=sha256)
        # NOTE(review): ecdsa's default sign() draws a random nonce, so
        # repeated signatures over the same message differ - confirm that
        # deterministic (RFC 6979) signing is not required here.
        sig = sk.sign(message, sigencode=ecdsa.util.sigencode_der)
        return encode_hex(sig)[0].decode()
    @staticmethod
    def verify_message(message, signature, public_key):
        """ Verify a hex-encoded signature over a text message.
        Returns True on success; note that an invalid signature makes the
        underlying ecdsa verify call raise rather than return False. """
        pub_key = Bip32Keys._validate_public_key_for_signature(public_key)
        sig = signature
        msg = message.encode()
        vk = VerifyingKey.from_string(string=decode_hex(pub_key)[0], curve=ecdsa.SECP256k1, hashfunc=sha256)
        # 128 hex chars means a raw (r || s) signature; otherwise assume DER.
        if len(sig) == 128:
            vk.verify(decode_hex(sig)[0], msg, sigdecode=ecdsa.util.sigdecode_string)
        else:
            vk.verify(decode_hex(sig)[0], msg, sigdecode=ecdsa.util.sigdecode_der)
        return True
    @staticmethod
    def _validate_private_key_for_signature(private_key):
        """ Normalize a private key to the bare 64-char hex form, stripping a
        leading '80' network byte and/or a trailing '01' compression marker.
        NOTE(review): a 66-char key carrying neither marker falls through and
        returns None, which fails later inside decode_hex - confirm intended. """
        if len(private_key) == 64:
            return private_key
        elif len(private_key) == 66:
            if private_key[0:2] == '80':
                return private_key[2:]
            elif private_key[-2:] == '01':
                return private_key[:-2]
        elif len(private_key) == 68:
            return private_key[2:-2]
        else:
            raise Exception('Bad private key length')
    @staticmethod
    def _validate_public_key_for_signature(public_key):
        """ Normalize any accepted public-key format to the raw 128-char
        uncompressed hex form (no '04' prefix) expected by ecdsa. """
        if len(public_key) == 128:
            return public_key
        elif len(public_key) == 130:
            return public_key[2:]
        elif len(public_key) == 66:
            return Bip32Keys.to_uncompressed_public_key(public_key)[2:]
        else:
            raise Exception('Unsupported public key format')
    """
    The members below implement ECDH shared-key generation
    (support for asymmetric encryption).
    """
    # Certicom secp256-k1 domain parameters (field prime, curve coefficients,
    # generator point coordinates, group order).
    _a = 0x0000000000000000000000000000000000000000000000000000000000000000
    _b = 0x0000000000000000000000000000000000000000000000000000000000000007
    _p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
    _Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
    _Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
    _r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
    curve_secp256k1 = ecdsa.ellipticcurve.CurveFp(_p, _a, _b)
    generator_secp256k1 = ecdsa.ellipticcurve.Point(curve_secp256k1, _Gx, _Gy, _r)
    def get_shared_key(self, another_public_key):
        """ ECDH: shared key between this private key and another party's public key. """
        return Bip32Keys.generate_shared_key(self.get_private_key(), another_public_key)
    @staticmethod
    def generate_shared_key(private_key, public_key):
        """ Compute the ECDH shared point private_key * public_key and return
        its X and Y coordinates as concatenated hex.
        NOTE(review): hex() output is not zero-padded to 64 chars, so the
        result length can vary when a coordinate has leading zero bytes -
        confirm both parties handle that consistently. """
        public_key = Bip32Keys.to_uncompressed_public_key(public_key)
        private_key = int(private_key, 16)
        x = int(public_key[2:66], 16)  # drop prefix
        y = int(public_key[-64:], 16)
        another_point = Point(Bip32Keys.curve_secp256k1, x, y)
        shared_point = another_point * private_key
        return str(hex(shared_point.x()))[2:] + str(hex(shared_point.y()))[2:]
if __name__ == '__main__':
    # Smoke-test demo: key derivation, signing/verification, compression,
    # and symmetric ECDH shared-key agreement between two key pairs.
    logging.basicConfig(level=logging.DEBUG)
    alice = Bip32Keys({'entropy': '3123213213213123312c3kjifj3'})
    print('public key: ', alice.get_public_key())
    print('private key: ', alice.get_private_key())
    print('uncompressed public key: ', alice.get_uncompressed_public_key())
    signature = alice.sign_msg('hello world')
    print('signature: ', signature)
    print('verify signature: ', alice.verify_msg('hello world', signature))
    print('compressed: ', Bip32Keys.to_compressed_public_key(
        '041ad7138370ef5e93fb243aff3373e2b92383818dfc20022841b655e0cd6c618cd578261c78e1adfe205c3ade8b81e1722d6058be9155eee55468fbb04b62040e'))
    bob = Bip32Keys({'entropy': 'fdjsofjioej9fsdfjdskfdsjkhfdsj'})
    # All three calls must print the same value: ECDH is symmetric.
    print('shared key', bob.get_shared_key(alice.get_public_key()))
    print('shared key', alice.get_shared_key(bob.get_public_key()))
    print('shared key', Bip32Keys.generate_shared_key(alice.get_private_key(), bob.get_public_key()))
from bip32keys.bip32NetworkKeys import Bip32NetworkKeys, encode_hex, decode_hex
from hashlib import sha256
import hashlib
import base58check
class Bip32Addresses(Bip32NetworkKeys):
    """Derives blockchain addresses (HASH160 hex and Base58Check forms)
    from BIP32 keys."""

    def __init__(self, init_params, magic_byte, mainnet=True):
        """Build key material and cache both address representations.

        Arguments:
            init_params (dict): forwarded to ``Bip32NetworkKeys``.
            magic_byte (str): two-char hex version byte for the address.
            mainnet (bool): network selector, forwarded to the base class.
        """
        super().__init__(init_params, mainnet)
        self.hex_address = Bip32Addresses.public_key_to_hex_address(self.get_public_key())
        self.blockchain_address = Bip32Addresses.hex_address_to_blockchain_address(self.hex_address, magic_byte)

    def get_hex_address(self):
        """Return RIPEMD160(SHA256(pubkey)) as a hex string."""
        return self.hex_address

    def get_blockchain_address(self):
        """Return the Base58Check-encoded address."""
        return self.blockchain_address

    @staticmethod
    def public_key_to_hex_address(public_key):
        """HASH160 (SHA256 then RIPEMD160) of the compressed public key."""
        public_key = Bip32Addresses.to_compressed_public_key(public_key)
        output = sha256(decode_hex(public_key)[0]).digest()
        ripemd160 = hashlib.new('ripemd160')
        ripemd160.update(output)
        return ripemd160.hexdigest()

    @staticmethod
    def hex_address_to_blockchain_address(hex_address, mb):
        """Base58Check-encode ``mb + hex_address``.

        The checksum is the first 4 bytes of double-SHA256 of the
        version-prefixed payload.
        """
        extended_ripemd160 = mb + hex_address
        payload = decode_hex(extended_ripemd160)[0]
        checksum = sha256(sha256(payload).digest()).hexdigest()[:8]
        output = decode_hex(extended_ripemd160 + checksum)[0]
        return base58check.b58encode(output).decode()

    @staticmethod
    def public_key_to_blockchain_address(public_key, mb):
        """Convenience wrapper: public key straight to Base58Check address."""
        return Bip32Addresses.hex_address_to_blockchain_address(
            Bip32Addresses.public_key_to_hex_address(public_key), mb)

    @staticmethod
    def address_to_hex(address):
        """Return the HASH160 hex payload of a Base58Check address."""
        output = encode_hex(base58check.b58decode(address))[0]
        return output[2:-8].decode()  # drop magic byte and checksum

    @staticmethod
    def verify_address(address):
        """Validate address length and checksum.

        Raises on failure; returns True on success.
        """
        output = encode_hex(base58check.b58decode(address))[0].decode()
        # Removed a stray debug ``print(output)`` that leaked into library
        # code. 25 bytes -> 50 hex chars: 1 version + 20 hash + 4 checksum.
        if len(output) != 50:
            raise Exception('Invalid address length exception')
        checksum = output[-8:]
        extended_ripemd160 = output[:-8]
        payload = decode_hex(extended_ripemd160)[0]
        expected = sha256(sha256(payload).digest()).hexdigest()[0:8]
        if checksum != expected:
            raise Exception('Invalid checksum')
        return True

    @staticmethod
    def get_magic_byte(address):
        """Return the version (magic) byte of an address as an int."""
        output = encode_hex(base58check.b58decode(address))[0].decode()
        return int(output[0:2], 16)
if __name__ == '__main__':
    # Smoke-test / usage demo for Bip32Addresses. (Trailing dataset
    # residue that broke the final line has been removed.)
    keys = Bip32Addresses({'wif': 'cRgfyoXvYq8wrF6DbuFLnbRQ7RixZrmRCwzaiiPRRD8kw3qxwqxi'}, mainnet=False, magic_byte='78')
    print('public key: ', keys.get_public_key())
    print('private key: ', keys.get_private_key())
    print('uncompressed public key: ', keys.get_uncompressed_public_key())
    print('wif: ', keys.get_wif())
    print('address: ', keys.get_hex_address())
    print('address: ', keys.get_blockchain_address())
from motor.motor_tornado import MotorClient
class Table(object):
    """Thin async CRUD wrapper over a MongoDB collection (Motor driver).

    Documents are addressed by an integer ``id`` produced by a
    collection-wide autoincrement counter.
    """

    def __init__(self, dbname, collection):
        # Set database parameters; MotorClient() connects to localhost
        # with driver defaults.
        self.client = MotorClient()
        self.database = self.client[dbname]
        self.collection = self.database[collection]

    async def autoincrement(self):
        """Return the next value of the shared integer counter.

        NOTE(review): find/insert/update here are not atomic as a group —
        presumably acceptable for this app; confirm if concurrent writers
        are expected.
        """
        collection = self.database.autoincrement
        counter = await collection.find_one({"name": "counter"})
        if not counter:
            # First use: seed the counter document.
            await collection.insert_one({"name": "counter", "id": 0})
        await collection.find_one_and_update(
            {"name": "counter"},
            {"$inc": {"id": 1}})
        counter = await collection.find_one({"name": "counter"})
        return counter["id"]

    async def read(self, *_id):
        """Read data from database table.

        Accepts ids of entries.
        Returns list of results if success
        or string with error code and explanation.

        read(*id) => [(result), (result)] (if success)
        read(*id) => [] (if missed)
        read() => {"error":400, "reason":"Missed required fields"}
        """
        if not _id:
            return {"error": 400,
                    "reason": "Missed required fields"}
        result = []
        for i in _id:
            document = await self.collection.find_one({"id": i})
            # Missing ids are skipped (documented behaviour). The previous
            # bare ``except: continue`` masked real errors; test for the
            # only expected failure (no document) explicitly.
            if document is None:
                continue
            result.append({key: document[key] for key in document
                           if key != "_id"})
        return result

    async def insert(self, **kwargs):
        """Create a new document from ``kwargs`` with an autoincremented id.

        Returns the stored document (without ``_id``) on success, or an
        error dict.
        """
        if kwargs:
            # Create autoincrement for account
            pk = await self.autoincrement()
            kwargs.update({"id": pk})
            # Create account with received data and autoincrement
            await self.collection.insert_one(kwargs)
            row = await self.collection.find_one({"id": pk})
        else:
            row = None
        if row:
            return {i: row[i] for i in row if i != "_id"}
        else:
            return {"error": 500,
                    "reason": "Not created"}

    async def find(self, **kwargs):
        """Find one entry matching the given criteria.

        find(**kwargs) => document (if exist)
        find(**kwargs) => {"error":404,"reason":"Not found"} (if does not exist)
        find() => {"error":400, "reason":"Missed required fields"}
        """
        # ``kwargs`` is always a dict, so the original guard
        # (``not isinstance(kwargs, dict) and len(kwargs) != 1``) could
        # never trigger. Guard against the documented error case instead:
        # no search criteria at all.
        if not kwargs:
            return {"error": 400,
                    "reason": "Missed required fields"}
        document = await self.collection.find_one(kwargs)
        if document:
            return document
        else:
            return {"error": 404, "reason": "Not found"}

    async def update(self, _id=None, **new_data):
        """Updates fields values.

        Accepts id of single entry and fields with values.

        update(id, **kwargs) => {"success":200, "reason":"Updated"} (if success)
        update(id, **kwargs) => {"error":400, "reason":"Missed required fields"} (if error)
        """
        if not _id or not new_data:
            return {"error": 400,
                    "reason": "Missed required fields"}
        document = await self.collection.find_one({"id": _id})
        if not document:
            return {"error": 404,
                    "reason": "Not found"}
        # One round trip with a combined $set instead of one per field.
        await self.collection.find_one_and_update(
            {"id": _id},
            {"$set": dict(new_data)})
        updated = await self.collection.find_one({"id": _id})
        return {"success": 200, "reason": "Updated", **updated}

    async def delete(self, _id=None):
        """Delete entry from database table.

        delete(id) => 1 (if exists)
        delete(id) => {"error":404, "reason":"Not found"} (if does not exist)
        delete() => {"error":400, "reason":"Missed required fields"}
        """
        if not _id:
            return {"error": 400,
                    "reason": "Missed required fields"}
        document = await self.collection.find_one({"id": _id})
        if not document:
            return {"error": 404,
                    "reason": "Not found"}
        # ``delete_one`` must be awaited before reading ``deleted_count``;
        # the previous code read the attribute off the un-awaited future.
        delete_result = await self.collection.delete_one({"id": _id})
        return delete_result.deleted_count
import abc
from kafka import ConsumerRebalanceListener as BaseConsumerRebalanceListener
class ConsumerRebalanceListener(BaseConsumerRebalanceListener):
    """
    A callback interface that the user can implement to trigger custom actions
    when the set of partitions assigned to the consumer changes.

    This is applicable when the consumer is having Kafka auto-manage group
    membership. If the consumer directly assigns partitions, those
    partitions will never be reassigned and this callback is not applicable.

    When Kafka is managing the group membership, a partition re-assignment will
    be triggered any time the members of the group change or the subscription
    of the members changes. This can occur when processes die, new process
    instances are added or old instances come back to life after failure.
    Rebalances can also be triggered by changes affecting the subscribed
    topics (e.g. when the number of partitions is administratively adjusted).

    There are many uses for this functionality. One common use is saving
    offsets in a custom store. By saving offsets in the
    on_partitions_revoked() call, we can ensure that any time partition
    assignment changes the offset gets saved.

    Another use is flushing out any kind of cache of intermediate results the
    consumer may be keeping. For example, consider a case where the consumer is
    subscribed to a topic containing user page views, and the goal is to count
    the number of page views per user for each five minute window. Let's say
    the topic is partitioned by the user id so that all events for a particular
    user will go to a single consumer instance. The consumer can keep in memory
    a running tally of actions per user and only flush these out to a remote
    data store when its cache gets too big. However if a partition is
    reassigned it may want to automatically trigger a flush of this cache,
    before the new owner takes over consumption.

    This callback will execute during the rebalance process, and Consumer will
    wait for callbacks to finish before proceeding with group join.

    It is guaranteed that all consumer processes will invoke
    on_partitions_revoked() prior to any process invoking
    on_partitions_assigned(). So if offsets or other state is saved in the
    on_partitions_revoked() call, it should be saved by the time the process
    taking over that partition has their on_partitions_assigned() callback
    called to load the state.
    """

    @abc.abstractmethod
    def on_partitions_revoked(self, revoked):
        """
        A coroutine or function the user can implement to provide cleanup or
        custom state save on the start of a rebalance operation.

        This method will be called *before* a rebalance operation starts and
        *after* the consumer stops fetching data.

        If you are using manual commit you have to commit all consumed offsets
        here, to avoid duplicate message delivery after rebalance is finished.

        .. note:: This method is only called before rebalances. It is not
            called prior to ``AIOKafkaConsumer.close()``

        Arguments:
            revoked (list of TopicPartition): the partitions that were assigned
                to the consumer on the last rebalance
        """
        pass

    @abc.abstractmethod
    def on_partitions_assigned(self, assigned):
        """
        A coroutine or function the user can implement to provide load of
        custom consumer state or cache warmup on completion of a successful
        partition re-assignment.

        This method will be called *after* partition re-assignment completes
        and *before* the consumer starts fetching data again.

        It is guaranteed that all the processes in a consumer group will
        execute their on_partitions_revoked() callback before any instance
        executes its on_partitions_assigned() callback.

        Arguments:
            assigned (list of TopicPartition): the partitions assigned to the
                consumer (may include partitions that were previously assigned)
        """
        pass
__all__ = [
"ConsumerRebalanceListener"
] | /robinhood_aiokafka-1.1.6-cp37-cp37m-macosx_10_9_x86_64.whl/aiokafka/abc.py | 0.750644 | 0.45944 | abc.py | pypi |
import asyncio
import logging
import re
import sys
import traceback
import warnings
from kafka.coordinator.assignors.roundrobin import (
RoundRobinPartitionAssignor,
)
from aiokafka.abc import ConsumerRebalanceListener
from aiokafka.client import AIOKafkaClient
from aiokafka.errors import (
TopicAuthorizationFailedError, OffsetOutOfRangeError,
ConsumerStoppedError, IllegalOperation, UnsupportedVersionError,
IllegalStateError, NoOffsetForPartitionError, RecordTooLargeError
)
from aiokafka.structs import TopicPartition
from aiokafka.util import (
PY_36, commit_structure_validate
)
from aiokafka import __version__
from .fetcher import Fetcher, OffsetResetStrategy
from .group_coordinator import GroupCoordinator, NoGroupCoordinator
from .subscription_state import SubscriptionState
log = logging.getLogger(__name__)
class AIOKafkaConsumer(object):
"""
A client that consumes records from a Kafka cluster.
The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (feature
of kafka >= 0.9.0.0).
.. _create_connection:
https://docs.python.org/3/library/asyncio-eventloop.html\
#creating-connections
Arguments:
*topics (str): optional list of topics to subscribe to. If not set,
call subscribe() or assign() before consuming records. Passing
topics directly is same as calling ``subscribe()`` API.
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'aiokafka-{version}'
group_id (str or None): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: None
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable, optional): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if
the first message in the first non-empty partition of the fetch
is larger than this value, the message will still be returned
to ensure that the consumer can make progress. NOTE: consumer
performs fetches to multiple brokers in parallel so memory
usage will depend on the number of brokers containing
partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
max_poll_records (int): The maximum number of records returned in a
single call to ``getmany()``. Defaults ``None``, no limit.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 40000.
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
auto_offset_reset (str): A policy for resetting offsets on
OffsetOutOfRange errors: 'earliest' will move to the oldest
available message, 'latest' will move to the most recent. Any
other value will raise an exception. Default: 'latest'.
enable_auto_commit (bool): If true the consumer's offset will be
periodically committed in the background. Default: True.
auto_commit_interval_ms (int): milliseconds between automatic
offset commits, if enable_auto_commit is True. Default: 5000.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
partition_assignment_strategy (list): List of objects to use to
distribute partition ownership amongst consumer instances when
group management is used. This preference is implicit in the order
of the strategies in the list. When assignment strategy changes:
to support a change to the assignment strategy, new versions must
enable support both for the old assignment strategy and the new
one. The coordinator will choose the old assignment strategy until
all members have been updated. Then it will choose the new
strategy. Default: [RoundRobinPartitionAssignor]
max_poll_interval_ms (int): Maximum allowed time between calls to
consume messages (e.g., ``consumer.getmany()``). If this interval
is exceeded the consumer is considered failed and the group will
rebalance in order to reassign the partitions to another consumer
group member. If API methods block waiting for messages, that time
does not count against this timeout. See KIP-62 for more
information. Default 300000
rebalance_timeout_ms (int): The maximum time server will wait for this
consumer to rejoin the group in a case of rebalance. In Java client
this behaviour is bound to `max.poll.interval.ms` configuration,
but as ``aiokafka`` will rejoin the group in the background, we
decouple this setting to allow finer tuning by users that use
ConsumerRebalanceListener to delay rebalancing. Defaults
to ``session_timeout_ms``
session_timeout_ms (int): Client group session and failure detection
timeout. The consumer sends periodic heartbeats
(heartbeat.interval.ms) to indicate its liveness to the broker.
If no heartbeats are received by the broker for a group member within
the session timeout, the broker will remove the consumer from the
group and trigger a rebalance. The allowed range is configured with
the **broker** configuration properties
`group.min.session.timeout.ms` and `group.max.session.timeout.ms`.
Default: 10000
heartbeat_interval_ms (int): The expected time in milliseconds
between heartbeats to the consumer coordinator when using
Kafka's group management feature. Heartbeats are used to ensure
that the consumer's session stays active and to facilitate
rebalancing when new consumers join or leave the group. The
value must be set lower than session_timeout_ms, but typically
should be set no higher than 1/3 of that value. It can be
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
consumer_timeout_ms (int): maximum wait timeout for background fetching
routine. Mostly defines how fast the system will see rebalance and
request new data for new partitions. Default: 200
api_version (str): specify which kafka API version to use.
AIOKafkaConsumer supports Kafka API versions >=0.9 only.
If set to 'auto', will attempt to infer the broker version by
probing various APIs. Default: auto
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
socket connections. Directly passed into asyncio's
`create_connection`_. For more information see :ref:`ssl_auth`.
Default: None.
exclude_internal_topics (bool): Whether records from internal topics
(such as offsets) should be exposed to the consumer. If set to True
the only way to receive records from an internal topic is
subscribing to it. Requires 0.10+ Default: True
connections_max_idle_ms (int): Close idle connections after the number
of milliseconds specified by this config. Specifying `None` will
disable idle checks. Default: 540000 (9 minutes).
isolation_level (str): Controls how to read messages written
transactionally. If set to *read_committed*,
``consumer.getmany()``
will only return transactional messages which have been committed.
If set to *read_uncommitted* (the default), ``consumer.getmany()``
will return all messages, even transactional messages which have
been aborted.
Non-transactional messages will be returned unconditionally in
either mode.
Messages will always be returned in offset order. Hence, in
*read_committed* mode, ``consumer.getmany()`` will only return
messages up to the last stable offset (LSO), which is the one less
than the offset of the first open transaction. In particular any
messages appearing after messages belonging to ongoing transactions
will be withheld until the relevant transaction has been completed.
As a result, *read_committed* consumers will not be able to read up
to the high watermark when there are in flight transactions.
Further, when in *read_committed* the seek_to_end method will
return the LSO. See method docs below. Default: "read_uncommitted"
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI. Default: PLAIN
sasl_plain_username (str): username for sasl PLAIN authentication.
Default: None
sasl_plain_password (str): password for sasl PLAIN authentication.
Default: None
Note:
Many configuration parameters are taken from Java Client:
https://kafka.apache.org/documentation.html#newconsumerconfigs
"""
_closed = None # Serves as an uninitialized flag for __del__
_source_traceback = None
def __init__(self, *topics, loop,
             bootstrap_servers='localhost',
             client_id='aiokafka-' + __version__,
             group_id=None,
             key_deserializer=None, value_deserializer=None,
             fetch_max_wait_ms=500,
             fetch_max_bytes=52428800,
             fetch_min_bytes=1,
             max_partition_fetch_bytes=1 * 1024 * 1024,
             request_timeout_ms=40 * 1000,
             retry_backoff_ms=100,
             auto_offset_reset='latest',
             enable_auto_commit=True,
             auto_commit_interval_ms=5000,
             check_crcs=True,
             metadata_max_age_ms=5 * 60 * 1000,
             partition_assignment_strategy=(RoundRobinPartitionAssignor,),
             max_poll_interval_ms=300000,
             rebalance_timeout_ms=None,
             session_timeout_ms=10000,
             heartbeat_interval_ms=3000,
             consumer_timeout_ms=200,
             max_poll_records=None,
             ssl_context=None,
             security_protocol='PLAINTEXT',
             api_version='auto',
             exclude_internal_topics=True,
             connections_max_idle_ms=540000,
             isolation_level="read_uncommitted",
             sasl_mechanism="PLAIN",
             sasl_plain_password=None,
             sasl_plain_username=None,
             sasl_kerberos_service_name='kafka',
             sasl_kerberos_domain_name=None,
             traced_from_parent_span=None,
             start_rebalancing_span=None,
             start_coordinator_span=None,
             on_generation_id_known=None,
             flush_spans=None):
    """Configure the consumer. No network I/O happens until ``start()``.

    See the class docstring for the meaning of each keyword argument.
    The ``traced_*``/``*_span``/``flush_spans`` callbacks are tracing
    hooks forwarded to the group coordinator.
    """
    # Fail fast on an obviously invalid batching limit.
    if max_poll_records is not None and (
            not isinstance(max_poll_records, int) or max_poll_records < 1):
        raise ValueError("`max_poll_records` should be positive Integer")
    # Per the class docstring, rebalance timeout defaults to the session
    # timeout when not explicitly decoupled.
    if rebalance_timeout_ms is None:
        rebalance_timeout_ms = session_timeout_ms
    # Low-level broker client; shared by fetcher and coordinator.
    self._client = AIOKafkaClient(
        loop=loop, bootstrap_servers=bootstrap_servers,
        client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
        request_timeout_ms=request_timeout_ms,
        retry_backoff_ms=retry_backoff_ms,
        api_version=api_version,
        ssl_context=ssl_context,
        security_protocol=security_protocol,
        connections_max_idle_ms=connections_max_idle_ms,
        sasl_mechanism=sasl_mechanism,
        sasl_plain_username=sasl_plain_username,
        sasl_plain_password=sasl_plain_password,
        sasl_kerberos_service_name=sasl_kerberos_service_name,
        sasl_kerberos_domain_name=sasl_kerberos_domain_name)
    # Remaining settings are stashed for ``start()``, which builds the
    # Fetcher and (No)GroupCoordinator from them.
    self._group_id = group_id
    self._heartbeat_interval_ms = heartbeat_interval_ms
    self._session_timeout_ms = session_timeout_ms
    self._retry_backoff_ms = retry_backoff_ms
    self._auto_offset_reset = auto_offset_reset
    self._request_timeout_ms = request_timeout_ms
    self._enable_auto_commit = enable_auto_commit
    self._auto_commit_interval_ms = auto_commit_interval_ms
    self._partition_assignment_strategy = partition_assignment_strategy
    self._key_deserializer = key_deserializer
    self._value_deserializer = value_deserializer
    self._fetch_min_bytes = fetch_min_bytes
    self._fetch_max_bytes = fetch_max_bytes
    self._fetch_max_wait_ms = fetch_max_wait_ms
    self._max_partition_fetch_bytes = max_partition_fetch_bytes
    self._exclude_internal_topics = exclude_internal_topics
    self._max_poll_records = max_poll_records
    self._consumer_timeout = consumer_timeout_ms / 1000
    self._isolation_level = isolation_level
    self._rebalance_timeout_ms = rebalance_timeout_ms
    self._max_poll_interval_ms = max_poll_interval_ms
    # Tracing hooks (forwarded to GroupCoordinator in ``start()``).
    self._traced_from_parent_span = traced_from_parent_span
    self._start_rebalancing_span = start_rebalancing_span
    self._start_coordinator_span = start_coordinator_span
    self._on_generation_id_known = on_generation_id_known
    self._flush_spans = flush_spans
    self._check_crcs = check_crcs
    self._subscription = SubscriptionState(loop=loop)
    # Created in ``start()``; ``None`` until then.
    self._fetcher = None
    self._coordinator = None
    self._loop = loop
    # In debug mode remember where the consumer was created, for the
    # "unclosed consumer" warning in ``__del__``.
    if loop.get_debug():
        self._source_traceback = traceback.extract_stack(sys._getframe(1))
    self._closed = False

    if topics:
        topics = self._validate_topics(topics)
        self._client.set_topics(topics)
        self._subscription.subscribe(topics=topics)
# Warn if consumer was not closed properly
# We don't attempt to close the Consumer, as __del__ is synchronous
def __del__(self, _warnings=warnings):
    """Warn (and notify the loop's exception handler) if the consumer is
    garbage-collected without being stopped.

    ``_warnings`` is bound as a default so the module is still reachable
    during interpreter shutdown.
    """
    if self._closed is not False:
        return
    extra = {'source': self} if PY_36 else {}
    _warnings.warn("Unclosed AIOKafkaConsumer {!r}".format(self),
                   ResourceWarning,
                   **extra)
    context = {'consumer': self,
               'message': 'Unclosed AIOKafkaConsumer'}
    if self._source_traceback is not None:
        context['source_traceback'] = self._source_traceback
    self._loop.call_exception_handler(context)
async def __aenter__(self):
    """Async context manager entry: start the consumer and return it."""
    await self.start()
    return self
async def __aexit__(self, type, value, traceback):
    """Async context manager exit: always stop the consumer."""
    await self.stop()
async def start(self):
    """ Connect to Kafka cluster. This will:

    * Load metadata for all cluster nodes and partition allocation
    * Wait for possible topic autocreation
    * Join group if ``group_id`` provided
    """
    assert self._fetcher is None, "Did you call `start` twice?"
    await self._client.bootstrap()
    # ``stop()`` may have been called while we were bootstrapping.
    if self._closed:
        raise ConsumerStoppedError()
    await self._wait_topics()

    if self._client.api_version < (0, 9):
        raise ValueError("Unsupported Kafka version: {}".format(
            self._client.api_version))

    # read_committed requires broker-side transaction support (0.11+).
    if self._isolation_level == "read_committed" and \
            self._client.api_version < (0, 11):
        raise UnsupportedVersionError(
            "`read_committed` isolation_level available only for Brokers "
            "0.11 and above")

    # The fetcher runs the background fetch routine for assigned
    # partitions; it is created before the coordinator so records can be
    # fetched as soon as an assignment exists.
    self._fetcher = Fetcher(
        self._client, self._subscription, loop=self._loop,
        key_deserializer=self._key_deserializer,
        value_deserializer=self._value_deserializer,
        fetch_min_bytes=self._fetch_min_bytes,
        fetch_max_bytes=self._fetch_max_bytes,
        fetch_max_wait_ms=self._fetch_max_wait_ms,
        max_partition_fetch_bytes=self._max_partition_fetch_bytes,
        check_crcs=self._check_crcs,
        fetcher_timeout=self._consumer_timeout,
        retry_backoff_ms=self._retry_backoff_ms,
        auto_offset_reset=self._auto_offset_reset,
        isolation_level=self._isolation_level)

    if self._group_id is not None:
        # using group coordinator for automatic partitions assignment
        self._coordinator = GroupCoordinator(
            self._client, self._subscription, loop=self._loop,
            group_id=self._group_id,
            heartbeat_interval_ms=self._heartbeat_interval_ms,
            session_timeout_ms=self._session_timeout_ms,
            retry_backoff_ms=self._retry_backoff_ms,
            enable_auto_commit=self._enable_auto_commit,
            auto_commit_interval_ms=self._auto_commit_interval_ms,
            assignors=self._partition_assignment_strategy,
            exclude_internal_topics=self._exclude_internal_topics,
            rebalance_timeout_ms=self._rebalance_timeout_ms,
            max_poll_interval_ms=self._max_poll_interval_ms,
            traced_from_parent_span=self._traced_from_parent_span,
            start_rebalancing_span=self._start_rebalancing_span,
            start_coordinator_span=self._start_coordinator_span,
            on_generation_id_known=self._on_generation_id_known,
            flush_spans=self._flush_spans,
        )
        if self._subscription.subscription is not None:
            if self._subscription.partitions_auto_assigned():
                # Either we passed `topics` to constructor or `subscribe`
                # was called before `start`
                await self._subscription.wait_for_assignment()
            else:
                # `assign` was called before `start`. We did not start
                # this task on that call, as coordinator was yet to be
                # created
                self._coordinator.start_commit_offsets_refresh_task(
                    self._subscription.subscription.assignment)
    else:
        # Using a simple assignment coordinator for reassignment on
        # metadata changes
        self._coordinator = NoGroupCoordinator(
            self._client, self._subscription, loop=self._loop,
            exclude_internal_topics=self._exclude_internal_topics)

        if self._subscription.subscription is not None:
            if self._subscription.partitions_auto_assigned():
                # Either we passed `topics` to constructor or `subscribe`
                # was called before `start`
                await self._client.force_metadata_update()
                self._coordinator.assign_all_partitions(check_unknown=True)
async def _wait_topics(self):
    """Block until metadata is available for every subscribed topic
    (covers broker-side topic autocreation)."""
    subscription = self._subscription.subscription
    if subscription is None:
        return
    for topic in subscription.topics:
        await self._client._wait_on_metadata(topic)
def _validate_topics(self, topics):
    """Check that ``topics`` is a proper collection; return it as a set."""
    if isinstance(topics, (tuple, set, list)):
        return set(topics)
    raise ValueError("Topics should be list of strings")
def assign(self, partitions):
    """ Manually assign a list of TopicPartitions to this consumer.

    This interface does not support incremental assignment and will
    replace the previous assignment (if there was one).

    Arguments:
        partitions (list of TopicPartition): assignment for this instance.

    Raises:
        IllegalStateError: if consumer has already called subscribe()

    Warning:
        It is not possible to use both manual partition assignment with
        assign() and group assignment with subscribe().

    Note:
        Manual topic assignment through this method does not use the
        consumer's group management functionality. As such, there will be
        **no rebalance operation triggered** when group membership or
        cluster and topic metadata change.
    """
    self._subscription.assign_from_user(partitions)
    self._client.set_topics([tp.topic for tp in partitions])

    # Before ``start()`` there is no coordinator yet; ``start()`` will
    # pick the assignment up itself.
    if self._coordinator is None:
        return
    if self._group_id is not None:
        # Refresh committed positions for the newly assigned partitions.
        current_assignment = self._subscription.subscription.assignment
        self._coordinator.start_commit_offsets_refresh_task(
            current_assignment)
def assignment(self):
    """ Get the set of partitions currently assigned to this consumer.

    If partitions were directly assigned using ``assign()``, then this will
    simply return the same partitions that were previously assigned.

    If topics were subscribed using ``subscribe()``, then this will give
    the set of topic partitions currently assigned to the consumer (which
    may be empty if the assignment hasn't happened yet or if the partitions
    are in the process of being reassigned).

    Returns:
        set: {TopicPartition, ...}
    """
    return self._subscription.assigned_partitions()
def set_close(self):
    """Synchronously mark the consumer (and its client) as closed;
    ``stop()`` performs the actual async shutdown."""
    self._closed = True
    self._client.set_close()
async def stop(self):
    """ Close the consumer, while waiting for finalizers:

    * Commit last consumed message if autocommit enabled
    * Leave group if used Consumer Groups
    """
    if self._closed:
        return
    log.debug("Closing the KafkaConsumer.")
    self.set_close()
    # Shut down coordinator first (commits/leaves group), then fetcher,
    # then the shared client; each may be None if start() never ran.
    for component in (self._coordinator, self._fetcher):
        if component:
            await component.close()
    await self._client.close()
    log.debug("The KafkaConsumer has closed.")
async def commit(self, offsets=None):
    """ Commit offsets to Kafka.

    This commits offsets only to Kafka. The offsets committed using this
    API will be used on the first fetch after every rebalance and also on
    startup. As such, if you need to store offsets in anything other than
    Kafka, this API should not be used.

    Currently only supports kafka-topic offset storage (not zookeeper)

    When explicitly passing ``offsets`` use either offset of next record,
    or tuple of offset and metadata::

        tp = TopicPartition(msg.topic, msg.partition)
        metadata = "Some utf-8 metadata"
        # Either
        await consumer.commit({tp: msg.offset + 1})
        # Or position directly
        await consumer.commit({tp: (msg.offset + 1, metadata)})

    .. note:: If you want `fire and forget` commit, like ``commit_async()``
        in *kafka-python*, just run it in a task. Something like::

            fut = loop.create_task(consumer.commit())
            fut.add_done_callback(on_commit_done)

    Arguments:
        offsets (dict, optional): {TopicPartition: (offset, metadata)} dict
            to commit with the configured ``group_id``. Defaults to current
            consumed offsets for all subscribed partitions.

    Raises:
        IllegalOperation: If used with ``group_id == None``.
        IllegalStateError: If partitions not assigned.
        ValueError: If offsets is of wrong format.
        CommitFailedError: If membership already changed on broker.
        KafkaError: If commit failed on broker side. This could be due to
            invalid offset, too long metadata, authorization failure, etc.

    .. versionchanged:: 0.4.0

        Changed ``AssertionError`` to ``IllegalStateError`` in case of
        unassigned partition.

    .. versionchanged:: 0.4.0

        Will now raise ``CommitFailedError`` in case membership changed,
        as (possibly) this partition is handled by another consumer.
    """
    if self._group_id is None:
        raise IllegalOperation("Requires group_id")

    subscription = self._subscription.subscription
    if subscription is None:
        raise IllegalStateError("Not subscribed to any topics")
    assignment = subscription.assignment
    if assignment is None:
        raise IllegalStateError("No partitions assigned")

    if offsets is None:
        offsets = assignment.all_consumed_offsets()
    else:
        offsets = commit_structure_validate(offsets)
        # Reject any partition that is not part of the current assignment.
        for tp in offsets:
            if tp in assignment.tps:
                continue
            raise IllegalStateError(
                "Partition {} is not assigned".format(tp))

    await self._coordinator.commit_offsets(assignment, offsets)
async def committed(self, partition):
    """ Get the last committed offset for the given partition (whether the
    commit happened by this process or another).

    This offset will be used as the position for the consumer in the
    event of a failure. The call blocks on a remote lookup, as committed
    offsets are not cached by the consumer (a Transactional Producer can
    change them without the Consumer's knowledge as of Kafka 0.11.0).

    Arguments:
        partition (TopicPartition): the partition to check

    Returns:
        The last committed offset, or None if there was no prior commit.

    Raises:
        IllegalOperation: If used with ``group_id == None``
    """
    if self._group_id is None:
        raise IllegalOperation("Requires group_id")
    commit_map = await self._coordinator.fetch_committed_offsets(
        [partition])
    meta = commit_map.get(partition)
    # The broker reports -1 when nothing has been committed yet.
    if meta is None or meta.offset == -1:
        return None
    return meta.offset
async def topics(self):
    """ Get all topics the user is authorized to view.

    Returns:
        set: topics
    """
    metadata = await self._client.fetch_all_metadata()
    return metadata.topics()
def partitions_for_topic(self, topic):
    """ Get metadata about the partitions for a given topic.

    Returns ``None`` if the Consumer does not already have metadata for
    this topic (no remote call is made).

    Arguments:
        topic (str): topic to check

    Returns:
        set: partition ids
    """
    cluster_metadata = self._client.cluster
    return cluster_metadata.partitions_for_topic(topic)
async def position(self, partition):
    """ Get the offset of the *next record* that will be fetched (if a
    record with that offset exists on broker).

    Arguments:
        partition (TopicPartition): partition to check

    Returns:
        int: offset

    Raises:
        IllegalStateError: partition is not assigned

    .. versionchanged:: 0.4.0

        Changed ``AssertionError`` to ``IllegalStateError`` in case of
        unassigned partition
    """
    # Retry loop: a valid position may not exist yet (e.g. right after a
    # rebalance). Wait for either the position to resolve or the
    # assignment to change, then re-check from the top.
    while True:
        if not self._subscription.is_assigned(partition):
            raise IllegalStateError(
                'Partition {} is not assigned'.format(partition))
        assignment = self._subscription.subscription.assignment
        tp_state = assignment.state_value(partition)
        if not tp_state.has_valid_position:
            # Surface pending coordination errors before blocking.
            self._coordinator.check_errors()
            # Wait for a valid position OR an unassignment, whichever
            # happens first.
            # NOTE(review): the ``loop=`` argument to asyncio.wait was
            # removed in Python 3.10 -- confirm supported versions.
            await asyncio.wait(
                [tp_state.wait_for_position(),
                 assignment.unassign_future],
                timeout=self._request_timeout_ms / 1000,
                return_when=asyncio.FIRST_COMPLETED, loop=self._loop,
            )
            if not tp_state.has_valid_position:
                if self._subscription.subscription is None:
                    # Consumer was unsubscribed while we waited.
                    raise IllegalStateError(
                        'Partition {} is not assigned'.format(partition))
                if self._subscription.subscription.assignment is None:
                    # Rebalance in progress: wait for the new assignment
                    # and retry the whole check.
                    self._coordinator.check_errors()
                    await self._subscription.wait_for_assignment()
                continue
        return tp_state.position
def highwater(self, partition):
    """ Last known highwater offset for a partition.

    A highwater offset is the offset that will be assigned to the next
    message that is produced. It may be useful for calculating lag, by
    comparing with the reported position. Note that both position and
    highwater refer to the *next* offset -- i.e., highwater offset is
    one greater than the newest available message.

    Highwater offsets are returned as part of ``FetchResponse``, so will
    not be available if messages for this partition were not requested
    yet.

    Arguments:
        partition (TopicPartition): partition to check

    Returns:
        int or None: offset if available
    """
    assert self._subscription.is_assigned(partition), \
        'Partition is not assigned'
    state = self._subscription.subscription.assignment.state_value(partition)
    return state.highwater
def last_stable_offset(self, partition):
    """ Returns the Last Stable Offset of a topic partition. It is the
    last offset up to which point all transactions were completed. Only
    available with isolation_level `read_committed`; in
    `read_uncommitted` it will always return -1. Will return None for
    older Brokers.

    As with ``highwater()`` will not be available until some messages
    are consumed.

    Arguments:
        partition (TopicPartition): partition to check

    Returns:
        int or None: offset if available
    """
    assert self._subscription.is_assigned(partition), \
        'Partition is not assigned'
    state = self._subscription.subscription.assignment.state_value(partition)
    return state.lso
def last_poll_timestamp(self, partition):
    """ Returns the timestamp of the last poll of this partition (in ms).
    It is the last time `highwater` and `last_stable_offset` were
    updated. However it does not mean that new messages were received.

    As with ``highwater()`` will not be available until some messages
    are consumed.

    Arguments:
        partition (TopicPartition): partition to check

    Returns:
        int or None: timestamp if available
    """
    assert self._subscription.is_assigned(partition), \
        'Partition is not assigned'
    state = self._subscription.subscription.assignment.state_value(partition)
    return state.timestamp
def seek(self, partition, offset):
    """ Manually specify the fetch offset for a TopicPartition.

    Overrides the fetch offsets that the consumer will use on the next
    ``getmany()``/``getone()`` call. If this API is invoked for the same
    partition more than once, the latest offset will be used on the next
    fetch.

    Note:
        You may lose data if this API is arbitrarily used in the middle
        of consumption to reset the fetch offsets. Use it either on
        rebalance listeners or after all pending messages are processed.

    Arguments:
        partition (TopicPartition): partition for seek operation
        offset (int): message offset in partition, must be >= 0

    Raises:
        ValueError: if offset is not a non-negative integer
        IllegalStateError: partition is not currently assigned

    .. versionchanged:: 0.4.0

        Changed ``AssertionError`` to ``IllegalStateError`` and
        ``ValueError`` in respective cases.
    """
    if not isinstance(offset, int) or offset < 0:
        # BUG FIX: offset 0 is accepted by the check above, so the old
        # message ("positive integer") was inaccurate.
        raise ValueError("Offset must be a non-negative integer")
    log.debug("Seeking to offset %s for partition %s", offset, partition)
    self._fetcher.seek_to(partition, offset)
async def seek_to_beginning(self, *partitions):
    """ Seek to the oldest available offset for partitions.

    Arguments:
        *partitions: Optionally provide specific TopicPartitions,
            otherwise default to all assigned partitions.

    Raises:
        IllegalStateError: If any partition is not currently assigned
        TypeError: If partitions are not instances of TopicPartition

    .. versionadded:: 0.3.0
    """
    for p in partitions:
        if not isinstance(p, TopicPartition):
            raise TypeError('partitions must be TopicPartition instances')
    if partitions:
        unknown = set(partitions) - self._subscription.assigned_partitions()
        if unknown:
            raise IllegalStateError(
                "Partitions {} are not assigned".format(unknown))
    else:
        partitions = self._subscription.assigned_partitions()
        assert partitions, 'No partitions are currently assigned'
    for tp in partitions:
        log.debug("Seeking to beginning of partition %s", tp)
    # One reset request covers all requested partitions; wait for it to
    # finish or for the assignment to change, whichever happens first.
    reset_fut = self._fetcher.request_offset_reset(
        partitions, OffsetResetStrategy.EARLIEST)
    assignment = self._subscription.subscription.assignment
    await asyncio.wait(
        [reset_fut, assignment.unassign_future],
        timeout=self._request_timeout_ms / 1000,
        return_when=asyncio.FIRST_COMPLETED,
        loop=self._loop
    )
    self._coordinator.check_errors()
    return reset_fut.done()
async def seek_to_end(self, *partitions):
    """ Seek to the most recent available offset for partitions.

    Arguments:
        *partitions: Optionally provide specific TopicPartitions,
            otherwise default to all assigned partitions.

    Raises:
        IllegalStateError: If any partition is not currently assigned
        TypeError: If partitions are not instances of TopicPartition

    .. versionadded:: 0.3.0
    """
    for p in partitions:
        if not isinstance(p, TopicPartition):
            raise TypeError('partitions must be TopicPartition instances')
    if partitions:
        unknown = set(partitions) - self._subscription.assigned_partitions()
        if unknown:
            raise IllegalStateError(
                "Partitions {} are not assigned".format(unknown))
    else:
        partitions = self._subscription.assigned_partitions()
        assert partitions, 'No partitions are currently assigned'
    for tp in partitions:
        log.debug("Seeking to end of partition %s", tp)
    # One reset request covers all requested partitions; wait for it to
    # finish or for the assignment to change, whichever happens first.
    reset_fut = self._fetcher.request_offset_reset(
        partitions, OffsetResetStrategy.LATEST)
    assignment = self._subscription.subscription.assignment
    await asyncio.wait(
        [reset_fut, assignment.unassign_future],
        timeout=self._request_timeout_ms / 1000,
        return_when=asyncio.FIRST_COMPLETED,
        loop=self._loop
    )
    self._coordinator.check_errors()
    return reset_fut.done()
async def seek_to_committed(self, *partitions):
    """ Seek to the committed offset for partitions.

    Arguments:
        *partitions: Optionally provide specific TopicPartitions,
            otherwise default to all assigned partitions.

    Returns:
        dict: mapping of the seeked partitions to their committed
        offsets (``None`` when no prior commit exists).

    Raises:
        IllegalStateError: If any partition is not currently assigned
        TypeError: If partitions are not instances of TopicPartition
        IllegalOperation: If used with ``group_id == None``

    .. versionchanged:: 0.3.0

        Changed ``AssertionError`` to ``IllegalStateError`` in case of
        unassigned partition
    """
    for p in partitions:
        if not isinstance(p, TopicPartition):
            raise TypeError('partitions must be TopicPartition instances')
    if partitions:
        unknown = set(partitions) - self._subscription.assigned_partitions()
        if unknown:
            raise IllegalStateError(
                "Partitions {} are not assigned".format(unknown))
    else:
        partitions = self._subscription.assigned_partitions()
        assert partitions, 'No partitions are currently assigned'
    committed_offsets = {}
    for tp in partitions:
        offset = await self.committed(tp)
        committed_offsets[tp] = offset
        log.debug("Seeking to committed of partition %s %s", tp, offset)
        # Only seek when the broker actually has a committed offset.
        if offset is not None and offset >= 0:
            self._fetcher.seek_to(tp, offset)
    return committed_offsets
async def offsets_for_times(self, timestamps):
    """ Look up the offsets for the given partitions by timestamp. The
    returned offset for each partition is the earliest offset whose
    timestamp is greater than or equal to the given timestamp in the
    corresponding partition.

    The consumer does not have to be assigned the partitions.

    If the message format version in a partition is before 0.10.0, i.e.
    the messages do not have timestamps, ``None`` will be returned for
    that partition.

    Note:
        This method may block indefinitely if the partition does not
        exist.

    Arguments:
        timestamps (dict): ``{TopicPartition: int}`` mapping from
            partition to the timestamp to look up. Unit should be
            milliseconds since beginning of the epoch (midnight
            Jan 1, 1970 (UTC))

    Returns:
        dict: ``{TopicPartition: OffsetAndTimestamp}`` mapping from
        partition to the timestamp and offset of the first message with
        timestamp greater than or equal to the target timestamp.

    Raises:
        ValueError: If the target timestamp is negative
        UnsupportedVersionError: If the broker does not support looking
            up the offsets by timestamp.
        KafkaTimeoutError: If fetch failed in request_timeout_ms

    .. versionadded:: 0.3.0
    """
    if self._client.api_version <= (0, 10, 0):
        raise UnsupportedVersionError(
            "offsets_for_times API not supported for cluster version {}"
            .format(self._client.api_version))
    # BUG FIX: validate into a fresh dict instead of writing ``int(ts)``
    # back into the caller's mapping while iterating it -- the old code
    # mutated the argument as a side effect (partially, on error).
    validated = {}
    for tp, ts in timestamps.items():
        if ts < 0:
            raise ValueError(
                "The target time for partition {} is {}. The target time "
                "cannot be negative.".format(tp, ts))
        validated[tp] = int(ts)
    offsets = await self._fetcher.get_offsets_by_times(
        validated, self._request_timeout_ms)
    return offsets
async def beginning_offsets(self, partitions):
    """ Get the first offset for the given partitions.

    This method does not change the current consumer position of the
    partitions.

    Note:
        This method may block indefinitely if the partition does not
        exist.

    Arguments:
        partitions (list): List of TopicPartition instances to fetch
            offsets for.

    Returns:
        dict: ``{TopicPartition: int}`` mapping of partition to earliest
        available offset.

    Raises:
        UnsupportedVersionError: If the broker does not support looking
            up the offsets by timestamp.
        KafkaTimeoutError: If fetch failed in request_timeout_ms.

    .. versionadded:: 0.3.0
    """
    if self._client.api_version <= (0, 10, 0):
        # BUG FIX: the message previously referenced ``offsets_for_times``
        # (copy-paste) instead of this API.
        raise UnsupportedVersionError(
            "beginning_offsets API not supported for cluster version {}"
            .format(self._client.api_version))
    offsets = await self._fetcher.beginning_offsets(
        partitions, self._request_timeout_ms)
    return offsets
async def end_offsets(self, partitions):
    """ Get the last offset for the given partitions. The last offset of
    a partition is the offset of the upcoming message, i.e. the offset
    of the last available message + 1.

    This method does not change the current consumer position of the
    partitions.

    Note:
        This method may block indefinitely if the partition does not
        exist.

    Arguments:
        partitions (list): List of TopicPartition instances to fetch
            offsets for.

    Returns:
        dict: ``{TopicPartition: int}`` mapping of partition to last
        available offset + 1.

    Raises:
        UnsupportedVersionError: If the broker does not support looking
            up the offsets by timestamp.
        KafkaTimeoutError: If fetch failed in request_timeout_ms

    .. versionadded:: 0.3.0
    """
    if self._client.api_version <= (0, 10, 0):
        # BUG FIX: the message previously referenced ``offsets_for_times``
        # (copy-paste) instead of this API.
        raise UnsupportedVersionError(
            "end_offsets API not supported for cluster version {}"
            .format(self._client.api_version))
    offsets = await self._fetcher.end_offsets(
        partitions, self._request_timeout_ms)
    return offsets
def subscribe(self, topics=(), pattern=None, listener=None):
    """ Subscribe to a list of topics, or a topic regex pattern.

    Partitions will be dynamically assigned via a group coordinator.
    Topic subscriptions are not incremental: this list will replace the
    current assignment (if there is one).

    This method is incompatible with ``assign()``.

    Arguments:
        topics (list): List of topics for subscription.
        pattern (str): Pattern to match available topics. You must
            provide either topics or pattern, but not both.
        listener (ConsumerRebalanceListener): Optionally include
            listener callback, which will be called before and after
            each rebalance operation. When any rebalance-triggering
            event occurs (partition count change, topic created or
            deleted, group member added or removed), the listener is
            invoked first to indicate that the consumer's assignment has
            been revoked, and then again when the new assignment has
            been received. This listener immediately overrides any
            listener set in a previous call to subscribe; the
            revoked/assigned partitions are guaranteed to be from topics
            subscribed in this call.

    Raises:
        IllegalStateError: if called after previously calling assign()
        ValueError: if neither topics or pattern is provided or both
            are provided
        TypeError: if listener is not a ConsumerRebalanceListener
    """
    # Exactly one of topics/pattern must be supplied.
    if not (topics or pattern):
        raise ValueError(
            "You should provide either `topics` or `pattern`")
    if topics and pattern:
        raise ValueError(
            "You can't provide both `topics` and `pattern`")
    if listener is not None and \
            not isinstance(listener, ConsumerRebalanceListener):
        raise TypeError(
            "listener should be an instance of ConsumerRebalanceListener")
    if pattern is not None:
        # Compile eagerly so an invalid regex fails here, not later.
        try:
            pattern = re.compile(pattern)
        except re.error as err:
            raise ValueError(
                "{!r} is not a valid pattern: {}".format(pattern, err))
        self._subscription.subscribe_pattern(
            pattern=pattern, listener=listener)
        # NOTE: set_topics will trigger a rebalance, so the coordinator
        # will get the initial subscription shortly by ``metadata_changed``
        # handler.
        self._client.set_topics([])
        log.info("Subscribed to topic pattern: %s", pattern)
    elif topics:
        topics = self._validate_topics(topics)
        self._subscription.subscribe(
            topics=topics, listener=listener)
        self._client.set_topics(self._subscription.subscription.topics)
        log.info("Subscribed to topic(s): %s", topics)
def subscription(self):
    """ Get the current topic subscription.

    Returns:
        frozenset: {topic, ...}
    """
    return self._subscription.topics
def unsubscribe(self):
    """ Unsubscribe from all topics and clear all assigned partitions. """
    self._subscription.unsubscribe()
    if self._group_id is not None:
        # Leave the group proactively so the broker can rebalance the
        # remaining members without waiting for a session timeout.
        self._coordinator.maybe_leave_group()
    self._client.set_topics([])
    log.info(
        "Unsubscribed all topics or patterns and assigned partitions")
async def getone(self, *partitions):
    """ Get one message from Kafka.

    If no new messages are prefetched, this method will wait for one.

    Arguments:
        partitions (List[TopicPartition]): Optional list of partitions
            to return from. If no partitions specified then the
            returned message will be from any partition the consumer is
            subscribed to.

    Returns:
        ConsumerRecord: namedtuple with at least
        ``["topic", "partition", "offset", "key", "value"]`` fields.

    Raises:
        ConsumerStoppedError: if the consumer is already closed.

    Example usage:

    .. code:: python

        while True:
            message = await consumer.getone()
            print(message.offset, message.key, message.value)
    """
    assert all(isinstance(p, TopicPartition) for p in partitions)
    if self._closed:
        raise ConsumerStoppedError()
    # Surface pending coordination errors before fetching.
    self._coordinator.check_errors()
    with self._subscription.fetch_context():
        return await self._fetcher.next_record(partitions)
async def getmany(self, *partitions, timeout_ms=0, max_records=None):
    """ Get messages from assigned topics / partitions.

    Prefetched messages are returned in batches by topic-partition.
    If messages are not available in the prefetched buffer this method
    waits `timeout_ms` milliseconds.

    Arguments:
        partitions (List[TopicPartition]): The partitions that need
            fetching message. If no one partition specified then all
            subscribed partitions will be used
        timeout_ms (int, optional): milliseconds spent waiting if
            data is not available in the buffer. If 0, returns
            immediately with any records that are available currently
            in the buffer, else returns empty. Must not be negative.
            Default: 0
        max_records (int, optional): maximum number of records to
            return in this call; defaults to the consumer's
            ``max_poll_records`` setting.

    Returns:
        dict: topic to list of records since the last fetch for the
        subscribed list of topics and partitions

    Raises:
        ConsumerStoppedError: if the consumer is already closed.
        ValueError: if ``max_records`` is not a positive integer.

    Example usage:

    .. code:: python

        data = await consumer.getmany()
        for tp, messages in data.items():
            for message in messages:
                print(message.offset, message.key, message.value)
    """
    assert all(isinstance(p, TopicPartition) for p in partitions)
    if self._closed:
        raise ConsumerStoppedError()
    if max_records is not None and (
            not isinstance(max_records, int) or max_records < 1):
        raise ValueError("`max_records` must be a positive Integer")
    # Surface pending coordination errors before fetching.
    self._coordinator.check_errors()
    with self._subscription.fetch_context():
        return await self._fetcher.fetched_records(
            partitions, timeout_ms / 1000,
            max_records=max_records or self._max_poll_records)
def pause(self, *partitions):
    """ Suspend fetching from the requested partitions.

    Future calls to :meth:`~aiokafka.AIOKafkaConsumer.getmany` will not
    return any records from these partitions until they have been
    resumed using :meth:`~aiokafka.AIOKafkaConsumer.resume`.

    Note: This method does not affect partition subscription.
    In particular, it does not cause a group rebalance when automatic
    assignment is used.

    Arguments:
        *partitions (TopicPartition): Partitions to pause.
    """
    if not all(isinstance(p, TopicPartition) for p in partitions):
        raise TypeError('partitions must be TopicPartition namedtuples')
    for tp in partitions:
        log.debug("Pausing partition %s", tp)
        self._subscription.pause(tp)
def paused(self):
    """ Get the partitions that were previously paused using
    :meth:`~aiokafka.AIOKafkaConsumer.pause`.

    Returns:
        set: {partition (TopicPartition), ...}
    """
    return self._subscription.paused_partitions()
def resume(self, *partitions):
    """ Resume fetching from the specified (paused) partitions.

    Arguments:
        *partitions (TopicPartition): Partitions to resume.
    """
    if not all(isinstance(p, TopicPartition) for p in partitions):
        raise TypeError('partitions must be TopicPartition namedtuples')
    for tp in partitions:
        log.debug("Resuming partition %s", tp)
        self._subscription.resume(tp)
def __aiter__(self):
    # The consumer object is itself the async iterator; refuse to start
    # iterating a closed consumer.
    if self._closed:
        raise ConsumerStoppedError()
    return self
async def __anext__(self):
    """ Asyncio iterator interface for consumer.

    Note:
        TopicAuthorizationFailedError and OffsetOutOfRangeError
        exceptions can be raised in iterator.
        All other KafkaError exceptions will be logged and not raised
    """
    while True:
        try:
            return (await self.getone())
        except ConsumerStoppedError:
            raise StopAsyncIteration  # noqa: F821
        except (TopicAuthorizationFailedError,
                OffsetOutOfRangeError,
                NoOffsetForPartitionError) as err:
            raise err
        except RecordTooLargeError as err:
            # BUG FIX: the "%s" placeholder previously had no argument,
            # so the literal "%s" was logged; bind the exception.
            log.exception("error in consumer iterator: %s", err)
@property
def records_last_request(self):
    # NOTE(review): proxied straight from the fetcher; presumably the
    # raw records of the most recent fetch request -- confirm against
    # the fetcher implementation.
    return self._fetcher.records_last_request
@property
def records_last_response(self):
    # NOTE(review): proxied straight from the fetcher; presumably the
    # most recent fetch response -- confirm against the fetcher
    # implementation.
    return self._fetcher.records_last_response
from ._crc32c import crc as crc32c_py
from aiokafka.util import NO_EXTENSIONS
def encode_varint_py(value, write):
    """ Encode an integer to a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        value (int): Value to encode
        write (function): Called per byte that needs to be written

    Returns:
        int: Number of bytes written
    """
    # Zig-zag encode the signed value so small negatives stay small.
    value = (value << 1) ^ (value >> 63)
    # Unrolled fast paths for the common 1-5 byte sizes.
    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # Return to general algorithm for 6+ byte values.
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        # BUG FIX: ``i`` only counts the bytes written inside the loop;
        # the final ``write(bits)`` emits one more byte, so the previous
        # ``return i`` under-reported the byte count by one.
        return i + 1
def size_of_varint_py(value):
    """ Number of bytes needed to encode an integer in variable-length
    (zig-zag varint) format.
    """
    encoded = (value << 1) ^ (value >> 63)
    # Each byte stores 7 payload bits; a 64-bit zig-zag value needs at
    # most 10 bytes.
    size = 1
    while encoded > 0x7f and size < 10:
        encoded >>= 7
        size += 1
    return size
def decode_varint_py(buffer, pos=0):
    """ Decode an integer from a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        buffer (bytearray): buffer to read from.
        pos (int): optional position to read from

    Returns:
        (int, int): Decoded int value and next read position

    Raises:
        ValueError: if the encoded value does not fit into int64 range.
    """
    result = 0
    shift = 0
    while True:
        byte = buffer[pos]
        pos += 1
        result |= (byte & 0x7f) << shift
        if not (byte & 0x80):
            # Terminator byte reached: undo the zig-zag encoding.
            return (result >> 1) ^ -(result & 1), pos
        shift += 7
        if shift >= 64:
            raise ValueError("Out of int64 range")
def calc_crc32c_py(memview):
    """ Calculate the CRC-32C (Castagnoli) checksum of a memoryview of
    data.

    Arguments:
        memview: bytes-like buffer to checksum

    Returns:
        int: CRC-32C value
    """
    return crc32c_py(memview)
# Implementation selection: keep the pure-Python versions when C
# extensions are explicitly disabled; otherwise prefer the Cython
# implementations and fall back to pure Python if the extension module
# is not available.
if NO_EXTENSIONS:
    calc_crc32c = calc_crc32c_py
    decode_varint = decode_varint_py
    size_of_varint = size_of_varint_py
    encode_varint = encode_varint_py
else:
    try:
        from ._crecords import (  # noqa
            decode_varint_cython, crc32c_cython, encode_varint_cython,
            size_of_varint_cython
        )
        decode_varint = decode_varint_cython
        encode_varint = encode_varint_cython
        size_of_varint = size_of_varint_cython
        calc_crc32c = crc32c_cython
    except ImportError:  # pragma: no cover
        # Extension build missing -- degrade to the Python fallbacks.
        calc_crc32c = calc_crc32c_py
        decode_varint = decode_varint_py
        size_of_varint = size_of_varint_py
        encode_varint = encode_varint_py
import struct
from aiokafka.errors import CorruptRecordException
from aiokafka.util import NO_EXTENSIONS
from .legacy_records import LegacyRecordBatch
from .default_records import DefaultRecordBatch
class _MemoryRecordsPy:
    """Sequential reader that splits a raw fetch buffer into batches.

    ``next_batch()`` yields ``DefaultRecordBatch`` (magic >= 2) or
    ``LegacyRecordBatch`` wrappers over zero-copy memoryview slices of
    the underlying buffer.
    """

    # Byte offsets of the length and magic fields within a log entry.
    LENGTH_OFFSET = struct.calcsize(">q")
    LOG_OVERHEAD = struct.calcsize(">qi")
    MAGIC_OFFSET = struct.calcsize(">qii")
    # Minimum space requirements for Record V0
    MIN_SLICE = LOG_OVERHEAD + LegacyRecordBatch.RECORD_OVERHEAD_V0

    def __init__(self, bytes_data):
        self._buffer = bytes_data
        self._pos = 0
        # We keep one slice ahead so `has_next` will return very fast
        self._next_slice = None
        self._remaining_bytes = 0
        self._cache_next()

    def size_in_bytes(self):
        """Total size of the underlying buffer in bytes."""
        return len(self._buffer)

    # NOTE: we cache offsets here as kwargs for a bit more speed, as cPython
    # will use LOAD_FAST opcode in this case
    def _cache_next(self, len_offset=LENGTH_OFFSET, log_overhead=LOG_OVERHEAD):
        # Advance past the current batch and stash a zero-copy slice of
        # the next one, or None when the buffer is exhausted/truncated.
        buffer = self._buffer
        buffer_len = len(buffer)
        pos = self._pos
        remaining = buffer_len - pos
        if remaining < log_overhead:
            # Will be re-checked in Fetcher for remaining bytes.
            self._remaining_bytes = remaining
            self._next_slice = None
            return
        length, = struct.unpack_from(
            ">i", buffer, pos + len_offset)
        slice_end = pos + log_overhead + length
        if slice_end > buffer_len:
            # Will be re-checked in Fetcher for remaining bytes
            self._remaining_bytes = remaining
            self._next_slice = None
            return
        self._next_slice = memoryview(buffer)[pos: slice_end]
        self._pos = slice_end

    def has_next(self):
        """True when another complete batch is available."""
        return self._next_slice is not None

    # NOTE: same cache for LOAD_FAST as above
    def next_batch(self, _min_slice=MIN_SLICE,
                   _magic_offset=MAGIC_OFFSET):
        """Return the next batch wrapper, or None when exhausted.

        Raises:
            CorruptRecordException: if the entry is smaller than the
                minimal legacy record overhead.
        """
        next_slice = self._next_slice
        if next_slice is None:
            return None
        if len(next_slice) < _min_slice:
            raise CorruptRecordException(
                "Record size is less than the minimum record overhead "
                "({})".format(_min_slice - self.LOG_OVERHEAD))
        self._cache_next()
        magic = next_slice[_magic_offset]
        if magic >= 2:  # pragma: no cover
            return DefaultRecordBatch(next_slice)
        else:
            return LegacyRecordBatch(next_slice, magic)
# Prefer the Cython-accelerated MemoryRecords unless extensions are
# explicitly disabled; fall back to the pure-Python implementation when
# the extension module is unavailable.
if NO_EXTENSIONS:
    MemoryRecords = _MemoryRecordsPy
else:
    try:
        from ._crecords import MemoryRecords as _MemoryRecordsCython
        MemoryRecords = _MemoryRecordsCython
    except ImportError:  # pragma: no cover
        MemoryRecords = _MemoryRecordsPy
import struct
import time
from binascii import crc32
from aiokafka.errors import CorruptRecordException
from aiokafka.util import NO_EXTENSIONS
from kafka.codec import (
gzip_encode, snappy_encode, lz4_encode, lz4_encode_old_kafka,
gzip_decode, snappy_decode, lz4_decode, lz4_decode_old_kafka
)
NoneType = type(None)
class LegacyRecordBase:
    """Shared wire-format constants for legacy (magic 0/1) messages.

    Message layout on the wire is::

        BaseOffset => Int64
        Length => Int32
        CRC => Int32
        Magic => Int8
        Attributes => Int8
        [Timestamp => Int64]   # magic >= 1 only
        Key => Bytes
        Value => Bytes
    """

    HEADER_STRUCT_V0 = struct.Struct(
        ">q"  # BaseOffset => Int64
        "i"  # Length => Int32
        "I"  # CRC => Int32
        "b"  # Magic => Int8
        "b"  # Attributes => Int8
    )
    HEADER_STRUCT_V1 = struct.Struct(
        ">q"  # BaseOffset => Int64
        "i"  # Length => Int32
        "I"  # CRC => Int32
        "b"  # Magic => Int8
        "b"  # Attributes => Int8
        "q"  # timestamp => Int64
    )

    # The CRC field starts right after the offset+size prefix.
    LOG_OVERHEAD = CRC_OFFSET = struct.calcsize(
        ">q"  # Offset
        "i"  # Size
    )
    MAGIC_OFFSET = LOG_OVERHEAD + struct.calcsize(
        ">I"  # CRC
    )

    # Those are used for fast size calculations
    RECORD_OVERHEAD_V0 = struct.calcsize(
        ">I"  # CRC
        "b"  # magic
        "b"  # attributes
        "i"  # Key length
        "i"  # Value length
    )
    RECORD_OVERHEAD_V1 = struct.calcsize(
        ">I"  # CRC
        "b"  # magic
        "b"  # attributes
        "q"  # timestamp
        "i"  # Key length
        "i"  # Value length
    )
    RECORD_OVERHEAD = {
        0: RECORD_OVERHEAD_V0,
        1: RECORD_OVERHEAD_V1,
    }

    KEY_OFFSET_V0 = HEADER_STRUCT_V0.size
    KEY_OFFSET_V1 = HEADER_STRUCT_V1.size
    KEY_LENGTH = VALUE_LENGTH = struct.calcsize(">i")  # Bytes length is Int32

    # Attribute bits: low 3 bits select the compression codec.
    CODEC_MASK = 0x07
    CODEC_GZIP = 0x01
    CODEC_SNAPPY = 0x02
    CODEC_LZ4 = 0x03
    # Bit 3 selects the timestamp type (magic >= 1 only).
    TIMESTAMP_TYPE_MASK = 0x08

    LOG_APPEND_TIME = 1
    CREATE_TIME = 0
class _LegacyRecordBatchPy(LegacyRecordBase):
    """Read-side view over a single legacy (magic 0 or 1) log entry.

    Iterating the batch yields ``_LegacyRecordPy`` objects; compressed
    wrapper messages are decompressed transparently on first iteration.
    """

    # Legacy formats predate transactions and control batches; these
    # attributes exist for interface parity with v2 batches.
    is_control_batch = False
    is_transactional = False
    producer_id = None

    def __init__(self, buffer, magic):
        self._buffer = memoryview(buffer)
        self._magic = magic
        offset, length, crc, magic_, attrs, timestamp = self._read_header(0)
        assert length == len(buffer) - self.LOG_OVERHEAD
        assert magic == magic_
        self._offset = offset
        self._crc = crc
        self._timestamp = timestamp
        self._attributes = attrs
        # Compressed payloads are unpacked lazily in __iter__.
        self._decompressed = False

    @property
    def timestamp_type(self):
        """0 for CreateTime; 1 for LogAppendTime; None if unsupported.

        Value is determined by broker; produced messages should always set to 0
        Requires Kafka >= 0.10 / message version >= 1
        """
        if self._magic == 0:
            return None
        elif self._attributes & self.TIMESTAMP_TYPE_MASK:
            return 1
        else:
            return 0

    @property
    def compression_type(self):
        # Codec id is stored in the low 3 attribute bits.
        return self._attributes & self.CODEC_MASK

    @property
    def next_offset(self):
        return self._offset + 1

    def validate_crc(self):
        # CRC covers everything after the CRC field itself.
        crc = crc32(self._buffer[self.MAGIC_OFFSET:])
        return self._crc == crc

    def _decompress(self, key_offset):
        # Copy of `_read_key_value`, but uses memoryview
        pos = key_offset
        key_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.KEY_LENGTH
        if key_size != -1:
            pos += key_size
        value_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.VALUE_LENGTH
        if value_size == -1:
            raise CorruptRecordException("Value of compressed message is None")
        else:
            data = self._buffer[pos:pos + value_size]
        compression_type = self.compression_type
        # NOTE(review): an unrecognized codec id would leave
        # ``uncompressed`` unbound -- assumes attrs always carry a
        # known codec here.
        if compression_type == self.CODEC_GZIP:
            uncompressed = gzip_decode(data)
        elif compression_type == self.CODEC_SNAPPY:
            uncompressed = snappy_decode(data.tobytes())
        elif compression_type == self.CODEC_LZ4:
            # Old (magic 0) Kafka used a broken LZ4 framing checksum.
            if self._magic == 0:
                uncompressed = lz4_decode_old_kafka(data.tobytes())
            else:
                uncompressed = lz4_decode(data.tobytes())
        return uncompressed

    def _read_header(self, pos):
        # Parse the fixed-size header; magic 0 has no timestamp field.
        if self._magic == 0:
            offset, length, crc, magic_read, attrs = \
                self.HEADER_STRUCT_V0.unpack_from(self._buffer, pos)
            timestamp = None
        else:
            offset, length, crc, magic_read, attrs, timestamp = \
                self.HEADER_STRUCT_V1.unpack_from(self._buffer, pos)
        return offset, length, crc, magic_read, attrs, timestamp

    def _read_all_headers(self):
        # Collect (header, position) pairs for every message in the
        # (possibly decompressed) buffer.
        pos = 0
        msgs = []
        buffer_len = len(self._buffer)
        while pos < buffer_len:
            header = self._read_header(pos)
            msgs.append((header, pos))
            pos += self.LOG_OVERHEAD + header[1]  # length
        return msgs

    def _read_key_value(self, pos):
        # Key and value are length-prefixed byte blobs; -1 means None.
        key_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.KEY_LENGTH
        if key_size == -1:
            key = None
        else:
            key = self._buffer[pos:pos + key_size].tobytes()
            pos += key_size
        value_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.VALUE_LENGTH
        if value_size == -1:
            value = None
        else:
            value = self._buffer[pos:pos + value_size].tobytes()
        return key, value

    def __iter__(self):
        if self._magic == 1:
            key_offset = self.KEY_OFFSET_V1
        else:
            key_offset = self.KEY_OFFSET_V0
        timestamp_type = self.timestamp_type

        if self.compression_type:
            # In case we will call iter again
            if not self._decompressed:
                self._buffer = memoryview(self._decompress(key_offset))
                self._decompressed = True

            # If relative offset is used, we need to decompress the entire
            # message first to compute the absolute offset.
            headers = self._read_all_headers()
            if self._magic > 0:
                # Wrapper carries the absolute offset of the LAST inner
                # message; inner offsets are relative to it.
                msg_header, _ = headers[-1]
                absolute_base_offset = self._offset - msg_header[0]
            else:
                absolute_base_offset = -1

            for header, msg_pos in headers:
                offset, _, crc, _, attrs, timestamp = header
                # There should only ever be a single layer of compression
                assert not attrs & self.CODEC_MASK, (
                    'MessageSet at offset %d appears double-compressed. This '
                    'should not happen -- check your producers!' % offset)

                # When magic value is greater than 0, the timestamp
                # of a compressed message depends on the
                # timestamp type of the wrapper message:
                if timestamp_type == self.LOG_APPEND_TIME:
                    timestamp = self._timestamp

                if absolute_base_offset >= 0:
                    offset += absolute_base_offset

                key, value = self._read_key_value(msg_pos + key_offset)
                yield _LegacyRecordPy(
                    offset, timestamp, timestamp_type,
                    key, value, crc)
        else:
            # Uncompressed entry: exactly one record.
            key, value = self._read_key_value(key_offset)
            yield _LegacyRecordPy(
                self._offset, self._timestamp, timestamp_type,
                key, value, self._crc)
class _LegacyRecordPy:
__slots__ = ("_offset", "_timestamp", "_timestamp_type", "_key", "_value",
"_crc")
def __init__(self, offset, timestamp, timestamp_type, key, value, crc):
self._offset = offset
self._timestamp = timestamp
self._timestamp_type = timestamp_type
self._key = key
self._value = value
self._crc = crc
@property
def offset(self):
return self._offset
@property
def timestamp(self):
""" Epoch milliseconds
"""
return self._timestamp
@property
def timestamp_type(self):
""" CREATE_TIME(0) or APPEND_TIME(1)
"""
return self._timestamp_type
@property
def key(self):
""" Bytes key or None
"""
return self._key
@property
def value(self):
""" Bytes value or None
"""
return self._value
@property
def headers(self):
return []
@property
def checksum(self):
return self._crc
def __repr__(self):
return (
"LegacyRecord(offset={!r}, timestamp={!r}, timestamp_type={!r},"
" key={!r}, value={!r}, crc={!r})".format(
self._offset, self._timestamp, self._timestamp_type,
self._key, self._value, self._crc)
)
class _LegacyRecordBatchBuilderPy(LegacyRecordBase):
    """Pure-python builder for legacy (magic v0/v1) Kafka message sets."""

    def __init__(self, magic, compression_type, batch_size):
        # Legacy wire formats only: v0 (no timestamp) and v1 (timestamp).
        assert magic in [0, 1]
        self._magic = magic
        self._compression_type = compression_type
        self._batch_size = batch_size
        # One bytearray per appended message; joined in build().
        self._msg_buffers = []
        self._pos = 0

    def append(self, offset, timestamp, key, value, headers=None):
        """ Append message to batch.

        Returns _LegacyRecordMetadataPy on success, or None when the batch
        would exceed ``batch_size`` (the first message, offset 0, is always
        accepted regardless of size).
        """
        if self._magic == 0:
            # v0 has no timestamp field on the wire.
            timestamp = -1
        elif timestamp is None:
            timestamp = int(time.time() * 1000)
        # calculating length is not cheap; only do it once
        key_size = len(key) if key is not None else 0
        value_size = len(value) if value is not None else 0
        pos = self._pos
        size = self._size_in_bytes(key_size, value_size)
        # always allow at least one record to be appended
        if offset != 0 and pos + size >= self._batch_size:
            return None
        msg_buf = bytearray(size)
        try:
            crc = self._encode_msg(
                msg_buf, offset, timestamp, key_size, key, value_size, value)
            self._msg_buffers.append(msg_buf)
            self._pos += size
            return _LegacyRecordMetadataPy(offset, crc, size, timestamp)
        except struct.error:
            # perform expensive type checking only to translate struct errors
            # to human-readable messages
            if type(offset) != int:
                raise TypeError(offset)
            if type(timestamp) != int:
                raise TypeError(timestamp)
            if not isinstance(key, (bytes, bytearray, memoryview, NoneType)):
                raise TypeError("Unsupported type for key: %s" % type(key))
            if not isinstance(value, (bytes, bytearray, memoryview, NoneType)):
                raise TypeError("Unsupported type for value: %s" % type(value))
            raise

    def _encode_msg(self, buf, offset, timestamp, key_size, key,
                    value_size, value, attributes=0):
        """ Encode msg data into the `msg_buffer`, which should be allocated
        to at least the size of this message.

        Returns the CRC32 checksum that was patched into the header.
        """
        magic = self._magic
        # The on-wire Length field excludes the offset+length prefix itself.
        length = (self.KEY_LENGTH + key_size +
                  self.VALUE_LENGTH + value_size -
                  self.LOG_OVERHEAD)
        if magic == 0:
            length += self.KEY_OFFSET_V0
            # -1 in a length field encodes a null key/value.
            struct.pack_into(
                ">q"   # BaseOffset => Int64
                "i"    # Length => Int32
                "I"    # CRC => Int32
                "b"    # Magic => Int8
                "b"    # Attributes => Int8
                "i"    # key length => Int32
                "%ds"  # key => bytes
                "i"    # value length => Int32
                "%ds"  # value => bytes
                % (key_size, value_size),
                buf, 0, offset, length, 0, magic, attributes,
                key_size if key is not None else -1, key or b"",
                value_size if value is not None else -1, value or b"")
        else:
            length += self.KEY_OFFSET_V1
            struct.pack_into(
                ">q"   # BaseOffset => Int64
                "i"    # Length => Int32
                "I"    # CRC => Int32
                "b"    # Magic => Int8
                "b"    # Attributes => Int8
                "q"    # timestamp => Int64
                "i"    # key length => Int32
                "%ds"  # key => bytes
                "i"    # value length => Int32
                "%ds"  # value => bytes
                % (key_size, value_size),
                buf, 0, offset, length, 0, magic, attributes, timestamp,
                key_size if key is not None else -1, key or b"",
                value_size if value is not None else -1, value or b"")
        # CRC covers everything from the magic byte onward; the 0 written
        # above is a placeholder that gets overwritten in place here.
        crc = crc32(memoryview(buf)[self.MAGIC_OFFSET:])
        struct.pack_into(">I", buf, self.CRC_OFFSET, crc)
        return crc

    def _maybe_compress(self):
        """Compress ``self._buffer`` in place when a codec is configured.

        Returns True if compression was applied, False otherwise.
        """
        if self._compression_type:
            buf = self._buffer
            if self._compression_type == self.CODEC_GZIP:
                compressed = gzip_encode(buf)
            elif self._compression_type == self.CODEC_SNAPPY:
                compressed = snappy_encode(buf)
            elif self._compression_type == self.CODEC_LZ4:
                if self._magic == 0:
                    # v0 used a non-standard ("old kafka") LZ4 framing.
                    compressed = lz4_encode_old_kafka(bytes(buf))
                else:
                    compressed = lz4_encode(bytes(buf))
            # NOTE(review): an unrecognized codec would leave `compressed`
            # unbound and raise NameError below -- presumably codec values
            # are validated upstream; confirm before relying on it.
            compressed_size = len(compressed)
            size = self._size_in_bytes(key_size=0, value_size=compressed_size)
            # Resize the buffer, then wrap the compressed payload in one
            # outer message whose attributes carry the codec bits.
            if size > len(self._buffer):
                self._buffer = bytearray(size)
            else:
                del self._buffer[size:]
            self._encode_msg(
                self._buffer,
                offset=0, timestamp=0, key_size=0, key=None,
                value_size=compressed_size, value=compressed,
                attributes=self._compression_type)
            self._pos = size
            return True
        return False

    def build(self):
        """Compress batch to be ready for send"""
        self._buffer = bytearray().join(self._msg_buffers)
        self._maybe_compress()
        return self._buffer

    def size(self):
        """ Return current size of data written to buffer
        """
        return self._pos

    def size_in_bytes(self, offset, timestamp, key, value, headers=None):
        """ Actual size of message to add
        """
        assert not headers, "Headers not supported in v0/v1"
        key_size = len(key) if key is not None else 0
        value_size = len(value) if value is not None else 0
        return self._size_in_bytes(key_size, value_size)

    def _size_in_bytes(self, key_size, value_size):
        # log overhead + fixed per-magic header + variable payload bytes.
        return (self.LOG_OVERHEAD +
                self.RECORD_OVERHEAD[self._magic] +
                key_size +
                value_size)

    @classmethod
    def record_overhead(cls, magic):
        """Fixed per-record header size for *magic*; ValueError if unknown."""
        try:
            return cls.RECORD_OVERHEAD[magic]
        except KeyError:
            raise ValueError("Unsupported magic: %d" % magic)
class _LegacyRecordMetadataPy:
__slots__ = ("_crc", "_size", "_timestamp", "_offset")
def __init__(self, offset, crc, size, timestamp):
self._offset = offset
self._crc = crc
self._size = size
self._timestamp = timestamp
@property
def offset(self):
return self._offset
@property
def crc(self):
return self._crc
@property
def size(self):
return self._size
@property
def timestamp(self):
return self._timestamp
def __repr__(self):
return (
"LegacyRecordMetadata(offset={!r}, crc={!r}, size={!r},"
" timestamp={!r})".format(
self._offset, self._crc, self._size, self._timestamp)
)
# Export the cython implementations when extensions are enabled and the
# compiled module imports cleanly; otherwise fall back to the pure-python
# classes defined above.
if NO_EXTENSIONS:
    LegacyRecordBatchBuilder = _LegacyRecordBatchBuilderPy
    LegacyRecordMetadata = _LegacyRecordMetadataPy
    LegacyRecordBatch = _LegacyRecordBatchPy
    LegacyRecord = _LegacyRecordPy
else:
    try:
        from ._crecords import (
            LegacyRecordBatchBuilder as _LegacyRecordBatchBuilderCython,
            LegacyRecordMetadata as _LegacyRecordMetadataCython,
            LegacyRecordBatch as _LegacyRecordBatchCython,
            LegacyRecord as _LegacyRecordCython
        )
        LegacyRecordBatchBuilder = _LegacyRecordBatchBuilderCython
        LegacyRecordMetadata = _LegacyRecordMetadataCython
        LegacyRecordBatch = _LegacyRecordBatchCython
        LegacyRecord = _LegacyRecordCython
    except ImportError:  # pragma: no cover
        LegacyRecordBatchBuilder = _LegacyRecordBatchBuilderPy
        LegacyRecordMetadata = _LegacyRecordMetadataPy
        LegacyRecordBatch = _LegacyRecordBatchPy
        LegacyRecord = _LegacyRecordPy
from enum import Enum
from collections import namedtuple, defaultdict, deque
from aiokafka.structs import TopicPartition
from aiokafka.util import create_future
# Producer id/epoch pair assigned by the transaction coordinator.
PidAndEpoch = namedtuple("PidAndEpoch", ["pid", "epoch"])
# Sentinels meaning "no pid/epoch has been assigned yet".
NO_PRODUCER_ID = -1
NO_PRODUCER_EPOCH = -1
class SubscriptionType(Enum):
    """How the consumer's partition set is determined: not yet chosen,
    auto-assigned from topic names, auto-assigned from a regex pattern,
    or manually assigned by the user."""

    NONE = 1
    AUTO_TOPICS = 2
    AUTO_PATTERN = 3
    USER_ASSIGNED = 4
class TransactionResult:
    """Outcome flag for ending a transaction: abort (0) or commit (1)."""

    ABORT = 0
    COMMIT = 1
class TransactionState(Enum):
    """Lifecycle states of a transactional producer."""

    UNINITIALIZED = 1
    READY = 2
    IN_TRANSACTION = 3
    COMMITTING_TRANSACTION = 4
    ABORTING_TRANSACTION = 5
    ABORTABLE_ERROR = 6
    FATAL_ERROR = 7

    @classmethod
    def is_transition_valid(cls, source, target):
        """Return True if moving from *source* to *target* is legal.

        Error states are reachable from any state; the other states each
        have a fixed predecessor set. Fix: previously an unhandled target
        (e.g. UNINITIALIZED) fell off the end of the if-chain and returned
        an implicit None -- now an explicit False (both are falsy, so
        existing callers are unaffected).
        """
        if target == cls.READY:
            # READY follows init or a finished commit/abort.
            return source in (cls.UNINITIALIZED,
                              cls.COMMITTING_TRANSACTION,
                              cls.ABORTING_TRANSACTION)
        elif target == cls.IN_TRANSACTION:
            return source == cls.READY
        elif target == cls.COMMITTING_TRANSACTION:
            return source == cls.IN_TRANSACTION
        elif target == cls.ABORTING_TRANSACTION:
            # Aborting is also how an abortable error is resolved.
            return source in (cls.IN_TRANSACTION, cls.ABORTABLE_ERROR)
        elif target == cls.ABORTABLE_ERROR or target == cls.FATAL_ERROR:
            return True
        # Any other target (i.e. UNINITIALIZED) is never a valid transition.
        return False
class TransactionManager:
    """State machine for Kafka idempotent and transactional produce.

    Tracks the producer id/epoch, per-partition sequence numbers, the
    transaction lifecycle (``TransactionState``) and the partitions/offsets
    pending registration with the transaction coordinator. Coordination
    with the background sender task happens through asyncio futures.
    """

    def __init__(self, transactional_id, transaction_timeout_ms, *, loop):
        self.transactional_id = transactional_id
        self.transaction_timeout_ms = transaction_timeout_ms
        self.state = TransactionState.UNINITIALIZED
        # Sentinels until the coordinator hands us a real pid/epoch.
        self._pid_and_epoch = PidAndEpoch(NO_PRODUCER_ID, NO_PRODUCER_EPOCH)
        self._pid_waiter = create_future(loop)
        # Next sequence number to use for each TopicPartition.
        self._sequence_numbers = defaultdict(lambda: 0)
        self._transaction_waiter = None
        self._task_waiter = None
        # Partitions already registered with the coordinator.
        self._txn_partitions = set()
        # Partitions awaiting an AddPartitionsToTxn round-trip.
        self._pending_txn_partitions = set()
        self._txn_consumer_group = None
        # FIFO of (group_id, offsets, fut) awaiting a TxnOffsetCommit.
        self._pending_txn_offsets = deque()
        self._loop = loop

    # IDEMPOTENCE PART

    def set_pid_and_epoch(self, pid: int, epoch: int):
        """Record the coordinator-assigned pid/epoch and wake waiters."""
        self._pid_and_epoch = PidAndEpoch(pid, epoch)
        self._pid_waiter.set_result(None)
        if self.transactional_id:
            self._transition_to(TransactionState.READY)

    def has_pid(self):
        """True once a real producer id has been assigned."""
        return self._pid_and_epoch.pid != NO_PRODUCER_ID

    async def wait_for_pid(self):
        """Block until ``set_pid_and_epoch`` has been called."""
        if self.has_pid():
            return
        else:
            await self._pid_waiter

    def sequence_number(self, tp: TopicPartition):
        """Current sequence number for *tp* (0 if never used)."""
        return self._sequence_numbers[tp]

    def increment_sequence_number(self, tp: TopicPartition, increment: int):
        # Java will wrap those automatically, but in Python we will break
        # on `struct.pack` if ints are too big, so we do it here
        seq = self._sequence_numbers[tp] + increment
        if seq > 2 ** 31 - 1:
            seq -= 2 ** 32
        self._sequence_numbers[tp] = seq

    @property
    def producer_id(self):
        return self._pid_and_epoch.pid

    @property
    def producer_epoch(self):
        return self._pid_and_epoch.epoch

    # TRANSACTION PART

    def _transition_to(self, target):
        """Move to *target* state, asserting the transition is legal."""
        assert TransactionState.is_transition_valid(self.state, target), \
            "Invalid state transition {} -> {}".format(self.state, target)
        self.state = target

    def begin_transaction(self):
        """Enter IN_TRANSACTION and create the end-of-transaction future."""
        self._transition_to(TransactionState.IN_TRANSACTION)
        self._transaction_waiter = create_future(loop=self._loop)

    def committing_transaction(self):
        """Request a commit; surfaces a pending abortable error if any."""
        if self.state == TransactionState.ABORTABLE_ERROR:
            # Raise error to user, we can only abort at this point
            self._transaction_waiter.result()
        self._transition_to(TransactionState.COMMITTING_TRANSACTION)
        self.notify_task_waiter()

    def aborting_transaction(self):
        """Request an abort of the current transaction."""
        self._transition_to(TransactionState.ABORTING_TRANSACTION)
        # If we had an abortable error we need to create a new waiter
        if self._transaction_waiter.done():
            self._transaction_waiter = create_future(loop=self._loop)
        self.notify_task_waiter()

    def complete_transaction(self):
        """Finish a commit/abort: reset per-transaction state to READY."""
        assert not self._pending_txn_partitions
        assert not self._pending_txn_offsets
        self._transition_to(TransactionState.READY)
        self._txn_partitions.clear()
        self._txn_consumer_group = None
        if not self._transaction_waiter.done():
            self._transaction_waiter.set_result(None)

    def error_transaction(self, exc):
        """Fail the transaction with a recoverable (abortable) error."""
        self._transition_to(TransactionState.ABORTABLE_ERROR)
        self._txn_partitions.clear()
        self._txn_consumer_group = None
        self._pending_txn_partitions.clear()
        for _, _, fut in self._pending_txn_offsets:
            fut.set_exception(exc)
        self._pending_txn_offsets.clear()
        self._transaction_waiter.set_exception(exc)

    def fatal_error(self, exc):
        """Fail the transaction with an unrecoverable error."""
        self._transition_to(TransactionState.FATAL_ERROR)
        self._txn_partitions.clear()
        self._txn_consumer_group = None
        self._pending_txn_partitions.clear()
        for _, _, fut in self._pending_txn_offsets:
            fut.set_exception(exc)
        self._pending_txn_offsets.clear()
        # There may be an abortable error. We just override it
        if self._transaction_waiter.done():
            self._transaction_waiter = create_future(loop=self._loop)
        self._transaction_waiter.set_exception(exc)

    def maybe_add_partition_to_txn(self, tp: TopicPartition):
        """Queue *tp* for AddPartitionsToTxn unless already registered."""
        if self.transactional_id is None:
            return
        assert self.is_in_transaction()
        if tp not in self._txn_partitions:
            self._pending_txn_partitions.add(tp)
            self.notify_task_waiter()

    def add_offsets_to_txn(self, offsets, group_id):
        """Queue consumer *offsets* for commit; returns a completion future."""
        assert self.is_in_transaction()
        assert self.transactional_id
        fut = create_future(loop=self._loop)
        self._pending_txn_offsets.append(
            (group_id, offsets, fut)
        )
        self.notify_task_waiter()
        return fut

    def is_in_transaction(self):
        return self.state == TransactionState.IN_TRANSACTION

    def partitions_to_add(self):
        return self._pending_txn_partitions

    def consumer_group_to_add(self):
        # Only one group can be pending at a time; None once registered.
        if self._txn_consumer_group is not None:
            return
        for group_id, _, _ in self._pending_txn_offsets:
            return group_id

    def offsets_to_commit(self):
        # Offsets may only be committed after the consumer group was added.
        if self._txn_consumer_group is None:
            return
        for group_id, offsets, fut in self._pending_txn_offsets:
            return offsets, group_id, fut

    def partition_added(self, tp: TopicPartition):
        """Mark *tp* as registered with the transaction coordinator."""
        self._pending_txn_partitions.remove(tp)
        self._txn_partitions.add(tp)

    def consumer_group_added(self, group_id):
        self._txn_consumer_group = group_id

    def offset_committed(self, tp, offset, group_id):
        """Ack one committed offset from the head pending request."""
        pending_group_id, pending_offsets, fut = self._pending_txn_offsets[0]
        assert pending_group_id == group_id
        assert tp in pending_offsets and pending_offsets[tp].offset == offset
        del pending_offsets[tp]
        # Resolve the request future once every partition is acked.
        if not pending_offsets:
            fut.set_result(None)
            self._pending_txn_offsets.popleft()

    @property
    def txn_partitions(self):
        return self._txn_partitions

    def needs_transaction_commit(self):
        """Return the pending TransactionResult, or None if none pending."""
        if self.state == TransactionState.COMMITTING_TRANSACTION:
            return TransactionResult.COMMIT
        elif self.state == TransactionState.ABORTING_TRANSACTION:
            return TransactionResult.ABORT
        else:
            return

    def is_empty_transaction(self):
        # whether we sent either data to a partition or committed offset
        return (
            len(self.txn_partitions) == 0 and
            self._txn_consumer_group is None
        )

    def is_fatal_error(self):
        return self.state == TransactionState.FATAL_ERROR

    def wait_for_transaction_end(self):
        return self._transaction_waiter

    def notify_task_waiter(self):
        """Wake the background sender task if it is waiting for work."""
        if self._task_waiter is not None and not self._task_waiter.done():
            self._task_waiter.set_result(None)

    def make_task_waiter(self):
        """Create and return a fresh future the sender task can wait on."""
        self._task_waiter = create_future(loop=self._loop)
        return self._task_waiter
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Sample payload from the Robinhood portfolios endpoint; used by main()
# below as a smoke test for clean_portfolio().
EXAMPLE: Dict[str, Any] = {
    "url": "https://api.robinhood.com/portfolios/5PY78241/",
    "account": "https://api.robinhood.com/accounts/5PY78241/",
    "start_date": "2015-01-20",
    "market_value": "58974.7162",
    "equity": "45441.9162",
    "extended_hours_market_value": "57351.8149",
    "extended_hours_equity": "45380.2749",
    "extended_hours_portfolio_equity": "45380.2749",
    "last_core_market_value": "58974.7162",
    "last_core_equity": "45441.9162",
    "last_core_portfolio_equity": "45441.9162",
    "excess_margin": "13767.0327",
    "excess_maintenance": "25338.8665",
    "excess_margin_with_uncleared_deposits": "13767.0327",
    "excess_maintenance_with_uncleared_deposits": "25338.8665",
    "equity_previous_close": "45177.1802",
    "portfolio_equity_previous_close": "45177.1802",
    "adjusted_equity_previous_close": "45177.1802",
    "adjusted_portfolio_equity_previous_close": "45177.1802",
    "withdrawable_amount": "0.0000",
    "unwithdrawable_deposits": "0.0000",
    "unwithdrawable_grants": "0.0000",
}
@dataclass(frozen=True)
class Portfolio:
    """Snapshot of a Robinhood account portfolio.

    Field names mirror the API payload; monetary fields are floats and
    ``start_date`` a datetime after ``clean_portfolio`` has run.
    """

    url: str
    account: str
    start_date: datetime
    market_value: float
    equity: float
    extended_hours_market_value: float
    extended_hours_equity: float
    extended_hours_portfolio_equity: float
    last_core_market_value: float
    last_core_equity: float
    last_core_portfolio_equity: float
    excess_margin: float
    excess_maintenance: float
    excess_margin_with_uncleared_deposits: float
    excess_maintenance_with_uncleared_deposits: float
    equity_previous_close: float
    portfolio_equity_previous_close: float
    adjusted_equity_previous_close: float
    adjusted_portfolio_equity_previous_close: float
    withdrawable_amount: float
    unwithdrawable_deposits: float
    unwithdrawable_grants: float
def clean_portfolio(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw portfolio payload: numeric strings to floats and the
    start date to a datetime, without mutating the input mapping."""
    float_fields = [
        "market_value",
        "equity",
        "extended_hours_market_value",
        "extended_hours_equity",
        "extended_hours_portfolio_equity",
        "last_core_market_value",
        "last_core_equity",
        "last_core_portfolio_equity",
        "excess_margin",
        "excess_maintenance",
        "excess_margin_with_uncleared_deposits",
        "excess_maintenance_with_uncleared_deposits",
        "equity_previous_close",
        "portfolio_equity_previous_close",
        "adjusted_equity_previous_close",
        "adjusted_portfolio_equity_previous_close",
        "withdrawable_amount",
        "unwithdrawable_deposits",
        "unwithdrawable_grants",
    ]
    cleaned = deepcopy(input_data)
    cleaned = convert_floats(cleaned, float_fields)
    return convert_dates(cleaned, ["start_date"])
def main() -> None:
    """Demo: clean the sample payload and print the resulting Portfolio."""
    print(Portfolio(**clean_portfolio(EXAMPLE)))


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List
from robinhood_commons.entity.option_type import OptionType
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Sample option legs and aggregate-position payloads from the Robinhood
# options API; used by main() below as a smoke test.
LEGS_1: List[Dict[str, Any]] = [
    {
        "id": "4f46d8fc-7fad-4f41-a938-5d087945d52c",
        "position": "https://a.r.com/options/positions/bfc6da87-c8c2-4532-b03e-34a9812503b0/",
        "position_type": "short",
        "option": "https://a.r.com/options/instruments/664ae954-0d19-4126-a985-efaddd77ceec/",
        "ratio_quantity": 1,
        "expiration_date": "2020-06-26",
        "strike_price": "6.5000",
        "option_type": "call",
    }
]
LEGS_2: List[Dict[str, Any]] = [
    {
        "id": "2f3ed3f1-2ced-4e6a-8d59-34b690f84da1",
        "position": "https://a.r.com/options/positions/4fe8bc89-dea1-4f53-996d-d660f31cb64e/",
        "position_type": "short",
        "option": "https://a.r.com/options/instruments/e84ca9cd-89a1-4697-8801-6bc1573726cf/",
        "ratio_quantity": 1,
        "expiration_date": "2020-06-19",
        "strike_price": "6.0000",
        "option_type": "call",
    }
]
# TODO: create strategy enum...short_call...etc.
EXAMPLES: List[Dict[str, Any]] = [
    {
        "id": "9cb5159b-9450-4772-a2bb-0949f487ee97",
        "chain": "https://a.r.com/options/chains/f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7/",
        "symbol": "MRO",
        "strategy": "short_call",
        "average_open_price": "39.0000",
        "legs": LEGS_1,
        "quantity": "2.0000",
        "intraday_average_open_price": "0.0000",
        "intraday_quantity": "0.0000",
        "direction": "credit",
        "intraday_direction": "debit",
        "trade_value_multiplier": "100.0000",
        "created_at": "2020-05-21T13:35:21.353974Z",
        "updated_at": "2020-05-21T13:36:10.830280Z",
    },
    {
        "id": "5bef3293-55d1-48cc-afa5-627f2946eb01",
        "chain": "https://a.r.com/options/chains/f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7/",
        "symbol": "MRO",
        "strategy": "short_call",
        "average_open_price": "55.0000",
        "legs": LEGS_2,
        "quantity": "2.0000",
        "intraday_average_open_price": "0.0000",
        "intraday_quantity": "0.0000",
        "direction": "credit",
        "intraday_direction": "debit",
        "trade_value_multiplier": "100.0000",
        "created_at": "2020-05-08T13:33:24.102329Z",
        "updated_at": "2020-05-08T13:33:24.291919Z",
    },
]
@dataclass(frozen=True)
class AggregatePosition:
    """An option strategy position (one or more legs) for a symbol."""

    id: str
    chain: str
    symbol: str
    strategy: str
    average_open_price: float
    legs: List[Leg]
    quantity: float
    intraday_average_open_price: float
    intraday_quantity: float
    direction: str
    intraday_direction: str
    trade_value_multiplier: float
    created_at: datetime
    updated_at: datetime
@dataclass(frozen=True)
class Leg:
    """One leg of an option strategy position."""

    id: str
    position: str
    position_type: str  # TODO: enum
    option: str
    ratio_quantity: int
    expiration_date: datetime
    strike_price: float
    option_type: OptionType
def clean_aggregate_position(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw aggregate-position payload: parse each leg into a
    Leg, numeric strings into floats, and dates into datetimes."""
    cleaned = deepcopy(input_data)
    parsed_legs = []
    for raw_leg in cleaned["legs"]:
        parsed_legs.append(Leg(**clean_leg(raw_leg)))
    cleaned["legs"] = parsed_legs
    float_fields = [
        "average_open_price",
        "quantity",
        "intraday_average_open_price",
        "intraday_quantity",
        "trade_value_multiplier",
    ]
    cleaned = convert_floats(cleaned, float_fields)
    return convert_dates(cleaned, ["expiration_date"])
def clean_leg(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw option leg: enum-ify option_type, float-ify the
    strike price and parse the expiration date."""
    leg = deepcopy(input_data)
    leg["option_type"] = OptionType.to_enum(leg["option_type"])
    leg = convert_floats(leg, ["strike_price"])
    return convert_dates(leg, ["expiration_date"])
def main() -> None:
    """Demo: clean and print the sample aggregate positions."""
    print([AggregatePosition(**clean_aggregate_position(raw)) for raw in EXAMPLES])


if __name__ == "__main__":
    main()
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Union
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Sample fundamentals payload from the Robinhood API; used by main() below
# as a smoke test for clean_fundamentals().
EXAMPLE: Dict[str, Union[str, int]] = dict(
    open="6.310000",
    high="6.785000",
    low="6.192000",
    volume="42124974.000000",
    market_date="2020-12-14",
    average_volume_2_weeks="57372131.900000",
    average_volume="57372131.900000",
    high_52_weeks="14.700000",
    dividend_yield="2.610970",
    float="787326991.576000",
    low_52_weeks="3.020000",
    market_cap="4947353120.000000",
    pb_ratio="0.501716",
    pe_ratio="23.915100",
    shares_outstanding="790312000.000000",
    description="Marathon Oil Corp. engages in the exploration, production, and marketing of liquid hydrocarbons and natural gas. It operates through the following two segments: United States (U. S.) and International. The U. S. segment engages in oil and gas exploration, development and production activities in the U.S. The International segment engages in oil and gas development and production across international locations primarily in Equatorial Guinea and the United Kingdom. The company was founded in 1887 and is headquartered in Houston, TX.",
    instrument="https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
    ceo="Lee M. Tillman",
    headquarters_city="Houston",
    headquarters_state="Texas",
    sector="Energy Minerals",
    industry="Oil & Gas Production",
    num_employees=2000,
    year_founded=1887,
    symbol="MRO",
)
@dataclass(frozen=True)
class Fundamentals:
    """Company fundamentals for a symbol.

    Field names mirror the API payload exactly; note that the ``float``
    field (shares available for trading) shadows the builtin name because
    the payload key is literally "float".
    """

    open: float
    high: float
    low: float
    volume: float
    market_date: str
    average_volume_2_weeks: float
    average_volume: float
    high_52_weeks: float
    dividend_yield: float
    float: float
    low_52_weeks: float
    market_cap: float
    pb_ratio: float
    pe_ratio: float
    shares_outstanding: float
    description: str
    instrument: str
    ceo: str
    headquarters_city: str
    headquarters_state: str
    sector: str
    industry: str
    num_employees: int
    year_founded: int
    symbol: str
def clean_fundamentals(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw fundamentals payload (numeric strings to floats),
    without mutating the input mapping."""
    data = deepcopy(input_data)
    data = convert_floats(
        data,
        [
            "open",
            "high",
            "low",
            "volume",
            "average_volume_2_weeks",
            "average_volume",
            "high_52_weeks",
            "low_52_weeks",
            "market_cap",
            "shares_outstanding",
        ],
    )
    # Ratio-style fields get an explicit 0.00 default.
    data = convert_floats(data, ["dividend_yield", "float", "pb_ratio", "pe_ratio"], 0.00)
    # NOTE(review): the payload has no "datetime" key (the date field is
    # "market_date", typed str on Fundamentals), so this looks like a
    # copy-paste no-op from the earnings module -- confirm intent.
    data = convert_dates(data, ["datetime"])
    return data
def main() -> None:
    """Demo: clean the sample payload and print the Fundamentals."""
    print(Fundamentals(**clean_fundamentals(EXAMPLE)))


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Sample quarterly-earnings payloads from the Robinhood API; used by main()
# below as a smoke test for clean_earnings().
EXAMPLE: List[Dict[str, Any]] = [
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2018,
        "quarter": 3,
        "eps": {"estimate": "0.210000", "actual": "0.240000"},
        "report": {"date": "2018-11-07", "timing": "pm", "verified": True},
        "call": None,
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2018,
        "quarter": 4,
        "eps": {"estimate": "0.150000", "actual": "0.150000"},
        "report": {"date": "2019-02-13", "timing": "pm", "verified": True},
        "call": None,
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2019,
        "quarter": 1,
        "eps": {"estimate": "0.060000", "actual": "0.310000"},
        "report": {"date": "2019-05-01", "timing": "pm", "verified": True},
        "call": None,
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2019,
        "quarter": 2,
        "eps": {"estimate": "0.150000", "actual": "0.230000"},
        "report": {"date": "2019-08-07", "timing": "pm", "verified": True},
        "call": {
            "datetime": "2019-08-08T13:00:00Z",
            "broadcast_url": None,
            "replay_url": "http://mmm.wallstreethorizon.com/u.asp?u=231423",
        },
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2019,
        "quarter": 3,
        "eps": {"estimate": "0.070000", "actual": "0.140000"},
        "report": {"date": "2019-11-06", "timing": "pm", "verified": True},
        "call": {
            "datetime": "2019-11-07T14:00:00Z",
            "broadcast_url": None,
            "replay_url": "http://mmm.wallstreethorizon.com/u.asp?u=295100",
        },
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2019,
        "quarter": 4,
        "eps": {"estimate": "0.100000", "actual": "0.070000"},
        "report": {"date": "2020-02-12", "timing": "pm", "verified": True},
        "call": {
            "datetime": "2020-02-13T14:00:00Z",
            "broadcast_url": None,
            "replay_url": "http://mmm.wallstreethorizon.com/u.asp?u=307075",
        },
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2020,
        "quarter": 1,
        "eps": {"estimate": "-0.140000", "actual": "-0.160000"},
        "report": {"date": "2020-05-06", "timing": "pm", "verified": True},
        "call": {
            "datetime": "2020-05-07T13:00:00Z",
            "broadcast_url": None,
            "replay_url": "http://mmm.wallstreethorizon.com/u.asp?u=307075",
        },
    },
    {
        "symbol": "MRO",
        "instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
        "year": 2020,
        "quarter": 2,
        "eps": {"estimate": None, "actual": None},
        "report": {"date": "2020-08-05", "timing": "pm", "verified": False},
        "call": None,
    },
]
@dataclass(frozen=True)
class Earnings:
    """One quarterly earnings entry for a symbol."""

    symbol: str
    instrument: str
    year: int
    quarter: int
    eps: EarningsPerShare
    # report/call may be absent in the payload (see clean_earnings).
    report: Optional[EarningsReport]
    call: Optional[EarningsCall]
@dataclass(frozen=True)
class EarningsPerShare:
    """Estimated vs. actual earnings per share for the quarter."""

    estimate: float
    actual: float
@dataclass(frozen=True)
class EarningsReport:
    """When the earnings report is (or was) released."""

    date: datetime
    timing: str
    verified: bool
@dataclass(frozen=True)
class EarningsCall:
    """Earnings-call schedule and recording links (URLs may be absent)."""

    datetime: datetime
    broadcast_url: Optional[str]
    replay_url: Optional[str]
def clean_earnings(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize one raw earnings payload: wrap the eps/report/call
    sub-dicts in their dataclasses (report/call stay None when absent)."""
    data = deepcopy(input_data)
    data["eps"] = EarningsPerShare(**clean_earnings_per_share(data["eps"]))
    raw_report = data["report"]
    data["report"] = None if raw_report is None else EarningsReport(**clean_earnings_report(raw_report))
    raw_call = data["call"]
    data["call"] = None if raw_call is None else EarningsCall(**clean_earnings_call(raw_call))
    return data
def clean_earnings_per_share(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Coerce EPS estimate/actual to floats, defaulting missing values to 0.0."""
    eps_fields = ["estimate", "actual"]
    return convert_floats(input_data, eps_fields, 0.00)
def clean_earnings_report(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Parse the report's date string into a datetime."""
    date_fields = ["date"]
    return convert_dates(input_data, date_fields)
def clean_earnings_call(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Parse the call's datetime string into a datetime."""
    date_fields = ["datetime"]
    return convert_dates(input_data, date_fields)
def main() -> None:
    """Demo: clean each sample entry and print the Earnings list."""
    cleaned: List[Earnings] = [Earnings(**clean_earnings(entry)) for entry in EXAMPLE]
    print(cleaned)


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from enum import Enum, auto
from typing import Any, Dict, List
from robinhood_commons.entity.printable import Printable
from robinhood_commons.util.date_utils import convert_dates
# Sample analyst-ratings payload from the Robinhood API (rating text comes
# back as bytes); used by main() below as a smoke test.
EXAMPLE: Dict[str, Any] = {
    "summary": {"num_buy_ratings": 7, "num_hold_ratings": 18, "num_sell_ratings": 3},
    "ratings": [
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "buy",
            "text": b"Marathon was one of the first U.S. shale companies to establish a track record for free cash flow generation.",
        },
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "buy",
            "text": b'Marathon\'s acreage in the Bakken and Eagle Ford plays overlaps the juiciest "sweet spots" and enables the firm to deliver initial production rates far above the respective averages.',
        },
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "buy",
            "text": b"Holding acreage in the top four liquids-rich shale plays enables management to sidestep transport bottlenecks and avoid overpaying for equipment and services in areas experiencing temporary demand surges. ",
        },
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "sell",
            "text": b"Marathon's Delaware Basin acreage is relatively fragmented, limiting the scope to boost profitability by utilizing longer laterals.",
        },
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "sell",
            "text": b'Not all of Marathon\'s acreage is ideally located--well productivity could decline when the firm runs out of drilling opportunities in "sweet spots."',
        },
        {
            "published_at": "2020-06-06T00:55:32Z",
            "type": "sell",
            "text": b"Marathon is unable to earn its cost of capital due to prior investments in higher-cost resources.",
        },
    ],
    "instrument_id": "ab4f79fc-f84a-4f7b-8132-4f3e5fb38075",
    "ratings_published_at": "2020-06-06T00:55:32Z",
}
@dataclass(frozen=True)
class Ratings:
    """Analyst ratings for an instrument: per-type counts plus the
    individual rating entries."""

    summary: Dict[RatingType, int]
    ratings: List[Rating]
    instrument_id: str
    ratings_published_at: datetime
@dataclass(frozen=True)
class Rating:
    """A single analyst rating with its publication time and rationale."""

    published_at: datetime
    type: RatingType
    text: str
class RatingType(Printable, Enum):
    """Analyst rating buckets."""

    BUY = auto()
    SELL = auto()
    HOLD = auto()

    @staticmethod
    def to_enum(value: str) -> RatingType:
        """Look up a member by case-insensitive name."""
        return RatingType[value.upper()]
def clean_ratings(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw ratings payload: parse individual ratings, re-key
    the summary counts by RatingType, and parse the published-at stamp."""
    data = deepcopy(input_data)
    data["ratings"] = [Rating(**clean_rating(raw)) for raw in data["ratings"]]
    # Map summary keys to RatingType enum
    key_to_type = {
        "num_buy_ratings": RatingType.BUY,
        "num_hold_ratings": RatingType.HOLD,
        "num_sell_ratings": RatingType.SELL,
    }
    summary: Dict[RatingType, int] = {}
    for summary_key, count in data["summary"].items():
        rating_type = key_to_type.get(summary_key)
        if rating_type is None:
            print(f"WARNING: Rating Summary Type: {summary_key} not found.")
        else:
            summary[rating_type] = count
    data["summary"] = summary
    return convert_dates(data, ["ratings_published_at"])
def clean_rating(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize one rating entry: enum-ify type and parse published_at."""
    rating = deepcopy(input_data)
    rating["type"] = RatingType.to_enum(rating["type"])
    return convert_dates(rating, ["published_at"])
def main() -> None:
    """Demo entry point: normalize the sample payload and print it.

    Bug fix: this previously passed the raw EXAMPLE straight to Ratings,
    skipping clean_ratings(), so `summary` kept raw string keys instead of
    RatingType keys and `ratings`/`ratings_published_at` stayed unparsed
    dicts/strings. Route through clean_ratings() like the sibling modules
    (portfolio, earnings, fundamentals) do.
    """
    ratings = Ratings(**clean_ratings(EXAMPLE))
    print(ratings)


if __name__ == "__main__":
    main()
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional
from robinhood_commons.entity.option_type import OptionType
from robinhood_commons.entity.state import State
from robinhood_commons.entity.tick import Tick, clean_tick
from robinhood_commons.entity.tradability import Tradability
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
EXAMPLE: Dict[str, Any] = {
"chain_id": "f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7",
"chain_symbol": "MRO",
"created_at": "2020-06-10T00:13:05.407629Z",
"expiration_date": "2020-06-19",
"id": "7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb",
"issue_date": "1991-05-06",
"min_ticks": {"above_tick": "0.05", "below_tick": "0.01", "cutoff_price": "3.00"},
"rhs_tradability": "untradable",
"state": "active",
"strike_price": "13.0000",
"tradability": "tradable",
"type": "call",
"updated_at": "2020-06-10T00:13:05.407639Z",
"url": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"sellout_datetime": "2020-06-19T18:45:00+00:00",
}
STATS_EXAMPLE: Dict[str, Any] = {
"chain_id": "f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7",
"chain_symbol": "MRO",
"created_at": "2020-06-10T00:13:05.407629Z",
"expiration_date": "2020-06-19",
"id": "7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb",
"issue_date": "1991-05-06",
"min_ticks": {"above_tick": "0.05", "below_tick": "0.01", "cutoff_price": "3.00"},
"rhs_tradability": "untradable",
"state": "active",
"strike_price": "13.0000",
"tradability": "tradable",
"type": "call",
"updated_at": "2020-06-10T00:13:05.407639Z",
"url": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"sellout_datetime": "2020-06-19T18:45:00+00:00",
"adjusted_mark_price": "0.010000",
"ask_price": "0.020000",
"ask_size": 10,
"bid_price": "0.000000",
"bid_size": 0,
"break_even_price": "13.010000",
"high_price": None,
"instrument": "https://api.robinhood.com/options/instruments/7f7720a3-ccc1-45f4-b7be-37cb8ef69cbb/",
"last_trade_price": "0.040000",
"last_trade_size": 1,
"low_price": None,
"mark_price": "0.010000",
"open_interest": 1,
"previous_close_date": "2020-06-11",
"previous_close_price": "0.010000",
"volume": 0,
"chance_of_profit_long": "0.007166",
"chance_of_profit_short": "0.992834",
"delta": "0.015780",
"gamma": "0.020289",
"implied_volatility": "2.139463",
"rho": "0.000018",
"theta": "-0.005508",
"vega": "0.000360",
"high_fill_rate_buy_price": "0.020000",
"high_fill_rate_sell_price": "0.000000",
"low_fill_rate_buy_price": "0.000000",
"low_fill_rate_sell_price": "0.010000",
}
@dataclass(frozen=True)
class Option:
    """An options instrument, optionally enriched with market-data statistics.

    The non-optional fields are always present (see EXAMPLE); the Optional
    fields are populated only in stats payloads (see STATS_EXAMPLE).
    """

    chain_id: str
    chain_symbol: str
    created_at: datetime
    expiration_date: datetime
    id: str
    issue_date: datetime
    min_ticks: Tick
    rhs_tradability: Tradability
    state: State
    strike_price: float
    tradability: Tradability
    type: OptionType  # call / put
    updated_at: datetime
    url: str
    sellout_datetime: datetime
    # --- market-data stats: present only in stats payloads, otherwise None ---
    adjusted_mark_price: Optional[float] = None
    ask_price: Optional[float] = None
    ask_size: Optional[int] = None
    bid_price: Optional[float] = None
    bid_size: Optional[int] = None
    break_even_price: Optional[float] = None
    high_price: Optional[float] = None
    instrument: Optional[str] = None
    last_trade_price: Optional[float] = None
    last_trade_size: Optional[int] = None
    low_price: Optional[float] = None
    mark_price: Optional[float] = None
    open_interest: Optional[int] = None
    previous_close_date: Optional[datetime] = None
    previous_close_price: Optional[float] = None
    volume: Optional[int] = None
    # --- greeks / probabilities (stats payloads only) ---
    chance_of_profit_long: Optional[float] = None
    chance_of_profit_short: Optional[float] = None
    delta: Optional[float] = None
    gamma: Optional[float] = None
    implied_volatility: Optional[float] = None
    rho: Optional[float] = None
    theta: Optional[float] = None
    vega: Optional[float] = None
    high_fill_rate_buy_price: Optional[float] = None
    high_fill_rate_sell_price: Optional[float] = None
    low_fill_rate_buy_price: Optional[float] = None
    low_fill_rate_sell_price: Optional[float] = None
def clean_option(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw option payload into Option constructor kwargs.

    Builds the nested Tick, converts enum-valued strings to their enums,
    coerces numeric strings to floats, and parses date fields. The input
    dict is not mutated.
    """
    data = deepcopy(input_data)
    data["min_ticks"] = Tick(**clean_tick(data["min_ticks"]))
    data["rhs_tradability"] = Tradability.to_enum(data["rhs_tradability"])
    data["state"] = State.to_enum(data["state"])
    data["tradability"] = Tradability.to_enum(data["tradability"])
    data["type"] = OptionType.to_enum(data["type"])
    # NOTE(review): ask_size / bid_size / open_interest / volume are annotated
    # Optional[int] on Option but are run through convert_floats — confirm
    # whether float coercion of those fields is intended.
    numeric_fields = [
        "strike_price",
        "adjusted_mark_price",
        "ask_price",
        "ask_size",
        "bid_price",
        "bid_size",
        "break_even_price",
        "high_price",
        "last_trade_price",
        "last_trade_size",
        "low_price",
        "mark_price",
        "open_interest",
        "previous_close_price",
        "volume",
        "chance_of_profit_long",
        "chance_of_profit_short",
        "delta",
        "gamma",
        "implied_volatility",
        "rho",
        "theta",
        "vega",
        "high_fill_rate_buy_price",
        "high_fill_rate_sell_price",
        "low_fill_rate_buy_price",
        "low_fill_rate_sell_price",
    ]
    data = convert_floats(data, numeric_fields)
    return convert_dates(data, ["expiration_date", "issue_date", "sellout_datetime", "previous_close_date"])
def main() -> None:
    """Demo: parse both example payloads into Option instances and print them."""
    for payload in (EXAMPLE, STATS_EXAMPLE):
        print(Option(**clean_option(payload)))


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from enum import Enum, auto
from typing import Any, Dict, List
from robinhood_commons.entity.price import Price, clean_price
from robinhood_commons.entity.printable import Printable
from robinhood_commons.util.date_utils import convert_dates
EXAMPLE: Dict[str, List[Dict[Any]]] = {
"asks": [
{"side": "ask", "price": {"amount": "6.730000", "currency_code": "USD"}, "quantity": 125},
{"side": "ask", "price": {"amount": "6.800000", "currency_code": "USD"}, "quantity": 500},
{"side": "ask", "price": {"amount": "6.810000", "currency_code": "USD"}, "quantity": 22},
{"side": "ask", "price": {"amount": "6.850000", "currency_code": "USD"}, "quantity": 1500},
{"side": "ask", "price": {"amount": "6.860000", "currency_code": "USD"}, "quantity": 10000},
{"side": "ask", "price": {"amount": "6.990000", "currency_code": "USD"}, "quantity": 200},
{"side": "ask", "price": {"amount": "7.000000", "currency_code": "USD"}, "quantity": 5152},
{"side": "ask", "price": {"amount": "7.010000", "currency_code": "USD"}, "quantity": 84},
{"side": "ask", "price": {"amount": "7.100000", "currency_code": "USD"}, "quantity": 500},
{"side": "ask", "price": {"amount": "7.150000", "currency_code": "USD"}, "quantity": 76},
{"side": "ask", "price": {"amount": "7.190000", "currency_code": "USD"}, "quantity": 6980},
{"side": "ask", "price": {"amount": "7.200000", "currency_code": "USD"}, "quantity": 800},
{"side": "ask", "price": {"amount": "7.210000", "currency_code": "USD"}, "quantity": 400},
{"side": "ask", "price": {"amount": "7.240000", "currency_code": "USD"}, "quantity": 250},
{"side": "ask", "price": {"amount": "7.250000", "currency_code": "USD"}, "quantity": 950},
{"side": "ask", "price": {"amount": "7.400000", "currency_code": "USD"}, "quantity": 100},
{"side": "ask", "price": {"amount": "7.470000", "currency_code": "USD"}, "quantity": 200},
{"side": "ask", "price": {"amount": "7.500000", "currency_code": "USD"}, "quantity": 300},
{"side": "ask", "price": {"amount": "7.650000", "currency_code": "USD"}, "quantity": 2000},
{"side": "ask", "price": {"amount": "7.750000", "currency_code": "USD"}, "quantity": 655},
{"side": "ask", "price": {"amount": "8.000000", "currency_code": "USD"}, "quantity": 800},
{"side": "ask", "price": {"amount": "8.080000", "currency_code": "USD"}, "quantity": 975},
{"side": "ask", "price": {"amount": "8.090000", "currency_code": "USD"}, "quantity": 16000},
{"side": "ask", "price": {"amount": "8.200000", "currency_code": "USD"}, "quantity": 1000},
{"side": "ask", "price": {"amount": "8.210000", "currency_code": "USD"}, "quantity": 100},
{"side": "ask", "price": {"amount": "8.280000", "currency_code": "USD"}, "quantity": 1233},
{"side": "ask", "price": {"amount": "8.300000", "currency_code": "USD"}, "quantity": 1400},
{"side": "ask", "price": {"amount": "8.460000", "currency_code": "USD"}, "quantity": 500},
{"side": "ask", "price": {"amount": "8.500000", "currency_code": "USD"}, "quantity": 120},
{"side": "ask", "price": {"amount": "8.550000", "currency_code": "USD"}, "quantity": 70},
{"side": "ask", "price": {"amount": "8.780000", "currency_code": "USD"}, "quantity": 520},
{"side": "ask", "price": {"amount": "8.900000", "currency_code": "USD"}, "quantity": 100},
{"side": "ask", "price": {"amount": "9.000000", "currency_code": "USD"}, "quantity": 50},
{"side": "ask", "price": {"amount": "9.250000", "currency_code": "USD"}, "quantity": 538},
{"side": "ask", "price": {"amount": "9.500000", "currency_code": "USD"}, "quantity": 215},
{"side": "ask", "price": {"amount": "9.980000", "currency_code": "USD"}, "quantity": 21499},
{"side": "ask", "price": {"amount": "10.000000", "currency_code": "USD"}, "quantity": 20010},
{"side": "ask", "price": {"amount": "10.930000", "currency_code": "USD"}, "quantity": 3},
{"side": "ask", "price": {"amount": "12.000000", "currency_code": "USD"}, "quantity": 2410},
{"side": "ask", "price": {"amount": "12.250000", "currency_code": "USD"}, "quantity": 300},
],
"bids": [
{"side": "bid", "price": {"amount": "6.500000", "currency_code": "USD"}, "quantity": 900},
{"side": "bid", "price": {"amount": "6.420000", "currency_code": "USD"}, "quantity": 60},
{"side": "bid", "price": {"amount": "6.360000", "currency_code": "USD"}, "quantity": 8000},
{"side": "bid", "price": {"amount": "6.300000", "currency_code": "USD"}, "quantity": 500},
{"side": "bid", "price": {"amount": "6.200000", "currency_code": "USD"}, "quantity": 500},
{"side": "bid", "price": {"amount": "6.120000", "currency_code": "USD"}, "quantity": 140},
{"side": "bid", "price": {"amount": "6.000000", "currency_code": "USD"}, "quantity": 110},
{"side": "bid", "price": {"amount": "5.990000", "currency_code": "USD"}, "quantity": 1669},
{"side": "bid", "price": {"amount": "5.900000", "currency_code": "USD"}, "quantity": 40},
{"side": "bid", "price": {"amount": "5.540000", "currency_code": "USD"}, "quantity": 1500},
{"side": "bid", "price": {"amount": "5.500000", "currency_code": "USD"}, "quantity": 190},
{"side": "bid", "price": {"amount": "4.750000", "currency_code": "USD"}, "quantity": 5},
{"side": "bid", "price": {"amount": "4.100000", "currency_code": "USD"}, "quantity": 200},
{"side": "bid", "price": {"amount": "4.000000", "currency_code": "USD"}, "quantity": 800},
{"side": "bid", "price": {"amount": "3.500000", "currency_code": "USD"}, "quantity": 800},
{"side": "bid", "price": {"amount": "3.020000", "currency_code": "USD"}, "quantity": 165},
{"side": "bid", "price": {"amount": "3.000000", "currency_code": "USD"}, "quantity": 255},
{"side": "bid", "price": {"amount": "1.500000", "currency_code": "USD"}, "quantity": 50},
],
"instrument_id": "ab4f79fc-f84a-4f7b-8132-4f3e5fb38075",
"updated_at": "2020-06-16T17:22:36.399570266-04:00",
}
@dataclass(frozen=True)
class Offers:
    """A level-2 order book snapshot for one instrument (see EXAMPLE)."""

    asks: List[Offer]  # sell-side entries
    bids: List[Offer]  # buy-side entries
    instrument_id: str  # Robinhood instrument UUID
    updated_at: datetime
@dataclass(frozen=True)
class Offer:
    """One order-book entry: a price level and the quantity resting there."""

    side: str  # "ask" or "bid" in the example payload
    price: Price
    quantity: int
class OfferType(Printable, Enum):
    """Side of the order book an offer sits on."""

    ASK = auto()
    BID = auto()

    @staticmethod
    def to_enum(value: str) -> OfferType:
        """Look up a member by case-insensitive name; raises KeyError on unknown values."""
        return OfferType[value.upper()]
def clean_offers(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw order-book payload into Offers constructor kwargs.

    Each ask/bid entry becomes an Offer; date fields are parsed by
    convert_dates. The input dict is not mutated.
    """
    data = deepcopy(input_data)
    for side in ("asks", "bids"):
        data[side] = [Offer(**clean_offer(entry)) for entry in data[side]]
    return convert_dates(data)
def clean_offer(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize one order-book entry: replace the raw price dict with a Price."""
    data = deepcopy(input_data)
    raw_price = data["price"]
    data["price"] = Price(**clean_price(raw_price))
    return data
def main() -> None:
    """Demo: parse the example order book and print it."""
    print(Offers(**clean_offers(EXAMPLE)))


if __name__ == "__main__":
    main()
from __future__ import annotations

from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional

from robinhood_commons.entity.tick import Tick, clean_tick
from robinhood_commons.util.date_utils import date_parse
from robinhood_commons.util.num_utils import convert_floats
EXAMPLE: Dict[str, Any] = {
"id": "f7ed1d28-55c4-4c76-abf5-3b16cb68a2e7",
"symbol": "MRO",
"can_open_position": True,
"cash_component": None,
"expiration_dates": [
"2020-06-19",
"2020-06-26",
"2020-07-02",
"2020-07-10",
"2020-07-17",
"2020-07-24",
"2020-07-31",
"2020-10-16",
"2021-01-15",
"2021-06-18",
"2021-09-17",
"2022-01-21",
],
"trade_value_multiplier": "100.0000",
"underlying_instruments": [
{
"id": "35f7f10a-3163-4889-a64c-df0166b2dcec",
"instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
"quantity": 100,
}
],
"min_ticks": {"above_tick": "0.05", "below_tick": "0.01", "cutoff_price": "3.00"},
}
@dataclass(frozen=True)
class Chain:
    """An options chain: expirations, underlying instruments, and tick rules.

    Matches the payload shape in EXAMPLE.
    """

    id: str
    symbol: str
    can_open_position: bool
    # EXAMPLE carries None here, so the annotation must be Optional.
    cash_component: Optional[str]
    expiration_dates: List[datetime]
    trade_value_multiplier: float
    underlying_instruments: List[UnderlyingInstruments]
    min_ticks: Tick
@dataclass(frozen=True)
class UnderlyingInstruments:
    """Link from an options chain to one underlying instrument."""

    id: str
    instrument: str  # URL of the underlying instrument resource
    quantity: float  # 100 in the example payload; presumably units per contract — confirm
def clean_chain(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw option-chain payload into Chain constructor kwargs.

    Parses expiration date strings, builds UnderlyingInstruments and Tick
    objects, and coerces the trade-value multiplier to float. The input
    dict is not mutated.
    """
    data = deepcopy(input_data)
    data["expiration_dates"] = [date_parse(raw) for raw in data["expiration_dates"]]
    data["underlying_instruments"] = [
        UnderlyingInstruments(**clean_underlying_instruments(entry))
        for entry in data["underlying_instruments"]
    ]
    data["min_ticks"] = Tick(**clean_tick(data["min_ticks"]))
    return convert_floats(data, ["trade_value_multiplier"])
def clean_underlying_instruments(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Coerce the quantity field of a raw underlying-instrument dict to float."""
    return convert_floats(deepcopy(input_data), ["quantity"])
def main() -> None:
    """Demo: parse the example chain payload and print it."""
    print(Chain(**clean_chain(EXAMPLE)))


if __name__ == "__main__":
    main()
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List
from robinhood_commons.util.date_utils import convert_dates
EXAMPLES: List[Dict[str, str]] = [
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/SFqNLCoF0LN_p9jvvLLKKhBWzSM/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9DaEVMNWZvcXJKaXZxYmhoZ0szS3hpcjJBdWcvYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOelEyTWpjdE1UVTVNVGt3T0RVd056QTROWFJ5ZFcxd0xtcHdaejkyUFRFMU9URTVNRGcxTmpF",
"published_at": "2020-06-16T12:35:00Z",
"relay_url": "https://news.robinhood.com/9c430c3c-d458-3a61-960a-e1ee4ead69d0/",
"source": "CNBC",
"summary": "",
"title": "Dow futures soar higher by 800 points after a record retail sales jump",
"updated_at": "2020-06-16T12:45:16.285881Z",
"url": "https://www.cnbc.com/2020/06/15/stock-market-futures-open-to-close-news.html",
"uuid": "9c430c3c-d458-3a61-960a-e1ee4ead69d0",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "Futures contracts tied to the major U.S. stock indexes rose sharply early Tuesday, pointing to further gains after a big comeback in the previous session.\n\nDow",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/8kZOCFXHoWLpCqTxCSugAp4Z-uI/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9jZ2tfNzNMeXVUZVFZUHVuYnBNY1ZCWkNjOHcvYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOemM0TVRBdE1UVTVNakl4T1RJNE1EVTRNbWRsZEhSNWFXMWhaMlZ6TFRFeU1UZzBORGd4TWpjdWFuQmxaejkyUFRFMU9USXlNVGt6TkRr",
"published_at": "2020-06-15T13:30:00Z",
"relay_url": "https://news.robinhood.com/e7822901-8b45-306f-b1f5-91e8cfb72ea4/",
"source": "CNBC",
"summary": "",
"title": "Dow drops more than 600 points as Wall Street adds to last weekโs sharp losses",
"updated_at": "2020-06-15T13:36:58.465376Z",
"url": "https://www.cnbc.com/2020/06/14/stock-market-futures-open-to-close-news.html",
"uuid": "e7822901-8b45-306f-b1f5-91e8cfb72ea4",
"related_instruments": ["bab3b12b-4216-4b01-b2d8-9587ee5f41cf", "8f92e76f-1e0e-4478-8580-16a6ffcfaef5"],
"preview_text": "Stocks dropped on Monday as investors grapple with signs of a second wave of coronavirus cases as the U.S. economy reopens.\n\nThe Dow Jones Industrial Average fe",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/FIcrAly-yZks69vv-0eCb6cIrL4/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9kU2Zaa0gxbzdSdFFWTWliRVJSTGZiYzAtR3MvYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOelUyTlRRdE1UVTVNVGs0TXpjNE1UVTBNMmx0WjE4eE1GODFOMTh5TURGZk1UVTROelkxTlY4eE1EQXdMVEUxT1RFNU9ETTJNRFUyTXpJdWFuQm5QM1k5TVRVNU1UazRNemd3TWc",
"published_at": "2020-06-12T17:45:00Z",
"relay_url": "https://news.robinhood.com/831bb0bf-e1cd-38d6-8a76-f1bd61cb995d/",
"source": "CNBC",
"summary": "",
"title": "Fridayโs comeback evaporates as S&P 500 turns negative, heads for big losing week",
"updated_at": "2020-06-12T17:49:43.327940Z",
"url": "https://www.cnbc.com/2020/06/11/stock-market-futures-open-to-close-news.html",
"uuid": "831bb0bf-e1cd-38d6-8a76-f1bd61cb995d",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5"],
"preview_text": "The S&P 500 and Nasdaq Composite gave up earlier gains on Friday as Wall Street struggled to recover from its worst session in three months. Stocks were on pace",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/wTdBSpqBFoa8aJVLbTJv1MtMlS4/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9zRE5zaXhDWFN2cTdQQ25wSHVSRTJsWEhFd00vYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOVFU0TWpNdE1UVTVNRFk1TXpRNU1UYzBOV2RsZEhSNWFXMWhaMlZ6TFRFeU1qY3hPVGcxTVRrdWFuQmxaejkyUFRFMU9URTVNVE01TURV",
"published_at": "2020-06-12T13:30:00Z",
"relay_url": "https://news.robinhood.com/3b45b8ed-7b87-34b4-ab6f-768e052d2a8c/",
"source": "CNBC",
"summary": "",
"title": "Dow jumps more than 600 points as Wall Street rebounds from its biggest sell-off since March",
"updated_at": "2020-06-12T13:32:02.738958Z",
"url": "https://www.cnbc.com/2020/06/11/stock-market-futures-open-to-close-news.html",
"uuid": "3b45b8ed-7b87-34b4-ab6f-768e052d2a8c",
"related_instruments": ["bab3b12b-4216-4b01-b2d8-9587ee5f41cf", "8f92e76f-1e0e-4478-8580-16a6ffcfaef5"],
"preview_text": "Stocks rallied on Friday, clawing back some of the sharp losses from Wall Street's worst day since March.\n\nThe Dow Jones Industrial Average traded 684 points hi",
"currency_id": "None",
},
{
"api_source": "",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/XBBlea2sb1NCkWYIfsxUm039QDg/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS92Z3VKd1ZkcVdRbHF4NXlQQmNXQmp3aUR0R3cvYUhSMGNITTZMeTlwYldGblpYTXVZM1JtWVhOelpYUnpMbTVsZEM5dGQzQm9lbmx4TmpsdmMyOHZOMGxhZERScVJrMXNla3RqWkU1MU1VSjBOakZMWVM4d1l6VmlOR00wT1RBelkySmxZbVU0WkdGaU1UVXlaRGM0T0RkbU5qVmpZaTlOUTBSVFFWQkJYMFZETURVeUxtcHdadw",
"published_at": "2020-06-12T11:00:00Z",
"relay_url": "https://news.robinhood.com/882570b0-31b5-3e91-bec1-2e94477ba872/",
"source": "Robinhood Snacks",
"summary": "",
"title": "Stocks have their worst day since March on investors' worst fear: A 2nd outbreak",
"updated_at": "2020-06-12T11:13:07.745923Z",
"url": "https://snacks.robinhood.com/newsletters/6PR0xbLSMdv6hGEyPZHPHD/articles/6PO7UWvf1aF3lIwGXwx8OA",
"uuid": "882570b0-31b5-3e91-bec1-2e94477ba872",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "Our Editorial Principles\n\nRobinhood Financial LLC and Robinhood Crypto, LLC are wholly-owned subsidiaries of Robinhood Markets, Inc. Equities and options are of",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/wTdBSpqBFoa8aJVLbTJv1MtMlS4/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9zRE5zaXhDWFN2cTdQQ25wSHVSRTJsWEhFd00vYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOVFU0TWpNdE1UVTVNRFk1TXpRNU1UYzBOV2RsZEhSNWFXMWhaMlZ6TFRFeU1qY3hPVGcxTVRrdWFuQmxaejkyUFRFMU9URTVNVE01TURV",
"published_at": "2020-06-12T10:00:00Z",
"relay_url": "https://news.robinhood.com/5fc3fe9b-db96-369d-bfd4-90e316034927/",
"source": "CNBC",
"summary": "",
"title": "Dow futures bounce 600 points higher as Wall Street tries to recover from its worst day since March",
"updated_at": "2020-06-12T10:33:13.976215Z",
"url": "https://www.cnbc.com/2020/06/11/stock-market-futures-open-to-close-news.html",
"uuid": "5fc3fe9b-db96-369d-bfd4-90e316034927",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "The Dow, S&P 500 and Nasdaq on Thursday all recorded their biggest one-day losses since mid-March, posting losses of at least 5.3%. Thursday's declines put the",
"currency_id": "None",
},
{
"api_source": "",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/JelKNdPB2krIqCEryoCWGwCyUT4/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9BRzVBeTFIWllWSWc5TG0zYkY1WVZSNjFJMTgvYUhSMGNITTZMeTl0WldScFlTNXVjSEl1YjNKbkwyRnpjMlYwY3k5cGJXY3ZNakF5TUM4d05pOHhNUzluWlhSMGVXbHRZV2RsY3kweE1qRTFNVGs1TWpFd0xURXRYM2RwWkdVdE1ERTJNREUxTm1Ga05EVTFNVEppWlRjMk9HSXpOek0yTWpZeE5tSTFaamd5TkRSalltRmxOaTVxY0djX2N6MHhOREF3",
"published_at": "2020-06-11T17:00:00Z",
"relay_url": "https://news.robinhood.com/f896a88b-2ea7-3cd7-8f77-bb2772215cba/",
"source": "NPR",
"summary": "",
"title": "Dow Tumbles 1,500 Points On Worries Of 2nd Wave As Coronavirus Cases Spike",
"updated_at": "2020-06-11T19:01:24.454573Z",
"url": "https://www.npr.org/sections/coronavirus-live-updates/2020/06/11/874600108/stocks-tumble-as-the-fed-warns-of-a-long-recovery-coronavirus-cases-spike",
"uuid": "f896a88b-2ea7-3cd7-8f77-bb2772215cba",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "Dow Tumbles 1,500 Points On Worries Of 2nd Wave As Coronavirus Cases Spike\n\nEnlarge this image toggle caption Johannes Eisele/AFP via Getty Images Johannes Eise",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/VqYWBBN7DP7UM1lFV0x8PM5NhfM/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS80QWd4emVtbTJIanlsYzBpbi1GWWVJWlA4VHMvYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTFOelF3TVRFdE1UVTVNVGc0TnpRM056WXlNbWx0WjE4eE1GODFOMTh4T1RKZk9UVTNOalUxWHpFd01EQXRNVFU1TVRnNE56STNOekV6TWk1cWNHY19kajB4TlRreE9EZzNOVEV4",
"published_at": "2020-06-11T14:40:00Z",
"relay_url": "https://news.robinhood.com/52fa19aa-1002-337b-87fd-de7de74ee3db/",
"source": "CNBC",
"summary": "",
"title": "Dow plunges 1,000 points, heads for worst day since April as airlines and retailers drop",
"updated_at": "2020-06-11T14:59:23.092528Z",
"url": "https://www.cnbc.com/2020/06/10/stock-market-futures-open-to-close-news.html",
"uuid": "52fa19aa-1002-337b-87fd-de7de74ee3db",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "Stocks fell sharply on Thursday as coronavirus cases increased in some states that are reopening up from lockdowns. Shares that have surged recently on hopes fo",
"currency_id": "None",
},
{
"api_source": "cnbc",
"author": "",
"num_clicks": 0,
"preview_image_url": "https://images.robinhood.com/l-hiORv0qMrcwuJBg99RCsdEtBc/aHR0cHM6Ly9pbWFnZXMucm9iaW5ob29kLmNvbS9MM2xsVjd0ZGYzc2s3M1lTNjM0RWxtOHZISjQvYUhSMGNITTZMeTlwYldGblpTNWpibUpqWm0wdVkyOXRMMkZ3YVM5Mk1TOXBiV0ZuWlM4eE1EWTBPRFl6T0RRdE1UVTROamc1T1RNek56TTNOM2RoYkd3dWFuQm5QM1k5TVRVNE9ESTFNamcxTmc",
"published_at": "2020-06-10T09:11:00Z",
"relay_url": "https://news.robinhood.com/d2d056fd-7de0-3303-8434-e338e5b99b46/",
"source": "CNBC",
"summary": "",
"title": "Dow futures little changed as investors await Fedโs forecast on the economy",
"updated_at": "2020-06-10T10:11:46.220319Z",
"url": "https://www.cnbc.com/2020/06/09/stock-market-futures-open-to-close-news.html",
"uuid": "d2d056fd-7de0-3303-8434-e338e5b99b46",
"related_instruments": ["8f92e76f-1e0e-4478-8580-16a6ffcfaef5", "bab3b12b-4216-4b01-b2d8-9587ee5f41cf"],
"preview_text": "Around 6 a.m. ET, Dow futures implied an opening loss of about 120 points. The S&P 500 and Nasdaq were also lower.\n\nU.S. stock futures fell in early trading Wed",
"currency_id": "None",
},
]
@dataclass(frozen=True)
class News:
    """One news item attached to one or more instruments (see EXAMPLES)."""

    api_source: str  # e.g. "cnbc"; empty string in some payloads
    author: str
    num_clicks: int
    preview_image_url: str
    published_at: datetime
    relay_url: str  # news.robinhood.com relay link
    source: str  # display name, e.g. "CNBC"
    summary: str
    title: str
    updated_at: datetime
    url: str  # canonical article URL
    uuid: str
    related_instruments: List[str]  # instrument UUIDs
    preview_text: str
    currency_id: str  # "None" (string) in the example payloads
def clean_news(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw news item: parse its date fields into datetimes.

    NOTE(review): only "published_at" is listed explicitly; presumably
    convert_dates also handles "updated_at" by default — confirm against
    its implementation.
    """
    return convert_dates(deepcopy(input_data), ["published_at"])
def main() -> None:
    """Demo: parse every example news item and print the resulting list."""
    items = [News(**clean_news(item)) for item in EXAMPLES]
    print(items)


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
EXAMPLE: Dict[str, str] = {
"instrument_url": "https://api.robinhood.com/instruments/3c582020-d702-4c8a-b69a-4df7c57d0f49/",
"symbol": "PVH",
"updated_at": "2020-06-12T21:16:58.252906Z",
"price_movement": {"market_hours_last_movement_pct": "-5.90", "market_hours_last_price": "49.6100"},
"description": "PVH Corp. engages in the design and marketing of branded dress shirts, neckwear, sportswear, jeans wear, intimate apparel, swim products, handbags, footwear, and other related products. It operates through the following segments: Calvin Klein North America, Calvin Klein International, Tommy Hilfiger North America, Tommy Hilfiger International, Heritage Brands Wholesale, and Heritage Brands Retail. The Calvin Klein North America and Calvin Klein International segment operates in North America; and Europe, Asia, and Brazil respectively. It sells its products under the brand names CALVIN KLEIN 205 W39 NYC, CK Calvin Klein, and CALVIN KLEIN. The Tommy Hilfiger North America and Tommy Hilfiger International segment wholesales in North America; and Europe and China respectively. It consists of Tommy Hilfiger, Hilfiger Denim, Hilfiger Collection, and Tommy Hilfiger Tailored brands. The Heritage Brands Wholesale segment markets its products to department, chain, and specialty stores, digital commerce sites operated by select wholesale partners and pure play digital commerce retailers in North America. The Heritage Brands Retail segment manages retail stores, primarily located in outlet centers throughout the United States and Canada. PVH was founded in 1881 and is headquartered in New York, NY.",
}
@dataclass(frozen=True)
class Mover:
    """A top-moving instrument with its intraday price movement (see EXAMPLE)."""

    instrument_url: str  # URL of the instrument resource
    symbol: str
    updated_at: datetime
    price_movement: PriceMovement
    description: str  # long-form company description
@dataclass(frozen=True)
class PriceMovement:
    """Market-hours movement of a mover."""

    market_hours_last_movement_pct: float  # signed percent change, e.g. -5.90
    market_hours_last_price: float
def clean_mover(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize a raw mover payload into Mover constructor kwargs.

    Builds the nested PriceMovement and parses date fields; the input dict
    is not mutated.
    """
    data = deepcopy(input_data)
    movement = clean_price_movement(data["price_movement"])
    data["price_movement"] = PriceMovement(**movement)
    return convert_dates(data)
def clean_price_movement(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Coerce the price-movement percentage and price strings to floats."""
    return convert_floats(deepcopy(input_data), ["market_hours_last_movement_pct", "market_hours_last_price"])
def main() -> None:
    """Demo: parse the example mover payload and print it."""
    print(Mover(**clean_mover(EXAMPLE)))


if __name__ == "__main__":
    main()
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Union, Tuple
from robinhood_commons.entity.execution_type import ExecutionType
from robinhood_commons.entity.price import Price, clean_price
from robinhood_commons.entity.state import State
from robinhood_commons.entity.time_in_force import TimeInForce
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
ERROR_DETAIL_KEY: str = "detail"
EXAMPLE: Dict[str, Union[str, bool, List[Any], Dict[str, str]]] = {
"id": "845aa52f-f6b4-42e4-9ec7-6f4978a90bbb",
"ref_id": "ee2e875c-dbff-463d-8a12-3d3e00b76a5f",
"url": "https://api.robinhood.com/orders/845aa52f-f6b4-42e4-9ec7-6f4978a90bbb/",
"account": "https://api.robinhood.com/accounts/5PY78241/",
"position": "https://api.robinhood.com/positions/5PY78241/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
"cancel": "https://api.robinhood.com/orders/845aa52f-f6b4-42e4-9ec7-6f4978a90bbb/cancel/",
"instrument": "https://api.robinhood.com/instruments/ab4f79fc-f84a-4f7b-8132-4f3e5fb38075/",
"cumulative_quantity": "0.00000000",
"average_price": None,
"fees": "0.00",
"state": "unconfirmed",
"type": "market",
"side": "buy",
"time_in_force": "gtc",
"trigger": "immediate",
"price": "6.70000000",
"stop_price": None,
"quantity": "5.00000000",
"reject_reason": None,
"created_at": "2020-06-16T23:39:38.319481Z",
"updated_at": "2020-06-16T23:39:38.374303Z",
"last_transaction_at": "2020-06-16T23:39:38.319481Z",
"executions": [],
"extended_hours": True,
"override_dtbp_checks": False,
"override_day_trade_checks": False,
"response_category": None,
"stop_triggered_at": None,
"last_trail_price": None,
"last_trail_price_updated_at": None,
"dollar_based_amount": None,
"drip_dividend_id": None,
"total_notional": {
"amount": "33.50",
"currency_code": "USD",
"currency_id": "1072fc76-1862-41ab-82c2-485837590762",
},
"executed_notional": None,
"investment_schedule_id": None,
}
class OrderType(Enum):
    """Order kinds supported by the API, serialized as lowercase names."""

    LIMIT = auto()
    MARKET = auto()

    @staticmethod
    def to_enum(value: str) -> Optional[OrderType]:
        # Normalizes e.g. "stop loss" / "stop-loss" to "STOP_LOSS" before lookup.
        # NOTE(review): annotated Optional, but an unknown value raises KeyError
        # instead of returning None — confirm which contract callers expect.
        key = value.upper().replace(" ", "-").replace("-", "_")
        return OrderType[key]

    def value(self) -> str:
        # Shadows Enum.value with a callable that yields the lowercase name.
        return self.name.lower()
@dataclass(frozen=True)
class OptionalOrder:
    """Holds either a parsed Order or an API error detail (see clean_optional_order)."""

    order: Optional[Order] = None
    detail: Optional[str] = None  # error detail string from a failed/throttled request
@dataclass(frozen=True)
class Execution:
    """A single fill against an order."""

    id: str
    price: float  # per-share fill price
    quantity: float  # shares filled
    settlement_date: datetime
    timestamp: datetime
@dataclass(frozen=True)
class Order:
    """An order as returned by the orders API (see EXAMPLE).

    Enum- and Price-typed fields are populated by clean_order; the Optional
    fields may be None in the raw payload.
    """

    id: str
    ref_id: str
    url: str
    account: str
    position: str
    cancel: str
    instrument: str
    cumulative_quantity: float
    state: State
    type: OrderType
    side: ExecutionType
    time_in_force: TimeInForce
    trigger: str  # TODO: enum
    price: Price
    quantity: float
    created_at: datetime
    updated_at: datetime
    last_transaction_at: datetime
    executions: List[Execution]
    extended_hours: bool
    override_dtbp_checks: bool
    override_day_trade_checks: bool
    stop_triggered_at: datetime
    last_trail_price: float
    last_trail_price_updated_at: datetime
    average_price: Optional[float] = None
    fees: Optional[float] = None
    stop_price: Optional[Price] = None
    reject_reason: Optional[str] = None
    response_category: Optional[str] = None
    dollar_based_amount: Optional[Price] = None
    drip_dividend_id: Optional[str] = None
    total_notional: Optional[Price] = None
    executed_notional: Optional[Price] = None
    investment_schedule_id: Optional[str] = None
    details: Optional[str] = None

    def execution_stats(self) -> Tuple[float, float, float]:
        """Aggregate fills into (total quantity, total cost, per-share price).

        Returns (0.0, 0.0, 0.0) when there are no executions (or all fill
        quantities are zero) instead of raising ZeroDivisionError.
        """
        quantity = sum(e.quantity for e in self.executions)
        if not quantity:
            return 0.0, 0.0, 0.0
        total_price = sum(e.quantity * e.price for e in self.executions)
        return quantity, total_price, total_price / quantity
def clean_optional_order(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Prepare kwargs for OptionalOrder from a raw payload.

    Error payloads (those carrying ERROR_DETAIL_KEY) are passed through as a
    copy; anything else is parsed into a full Order under the "order" key.
    """
    if ERROR_DETAIL_KEY in input_data:
        return deepcopy(input_data)
    return {"order": Order(**clean_order(deepcopy(input_data)))}
def clean_order(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Convert the raw strings of an order payload into typed values."""
    data = deepcopy(input_data)
    # Enum-valued fields, each parsed by its own enum class.
    for field, enum_cls in (
        ("state", State),
        ("time_in_force", TimeInForce),
        ("side", ExecutionType),
        ("type", OrderType),
    ):
        data[field] = enum_cls.to_enum(data[field])
    data["executions"] = [Execution(**clean_execution(e)) for e in data["executions"]]
    consolidate_price_data(data, ["price", "stop_price", "dollar_based_amount", "total_notional", "executed_notional"])
    data = convert_floats(
        data, ["cumulative_quantity", "quantity", "average_price", "fees", "stop_price", "last_trail_price"]
    )
    return convert_dates(data, ["last_transaction_at", "stop_triggered_at", "last_trail_price_updated_at"])
def clean_execution(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Typed copy of a raw execution payload (floats and dates parsed)."""
    cleaned = convert_floats(deepcopy(input_data), ["price", "quantity"])
    return convert_dates(cleaned, ["settlement_date", "timestamp"])
def consolidate_price_data(input_data: Dict[str, Any], keys: List[str]) -> None:
    """Normalize price fields in place into Price objects.

    Depending on the endpoint, a price value may be a bare primitive
    (a str/float amount) or a dict payload, so handle both cases; a None
    value stays None.

    Args:
        input_data: payload, mutated in place
        keys: names of the price fields to normalize
    """
    for key in keys:
        if key not in input_data:
            # Best effort: tolerate payloads that omit some price fields.
            print(f"Skipping missing key: {key}")
            continue
        value = input_data[key]
        if isinstance(value, (str, float)):  # isinstance, not type() in [...]
            input_data[key] = Price(**clean_price({"amount": value}))
        elif value is not None:
            input_data[key] = Price(**clean_price(value))
        else:
            input_data[key] = None
def main() -> None:
    """Demo: parse the example payloads and print the resulting entities."""
    print(Order(**clean_order(EXAMPLE)))
    print(OptionalOrder(**clean_optional_order(EXAMPLE)))
    throttled = {ERROR_DETAIL_KEY: "Request was throttled. Expected available in 28 seconds."}
    print(OptionalOrder(**clean_optional_order(throttled)))
if __name__ == "__main__":
main() | /robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/entity/order.py | 0.777342 | 0.31042 | order.py | pypi |
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from enum import Enum, auto
from typing import Any, Dict
from robinhood_commons.entity.state import State
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Representative ACH transfer payload from the Robinhood API, used by main().
# Values are mostly strings, but "cancel"/"rhs_state"/"expected_sweep_at" are
# null and "scheduled" is a bool, hence Dict[str, Any].
EXAMPLE: Dict[str, Any] = {
    "id": "96d878fc-2023-40e9-bd6f-ff43d349fce0",
    "ref_id": "96d878fc-2023-40e9-bd6f-ff43d349fce0",
    "url": "https://api.robinhood.com/ach/transfers/96d878fc-2023-40e9-bd6f-ff43d349fce0/",
    "cancel": None,
    "ach_relationship": "https://api.robinhood.com/ach/relationships/2a0861b2-7bb3-41fc-8737-c7646dc9e1c6/",
    "account": "https://api.robinhood.com/accounts/5PY78241/",
    "amount": "1010.28",
    "direction": "deposit",
    "state": "pending",
    "fees": "0.00",
    "status_description": "",
    "scheduled": False,
    "expected_landing_date": "2020-06-15",
    "early_access_amount": "1000.00",
    "created_at": "2020-06-11T23:19:40.250436Z",
    "updated_at": "2020-06-11T23:19:43.262917Z",
    "rhs_state": None,
    "expected_sweep_at": None,
    "expected_landing_datetime": "2020-06-15T13:00:00Z",
    "investment_schedule_id": None,
}
class TransferDirection(Enum):
    """Direction of an ACH transfer (only deposits are modeled here)."""

    DEPOSIT = auto()

    @staticmethod
    def to_enum(value: str) -> TransferDirection:
        """Map a free-form string (e.g. "deposit") onto a member."""
        normalized: str = value.upper().replace(" ", "_").replace("-", "_")
        return TransferDirection[normalized]

    def value(self) -> str:
        """Lower-cased member name (intentionally shadows Enum's ``value``)."""
        return self.name.lower()
@dataclass(frozen=True)
class BankTransfer:
    """An ACH transfer between a bank account and a Robinhood account.

    Built from a raw API payload via clean_transfer(); see EXAMPLE above
    for the payload shape.
    """

    id: str
    ref_id: str
    url: str
    cancel: str  # NOTE(review): null in EXAMPLE -- likely Optional[str]; confirm
    ach_relationship: str
    account: str
    amount: float
    direction: TransferDirection
    state: State
    fees: float
    status_description: str
    scheduled: bool
    expected_landing_date: datetime
    early_access_amount: float
    created_at: datetime
    updated_at: datetime
    rhs_state: State  # NOTE(review): null in EXAMPLE -- likely Optional; confirm
    expected_sweep_at: str  # NOTE(review): null in EXAMPLE -- likely Optional; confirm
    expected_landing_datetime: datetime
    investment_schedule_id: str  # NOTE(review): null in EXAMPLE -- likely Optional; confirm
def clean_transfer(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Convert the raw strings of a transfer payload into typed values."""
    data = deepcopy(input_data)
    data["direction"] = TransferDirection.to_enum(data["direction"])
    # NOTE(review): "rhs_state" is null in EXAMPLE -- this assumes
    # State.to_enum accepts None; confirm against State's implementation.
    for key in ("state", "rhs_state"):
        data[key] = State.to_enum(data[key])
    data = convert_floats(data, ["amount", "fees", "early_access_amount"])
    return convert_dates(data, ["expected_landing_date", "expected_landing_datetime"])
def main() -> None:
    """Demo: parse the example transfer payload and print the entity."""
    print(BankTransfer(**clean_transfer(EXAMPLE)))
if __name__ == "__main__":
main() | /robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/entity/transfer.py | 0.831485 | 0.320077 | transfer.py | pypi |
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Representative raw position payload from the Robinhood API. All values are
# strings as delivered by the API; clean_position() converts them to typed
# values before constructing a Position.
EXAMPLE: Dict[str, str] = {
    "url": "https://api.robinhood.com/positions/5PY78241/e5fdefaa-bb90-4af1-9dee-c9782b1519fd/",
    "instrument": "https://api.robinhood.com/instruments/e5fdefaa-bb90-4af1-9dee-c9782b1519fd/",
    "account": "https://api.robinhood.com/accounts/5PY78241/",
    "account_number": "5PY78241",
    "average_buy_price": "9.0429",
    "pending_average_buy_price": "9.0429",
    "quantity": "7.00000000",
    "intraday_average_buy_price": "0.0000",
    "intraday_quantity": "0.00000000",
    "shares_held_for_buys": "0.00000000",
    "shares_held_for_sells": "0.00000000",
    "shares_held_for_stock_grants": "0.00000000",
    "shares_held_for_options_collateral": "0.00000000",
    "shares_held_for_options_events": "0.00000000",
    "shares_pending_from_options_events": "0.00000000",
    "shares_available_for_closing_short_position": "0.000000",
    "updated_at": "2018-05-04T13:33:05.364913Z",
    "created_at": "2018-04-23T14:38:07.373841Z",
    "symbol": "ARCO",
    "shares_available_for_exercise": "0.0000000",
}
@dataclass(frozen=True)
class Position:
    """A holding of one instrument in an account.

    The float/datetime field types assume the raw payload was passed
    through clean_position() first.
    """

    symbol: str
    quantity: float
    intraday_quantity: float
    created_at: datetime
    updated_at: datetime
    average_buy_price: float
    pending_average_buy_price: float
    intraday_average_buy_price: float
    instrument: str
    shares_available_for_exercise: float
    shares_held_for_buys: float
    shares_held_for_options_collateral: float
    shares_held_for_options_events: float
    shares_held_for_sells: float
    shares_held_for_stock_grants: float
    shares_pending_from_options_events: float
    url: str
    account: str
    account_number: str
    shares_available_for_closing_short_position: float = 0.00
def clean_position(input_data: Dict[str, Any]) -> Dict[str, Any]:
    """Typed copy of a raw position payload (floats and created/updated dates parsed)."""
    float_keys = [
        "quantity",
        "intraday_quantity",
        "average_buy_price",
        "pending_average_buy_price",
        "intraday_average_buy_price",
        "shares_available_for_exercise",
        "shares_held_for_buys",
        "shares_held_for_options_collateral",
        "shares_held_for_options_events",
        "shares_held_for_sells",
        "shares_held_for_stock_grants",
        "shares_pending_from_options_events",
    ]
    return convert_dates(convert_floats(deepcopy(input_data), float_keys))
def main() -> None:
    """Demo: parse the example position payload and print the entity.

    Fix: the raw EXAMPLE must go through clean_position() so the numeric
    fields become floats and created_at/updated_at become datetimes --
    previously the strings were passed straight into Position, and
    clean_position() was never exercised. This mirrors the
    clean-then-construct pattern of the sibling entity modules.
    """
    position = Position(**clean_position(EXAMPLE))
    print(position)
if __name__ == "__main__":
main() | /robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/entity/position.py | 0.821725 | 0.340321 | position.py | pypi |
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime
from enum import Enum, auto
from typing import Any, Dict, Optional
from robinhood_commons.entity.price import Price, clean_price
from robinhood_commons.entity.printable import Printable
from robinhood_commons.util.date_utils import convert_dates
from robinhood_commons.util.num_utils import convert_floats
# Representative raw dividend payload from the Robinhood API, used by main().
# Note the nested drip_order_execution_price dict, which clean_dividend()
# converts into a Price.
EXAMPLE: Dict[str, Any] = {
    "id": "ea49e4b7-1f01-55ce-9393-e95a9fc505fa",
    "url": "https://api.robinhood.com/dividends/ea49e4b7-1f01-55ce-9393-e95a9fc505fa/",
    "account": "https://api.robinhood.com/accounts/5PY78241/",
    "instrument": "https://api.robinhood.com/instruments/62463799-a202-4cbe-9b7d-ae3caacc3412/",
    "amount": "4.96",
    "rate": "0.0935150000",
    "position": "53.00000000",
    "withholding": "0.00",
    "record_date": "2020-05-29",
    "payable_date": "2020-06-02",
    "paid_at": "2020-06-03T02:25:41Z",
    "state": "reinvested",
    "nra_withholding": "0",
    "drip_enabled": True,
    "drip_order_id": "ba3b3c4d-f0ef-44ee-aa13-77a75d911b4c",
    "drip_order_state": "filled",
    "drip_order_quantity": "0.14794900",
    "drip_order_execution_price": {
        "currency_id": "1072fc76-1862-41ab-82c2-485837590762",
        "currency_code": "USD",
        "amount": "33.53",
    },
}
class DividendState(Printable, Enum):
    """Lifecycle state of a dividend payout."""

    PAID = auto()
    PENDING = auto()
    REINVESTED = auto()
    VOIDED = auto()
    UNKNOWN = auto()

    @staticmethod
    def to_enum(value: str) -> Optional[DividendState]:
        """Map a free-form string onto a member; None maps to UNKNOWN."""
        if value is None:
            return DividendState.UNKNOWN
        return DividendState[value.upper().replace(" ", "_").replace("-", "_")]

    def value(self) -> str:
        """Lower-cased member name (intentionally shadows Enum's ``value``)."""
        return self.name.lower()
@dataclass(frozen=True)
class Dividend:
    """A dividend payment, possibly reinvested via DRIP.

    Built from a raw API payload via clean_dividend(); the drip_* fields
    are only populated when the dividend was reinvested.
    """

    id: str
    url: str
    account: str
    instrument: str
    amount: float
    rate: float
    position: float
    withholding: float
    record_date: datetime
    payable_date: datetime
    paid_at: datetime
    state: DividendState
    nra_withholding: str
    drip_enabled: bool
    drip_order_id: Optional[str] = None
    drip_order_state: Optional[str] = None
    drip_order_quantity: Optional[float] = None
    drip_order_execution_price: Optional[Price] = None
def clean_dividend(input_data: Dict[str, str]) -> Dict[str, Any]:
    """Convert the raw strings of a dividend payload into typed values.

    Fix: the drip execution price is now looked up with ``.get()`` and
    None-checked. The previous ``"..." in data`` test passed None into
    clean_price() whenever the key was present with a null value.
    """
    data: Dict[str, Any] = deepcopy(input_data)
    raw_price = data.get("drip_order_execution_price")
    data["drip_order_execution_price"] = Price(**clean_price(raw_price)) if raw_price is not None else None
    data["state"] = DividendState.to_enum(data["state"])
    data = convert_floats(data, ["amount", "rate", "position", "withholding", "drip_order_quantity"])
    data = convert_dates(data, ["record_date", "payable_date", "paid_at"])
    return data
def main() -> None:
    """Demo: parse the example dividend payload and print the entity."""
    print(Dividend(**clean_dividend(EXAMPLE)))
if __name__ == "__main__":
main() | /robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/entity/dividend.py | 0.860662 | 0.28966 | dividend.py | pypi |
from dataclasses import dataclass
from robinhood_commons.entity.execution_type import ExecutionType
from robinhood_commons.entity.order_type.order_request import OrderRequest
from robinhood_commons.entity.time_in_force import TimeInForce
@dataclass
class LimitOrderRequest(OrderRequest):
    """An order that specifies the minimum price to sell a stock or the maximum price to buy it.

    Allows for a ceiling and a floor to be set so once a stock reaches into
    that range, an order will occur.
    """

    limit_price: float
    extended_hours: bool
    limit_order: bool = True

    def should_execute(self, current_price: float = -1) -> bool:
        """Whether the limit condition is met at ``current_price``.

        Fix: the BUY branch previously tested ``self.execution_type.BUY``,
        which reads the BUY member through the instance and is therefore
        always truthy -- any non-SELL type was treated as a buy and the
        unknown-type error branch was unreachable. Now it compares against
        the member with ``is``.
        """
        # TODO: extended hours check
        if self.execution_type is ExecutionType.SELL:
            # Selling: only execute at or above the limit.
            return current_price >= self.limit_price
        if self.execution_type is ExecutionType.BUY:
            # Buying: only execute at or below the limit.
            return current_price <= self.limit_price
        raise Exception(f"Unknown execution type: {self.execution_type}")
if __name__ == "__main__":
    # Smoke test: a sell limit executes at/above its price, a buy at/below.
    sample_sell = LimitOrderRequest(
        symbol="AMZN",
        execution_type=ExecutionType.SELL,
        limit_price=5000,
        quantity=1000,
        extended_hours=True,
        time_in_force=TimeInForce.GOOD_TIL_CANCELLED,
    )
    print(sample_sell)
    for probe in (5001, 5000, 4999):
        print(sample_sell.should_execute(current_price=probe))

    sample_buy = LimitOrderRequest(
        symbol="MRO",
        execution_type=ExecutionType.BUY,
        limit_price=1000,
        quantity=2000,
        extended_hours=True,
        time_in_force=TimeInForce.GOOD_TIL_CANCELLED,
    )
    print(sample_buy)
    for probe in (1000, 1001, 1002):
        print(sample_buy.should_execute(current_price=probe))
from copy import deepcopy
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
import pytz
from dateutil import parser
from holidays.countries.united_states import US as US_holidays
# All date handling in this module is done in US market time (Eastern).
BASE_TZ = pytz.timezone(zone="US/Eastern")
# Payload keys that always hold ISO-8601 timestamps (see convert_dates()).
UPDATED_AT: str = "updated_at"
CREATED_AT: str = "created_at"
# Duration granularities, largest first, used by to_readable_duration().
GRAINS: List[str] = ["days", "hours", "minutes", "seconds"]
def date_time_format() -> str:
    """Canonical minute-resolution timestamp format, e.g. 20200611T2319."""
    return "%Y%m%dT%H%M"
def to_est() -> datetime:
    """Current timezone-aware time in US/Eastern (DST-aware despite the name)."""
    return datetime.now(tz=BASE_TZ)
def tz_localize(a_date: datetime) -> datetime:
    """Attach the US/Eastern timezone to a naive datetime.

    NOTE(review): pytz's localize() expects a naive datetime -- confirm
    callers never pass an aware one.
    """
    return BASE_TZ.localize(a_date)
def date_parse(date_str: str) -> Optional[datetime]:
    """Parse an ISO-8601 string; None passes through as None.

    Fix: return annotation was plain ``datetime`` even though None is a
    possible result.
    """
    if date_str is None:
        return None
    return parser.isoparse(date_str)
def to_date(date_str: str, format_str: str = "%Y%m%d") -> datetime:
    """Parse ``date_str`` with ``format_str`` (defaults to YYYYMMDD)."""
    parsed: datetime = datetime.strptime(date_str, format_str)
    return parsed
def date_to_str(a_date: Optional[datetime] = None, format_str: str = "%Y%m%d") -> str:
    """Provides a day representation.

    Fix: the default used to be ``to_est()`` evaluated once at import time,
    which froze "now" for the life of the process. None now means "current
    Eastern time at call time".

    Returns:
        A year, month, and day string
    """
    if a_date is None:
        a_date = to_est()
    return a_date.strftime(format_str)
def str_to_iso_8601(a_str: str, format_str: str = "%Y-%m-%d %H:%M:%S.%f") -> datetime:
    """Parse a microsecond-resolution timestamp string into a datetime."""
    parsed: datetime = datetime.strptime(a_str, format_str)
    return parsed
def str_to_datetime(a_str: Optional[str] = None, format_str: Optional[str] = None) -> datetime:
    """Parse a timestamp string (default format: minute resolution).

    Fix: the old default was ``to_est()`` -- a datetime, not a str --
    evaluated once at import time, so calling with no argument raised
    TypeError inside strptime. None now means "current Eastern time",
    rendered to a string first and then parsed; defaults are resolved at
    call time.
    """
    if format_str is None:
        format_str = date_time_format()
    if a_str is None:
        a_str = to_est().strftime(format_str)
    return datetime.strptime(a_str, format_str)
def datetime_to_str(a_date: Optional[datetime] = None, format_str: Optional[str] = None) -> str:
    """Provides a minute representation.

    Fix: defaults are resolved at call time (None sentinels) instead of
    being frozen at import time by evaluating ``to_est()`` in the
    signature.

    Returns:
        A year, month, day, hour, minute string
    """
    if a_date is None:
        a_date = to_est()
    if format_str is None:
        format_str = date_time_format()
    return a_date.strftime(format_str)
def second_to_str(a_date: Optional[datetime] = None, format_str: str = "%Y%m%d%H%M%S") -> str:
    """Provides a second representation.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Returns:
        A year, month, day, hour, minute, second string
    """
    if a_date is None:
        a_date = to_est()
    return a_date.strftime(format_str)
def time_to_str(a_date: Optional[datetime] = None, format_str: str = "%H%M") -> str:
    """Provides a time string based representation.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Args:
        a_date: a date (None for now, Eastern)
        format_str: time format string

    Returns:
        A hour, minute string
    """
    if a_date is None:
        a_date = to_est()
    return a_date.strftime(format_str)
def readable_datetime_to_str(a_date: Optional[datetime] = None, format_str: str = "%Y-%m-%d %H:%M:00") -> str:
    """Provides a minute representation (seconds rendered as literal 00).

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Returns: a formatted string, defaults to year, month, day, hour, minute string
    """
    if a_date is None:
        a_date = to_est()
    return a_date.strftime(format_str)
def readable_date_to_str(a_date: Optional[datetime] = None, format_str: str = "%Y-%m-%d") -> str:
    """Provides a readable day representation.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Returns: a year, month, day
    """
    if a_date is None:
        a_date = to_est()
    return a_date.strftime(format_str)
def time_offset(seconds: int, a_datetime: Optional[datetime] = None) -> datetime:
    """Return the time ``seconds`` before ``a_datetime``.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Returns: a datetime
    """
    if a_datetime is None:
        a_datetime = to_est()
    return a_datetime - timedelta(seconds=seconds)
def timeoffset_as_str(seconds: int, a_datetime: Optional[datetime] = None) -> str:
    """Hour-minute string for the time ``seconds`` before ``a_datetime``.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Returns: a year, month, day, hour, minute string
    """
    if a_datetime is None:
        a_datetime = to_est()
    return time_to_str(time_offset(seconds=seconds, a_datetime=a_datetime))
def timestamp_strs_for_window(
    window: int,
    seconds: int,
    a_datetime: Optional[datetime] = None,
) -> List[str]:
    """For a start time, num of windows and length per window, produce time strings from start time to
    start time - (window * seconds).

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".

    Args:
        window: num intervals
        seconds: num window seconds
        a_datetime: time (None for now, Eastern)

    Returns: list of times descending from a start time to the past.
    """
    if a_datetime is None:
        a_datetime = to_est()
    return [timeoffset_as_str(seconds=r * seconds, a_datetime=a_datetime) for r in range(window)]
def time_floor(a_date: Optional[datetime] = None, window: int = 10) -> datetime:
    """Floor ``a_date`` down to the start of its ``window``-minute bucket.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time".
    """
    if a_date is None:
        a_date = to_est()
    return a_date - timedelta(minutes=a_date.minute % window, seconds=a_date.second, microseconds=a_date.microsecond)
def convert_dates(input_data: Dict[str, Any], keys: Optional[List[str]] = None) -> Dict[str, Any]:
    """Return a copy of ``input_data`` with the given keys -- plus the
    standard updated_at/created_at keys -- parsed from ISO-8601 strings
    into datetimes. The input is returned untouched when none of the keys
    are present.

    Fix: the ``keys`` default was a mutable ``[]`` (the classic Python
    shared-default pitfall); None is now the sentinel.
    """
    all_keys = (keys or []) + [UPDATED_AT, CREATED_AT]
    if not any(k in input_data for k in all_keys):
        return input_data
    data = deepcopy(input_data)
    for key in all_keys:
        if key in data:
            data[key] = date_parse(data[key])
    return data
def is_holiday(a_time: Optional[datetime] = None, format_str: str = "%Y-%m-%d") -> bool:
    """True when ``a_time`` falls on a US holiday.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time". Also dropped the
    redundant ``True if ... else False``.
    """
    if a_time is None:
        a_time = to_est()
    return a_time.strftime(format_str) in US_holidays()
def is_weekend(a_time: Optional[datetime] = None) -> bool:
    """True when ``a_time`` is a Saturday or Sunday.

    Fix: the default used to be ``to_est()`` evaluated once at import time;
    None now means "current Eastern time at call time". Also dropped the
    redundant ``True if ... else False``.
    """
    if a_time is None:
        a_time = to_est()
    return a_time.date().weekday() > 4
def _maybe_singleize(plural_grain: str, value: int) -> str:
"""If the value is 1, then toss the plural s at the end of the granularity.
Args:
plural_grain: grain name
value: value
Returns: either the initial grain name, or a singular version
"""
if value == 1:
return plural_grain[:-1]
return plural_grain
def to_readable_duration(delta: timedelta) -> Optional[str]:
    """Converts a timedelta to a readable duration string, e.g.
    "X days, Y hours, Z minutes, A seconds".

    Args:
        delta: time delta (timeA - timeB)

    Returns: readable string based on the duration, or None when every
    component is zero.
    """
    hours, remainder = divmod(delta.seconds, 3600)
    mins, secs = divmod(remainder, 60)
    parts = [
        f"{amount} {_maybe_singleize(grain, amount)}"
        for grain, amount in zip(GRAINS, (delta.days, hours, mins, secs))
        if amount > 0
    ]
    return ", ".join(parts) if parts else None
if __name__ == "__main__":
print(datetime_to_str()) | /robinhood_commons-1.0.44.tar.gz/robinhood_commons-1.0.44/robinhood_commons/util/date_utils.py | 0.902681 | 0.465145 | date_utils.py | pypi |
from __future__ import absolute_import
import atexit
import binascii
import collections
import struct
from threading import Thread, Event
import weakref
from rhkafka.vendor import six
from rhkafka.errors import BufferUnderflowError
# Kafka encodes CRCs as signed 32-bit ints. binascii.crc32 returns an
# unsigned value on py3, so wrap it there; on py2 the stdlib function
# already matches the wire format.
if six.PY3:
    MAX_INT = 2 ** 31
    TO_SIGNED = 2 ** 32

    def crc32(data):
        """CRC32 of `data` as a signed 32-bit int (Kafka protocol encoding)."""
        crc = binascii.crc32(data)
        # py2 and py3 behave a little differently
        # CRC is encoded as a signed int in kafka protocol
        # so we'll convert the py3 unsigned result to signed
        if crc >= MAX_INT:
            crc -= TO_SIGNED
        return crc
else:
    from binascii import crc32
def write_int_string(s):
    """Encode `s` as a 4-byte big-endian length prefix plus the raw bytes.

    None encodes as length -1 (Kafka's null string); non-bytes input is
    rejected with TypeError.
    """
    if s is None:
        return struct.pack('>i', -1)
    if not isinstance(s, six.binary_type):
        raise TypeError('Expected "%s" to be bytes\n'
                        'data=%s' % (type(s), repr(s)))
    return struct.pack('>i%ds' % len(s), len(s), s)
def read_short_string(data, cur):
    """Read a Kafka short-string (2-byte signed length prefix) at offset `cur`.

    Returns (value_bytes_or_None, new_offset); a length of -1 denotes null.
    Raises BufferUnderflowError when `data` is too short.
    """
    if len(data) < cur + 2:
        raise BufferUnderflowError("Not enough data left")

    (strlen,) = struct.unpack('>h', data[cur:cur + 2])
    if strlen == -1:
        return None, cur + 2

    end = cur + 2 + strlen
    if len(data) < end:
        raise BufferUnderflowError("Not enough data left")
    return data[cur + 2:end], end
def relative_unpack(fmt, data, cur):
    """struct.unpack `fmt` from `data` at offset `cur`.

    Returns (unpacked_tuple, new_offset); raises BufferUnderflowError when
    fewer than struct.calcsize(fmt) bytes remain.
    """
    end = cur + struct.calcsize(fmt)
    if len(data) < end:
        raise BufferUnderflowError("Not enough data left")
    return struct.unpack(fmt, data[cur:end]), end
def group_by_topic_and_partition(tuples):
    """Index payloads by topic and then partition; duplicates assert."""
    grouped = collections.defaultdict(dict)
    for payload in tuples:
        assert payload.partition not in grouped[payload.topic], \
            'Duplicate {0}s for {1} {2}'.format(payload.__class__.__name__,
                                                payload.topic, payload.partition)
        grouped[payload.topic][payload.partition] = payload
    return grouped
class ReentrantTimer(object):
    """
    A timer that can be restarted, unlike threading.Timer
    (although this uses threading.Timer)

    Arguments:
        t: timer interval in milliseconds
        fn: a callable to invoke
        args: tuple of args to be passed to function
        kwargs: keyword arguments to be passed to function
    """
    def __init__(self, t, fn, *args, **kwargs):
        # Set these first so __del__ -> stop() is safe even when
        # validation below raises.
        self.thread = None
        self.active = None

        if t <= 0:
            raise ValueError('Invalid timeout value')

        if not callable(fn):
            raise ValueError('fn must be callable')

        self.t = t / 1000.0  # interval in seconds
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def _timer(self, active):
        # python2.6 Event.wait() always returns None
        # python2.7 and greater returns the flag value (true/false)
        # we want the flag value, so add an 'or' here for python2.6
        # this is redundant for later python versions (FLAG OR FLAG == FLAG)
        while not (active.wait(self.t) or active.is_set()):
            self.fn(*self.args, **self.kwargs)

    def start(self):
        if self.thread is not None:
            self.stop()
        self.active = Event()
        self.thread = Thread(target=self._timer, args=(self.active,))
        self.thread.daemon = True  # So the app exits when main thread exits
        self.thread.start()

    def stop(self):
        if self.thread is None:
            return
        self.active.set()
        self.thread.join(self.t + 1)
        # Fix: this used to assign `self.timer = None` (a typo -- the
        # attribute is `thread`) and also cleared `self.fn`, which made a
        # subsequent start() crash and contradicted the documented
        # restartability of this class.
        self.thread = None

    def __del__(self):
        self.stop()
class WeakMethod(object):
    """
    Callable that weakly references a method and the object it is bound to. It
    is based on http://stackoverflow.com/a/24287465.

    Arguments:

        object_dot_method: A bound instance method (i.e. 'object.method').
    """
    def __init__(self, object_dot_method):
        try:
            self.target = weakref.ref(object_dot_method.__self__)
        except AttributeError:
            # py2 bound methods expose the instance as im_self
            self.target = weakref.ref(object_dot_method.im_self)
        self._target_id = id(self.target())
        try:
            self.method = weakref.ref(object_dot_method.__func__)
        except AttributeError:
            # py2 bound methods expose the function as im_func
            self.method = weakref.ref(object_dot_method.im_func)
        self._method_id = id(self.method())

    def __call__(self, *args, **kwargs):
        """
        Calls the method on target with args and kwargs.
        """
        # NOTE: if the referents have been garbage-collected, the weakrefs
        # return None and this raises TypeError.
        return self.method()(self.target(), *args, **kwargs)

    def __hash__(self):
        return hash(self.target) ^ hash(self.method)

    def __eq__(self, other):
        if not isinstance(other, WeakMethod):
            return False
        # Compare ids captured at construction so equality stays stable
        # even after the referents are collected.
        return self._target_id == other._target_id and self._method_id == other._method_id
class Dict(dict):
    """Utility class to support passing weakrefs to dicts

    Plain dict instances cannot be weakly referenced; a trivial subclass can.
    See: https://docs.python.org/2/library/weakref.html
    """
    pass
def try_method_on_system_exit(obj, method, *args, **kwargs):
    """Schedule ``obj.method(*args, **kwargs)`` to run at interpreter exit.

    Only a weak proxy to ``obj`` is registered so the object is not kept
    alive; if it has already been collected (or the attribute is gone) the
    call is silently skipped.
    """
    def wrapper(_obj, _meth, *_args, **_kwargs):
        try:
            getattr(_obj, _meth)(*_args, **_kwargs)
        except (ReferenceError, AttributeError):
            pass
    atexit.register(wrapper, weakref.proxy(obj), method, *args, **kwargs)
from __future__ import absolute_import
from logging import getLogger
from rhkafka.errors import check_error, OffsetOutOfRangeError
from rhkafka.structs import OffsetCommitRequestPayload
class OffsetCommitContext(object):
    """
    Provides commit/rollback semantics around a `SimpleConsumer`.

    Usage assumes that `auto_commit` is disabled, that messages are consumed in
    batches, and that the consuming process will record its own successful
    processing of each message. Both the commit and rollback operations respect
    a "high-water mark" to ensure that last unsuccessfully processed message
    will be retried.

    Example:

    .. code:: python

        consumer = SimpleConsumer(client, group, topic, auto_commit=False)
        consumer.provide_partition_info()
        consumer.fetch_last_known_offsets()

        while some_condition:
            with OffsetCommitContext(consumer) as context:
                messages = consumer.get_messages(count, block=False)

                for partition, message in messages:
                    if can_process(message):
                        context.mark(partition, message.offset)
                    else:
                        break

                if not context:
                    sleep(delay)


    These semantics allow for deferred message processing (e.g. if `can_process`
    compares message time to clock time) and for repeated processing of the last
    unsuccessful message (until some external error is resolved).
    """

    def __init__(self, consumer):
        """
        :param consumer: an instance of `SimpleConsumer`
        """
        self.consumer = consumer
        self.initial_offsets = None
        self.high_water_mark = None
        self.logger = getLogger("kafka.context")

    def mark(self, partition, offset):
        """
        Set the high-water mark in the current context.

        In order to know the current partition, it is helpful to initialize
        the consumer to provide partition info via:

        .. code:: python

            consumer.provide_partition_info()

        """
        # +1 because the stored mark is the NEXT offset to process.
        max_offset = max(offset + 1, self.high_water_mark.get(partition, 0))

        self.logger.debug("Setting high-water mark to: %s",
                          {partition: max_offset})

        self.high_water_mark[partition] = max_offset

    def __nonzero__(self):
        """
        Return whether any operations were marked in the context.
        """
        return bool(self.high_water_mark)

    # Fix: Python 3 ignores __nonzero__ and uses __bool__. Without this
    # alias, `if not context:` (the idiom shown in the class docstring) was
    # always False on py3 because plain objects are truthy by default.
    __bool__ = __nonzero__

    def __enter__(self):
        """
        Start a new context:

        -  Record the initial offsets for rollback
        -  Reset the high-water mark
        """
        self.initial_offsets = dict(self.consumer.offsets)
        self.high_water_mark = dict()

        self.logger.debug("Starting context at: %s", self.initial_offsets)

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        End a context.

        -  If there was no exception, commit up to the current high-water mark.
        -  If there was an offset of range error, attempt to find the correct
           initial offset.
        -  If there was any other error, roll back to the initial offsets.
        """
        if exc_type is None:
            self.commit()
        elif isinstance(exc_value, OffsetOutOfRangeError):
            self.handle_out_of_range()
            return True  # suppress the OffsetOutOfRangeError
        else:
            self.rollback()

    def commit(self):
        """
        Commit this context's offsets:

        -  If the high-water mark has moved, commit up to and position the
           consumer at the high-water mark.

        -  Otherwise, reset to the consumer to the initial offsets.
        """
        if self.high_water_mark:
            self.logger.info("Committing offsets: %s", self.high_water_mark)
            self.commit_partition_offsets(self.high_water_mark)
            self.update_consumer_offsets(self.high_water_mark)
        else:
            self.update_consumer_offsets(self.initial_offsets)

    def rollback(self):
        """
        Rollback this context:

        -  Position the consumer at the initial offsets.
        """
        self.logger.info("Rolling back context: %s", self.initial_offsets)
        self.update_consumer_offsets(self.initial_offsets)

    def commit_partition_offsets(self, partition_offsets):
        """
        Commit explicit partition/offset pairs.
        """
        self.logger.debug("Committing partition offsets: %s", partition_offsets)

        commit_requests = [
            OffsetCommitRequestPayload(self.consumer.topic, partition, offset, None)
            for partition, offset in partition_offsets.items()
        ]
        commit_responses = self.consumer.client.send_offset_commit_request(
            self.consumer.group,
            commit_requests,
        )
        for commit_response in commit_responses:
            check_error(commit_response)

    def update_consumer_offsets(self, partition_offsets):
        """
        Update consumer offsets to explicit positions.
        """
        self.logger.debug("Updating consumer offsets to: %s", partition_offsets)

        for partition, offset in partition_offsets.items():
            self.consumer.offsets[partition] = offset

        # consumer keeps other offset states beyond its `offsets` dictionary,
        # a relative seek with zero delta forces the consumer to reset to the
        # current value of the `offsets` dictionary
        self.consumer.seek(0, 1)

    def handle_out_of_range(self):
        """
        Handle out of range condition by seeking to the beginning of valid
        ranges.

        This assumes that an out of range doesn't happen by seeking past the end
        of valid ranges -- which is far less likely.
        """
        self.logger.info("Seeking beginning of partition on out of range error")
        self.consumer.seek(0, 0)
from __future__ import absolute_import
from collections import namedtuple
# SimpleClient Payload Structs - Deprecated
# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-MetadataAPI

# The Request/Response pairs below mirror the wire protocol of the legacy
# SimpleClient API; field names follow the Kafka protocol guide linked above.
MetadataRequest = namedtuple("MetadataRequest",
    ["topics"])

MetadataResponse = namedtuple("MetadataResponse",
    ["brokers", "topics"])

# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ConsumerMetadataRequest
ConsumerMetadataRequest = namedtuple("ConsumerMetadataRequest",
    ["groups"])

ConsumerMetadataResponse = namedtuple("ConsumerMetadataResponse",
    ["error", "nodeId", "host", "port"])

# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ProduceAPI
ProduceRequestPayload = namedtuple("ProduceRequestPayload",
    ["topic", "partition", "messages"])

ProduceResponsePayload = namedtuple("ProduceResponsePayload",
    ["topic", "partition", "error", "offset"])

# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-FetchAPI
FetchRequestPayload = namedtuple("FetchRequestPayload",
    ["topic", "partition", "offset", "max_bytes"])

FetchResponsePayload = namedtuple("FetchResponsePayload",
    ["topic", "partition", "error", "highwaterMark", "messages"])

# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI
OffsetRequestPayload = namedtuple("OffsetRequestPayload",
    ["topic", "partition", "time", "max_offsets"])

ListOffsetRequestPayload = namedtuple("ListOffsetRequestPayload",
    ["topic", "partition", "time"])

OffsetResponsePayload = namedtuple("OffsetResponsePayload",
    ["topic", "partition", "error", "offsets"])

ListOffsetResponsePayload = namedtuple("ListOffsetResponsePayload",
    ["topic", "partition", "error", "timestamp", "offset"])

# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetCommit/FetchAPI
OffsetCommitRequestPayload = namedtuple("OffsetCommitRequestPayload",
    ["topic", "partition", "offset", "metadata"])

OffsetCommitResponsePayload = namedtuple("OffsetCommitResponsePayload",
    ["topic", "partition", "error"])

OffsetFetchRequestPayload = namedtuple("OffsetFetchRequestPayload",
    ["topic", "partition"])

OffsetFetchResponsePayload = namedtuple("OffsetFetchResponsePayload",
    ["topic", "partition", "offset", "metadata", "error"])


# Other useful structs (still used by the modern client)
TopicPartition = namedtuple("TopicPartition",
    ["topic", "partition"])

BrokerMetadata = namedtuple("BrokerMetadata",
    ["nodeId", "host", "port", "rack"])

PartitionMetadata = namedtuple("PartitionMetadata",
    ["topic", "partition", "leader", "replicas", "isr", "error"])

OffsetAndMetadata = namedtuple("OffsetAndMetadata",
    ["offset", "metadata"])

OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
    ["offset", "timestamp"])


# Deprecated structs
OffsetAndMessage = namedtuple("OffsetAndMessage",
    ["offset", "message"])

Message = namedtuple("Message",
    ["magic", "attributes", "key", "value"])

KafkaMessage = namedtuple("KafkaMessage",
    ["topic", "partition", "offset", "key", "value"])


# Define retry policy for async producer
# Limit value: int >= 0, 0 means no retries
RetryOptions = namedtuple("RetryOptions",
    ["limit", "backoff_ms", "retry_on_timeouts"])
# Support legacy imports from kafka.common
from rhkafka.errors import * | /robinhood-kafka-python-1.4.3.tar.gz/robinhood-kafka-python-1.4.3/rhkafka/structs.py | 0.719876 | 0.163713 | structs.py | pypi |
from __future__ import absolute_import
import abc
import logging
import re
from rhkafka.vendor import six
from rhkafka.errors import IllegalStateError
from rhkafka.protocol.offset import OffsetResetStrategy
from rhkafka.structs import OffsetAndMetadata
log = logging.getLogger(__name__)
class SubscriptionState(object):
    """
    A class for tracking the topics, partitions, and offsets for the consumer.
    A partition is "assigned" either directly with assign_from_user() (manual
    assignment) or with assign_from_subscribed() (automatic assignment from
    subscription).
    Once assigned, the partition is not considered "fetchable" until its initial
    position has been set with seek(). Fetchable partitions track a fetch
    position which is used to set the offset of the next fetch, and a consumed
    position which is the last offset that has been returned to the user. You
    can suspend fetching from a partition through pause() without affecting the
    fetched/consumed offsets. The partition will remain unfetchable until the
    resume() is used. You can also query the pause state independently with
    is_paused().
    Note that pause state as well as fetch/consumed positions are not preserved
    when partition assignment is changed whether directly by the user or
    through a group rebalance.
    This class also maintains a cache of the latest commit position for each of
    the assigned partitions. This is updated through committed() and can be used
    to set the initial fetch position (e.g. Fetcher._reset_offset() ).
    """
    # Error message shared by all "mixed subscription modes" IllegalStateErrors
    _SUBSCRIPTION_EXCEPTION_MESSAGE = (
        "You must choose only one way to configure your consumer:"
        " (1) subscribe to specific topics by name,"
        " (2) subscribe to topics matching a regex pattern,"
        " (3) assign itself specific topic-partitions.")
    # Taken from: https://github.com/apache/kafka/blob/39eb31feaeebfb184d98cc5d94da9148c2319d81/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29
    _MAX_NAME_LENGTH = 249
    _TOPIC_LEGAL_CHARS = re.compile('^[a-zA-Z0-9._-]+$')
    def __init__(self, offset_reset_strategy='earliest'):
        """Initialize a SubscriptionState instance
        Keyword Arguments:
            offset_reset_strategy: 'earliest' or 'latest', otherwise
                exception will be raised when fetching an offset that is no
                longer available. Default: 'earliest'
        """
        try:
            offset_reset_strategy = getattr(OffsetResetStrategy,
                                            offset_reset_strategy.upper())
        except AttributeError:
            log.warning('Unrecognized offset_reset_strategy, using NONE')
            offset_reset_strategy = OffsetResetStrategy.NONE
        self._default_offset_reset_strategy = offset_reset_strategy
        self.subscription = None # set() or None
        self.subscribed_pattern = None # regex str or None
        self._group_subscription = set()
        self._user_assignment = set()
        self.assignment = dict()  # {TopicPartition: TopicPartitionState}
        self.listener = None
        # initialize to true for the consumers to fetch offset upon starting up
        self.needs_fetch_committed_offsets = True
    def subscribe(self, topics=(), pattern=None, listener=None):
        """Subscribe to a list of topics, or a topic regex pattern.
        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).
        This method is incompatible with assign_from_user()
        Arguments:
            topics (list): List of topics for subscription.
            pattern (str): Pattern to match available topics. You must provide
                either topics or pattern, but not both.
            listener (ConsumerRebalanceListener): Optionally include listener
                callback, which will be called before and after each rebalance
                operation.
                As part of group management, the consumer will keep track of
                the list of consumers that belong to a particular group and
                will trigger a rebalance operation if one of the following
                events trigger:
                * Number of partitions change for any of the subscribed topics
                * Topic is created or deleted
                * An existing member of the consumer group dies
                * A new member is added to the consumer group
                When any of these events are triggered, the provided listener
                will be invoked first to indicate that the consumer's
                assignment has been revoked, and then again when the new
                assignment has been received. Note that this listener will
                immediately override any listener set in a previous call to
                subscribe. It is guaranteed, however, that the partitions
                revoked/assigned
                through this interface are from topics subscribed in this call.
        """
        # subscribe() is mutually exclusive with assign_from_user()
        if self._user_assignment or (topics and pattern):
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        assert topics or pattern, 'Must provide topics or pattern'
        if pattern:
            log.info('Subscribing to pattern: /%s/', pattern)
            self.subscription = set()
            self.subscribed_pattern = re.compile(pattern)
        else:
            self.change_subscription(topics)
        # ConsumerRebalanceListener is defined at the bottom of this module;
        # the name is resolved at call time, so the forward reference is safe.
        if listener and not isinstance(listener, ConsumerRebalanceListener):
            raise TypeError('listener must be a ConsumerRebalanceListener')
        self.listener = listener
    def _ensure_valid_topic_name(self, topic):
        """ Ensures that the topic name is valid according to the kafka source. """
        # See Kafka Source:
        # https://github.com/apache/kafka/blob/39eb31feaeebfb184d98cc5d94da9148c2319d81/clients/src/main/java/org/apache/kafka/common/internals/Topic.java
        if topic is None:
            raise TypeError('All topics must not be None')
        if not isinstance(topic, six.string_types):
            raise TypeError('All topics must be strings')
        if len(topic) == 0:
            raise ValueError('All topics must be non-empty strings')
        if topic == '.' or topic == '..':
            raise ValueError('Topic name cannot be "." or ".."')
        if len(topic) > self._MAX_NAME_LENGTH:
            raise ValueError('Topic name is illegal, it can\'t be longer than {0} characters, topic: "{1}"'.format(self._MAX_NAME_LENGTH, topic))
        if not self._TOPIC_LEGAL_CHARS.match(topic):
            raise ValueError('Topic name "{0}" is illegal, it contains a character other than ASCII alphanumerics, ".", "_" and "-"'.format(topic))
    def change_subscription(self, topics):
        """Change the topic subscription.
        Arguments:
            topics (list of str): topics for subscription
        Raises:
            IllegalStateErrror: if assign_from_user has been used already
            TypeError: if a topic is None or a non-str
            ValueError: if a topic is an empty string or
                - a topic name is '.' or '..' or
                - a topic name does not consist of ASCII-characters/'-'/'_'/'.'
        """
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        if isinstance(topics, six.string_types):
            topics = [topics]
        if self.subscription == set(topics):
            log.warning("subscription unchanged by change_subscription(%s)",
                        topics)
            return
        # Validate all names before mutating any state
        for t in topics:
            self._ensure_valid_topic_name(t)
        log.info('Updating subscribed topics to: %s', topics)
        self.subscription = set(topics)
        self._group_subscription.update(topics)
        # Remove any assigned partitions which are no longer subscribed to
        for tp in set(self.assignment.keys()):
            if tp.topic not in self.subscription:
                del self.assignment[tp]
    def group_subscribe(self, topics):
        """Add topics to the current group subscription.
        This is used by the group leader to ensure that it receives metadata
        updates for all topics that any member of the group is subscribed to.
        Arguments:
            topics (list of str): topics to add to the group subscription
        """
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        self._group_subscription.update(topics)
    def reset_group_subscription(self):
        """Reset the group's subscription to only contain topics subscribed by this consumer."""
        if self._user_assignment:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        assert self.subscription is not None, 'Subscription required'
        self._group_subscription.intersection_update(self.subscription)
    def assign_from_user(self, partitions):
        """Manually assign a list of TopicPartitions to this consumer.
        This interface does not allow for incremental assignment and will
        replace the previous assignment (if there was one).
        Manual topic assignment through this method does not use the consumer's
        group management functionality. As such, there will be no rebalance
        operation triggered when group membership or cluster and topic metadata
        change. Note that it is not possible to use both manual partition
        assignment with assign() and group assignment with subscribe().
        Arguments:
            partitions (list of TopicPartition): assignment for this instance.
        Raises:
            IllegalStateError: if consumer has already called subscribe()
        """
        if self.subscription is not None:
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        if self._user_assignment != set(partitions):
            self._user_assignment = set(partitions)
            # Incremental diff: keep state for partitions that remain assigned,
            # add fresh state for new ones, drop the rest below.
            for partition in partitions:
                if partition not in self.assignment:
                    self._add_assigned_partition(partition)
            for tp in set(self.assignment.keys()) - self._user_assignment:
                del self.assignment[tp]
            self.needs_fetch_committed_offsets = True
    def assign_from_subscribed(self, assignments):
        """Update the assignment to the specified partitions
        This method is called by the coordinator to dynamically assign
        partitions based on the consumer's topic subscription. This is different
        from assign_from_user() which directly sets the assignment from a
        user-supplied TopicPartition list.
        Arguments:
            assignments (list of TopicPartition): partitions to assign to this
                consumer instance.
        """
        if not self.partitions_auto_assigned():
            raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
        for tp in assignments:
            if tp.topic not in self.subscription:
                raise ValueError("Assigned partition %s for non-subscribed topic." % str(tp))
        # after rebalancing, we always reinitialize the assignment state
        self.assignment.clear()
        for tp in assignments:
            self._add_assigned_partition(tp)
        self.needs_fetch_committed_offsets = True
        log.info("Updated partition assignment: %s", assignments)
    def unsubscribe(self):
        """Clear all topic subscriptions and partition assignments"""
        # NOTE(review): _group_subscription is not cleared here -- confirm
        # this is intentional (it is reset via reset_group_subscription()).
        self.subscription = None
        self._user_assignment.clear()
        self.assignment.clear()
        self.subscribed_pattern = None
    def group_subscription(self):
        """Get the topic subscription for the group.
        For the leader, this will include the union of all member subscriptions.
        For followers, it is the member's subscription only.
        This is used when querying topic metadata to detect metadata changes
        that would require rebalancing (the leader fetches metadata for all
        topics in the group so that it can do partition assignment).
        Returns:
            set: topics
        """
        return self._group_subscription
    def seek(self, partition, offset):
        """Manually specify the fetch offset for a TopicPartition.
        Overrides the fetch offsets that the consumer will use on the next
        poll(). If this API is invoked for the same partition more than once,
        the latest offset will be used on the next poll(). Note that you may
        lose data if this API is arbitrarily used in the middle of consumption,
        to reset the fetch offsets.
        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition
        """
        # Raises KeyError if the partition is not currently assigned
        self.assignment[partition].seek(offset)
    def assigned_partitions(self):
        """Return set of TopicPartitions in current assignment."""
        return set(self.assignment.keys())
    def paused_partitions(self):
        """Return current set of paused TopicPartitions."""
        return set(partition for partition in self.assignment
                   if self.is_paused(partition))
    def fetchable_partitions(self):
        """Return set of TopicPartitions that should be Fetched."""
        fetchable = set()
        for partition, state in six.iteritems(self.assignment):
            if state.is_fetchable():
                fetchable.add(partition)
        return fetchable
    def partitions_auto_assigned(self):
        """Return True unless user supplied partitions manually."""
        return self.subscription is not None
    def all_consumed_offsets(self):
        """Returns consumed offsets as {TopicPartition: OffsetAndMetadata}"""
        all_consumed = {}
        for partition, state in six.iteritems(self.assignment):
            if state.has_valid_position:
                all_consumed[partition] = OffsetAndMetadata(state.position, '')
        return all_consumed
    def need_offset_reset(self, partition, offset_reset_strategy=None):
        """Mark partition for offset reset using specified or default strategy.
        Arguments:
            partition (TopicPartition): partition to mark
            offset_reset_strategy (OffsetResetStrategy, optional)
        """
        if offset_reset_strategy is None:
            offset_reset_strategy = self._default_offset_reset_strategy
        self.assignment[partition].await_reset(offset_reset_strategy)
    def has_default_offset_reset_policy(self):
        """Return True if default offset reset policy is Earliest or Latest"""
        return self._default_offset_reset_strategy != OffsetResetStrategy.NONE
    def is_offset_reset_needed(self, partition):
        # True while an offset reset is pending for this assigned partition
        return self.assignment[partition].awaiting_reset
    def has_all_fetch_positions(self):
        # True only if every assigned partition has a valid fetch position
        for state in self.assignment.values():
            if not state.has_valid_position:
                return False
        return True
    def missing_fetch_positions(self):
        # Set of assigned partitions that still need a fetch position
        missing = set()
        for partition, state in six.iteritems(self.assignment):
            if not state.has_valid_position:
                missing.add(partition)
        return missing
    def is_assigned(self, partition):
        return partition in self.assignment
    def is_paused(self, partition):
        return partition in self.assignment and self.assignment[partition].paused
    def is_fetchable(self, partition):
        return partition in self.assignment and self.assignment[partition].is_fetchable()
    def pause(self, partition):
        self.assignment[partition].pause()
    def resume(self, partition):
        self.assignment[partition].resume()
    def _add_assigned_partition(self, partition):
        # Fresh state: no position, not paused, not awaiting reset
        self.assignment[partition] = TopicPartitionState()
class TopicPartitionState(object):
    """Per-partition fetch state: position, pause flag, and pending reset."""

    def __init__(self):
        self.committed = None  # last committed position
        self.has_valid_position = False  # whether we have a valid fetch position
        self.paused = False  # whether fetching was paused by the user
        self.awaiting_reset = False  # whether an offset reset is pending
        self.reset_strategy = None  # reset strategy, set while awaiting_reset
        self._position = None  # offset exposed to the user
        self.highwater = None
        self.drop_pending_message_set = False

    @property
    def position(self):
        """Last position exposed to the user."""
        return self._position

    @position.setter
    def position(self, offset):
        # A position may only be moved once one has been established via seek()
        assert self.has_valid_position, 'Valid position required'
        self._position = offset

    def await_reset(self, strategy):
        """Invalidate the current position and mark a pending reset."""
        self.awaiting_reset = True
        self.reset_strategy = strategy
        self._position = None
        self.has_valid_position = False

    def seek(self, offset):
        """Establish a fetch position, cancelling any pending reset."""
        self._position = offset
        self.awaiting_reset = False
        self.reset_strategy = None
        self.has_valid_position = True
        # Signal iterators to discard any buffered records from before the seek
        self.drop_pending_message_set = True

    def pause(self):
        self.paused = True

    def resume(self):
        self.paused = False

    def is_fetchable(self):
        """Fetchable iff a position is established and the user has not paused."""
        return self.has_valid_position and not self.paused
class ConsumerRebalanceListener(object):
    """
    A callback interface that the user can implement to trigger custom actions
    when the set of partitions assigned to the consumer changes.
    This is applicable when the consumer is having Kafka auto-manage group
    membership. If the consumer's directly assign partitions, those
    partitions will never be reassigned and this callback is not applicable.
    When Kafka is managing the group membership, a partition re-assignment will
    be triggered any time the members of the group changes or the subscription
    of the members changes. This can occur when processes die, new process
    instances are added or old instances come back to life after failure.
    Rebalances can also be triggered by changes affecting the subscribed
    topics (e.g. when then number of partitions is administratively adjusted).
    There are many uses for this functionality. One common use is saving offsets
    in a custom store. By saving offsets in the on_partitions_revoked(), call we
    can ensure that any time partition assignment changes the offset gets saved.
    Another use is flushing out any kind of cache of intermediate results the
    consumer may be keeping. For example, consider a case where the consumer is
    subscribed to a topic containing user page views, and the goal is to count
    the number of page views per users for each five minute window. Let's say
    the topic is partitioned by the user id so that all events for a particular
    user will go to a single consumer instance. The consumer can keep in memory
    a running tally of actions per user and only flush these out to a remote
    data store when its cache gets too big. However if a partition is reassigned
    it may want to automatically trigger a flush of this cache, before the new
    owner takes over consumption.
    This callback will execute in the user thread as part of the Consumer.poll()
    whenever partition assignment changes.
    It is guaranteed that all consumer processes will invoke
    on_partitions_revoked() prior to any process invoking
    on_partitions_assigned(). So if offsets or other state is saved in the
    on_partitions_revoked() call, it should be saved by the time the process
    taking over that partition has their on_partitions_assigned() callback
    called to load the state.
    """
    # NOTE: __metaclass__ is the Python 2 spelling; under Python 3 the
    # abstract methods are not enforced, but the interface contract stands.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def on_partitions_revoked(self, revoked):
        """
        A callback method the user can implement to provide handling of offset
        commits to a customized store on the start of a rebalance operation.
        This method will be called before a rebalance operation starts and
        after the consumer stops fetching data. It is recommended that offsets
        should be committed in this callback to either Kafka or a custom offset
        store to prevent duplicate data.
        NOTE: This method is only called before rebalances. It is not called
        prior to KafkaConsumer.close()
        Arguments:
            revoked (list of TopicPartition): the partitions that were assigned
                to the consumer on the last rebalance
        """
        pass

    @abc.abstractmethod
    def on_partitions_assigned(self, assigned):
        """
        A callback method the user can implement to provide handling of
        customized offsets on completion of a successful partition
        re-assignment. This method will be called after an offset re-assignment
        completes and before the consumer starts fetching data.
        It is guaranteed that all the processes in a consumer group will execute
        their on_partitions_revoked() callback before any instance executes its
        on_partitions_assigned() callback.
        Arguments:
            assigned (list of TopicPartition): the partitions assigned to the
                consumer (may include partitions that were previously assigned)
        """
        # Fix: removed dataset-extraction metadata residue that was fused onto
        # this line ("pass | /robinhood-kafka-python-... | pypi"), which made
        # the statement a syntax error.
        pass
from __future__ import absolute_import
import collections
import copy
import logging
import random
import sys
import time
from rhkafka.vendor import six
import rhkafka.errors as Errors
from rhkafka.future import Future
from rhkafka.metrics.stats import Avg, Count, Max, Rate
from rhkafka.protocol.fetch import FetchRequest
from rhkafka.protocol.offset import (
OffsetRequest, OffsetResetStrategy, UNKNOWN_OFFSET
)
from rhkafka.record import MemoryRecords
from rhkafka.serializer import Deserializer
from rhkafka.structs import TopicPartition, OffsetAndTimestamp
log = logging.getLogger(__name__)
# Isolation levels
READ_UNCOMMITTED = 0
READ_COMMITTED = 1
# One consumed message as returned to the user. Field order is part of the
# public interface -- do not reorder.
ConsumerRecord = collections.namedtuple("ConsumerRecord",
    ["topic", "partition", "offset", "timestamp", "timestamp_type",
     "key", "value", "checksum", "serialized_key_size", "serialized_value_size"])
# One partition's worth of raw FetchResponse data queued for parsing.
CompletedFetch = collections.namedtuple("CompletedFetch",
    ["topic_partition", "fetched_offset", "response_version",
     "partition_data", "metric_aggregator"])
class NoOffsetForPartitionError(Errors.KafkaError):
    """Raised when a partition has no committed offset and no reset strategy is configured."""
    pass
class RecordTooLargeError(Errors.KafkaError):
    """Raised when a message is larger than the configured max_partition_fetch_bytes."""
    pass
class Fetcher(six.Iterator):
    """Manages fetching records and offsets from Kafka brokers for the
    consumer's assigned partitions. Also iterable: yields ConsumerRecords."""
    DEFAULT_CONFIG = {
        'key_deserializer': None,
        'value_deserializer': None,
        'fetch_min_bytes': 1,
        'fetch_max_wait_ms': 500,
        'fetch_max_bytes': 52428800,  # 50 MB
        'max_partition_fetch_bytes': 1048576,  # 1 MB
        'max_poll_records': sys.maxsize,
        'check_crcs': True,
        'skip_double_compressed_messages': False,
        'iterator_refetch_records': 1, # undocumented -- interface may change
        'metric_group_prefix': 'consumer',
        'api_version': (0, 8, 0),
        'retry_backoff_ms': 100
    }
def __init__(self, client, subscriptions, metrics, **configs):
"""Initialize a Kafka Message Fetcher.
Keyword Arguments:
key_deserializer (callable): Any callable that takes a
raw message key and returns a deserialized key.
value_deserializer (callable, optional): Any callable that takes a
raw message value and returns a deserialized value.
fetch_min_bytes (int): Minimum amount of data the server should
return for a fetch request, otherwise wait up to
fetch_max_wait_ms for more data to accumulate. Default: 1.
fetch_max_wait_ms (int): The maximum amount of time in milliseconds
the server will block before answering the fetch request if
there isn't sufficient data to immediately satisfy the
requirement given by fetch_min_bytes. Default: 500.
fetch_max_bytes (int): The maximum amount of data the server should
return for a fetch request. This is not an absolute maximum, if
the first message in the first non-empty partition of the fetch
is larger than this value, the message will still be returned
to ensure that the consumer can make progress. NOTE: consumer
performs fetches to multiple brokers in parallel so memory
usage will depend on the number of brokers containing
partitions for the topic.
Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 MB).
max_partition_fetch_bytes (int): The maximum amount of data
per-partition the server will return. The maximum total memory
used for a request = #partitions * max_partition_fetch_bytes.
This size must be at least as large as the maximum message size
the server allows or else it is possible for the producer to
send messages larger than the consumer can fetch. If that
happens, the consumer can get stuck trying to fetch a large
message on a certain partition. Default: 1048576.
check_crcs (bool): Automatically check the CRC32 of the records
consumed. This ensures no on-the-wire or on-disk corruption to
the messages occurred. This check adds some overhead, so it may
be disabled in cases seeking extreme performance. Default: True
skip_double_compressed_messages (bool): A bug in KafkaProducer
caused some messages to be corrupted via double-compression.
By default, the fetcher will return the messages as a compressed
blob of bytes with a single offset, i.e. how the message was
actually published to the cluster. If you prefer to have the
fetcher automatically detect corrupt messages and skip them,
set this option to True. Default: False.
"""
self.config = copy.copy(self.DEFAULT_CONFIG)
for key in self.config:
if key in configs:
self.config[key] = configs[key]
self._client = client
self._subscriptions = subscriptions
self._completed_fetches = collections.deque() # Unparsed responses
self._next_partition_records = None # Holds a single PartitionRecords until fully consumed
self._iterator = None
self._fetch_futures = collections.deque()
self._sensors = FetchManagerMetrics(metrics, self.config['metric_group_prefix'])
self._isolation_level = READ_UNCOMMITTED
    def send_fetches(self):
        """Send FetchRequests for all assigned partitions that do not already have
        an in-flight fetch or pending fetch data.
        Returns:
            List of Futures: each future resolves to a FetchResponse
        """
        futures = []
        for node_id, request in six.iteritems(self._create_fetch_requests()):
            # Only send to nodes with an established, ready connection
            if self._client.ready(node_id):
                log.debug("Sending FetchRequest to node %s", node_id)
                future = self._client.send(node_id, request)
                future.add_callback(self._handle_fetch_response, request, time.time())
                # log.error is used directly as the errback; the exception is
                # appended by the Future as the final %s argument
                future.add_errback(log.error, 'Fetch to node %s failed: %s', node_id)
                futures.append(future)
        self._fetch_futures.extend(futures)
        self._clean_done_fetch_futures()
        return futures
def reset_offsets_if_needed(self, partitions):
"""Lookup and set offsets for any partitions which are awaiting an
explicit reset.
Arguments:
partitions (set of TopicPartitions): the partitions to reset
"""
for tp in partitions:
# TODO: If there are several offsets to reset, we could submit offset requests in parallel
if self._subscriptions.is_assigned(tp) and self._subscriptions.is_offset_reset_needed(tp):
self._reset_offset(tp)
def _clean_done_fetch_futures(self):
while True:
if not self._fetch_futures:
break
if not self._fetch_futures[0].is_done:
break
self._fetch_futures.popleft()
def in_flight_fetches(self):
"""Return True if there are any unprocessed FetchRequests in flight."""
self._clean_done_fetch_futures()
return bool(self._fetch_futures)
    def update_fetch_positions(self, partitions):
        """Update the fetch positions for the provided partitions.
        Arguments:
            partitions (list of TopicPartitions): partitions to update
        Raises:
            NoOffsetForPartitionError: if no offset is stored for a given
                partition and no reset policy is available
        """
        # reset the fetch position to the committed position
        for tp in partitions:
            if not self._subscriptions.is_assigned(tp):
                log.warning("partition %s is not assigned - skipping offset"
                            " update", tp)
                continue
            # a fetchable partition already has a valid position -- nothing to do
            elif self._subscriptions.is_fetchable(tp):
                log.warning("partition %s is still fetchable -- skipping offset"
                            " update", tp)
                continue
            if self._subscriptions.is_offset_reset_needed(tp):
                self._reset_offset(tp)
            elif self._subscriptions.assignment[tp].committed is None:
                # there's no committed position, so we need to reset with the
                # default strategy
                self._subscriptions.need_offset_reset(tp)
                self._reset_offset(tp)
            else:
                committed = self._subscriptions.assignment[tp].committed
                log.debug("Resetting offset for partition %s to the committed"
                          " offset %s", tp, committed)
                self._subscriptions.seek(tp, committed)
def get_offsets_by_times(self, timestamps, timeout_ms):
offsets = self._retrieve_offsets(timestamps, timeout_ms)
for tp in timestamps:
if tp not in offsets:
offsets[tp] = None
else:
offset, timestamp = offsets[tp]
offsets[tp] = OffsetAndTimestamp(offset, timestamp)
return offsets
    def beginning_offsets(self, partitions, timeout_ms):
        """Return {TopicPartition: int} of the earliest available offsets."""
        return self.beginning_or_end_offset(
            partitions, OffsetResetStrategy.EARLIEST, timeout_ms)
    def end_offsets(self, partitions, timeout_ms):
        """Return {TopicPartition: int} of the latest available offsets."""
        return self.beginning_or_end_offset(
            partitions, OffsetResetStrategy.LATEST, timeout_ms)
def beginning_or_end_offset(self, partitions, timestamp, timeout_ms):
timestamps = dict([(tp, timestamp) for tp in partitions])
offsets = self._retrieve_offsets(timestamps, timeout_ms)
for tp in timestamps:
offsets[tp] = offsets[tp][0]
return offsets
    def _reset_offset(self, partition):
        """Reset offsets for the given partition using the offset reset strategy.
        Arguments:
            partition (TopicPartition): the partition that needs reset offset
        Raises:
            NoOffsetForPartitionError: if no offset reset strategy is defined
        """
        timestamp = self._subscriptions.assignment[partition].reset_strategy
        if timestamp is OffsetResetStrategy.EARLIEST:
            strategy = 'earliest'
        elif timestamp is OffsetResetStrategy.LATEST:
            strategy = 'latest'
        else:
            raise NoOffsetForPartitionError(partition)
        log.debug("Resetting offset for partition %s to %s offset.",
                  partition, strategy)
        # NOTE(review): no timeout is forwarded here, so _retrieve_offsets
        # uses its default of float("inf") and may block indefinitely -- confirm
        offsets = self._retrieve_offsets({partition: timestamp})
        if partition not in offsets:
            raise NoOffsetForPartitionError(partition)
        offset = offsets[partition][0]
        # we might lose the assignment while fetching the offset,
        # so check it is still active
        if self._subscriptions.is_assigned(partition):
            self._subscriptions.seek(partition, offset)
    def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
        """Fetch offset for each partition passed in ``timestamps`` map.
        Blocks until offsets are obtained, a non-retriable exception is raised
        or ``timeout_ms`` passed.
        Arguments:
            timestamps: {TopicPartition: int} dict with timestamps to fetch
                offsets by. -1 for the latest available, -2 for the earliest
                available. Otherwise timestamp is treated as epoch miliseconds.
        Returns:
            {TopicPartition: (int, int)}: Mapping of partition to
                retrieved offset and timestamp. If offset does not exist for
                the provided timestamp, that partition will be missing from
                this mapping.
        Raises:
            KafkaTimeoutError: if offsets could not be obtained in timeout_ms
        """
        if not timestamps:
            return {}
        start_time = time.time()
        remaining_ms = timeout_ms
        while remaining_ms > 0:
            future = self._send_offset_requests(timestamps)
            self._client.poll(future=future, timeout_ms=remaining_ms)
            if future.succeeded():
                return future.value
            if not future.retriable():
                raise future.exception # pylint: disable-msg=raising-bad-type
            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms
            if remaining_ms < 0:
                break
            # Retriable failure: either wait for a metadata refresh (if the
            # failure was stale/missing metadata) or back off before retrying
            if future.exception.invalid_metadata:
                refresh_future = self._client.cluster.request_update()
                self._client.poll(future=refresh_future, timeout_ms=remaining_ms)
            else:
                time.sleep(self.config['retry_backoff_ms'] / 1000.0)
            # Recompute the budget after the refresh/backoff wait
            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms
        raise Errors.KafkaTimeoutError(
            "Failed to get offsets by timestamps in %s ms" % timeout_ms)
    def fetched_records(self, max_records=None):
        """Returns previously fetched records and updates consumed offsets.
        Arguments:
            max_records (int): Maximum number of records returned. Defaults
                to max_poll_records configuration.
        Raises:
            OffsetOutOfRangeError: if no subscription offset_reset_strategy
            CorruptRecordException: if message crc validation fails (check_crcs
                must be set to True)
            RecordTooLargeError: if a message is larger than the currently
                configured max_partition_fetch_bytes
            TopicAuthorizationError: if consumer is not authorized to fetch
                messages from the topic
        Returns: (records (dict), partial (bool))
            records: {TopicPartition: [messages]}
            partial: True if records returned did not fully drain any pending
                partition requests. This may be useful for choosing when to
                pipeline additional fetch requests.
        """
        if max_records is None:
            max_records = self.config['max_poll_records']
        assert max_records > 0
        drained = collections.defaultdict(list)
        records_remaining = max_records
        while records_remaining > 0:
            # Alternate between parsing the next completed fetch and draining
            # the current partition's records into `drained`
            if not self._next_partition_records:
                if not self._completed_fetches:
                    break
                completion = self._completed_fetches.popleft()
                self._next_partition_records = self._parse_fetched_data(completion)
            else:
                records_remaining -= self._append(drained,
                                                  self._next_partition_records,
                                                  records_remaining)
        return dict(drained), bool(self._completed_fetches)
    def _append(self, drained, part, max_records):
        """Move up to max_records from `part` into the `drained` dict and
        advance the consumed position. Returns the number of records moved."""
        if not part:
            return 0
        tp = part.topic_partition
        fetch_offset = part.fetch_offset
        if not self._subscriptions.is_assigned(tp):
            # this can happen when a rebalance happened before
            # fetched records are returned to the consumer's poll call
            log.debug("Not returning fetched records for partition %s"
                      " since it is no longer assigned", tp)
        else:
            # note that the position should always be available
            # as long as the partition is still assigned
            position = self._subscriptions.assignment[tp].position
            if not self._subscriptions.is_fetchable(tp):
                # this can happen when a partition is paused before
                # fetched records are returned to the consumer's poll call
                log.debug("Not returning fetched records for assigned partition"
                          " %s since it is no longer fetchable", tp)
            elif fetch_offset == position:
                # we are ensured to have at least one record since we already checked for emptiness
                part_records = part.take(max_records)
                next_offset = part_records[-1].offset + 1
                # level 0 is below DEBUG: effectively trace-level logging
                log.log(0, "Returning fetched records at offset %d for assigned"
                           " partition %s and update position to %s", position,
                        tp, next_offset)
                for record in part_records:
                    drained[tp].append(record)
                self._subscriptions.assignment[tp].position = next_offset
                return len(part_records)
            else:
                # these records aren't next in line based on the last consumed
                # position, ignore them they must be from an obsolete request
                log.debug("Ignoring fetched records for %s at offset %s since"
                          " the current position is %d", tp, part.fetch_offset,
                          position)
        # Nothing was drained: discard the buffered records for this part
        part.discard()
        return 0
    def _message_generator(self):
        """Iterate over fetched_records, yielding one ConsumerRecord at a time
        and advancing the consumed position as records are yielded."""
        while self._next_partition_records or self._completed_fetches:
            if not self._next_partition_records:
                completion = self._completed_fetches.popleft()
                self._next_partition_records = self._parse_fetched_data(completion)
                continue
            # Send additional FetchRequests when the internal queue is low
            # this should enable moderate pipelining
            if len(self._completed_fetches) <= self.config['iterator_refetch_records']:
                self.send_fetches()
            tp = self._next_partition_records.topic_partition
            # We can ignore any prior signal to drop pending message sets
            # because we are starting from a fresh one where fetch_offset == position
            # i.e., the user seek()'d to this position
            self._subscriptions.assignment[tp].drop_pending_message_set = False
            for msg in self._next_partition_records.take():
                # Because we are in a generator, it is possible for
                # subscription state to change between yield calls
                # so we need to re-check on each loop
                # this should catch assignment changes, pauses
                # and resets via seek_to_beginning / seek_to_end
                if not self._subscriptions.is_fetchable(tp):
                    log.debug("Not returning fetched records for partition %s"
                              " since it is no longer fetchable", tp)
                    self._next_partition_records = None
                    break
                # If there is a seek during message iteration,
                # we should stop unpacking this message set and
                # wait for a new fetch response that aligns with the
                # new seek position
                elif self._subscriptions.assignment[tp].drop_pending_message_set:
                    log.debug("Skipping remainder of message set for partition %s", tp)
                    self._subscriptions.assignment[tp].drop_pending_message_set = False
                    self._next_partition_records = None
                    break
                # Compressed messagesets may include earlier messages
                elif msg.offset < self._subscriptions.assignment[tp].position:
                    log.debug("Skipping message offset: %s (expecting %s)",
                              msg.offset,
                              self._subscriptions.assignment[tp].position)
                    continue
                self._subscriptions.assignment[tp].position = msg.offset + 1
                yield msg
            # Partition records exhausted (or aborted above): fetch more
            self._next_partition_records = None
    def _unpack_message_set(self, tp, records):
        """Generator yielding deserialized ConsumerRecords from a raw
        MemoryRecords batch sequence for TopicPartition `tp`."""
        try:
            batch = records.next_batch()
            while batch is not None:
                for record in batch:
                    # Sizes are measured on the raw (serialized) bytes; -1
                    # mirrors the Java client's convention for null keys/values
                    key_size = len(record.key) if record.key is not None else -1
                    value_size = len(record.value) if record.value is not None else -1
                    key = self._deserialize(
                        self.config['key_deserializer'],
                        tp.topic, record.key)
                    value = self._deserialize(
                        self.config['value_deserializer'],
                        tp.topic, record.value)
                    yield ConsumerRecord(
                        tp.topic, tp.partition, record.offset, record.timestamp,
                        record.timestamp_type, key, value, record.checksum,
                        key_size, value_size)
                batch = records.next_batch()
        # If unpacking raises StopIteration, it is erroneously
        # caught by the generator. We want all exceptions to be raised
        # back to the user. See Issue 545
        except StopIteration as e:
            log.exception('StopIteration raised unpacking messageset')
            raise RuntimeError('StopIteration raised unpacking messageset')
def __iter__(self):  # pylint: disable=non-iterator-returned
    # The fetcher is its own iterator; __next__ drives _message_generator.
    return self
def __next__(self):
    """Return the next message, lazily (re)creating the generator."""
    iterator = self._iterator
    if not iterator:
        iterator = self._iterator = self._message_generator()
    try:
        return next(iterator)
    except StopIteration:
        # Drop the exhausted generator so a later call starts a fresh one.
        self._iterator = None
        raise
def _deserialize(self, f, topic, bytes_):
if not f:
return bytes_
if isinstance(f, Deserializer):
return f.deserialize(topic, bytes_)
return f(bytes_)
def _send_offset_requests(self, timestamps):
    """Fetch offsets for each partition in timestamps dict. This may send
    request to multiple nodes, based on who is Leader for partition.

    Arguments:
        timestamps (dict): {TopicPartition: int} mapping of fetching
            timestamps.

    Returns:
        Future: resolves to a mapping of retrieved offsets
    """
    # Group partitions by their current leader so one request goes to
    # each broker.
    timestamps_by_node = collections.defaultdict(dict)
    for partition, timestamp in six.iteritems(timestamps):
        node_id = self._client.cluster.leader_for_partition(partition)
        if node_id is None:
            # Unknown partition: subscribe its topic and fail fast so the
            # caller can retry after a metadata refresh.
            self._client.add_topic(partition.topic)
            log.debug("Partition %s is unknown for fetching offset,"
                      " wait for metadata refresh", partition)
            return Future().failure(Errors.StaleMetadata(partition))
        elif node_id == -1:
            log.debug("Leader for partition %s unavailable for fetching "
                      "offset, wait for metadata refresh", partition)
            return Future().failure(
                Errors.LeaderNotAvailableError(partition))
        else:
            timestamps_by_node[node_id][partition] = timestamp

    # Aggregate results until we have all
    list_offsets_future = Future()
    responses = []
    node_count = len(timestamps_by_node)

    def on_success(value):
        # Collect each node's response; resolve only once all arrived.
        responses.append(value)
        if len(responses) == node_count:
            offsets = {}
            for r in responses:
                offsets.update(r)
            list_offsets_future.success(offsets)

    def on_fail(err):
        # First failure wins; the future must not be resolved twice.
        if not list_offsets_future.is_done:
            list_offsets_future.failure(err)

    for node_id, timestamps in six.iteritems(timestamps_by_node):
        _f = self._send_offset_request(node_id, timestamps)
        _f.add_callback(on_success)
        _f.add_errback(on_fail)
    return list_offsets_future
def _send_offset_request(self, node_id, timestamps):
    """Send a ListOffsets request for the given partitions to one node.

    Arguments:
        node_id (int): broker to send the request to
        timestamps (dict): {TopicPartition: int} target timestamps

    Returns:
        Future: resolves via _handle_offset_response, or fails on
            network error.
    """
    # v1 (api_version >= 0.10.1) dropped the max_num_offsets field from
    # the per-partition tuple; resolve the request version once instead
    # of re-checking the loop-invariant config for every partition.
    version = 1 if self.config['api_version'] >= (0, 10, 1) else 0
    by_topic = collections.defaultdict(list)
    for tp, timestamp in six.iteritems(timestamps):
        if version == 1:
            data = (tp.partition, timestamp)
        else:
            data = (tp.partition, timestamp, 1)  # max_num_offsets=1
        by_topic[tp.topic].append(data)

    request = OffsetRequest[version](-1, list(six.iteritems(by_topic)))

    # Client returns a future that only fails on network issues
    # so create a separate future and attach a callback to update it
    # based on response error codes
    future = Future()

    _f = self._client.send(node_id, request)
    _f.add_callback(self._handle_offset_response, future)
    _f.add_errback(lambda e: future.failure(e))
    return future
def _handle_offset_response(self, future, response):
    """Callback for the response of the list offset call above.

    Arguments:
        future (Future): the future to update based on response
        response (OffsetResponse): response from the server

    Raises:
        AssertionError: if response does not match partition
    """
    # Accumulates {TopicPartition: (offset, timestamp)}; timestamp is
    # None for v0 responses, which do not carry one.
    timestamp_offset_map = {}
    for topic, part_data in response.topics:
        for partition_info in part_data:
            partition, error_code = partition_info[:2]
            partition = TopicPartition(topic, partition)
            error_type = Errors.for_code(error_code)
            if error_type is Errors.NoError:
                if response.API_VERSION == 0:
                    # v0 returns a (possibly empty) list of offsets
                    offsets = partition_info[2]
                    assert len(offsets) <= 1, 'Expected OffsetResponse with one offset'
                    if not offsets:
                        offset = UNKNOWN_OFFSET
                    else:
                        offset = offsets[0]
                    log.debug("Handling v0 ListOffsetResponse response for %s. "
                              "Fetched offset %s", partition, offset)
                    if offset != UNKNOWN_OFFSET:
                        timestamp_offset_map[partition] = (offset, None)
                else:
                    # v1+ returns a single (timestamp, offset) pair
                    timestamp, offset = partition_info[2:]
                    log.debug("Handling ListOffsetResponse response for %s. "
                              "Fetched offset %s, timestamp %s",
                              partition, offset, timestamp)
                    if offset != UNKNOWN_OFFSET:
                        timestamp_offset_map[partition] = (offset, timestamp)
            elif error_type is Errors.UnsupportedForMessageFormatError:
                # The message format on the broker side is before 0.10.0,
                # we simply put None in the response.
                log.debug("Cannot search by timestamp for partition %s because the"
                          " message format version is before 0.10.0", partition)
            elif error_type is Errors.NotLeaderForPartitionError:
                # Retriable: leadership moved; fail so callers refresh metadata.
                log.debug("Attempt to fetch offsets for partition %s failed due"
                          " to obsolete leadership information, retrying.",
                          partition)
                future.failure(error_type(partition))
                return
            elif error_type is Errors.UnknownTopicOrPartitionError:
                log.warning("Received unknown topic or partition error in ListOffset "
                            "request for partition %s. The topic/partition " +
                            "may not exist or the user may not have Describe access "
                            "to it.", partition)
                future.failure(error_type(partition))
                return
            else:
                log.warning("Attempt to fetch offsets for partition %s failed due to:"
                            " %s", partition, error_type)
                future.failure(error_type(partition))
                return
    # Only resolve successfully if no error branch failed the future above.
    if not future.is_done:
        future.success(timestamp_offset_map)
def _fetchable_partitions(self):
    """Return assigned partitions minus those with buffered/pending data."""
    fetchable = self._subscriptions.fetchable_partitions()
    # Do not fetch a partition that already has a fetch response waiting
    # to be processed, so duplicate in-flight data does not pile up.
    in_progress = self._next_partition_records
    if in_progress:
        fetchable.discard(in_progress.topic_partition)
    # Iterate a snapshot, since the completed-fetch deque may be mutated
    # while we walk it.
    for completed in copy.copy(self._completed_fetches):
        fetchable.discard(completed.topic_partition)
    return fetchable
def _create_fetch_requests(self):
    """Create fetch requests for all assigned partitions, grouped by node.

    FetchRequests skipped if no leader, or node has requests in flight

    Returns:
        dict: {node_id: FetchRequest, ...} (version depends on api_version)
    """
    # create the fetch info as a dict of lists of partition info tuples
    # which can be passed to FetchRequest() via .items()
    fetchable = collections.defaultdict(lambda: collections.defaultdict(list))

    for partition in self._fetchable_partitions():
        node_id = self._client.cluster.leader_for_partition(partition)
        # The next offset we want is the current consumed position.
        position = self._subscriptions.assignment[partition].position

        # fetch if there is a leader and no in-flight requests
        if node_id is None or node_id == -1:
            log.debug("No leader found for partition %s."
                      " Requesting metadata update", partition)
            self._client.cluster.request_update()

        elif self._client.in_flight_request_count(node_id) == 0:
            partition_info = (
                partition.partition,
                position,
                self.config['max_partition_fetch_bytes']
            )
            fetchable[node_id][partition.topic].append(partition_info)
            log.debug("Adding fetch request for partition %s at offset %d",
                      partition, position)
        else:
            # NOTE(review): logged at level 0 (below DEBUG), so this is
            # normally invisible -- presumably intentional to avoid log
            # spam on busy connections; confirm before changing.
            log.log(0, "Skipping fetch for partition %s because there is an inflight request to node %s",
                    partition, node_id)

    # Pick the newest FetchRequest version the broker supports.
    if self.config['api_version'] >= (0, 11, 0):
        version = 4
    elif self.config['api_version'] >= (0, 10, 1):
        version = 3
    elif self.config['api_version'] >= (0, 10):
        version = 2
    elif self.config['api_version'] == (0, 9):
        version = 1
    else:
        version = 0
    requests = {}
    for node_id, partition_data in six.iteritems(fetchable):
        if version < 3:
            requests[node_id] = FetchRequest[version](
                -1,  # replica_id
                self.config['fetch_max_wait_ms'],
                self.config['fetch_min_bytes'],
                partition_data.items())
        else:
            # As of version == 3 partitions will be returned in order as
            # they are requested, so to avoid starvation with
            # `fetch_max_bytes` option we need this shuffle
            # NOTE: we do have partition_data in random order due to usage
            # of unordered structures like dicts, but that does not
            # guarantee equal distribution, and starting in Python3.6
            # dicts retain insert order.
            partition_data = list(partition_data.items())
            random.shuffle(partition_data)
            if version == 3:
                requests[node_id] = FetchRequest[version](
                    -1,  # replica_id
                    self.config['fetch_max_wait_ms'],
                    self.config['fetch_min_bytes'],
                    self.config['fetch_max_bytes'],
                    partition_data)
            else:
                # v4+ additionally carries the isolation level.
                requests[node_id] = FetchRequest[version](
                    -1,  # replica_id
                    self.config['fetch_max_wait_ms'],
                    self.config['fetch_min_bytes'],
                    self.config['fetch_max_bytes'],
                    self._isolation_level,
                    partition_data)
    return requests
def _handle_fetch_response(self, request, send_time, response):
    """The callback for fetch completion"""
    # Remember the offset each partition was requested at, so parsing can
    # later verify the response still matches the consumed position.
    fetch_offsets = dict(
        (TopicPartition(topic, pdata[0]), pdata[1])
        for topic, partitions in request.topics
        for pdata in partitions)

    all_partitions = set(
        TopicPartition(topic, pdata[0])
        for topic, partitions in response.topics
        for pdata in partitions)
    metric_aggregator = FetchResponseMetricAggregator(self._sensors,
                                                      all_partitions)

    # randomized ordering should improve balance for short-lived consumers
    random.shuffle(response.topics)
    for topic, partitions in response.topics:
        random.shuffle(partitions)
        for pdata in partitions:
            tp = TopicPartition(topic, pdata[0])
            self._completed_fetches.append(CompletedFetch(
                tp, fetch_offsets[tp],
                response.API_VERSION,
                pdata[1:],
                metric_aggregator))

    if response.API_VERSION >= 1:
        self._sensors.fetch_throttle_time_sensor.record(response.throttle_time_ms)
    self._sensors.fetch_latency.record((time.time() - send_time) * 1000)
def _parse_fetched_data(self, completed_fetch):
    """Convert one CompletedFetch into PartitionRecords, handling errors.

    Returns:
        PartitionRecords or None: None when the response is stale or the
            partition is no longer fetchable.

    Raises:
        RecordTooLargeError, OffsetOutOfRangeError,
        TopicAuthorizationFailedError, or the mapped broker error.

    Fetch-level metrics are always recorded via the fetch's aggregator.
    """
    tp = completed_fetch.topic_partition
    fetch_offset = completed_fetch.fetched_offset
    num_bytes = 0
    records_count = 0
    parsed_records = None

    error_code, highwater = completed_fetch.partition_data[:2]
    error_type = Errors.for_code(error_code)

    try:
        if not self._subscriptions.is_fetchable(tp):
            # this can happen when a rebalance happened or a partition
            # consumption paused while fetch is still in-flight
            log.debug("Ignoring fetched records for partition %s"
                      " since it is no longer fetchable", tp)

        elif error_type is Errors.NoError:
            self._subscriptions.assignment[tp].highwater = highwater

            # we are interested in this fetch only if the beginning
            # offset (of the *request*) matches the current consumed position
            # Note that the *response* may return a messageset that starts
            # earlier (e.g., compressed messages) or later (e.g., compacted topic)
            position = self._subscriptions.assignment[tp].position
            if position is None or position != fetch_offset:
                log.debug("Discarding fetch response for partition %s"
                          " since its offset %d does not match the"
                          " expected offset %d", tp, fetch_offset,
                          position)
                return None

            records = MemoryRecords(completed_fetch.partition_data[-1])
            if records.has_next():
                log.debug("Adding fetched record for partition %s with"
                          " offset %d to buffered record list", tp,
                          position)
                unpacked = list(self._unpack_message_set(tp, records))
                parsed_records = self.PartitionRecords(fetch_offset, tp, unpacked)
                # BUGFIX: a batch may unpack to zero records; guard before
                # touching the last element to avoid an IndexError.
                if unpacked:
                    last_offset = unpacked[-1].offset
                    self._sensors.records_fetch_lag.record(highwater - last_offset)
                num_bytes = records.valid_bytes()
                records_count = len(unpacked)
            elif records.size_in_bytes() > 0:
                # we did not read a single message from a non-empty
                # buffer because that message's size is larger than
                # fetch size, in this case record this exception
                record_too_large_partitions = {tp: fetch_offset}
                raise RecordTooLargeError(
                    "There are some messages at [Partition=Offset]: %s "
                    " whose size is larger than the fetch size %s"
                    " and hence cannot be ever returned."
                    " Increase the fetch size, or decrease the maximum message"
                    " size the broker will allow." % (
                        record_too_large_partitions,
                        self.config['max_partition_fetch_bytes']),
                    record_too_large_partitions)
            self._sensors.record_topic_fetch_metrics(tp.topic, num_bytes, records_count)

        elif error_type in (Errors.NotLeaderForPartitionError,
                            Errors.UnknownTopicOrPartitionError):
            self._client.cluster.request_update()
        elif error_type is Errors.OffsetOutOfRangeError:
            position = self._subscriptions.assignment[tp].position
            if position is None or position != fetch_offset:
                log.debug("Discarding stale fetch response for partition %s"
                          " since the fetched offset %d does not match the"
                          " current offset %d", tp, fetch_offset, position)
            elif self._subscriptions.has_default_offset_reset_policy():
                log.info("Fetch offset %s is out of range for topic-partition %s", fetch_offset, tp)
                self._subscriptions.need_offset_reset(tp)
            else:
                raise Errors.OffsetOutOfRangeError({tp: fetch_offset})

        elif error_type is Errors.TopicAuthorizationFailedError:
            log.warning("Not authorized to read from topic %s.", tp.topic)
            # BUGFIX: set(tp.topic) built a set of the topic name's
            # *characters*; wrap the name so the error carries the topic.
            raise Errors.TopicAuthorizationFailedError(set([tp.topic]))
        elif error_type is Errors.UnknownError:
            log.warning("Unknown error fetching data for topic-partition %s", tp)
        else:
            raise error_type('Unexpected error while fetching data')

    finally:
        completed_fetch.metric_aggregator.record(tp, num_bytes, records_count)

    return parsed_records
class PartitionRecords(object):
    """Buffers unpacked messages for one partition and serves them in
    offset order, skipping entries below the requested fetch offset."""

    def __init__(self, fetch_offset, tp, messages):
        self.fetch_offset = fetch_offset
        self.topic_partition = tp
        self.messages = messages
        # A fetch that lands inside a compressed batch returns the whole
        # batch, so start 'take' at fetch_offset (or the next higher
        # offset, in case the exact message was compacted away).
        for idx, msg in enumerate(messages):
            if msg.offset >= fetch_offset:
                self.message_idx = idx
                break
            log.debug("Skipping message offset: %s (expecting %s)",
                      msg.offset, fetch_offset)
        else:
            # Nothing at or beyond fetch_offset: behave as empty.
            self.message_idx = 0
            self.messages = None

    # For truthiness evaluation we need to define __len__ or __nonzero__
    def __len__(self):
        if self.messages is None or self.message_idx >= len(self.messages):
            return 0
        return len(self.messages) - self.message_idx

    def discard(self):
        self.messages = None

    def take(self, n=None):
        """Remove and return up to ``n`` messages (all, when n is None)."""
        remaining = len(self)
        if not remaining:
            return []
        if n is None or n > remaining:
            n = remaining
        end = self.message_idx + n
        res = self.messages[self.message_idx:end]
        self.message_idx = end
        # fetch_offset should be incremented by 1 to parallel the
        # subscription position (also incremented by 1)
        self.fetch_offset = max(self.fetch_offset, res[-1].offset + 1)
        return res
class FetchResponseMetricAggregator(object):
    """Aggregates per-partition parse results into fetch-level metrics.

    Message data for each partition in a fetch response is parsed lazily,
    so totals are accumulated incrementally as partitions report in, and
    only written to the sensors once every expected partition has
    recorded its share.
    """

    def __init__(self, sensors, partitions):
        self.sensors = sensors
        self.unrecorded_partitions = partitions
        self.total_bytes = 0
        self.total_records = 0

    def record(self, partition, num_bytes, num_records):
        """Fold one partition's parse results into the running totals.

        When the last outstanding partition reports, flush the totals
        to the fetch-level sensors.
        """
        self.unrecorded_partitions.remove(partition)
        self.total_bytes += num_bytes
        self.total_records += num_records

        if not self.unrecorded_partitions:
            # every expected partition has reported in -> emit the metrics
            self.sensors.bytes_fetched.record(self.total_bytes)
            self.sensors.records_fetched.record(self.total_records)
class FetchManagerMetrics(object):
    """Registers and records fetch-related sensors on a Metrics registry.

    Sensor names and descriptions mirror the upstream Java consumer's
    fetch-manager metrics group.
    """
    def __init__(self, metrics, prefix):
        self.metrics = metrics
        self.group_name = '%s-fetch-manager-metrics' % prefix

        # Per-request fetch size and overall byte-consumption rate.
        self.bytes_fetched = metrics.sensor('bytes-fetched')
        self.bytes_fetched.add(metrics.metric_name('fetch-size-avg', self.group_name,
            'The average number of bytes fetched per request'), Avg())
        self.bytes_fetched.add(metrics.metric_name('fetch-size-max', self.group_name,
            'The maximum number of bytes fetched per request'), Max())
        self.bytes_fetched.add(metrics.metric_name('bytes-consumed-rate', self.group_name,
            'The average number of bytes consumed per second'), Rate())

        # Per-request record counts and record-consumption rate.
        self.records_fetched = self.metrics.sensor('records-fetched')
        self.records_fetched.add(metrics.metric_name('records-per-request-avg', self.group_name,
            'The average number of records in each request'), Avg())
        self.records_fetched.add(metrics.metric_name('records-consumed-rate', self.group_name,
            'The average number of records consumed per second'), Rate())

        # Round-trip latency of fetch requests; Rate over a Count stat
        # yields requests-per-second.
        self.fetch_latency = metrics.sensor('fetch-latency')
        self.fetch_latency.add(metrics.metric_name('fetch-latency-avg', self.group_name,
            'The average time taken for a fetch request.'), Avg())
        self.fetch_latency.add(metrics.metric_name('fetch-latency-max', self.group_name,
            'The max time taken for any fetch request.'), Max())
        self.fetch_latency.add(metrics.metric_name('fetch-rate', self.group_name,
            'The number of fetch requests per second.'), Rate(sampled_stat=Count()))

        # Consumer lag (highwater - last fetched offset) in records.
        self.records_fetch_lag = metrics.sensor('records-lag')
        self.records_fetch_lag.add(metrics.metric_name('records-lag-max', self.group_name,
            'The maximum lag in terms of number of records for any partition in self window'), Max())

        # Broker-imposed throttling (quota) time, reported in v1+ responses.
        self.fetch_throttle_time_sensor = metrics.sensor('fetch-throttle-time')
        self.fetch_throttle_time_sensor.add(metrics.metric_name('fetch-throttle-time-avg', self.group_name,
            'The average throttle time in ms'), Avg())
        self.fetch_throttle_time_sensor.add(metrics.metric_name('fetch-throttle-time-max', self.group_name,
            'The maximum throttle time in ms'), Max())

    def record_topic_fetch_metrics(self, topic, num_bytes, num_records):
        """Record per-topic fetch metrics, creating the sensors lazily.

        Sensors are looked up by a 'topic.<name>.<metric>' key and
        registered on first use for each topic.
        """
        # record bytes fetched
        name = '.'.join(['topic', topic, 'bytes-fetched'])
        bytes_fetched = self.metrics.get_sensor(name)
        if not bytes_fetched:
            # Dots are reserved in metric tag values; substitute them.
            metric_tags = {'topic': topic.replace('.', '_')}

            bytes_fetched = self.metrics.sensor(name)
            bytes_fetched.add(self.metrics.metric_name('fetch-size-avg',
                    self.group_name,
                    'The average number of bytes fetched per request for topic %s' % topic,
                    metric_tags), Avg())
            bytes_fetched.add(self.metrics.metric_name('fetch-size-max',
                    self.group_name,
                    'The maximum number of bytes fetched per request for topic %s' % topic,
                    metric_tags), Max())
            bytes_fetched.add(self.metrics.metric_name('bytes-consumed-rate',
                    self.group_name,
                    'The average number of bytes consumed per second for topic %s' % topic,
                    metric_tags), Rate())
        bytes_fetched.record(num_bytes)

        # record records fetched
        name = '.'.join(['topic', topic, 'records-fetched'])
        records_fetched = self.metrics.get_sensor(name)
        if not records_fetched:
            metric_tags = {'topic': topic.replace('.', '_')}

            records_fetched = self.metrics.sensor(name)
            records_fetched.add(self.metrics.metric_name('records-per-request-avg',
                    self.group_name,
                    'The average number of records in each request for topic %s' % topic,
                    metric_tags), Avg())
            records_fetched.add(self.metrics.metric_name('records-consumed-rate',
                    self.group_name,
                    'The average number of records consumed per second for topic %s' % topic,
                    metric_tags), Rate())
records_fetched.record(num_records)
import binascii
from rhkafka.record._crc32c import crc as crc32c_py
try:
from crc32c import crc32 as crc32c_c
except ImportError:
crc32c_c = None
def encode_varint(value, write):
    """ Encode an integer to a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        value (int): Value to encode
        write (function): Called per byte that needs to be written

    Returns:
        int: Number of bytes written
    """
    # Zig-zag encode: maps signed ints to unsigned so that values of
    # small magnitude (positive or negative) stay small on the wire.
    value = (value << 1) ^ (value >> 63)

    # Unrolled fast paths for the common 1-5 byte cases.
    if value <= 0x7f:  # 1 byte
        write(value)
        return 1
    if value <= 0x3fff:  # 2 bytes
        write(0x80 | (value & 0x7f))
        write(value >> 7)
        return 2
    if value <= 0x1fffff:  # 3 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(value >> 14)
        return 3
    if value <= 0xfffffff:  # 4 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(value >> 21)
        return 4
    if value <= 0x7ffffffff:  # 5 bytes
        write(0x80 | (value & 0x7f))
        write(0x80 | ((value >> 7) & 0x7f))
        write(0x80 | ((value >> 14) & 0x7f))
        write(0x80 | ((value >> 21) & 0x7f))
        write(value >> 28)
        return 5
    else:
        # Return to general algorithm for 6-10 byte encodings.
        bits = value & 0x7f
        value >>= 7
        i = 0
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
            i += 1
        write(bits)
        # BUGFIX: i only counts the writes inside the loop; the final
        # write(bits) above makes the total i + 1 bytes. The previous
        # code returned i, under-reporting by one byte here.
        return i + 1
def size_of_varint(value):
    """ Number of bytes needed to encode an integer in variable-length format.
    """
    # Zig-zag transform first, mirroring encode_varint.
    value = (value << 1) ^ (value >> 63)
    # Each varint byte carries 7 payload bits; widen the window until the
    # value fits, capping at the 10-byte maximum for 64-bit integers.
    limit = 0x7f
    for nbytes in range(1, 10):
        if value <= limit:
            return nbytes
        limit = (limit << 7) | 0x7f
    return 10
def decode_varint(buffer, pos=0):
    """ Decode an integer from a varint presentation. See
    https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
    on how those can be produced.

    Arguments:
        buffer (bytearray): buffer to read from.
        pos (int): optional position to read from

    Returns:
        (int, int): Decoded int value and next read position

    Raises:
        ValueError: if the encoding runs past the int64 range.
    """
    result = 0
    shift = 0
    while True:
        b = buffer[pos]
        pos += 1
        result |= (b & 0x7f) << shift
        if not (b & 0x80):
            # Terminal byte reached: undo the zig-zag transform.
            return (result >> 1) ^ -(result & 1), pos
        shift += 7
        if shift >= 64:
            raise ValueError("Out of int64 range")
# Default to the pure-python CRC-32C implementation, but prefer the C
# extension (crc32c package) when its import above succeeded.
_crc32c = crc32c_py
if crc32c_c is not None:
    _crc32c = crc32c_c
def calc_crc32c(memview, _crc32c=_crc32c):
    """ Calculate CRC-32C (Castagnoli) checksum over a memoryview of data
    """
    # _crc32c is bound as a default argument so the implementation is
    # fixed at definition time and looked up as a fast local.
    return _crc32c(memview)
def calc_crc32(memview):
    """ Calculate simple CRC-32 checksum over a memoryview of data
    """
    # Mask to 32 bits: binascii.crc32 could return signed values on
    # Python 2, and this keeps the result an unsigned 32-bit integer.
    return binascii.crc32(memview) & 0xffffffff
from __future__ import division
import struct
from rhkafka.errors import CorruptRecordException
from rhkafka.record.abc import ABCRecords
from rhkafka.record.legacy_records import LegacyRecordBatch, LegacyRecordBatchBuilder
from rhkafka.record.default_records import DefaultRecordBatch, DefaultRecordBatchBuilder
class MemoryRecords(ABCRecords):
    """Lazy reader over a raw buffer containing record batches.

    Parses the log-entry framing (int64 offset + int32 size) one entry
    ahead, yielding LegacyRecordBatch (magic 0/1) or DefaultRecordBatch
    (magic 2+) objects via next_batch().
    """

    LENGTH_OFFSET = struct.calcsize(">q")
    LOG_OVERHEAD = struct.calcsize(">qi")
    MAGIC_OFFSET = struct.calcsize(">qii")

    # Minimum space requirements for Record V0
    MIN_SLICE = LOG_OVERHEAD + LegacyRecordBatch.RECORD_OVERHEAD_V0

    def __init__(self, bytes_data):
        self._buffer = bytes_data
        self._pos = 0
        # We keep one slice ahead so `has_next` will return very fast
        self._next_slice = None
        # Set only once the end of the buffer is reached (bytes that did
        # not form a complete entry); None while more entries may remain.
        self._remaining_bytes = None
        self._cache_next()

    def size_in_bytes(self):
        return len(self._buffer)

    def valid_bytes(self):
        # We need to read the whole buffer to get the valid_bytes.
        # NOTE: in Fetcher we do the call after iteration, so should be fast
        if self._remaining_bytes is None:
            # Save the iterator state so scanning ahead is transparent.
            next_slice = self._next_slice
            pos = self._pos
            while self._remaining_bytes is None:
                self._cache_next()
            # Reset previous iterator position
            self._next_slice = next_slice
            self._pos = pos
        return len(self._buffer) - self._remaining_bytes

    # NOTE: we cache offsets here as kwargs for a bit more speed, as cPython
    # will use LOAD_FAST opcode in this case
    def _cache_next(self, len_offset=LENGTH_OFFSET, log_overhead=LOG_OVERHEAD):
        # Advance to the next complete entry, or record how many trailing
        # bytes remain if the buffer ends mid-entry (truncated fetch).
        buffer = self._buffer
        buffer_len = len(buffer)
        pos = self._pos
        remaining = buffer_len - pos
        if remaining < log_overhead:
            # Will be re-checked in Fetcher for remaining bytes.
            self._remaining_bytes = remaining
            self._next_slice = None
            return

        length, = struct.unpack_from(
            ">i", buffer, pos + len_offset)

        slice_end = pos + log_overhead + length
        if slice_end > buffer_len:
            # Will be re-checked in Fetcher for remaining bytes
            self._remaining_bytes = remaining
            self._next_slice = None
            return

        # Zero-copy view over the entry, including the framing header.
        self._next_slice = memoryview(buffer)[pos: slice_end]
        self._pos = slice_end

    def has_next(self):
        return self._next_slice is not None

    # NOTE: same cache for LOAD_FAST as above
    def next_batch(self, _min_slice=MIN_SLICE,
                   _magic_offset=MAGIC_OFFSET):
        next_slice = self._next_slice
        if next_slice is None:
            return None
        if len(next_slice) < _min_slice:
            raise CorruptRecordException(
                "Record size is less than the minimum record overhead "
                "({})".format(_min_slice - self.LOG_OVERHEAD))
        self._cache_next()
        # The magic byte selects which batch implementation to use.
        magic, = struct.unpack_from(">b", next_slice, _magic_offset)
        if magic <= 1:
            return LegacyRecordBatch(next_slice, magic)
        else:
            return DefaultRecordBatch(next_slice)
class MemoryRecordsBuilder(object):
    """Accumulates records into a single batch (v0/v1 message set or v2
    record batch) and produces a wire-ready byte buffer on close()."""

    def __init__(self, magic, compression_type, batch_size):
        assert magic in [0, 1, 2], "Not supported magic"
        assert compression_type in [0, 1, 2, 3], "Not valid compression type"
        if magic >= 2:
            self._builder = DefaultRecordBatchBuilder(
                magic=magic, compression_type=compression_type,
                is_transactional=False, producer_id=-1, producer_epoch=-1,
                base_sequence=-1, batch_size=batch_size)
        else:
            self._builder = LegacyRecordBatchBuilder(
                magic=magic, compression_type=compression_type,
                batch_size=batch_size)
        self._batch_size = batch_size
        self._buffer = None

        self._next_offset = 0
        self._closed = False
        self._bytes_written = 0

    def append(self, timestamp, key, value, headers=[]):
        """ Append a message to the buffer.

        Returns: RecordMetadata or None if unable to append
        """
        if self._closed:
            return None

        metadata = self._builder.append(
            self._next_offset, timestamp, key, value, headers)
        if metadata is None:
            # The underlying builder has no room left for this message.
            return None
        self._next_offset += 1
        return metadata

    def close(self):
        # Guard against double-close: this may be called repeatedly for
        # the same batch (e.g. on produce retries), and re-building could
        # double-compress an already compressed payload. See Issue 718.
        if self._closed:
            return
        self._bytes_written = self._builder.size()
        self._buffer = bytes(self._builder.build())
        self._builder = None
        self._closed = True

    def size_in_bytes(self):
        return len(self._buffer) if self._closed else self._builder.size()

    def compression_rate(self):
        assert self._closed
        return self.size_in_bytes() / self._bytes_written

    def is_full(self):
        return self._closed or self._builder.size() >= self._batch_size

    def next_offset(self):
        return self._next_offset

    def buffer(self):
        assert self._closed
        return self._buffer
from __future__ import absolute_import
import abc
class ABCRecord(object):
    """Interface for a single decoded record (message).

    NOTE(review): ``__metaclass__`` is the Python 2 spelling; under
    Python 3 it has no effect, so these abstract members are not
    enforced there -- confirm whether that is intentional.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractproperty
    def offset(self):
        """ Absolute offset of record
        """

    @abc.abstractproperty
    def timestamp(self):
        """ Epoch milliseconds
        """

    @abc.abstractproperty
    def timestamp_type(self):
        """ CREATE_TIME(0) or APPEND_TIME(1)
        """

    @abc.abstractproperty
    def key(self):
        """ Bytes key or None
        """

    @abc.abstractproperty
    def value(self):
        """ Bytes value or None
        """

    @abc.abstractproperty
    def checksum(self):
        """ Prior to v2 format CRC was contained in every message. This will
        be the checksum for v0 and v1 and None for v2 and above.
        """

    @abc.abstractproperty
    def headers(self):
        """ If supported by version list of key-value tuples, or empty list if
        not supported by format.
        """
class ABCRecordBatchBuilder(object):
    """Interface for incrementally building a single record batch."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def append(self, offset, timestamp, key, value, headers=None):
        """ Writes record to internal buffer.

        Arguments:
            offset (int): Relative offset of record, starting from 0
            timestamp (int or None): Timestamp in milliseconds since beginning
                of the epoch (midnight Jan 1, 1970 (UTC)). If omitted, will be
                set to current time.
            key (bytes or None): Key of the record
            value (bytes or None): Value of the record
            headers (List[Tuple[str, bytes]]): Headers of the record. Header
                keys can not be ``None``.

        Returns:
            (bytes, int): Checksum of the written record (or None for v2 and
                above) and size of the written record.
        """

    @abc.abstractmethod
    def size_in_bytes(self, offset, timestamp, key, value, headers):
        """ Return the expected size change on buffer (uncompressed) if we add
        this message. This will account for varint size changes and give a
        reliable size.
        """

    @abc.abstractmethod
    def build(self):
        """ Close for append, compress if needed, write size and header and
        return a ready to send buffer object.

        Return:
            bytearray: finished batch, ready to send.
        """
class ABCRecordBatch(object):
    """ For v2 encapsulates a RecordBatch, for v0/v1 a single (maybe
    compressed) message.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __iter__(self):
        """ Return iterator over records (ABCRecord instances). Will decompress
        if needed.
        """
class ABCRecords(object):
    """Interface over a raw buffer containing one or more record batches."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __init__(self, buffer):
        """ Initialize with bytes-like object conforming to the buffer
        interface (ie. bytes, bytearray, memoryview etc.).
        """

    @abc.abstractmethod
    def size_in_bytes(self):
        """ Returns the size of inner buffer.
        """

    @abc.abstractmethod
    def next_batch(self):
        """ Return next batch of records (ABCRecordBatch instances).
        """

    @abc.abstractmethod
    def has_next(self):
        """ True if there are more batches to read, False otherwise.
""" | /robinhood-kafka-python-1.4.3.tar.gz/robinhood-kafka-python-1.4.3/rhkafka/record/abc.py | 0.903575 | 0.310969 | abc.py | pypi |
# Builder and reader implementation for V0 and V1 record versions. As of Kafka
# 0.11.0.0 those were replaced with V2, thus the Legacy naming.
# The schema is given below (see
# https://kafka.apache.org/protocol#protocol_message_sets for more details):
# MessageSet => [Offset MessageSize Message]
# Offset => int64
# MessageSize => int32
# v0
# Message => Crc MagicByte Attributes Key Value
# Crc => int32
# MagicByte => int8
# Attributes => int8
# Key => bytes
# Value => bytes
# v1 (supported since 0.10.0)
# Message => Crc MagicByte Attributes Timestamp Key Value
# Crc => int32
# MagicByte => int8
# Attributes => int8
# Timestamp => int64
# Key => bytes
# Value => bytes
# The message attribute bits are given below:
# * Unused (4-7)
# * Timestamp Type (3) (added in V1)
# * Compression Type (0-2)
# Note that when compression is enabled (see attributes above), the whole
# array of MessageSets is compressed and placed into a message as the `value`.
# Only the parent message is marked with `compression` bits in attributes.
# The CRC covers the data from the Magic byte to the end of the message.
import struct
import time
from rhkafka.record.abc import ABCRecord, ABCRecordBatch, ABCRecordBatchBuilder
from rhkafka.record.util import calc_crc32
from rhkafka.codec import (
gzip_encode, snappy_encode, lz4_encode, lz4_encode_old_kafka,
gzip_decode, snappy_decode, lz4_decode, lz4_decode_old_kafka,
)
import rhkafka.codec as codecs
from rhkafka.errors import CorruptRecordException, UnsupportedCodecError
class LegacyRecordBase(object):
    """Shared constants and helpers for the v0/v1 ("legacy") message
    formats. Struct layouts follow the wire protocol described in the
    module header; all formats are big-endian.
    """

    HEADER_STRUCT_V0 = struct.Struct(
        ">q"  # BaseOffset => Int64
        "i"  # Length => Int32
        "I"  # CRC => Int32
        "b"  # Magic => Int8
        "b"  # Attributes => Int8
    )
    HEADER_STRUCT_V1 = struct.Struct(
        ">q"  # BaseOffset => Int64
        "i"  # Length => Int32
        "I"  # CRC => Int32
        "b"  # Magic => Int8
        "b"  # Attributes => Int8
        "q"  # timestamp => Int64
    )

    LOG_OVERHEAD = CRC_OFFSET = struct.calcsize(
        ">q"  # Offset
        "i"  # Size
    )
    MAGIC_OFFSET = LOG_OVERHEAD + struct.calcsize(
        ">I"  # CRC
    )
    # Those are used for fast size calculations
    RECORD_OVERHEAD_V0 = struct.calcsize(
        ">I"  # CRC
        "b"  # magic
        "b"  # attributes
        "i"  # Key length
        "i"  # Value length
    )
    RECORD_OVERHEAD_V1 = struct.calcsize(
        ">I"  # CRC
        "b"  # magic
        "b"  # attributes
        "q"  # timestamp
        "i"  # Key length
        "i"  # Value length
    )

    KEY_OFFSET_V0 = HEADER_STRUCT_V0.size
    KEY_OFFSET_V1 = HEADER_STRUCT_V1.size
    KEY_LENGTH = VALUE_LENGTH = struct.calcsize(">i")  # Bytes length is Int32

    # Attribute byte: the low 3 bits select the compression codec.
    CODEC_MASK = 0x07
    CODEC_NONE = 0x00
    CODEC_GZIP = 0x01
    CODEC_SNAPPY = 0x02
    CODEC_LZ4 = 0x03
    # Bit 3 selects the timestamp type (v1 only).
    TIMESTAMP_TYPE_MASK = 0x08

    LOG_APPEND_TIME = 1
    CREATE_TIME = 0

    NO_TIMESTAMP = -1

    def _assert_has_codec(self, compression_type):
        """Raise UnsupportedCodecError unless the compression library for
        ``compression_type`` is installed.

        BUGFIX: the previous code left ``checker`` unbound for codec ids
        outside gzip/snappy/lz4, crashing with UnboundLocalError; raise
        an explicit UnsupportedCodecError for those instead.
        """
        if compression_type == self.CODEC_GZIP:
            checker, name = codecs.has_gzip, "gzip"
        elif compression_type == self.CODEC_SNAPPY:
            checker, name = codecs.has_snappy, "snappy"
        elif compression_type == self.CODEC_LZ4:
            checker, name = codecs.has_lz4, "lz4"
        else:
            raise UnsupportedCodecError(
                "Unknown compression codec 0x%02x" % (compression_type,))
        if not checker():
            raise UnsupportedCodecError(
                "Libraries for {} compression codec not found".format(name))
class LegacyRecordBatch(ABCRecordBatch, LegacyRecordBase):
    """Read-side view of one v0/v1 message (or compressed message-set).

    ``buffer`` holds a single outer message, including the LOG_OVERHEAD
    (offset + size) prefix. For compressed messages the value payload is
    itself a concatenation of inner messages; it is inflated lazily on
    first iteration.
    """

    def __init__(self, buffer, magic):
        self._buffer = memoryview(buffer)
        self._magic = magic

        offset, length, crc, magic_, attrs, timestamp = self._read_header(0)
        assert length == len(buffer) - self.LOG_OVERHEAD
        assert magic == magic_

        self._offset = offset
        self._crc = crc
        self._timestamp = timestamp
        self._attributes = attrs
        # Flipped once the compressed payload has been inflated in __iter__.
        self._decompressed = False

    @property
    def timestamp_type(self):
        """0 for CreateTime; 1 for LogAppendTime; None if unsupported.

        Value is determined by broker; produced messages should always set to 0
        Requires Kafka >= 0.10 / message version >= 1
        """
        if self._magic == 0:
            return None
        elif self._attributes & self.TIMESTAMP_TYPE_MASK:
            return 1
        else:
            return 0

    @property
    def compression_type(self):
        # Codec id occupies the low 3 bits of the attributes byte.
        return self._attributes & self.CODEC_MASK

    def validate_crc(self):
        """Return True when the stored CRC matches the buffer contents.

        The CRC field covers everything from the magic byte onwards.
        """
        crc = calc_crc32(self._buffer[self.MAGIC_OFFSET:])
        return self._crc == crc

    def _decompress(self, key_offset):
        """Return the decompressed inner message-set bytes.

        ``key_offset`` is where the key length field starts in the buffer.
        """
        # Copy of `_read_key_value`, but uses memoryview
        pos = key_offset
        key_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.KEY_LENGTH
        if key_size != -1:
            pos += key_size
        value_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.VALUE_LENGTH
        if value_size == -1:
            raise CorruptRecordException("Value of compressed message is None")
        else:
            data = self._buffer[pos:pos + value_size]

        compression_type = self.compression_type
        self._assert_has_codec(compression_type)
        if compression_type == self.CODEC_GZIP:
            uncompressed = gzip_decode(data)
        elif compression_type == self.CODEC_SNAPPY:
            uncompressed = snappy_decode(data.tobytes())
        elif compression_type == self.CODEC_LZ4:
            if self._magic == 0:
                # Old (0.8/0.9) brokers used a non-standard LZ4 framing.
                uncompressed = lz4_decode_old_kafka(data.tobytes())
            else:
                uncompressed = lz4_decode(data.tobytes())
        return uncompressed

    def _read_header(self, pos):
        """Unpack one message header at ``pos``; v0 has no timestamp field."""
        if self._magic == 0:
            offset, length, crc, magic_read, attrs = \
                self.HEADER_STRUCT_V0.unpack_from(self._buffer, pos)
            timestamp = None
        else:
            offset, length, crc, magic_read, attrs, timestamp = \
                self.HEADER_STRUCT_V1.unpack_from(self._buffer, pos)
        return offset, length, crc, magic_read, attrs, timestamp

    def _read_all_headers(self):
        """Return [(header_tuple, start_pos), ...] for every inner message."""
        pos = 0
        msgs = []
        buffer_len = len(self._buffer)
        while pos < buffer_len:
            header = self._read_header(pos)
            msgs.append((header, pos))
            pos += self.LOG_OVERHEAD + header[1]  # length
        return msgs

    def _read_key_value(self, pos):
        """Read the Int32-length-prefixed key and value; -1 length => None."""
        key_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.KEY_LENGTH
        if key_size == -1:
            key = None
        else:
            key = self._buffer[pos:pos + key_size].tobytes()
            pos += key_size

        value_size = struct.unpack_from(">i", self._buffer, pos)[0]
        pos += self.VALUE_LENGTH
        if value_size == -1:
            value = None
        else:
            value = self._buffer[pos:pos + value_size].tobytes()
        return key, value

    def __iter__(self):
        """Yield each contained message as a LegacyRecord."""
        if self._magic == 1:
            key_offset = self.KEY_OFFSET_V1
        else:
            key_offset = self.KEY_OFFSET_V0
        timestamp_type = self.timestamp_type

        if self.compression_type:
            # In case we will call iter again
            if not self._decompressed:
                self._buffer = memoryview(self._decompress(key_offset))
                self._decompressed = True

            # If relative offset is used, we need to decompress the entire
            # message first to compute the absolute offset.
            headers = self._read_all_headers()
            if self._magic > 0:
                # v1 inner messages carry relative offsets; rebase them on
                # the wrapper's (last message's) absolute offset.
                msg_header, _ = headers[-1]
                absolute_base_offset = self._offset - msg_header[0]
            else:
                absolute_base_offset = -1

            for header, msg_pos in headers:
                offset, _, crc, _, attrs, timestamp = header
                # There should only ever be a single layer of compression
                assert not attrs & self.CODEC_MASK, (
                    'MessageSet at offset %d appears double-compressed. This '
                    'should not happen -- check your producers!' % offset)

                # When magic value is greater than 0, the timestamp
                # of a compressed message depends on the
                # timestamp type of the wrapper message:
                if timestamp_type == self.LOG_APPEND_TIME:
                    timestamp = self._timestamp

                if absolute_base_offset >= 0:
                    offset += absolute_base_offset

                key, value = self._read_key_value(msg_pos + key_offset)
                yield LegacyRecord(
                    offset, timestamp, timestamp_type,
                    key, value, crc)
        else:
            # Uncompressed: the batch is exactly one message.
            key, value = self._read_key_value(key_offset)
            yield LegacyRecord(
                self._offset, self._timestamp, timestamp_type,
                key, value, self._crc)
class LegacyRecord(ABCRecord):
    """A single decoded v0/v1 message exposed through the ABCRecord API."""

    __slots__ = ("_offset", "_timestamp", "_timestamp_type", "_key", "_value",
                 "_crc")

    def __init__(self, offset, timestamp, timestamp_type, key, value, crc):
        self._crc = crc
        self._value = value
        self._key = key
        self._timestamp_type = timestamp_type
        self._timestamp = timestamp
        self._offset = offset

    @property
    def offset(self):
        """Absolute message offset within the partition."""
        return self._offset

    @property
    def timestamp(self):
        """Epoch milliseconds (None for v0 messages)."""
        return self._timestamp

    @property
    def timestamp_type(self):
        """CREATE_TIME(0) or APPEND_TIME(1)."""
        return self._timestamp_type

    @property
    def key(self):
        """Message key as bytes, or None."""
        return self._key

    @property
    def value(self):
        """Message value as bytes, or None."""
        return self._value

    @property
    def headers(self):
        """Legacy formats carry no headers; always an empty list."""
        return []

    @property
    def checksum(self):
        """CRC32 of the message as read off the wire."""
        return self._crc

    def __repr__(self):
        fmt = ("LegacyRecord(offset={!r}, timestamp={!r}, timestamp_type={!r},"
               " key={!r}, value={!r}, crc={!r})")
        return fmt.format(self._offset, self._timestamp, self._timestamp_type,
                          self._key, self._value, self._crc)
class LegacyRecordBatchBuilder(ABCRecordBatchBuilder, LegacyRecordBase):
    """Write-side builder for v0/v1 message sets.

    Messages are appended uncompressed into ``self._buffer``; ``build()``
    then optionally wraps the whole buffer in a single compressed message.
    """

    def __init__(self, magic, compression_type, batch_size):
        self._magic = magic
        self._compression_type = compression_type
        # Soft cap: append() refuses additional messages once the buffer
        # would grow past this size (the first message is always allowed).
        self._batch_size = batch_size
        self._buffer = bytearray()

    def append(self, offset, timestamp, key, value, headers=None):
        """ Append message to batch.

        Returns LegacyRecordMetadata on success, or None when the batch
        is full. Raises TypeError for invalid offset/timestamp/key/value.
        """
        assert not headers, "Headers not supported in v0/v1"
        # Check types
        if type(offset) != int:
            raise TypeError(offset)
        if self._magic == 0:
            # v0 has no timestamp field on the wire.
            timestamp = self.NO_TIMESTAMP
        elif timestamp is None:
            timestamp = int(time.time() * 1000)
        elif type(timestamp) != int:
            raise TypeError(
                "`timestamp` should be int, but {} provided".format(
                    type(timestamp)))
        if not (key is None or
                isinstance(key, (bytes, bytearray, memoryview))):
            raise TypeError(
                "Not supported type for key: {}".format(type(key)))
        if not (value is None or
                isinstance(value, (bytes, bytearray, memoryview))):
            raise TypeError(
                "Not supported type for value: {}".format(type(value)))

        # Check if we have room for another message
        pos = len(self._buffer)
        size = self.size_in_bytes(offset, timestamp, key, value)
        # We always allow at least one record to be appended
        if offset != 0 and pos + size >= self._batch_size:
            return None

        # Allocate proper buffer length
        self._buffer.extend(bytearray(size))

        # Encode message
        crc = self._encode_msg(pos, offset, timestamp, key, value)

        return LegacyRecordMetadata(offset, crc, size, timestamp)

    def _encode_msg(self, start_pos, offset, timestamp, key, value,
                    attributes=0):
        """ Encode msg data into the `msg_buffer`, which should be allocated
            to at least the size of this message. Returns the message CRC.
        """
        magic = self._magic
        buf = self._buffer
        pos = start_pos

        # Write key and value
        pos += self.KEY_OFFSET_V0 if magic == 0 else self.KEY_OFFSET_V1

        if key is None:
            # -1 length encodes a null key.
            struct.pack_into(">i", buf, pos, -1)
            pos += self.KEY_LENGTH
        else:
            key_size = len(key)
            struct.pack_into(">i", buf, pos, key_size)
            pos += self.KEY_LENGTH
            buf[pos: pos + key_size] = key
            pos += key_size

        if value is None:
            # -1 length encodes a null value.
            struct.pack_into(">i", buf, pos, -1)
            pos += self.VALUE_LENGTH
        else:
            value_size = len(value)
            struct.pack_into(">i", buf, pos, value_size)
            pos += self.VALUE_LENGTH
            buf[pos: pos + value_size] = value
            pos += value_size
        # Length field excludes the offset+size (LOG_OVERHEAD) prefix.
        length = (pos - start_pos) - self.LOG_OVERHEAD

        # Write msg header. Note, that Crc will be updated later
        if magic == 0:
            self.HEADER_STRUCT_V0.pack_into(
                buf, start_pos,
                offset, length, 0, magic, attributes)
        else:
            self.HEADER_STRUCT_V1.pack_into(
                buf, start_pos,
                offset, length, 0, magic, attributes, timestamp)

        # Calculate CRC for msg: covers everything from the magic byte on.
        crc_data = memoryview(buf)[start_pos + self.MAGIC_OFFSET:]
        crc = calc_crc32(crc_data)
        struct.pack_into(">I", buf, start_pos + self.CRC_OFFSET, crc)
        return crc

    def _maybe_compress(self):
        """Compress the accumulated buffer in place; True if compressed."""
        if self._compression_type:
            self._assert_has_codec(self._compression_type)
            data = bytes(self._buffer)
            if self._compression_type == self.CODEC_GZIP:
                compressed = gzip_encode(data)
            elif self._compression_type == self.CODEC_SNAPPY:
                compressed = snappy_encode(data)
            elif self._compression_type == self.CODEC_LZ4:
                if self._magic == 0:
                    # Old (0.8/0.9) brokers used a non-standard LZ4 framing.
                    compressed = lz4_encode_old_kafka(data)
                else:
                    compressed = lz4_encode(data)
            size = self.size_in_bytes(
                0, timestamp=0, key=None, value=compressed)
            # We will try to reuse the same buffer if we have enough space
            if size > len(self._buffer):
                self._buffer = bytearray(size)
            else:
                del self._buffer[size:]
            # Wrap the compressed payload in a single outer message whose
            # attributes carry the codec id.
            self._encode_msg(
                start_pos=0,
                offset=0, timestamp=0, key=None, value=compressed,
                attributes=self._compression_type)
            return True
        return False

    def build(self):
        """Compress batch to be ready for send"""
        self._maybe_compress()
        return self._buffer

    def size(self):
        """ Return current size of data written to buffer
        """
        return len(self._buffer)

    # Size calculations. Just copied Java's implementation

    def size_in_bytes(self, offset, timestamp, key, value, headers=None):
        """ Actual size of message to add
        """
        assert not headers, "Headers not supported in v0/v1"
        magic = self._magic
        return self.LOG_OVERHEAD + self.record_size(magic, key, value)

    @classmethod
    def record_size(cls, magic, key, value):
        """Size of a single record body (without the LOG_OVERHEAD prefix)."""
        message_size = cls.record_overhead(magic)
        if key is not None:
            message_size += len(key)
        if value is not None:
            message_size += len(value)
        return message_size

    @classmethod
    def record_overhead(cls, magic):
        """Fixed per-record overhead for the given magic value."""
        assert magic in [0, 1], "Not supported magic"
        if magic == 0:
            return cls.RECORD_OVERHEAD_V0
        else:
            return cls.RECORD_OVERHEAD_V1

    @classmethod
    def estimate_size_in_bytes(cls, magic, compression_type, key, value):
        """ Upper bound estimate of record size.
        """
        assert magic in [0, 1], "Not supported magic"
        # In case of compression we may need another overhead for inner msg
        if compression_type:
            return (
                cls.LOG_OVERHEAD + cls.record_overhead(magic) +
                cls.record_size(magic, key, value)
            )
        return cls.LOG_OVERHEAD + cls.record_size(magic, key, value)
class LegacyRecordMetadata(object):
    """Result of appending one message: offset, crc, size and timestamp."""

    __slots__ = ("_crc", "_size", "_timestamp", "_offset")

    def __init__(self, offset, crc, size, timestamp):
        self._timestamp = timestamp
        self._size = size
        self._crc = crc
        self._offset = offset

    @property
    def crc(self):
        """CRC32 written into the encoded message."""
        return self._crc

    @property
    def offset(self):
        """Offset the message was appended with."""
        return self._offset

    @property
    def size(self):
        """Total encoded size, including the log overhead prefix."""
        return self._size

    @property
    def timestamp(self):
        """Effective timestamp stored in the message (epoch ms or -1)."""
        return self._timestamp

    def __repr__(self):
        fmt = ("LegacyRecordMetadata(offset={!r}, crc={!r}, size={!r},"
               " timestamp={!r})")
        return fmt.format(self._offset, self._crc, self._size,
                          self._timestamp)
from __future__ import absolute_import
import collections
import logging
from rhkafka.vendor import six
from rhkafka.coordinator.assignors.abstract import AbstractPartitionAssignor
from rhkafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment
log = logging.getLogger(__name__)
class RangePartitionAssignor(AbstractPartitionAssignor):
    """Per-topic range assignment.

    For each topic, partitions are laid out in numeric order and the
    subscribed consumers in lexicographic order. Each consumer receives a
    contiguous range of ``len(partitions) // len(consumers)`` partitions,
    and the first ``len(partitions) % len(consumers)`` consumers get one
    extra partition.

    Example: consumers C0, C1 and topics t0, t1 with 3 partitions each
    yields C0: [t0p0, t0p1, t1p0, t1p1] and C1: [t0p2, t1p2].
    """
    name = 'range'
    version = 0

    @classmethod
    def assign(cls, cluster, member_metadata):
        # Invert member -> topics into topic -> subscribed members.
        members_by_topic = collections.defaultdict(list)
        for member_id, metadata in six.iteritems(member_metadata):
            for topic in metadata.subscription:
                members_by_topic[topic].append(member_id)

        # {member_id: {topic: [partition, ...]}}
        assignment = collections.defaultdict(dict)
        for topic, members in six.iteritems(members_by_topic):
            topic_partitions = cluster.partitions_for_topic(topic)
            if topic_partitions is None:
                log.warning('No partition metadata for topic %s', topic)
                continue
            topic_partitions = sorted(topic_partitions)
            members.sort()

            base, extra = divmod(len(topic_partitions), len(members))
            cursor = 0
            for rank, member_id in enumerate(members):
                # The first `extra` members take one additional partition.
                chunk = base + (1 if rank < extra else 0)
                assignment[member_id][topic] = \
                    topic_partitions[cursor:cursor + chunk]
                cursor += chunk

        return dict(
            (member_id,
             ConsumerProtocolMemberAssignment(
                 cls.version,
                 sorted(assignment[member_id].items()),
                 b''))
            for member_id in member_metadata)

    @classmethod
    def metadata(cls, topics):
        return ConsumerProtocolMemberMetadata(cls.version, list(topics), b'')

    @classmethod
    def on_assignment(cls, assignment):
        # Stateless assignor: nothing to do when an assignment arrives.
        pass
from __future__ import absolute_import
import collections
import itertools
import logging
from rhkafka.vendor import six
from rhkafka.coordinator.assignors.abstract import AbstractPartitionAssignor
from rhkafka.common import TopicPartition
from rhkafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment
log = logging.getLogger(__name__)
class RoundRobinPartitionAssignor(AbstractPartitionAssignor):
    """
    The roundrobin assignor lays out all the available partitions and all the
    available consumers. It then proceeds to do a roundrobin assignment from
    partition to consumer. If the subscriptions of all consumer instances are
    identical, then the partitions will be uniformly distributed. (i.e., the
    partition ownership counts will be within a delta of exactly one across all
    consumers.)

    For example, suppose there are two consumers C0 and C1, two topics t0 and
    t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1,
    t0p2, t1p0, t1p1, and t1p2.

    The assignment will be:
        C0: [t0p0, t0p2, t1p1]
        C1: [t0p1, t1p0, t1p2]

    When subscriptions differ across consumer instances, the assignment process
    still considers each consumer instance in round robin fashion but skips
    over an instance if it is not subscribed to the topic. Unlike the case when
    subscriptions are identical, this can result in imbalanced assignments.

    For example, suppose we have three consumers C0, C1, C2, and three topics
    t0, t1, t2, with unbalanced partitions t0p0, t1p0, t1p1, t2p0, t2p1, t2p2,
    where C0 is subscribed to t0; C1 is subscribed to t0, t1; and C2 is
    subscribed to t0, t1, t2.

    The assignment will be:
        C0: [t0p0]
        C1: [t1p0]
        C2: [t1p1, t2p0, t2p1, t2p2]
    """
    name = 'roundrobin'
    version = 0

    @classmethod
    def assign(cls, cluster, member_metadata):
        # Union of every topic any member subscribes to.
        all_topics = set()
        for metadata in six.itervalues(member_metadata):
            all_topics.update(metadata.subscription)

        # Expand topics into a sorted list of TopicPartition tuples so the
        # round-robin walk is deterministic across group members.
        all_topic_partitions = []
        for topic in all_topics:
            partitions = cluster.partitions_for_topic(topic)
            if partitions is None:
                log.warning('No partition metadata for topic %s', topic)
                continue
            for partition in partitions:
                all_topic_partitions.append(TopicPartition(topic, partition))
        all_topic_partitions.sort()

        # construct {member_id: {topic: [partition, ...]}}
        assignment = collections.defaultdict(lambda: collections.defaultdict(list))

        # Cycle members (sorted by id) and give each partition to the next
        # member that is subscribed to its topic.
        member_iter = itertools.cycle(sorted(member_metadata.keys()))
        for partition in all_topic_partitions:
            member_id = next(member_iter)

            # Because we constructed all_topic_partitions from the set of
            # member subscribed topics, we should be safe assuming that
            # each topic in all_topic_partitions is in at least one member
            # subscription; otherwise this could yield an infinite loop
            while partition.topic not in member_metadata[member_id].subscription:
                member_id = next(member_iter)
            assignment[member_id][partition.topic].append(partition.partition)

        protocol_assignment = {}
        for member_id in member_metadata:
            protocol_assignment[member_id] = ConsumerProtocolMemberAssignment(
                cls.version,
                sorted(assignment[member_id].items()),
                b'')
        return protocol_assignment

    @classmethod
    def metadata(cls, topics):
        return ConsumerProtocolMemberMetadata(cls.version, list(topics), b'')

    @classmethod
    def on_assignment(cls, assignment):
        # Stateless assignor: nothing to do when an assignment arrives.
        pass
from __future__ import absolute_import
from rhkafka.vendor import six
from rhkafka.partitioner.base import Partitioner
class Murmur2Partitioner(Partitioner):
    """Key-hash partitioner compatible with the mainline Java client.

    Hashes the key with murmur2, masks to a non-negative 31-bit value
    and maps it onto the supplied partition list.
    """

    def __call__(self, key, partitions=None, available=None):
        # Prefer the currently-available partitions when provided.
        target = available if available else partitions
        return self.partition(key, target)

    def partition(self, key, partitions=None):
        if not partitions:
            partitions = self.partitions
        # Same masking/modulo scheme as the Java client's default
        # partitioner; see Partitioner.java in Apache Kafka 0.8.2.
        idx = (murmur2(key) & 0x7fffffff) % len(partitions)
        return partitions[idx]
class LegacyPartitioner(object):
    """DEPRECATED -- See Issue 374

    Selects the target partition from the builtin ``hash()`` of the key.
    """

    def __init__(self, partitions):
        self.partitions = partitions

    def partition(self, key, partitions=None):
        pool = partitions if partitions else self.partitions
        return pool[hash(key) % len(pool)]
# Default will change to Murmur2 in 0.10 release
# NOTE: kept for backward compatibility; Murmur2Partitioner matches the
# Java client's partition mapping, LegacyPartitioner does not.
HashedPartitioner = LegacyPartitioner
# https://github.com/apache/kafka/blob/0.8.2/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L244
def murmur2(data):
    """Pure-python Murmur2 implementation.

    Based on java client, see org.apache.kafka.common.utils.Utils.murmur2

    Args:
        data (bytes): opaque bytes

    Returns: MurmurHash2 of data
    """
    # Python2 bytes is really a str, causing the bitwise operations below to fail
    # so convert to bytearray.
    if six.PY2:
        data = bytearray(bytes(data))

    length = len(data)
    seed = 0x9747b28c
    # 'm' and 'r' are mixing constants generated offline.
    # They're not really 'magic', they just happen to work well.
    m = 0x5bd1e995
    r = 24

    # Initialize the hash to a random value
    h = seed ^ length
    length4 = length // 4

    # Mix the input four bytes at a time, assembled as little-endian words
    # exactly as the Java implementation does. The repeated `& 0xffffffff`
    # masks emulate Java's 32-bit overflow on Python's unbounded ints.
    for i in range(length4):
        i4 = i * 4
        k = ((data[i4 + 0] & 0xff) +
             ((data[i4 + 1] & 0xff) << 8) +
             ((data[i4 + 2] & 0xff) << 16) +
             ((data[i4 + 3] & 0xff) << 24))
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff
        k ^= (k % 0x100000000) >> r  # k ^= k >>> r
        k &= 0xffffffff
        k *= m
        k &= 0xffffffff

        h *= m
        h &= 0xffffffff
        h ^= k
        h &= 0xffffffff

    # Handle the last few bytes of the input array
    extra_bytes = length % 4
    if extra_bytes >= 3:
        h ^= (data[(length & ~3) + 2] & 0xff) << 16
        h &= 0xffffffff
    if extra_bytes >= 2:
        h ^= (data[(length & ~3) + 1] & 0xff) << 8
        h &= 0xffffffff
    if extra_bytes >= 1:
        h ^= (data[length & ~3] & 0xff)
        h &= 0xffffffff
        h *= m
        h &= 0xffffffff

    # Final avalanche mixing (matches Java's h >>> 13 / *= m / h >>> 15).
    h ^= (h % 0x100000000) >> 13  # h >>> 13;
    h &= 0xffffffff
    h *= m
    h &= 0xffffffff
    h ^= (h % 0x100000000) >> 15  # h >>> 15;
    h &= 0xffffffff

    return h
from __future__ import absolute_import
from rhkafka.partitioner.base import Partitioner
class RoundRobinPartitioner(Partitioner):
    """Cycles through partitions in order, ignoring the message key.

    The underlying cycler is re-seeded whenever the caller passes a
    partition list different from the one currently cached.
    """

    def __init__(self, partitions=None):
        self.partitions_iterable = CachedPartitionCycler(partitions)
        self.partitions = None
        if partitions:
            self._set_partitions(partitions)

    def __call__(self, key, all_partitions=None, available_partitions=None):
        current = available_partitions if available_partitions else all_partitions
        if not self.partitions:
            self._set_partitions(current)
        elif current is not None and current != self.partitions_iterable.partitions:
            self._set_partitions(current)
        return next(self.partitions_iterable)

    def _set_partitions(self, available_partitions):
        # Keep our cached list and the cycler's view in sync.
        self.partitions = available_partitions
        self.partitions_iterable.set_partitions(available_partitions)

    def partition(self, key, all_partitions=None, available_partitions=None):
        return self.__call__(key, all_partitions, available_partitions)
class CachedPartitionCycler(object):
    """Infinite round-robin cursor over a (replaceable) partition list.

    Remembers its position across ``set_partitions`` calls so swapping in
    a refreshed partition list does not restart the cycle when the
    upcoming partition still exists in the new list.
    """

    def __init__(self, partitions=None):
        self.partitions = partitions
        if partitions:
            assert type(partitions) is list
        # Index of the next partition to hand out; None until first next().
        self.cur_pos = None

    def __next__(self):
        return self.next()

    @staticmethod
    def _index_available(cur_pos, partitions):
        # True when cur_pos is a valid index into partitions.
        return cur_pos < len(partitions)

    def set_partitions(self, partitions):
        """Replace the partition list, preserving the cycle position.

        NOTE(review): cur_pos == 0 is treated like "never advanced" by the
        truthiness test below; preserved for backward compatibility.
        """
        if self.cur_pos:
            if not self._index_available(self.cur_pos, partitions):
                self.cur_pos = 0
                self.partitions = partitions
                return None
            old_partitions = self.partitions
            self.partitions = partitions
            # Bug fix: the upcoming item must be read from the *old* list
            # before it is replaced. The previous code installed the new
            # list first and then looked the item up in that same list,
            # which made the membership test below a tautology and lost
            # the cycle position whenever the list changed.
            if self._index_available(self.cur_pos, old_partitions):
                next_item = old_partitions[self.cur_pos]
                if next_item in partitions:
                    self.cur_pos = partitions.index(next_item)
                else:
                    self.cur_pos = 0
            else:
                # The cycle was about to wrap anyway; restart at the head.
                self.cur_pos = 0
            return None
        self.partitions = partitions

    def next(self):
        assert self.partitions is not None
        if self.cur_pos is None or not self._index_available(self.cur_pos,
                                                             self.partitions):
            # Start (or wrap) at the head of the list.
            self.cur_pos = 1
            return self.partitions[0]
        cur_item = self.partitions[self.cur_pos]
        self.cur_pos += 1
        return cur_item
from __future__ import absolute_import
from struct import pack, unpack, error
from rhkafka.protocol.abstract import AbstractType
def _pack(f, value):
try:
return pack(f, value)
except error as e:
raise ValueError("Error encountered when attempting to convert value: "
"{!r} to struct format: '{}', hit error: {}"
.format(value, f, e))
def _unpack(f, data):
try:
(value,) = unpack(f, data)
return value
except error as e:
raise ValueError("Error encountered when attempting to convert value: "
"{!r} to struct format: '{}', hit error: {}"
.format(data, f, e))
class Int8(AbstractType):
    """Big-endian signed 8-bit integer codec."""

    @classmethod
    def encode(cls, value):
        return _pack('>b', value)

    @classmethod
    def decode(cls, data):
        # ``data`` is a file-like object; the value occupies one byte.
        return _unpack('>b', data.read(1))
class Int16(AbstractType):
    """Big-endian signed 16-bit integer codec."""

    @classmethod
    def encode(cls, value):
        return _pack('>h', value)

    @classmethod
    def decode(cls, data):
        # ``data`` is a file-like object; the value occupies two bytes.
        return _unpack('>h', data.read(2))
class Int32(AbstractType):
    """Big-endian signed 32-bit integer codec."""

    @classmethod
    def encode(cls, value):
        return _pack('>i', value)

    @classmethod
    def decode(cls, data):
        # ``data`` is a file-like object; the value occupies four bytes.
        return _unpack('>i', data.read(4))
class Int64(AbstractType):
    """Big-endian signed 64-bit integer codec."""

    @classmethod
    def encode(cls, value):
        return _pack('>q', value)

    @classmethod
    def decode(cls, data):
        # ``data`` is a file-like object; the value occupies eight bytes.
        return _unpack('>q', data.read(8))
class String(AbstractType):
    """Int16-length-prefixed string codec; a length of -1 encodes None."""

    def __init__(self, encoding='utf-8'):
        self.encoding = encoding

    def encode(self, value):
        if value is None:
            return Int16.encode(-1)
        encoded = str(value).encode(self.encoding)
        return Int16.encode(len(encoded)) + encoded

    def decode(self, data):
        length = Int16.decode(data)
        if length < 0:
            return None
        raw = data.read(length)
        if len(raw) != length:
            raise ValueError('Buffer underrun decoding string')
        return raw.decode(self.encoding)
class Bytes(AbstractType):
    """Int32-length-prefixed byte-string codec; -1 length encodes None."""

    @classmethod
    def encode(cls, value):
        if value is None:
            return Int32.encode(-1)
        return Int32.encode(len(value)) + value

    @classmethod
    def decode(cls, data):
        length = Int32.decode(data)
        if length < 0:
            return None
        raw = data.read(length)
        if len(raw) != length:
            raise ValueError('Buffer underrun decoding Bytes')
        return raw

    @classmethod
    def repr(cls, value):
        # Truncate long payloads so log lines stay readable.
        if value is not None and len(value) > 100:
            return repr(value[:100] + b'...')
        return repr(value)
class Boolean(AbstractType):
    """Single-byte boolean codec (struct '?': 0 = False, nonzero = True)."""

    @classmethod
    def encode(cls, value):
        return _pack('>?', value)

    @classmethod
    def decode(cls, data):
        # ``data`` is a file-like object; the value occupies one byte.
        return _unpack('>?', data.read(1))
class Schema(AbstractType):
    """Ordered collection of named fields encoded/decoded back-to-back."""

    def __init__(self, *fields):
        # ``fields`` is a sequence of (name, type) pairs; unzip them into
        # parallel tuples so decode order matches declaration order.
        if fields:
            self.names, self.fields = zip(*fields)
        else:
            self.names, self.fields = (), ()

    def encode(self, item):
        """Encode ``item``, a sequence aligned with the schema fields."""
        if len(item) != len(self.fields):
            raise ValueError('Item field count does not match Schema')
        return b''.join([
            field.encode(item[i])
            for i, field in enumerate(self.fields)
        ])

    def decode(self, data):
        """Decode one value per field, in declaration order."""
        return tuple([field.decode(data) for field in self.fields])

    def __len__(self):
        # Number of fields in this schema.
        return len(self.fields)

    def repr(self, value):
        """Render ``value`` as '(name=..., ...)'; fall back to plain repr."""
        key_vals = []
        try:
            for i in range(len(self)):
                try:
                    # Prefer attribute access (namedtuple-like payloads),
                    # fall back to positional indexing.
                    field_val = getattr(value, self.names[i])
                except AttributeError:
                    field_val = value[i]
                key_vals.append('%s=%s' % (self.names[i], self.fields[i].repr(field_val)))
            return '(' + ', '.join(key_vals) + ')'
        except Exception:
            return repr(value)
class Array(AbstractType):
    """Int32-count-prefixed array of one element type; -1 encodes None."""

    def __init__(self, *array_of):
        # Accept either multiple (name, type) pairs (an array of Schema)
        # or a single AbstractType subclass/instance.
        if len(array_of) > 1:
            self.array_of = Schema(*array_of)
        elif len(array_of) == 1 and (isinstance(array_of[0], AbstractType) or
                                     issubclass(array_of[0], AbstractType)):
            self.array_of = array_of[0]
        else:
            raise ValueError('Array instantiated with no array_of type')

    def encode(self, items):
        if items is None:
            return Int32.encode(-1)
        return b''.join(
            [Int32.encode(len(items))] +
            [self.array_of.encode(item) for item in items]
        )

    def decode(self, data):
        length = Int32.decode(data)
        if length == -1:
            return None
        return [self.array_of.decode(data) for _ in range(length)]

    def repr(self, list_of_items):
        if list_of_items is None:
            return 'NULL'
        return '[' + ', '.join([self.array_of.repr(item) for item in list_of_items]) + ']'
from __future__ import absolute_import
import logging
import sys
import time
import threading
from rhkafka.metrics import AnonMeasurable, KafkaMetric, MetricConfig, MetricName
from rhkafka.metrics.stats import Sensor
logger = logging.getLogger(__name__)
class Metrics(object):
"""
A registry of sensors and metrics.
A metric is a named, numerical measurement. A sensor is a handle to
record numerical measurements as they occur. Each Sensor has zero or
more associated metrics. For example a Sensor might represent message
sizes and we might associate with this sensor a metric for the average,
maximum, or other statistics computed off the sequence of message sizes
that are recorded by the sensor.
Usage looks something like this:
# set up metrics:
metrics = Metrics() # the global repository of metrics and sensors
sensor = metrics.sensor('message-sizes')
metric_name = MetricName('message-size-avg', 'producer-metrics')
sensor.add(metric_name, Avg())
metric_name = MetricName('message-size-max', 'producer-metrics')
sensor.add(metric_name, Max())
# as messages are sent we record the sizes
sensor.record(message_size);
"""
def __init__(self, default_config=None, reporters=None,
enable_expiration=False):
"""
Create a metrics repository with a default config, given metric
reporters and the ability to expire eligible sensors
Arguments:
default_config (MetricConfig, optional): The default config
reporters (list of AbstractMetricsReporter, optional):
The metrics reporters
enable_expiration (bool, optional): true if the metrics instance
can garbage collect inactive sensors, false otherwise
"""
self._lock = threading.RLock()
self._config = default_config or MetricConfig()
self._sensors = {}
self._metrics = {}
self._children_sensors = {}
self._reporters = reporters or []
for reporter in self._reporters:
reporter.init([])
if enable_expiration:
def expire_loop():
while True:
# delay 30 seconds
time.sleep(30)
self.ExpireSensorTask.run(self)
metrics_scheduler = threading.Thread(target=expire_loop)
# Creating a daemon thread to not block shutdown
metrics_scheduler.daemon = True
metrics_scheduler.start()
self.add_metric(self.metric_name('count', 'kafka-metrics-count',
'total number of registered metrics'),
AnonMeasurable(lambda config, now: len(self._metrics)))
@property
def config(self):
return self._config
@property
def metrics(self):
"""
Get all the metrics currently maintained and indexed by metricName
"""
return self._metrics
def metric_name(self, name, group, description='', tags=None):
"""
Create a MetricName with the given name, group, description and tags,
plus default tags specified in the metric configuration.
Tag in tags takes precedence if the same tag key is specified in
the default metric configuration.
Arguments:
name (str): The name of the metric
group (str): logical group name of the metrics to which this
metric belongs
description (str, optional): A human-readable description to
include in the metric
tags (dict, optionals): additional key/value attributes of
the metric
"""
combined_tags = dict(self.config.tags)
combined_tags.update(tags or {})
return MetricName(name, group, description, combined_tags)
def get_sensor(self, name):
"""
Get the sensor with the given name if it exists
Arguments:
name (str): The name of the sensor
Returns:
Sensor: The sensor or None if no such sensor exists
"""
if not name:
raise ValueError('name must be non-empty')
return self._sensors.get(name, None)
def sensor(self, name, config=None,
inactive_sensor_expiration_time_seconds=sys.maxsize,
parents=None):
"""
Get or create a sensor with the given unique name and zero or
more parent sensors. All parent sensors will receive every value
recorded with this sensor.
Arguments:
name (str): The name of the sensor
config (MetricConfig, optional): A default configuration to use
for this sensor for metrics that don't have their own config
inactive_sensor_expiration_time_seconds (int, optional):
If no value if recorded on the Sensor for this duration of
time, it is eligible for removal
parents (list of Sensor): The parent sensors
Returns:
Sensor: The sensor that is created
"""
sensor = self.get_sensor(name)
if sensor:
return sensor
with self._lock:
sensor = self.get_sensor(name)
if not sensor:
sensor = Sensor(self, name, parents, config or self.config,
inactive_sensor_expiration_time_seconds)
self._sensors[name] = sensor
if parents:
for parent in parents:
children = self._children_sensors.get(parent)
if not children:
children = []
self._children_sensors[parent] = children
children.append(sensor)
logger.debug('Added sensor with name %s', name)
return sensor
def remove_sensor(self, name):
"""
Remove a sensor (if it exists), associated metrics and its children.
Arguments:
name (str): The name of the sensor to be removed
"""
sensor = self._sensors.get(name)
if sensor:
child_sensors = None
with sensor._lock:
with self._lock:
val = self._sensors.pop(name, None)
if val and val == sensor:
for metric in sensor.metrics:
self.remove_metric(metric.metric_name)
logger.debug('Removed sensor with name %s', name)
child_sensors = self._children_sensors.pop(sensor, None)
if child_sensors:
for child_sensor in child_sensors:
self.remove_sensor(child_sensor.name)
def add_metric(self, metric_name, measurable, config=None):
"""
Add a metric to monitor an object that implements measurable.
This metric won't be associated with any sensor.
This is a way to expose existing values as metrics.
Arguments:
metricName (MetricName): The name of the metric
measurable (AbstractMeasurable): The measurable that will be
measured by this metric
config (MetricConfig, optional): The configuration to use when
measuring this measurable
"""
# NOTE there was a lock here, but i don't think it's needed
metric = KafkaMetric(metric_name, measurable, config or self.config)
self.register_metric(metric)
def remove_metric(self, metric_name):
"""
Remove a metric if it exists and return it. Return None otherwise.
If a metric is removed, `metric_removal` will be invoked
for each reporter.
Arguments:
metric_name (MetricName): The name of the metric
Returns:
KafkaMetric: the removed `KafkaMetric` or None if no such
metric exists
"""
with self._lock:
metric = self._metrics.pop(metric_name, None)
if metric:
for reporter in self._reporters:
reporter.metric_removal(metric)
return metric
def add_reporter(self, reporter):
"""Add a MetricReporter"""
with self._lock:
reporter.init(list(self.metrics.values()))
self._reporters.append(reporter)
def register_metric(self, metric):
    """Register *metric*, rejecting duplicate names, and notify reporters.

    Raises:
        ValueError: if a metric with the same name is already registered.
    """
    with self._lock:
        name = metric.metric_name
        if name in self.metrics:
            raise ValueError('A metric named "%s" already exists, cannot'
                             ' register another one.' % name)
        self.metrics[name] = metric
        for reporter in self._reporters:
            reporter.metric_change(metric)
class ExpireSensorTask(object):
    """
    Iterates over every Sensor and triggers a remove_sensor for any that
    has expired. Package private for testing.
    """
    @staticmethod
    def run(metrics):
        # Snapshot the sensor map so removals during iteration are safe.
        for name, sensor in list(metrics._sensors.items()):
            # remove_sensor also locks the sensor object; that is fine
            # because the lock is reentrant. There is however a minor race:
            # for a parent sensor P with child C, a record on C records on P
            # too, so expiration time for P == expiration time for C. If the
            # record on P (via C) lands just after P is removed, C gets
            # removed as well. Since the expiration time is typically high
            # this is not a significant concern and thus not necessary to
            # optimize.
            with sensor._lock:
                if sensor.has_expired():
                    logger.debug('Removing expired sensor %s', name)
                    metrics.remove_sensor(name)
def close(self):
    """Close this metrics repository by closing each registered reporter."""
    for rep in self._reporters:
        rep.close()
from __future__ import absolute_import
import math
class Histogram(object):
    """Fixed-bucket histogram driven by a pluggable bin scheme.

    The bin scheme maps values to bucket indexes (``to_bin``) and bucket
    indexes back to representative values (``from_bin``); the last bucket
    is treated as the overflow (+inf) bucket.
    """
    def __init__(self, bin_scheme):
        self._hist = [0.0] * bin_scheme.bins
        self._count = 0.0
        self._bin_scheme = bin_scheme

    def record(self, value):
        """Record one observation of *value*."""
        self._hist[self._bin_scheme.to_bin(value)] += 1.0
        self._count += 1.0

    def value(self, quantile):
        """Estimate the given quantile; NaN when empty, +inf past the last bin."""
        if self._count == 0.0:
            return float('NaN')
        _sum = 0.0
        quant = float(quantile)
        for i, value in enumerate(self._hist[:-1]):
            _sum += value
            if _sum / self._count > quant:
                return self._bin_scheme.from_bin(i)
        return float('inf')

    @property
    def counts(self):
        # Raw per-bucket counts (the live list, not a copy).
        return self._hist

    def clear(self):
        """Reset all buckets and the total count to zero.

        BUG FIX: previously iterated ``range(self._hist)`` (range() over a
        list -> TypeError), so clear() always crashed. The reset is done
        in place so external holders of ``counts`` stay in sync, and
        ``_count`` is reset to 0.0 to match its float type elsewhere.
        """
        for i in range(len(self._hist)):
            self._hist[i] = 0.0
        self._count = 0.0

    def __str__(self):
        values = ['%.10f:%.0f' % (self._bin_scheme.from_bin(i), value) for
                  i, value in enumerate(self._hist[:-1])]
        values.append('%s:%s' % (float('inf'), self._hist[-1]))
        return '{%s}' % ','.join(values)
class ConstantBinScheme(object):
    """Bin scheme with equal-width interior buckets plus two overflow buckets.

    Bucket 0 catches values below ``min_val`` (-inf side) and bucket
    ``bins - 1`` catches values above ``max_val`` (+inf side); the
    remaining ``bins - 2`` buckets evenly span [min_val, max_val].
    """
    def __init__(self, bins, min_val, max_val):
        if bins < 2:
            raise ValueError('Must have at least 2 bins.')
        self._min = float(min_val)
        self._max = float(max_val)
        self._bins = int(bins)
        self._bucket_width = (max_val - min_val) / (bins - 2)

    @property
    def bins(self):
        return self._bins

    def from_bin(self, b):
        """Return the representative (lower-bound) value of bucket *b*."""
        if b == 0:
            return float('-inf')
        if b == self._bins - 1:
            return float('inf')
        return self._min + (b - 1) * self._bucket_width

    def to_bin(self, x):
        """Return the bucket index for value *x*."""
        if x < self._min:
            return 0
        if x > self._max:
            return self._bins - 1
        return int(((x - self._min) / self._bucket_width) + 1)
class LinearBinScheme(object):
    """Bin scheme whose bucket widths grow linearly with the bucket index.

    Bucket b covers values up to scale * b*(b+1)/2, so small values get
    fine-grained buckets and large values get coarse ones; the last
    bucket is the +inf overflow bucket.
    """
    def __init__(self, num_bins, max_val):
        self._bins = num_bins
        self._max = max_val
        self._scale = max_val / (num_bins * (num_bins - 1) / 2)

    @property
    def bins(self):
        return self._bins

    def from_bin(self, b):
        """Return the representative value of bucket *b*."""
        if b == self._bins - 1:
            return float('inf')
        unscaled = (b * (b + 1.0)) / 2.0
        return unscaled * self._scale

    def to_bin(self, x):
        """Return the bucket index for value *x* (x must be >= 0)."""
        if x < 0.0:
            raise ValueError('Values less than 0.0 not accepted.')
        if x > self._max:
            return self._bins - 1
        # Invert b*(b+1)/2 = x/scale via the quadratic formula.
        scaled = x / self._scale
        return int(-0.5 + math.sqrt(2.0 * scaled + 0.25))
from __future__ import absolute_import
import threading
import time
from rhkafka.errors import QuotaViolationError
from rhkafka.metrics import KafkaMetric
class Sensor(object):
    """
    A sensor applies a continuous sequence of numerical values
    to a set of associated metrics. For example a sensor on
    message size would record a sequence of message sizes using
    the `record(double)` api and would maintain a set
    of metrics about request sizes such as the average or max.
    """
    def __init__(self, registry, name, parents, config,
                 inactive_sensor_expiration_time_seconds):
        if not name:
            raise ValueError('name must be non-empty')
        self._lock = threading.RLock()
        self._registry = registry
        self._name = name
        self._parents = parents or []
        self._metrics = []
        self._stats = []
        self._config = config
        self._inactive_sensor_expiration_time_ms = (
            inactive_sensor_expiration_time_seconds * 1000)
        self._last_record_time = time.time() * 1000
        self._check_forest(set())

    def _check_forest(self, sensors):
        """Validate that this sensor doesn't end up referencing itself."""
        if self in sensors:
            # BUG FIX: the two string fragments previously joined without a
            # space, producing "... is its ownparent."
            raise ValueError('Circular dependency in sensors: %s is its own'
                             ' parent.' % self.name)
        sensors.add(self)
        for parent in self._parents:
            parent._check_forest(sensors)

    @property
    def name(self):
        """
        The name this sensor is registered with.
        This name will be unique among all registered sensors.
        """
        return self._name

    @property
    def metrics(self):
        # Immutable snapshot of the metrics attached to this sensor.
        return tuple(self._metrics)

    def record(self, value=1.0, time_ms=None):
        """
        Record a value at a known time.
        Arguments:
            value (double): The value we are recording
            time_ms (int): A POSIX timestamp in milliseconds.
                Default: The time when record() is evaluated (now)
        Raises:
            QuotaViolationException: if recording this value moves a
                metric beyond its configured maximum or minimum bound
        """
        if time_ms is None:
            time_ms = time.time() * 1000
        self._last_record_time = time_ms
        with self._lock:  # XXX high volume, might be performance issue
            # increment all the stats
            for stat in self._stats:
                stat.record(self._config, value, time_ms)
            self._check_quotas(time_ms)
        # Propagate to parents outside our own lock; each parent takes its
        # own (reentrant) lock in turn.
        for parent in self._parents:
            parent.record(value, time_ms)

    def _check_quotas(self, time_ms):
        """
        Check if we have violated our quota for any metric that
        has a configured quota
        """
        for metric in self._metrics:
            if metric.config and metric.config.quota:
                value = metric.value(time_ms)
                if not metric.config.quota.is_acceptable(value):
                    raise QuotaViolationError("'%s' violated quota. Actual: "
                                              "%d, Threshold: %d" %
                                              (metric.metric_name,
                                               value,
                                               metric.config.quota.bound))

    def add_compound(self, compound_stat, config=None):
        """
        Register a compound statistic with this sensor which
        yields multiple measurable quantities (like a histogram)
        Arguments:
            stat (AbstractCompoundStat): The stat to register
            config (MetricConfig): The configuration for this stat.
                If None then the stat will use the default configuration
                for this sensor.
        """
        # NOTE(review): unlike add(), this method does not take self._lock
        # before mutating _stats/_metrics -- confirm whether that is
        # intentional or an oversight.
        if not compound_stat:
            raise ValueError('compound stat must be non-empty')
        self._stats.append(compound_stat)
        for named_measurable in compound_stat.stats():
            metric = KafkaMetric(named_measurable.name, named_measurable.stat,
                                 config or self._config)
            self._registry.register_metric(metric)
            self._metrics.append(metric)

    def add(self, metric_name, stat, config=None):
        """
        Register a metric with this sensor
        Arguments:
            metric_name (MetricName): The name of the metric
            stat (AbstractMeasurableStat): The statistic to keep
            config (MetricConfig): A special configuration for this metric.
                If None use the sensor default configuration.
        """
        with self._lock:
            metric = KafkaMetric(metric_name, stat, config or self._config)
            self._registry.register_metric(metric)
            self._metrics.append(metric)
            self._stats.append(stat)

    def has_expired(self):
        """
        Return True if the Sensor is eligible for removal due to inactivity.
        """
        return ((time.time() * 1000 - self._last_record_time) >
                self._inactive_sensor_expiration_time_ms)
from __future__ import absolute_import
from rhkafka.metrics import AnonMeasurable, NamedMeasurable
from rhkafka.metrics.compound_stat import AbstractCompoundStat
from rhkafka.metrics.stats import Histogram
from rhkafka.metrics.stats.sampled_stat import AbstractSampledStat
class BucketSizing(object):
    """Enum-like constants selecting how Percentiles sizes its histogram buckets."""
    CONSTANT = 0  # equal-width buckets (Histogram.ConstantBinScheme)
    LINEAR = 1    # linearly growing buckets (Histogram.LinearBinScheme)
class Percentiles(AbstractSampledStat, AbstractCompoundStat):
    """A compound stat that reports one or more percentiles"""
    def __init__(self, size_in_bytes, bucketing, max_val, min_val=0.0,
                 percentiles=None):
        super(Percentiles, self).__init__(0.0)
        self._percentiles = percentiles or []
        # each histogram bucket costs 4 bytes of the requested budget
        self._buckets = int(size_in_bytes / 4)
        if bucketing == BucketSizing.CONSTANT:
            self._bin_scheme = Histogram.ConstantBinScheme(self._buckets,
                                                           min_val, max_val)
        elif bucketing == BucketSizing.LINEAR:
            if min_val != 0.0:
                raise ValueError('Linear bucket sizing requires min_val'
                                 ' to be 0.0.')
            # BUG FIX: this was assigned to `self.bin_scheme` (no
            # underscore) while every reader (value(), new_sample()) uses
            # `self._bin_scheme`, so linear bucketing crashed on first use.
            self._bin_scheme = Histogram.LinearBinScheme(self._buckets,
                                                         max_val)
        else:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, silently leaving the scheme unset.
            raise ValueError('Unknown bucket type: %s' % bucketing)

    def stats(self):
        """Return one NamedMeasurable per configured percentile."""
        measurables = []

        def make_measure_fn(pct):
            # bind pct by value so each measurable reads its own percentile
            return lambda config, now: self.value(config, now,
                                                  pct / 100.0)

        for percentile in self._percentiles:
            measure_fn = make_measure_fn(percentile.percentile)
            stat = NamedMeasurable(percentile.name, AnonMeasurable(measure_fn))
            measurables.append(stat)
        return measurables

    def value(self, config, now, quantile):
        """Estimate *quantile* from all retained samples as of *now*."""
        self.purge_obsolete_samples(config, now)
        count = sum(sample.event_count for sample in self._samples)
        if count == 0.0:
            return float('NaN')
        sum_val = 0.0
        quant = float(quantile)
        for b in range(self._buckets):
            for sample in self._samples:
                assert type(sample) is self.HistogramSample
                hist = sample.histogram.counts
                sum_val += hist[b]
            if sum_val / count > quant:
                return self._bin_scheme.from_bin(b)
        return float('inf')

    def combine(self, samples, config, now):
        # The combined measurement of a Percentiles stat is its median.
        return self.value(config, now, 0.5)

    def new_sample(self, time_ms):
        return Percentiles.HistogramSample(self._bin_scheme, time_ms)

    def update(self, sample, config, value, time_ms):
        assert type(sample) is self.HistogramSample
        sample.histogram.record(value)

    class HistogramSample(AbstractSampledStat.Sample):
        # A sample whose accumulator is a whole histogram rather than a scalar.
        def __init__(self, scheme, now):
            super(Percentiles.HistogramSample, self).__init__(0.0, now)
            self.histogram = Histogram(scheme)
from __future__ import absolute_import
import abc
from rhkafka.metrics.measurable_stat import AbstractMeasurableStat
class AbstractSampledStat(AbstractMeasurableStat):
    """
    An AbstractSampledStat records a single scalar value measured over
    one or more samples. Each sample is recorded over a configurable
    window. The window can be defined by number of events or elapsed
    time (or both, if both are given the window is complete when
    *either* the event count or elapsed time criterion is met).

    All the samples are combined to produce the measurement. When a
    window is complete the oldest sample is cleared and recycled to
    begin recording the next sample.

    Subclasses of this class define different statistics measured
    using this basic pattern.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, initial_value):
        # Value each sample's accumulator is (re)initialized to.
        self._initial_value = initial_value
        self._samples = []   # ring of Sample objects, grown lazily up to config.samples
        self._current = 0    # index of the sample currently recording

    @abc.abstractmethod
    def update(self, sample, config, value, time_ms):
        """Fold one recorded value into *sample*."""
        raise NotImplementedError

    @abc.abstractmethod
    def combine(self, samples, config, now):
        """Combine all samples into the final measurement."""
        raise NotImplementedError

    def record(self, config, value, time_ms):
        """Record *value*, rolling over to a fresh sample if the window completed."""
        sample = self.current(time_ms)
        if sample.is_complete(time_ms, config):
            sample = self._advance(config, time_ms)
        self.update(sample, config, float(value), time_ms)
        sample.event_count += 1

    def new_sample(self, time_ms):
        """Create a new sample; subclasses may override to use a richer type."""
        return self.Sample(self._initial_value, time_ms)

    def measure(self, config, now):
        """Drop expired samples, then combine the survivors into one value."""
        self.purge_obsolete_samples(config, now)
        return float(self.combine(self._samples, config, now))

    def current(self, time_ms):
        """Return the sample currently recording, lazily creating the first."""
        if not self._samples:
            self._samples.append(self.new_sample(time_ms))
        return self._samples[self._current]

    def oldest(self, now):
        """Return the sample whose window started earliest."""
        if not self._samples:
            self._samples.append(self.new_sample(now))
        oldest = self._samples[0]
        for sample in self._samples[1:]:
            if sample.last_window_ms < oldest.last_window_ms:
                oldest = sample
        return oldest

    def purge_obsolete_samples(self, config, now):
        """
        Timeout any windows that have expired in the absence of any events
        """
        expire_age = config.samples * config.time_window_ms
        for sample in self._samples:
            if now - sample.last_window_ms >= expire_age:
                sample.reset(now)

    def _advance(self, config, time_ms):
        # Move to the next ring slot: allocate it while the ring is still
        # growing toward config.samples entries, otherwise recycle in place.
        self._current = (self._current + 1) % config.samples
        if self._current >= len(self._samples):
            sample = self.new_sample(time_ms)
            self._samples.append(sample)
            return sample
        else:
            sample = self.current(time_ms)
            sample.reset(time_ms)
            return sample

    class Sample(object):
        # Mutable accumulator for a single window.
        def __init__(self, initial_value, now):
            self.initial_value = initial_value  # kept so reset() can restore it
            self.event_count = 0
            self.last_window_ms = now
            self.value = initial_value

        def reset(self, now):
            self.event_count = 0
            self.last_window_ms = now
            self.value = self.initial_value

        def is_complete(self, time_ms, config):
            # Complete when EITHER the time window or the event window is hit.
            return (time_ms - self.last_window_ms >= config.time_window_ms or
                    self.event_count >= config.event_window)
from __future__ import absolute_import
from rhkafka.metrics.measurable_stat import AbstractMeasurableStat
from rhkafka.metrics.stats.sampled_stat import AbstractSampledStat
class TimeUnit(object):
    """Integer constants for time units, with reverse name lookup."""
    _names = {
        'nanosecond': 0,
        'microsecond': 1,
        'millisecond': 2,
        'second': 3,
        'minute': 4,
        'hour': 5,
        'day': 6,
    }

    NANOSECONDS = _names['nanosecond']
    MICROSECONDS = _names['microsecond']
    MILLISECONDS = _names['millisecond']
    SECONDS = _names['second']
    MINUTES = _names['minute']
    HOURS = _names['hour']
    DAYS = _names['day']

    @staticmethod
    def get_name(time_unit):
        """Return the unit name for a TimeUnit constant, e.g. 3 -> 'second'.

        BUG FIX: previously this indexed ``_names[time_unit]`` directly,
        but ``_names`` maps name -> constant, so the int constants that
        callers pass (e.g. Rate.unit_name passes self._unit) always raised
        KeyError. It now performs the reverse lookup.
        """
        for name, value in TimeUnit._names.items():
            if value == time_unit:
                return name
        raise KeyError('Unknown time unit: %s' % time_unit)
class Rate(AbstractMeasurableStat):
    """
    The rate of the given quantity. By default this is the total observed
    over a set of samples from a sampled statistic divided by the elapsed
    time over the sample windows. Alternative AbstractSampledStat
    implementations can be provided, however, to record the rate of
    occurrences (e.g. the count of values measured over the time interval)
    or other such values.
    """
    def __init__(self, time_unit=TimeUnit.SECONDS, sampled_stat=None):
        self._stat = sampled_stat or SampledTotal()
        self._unit = time_unit

    def unit_name(self):
        """Return the name of this rate's time unit, e.g. 'second'."""
        return TimeUnit.get_name(self._unit)

    def record(self, config, value, time_ms):
        self._stat.record(config, value, time_ms)

    def measure(self, config, now):
        """Return the measured rate: total value / elapsed window time."""
        value = self._stat.measure(config, now)
        return float(value) / self.convert(self.window_size(config, now))

    def window_size(self, config, now):
        """Return the elapsed time (ms) the current measurement covers."""
        # purge old samples before we compute the window size
        self._stat.purge_obsolete_samples(config, now)

        # IDIOM FIX: the explanation below was previously a stray string
        # literal (a no-op expression statement) in the middle of the
        # function body; it is now a comment.
        #
        # Here we check the total amount of time elapsed since the oldest
        # non-obsolete window. This gives the total window_size of the
        # batch, which is the time used for Rate computation. However,
        # there is an issue if we do not have sufficient data: e.g. if only
        # 1 second has elapsed in a 30 second window, the measured rate
        # will be very high. Hence we assume that the elapsed time is
        # always N-1 complete windows plus whatever fraction of the final
        # window is complete.
        #
        # Note that we could simply count the amount of time elapsed in the
        # current window and add n-1 windows to get the total time, but
        # this approach does not account for sleeps. AbstractSampledStat
        # only creates samples whenever record is called; if no record is
        # called for a period of time, that time is not accounted for in
        # window_size and produces incorrect results.
        total_elapsed_time_ms = now - self._stat.oldest(now).last_window_ms
        # Check how many full windows of data we have currently retained
        num_full_windows = int(total_elapsed_time_ms / config.time_window_ms)
        min_full_windows = config.samples - 1
        # If the available windows are less than the minimum required,
        # add the difference to the totalElapsedTime
        if num_full_windows < min_full_windows:
            total_elapsed_time_ms += ((min_full_windows - num_full_windows) *
                                      config.time_window_ms)
        return total_elapsed_time_ms

    def convert(self, time_ms):
        """Convert *time_ms* (milliseconds) into this rate's time unit."""
        if self._unit == TimeUnit.NANOSECONDS:
            return time_ms * 1000.0 * 1000.0
        elif self._unit == TimeUnit.MICROSECONDS:
            return time_ms * 1000.0
        elif self._unit == TimeUnit.MILLISECONDS:
            return time_ms
        elif self._unit == TimeUnit.SECONDS:
            return time_ms / 1000.0
        elif self._unit == TimeUnit.MINUTES:
            return time_ms / (60.0 * 1000.0)
        elif self._unit == TimeUnit.HOURS:
            return time_ms / (60.0 * 60.0 * 1000.0)
        elif self._unit == TimeUnit.DAYS:
            return time_ms / (24.0 * 60.0 * 60.0 * 1000.0)
        else:
            raise ValueError('Unknown unit: %s' % self._unit)
class SampledTotal(AbstractSampledStat):
    """Sampled stat that sums every value recorded within the window."""
    def __init__(self, initial_value=None):
        # The running total must start at 0.0; a caller-supplied initial
        # value is explicitly rejected rather than silently ignored.
        if initial_value is not None:
            raise ValueError('initial_value cannot be set on SampledTotal')
        super(SampledTotal, self).__init__(0.0)

    def update(self, sample, config, value, time_ms):
        sample.value += value

    def combine(self, samples, config, now):
        return float(sum(s.value for s in samples))
import re
class Reporter:
    """
    Collects the results of pyflakes checks as diagnostic strings.

    Each entry appended to ``diagnostics`` begins with a severity digit:
    '1' for errors (unexpected or syntax), '2' for flake warnings.
    """

    def __init__(self):
        self.diagnostics = []

    def unexpectedError(self, filename, msg):
        """
        An unexpected error occurred while attempting to process a file.

        @param filename: The path to a file that we could not process.
        @type filename: C{unicode}
        @param msg: A message explaining the problem.
        @type msg: C{unicode}
        """
        self.diagnostics.append(f"1(unknown): {msg}\n")

    def syntaxError(self, filename, msg, lineno, offset, text):
        """
        There was a syntax error in a file.

        @param filename: The path to the file with the syntax error.
        @type filename: C{unicode}
        @param msg: An explanation of the syntax error.
        @type msg: C{unicode}
        @param lineno: The line number where the syntax error occurred.
        @type lineno: C{int}
        @param offset: The column on which the syntax error occurred, or None.
        @type offset: C{int}
        @param text: The source code containing the syntax error.
        @type text: C{unicode}
        """
        line = None if text is None else text.splitlines()[-1]
        # lineno might be None if the error was during tokenization,
        # and 0 if the error came from stdin
        lineno = max(lineno or 0, 1)
        if offset is None:
            self.diagnostics.append('1%s:%d: %s\n' % (filename, lineno, msg))
        else:
            # some versions of python emit an offset of -1 for certain
            # encoding errors
            offset = max(offset, 1)
            self.diagnostics.append('1%s:%d:%d: %s\n' %
                                    (filename, lineno, offset, msg))
        if line is not None:
            self.diagnostics.append(line)
            self.diagnostics.append('\n')
            if offset is not None:
                # Pad with spaces up to the offending column, then a caret.
                caret_pad = re.sub(r'\S', ' ', line[:offset - 1])
                self.diagnostics.append(caret_pad + "^\n")

    def flake(self, message):
        """
        pyflakes found something wrong with the code.

        @param message: A L{pyflakes.messages.Message}.
        """
        self.diagnostics.append("2" + str(message))
        self.diagnostics.append('\n')
import * as luau from "../../LuauAST/bundle";
/**
 * Builds a type-guard that matches any of the given syntax kinds.
 * The kinds are stored in a Set so each guard call is a single O(1) lookup.
 */
function makeGuard<T extends keyof luau.NodeByKind>(...kinds: [...Array<T>]) {
    const kindSet = new Set<luau.SyntaxKind>(kinds);
    return (node: luau.Node): node is luau.NodeByKind[T] => kindSet.has(node.kind);
}
// indexable expressions
// One guard per node kind that may appear as the base of an index/call chain.
export const isAnyIdentifier = makeGuard(luau.SyntaxKind.Identifier, luau.SyntaxKind.TemporaryIdentifier);
export const isIdentifier = makeGuard(luau.SyntaxKind.Identifier);
export const isTemporaryIdentifier = makeGuard(luau.SyntaxKind.TemporaryIdentifier);
export const isComputedIndexExpression = makeGuard(luau.SyntaxKind.ComputedIndexExpression);
export const isPropertyAccessExpression = makeGuard(luau.SyntaxKind.PropertyAccessExpression);
export const isCallExpression = makeGuard(luau.SyntaxKind.CallExpression);
export const isMethodCallExpression = makeGuard(luau.SyntaxKind.MethodCallExpression);
export const isParenthesizedExpression = makeGuard(luau.SyntaxKind.ParenthesizedExpression);

// Range check: relies on the SyntaxKind enum's First/Last marker ordering.
export function isIndexableExpression(node: luau.Node): node is luau.IndexableExpression {
    return (
        node.kind >= luau.SyntaxKind.FirstIndexableExpression && node.kind <= luau.SyntaxKind.LastIndexableExpression
    );
}
// expressions
// One guard per concrete expression kind.
export const isNone = makeGuard(luau.SyntaxKind.None);
export const isNilLiteral = makeGuard(luau.SyntaxKind.NilLiteral);
export const isFalseLiteral = makeGuard(luau.SyntaxKind.FalseLiteral);
export const isTrueLiteral = makeGuard(luau.SyntaxKind.TrueLiteral);
export const isNumberLiteral = makeGuard(luau.SyntaxKind.NumberLiteral);
export const isStringLiteral = makeGuard(luau.SyntaxKind.StringLiteral);
export const isVarArgsLiteral = makeGuard(luau.SyntaxKind.VarArgsLiteral);
export const isFunctionExpression = makeGuard(luau.SyntaxKind.FunctionExpression);
export const isBinaryExpression = makeGuard(luau.SyntaxKind.BinaryExpression);
export const isUnaryExpression = makeGuard(luau.SyntaxKind.UnaryExpression);
export const isIfExpression = makeGuard(luau.SyntaxKind.IfExpression);
export const isArray = makeGuard(luau.SyntaxKind.Array);
export const isMap = makeGuard(luau.SyntaxKind.Map);
export const isSet = makeGuard(luau.SyntaxKind.Set);
export const isMixedTable = makeGuard(luau.SyntaxKind.MixedTable);

// Range check over the whole expression span of the SyntaxKind enum.
export function isExpression(node: luau.Node): node is luau.Expression {
    return node.kind >= luau.SyntaxKind.FirstExpression && node.kind <= luau.SyntaxKind.LastExpression;
}
// statements
// One guard per concrete statement kind.
export const isAssignment = makeGuard(luau.SyntaxKind.Assignment);
export const isBreakStatement = makeGuard(luau.SyntaxKind.BreakStatement);
export const isCallStatement = makeGuard(luau.SyntaxKind.CallStatement);
export const isContinueStatement = makeGuard(luau.SyntaxKind.ContinueStatement);
export const isDoStatement = makeGuard(luau.SyntaxKind.DoStatement);
export const isWhileStatement = makeGuard(luau.SyntaxKind.WhileStatement);
export const isRepeatStatement = makeGuard(luau.SyntaxKind.RepeatStatement);
export const isIfStatement = makeGuard(luau.SyntaxKind.IfStatement);
export const isNumericForStatement = makeGuard(luau.SyntaxKind.NumericForStatement);
export const isForStatement = makeGuard(luau.SyntaxKind.ForStatement);
export const isFunctionDeclaration = makeGuard(luau.SyntaxKind.FunctionDeclaration);
export const isMethodDeclaration = makeGuard(luau.SyntaxKind.MethodDeclaration);
export const isVariableDeclaration = makeGuard(luau.SyntaxKind.VariableDeclaration);
export const isReturnStatement = makeGuard(luau.SyntaxKind.ReturnStatement);
export const isComment = makeGuard(luau.SyntaxKind.Comment);

// Range check over the whole statement span of the SyntaxKind enum.
export function isStatement(node: luau.Node): node is luau.Statement {
    return node.kind >= luau.SyntaxKind.FirstStatement && node.kind <= luau.SyntaxKind.LastStatement;
}
// fields
export const isMapField = makeGuard(luau.SyntaxKind.MapField);

// Range check over the field span of the SyntaxKind enum.
export function isField(node: luau.Node): node is luau.Field {
    return node.kind >= luau.SyntaxKind.FirstField && node.kind <= luau.SyntaxKind.LastField;
}

// Structural check for arbitrary unknown values: any object with a numeric
// `kind` inside the full SyntaxKind range is assumed to be a luau.Node.
export function isNode(value: unknown): value is luau.Node {
    if (typeof value === "object" && value !== null && "kind" in value) {
        // hack: we only verify the `kind` discriminant, not the full node shape
        const { kind } = value as { kind: unknown };
        return (
            typeof kind === "number" &&
            kind >= luau.SyntaxKind.FirstIndexableExpression &&
            kind <= luau.SyntaxKind.LastField
        );
    }
    return false;
}
// Guards over convenience groupings of kinds.

// Identifiers and literal constants.
export const isSimple = makeGuard(
    luau.SyntaxKind.Identifier,
    luau.SyntaxKind.TemporaryIdentifier,
    luau.SyntaxKind.NilLiteral,
    luau.SyntaxKind.TrueLiteral,
    luau.SyntaxKind.FalseLiteral,
    luau.SyntaxKind.NumberLiteral,
    luau.SyntaxKind.StringLiteral,
);

// Literal constants only (no identifiers).
export const isSimplePrimitive = makeGuard(
    luau.SyntaxKind.NilLiteral,
    luau.SyntaxKind.TrueLiteral,
    luau.SyntaxKind.FalseLiteral,
    luau.SyntaxKind.NumberLiteral,
    luau.SyntaxKind.StringLiteral,
);

// Table constructor expressions.
export const isTable = makeGuard(luau.SyntaxKind.Array, luau.SyntaxKind.Set, luau.SyntaxKind.Map);

// Statements after which no further statement in the same block can run.
export const isFinalStatement = makeGuard(
    luau.SyntaxKind.BreakStatement,
    luau.SyntaxKind.ReturnStatement,
    luau.SyntaxKind.ContinueStatement,
);

// Either call form (plain or method call).
export const isCall = makeGuard(luau.SyntaxKind.CallExpression, luau.SyntaxKind.MethodCallExpression);

// Expressions that may appear as assignment targets.
export const isWritableExpression: (node: luau.Node) => node is luau.WritableExpression = makeGuard(
    luau.SyntaxKind.Identifier,
    luau.SyntaxKind.TemporaryIdentifier,
    luau.SyntaxKind.PropertyAccessExpression,
    luau.SyntaxKind.ComputedIndexExpression,
);

// Nodes that carry a parameter list and a function body.
export const isFunctionLike = makeGuard(
    luau.SyntaxKind.FunctionDeclaration,
    luau.SyntaxKind.FunctionExpression,
    luau.SyntaxKind.MethodDeclaration,
);

// Nodes that contain a nested statement list.
export const hasStatements = makeGuard(
    luau.SyntaxKind.ForStatement,
    luau.SyntaxKind.NumericForStatement,
    luau.SyntaxKind.FunctionExpression,
    luau.SyntaxKind.DoStatement,
    luau.SyntaxKind.FunctionDeclaration,
    luau.SyntaxKind.IfStatement,
    luau.SyntaxKind.MethodDeclaration,
    luau.SyntaxKind.RepeatStatement,
    luau.SyntaxKind.WhileStatement,
);

// Expression kinds whose rendering may need parentheses depending on context.
export const isExpressionWithPrecedence = makeGuard(
    luau.SyntaxKind.IfExpression,
    luau.SyntaxKind.UnaryExpression,
    luau.SyntaxKind.BinaryExpression,
);
import luau from "../../LuauAST";
import { assert } from "../../LuauAST/util/assert";
import { RenderState } from "../../LuauRenderer";
/**
 * Recursively checks whether the rightmost token of *node* is produced by an
 * indexable expression (and could therefore fuse with a following `(`).
 */
function endsWithIndexableExpressionInner(node: luau.Expression): boolean {
    if (luau.isIndexableExpression(node)) {
        // `a` or `(a)` or `a.b` or `a[b]` or `a()`
        return true;
    } else if (luau.isBinaryExpression(node)) {
        // `a + b` -- only the right operand can end the rendered text
        return endsWithIndexableExpressionInner(node.right);
    } else if (luau.isUnaryExpression(node)) {
        // `-a`
        return endsWithIndexableExpressionInner(node.expression);
    } else if (luau.isIfExpression(node)) {
        // `if a then b else c`
        // `if a then b elseif c then d else e`
        // -- the `else` alternative is always rendered last
        return endsWithIndexableExpressionInner(node.alternative);
    }
    return false;
}
/**
 * Checks whether the rendered form of *node* (a statement) ends with an
 * indexable expression, by locating its rightmost rendered expression.
 */
function endsWithIndexableExpression(node: luau.Statement) {
    if (luau.isCallStatement(node)) {
        // `a()`
        return true;
    } else if (luau.isVariableDeclaration(node) || luau.isAssignment(node)) {
        // `local a = b` or `a = b` or `local a` or `local a, b`
        // The rightmost rendered expression is the tail of the right-hand
        // side when present, otherwise the tail of the left-hand side.
        let furthestRight: luau.Expression;
        if (node.right) {
            if (luau.list.isList(node.right)) {
                assert(luau.list.isNonEmpty(node.right));
                furthestRight = node.right.tail.value;
            } else {
                furthestRight = node.right;
            }
        } else if (luau.list.isList(node.left)) {
            assert(luau.list.isNonEmpty(node.left));
            furthestRight = node.left.tail.value;
        } else {
            furthestRight = node.left;
        }
        return endsWithIndexableExpressionInner(furthestRight);
    }
    return false;
}
/**
 * Recursively checks whether the leftmost token of *node* is an opening
 * parenthesis (walking down the base of index/call chains).
 */
function startsWithParenthesisInner(node: luau.Expression): boolean {
    if (luau.isParenthesizedExpression(node)) {
        // `(a)`
        return true;
    } else if (luau.isCall(node) || luau.isPropertyAccessExpression(node) || luau.isComputedIndexExpression(node)) {
        // `(a)()` or `(a):b()` or `(a).b` or `(a)[b]`
        return startsWithParenthesisInner(node.expression);
    }
    return false;
}
/**
 * Checks whether the rendered form of *node* (a statement) begins with an
 * opening parenthesis -- the situation that triggers Luau's ambiguous
 * syntax when the previous statement ends in an indexable expression.
 */
function startsWithParenthesis(node: luau.Statement) {
    if (luau.isCallStatement(node)) {
        // `(a)()`
        return startsWithParenthesisInner(node.expression.expression);
    } else if (luau.isAssignment(node)) {
        if (luau.list.isList(node.left)) {
            // `(a).b, c = d`
            assert(luau.list.isNonEmpty(node.left));
            return startsWithParenthesisInner(node.left.head.value);
        } else {
            // `(a).b = c`
            return startsWithParenthesisInner(node.left);
        }
    }
    return false;
}
/** Returns the next statement after the current one, skipping comment nodes. */
function getNextNonComment(state: RenderState) {
    let candidate = state.peekListNode()?.next;
    while (candidate && luau.isComment(candidate.value)) {
        candidate = candidate.next;
    }
    return candidate?.value;
}
/**
 * Resolves if the given statement needs to end with a `;` or not.
 *
 * Used to avoid "ambiguous syntax" errors in Luau.
 *
 * This is only necessary in statements which can end in an IndexableExpression:
 * - CallStatement
 * - VariableDeclaration
 * - Assignment
 *
 * A `;` is emitted only when the NEXT (non-comment) statement begins with a
 * parenthesis, since `a = b` followed by `(c)()` would otherwise parse as a
 * call of `b`.
 */
export function getEnding(state: RenderState, node: luau.Statement) {
    const nextStatement = getNextNonComment(state);
    if (nextStatement !== undefined && endsWithIndexableExpression(node) && startsWithParenthesis(nextStatement)) {
        return ";";
    } else {
        return "";
    }
}
from enum import IntEnum, Enum
from typing import Optional, List
import orjson
from .utilities import int_or_none
class MembershipType(IntEnum):
    """
    Represents a Roblox membership type.
    Please see https://developer.roblox.com/en-us/api-reference/enum/MembershipType for more information.
    """
    # Values mirror the Roblox MembershipType enum linked above.
    none = 0
    builders_club = 1
    turbo_builders_club = 2
    outrageous_builders_club = 3
    premium = 4
class PolicyServiceHttpResponse:
    """Parsed policy-service payload describing regional content policies.

    The input dict uses the camelCase keys of the stored JSON payload.
    """
    def __init__(self, data: dict):
        self.is_subject_to_china_policies: bool = data["isSubjectToChinaPolicies"]
        self.are_paid_random_items_restricted: bool = data["arePaidRandomItemsRestricted"]
        self.is_paid_item_trading_allowed: bool = data["isPaidItemTradingAllowed"]
        self.allowed_external_link_references: List[str] = data["allowedExternalLinkReferences"]
class Theme(Enum):
    """Roblox Studio UI theme setting."""
    dark = "dark"
    light = "light"
class AppStorage:
    """Typed view over Roblox Studio's key/value app storage.

    Every entry arrives as a raw string (or is absent); each attribute
    parses its entry into a more useful Python type, defaulting to None
    when the key is missing.
    """
    def __init__(self, data: dict):
        self.app_installation_id: Optional[int] = int_or_none(data.get("AppInstallationId"))
        self.username: Optional[str] = data.get("Username")
        self.membership: Optional[MembershipType] = MembershipType(int(data["Membership"])) if data.get("Membership") \
            else None
        self.roblox_locale_id: Optional[str] = data.get("RobloxLocaleId")
        self.browser_tracker_id: Optional[int] = int_or_none(data.get("BrowserTrackerId"))
        self.web_login: Optional[int] = int_or_none(data.get("WebLogin"))
        # rethink where to do these orjson loads. this is probably bad
        self.policy_service_http_response: Optional[PolicyServiceHttpResponse] = \
            PolicyServiceHttpResponse(orjson.loads(data["PolicyServiceHttpResponse"])) \
            if data.get("PolicyServiceHttpResponse") else None
        self.player_exe_launch_time: Optional[int] = int_or_none(data.get("PlayerExeLaunchTime"))
        self.user_id: Optional[int] = int_or_none(data.get("UserId"))
        # BUG FIX: the else-branch previously produced the *string* "false"
        # (which is truthy) whenever the stored value was anything other
        # than "true". It now yields a real bool, and the annotation is
        # corrected from Optional[int] to Optional[bool].
        self.is_under_13: Optional[bool] = (data["IsUnder13"] == "true") if data.get("IsUnder13") \
            else None
        self.display_name: Optional[str] = data.get("DisplayName")
        self.country_code: Optional[str] = data.get("CountryCode")
        self.game_locale_id: Optional[str] = data.get("GameLocaleId")
        self.authenticated_theme: Optional[Theme] = Theme(data["AuthenticatedTheme"]) \
            if data.get("AuthenticatedTheme") else None
        self.experience_menu_version: Optional[str] = data.get("ExperienceMenuVersion")
        self.native_close_lua_prompt_display_count: Optional[dict] = \
            orjson.loads(data["NativeCloseLuaPromptDisplayCount"]) if data.get("NativeCloseLuaPromptDisplayCount") \
            else None
# RobloxPy 0.2.21
RobloxPy is a python API wrapper for roblox. This allows for quick and easy integration of these API's into a python project.
<sup><center>If you need any help using RobloxPy or want to request an additional feature, please join the discord server at
https://www.kristansmout.com/discord
Accept The Terms & Create a ticket</center></sup>
## Table Of Contents
* [Getting Started](#Getting-Started)
* [Prerequisites](#Prerequisites)
* [Installation](#Installation)
* [Feature List](#Features)
* [Game](#Game)
* [Group](#Group)
* [Market](#Market)
* [User](#User)
* [User.Friends](#User.Friends)
* [User.Groups](#User.Groups)
* [Usage Examples](#Usage-Examples)
* [Utilities](#Utilities)
* [Game](#Game)
#
## Getting-Started
To use the wrapper you will need to download and import robloxpy into your current project. The project has been designed to not include external requirements that are not included within the base installation of python.
### Prerequisites
> None
### Installation
```python
pip install robloxpy
```
If you wish to update robloxpy in the future you can also do this through pip
```python
pip install robloxpy --upgrade
```
### Requests
Robloxpy is built on community feedback, if you have a feature you want added please make it known on the discord and we will see if we can implement it for you. Not all features can be added and some are emitted to prevent abuse.
#
## Feature List
#### Utilities
* GetVersion
* CheckForUpdate
* UpdateInstructions
* SetProxy
* CheckProxy
* CheckCookie
#### Game
* External
* GetUniverseData
* GetUniverseVotes
* GetUniverseFavourites
* GetCurrentUniversePlayers
* GetUniverseVisits
* GetUniverseLikes
* GetUniverseDislikes
* Internal
* GetUniverseID
* GetCurrentPlayers
* GetGameVisits
* GetGameLikes
* GetGameDislikes
* GetGameFavourites
* GetMyGameData
#### Group
* External
* IsGroupOwned
* GetName
* GetOwner
* GetDescription
* GetEmblem
* GetRoles
* GetAllies
* GetEnemies
* GetMemberCount
* isPublic
* isBCOnly
* GetMembersList
* GetMembersinRoleList
#### Market
* External
* CanManageAsset
* GetLimitedPriceData
* GetLimitedRemaining
* GetLimitedTotal
* GetLimitedSales
* GetLimitedRAP
* GetLimitedSalePrice
* GetLimitedChangePercentage
* GetAssetImage
* Internal
* BuyItem
#### User
* External
* GetID
* GetUserName
* UsernameHistory
* DoesNameExist
* IsOnline
* Isbanned
* GetDescription
* GetAge
* CreationDate
* GetRAP
* GetLimiteds
* GetBust
* GetHeadshot
* GetStatus
* Internal
* SetCookie
* GetDetails
* isFollowing
* FollowUser
* UnfollowUser
* BlockUser
* UnblockUser
* GetBlockedUsers
* SendMessage
* JoinGame
#### User.Friends
* External
* GetAll
* GetCount
* GetOnline
* GetOffline
* GetFollowerCount
* GetFollowers
* GetFollowingCount
* GetFollowing
* Internal
* SendFriendRequest
* Unfriend
* TotalFriends
#### User.Groups
* External
* GetGroups
* Internal
* Claim
* Join
* Leave
* GetFunds
* Payout
* PercentagePayout
* SendWallPost
* SendGroupShout
* ChangeDescription
* ChangeRank
#
## Usage-Examples
This section will cover the usage of robloxpy, it will provide examples for commands and the expected outputs to help you achieve what you want to achieve with robloxpy.
### Utilities
The utility functions are used for checking things within robloxpy as well as serving as a reference point for robloxpy to store values between different areas of the API, such as a central place for URLs.
> Utilities are called by standard as
> ```
> robloxpy.Utils.<UTILITYFUNCTION>
> ```
> Not all functions require arguments however some do, you will be alerted to this in your IDE providing it supports intellisense.
* **CheckForUpdate()**
This function checks to see if an update is available for robloxpy, an internet connection will be required for this to work. If you wish to display this you will need to print it.
>Example Usage
>```python
>robloxpy.Utils.CheckForUpdate()
>```
>
>Example Output
>```
>You are up to date!
>```
* **GetVersion()**
This function returns the current version of robloxpy that is being used for the current project.
>Example Usage
>```python
>robloxpy.Utils.GetVersion()
>```
>
>Example Output
>```
>0.2.8
>```
* **UpdateInstructions(Version)**
This function returns instructions on how to update robloxpy, this can be used to show users of software how to get to the latest version. Alternatively if your tool was designed for a specific version of robloxpy you can give the user instructions on how to get to that version
>Example Usage
>```python
>robloxpy.Utils.UpdateInstructions()
>```
>
>Example Output
>```
>Update robloxpy through pip using following command: 'pip install robloxpy --upgrade'
>```
If you wish for your users to use a specific version of robloxpy you can have the instructions given to them on how to reach that specific version using the following.
>Example Usage
>```python
>robloxpy.Utils.UpdateInstructions("0.2.8")
>```
>
>Example Output
>```
>This software is intended to work on robloxpy version 0.2.8 please install using the following command
>'pip install robloxpy==0.2.8'
>If you get an error with this command the developer of this tool has not provided a valid version
>```
* **SetProxy(ProxyIP)**
This function will set a global proxy to be used within the python program and is not just limited to robloxpy. The expected format is IP:PORT
>Example Usage
>```python
>robloxpy.Utils.SetProxy("144.217.101.245:3129")
>```
>
>Example Output
>```
> OUTPUT NOT GIVEN
>```
* **CheckProxy(proxyAddress)**
This function will check the current proxy and provided the IP Shown to external sites.This function does not need an argument, if none is provided then the currently set proxy will be used. The expected format is IP:PORT if an argument is provided
>Example Usage
>```python
>robloxpy.Utils.CheckProxy("144.217.101.245:3129")
>```
>
>Example Output
>```
> 144.217.101.245
>```
* **CheckCookie(Cookie)**
This function will check if a cookie is valid, if no cookie is provided it will use the current cookie which has been set using the **SetCookie()** function.
>Example Usage
>```python
>robloxpy.Utils.CheckCookie()
>```
>
>Example Output
>```
> Valid Cookie
>```
### Game
The Game functions are functions geared towards getting data from games/universes. This group of functions has both internal and external commands. The internal commands will utilize the external commands while having an easier way to input data due to the way roblox has locked some API's behind the need to be logged in.
#### Internal
* **GetUniverseID(PlaceID)**
This function will convert a placeID to a universe ID to allow it to be used with external functions
>Example Usage
>```python
>robloxpy.Game.Internal.GetUniverseID(164118757)
>```
>
>Example Output
>```
> 23476326
>```
* **GetCurrentPlayers(PlaceID)**
This function will return the amount of players in a game
>Example Usage
>```python
>robloxpy.Game.Internal.GetCurrentPlayers(164118757)
>```
>
>Example Output
>```
> 52
>```
* **GetGameVisits(PlaceID)**
This function will return the amount of visits a game has
>Example Usage
>```python
>robloxpy.Game.Internal.GetGameVisits(164118757)
>```
>
>Example Output
>```
> 97
>```
* **GetGameLikes(PlaceID)**
This function will return the amount of likes a game has
>Example Usage
>```python
>robloxpy.Game.Internal.GetGameLikes(164118757)
>```
>
>Example Output
>```
> 6
>```
* **GetGameDislikes(PlaceID)**
This function will return the amount of dislikes a game has
>Example Usage
>```python
>robloxpy.Game.Internal.GetGameDislikes(164118757)
>```
>
>Example Output
>```
> 2
>```
* **GetGameFavourites(PlaceID)**
This function will return the amount of favourites a game has
>Example Usage
>```python
>robloxpy.Game.Internal.GetGameFavourites(164118757)
>```
>
>Example Output
>```
> 8
>```
* **GetMyGameData(PlaceID)**
This function will return a range of data of a game owned by the current set cookie. The PlaceID must be a game that the current user has permissions for to edit.
>Example Usage
>```python
>robloxpy.Game.Internal.GetMyGameData(164118757)
>```
>
>Example Output
>```
> Saved
>```
This function will save the games data in a sort of cache to be accessed when needed. This is the data which this function will collect and be used as needed:
> * maxPlayerCount
> * socialSlotType
> * customSocialSlotsCount
> * allowCopying
> * currentSavedVersion
> * name
> * isRootPlace
> * descriptionisRootPlace
These variables can then be used as needed such as the following
```python
print(robloxpy.Game.Internal.MyGame.maxPlayerCount)
```
#### External
* **GetUniverseData(UniverseID)**
This function will provide a range of data for a game which you can then parse to get what information you need.
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseData(1069201198)
>```
>
>Example Output
>```
> {'id': 1069201198, 'rootPlaceId': 2960777560, 'name': '๐CHRISTMAS๐Treasure Quest', 'description': 'โ Christmas Event ends on Friday, January 29
>th! โ \r\n\r\n๐ UPDATE 28!๐\r\nโ๏ธ New event boss! Team up to defeat Hyperfrost and earn limited time rewards!\r\n๐ฌ New Candy currency! Earn them from dungeons and quests!\r\n๐ New Event shop! Limited time items that can be purchased using Candy!\r\nโ๏ธ New Winter Lobby!\r\n๐ New Orname
>nt Hunt around the lobby! Find all 6 for a limited time cosmetic!\r\n๐ New Prize Wheel items!\r\nโก New Energy Blade Quest rewards!\r\n๐ ๏ธ 4 new
>crafting recipes!\r\n๐ New Miniboss - Tank!\r\n๐ฅ New Ability - Stomp!\r\n๐ฐ New Mythical Festive Pack!\r\n\r\nโ๏ธ Welcome to Treasure Quest! Ste
>al treasure, battle monsters, and complete unique quests as you and your friends become the ultimate treasure hunters! Fight as a Wizard or a Warrior, the choice is yours!\r\n\r\n๐ Join the group "Nosniy Games" for a Chat tag, Royalty Sword, and the ability to spin the Prize Wheel in the
>game!\r\nhttps://www.roblox.com/groups/3461453/CLICK-HERE', 'creator': {'id': 3461453, 'name': 'Nosniy Gamesโข', 'type': 'Group'}, 'price': None,
>'allowedGearGenres': ['RPG'], 'allowedGearCategories': [], 'playing': 1463, 'visits': 247185224, 'maxPlayers': 40, 'created': '2019-03-15T04:27:24.327Z', 'updated': '2021-01-25T05:40:11.4420701Z', 'studioAccessToApisAllowed': False, 'createVipServersAllowed': False, 'universeAvatarType': 'MorphToR15', 'genre': 'RPG'}
>```
* **GetUniverseVotes(UniverseID)**
This function will return data about the votes of a game
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseVotes(1069201198)
>```
>
>Example Output
>```
> {'id': 1069201198, 'upVotes': 170780, 'downVotes': 25066}
>```
* **GetCurrentUniversePlayers(UniverseID)**
This function will return the amount of players in a game
>Example Usage
>```python
>robloxpy.Game.External.GetCurrentUniversePlayers(164118757)
>```
>
>Example Output
>```
> 52
>```
* **GetGameVisits(UniverseID)**
This function will return the amount of visits a game has
>Example Usage
>```python
>robloxpy.Game.Internal.GetGameVisits(164118757)
>```
>
>Example Output
>```
> 97
>```
* **GetUniverseFavourites(UniverseID)**
This function will return the amount of likes a game has
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseFavourites(164118757)
>```
>
>Example Output
>```
> 643534
>```
* **GetUniverseVisits(UniverseID)**
This function will return the amount of visits a game has
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseVisits(164118757)
>```
>
>Example Output
>```
> 24536342543
>```
* **GetCurrentUniversePlayers(UniverseID)**
This function will return the amount of current players a game has
>Example Usage
>```python
>robloxpy.Game.External.GetCurrentUniversePlayers(164118757)
>```
>
>Example Output
>```
> 8535
>```
* **GetUniverseLikes(UniverseID)**
This function will return the amount of likes a game has
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseLikes(164118757)
>```
>
>Example Output
>```
> 85
>```
* **GetUniverseDislikes(UniverseID)**
This function will return the amount of dislikes a game has
>Example Usage
>```python
>robloxpy.Game.External.GetUniverseDislikes(164118757)
>```
>
>Example Output
>```
> 73
>```
### Group
The Group functions are aimed towards gathering data from groups. These functions allow you get all the data needed about groups. This section contains bugs which will be fixed shortly.
**THIS SECTION OF ROBLOXPY IS PLANNED TO CHANGE IN NEAR FUTURE UPDATES**
#### Internal
_There are currently no Internal functions_
#### External
These functions allow you gather group data without needing an active cookie set
* **IsGroupOwned(GroupID)**
This function returns whether a group is currently owned
>Example Usage
>```python
>robloxpy.Group.External.IsGroupOwned(916576)
>```
>
>Example Output
>```
> True
>```
* **GetName(GroupID)**
This function returns the name of a group
>Example Usage
>```python
>robloxpy.Group.External.GetName(916576)
>```
>
>Example Output
>```
> NEVER WALK ALONE
>```
* **GetOwner(GroupID)**
This function returns the name of an owner of a group
>Example Usage
>```python
>robloxpy.Group.External.GetOwner(916576)
>```
>
>Example Output
>```
> kristan99
>```
* **GetDescription(GroupID)**
This function provides the description of a group
>Example Usage
>```python
>robloxpy.Group.External.GetDescription(916576)
>```
>
>Example Output
>```
> [NWA]Never Walk Alone
>NWA is a PMC style group that aims for perfection and are looking for all types of members to join to help us with our goal.
>
>We like active members at NWA and have a wide range of bots to help the group function with things such as
> - Automatic Promotion
>- Inactivity Detector
>
>[Automatic Promotions]
>{Temp Down Will Be Up Within 1 Week}
>
>[Inactivity Kicked]
>{Online - Set to 30 Days}
>```
* **GetEmblem(GroupID)**
This function will provide a url to a group emblem
>Example Usage
>```python
>robloxpy.Group.External.GetEmblem(916576)
>```
>
>Example Output
>```
> http://www.roblox.com/asset/?id=176186568
>```
* **GetRoles(GroupID)**
This function is to generate the roles and a permission value
>Example Usage
>```python
>robloxpy.Group.External.GetRoles(916576)
>```
>
>Example Output
>```
> (['[LR I] Recruit', '[LR II] Trooper', '[LR III] Specialist', '[MR I] Squad Leader', '[MR II] Operative', '[OiT] Officer in Training', '[MP]Military Police', '[HR I] Officer', '[HR II] Chief', '[GN I] Lieutenant General', '[GN II] General', '[DP] Diplomat', '[HC] High Command', '[CC] Co -
>Commander', '[CM] Commander'], [1, 180, 190, 195, 196, 200, 205, 210, 220, 230, 240, 245, 250, 254, 255])
>```
* **GetAllies(GroupID)**
This function generates an allies list of a group
>Example Usage
>```python
>robloxpy.Group.External.GetAllies(916576)
>```
>
>Example Output
>```
> ['Akios', 'Dank']
>```
* **GetEnemies(GroupID)**
This function generates an enemies list of a group
>Example Usage
>```python
>robloxpy.Group.External.GetEnemies(916576)
>```
>
>Example Output
>```
> ["US Military 1940's", 'United Alliance Of Roblox']
>```
* **GetMemberCount(GroupID)**
This function provides the total number of members in a group
>Example Usage
>```python
>robloxpy.Group.External.GetMemberCount(916576)
>```
>
>Example Output
>```
> 2347
>```
* **isPublic(GroupID)**
This function returns whether a group is available to join by anyone
>Example Usage
>```python
>robloxpy.Group.External.isPublic(916576)
>```
>
>Example Output
>```
> BUG
>```
* **isBCOnly(GroupID)**
This function returns whether a group is only available to join by BC members
>Example Usage
>```python
>robloxpy.Group.External.isBCOnly(916576)
>```
>
>Example Output
>```
> BUG
>```
* **GetMembersList(GroupID,Limit)**
This function will generate a members list; the limit is optional — if none is provided it will generate a full list from the group
>Example Usage
>```python
>robloxpy.Group.External.GetMemberCount(916576,20)
>```
>
>Example Output
>```
> BUG
>```
* **GetMembersinRoleList(GroupID,RoleID,Limit)**
This function will generate a members list from a specific role; the limit is optional — if none is provided it will generate a full list from the group
>Example Usage
>```python
>robloxpy.Group.External.GetMembersinRoleList(916576,32148,100)
>```
>
>Example Output
>```
> BUG
>```
### Market
The Market functions are based around the roblox market place. These functions allow you to make actions on these items as well retrieve data from each.
#### Internal
* **BuyItem(MarketID)**
This function will buy the item denoted by the market id
>Example Usage
>```python
>robloxpy.Market.Internal.BuyItem(363119963)
>```
>
>Example Output
>```
> True
#### External
* **CanManageAsset(UserID,AssetID)**
This function will return if a user can manage a selected asset
>Example Usage
>```python
>robloxpy.Market.External.CanManageAsset(1368140,363119963)
>```
>
>Example Output
>```
> Purchased
* **GetLimitedPriceData(LimitedID)**
This function will return a set of data points of the limited price over time
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedPriceData(1081300)
>```
>
>Example Output
>```
> [{'value': 1826, 'date': '2021-01-25T06:00:00Z'}, {'value': 1648, 'date': '2021-01-24T06:00:00Z'}, {'value': 1767, 'date': '2021-01-23T06:00:00Z'}, {'value': 1984, 'date': '2021-01-22T06:00:00Z'}, {'value': 1786, 'date': '2021-01-21T06:00:00Z'}, {'value': 1599, 'date': '2021-01-20T06:00:00Z'}, {'value': 1604, 'date': '2021-01-19T06:00:00Z'}, {'value': 1736, 'date': '2021-01-18T06:00:00Z'}, {'value': 1889, 'date': '2021-01-17T06:00:00Z'}, {'value': 1798, 'date': '2021-01-16T06:00:00Z'}, {'value': 1892, 'date': '2021-01-15T06:00:00Z'}, {'value': 2041, 'date': '2021-01-14T06:00:00Z'}, {'value': 1796, 'date': '2021-01-13T06:00:00Z'}, {'value': 1843, 'date': '2021-01-12T06:00:00Z'}, {'value': 1834, 'date': '2021-01-11T06:00:00Z'}, {'value': 2081, 'date': '2021-01-10T06:00:00Z'}, {'value': 1931, 'date': '2021-01-09T06:00:00Z'}, {'value': 2110, 'date': '2021-01-08T06:00:00Z'}, {'value': 1871, 'date': '2021-01-07T06:00:00Z'}, {'value': 1983, 'date': '2021-01-06T06:00:00Z'}, {'value': 1971, 'date': '2021-01-05T06:00:00Z'}, {'value': 2048, 'date': '2021-01-04T06:00:00Z'}, {'value': 2055, 'date': '2021-01-03T06:00:00Z'}, {'value': 2251, 'date': '2021-01-02T06:00:00Z'}, {'value': 2458, 'date': '2021-01-01T06:00:00Z'}, {'value': 3541, 'date': '2020-12-31T06:00:00Z'}, {'value': 2239, 'date': '2020-12-30T06:00:00Z'}, {'value': 2041, 'date': '2020-12-29T06:00:00Z'}, {'value': 2519, 'date': '2020-12-28T06:00:00Z'}, {'value': 2224, 'date': '2020-12-27T06:00:00Z'}, {'value': 2570, 'date': '2020-12-26T06:00:00Z'}, {'value': 2725, 'date': '2020-12-25T06:00:00Z'}, {'value': 2137, 'date': '2020-12-24T06:00:00Z'}, {'value': 1781, 'date': '2020-12-23T06:00:00Z'}, {'value': 1611, 'date': '2020-12-22T06:00:00Z'}, {'value': 1819, 'date':
>'2020-12-21T06:00:00Z'}, {'value': 1727, 'date': '2020-12-20T06:00:00Z'}, {'value': 1508, 'date': '2020-12-19T06:00:00Z'}, {'value': 1555, 'date': '2020-12-18T06:00:00Z'}, {'value': 1558, 'date': '2020-12-17T06:00:00Z'}, {'value': 1647, 'date': '2020-12-16T06:00:00Z'}, {'value': 1337, 'date': '2020-12-15T06:00:00Z'}, {'value': 1842, 'date': '2020-12-14T06:00:00Z'}, {'value': 1570, 'date': '2020-12-13T06:00:00Z'}, {'value': 1435, 'date': '2020-12-12T06:00:00Z'}, {'value': 1649, 'date': '2020-12-11T06:00:00Z'}, {'value': 1402, 'date': '2020-12-10T06:00:00Z'}, {'value': 1538,
>'date': '2020-12-09T06:00:00Z'}, {'value': 1437, 'date': '2020-12-08T06:00:00Z'}, {'value': 1333, 'date': '2020-12-07T06:00:00Z'}, {'value': 1534, 'date': '2020-12-06T06:00:00Z'}, {'value': 1182, 'date': '2020-12-05T06:00:00Z'}, {'value': 1382, 'date': '2020-12-04T06:00:00Z'}, {'value': 1515, 'date': '2020-12-03T06:00:00Z'}, {'value': 1467, 'date': '2020-12-02T06:00:00Z'}, {'value': 1606, 'date': '2020-12-01T06:00:00Z'}, {'value':
>1428, 'date': '2020-11-30T06:00:00Z'}, {'value': 1598, 'date': '2020-11-29T06:00:00Z'}, {'value': 1614, 'date': '2020-11-28T06:00:00Z'}, {'value': 3101, 'date': '2020-11-27T06:00:00Z'}, {'value': 1503, 'date': '2020-11-26T06:00:00Z'}, {'value': 1383, 'date': '2020-11-25T06:00:00Z'}, {'value': 1455, 'date': '2020-11-24T06:00:00Z'}, {'value': 1217, 'date': '2020-11-23T06:00:00Z'}, {'value': 1425, 'date': '2020-11-22T06:00:00Z'}, {'value': 1587, 'date': '2020-11-21T06:00:00Z'}, {'value': 2308, 'date': '2020-11-20T06:00:00Z'}, {'value': 1557, 'date': '2020-11-19T06:00:00Z'}, {'value': 1401, 'date': '2020-11-18T06:00:00Z'}, {'value': 1388, 'date': '2020-11-17T06:00:00Z'}, {'value': 1631, 'date': '2020-11-16T06:00:00Z'},
>{'value': 1811, 'date': '2020-11-15T06:00:00Z'}, {'value': 1583, 'date': '2020-11-14T06:00:00Z'}, {'value': 1366, 'date': '2020-11-13T06:00:00Z'}, {'value': 1197, 'date': '2020-11-12T06:00:00Z'}, {'value': 1426, 'date': '2020-11-11T06:00:00Z'}, {'value': 1494, 'date': '2020-11-10T06:00:00Z'}, {'value': 1594, 'date': '2020-11-09T06:00:00Z'}, {'value': 1526, 'date': '2020-11-08T06:00:00Z'}, {'value': 1347, 'date': '2020-11-07T06:00:00Z'}, {'value': 1355, 'date': '2020-11-06T06:00:00Z'}, {'value': 1315, 'date': '2020-11-05T06:00:00Z'}, {'value': 1204, 'date': '2020-11-04T06:00:00Z'}, {'value': 1016, 'date': '2020-11-03T06:00:00Z'}, {'value': 1332, 'date': '2020-11-02T06:00:00Z'}, {'value': 1274, 'date': '2020-11-01T05:00:00Z'}, {'value': 1407, 'date': '2020-10-31T05:00:00Z'}, {'value': 1270, 'date': '2020-10-30T05:00:00Z'}, {'value': 1205, 'date': '2020-10-29T05:00:00Z'}, {'value': 1216, 'date': '2020-10-28T05:00:00Z'}, {'value': 1246, 'date': '2020-10-27T05:00:00Z'}, {'value': 1124, 'date': '2020-10-26T05:00:00Z'}, {'value': 1338, 'date': '2020-10-25T05:00:00Z'}, {'value': 1107, 'date': '2020-10-24T05:00:00Z'}, {'value': 1164, 'date': '2020-10-23T05:00:00Z'}, {'value': 1089, 'date': '2020-10-22T05:00:00Z'}, {'value': 1041, 'date': '2020-10-21T05:00:00Z'}, {'value': 962, 'date': '2020-10-20T05:00:00Z'}, {'value': 1054, 'date': '2020-10-19T05:00:00Z'}, {'value': 1117, 'date': '2020-10-18T05:00:00Z'}, {'value': 1328, 'date': '2020-10-17T05:00:00Z'}, {'value': 1129, 'date': '2020-10-16T05:00:00Z'}, {'value': 1191, 'date': '2020-10-15T05:00:00Z'}, {'value': 1120, 'date': '2020-10-14T05:00:00Z'}, {'value': 1262, 'date': '2020-10-13T05:00:00Z'}, {'value': 1147, 'date': '2020-10-12T05:00:00Z'}, {'value': 1264, 'date': '2020-10-11T05:00:00Z'}, {'value': 988, 'date': '2020-10-10T05:00:00Z'}, {'value': 1467, 'date': '2020-10-09T05:00:00Z'}, {'value': 2389, 'date': '2020-10-08T05:00:00Z'}, {'value': 1283, 'date': '2020-10-07T05:00:00Z'}, {'value': 1078, 'date': 
'2020-10-06T05:00:00Z'}, {'value': 1404, 'date': '2020-10-05T05:00:00Z'}, {'value': 1312, 'date': '2020-10-04T05:00:00Z'}, {'value': 1305, 'date': '2020-10-03T05:00:00Z'}, {'value': 1234, 'date': '2020-10-02T05:00:00Z'}, {'value': 1222, 'date': '2020-10-01T05:00:00Z'}, {'value': 1166, 'date': '2020-09-30T05:00:00Z'}, {'value': 1082, 'date': '2020-09-29T05:00:00Z'}, {'value': 1081, 'date': '2020-09-28T05:00:00Z'}, {'value': 1311, 'date': '2020-09-27T05:00:00Z'}, {'value': 1378, 'date': '2020-09-26T05:00:00Z'}, {'value': 1374, 'date': '2020-09-25T05:00:00Z'}, {'value': 1252, 'date': '2020-09-24T05:00:00Z'}, {'value': 1271, 'date': '2020-09-23T05:00:00Z'}, {'value': 1206, 'date': '2020-09-22T05:00:00Z'}, {'value': 1290, 'date': '2020-09-21T05:00:00Z'}, {'value': 1101, 'date': '2020-09-20T05:00:00Z'}, {'value': 1065, 'date': '2020-09-19T05:00:00Z'}, {'value': 1229, 'date': '2020-09-18T05:00:00Z'}, {'value': 945, 'date': '2020-09-17T05:00:00Z'}, {'value': 1053, 'date': '2020-09-16T05:00:00Z'}, {'value': 1192, 'date': '2020-09-15T05:00:00Z'}, {'value':
>1299, 'date': '2020-09-14T05:00:00Z'}, {'value': 1292, 'date': '2020-09-13T05:00:00Z'}, {'value': 1338, 'date': '2020-09-12T05:00:00Z'}, {'value': 1360, 'date': '2020-09-11T05:00:00Z'}, {'value': 1077, 'date': '2020-09-10T05:00:00Z'}, {'value': 1273, 'date': '2020-09-09T05:00:00Z'}, {'value': 1101, 'date': '2020-09-08T05:00:00Z'}, {'value': 1234, 'date': '2020-09-07T05:00:00Z'}, {'value': 1175, 'date': '2020-09-06T05:00:00Z'}, {'value': 1229, 'date': '2020-09-05T05:00:00Z'}, {'value': 1329, 'date': '2020-09-04T05:00:00Z'}, {'value': 1216, 'date': '2020-09-03T05:00:00Z'}, {'value': 1298, 'date': '2020-09-02T05:00:00Z'}, {'value': 1247, 'date': '2020-09-01T05:00:00Z'}, {'value': 1094, 'date': '2020-08-31T05:00:00Z'},
>{'value': 1178, 'date': '2020-08-30T05:00:00Z'}, {'value': 1176, 'date': '2020-08-29T05:00:00Z'}, {'value': 1190, 'date': '2020-08-28T05:00:00Z'}, {'value': 1257, 'date': '2020-08-27T05:00:00Z'}, {'value': 1094, 'date': '2020-08-26T05:00:00Z'}, {'value': 1113, 'date': '2020-08-25T05:00:00Z'}, {'value': 1057, 'date': '2020-08-24T05:00:00Z'}, {'value': 1279, 'date': '2020-08-23T05:00:00Z'}, {'value': 1289, 'date': '2020-08-22T05:00:00Z'}, {'value': 1109, 'date': '2020-08-21T05:00:00Z'}, {'value': 1054, 'date': '2020-08-20T05:00:00Z'}, {'value': 981, 'date': '2020-08-19T05:00:00Z'}, {'value': 1088, 'date': '2020-08-18T05:00:00Z'}, {'value': 1003, 'date': '2020-08-17T05:00:00Z'}, {'value': 1088, 'date': '2020-08-16T05:00:00Z'}, {'value': 1070, 'date': '2020-08-15T05:00:00Z'}, {'value': 968, 'date': '2020-08-14T05:00:00Z'}, {'value': 934, 'date': '2020-08-13T05:00:00Z'}, {'value': 919, 'date': '2020-08-12T05:00:00Z'}, {'value': 970, 'date': '2020-08-11T05:00:00Z'}, {'value': 909, 'date': '2020-08-10T05:00:00Z'}, {'value': 1046, 'date': '2020-08-09T05:00:00Z'}, {'value': 1038, 'date': '2020-08-08T05:00:00Z'}, {'value': 1098, 'date': '2020-08-07T05:00:00Z'}, {'value': 1062, 'date': '2020-08-06T05:00:00Z'}, {'value': 1138, 'date': '2020-08-05T05:00:00Z'}, {'value': 1072, 'date': '2020-08-04T05:00:00Z'}, {'value': 1080, 'date': '2020-08-03T05:00:00Z'}, {'value': 1161, 'date': '2020-08-02T05:00:00Z'}, {'value': 1227, 'date': '2020-08-01T05:00:00Z'}, {'value': 1169, 'date': '2020-07-31T05:00:00Z'}, {'value': 1023, 'date': '2020-07-30T05:00:00Z'}, {'value': 1017, 'date': '2020-07-29T05:00:00Z'}]
> ```
* **GetLimitedRemaining(LimitedID)**
This function returns the remaining limiteds for sale
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedRemaining(1081300)
>```
>
>Example Output
>```
> 0
> ```
* **GetLimitedTotal(LimitedID)**
This function returns the total amount of the limited sold. If the item did not use to be limited this function will always return None
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedTotal(1081300)
>```
>
>Example Output
>```
> None
> ```
* **GetLimitedSales(LimitedID)**
This function returns the total amount of the limited which are for sale currently.
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedSales(1081300)
>```
>
>Example Output
>```
> 37813
> ```
* **GetLimitedRAP(LimitedID)**
This function returns the total recent average price of a limited in robux
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedRAP(1081300)
>```
>
>Example Output
>```
> 1707
> ```
* **GetLimitedSalePrice(LimitedID)**
This function returns the price of a limited when it first went on sale; items which were not originally limited will return None
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedSalePrice(4390890198)
>```
>
>Example Output
>```
> 12000
> ```
* **GetLimitedChangePercentage(LimitedID)**
This function returns the price change of a limited's current price compared to what it originally sold at
>Example Usage
>```python
>robloxpy.Market.External.GetLimitedChangePercentage(4390890198)
>```
>
>Example Output
>```
> 191.3%
> ```
* **GetAssetImage(LimitedID,Width,Height)**
This function returns a link to the image of an asset
>Example Usage
>```python
>robloxpy.Market.External.GetAssetImage(4390890198,420,420)
>```
>
>Example Output
>```
> https://tr.rbxcdn.com/c4f5ec2e849306ebe3cb4dccaf1369f8/420/420/Hat/Png
> ```
### User
The user functions are how you interact with specific users to gather data as well as how you interact with roblox as a specific user based on the currently used cookie.
#### Internal
* **SetCookie(Cookie,Details)**
This function will set the cookie to be used with any internal commands of robloxpy. The details argument is optional allowing you to pre-fill a wide range of data of the current account as the cookie is set
>Example Usage
>```python
>robloxpy.User.Internal.SetCookie("_WARNING:-DO-NOT-SHARE-THIS.--Sharing-this-will-allow-someone-to-log-in-as-you-and-to-steal-your-ROBUX-and-items._XXXXXXXXXXXXX",True)
>```
>
>Example Output
>```
> Cookie Set
> ```
The additional data that is set when you set a cookie is as follows:
> RawCookie
> UserID
> Username
> Robux
> Thumbnail
> isBuildersclub
> isPremium
> canChangeUsername
> isAdmin
> isEmailOnFile
> isEmailVerified
> isPhoneFeatureEnabled
> isSuperSafePrivacyMode
> IsAppChatSettingEnabled
> IsGameChatSettingEnabled
> IsContentRatingsSettingEnabled
> IsParentalControlsTabEnabled
> IsSetPasswordNotificationEnabled
> ChangePasswordRequiresTwoStepVerification
> ChangeEmailRequiresTwoStepVerification
> UserEmail
> UserEmailMasked
> UserEmailVerified
> CanHideInventory
> CanTrade
> MissingParentEmail
> IsUpdateEmailSectionShown
> IsUnder13UpdateEmailMessageSectionShown
> IsUserConnectedToFacebook
> IsTwoStepToggleEnabled
> AgeBracket
> UserAbove13
> ClientIpAddress
> UserAge
> IsBcRenewalMembership
> IsAccountPinEnabled
> IsAccountRestrictionsFeatureEnabled
> IsAccountRestrictionsSettingEnabled
> IsAccountSettingsSocialNetworksV2Enabled
> InApp
> HasFreeNameChange
> IsAgeDownEnabled
> ReceiveNewsletter
This data can be called as a standard variable as needed such as the following:
```python
print(robloxpy.User.Internal.CanTrade)
Output > True
```
* **GetDetails(Details)**
This function will collect the data of the current cookie; this is only useful if you decide not to collect it by default when setting the cookie.
>Example Usage
>```python
>robloxpy.User.Internal.GetDetails(True)
>```
>
>Example Output
>```
> Data Gathered
> ```
* **isFollowing(targetUserID)**
This function will check if the user is following a different user
>Example Usage
>```python
>robloxpy.User.Internal.isFollowing(1)
>```
>
>Example Output
>```
> True
> ```
* **FollowUser(targetUserID)**
This function will follow the target user
>Example Usage
>```python
>robloxpy.User.Internal.FollowUser(1)
>```
>
>Example Output
>```
> success
> ```
* **UnfollowUser(targetUserID)**
This function will unfollow the target user
>Example Usage
>```python
>robloxpy.User.Internal.UnfollowUser(1)
>```
>
>Example Output
>```
> success
> ```
* **BlockUser(targetUserID)**
This function will block the target user
>Example Usage
>```python
>robloxpy.User.Internal.BlockUser(1)
>```
>
>Example Output
>```
> success
> ```
* **UnblockUser(targetUserID)**
This function will unblock the target user
>Example Usage
>```python
>robloxpy.User.Internal.UnblockUser(1)
>```
>
>Example Output
>```
> success
> ```
* **GetBlockedUsers()**
This function will return a list of all blocked users
>Example Usage
>```python
>robloxpy.User.Internal.GetBlockedUsers()
>```
>
>Example Output
>```
> ['Roblox','Builderman']
> ```
* **SendMessage(targetUserID,Subject,Body)**
This function will send a customised message to the target user
>Example Usage
>```python
>robloxpy.User.Internal.SendMessage(1,"HI THERE","This is a private message sent with robloxpy")
>```
>
>Example Output
>```
> Sent
> ```
* **JoinGame(PlaceID)**
This function will make the current user join a game
>Example Usage
>```python
>robloxpy.User.Internal.JoinGame(1762345)
>```
>
>Example Output
>```
> The game will open
> ```
### External
* **GetID(Username)**
This function will return the userID of a user based on their name
>Example Usage
>```python
>robloxpy.User.External.GetID("kristan99")
>```
>
>Example Output
>```
> 1368140
> ```
* **GetUserName(UserID)**
This function will return the username of a user based on their ID
>Example Usage
>```python
>robloxpy.User.External.GetUserName(1368140)
>```
>
>Example Output
>```
> kristan99
> ```
* **UsernameHistory(UserID)**
This function will return a username history of a user
>Example Usage
>```python
>robloxpy.User.External.UsernameHistory(1368140)
>```
>
>Example Output
>```
> ['kristan99']
> ```
* **IsOnline(UserID)**
This function will return if a user is seen as online
>Example Usage
>```python
>robloxpy.User.External.IsOnline(1368140)
>```
>
>Example Output
>```
> True
> ```
* **Isbanned(UserID)**
This function will return if a user is banned
>Example Usage
>```python
>robloxpy.User.External.Isbanned(1368140)
>```
>
>Example Output
>```
> False
> ```
* **GetDescription(UserID)**
This function will return a users description
>Example Usage
>```python
>robloxpy.User.External.GetDescription(1368140)
>```
>
>Example Output
>```
> No longer really play Roblox, I am however working on RobloxPy a python wrapper for Roblox. You can check it out on GitHub @KristanSmout or install through 'pip install robloxpy'
>
>I also provide python tutorials and free software on my youtube channel which you can find somewhere here :P
> ```
* **GetAge(UserID)**
This function will return a users age in days
>Example Usage
>```python
>robloxpy.User.External.GetAge(1368140)
>```
>
>Example Output
>```
> 4470
> ```
* **CreationDate(UserID,Style)**
This function will return a users creation date, if you use the wrong format and need the month first set the style to 1
>Example Usage
>```python
>robloxpy.User.External.CreationDate(1368140)
>```
>
>Example Output
>```
> 30/10/2008
> ```
* **GetRAP(UserID)**
This function will return the RAP of a user in robux
>Example Usage
>```python
>robloxpy.User.External.GetRAP(1368140)
>```
>
>Example Output
>```
> 432908
> ```
* **GetLimiteds(UserID)**
This function will return an array of limiteds of a user and the corresponding item ID's
>Example Usage
>```python
>robloxpy.User.External.GetLimiteds(1368140)
>```
>
>Example Output
>```
>(['Racing Helmet', 'Summertime 2009 R&R&R', 'Tee Vee', 'Gobble Gobble', 'Gobble Gobble', 'Clown School Dropout', 'Chrome Egg of Speeding Bullet', 'Norseman', 'Fiery Egg of Egg Testing', 'Brass Top Hat', 'Crocheted Cthulhu', 'Staff of Celestial Light', 'Swordpack', 'Ornate Valkyrie', 'Police Badge', 'Shady Business Hat', 'Rogue Masquerader', "Cupid's Beloved Blade", "Cupid's Beloved Blade", "Cupid's Beloved Blade", "Cupid's Beloved
>Blade", "Cupid's Beloved Blade", "Cupid's Beloved Blade", "Cupid's Beloved Blade", 'The Last Egg of 2013', 'Deluxe Game Headset', 'Green Starface ', 'Captain Steelshanks Recruiting Staff', 'Fawkes Face', 'Gold Visor', 'Mr X', 'Mr X', 'Egg of Verticality', 'Golden Crown', 'Classy ROBLOX Bow Tie', 'Furry Rock Star Hat', 'Skull of Robloxians Past', 'Halloween Baseball Cap 2014', "Merely's Green Sparkle Time Hoverboard", "St Patrick's
>Day Fairy", 'Bluesteel Katana', 'Bluesteel Katana', 'Overseer Collar', 'Periastron Crown', 'True Love Smile', "Brighteyes' Top Hat", "Brighteye's Bloxy Cola Hat", 'Valkyrie Helm', 'Neon Green Beautiful Hair', 'Virtual Commando', 'The Crown of Warlords', 'Chiefjustus Gavel', 'ROBLOX Madness Face', "Overseer Warlord's Sword", 'Cursed Korblox Pendant ', 'Purple Wistful Wink', 'Blue Wistful Wink', 'Bacon Face', "DJ Remix's Goldphones", 'Red Goof ', 'Noob Attack: Laser Scythe Scuffle'], [6379764, 13334984, 15857936, 18448414, 18448414, 21392863, 24826640, 24941896, 27345567, 35685137, 35685477, 49491781, 19398258, 23634704, 82358339, 89624140, 93078804, 106064277, 106064277, 106064277, 106064277, 106064277, 106064277, 106064277, 111776247, 100425864, 119812738, 71597060, 134522901, 134087261, 125861676, 125861676, 152980639, 1081300, 162069243, 163496075, 181354245, 184745025, 215392741, 226189871, 243791145, 243791145, 343585127, 343585234, 362051405, 169454280, 24114402, 1365767, 151786902, 362081769, 2264398, 120749528, 130213380, 483308034, 483899424, 583722710, 583721561, 399021751, 102618797, 1191125008, 2566105661])
> ```
* **GetBust(UserID,Width,Height)**
This function will return a url to a bust image of a user
>Example Usage
>```python
>robloxpy.User.External.GetBust(1368140)
>```
>
>Example Output
>```
> https://tr.rbxcdn.com/d4ff03e82298e804c89de3098e51abe6/420/420/AvatarBust/Png
> ```
* **GetHeadshot(UserID,Width,Height)**
This function will return a url to a headshot image of a user
>Example Usage
>```python
>robloxpy.User.External.GetHeadshot(1368140)
>```
>
>Example Output
>```
> https://tr.rbxcdn.com/b8864e930fcc0bfd4c3b19a558724841/420/420/AvatarHeadshot/Png
> ```
* **GetStatus(UserID)**
This function will return a users status
>Example Usage
>```python
>robloxpy.User.External.GetStatus(1368140)
>```
>
>Example Output
>```
> Currently working on RobloxPy a python wrapper of the roblox API
> ```
* **DoesNameExist(username)**
This function will return if a name is being used or not
>Example Usage
>```python
>robloxpy.User.External.DoesNameExist("kristan99")
>```
>
>Example Output
>```
> Unavailible
> ```
### Friends
This is the sub category for functions for the user Friends.
### Internal
* **SendFriendRequest(UserID)**
This function will send a friend request to the target user
>Example Usage
>```python
>robloxpy.User.Friend.Internal.SendFriendRequest(1368140)
>```
>
>Example Output
>```
> Sent
> ```
* **Unfriend(UserID)**
This function will unfriend the target user
>Example Usage
>```python
>robloxpy.User.Friend.Internal.Unfriend(1368140)
>```
>
>Example Output
>```
> Sent
> ```
* **TotalFriends()**
This function will return the total number of friends for the current user
>Example Usage
>```python
>robloxpy.User.Friend.Internal.TotalFriends()
>```
>
>Example Output
>```
> 34
> ```
### External
* **GetAll(UserID)**
This function will return the total list of friends for the current user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetAll(1368140)
>```
>
>Example Output
>```
> ['LocalFapper', 'SlimemingPlayz', 'E_xitium', 'Kawaii_Katicorn99', 'KatieeLouisee99', 'Yung_nignogpaddywog', 'BigDDave', 'Nosowl', 'Mirro_rs', 'Gareth1990', 'Voxxes', 'matantheman', 'ItzDishan', 'KioshiShimano', 'CinnabonNinja', 'roxo_pl', 'GlowwLikeThat', 'BritishP0litics', 'Nicolas9970', 'YunPlant', 'sirjoshh', 'iMistifye', 'Scorp1x', 'Fribbzdaman', 'xMcKenziee', 'AjinKovac', 'Angels_Develop', 'RonerRehnskiold', 'agnen', 'RocketValkyrie', 'methanshacked', 'GingyWyven', 'KingsmanSS', 'glitch19']
> ```
* **GetCount(UserID)**
This function will return the total number of friends for the current user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetCount(1368140)
>```
>
>Example Output
>```
> 34
> ```
* **GetOnline(UserID)**
This function will return the total list of online friends for the current user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetOnline(1368140)
>```
>
>Example Output
>```
> ['Mirro_rs', 'Angels_Develop']
> ```
* **GetOffline(UserID)**
This function will return the total list of offline friends for the current user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetOffline(1368140)
>```
>
>Example Output
>```
> ['LocalFapper', 'SlimemingPlayz', 'E_xitium', 'Kawaii_Katicorn99', 'KatieeLouisee99', 'Yung_nignogpaddywog', 'BigDDave', 'Nosowl', 'Gareth1990',
>'Voxxes', 'matantheman', 'ItzDishan', 'KioshiShimano', 'CinnabonNinja', 'roxo_pl', 'GlowwLikeThat', 'BritishP0litics', 'Nicolas9970', 'YunPlant', 'sirjoshh', 'iMistifye', 'Scorp1x', 'Fribbzdaman', 'xMcKenziee', 'AjinKovac', 'RonerRehnskiold', 'agnen', 'RocketValkyrie', 'methanshacked', 'GingyWyven', 'KingsmanSS', 'glitch19']
> ```
* **GetFollowerCount(UserID)**
This function will return the total follower count for the target user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetFollowerCount(1368140)
>```
>
>Example Output
>```
> 12607
> ```
* **GetFollowers(UserID,Amount)**
This function will return a list of users following a user, limit the list size using the amount variable. This function also provides the user ID's of the followers
>Example Usage
>```python
>robloxpy.User.Friend.External.GetFollowers(1368140,10)
>```
>
>Example Output
>```
> (['builderman', 'Gaming112', 'snowbeat54321', 'BobHag', 'lilmigithunter', 'UchihaSasukePat', 'Alessi7953', 'GarraSabakuno', 'jangofettt', 'Garty983chub'], [156, 1359952, 2918062, 3149494, 2899616, 2937573, 2754369, 2982496, 20169, 169558])
> ```
* **GetFollowingCount(UserID)**
This function will return the total amount of users followed by the current user
>Example Usage
>```python
>robloxpy.User.Friend.External.GetFollowingCount(1368140)
>```
>
>Example Output
>```
> 16
> ```
* **GetFollowing(UserID,Amount)**
This function will return a list of users being followed by the user, limit the list size using the amount variable. This function also provides the user ID's of the followers
>Example Usage
>```python
>robloxpy.User.Friend.External.GetFollowing(1368140,10)
>```
>
>Example Output
>```
> (['takeovertom', 'dino5aur', 'iClanTech', '1waffle1', 'ForyxeV', 'Imaginze', 'StoryBased', 'LoneTraveler', 'beanme100', 'enyahs7'], [1096520, 649206, 65797433, 75323, 9622035, 17256624, 27572897, 3304627, 485933, 336048])
> ```
### Groups
This is the sub category for functions for the user Groups.
### Internal
* **Claim(GroupID)**
This function will attempt to claim an unowned group
>Example Usage
>```python
>robloxpy.User.Groups.Internal.Claim(916576)
>```
>
>Example Output
>```
> Sent
> ```
* **Join(GroupID)**
This function will attempt to Join a group
>Example Usage
>```python
>robloxpy.User.Groups.Internal.Join(916576)
>```
>
>Example Output
>```
> Join request sent
> ```
* **Leave(GroupID)**
This function will attempt to Leave a group
>Example Usage
>```python
>robloxpy.User.Groups.Internal.Leave(916576)
>```
>
>Example Output
>```
> Sent
> ```
* **GetFunds(GroupID)**
This function will attempt to get the group funds (You need permission to see them)
>Example Usage
>```python
>robloxpy.User.Groups.Internal.GetFunds(916576)
>```
>
>Example Output
>```
> 17290
> ```
* **Payout(GroupID,targetUserID,RobuxAmount)**
This function will attempt to payout a specific amount of the group funds (You need permission to do so)
>Example Usage
>```python
>robloxpy.User.Groups.Internal.Payout(916576,1368140,1000)
>```
>
>Example Output
>```
> Sent
> ```
* **PercentagePayout(GroupID,targetUserID,Percentage)**
This function will attempt to payout a specific percentage of the group funds (You need permission to do so)
>Example Usage
>```python
>robloxpy.User.Groups.Internal.PercentagePayout(916576,1368140,5)
>```
>
>Example Output
>```
> Sent
> ```
* **SendWallPost(GroupID,PostText)**
This function will attempt to post on the group wall
>Example Usage
>```python
>robloxpy.User.Groups.Internal.SendWallPost(916576,"Hello World")
>```
>
>Example Output
>```
> Sent
> ```
* **SendGroupShout(GroupID,ShoutText)**
This function will attempt to send a group shout
>Example Usage
>```python
>robloxpy.User.Groups.Internal.SendGroupShout(916576,"Hello World")
>```
>
>Example Output
>```
> Sent
> ```
* **ChangeDescription(GroupID,DescriptionText)**
This function will attempt to change the group description
>Example Usage
>```python
>robloxpy.User.Groups.Internal.ChangeDescription(916576,"Hello World")
>```
>
>Example Output
>```
> Sent
> ```
* **ChangeRank(GroupID,targetUserID,RoleID)**
This function will attempt to change the rank of the target user within the group
>Example Usage
>```python
>robloxpy.User.Groups.Internal.ChangeRank(916576,1368140,15728)
>```
>
>Example Output
>```
> Sent
> ```
### External
* **GetGroups(UserID)**
This function will return the list of groups a user is in with the group ID's
>Example Usage
>```python
>robloxpy.User.Groups.External.GetGroups(1368140)
>```
>
>Example Output
>```
> (['Simple Studio', 'BlackRock Studio', 'White Wolf Hounds', '๐ถ๏ธHot Pepper Clothes', 'Twisted Murder er Official Group', 'StarCraftยฎ', 'United Alliance Of Roblox', 'NEVER WALK ALONE'], [3297855, 847360, 1201505, 3206677, 1225381, 1132763, 14195, 916576])
> ```
These docs should be complete, if you find something that is not included in the documentation please let me know and I will add it when I get a chance. If you see anything in the documentation that is incorrect please also make me aware and I will resolve it ASAP. | /robloxpy-0.2.21.tar.gz/robloxpy-0.2.21/README.md | 0.865651 | 0.857351 | README.md | pypi |
[](https://opensource.org/licenses/MIT)
# ROBO.AI Bot Runtime manager CLI tool #
<img align="right" width="200" height="200" alt="robo-bot" src="robo-bot.png"></img>
This tool allows anyone to create, train, deploy, monitor and manage a Rasa based bot on the ROBO.AI platform.
Check our [CHANGELOG](CHANGELOG.md) for the latest changes.
Tutorials:
* [The ROBO.AI platform - creating bots and API keys for deployment](docs/manage_roboai_account.md)
* [Creating and deploying a Rasa chatbot on the ROBO.AI platform](docs/create_deploy_bot.md)
### How to install ###
#### Requirements ####
* Python 3.6 or 3.7
* Pip and/or anaconda
You can create a virtual environment using conda:
```sh
conda create -n robo-bot python=3.7
conda activate robo-bot
```
#### Install the ROBO.AI tool ####
Assuming you are already in your virtual environment with Python 3.6 or 3.7, you can install the tool with the following command:
```
pip install robo-bot
```
After installing the library you should be able to execute the robo-bot command in your terminal.
#### Usage ####
The command line tool is available through the following terminal command:
```
robo-bot
```
When you execute it in a terminal you should see an output with a list of commands supported
by the tool.
I.e:
```
user@host:~$ robo-bot
____ ___ ____ ___ _ ___
| _ \ / _ \| __ ) / _ \ / \ |_ _|
| |_) | | | | _ \| | | | / _ \ | |
| _ <| |_| | |_) | |_| | _ / ___ \ | |
|_| \_\\___/|____/ \___/ (_) /_/ \_\___|
Bot Management Tool robo-ai.com
Usage: robo-bot [OPTIONS] COMMAND [ARGS]...
robo-bot 0.1.0
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
clean Clean the last package
connect Connect a local bot to a ROBO.AI server bot instance.
deploy Deploy the current bot into the ROBO.AI platform.
diff Check for structural differences between languages for the...
environment Define the ROBO.AI platform API endpoint to use.
interactive Run in interactive learning mode where you can provide...
login Initialize a new session using a ROBO.AI API key.
logout Close the current session in the ROBO.AI platform.
logs Display selected bot runtime logs.
package Package the required bot and make it ready for deployment.
remove Remove a deployed bot from the ROBO.AI platform.
run Start the action server.
seed Create a new ROBO.AI project seedling, including folder...
shell Start a shell to interact with the required bot.
start Start a bot deployed on the ROBO.AI platform.
status Display the bot status.
stories Generate stories for a Rasa bot.
stop Stop a bot running in the ROBO.AI platform.
test Tests Rasa models for the required bots.
train Trains Rasa models for the required bots.
```
Each of the listed commands provides you a functionality to deal with your bots,
each one has a description, and a help option, so you can see what options and
arguments are available.
You can invoke each of the tool commands by following the pattern:
```
robo-bot <command> [command arguments or options]
```
i.e.:
```
robo-bot login --api-key=my-apy-key
```
You can check the supported options and arguments for every command by following
the pattern:
```
robo-bot <command> --help
```
i.e.:
```
user@host:~$ robo-bot login --help
____ ___ ____ ___ _ ___
| _ \ / _ \| __ ) / _ \ / \ |_ _|
| |_) | | | | _ \| | | | / _ \ | |
| _ <| |_| | |_) | |_| | _ / ___ \ | |
|_| \_\\___/|____/ \___/ (_) /_/ \_\___|
Bot Management Tool robo-ai.com
Usage: robo-bot login [OPTIONS]
Initialize a new session using a ROBO.AI API key.
Options:
--api-key TEXT The ROBO.AI platform API key.
--help Show this message and exit.
```
### Using robo-bot to create and maintain a bot ###
##### Generating an initial structure #####
The ROBO.AI tool provides you with a set of commands useful to create, train, interact and test a bot
before its deployment.
To create a bot you can use the **seed** command:
```
robo-bot seed [language-codes] [--path <path> --language-detection --chit-chat --coref-resolution]
```
i.e.:
```
robo-bot seed en de --path bot/ --language-detection --chit-chat --coref-resolution
```
The first argument of the seed command is the language-codes which indicate the languages the bot will be built upon.
If no language-codes are passed, only an english sub-directory (en) will be created.
The optional parameters are referring to features you may want to add to the bot.
This command behaves like rasa init but it'll generate a dedicated structure where you can have
multi-language bots related with the same domain. Below there's an example of a bot generated with this command.
```
.
โโโ actions
โ โโโ action_parlai_fallback.py
โโโ custom
โ โโโ components
โ โ โโโ spacy_nlp
โ โ โโโ spacy_nlp_neuralcoref.py
โ โ โโโ spacy_tokenizer_neuralcoref.py
โ โโโ policies
โ โโโ language_detection
โ โโโ lang_change_policy.py
โ โโโ lid.176.ftz
โโโ languages
| โโโ de
| โ โโโ config.yml
| โ โโโ data
| โ โ โโโ lookup_tables
| โ โ โโโ nlu.md
| โ โโโ domain.yml
| โโโ en
| โ โโโ config.yml
| โ โโโ data
| โ โ โโโ lookup_tables
| โ โ โโโ nlu.md
| โ โโโ domain.yml
| โโโ stories.md
โโโ credentials.yml
โโโ endpoints.yml
โโโ __init__.py
```
##### Generating stories for a bot #####
After defining intents and actions for a bot you need to combine these in stories. This command allows you to generate the most basic interactions in your Rasa bot.
Note: Manual checks will be needed to implement more complex stories but basic ping-pong dialogues should be covered with this feature.
Usage:
```
robo-bot stories [language-codes] [--check-covered-intents]
```
If no language-code is passed, robo-bot will assume you're working in a single-language bot (and thus the default Rasa structure).
The option --check-covered-intents will go through your stories file and check if the intents you have defined in the domain file are being covered in the dialogues. This command is more useful when you're deep in the development of your bot.
##### Checking for differences in a bot #####
After making all the necessary changes to your bots, you want to make sure that all bots (languages) are coherent between each other (i.e. the same stories.md file will work for the nlu.md and domain.yml files configured for the different languages.) To know whether your bot is achieving this, you can use the **diff** command.
```
robo-bot diff [language-codes] [--path <path>]
```
It will check for structural differences between the domain.yml and stories.md files for the same multi-language bot.
If no language codes are passed, then it'll pair all the languages found and check for differences between them.
##### Training a bot #####
You're now in a position to train the bot. To do so you only need to run the **train** command just as you would do in Rasa.
```
robo-bot train [language-codes] [--path <path> --nlu --core --augmentation <value>]
```
It will train the bot and store the model in the language sub-directory. If no language codes are passed,
all bots will be trained.
##### Interacting with a bot #####
To interact with the bot, you can use the **shell** command. Before running it, you need to execute the **run actions** command.
```
robo-bot run actions [--debug]
```
After doing so, you can execute the shell command.
```
robo-bot shell [language-code] [--debug]
```
You need to specify what language (bot) you want to interact with - you can only interact with one bot at the time.
##### Testing a bot #####
Testing a bot is also probably in your pipeline. And this is possible with the **test** command.
```
robo-bot test [language-code]
```
It'll test the bot with the conversation_tests.md file you have stored in your tests folder.
The results will be stored in the language sub-directory. Besides Rasa's default results, robo-bot also produces
an excel file with a confusion list of mismatched intents.
##### Interactive learning #####
If you want to use Rasa's interactive learning mode you can do this by using the interactive command.
```
robo-bot interactive [language-code]
```
It'll launch an interactive session where you can provide feedback to the bot. At the end don't forget to
adjust the paths to where the new files should be saved.
By now you're probably ready to deploy your bot...
### Using robo-bot to deploy a bot ###
##### Setting the target endpoint #####
Before doing any operation you must indicate to the tool in what environment you're working in,
for that you have the **environment** command:
The tool provides you with a default production environment in the ROBO.AI platform.
You can activate it by running:
```
robo-bot environment activate production
```
You can also create new environments by executing:
```
robo-bot environment create <environment name> --base-url <base-url> [--username <username> --password <password>]
```
The base-url refers to the environment URL and you can optionally pass a username
and password if your environment requires them.
i.e.:
```
robo-bot environment create development --base-url https://robo-core.my-robo-server.com --username m2m --password GgvJrZSCXger
```
After creating an environment, do not forget to activate it if you want to use it.
To know which environment is activated you can simply run:
```
robo-bot environment which
```
It's possible to check what environments are available in your configuration file by running:
```
robo-bot environment list
```
You can also remove environments by executing:
```
robo-bot environment remove <environment name>
```
##### Logging in #####
Once you have the desired environment activated, you need to login into the account you'd like to use by using
an API key.
1. Log-in into your ROBO.AI administration and generate an API key (do not forget to enable it).
2. Execute the login command and enter the API key.
i.e.:
```
robo-bot login --api-key=my-api-key
```
Or if you don't want to enter the api key in your command, you can enter it interactively by only executing:
```
robo-bot login
```
##### Initializing a bot #####
In order to manage a bot runtime, it needs to be initialized so the tool will know what bot this runtime
refers to. If you already have the Rasa bot initialized, just execute the following command:
```
robo-bot connect [language-code] --target-dir <path to rasa bot files>
```
i.e.:
```
robo-bot connect [language-code] --target-dir /path/to/rasa/bot
```
First it'll ask you to pick an existing bot (if it does not exist, you must create it before executing this step).
After doing it, it'll generate a new file called robo-manifest.json which contains meta-information about the deployment
and the target bot.
**Note:** if no language-code is provided, it's assumed that you're working with the default Rasa structure.
##### Deploying a bot #####
When your bot is ready for deployment, you must train it first and remove any older model, then ensure you're in
the bot root directory, and then just execute:
```
robo-bot deploy [language-code]
```
It'll package your bot files and upload them to the ROBO.AI platform, starting a new deployment. This step may take
some time.
**Note:** if no language-code is provided, it's assumed that you're working with the default Rasa structure.
##### Checking a bot status #####
If you want to check your bot status, just run the following command from the same directory as of
your robo-manifest.json
```
robo-bot status [language-code]
```
**Note:** if no language-code is provided, it's assumed that you're working with the default Rasa structure.
##### Removing a bot #####
If you need to remove a bot, execute the following command from the bot root directory:
```
robo-bot remove [language-code]
```
**Note:** if no language-code is provided, it's assumed that you're working with the default Rasa structure.
##### Checking a deployed bot logs #####
Sometimes it's useful to have a peek at the logs, for that you need to execute:
```
robo-bot logs [language-code]
```
It'll show you the latest 1000 lines from that rasa bot logs.
**Note:** if no language-code is provided, it's assumed that you're working with the default Rasa structure.
| /robo-bot-0.1.3.1.tar.gz/robo-bot-0.1.3/README.md | 0.50293 | 0.931338 | README.md | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.