Dataset columns and observed value ranges:

    column            type    observed values
    ----------------  ------  -------------------
    repo              string  lengths 7 to 55
    path              string  lengths 4 to 223
    func_name         string  lengths 1 to 134
    original_string   string  lengths 75 to 104k
    language          string  1 class (1 value)
    code              string  lengths 75 to 104k
    code_tokens       list    lengths 19 to 28.4k
    docstring         string  lengths 1 to 46.9k
    docstring_tokens  list    lengths 1 to 1.97k
    sha               string  lengths 40 to 40
    url               string  lengths 87 to 315
    partition         string  1 class (1 value)

repo: quantopian/zipline
path: zipline/utils/run_algo.py
func_name: _run
language: python
code:

    def _run(handle_data,
             initialize,
             before_trading_start,
             analyze,
             algofile,
             algotext,
             defines,
             data_frequency,
             capital_base,
             bundle,
             bundle_timestamp,
             start,
             end,
             output,
             trading_calendar,
             print_algo,
             metrics_set,
             local_namespace,
             environ,
             blotter,
             benchmark_returns):
        """Run a backtest for the given algorithm.

        This is shared between the cli and :func:`zipline.run_algo`.
        """
        if benchmark_returns is None:
            benchmark_returns, _ = load_market_data(environ=environ)

        if algotext is not None:
            if local_namespace:
                ip = get_ipython()  # noqa
                namespace = ip.user_ns
            else:
                namespace = {}

            for assign in defines:
                try:
                    name, value = assign.split('=', 2)
                except ValueError:
                    raise ValueError(
                        'invalid define %r, should be of the form name=value' %
                        assign,
                    )
                try:
                    # evaluate in the same namespace so names may refer to
                    # each other
                    namespace[name] = eval(value, namespace)
                except Exception as e:
                    raise ValueError(
                        'failed to execute definition for name %r: %s' %
                        (name, e),
                    )
        elif defines:
            raise _RunAlgoError(
                'cannot pass define without `algotext`',
                "cannot pass '-D' / '--define' without '-t' / '--algotext'",
            )
        else:
            namespace = {}
            if algofile is not None:
                algotext = algofile.read()

        if print_algo:
            if PYGMENTS:
                highlight(
                    algotext,
                    PythonLexer(),
                    TerminalFormatter(),
                    outfile=sys.stdout,
                )
            else:
                click.echo(algotext)

        if trading_calendar is None:
            trading_calendar = get_calendar('XNYS')

        # date parameter validation
        if trading_calendar.session_distance(start, end) < 1:
            raise _RunAlgoError(
                'There are no trading days between %s and %s' % (
                    start.date(),
                    end.date(),
                ),
            )

        bundle_data = bundles.load(
            bundle,
            environ,
            bundle_timestamp,
        )

        first_trading_day = \
            bundle_data.equity_minute_bar_reader.first_trading_day

        data = DataPortal(
            bundle_data.asset_finder,
            trading_calendar=trading_calendar,
            first_trading_day=first_trading_day,
            equity_minute_reader=bundle_data.equity_minute_bar_reader,
            equity_daily_reader=bundle_data.equity_daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )

        pipeline_loader = USEquityPricingLoader(
            bundle_data.equity_daily_bar_reader,
            bundle_data.adjustment_reader,
        )

        def choose_loader(column):
            if column in USEquityPricing.columns:
                return pipeline_loader
            raise ValueError(
                "No PipelineLoader registered for column %s." % column
            )

        if isinstance(metrics_set, six.string_types):
            try:
                metrics_set = metrics.load(metrics_set)
            except ValueError as e:
                raise _RunAlgoError(str(e))

        if isinstance(blotter, six.string_types):
            try:
                blotter = load(Blotter, blotter)
            except ValueError as e:
                raise _RunAlgoError(str(e))

        perf = TradingAlgorithm(
            namespace=namespace,
            data_portal=data,
            get_pipeline_loader=choose_loader,
            trading_calendar=trading_calendar,
            sim_params=SimulationParameters(
                start_session=start,
                end_session=end,
                trading_calendar=trading_calendar,
                capital_base=capital_base,
                data_frequency=data_frequency,
            ),
            metrics_set=metrics_set,
            blotter=blotter,
            benchmark_returns=benchmark_returns,
            **{
                'initialize': initialize,
                'handle_data': handle_data,
                'before_trading_start': before_trading_start,
                'analyze': analyze,
            } if algotext is None else {
                'algo_filename': getattr(algofile, 'name', '<algorithm>'),
                'script': algotext,
            }
        ).run()

        if output == '-':
            click.echo(str(perf))
        elif output != os.devnull:  # make the zipline magic not write any data
            perf.to_pickle(output)

        return perf

docstring: Run a backtest for the given algorithm. This is shared between the cli and :func:`zipline.run_algo`.
[ "Run", "a", "backtest", "for", "the", "given", "algorithm", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/run_algo.py#L56-L213
partition: train
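
The `defines` handling in `_run` evaluates each `name=value` pair with `eval` in a single shared namespace, so later defines can refer to names bound by earlier ones. A minimal standalone sketch of that pattern (the `parse_defines` helper is hypothetical, not part of zipline):

    def parse_defines(defines):
        # Evaluate 'name=value' pairs in one shared namespace so that
        # later defines can reference names bound by earlier ones.
        namespace = {}
        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value'
                    % assign,
                )
            namespace[name] = eval(value, namespace)
        return namespace

    ns = parse_defines(['base=10', 'doubled=base * 2'])
    print(ns['base'], ns['doubled'])  # 10 20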

repo: quantopian/zipline
path: zipline/utils/run_algo.py
func_name: load_extensions
language: python
code:

    def load_extensions(default, extensions, strict, environ, reload=False):
        """Load all of the given extensions. This should be called by run_algo
        or the cli.

        Parameters
        ----------
        default : bool
            Load the default extension (~/.zipline/extension.py)?
        extensions : iterable[str]
            The paths to the extensions to load. If the path ends in ``.py``
            it is treated as a script and executed. If it does not end in
            ``.py`` it is treated as a module to be imported.
        strict : bool
            Should failure to load an extension raise. If this is false it
            will still warn.
        environ : mapping
            The environment to use to find the default extension path.
        reload : bool, optional
            Reload any extensions that have already been loaded.
        """
        if default:
            default_extension_path = pth.default_extension(environ=environ)
            pth.ensure_file(default_extension_path)
            # put the default extension first so other extensions can depend
            # on the order they are loaded
            extensions = concatv([default_extension_path], extensions)

        for ext in extensions:
            if ext in _loaded_extensions and not reload:
                continue
            try:
                # load all of the zipline extensions
                if ext.endswith('.py'):
                    with open(ext) as f:
                        ns = {}
                        six.exec_(compile(f.read(), ext, 'exec'), ns, ns)
                else:
                    __import__(ext)
            except Exception as e:
                if strict:
                    # if `strict` we should raise the actual exception and
                    # fail
                    raise
                # without `strict` we should just log the failure
                warnings.warn(
                    'Failed to load extension: %r\n%s' % (ext, e),
                    stacklevel=2
                )
            else:
                _loaded_extensions.add(ext)

docstring:

    Load all of the given extensions. This should be called by run_algo or
    the cli.

    Parameters
    ----------
    default : bool
        Load the default extension (~/.zipline/extension.py)?
    extensions : iterable[str]
        The paths to the extensions to load. If the path ends in ``.py`` it
        is treated as a script and executed. If it does not end in ``.py``
        it is treated as a module to be imported.
    strict : bool
        Should failure to load an extension raise. If this is false it will
        still warn.
    environ : mapping
        The environment to use to find the default extension path.
    reload : bool, optional
        Reload any extensions that have already been loaded.
[ "Load", "all", "of", "the", "given", "extensions", ".", "This", "should", "be", "called", "by", "run_algo", "or", "the", "cli", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/run_algo.py#L220-L268
partition: train
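
A hedged usage sketch for load_extensions, following the docstring above; the extension file and module names here are made up for illustration:

    import os
    from zipline.utils.run_algo import load_extensions

    load_extensions(
        default=True,              # also load ~/.zipline/extension.py
        extensions=[
            'my_bundles.py',       # ends in .py: executed as a script
            'my_pkg.zipline_ext',  # no .py suffix: imported as a module
        ],
        strict=True,               # raise instead of warn on failure
        environ=os.environ,
    )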

repo: quantopian/zipline
path: zipline/utils/run_algo.py
func_name: run_algorithm
language: python
code:

    def run_algorithm(start,
                      end,
                      initialize,
                      capital_base,
                      handle_data=None,
                      before_trading_start=None,
                      analyze=None,
                      data_frequency='daily',
                      bundle='quantopian-quandl',
                      bundle_timestamp=None,
                      trading_calendar=None,
                      metrics_set='default',
                      benchmark_returns=None,
                      default_extension=True,
                      extensions=(),
                      strict_extensions=True,
                      environ=os.environ,
                      blotter='default'):
        """
        Run a trading algorithm.

        Parameters
        ----------
        start : datetime
            The start date of the backtest.
        end : datetime
            The end date of the backtest.
        initialize : callable[context -> None]
            The initialize function to use for the algorithm. This is called
            once at the very beginning of the backtest and should be used to
            set up any state needed by the algorithm.
        capital_base : float
            The starting capital for the backtest.
        handle_data : callable[(context, BarData) -> None], optional
            The handle_data function to use for the algorithm. This is
            called every minute when ``data_frequency == 'minute'`` or every
            day when ``data_frequency == 'daily'``.
        before_trading_start : callable[(context, BarData) -> None], optional
            The before_trading_start function for the algorithm. This is
            called once before each trading day (after initialize on the
            first day).
        analyze : callable[(context, pd.DataFrame) -> None], optional
            The analyze function to use for the algorithm. This function is
            called once at the end of the backtest and is passed the context
            and the performance data.
        data_frequency : {'daily', 'minute'}, optional
            The data frequency to run the algorithm at.
        bundle : str, optional
            The name of the data bundle to use to load the data to run the
            backtest with. This defaults to 'quantopian-quandl'.
        bundle_timestamp : datetime, optional
            The datetime to lookup the bundle data for. This defaults to the
            current time.
        trading_calendar : TradingCalendar, optional
            The trading calendar to use for your backtest.
        metrics_set : iterable[Metric] or str, optional
            The set of metrics to compute in the simulation. If a string is
            passed, resolve the set with :func:`zipline.finance.metrics.load`.
        benchmark_returns : pd.Series, optional
            The benchmark returns to use for the backtest.
        default_extension : bool, optional
            Should the default zipline extension be loaded. This is found at
            ``$ZIPLINE_ROOT/extension.py``
        extensions : iterable[str], optional
            The names of any other extensions to load. Each element may
            either be a dotted module path like ``a.b.c`` or a path to a
            python file ending in ``.py`` like ``a/b/c.py``.
        strict_extensions : bool, optional
            Should the run fail if any extensions fail to load. If this is
            false, a warning will be raised instead.
        environ : mapping[str -> str], optional
            The os environment to use. Many extensions use this to get
            parameters. This defaults to ``os.environ``.
        blotter : str or zipline.finance.blotter.Blotter, optional
            Blotter to use with this algorithm. If passed as a string, we
            look for a blotter construction function registered with
            ``zipline.extensions.register`` and call it with no parameters.
            Default is a :class:`zipline.finance.blotter.SimulationBlotter`
            that never cancels orders.

        Returns
        -------
        perf : pd.DataFrame
            The daily performance of the algorithm.

        See Also
        --------
        zipline.data.bundles.bundles : The available data bundles.
        """
        load_extensions(default_extension, extensions, strict_extensions,
                        environ)

        return _run(
            handle_data=handle_data,
            initialize=initialize,
            before_trading_start=before_trading_start,
            analyze=analyze,
            algofile=None,
            algotext=None,
            defines=(),
            data_frequency=data_frequency,
            capital_base=capital_base,
            bundle=bundle,
            bundle_timestamp=bundle_timestamp,
            start=start,
            end=end,
            output=os.devnull,
            trading_calendar=trading_calendar,
            print_algo=False,
            metrics_set=metrics_set,
            local_namespace=False,
            environ=environ,
            blotter=blotter,
            benchmark_returns=benchmark_returns,
        )

docstring:

    Run a trading algorithm.

    Parameters
    ----------
    start : datetime
        The start date of the backtest.
    end : datetime
        The end date of the backtest.
    initialize : callable[context -> None]
        The initialize function to use for the algorithm. This is called
        once at the very beginning of the backtest and should be used to set
        up any state needed by the algorithm.
    capital_base : float
        The starting capital for the backtest.
    handle_data : callable[(context, BarData) -> None], optional
        The handle_data function to use for the algorithm. This is called
        every minute when ``data_frequency == 'minute'`` or every day when
        ``data_frequency == 'daily'``.
    before_trading_start : callable[(context, BarData) -> None], optional
        The before_trading_start function for the algorithm. This is called
        once before each trading day (after initialize on the first day).
    analyze : callable[(context, pd.DataFrame) -> None], optional
        The analyze function to use for the algorithm. This function is
        called once at the end of the backtest and is passed the context and
        the performance data.
    data_frequency : {'daily', 'minute'}, optional
        The data frequency to run the algorithm at.
    bundle : str, optional
        The name of the data bundle to use to load the data to run the
        backtest with. This defaults to 'quantopian-quandl'.
    bundle_timestamp : datetime, optional
        The datetime to lookup the bundle data for. This defaults to the
        current time.
    trading_calendar : TradingCalendar, optional
        The trading calendar to use for your backtest.
    metrics_set : iterable[Metric] or str, optional
        The set of metrics to compute in the simulation. If a string is
        passed, resolve the set with :func:`zipline.finance.metrics.load`.
    benchmark_returns : pd.Series, optional
        The benchmark returns to use for the backtest.
    default_extension : bool, optional
        Should the default zipline extension be loaded. This is found at
        ``$ZIPLINE_ROOT/extension.py``
    extensions : iterable[str], optional
        The names of any other extensions to load. Each element may either
        be a dotted module path like ``a.b.c`` or a path to a python file
        ending in ``.py`` like ``a/b/c.py``.
    strict_extensions : bool, optional
        Should the run fail if any extensions fail to load. If this is
        false, a warning will be raised instead.
    environ : mapping[str -> str], optional
        The os environment to use. Many extensions use this to get
        parameters. This defaults to ``os.environ``.
    blotter : str or zipline.finance.blotter.Blotter, optional
        Blotter to use with this algorithm. If passed as a string, we look
        for a blotter construction function registered with
        ``zipline.extensions.register`` and call it with no parameters.
        Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
        never cancels orders.

    Returns
    -------
    perf : pd.DataFrame
        The daily performance of the algorithm.

    See Also
    --------
    zipline.data.bundles.bundles : The available data bundles.
[ "Run", "a", "trading", "algorithm", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/run_algo.py#L271-L381
partition: train
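
A minimal end-to-end sketch of run_algorithm, assuming the default 'quantopian-quandl' bundle has already been ingested; the buy-and-hold strategy is made up for illustration:

    import pandas as pd
    from zipline import run_algorithm
    from zipline.api import order_target_percent, symbol

    def initialize(context):
        context.asset = symbol('AAPL')

    def handle_data(context, data):
        # rebalance to a 100% position in the asset each bar
        order_target_percent(context.asset, 1.0)

    perf = run_algorithm(
        start=pd.Timestamp('2016-01-04', tz='utc'),
        end=pd.Timestamp('2016-12-30', tz='utc'),
        initialize=initialize,
        handle_data=handle_data,
        capital_base=100000,
        data_frequency='daily',
        bundle='quantopian-quandl',
    )
    print(perf['portfolio_value'].tail())  # perf is the daily performance DataFrame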

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal.handle_extra_source
language: python
code:

    def handle_extra_source(self, source_df, sim_params):
        """
        Extra sources always have a sid column.

        We expand the given data (by forward filling) to the full range of
        the simulation dates, so that lookup is fast during simulation.
        """
        if source_df is None:
            return

        # Normalize all the dates in the df
        source_df.index = source_df.index.normalize()

        # source_df's sid column can either consist of assets we know about
        # (such as sid(24)) or of assets we don't know about (such as
        # palladium).
        #
        # In both cases, we break up the dataframe into individual dfs
        # that only contain a single asset's information. ie, if source_df
        # has data for PALLADIUM and GOLD, we split source_df into two
        # dataframes, one for each. (same applies if source_df has data for
        # AAPL and IBM).
        #
        # We then take each child df and reindex it to the simulation's date
        # range by forward-filling missing values. this makes reads simpler.
        #
        # Finally, we store the data. For each column, we store a mapping in
        # self.augmented_sources_map from the column to a dictionary of
        # asset -> df. In other words,
        # self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
        # holding that data.
        source_date_index = self.trading_calendar.sessions_in_range(
            sim_params.start_session,
            sim_params.end_session
        )

        # Break the source_df up into one dataframe per sid. This lets
        # us (more easily) calculate accurate start/end dates for each sid,
        # de-dup data, and expand the data to fit the backtest start/end date.
        grouped_by_sid = source_df.groupby(["sid"])
        group_names = grouped_by_sid.groups.keys()
        group_dict = {}
        for group_name in group_names:
            group_dict[group_name] = grouped_by_sid.get_group(group_name)

        # This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
        # call
        extra_source_df = pd.DataFrame()

        for identifier, df in iteritems(group_dict):
            # Since we know this df only contains a single sid, we can safely
            # de-dupe by the index (dt). If minute granularity, will take the
            # last data point on any given day
            df = df.groupby(level=0).last()

            # Reindex the dataframe based on the backtest start/end date.
            # This makes reads easier during the backtest.
            df = self._reindex_extra_source(df, source_date_index)

            for col_name in df.columns.difference(['sid']):
                if col_name not in self._augmented_sources_map:
                    self._augmented_sources_map[col_name] = {}

                self._augmented_sources_map[col_name][identifier] = df

            # Append to extra_source_df the reindexed dataframe for the
            # single sid
            extra_source_df = extra_source_df.append(df)

        self._extra_source_df = extra_source_df

docstring: Extra sources always have a sid column. We expand the given data (by forward filling) to the full range of the simulation dates, so that lookup is fast during simulation.
[ "Extra", "sources", "always", "have", "a", "sid", "column", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L324-L394
partition: train
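
The core pandas pattern in handle_extra_source (group by sid, de-dupe by taking the last row per date, then reindex onto the simulation sessions) can be sketched standalone with toy data; zipline's _reindex_extra_source also accounts for asset start/end dates, which this skips:

    import pandas as pd

    sessions = pd.date_range('2018-01-01', '2018-01-05', freq='B')
    source_df = pd.DataFrame(
        {'sid': ['GOLD', 'GOLD', 'PALLADIUM'],
         'days_to_cover': [1.5, 2.0, 3.0]},
        index=pd.to_datetime(['2018-01-01', '2018-01-01', '2018-01-02']),
    )

    per_sid = {}
    for sid, df in source_df.groupby('sid'):
        df = df.groupby(level=0).last()            # de-dupe: last row per date
        df = df.reindex(sessions, method='ffill')  # expand to all sessions
        per_sid[sid] = df

    print(per_sid['GOLD']['days_to_cover'])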

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal.get_last_traded_dt
language: python
code:

    def get_last_traded_dt(self, asset, dt, data_frequency):
        """
        Given an asset and dt, returns the last traded dt from the viewpoint
        of the given dt.

        If there is a trade on the dt, the answer is dt provided.
        """
        return self._get_pricing_reader(data_frequency).get_last_traded_dt(
            asset, dt)

docstring: Given an asset and dt, returns the last traded dt from the viewpoint of the given dt. If there is a trade on the dt, the answer is dt provided.
[ "Given", "an", "asset", "and", "dt", "returns", "the", "last", "traded", "dt", "from", "the", "viewpoint", "of", "the", "given", "dt", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L399-L407
partition: train

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal._is_extra_source
language: python
code:

    def _is_extra_source(asset, field, map):
        """
        Internal method that determines if this asset/field combination
        represents a fetcher value or a regular OHLCVP lookup.
        """
        # If we have an extra source with a column called "price", only look
        # at it if it's on something like palladium and not AAPL (since our
        # own price data always wins when dealing with assets).
        return not (field in BASE_FIELDS and
                    (isinstance(asset, (Asset, ContinuousFuture))))

docstring: Internal method that determines if this asset/field combination represents a fetcher value or a regular OHLCVP lookup.
[ "Internal", "method", "that", "determines", "if", "this", "asset", "/", "field", "combination", "represents", "a", "fetcher", "value", "or", "a", "regular", "OHLCVP", "lookup", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L410-L420
partition: train

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal.get_spot_value
language: python
code:

    def get_spot_value(self, assets, field, dt, data_frequency):
        """
        Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.

        Parameters
        ----------
        assets : Asset, ContinuousFuture, or iterable of same.
            The asset or assets whose data is desired.
        field : {'open', 'high', 'low', 'close', 'volume',
                 'price', 'last_traded'}
            The desired field of the asset.
        dt : pd.Timestamp
            The timestamp for the desired value.
        data_frequency : str
            The frequency of the data to query; i.e. whether the data is
            'daily' or 'minute' bars

        Returns
        -------
        value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``
            The return type is based on the ``field`` requested. If the
            field is one of 'open', 'high', 'low', 'close', or 'price', the
            value will be a float. If the ``field`` is 'volume' the value
            will be an int. If the ``field`` is 'last_traded' the value will
            be a Timestamp.
        """
        assets_is_scalar = False
        if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
            assets_is_scalar = True
        else:
            # If 'assets' was not one of the expected types then it should
            # be an iterable.
            try:
                iter(assets)
            except TypeError:
                raise TypeError(
                    "Unexpected 'assets' value of type {}."
                    .format(type(assets))
                )

        session_label = self.trading_calendar.minute_to_session_label(dt)

        if assets_is_scalar:
            return self._get_single_asset_value(
                session_label,
                assets,
                field,
                dt,
                data_frequency,
            )
        else:
            get_single_asset_value = self._get_single_asset_value
            return [
                get_single_asset_value(
                    session_label,
                    asset,
                    field,
                    dt,
                    data_frequency,
                )
                for asset in assets
            ]

docstring:

    Public API method that returns a scalar value representing the value
    of the desired asset's field at the given dt.

    Parameters
    ----------
    assets : Asset, ContinuousFuture, or iterable of same.
        The asset or assets whose data is desired.
    field : {'open', 'high', 'low', 'close', 'volume',
             'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars

    Returns
    -------
    value : float, int, or pd.Timestamp
        The spot value of ``field`` for ``asset``
        The return type is based on the ``field`` requested. If the field
        is one of 'open', 'high', 'low', 'close', or 'price', the value
        will be a float. If the ``field`` is 'volume' the value will be an
        int. If the ``field`` is 'last_traded' the value will be a
        Timestamp.
[ "Public", "API", "method", "that", "returns", "a", "scalar", "value", "representing", "the", "value", "of", "the", "desired", "asset", "s", "field", "at", "either", "the", "given", "dt", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L475-L537
partition: train
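
get_spot_value's scalar-vs-iterable dispatch (one asset in, scalar out; many in, list out) is a reusable pattern. A generic sketch, with str standing in for the scalar asset types zipline actually checks:

    def spot_values(assets, lookup):
        if isinstance(assets, str):  # stand-in for the scalar asset check
            return lookup(assets)
        try:
            iter(assets)
        except TypeError:
            raise TypeError(
                "Unexpected 'assets' value of type {}.".format(type(assets))
            )
        return [lookup(asset) for asset in assets]

    prices = {'AAPL': 170.0, 'IBM': 140.0}
    print(spot_values('AAPL', prices.get))           # 170.0
    print(spot_values(['AAPL', 'IBM'], prices.get))  # [170.0, 140.0]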

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal.get_scalar_asset_spot_value
language: python
code:

    def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
        """
        Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.

        Parameters
        ----------
        asset : Asset
            The asset whose data is desired. This cannot be an arbitrary
            AssetConvertible.
        field : {'open', 'high', 'low', 'close', 'volume',
                 'price', 'last_traded'}
            The desired field of the asset.
        dt : pd.Timestamp
            The timestamp for the desired value.
        data_frequency : str
            The frequency of the data to query; i.e. whether the data is
            'daily' or 'minute' bars

        Returns
        -------
        value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``
            The return type is based on the ``field`` requested. If the
            field is one of 'open', 'high', 'low', 'close', or 'price', the
            value will be a float. If the ``field`` is 'volume' the value
            will be an int. If the ``field`` is 'last_traded' the value will
            be a Timestamp.
        """
        return self._get_single_asset_value(
            self.trading_calendar.minute_to_session_label(dt),
            asset,
            field,
            dt,
            data_frequency,
        )

docstring:

    Public API method that returns a scalar value representing the value
    of the desired asset's field at the given dt.

    Parameters
    ----------
    asset : Asset
        The asset whose data is desired. This cannot be an arbitrary
        AssetConvertible.
    field : {'open', 'high', 'low', 'close', 'volume',
             'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars

    Returns
    -------
    value : float, int, or pd.Timestamp
        The spot value of ``field`` for ``asset``
        The return type is based on the ``field`` requested. If the field
        is one of 'open', 'high', 'low', 'close', or 'price', the value
        will be a float. If the ``field`` is 'volume' the value will be an
        int. If the ``field`` is 'last_traded' the value will be a
        Timestamp.
[ "Public", "API", "method", "that", "returns", "a", "scalar", "value", "representing", "the", "value", "of", "the", "desired", "asset", "s", "field", "at", "either", "the", "given", "dt", "." ]
sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L539-L573
partition: train

repo: quantopian/zipline
path: zipline/data/data_portal.py
func_name: DataPortal.get_adjustments
language: python
code:

    def get_adjustments(self, assets, field, dt, perspective_dt):
        """
        Returns a list of adjustments between the dt and perspective_dt for
        the given field and list of assets.

        Parameters
        ----------
        assets : list of type Asset, or Asset
            The asset, or assets whose adjustments are desired.
        field : {'open', 'high', 'low', 'close', 'volume',
                 'price', 'last_traded'}
            The desired field of the asset.
        dt : pd.Timestamp
            The timestamp for the desired value.
        perspective_dt : pd.Timestamp
            The timestamp from which the data is being viewed back from.

        Returns
        -------
        adjustments : list[Adjustment]
            The adjustments to that field.
        """
        if isinstance(assets, Asset):
            assets = [assets]

        adjustment_ratios_per_asset = []

        def split_adj_factor(x):
            return x if field != 'volume' else 1.0 / x

        for asset in assets:
            adjustments_for_asset = []
            split_adjustments = self._get_adjustment_list(
                asset, self._splits_dict, "SPLITS"
            )
            for adj_dt, adj in split_adjustments:
                if dt < adj_dt <= perspective_dt:
                    adjustments_for_asset.append(split_adj_factor(adj))
                elif adj_dt > perspective_dt:
                    break

            if field != 'volume':
                merger_adjustments = self._get_adjustment_list(
                    asset, self._mergers_dict, "MERGERS"
                )
                for adj_dt, adj in merger_adjustments:
                    if dt < adj_dt <= perspective_dt:
                        adjustments_for_asset.append(adj)
                    elif adj_dt > perspective_dt:
                        break

                dividend_adjustments = self._get_adjustment_list(
                    asset, self._dividends_dict, "DIVIDENDS",
                )
                for adj_dt, adj in dividend_adjustments:
                    if dt < adj_dt <= perspective_dt:
                        adjustments_for_asset.append(adj)
                    elif adj_dt > perspective_dt:
                        break

            ratio = reduce(mul, adjustments_for_asset, 1.0)
            adjustment_ratios_per_asset.append(ratio)

        return adjustment_ratios_per_asset

docstring:

    Returns a list of adjustments between the dt and perspective_dt for
    the given field and list of assets.

    Parameters
    ----------
    assets : list of type Asset, or Asset
        The asset, or assets whose adjustments are desired.
    field : {'open', 'high', 'low', 'close', 'volume',
             'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    perspective_dt : pd.Timestamp
        The timestamp from which the data is being viewed back from.

    Returns
    -------
    adjustments : list[Adjustment]
        The adjustments to that field.
[ "Returns", "a", "list", "of", "adjustments", "between", "the", "dt", "and", "perspective_dt", "for", "the", "given", "field", "and", "list", "of", "assets" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L575-L638
train
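A minimal, self-contained sketch of the ratio composition performed by get_adjustments above. The (timestamp, multiplier) pairs below stand in for the SQLite-backed _get_adjustment_list and are hypothetical.

from functools import reduce
from operator import mul

import pandas as pd

# Hypothetical split adjustments: (effective date, price multiplier).
splits = [(pd.Timestamp('2016-01-06'), 0.5),   # 2-for-1 split
          (pd.Timestamp('2016-03-07'), 0.25)]  # 4-for-1 split

def cumulative_ratio(field, dt, perspective_dt):
    # Keep adjustments effective after dt and known by perspective_dt;
    # volume uses the reciprocal because splits scale share counts up.
    factors = [adj if field != 'volume' else 1.0 / adj
               for adj_dt, adj in splits
               if dt < adj_dt <= perspective_dt]
    return reduce(mul, factors, 1.0)

print(cumulative_ratio('close', pd.Timestamp('2016-01-04'),
                       pd.Timestamp('2016-06-01')))   # 0.125
print(cumulative_ratio('volume', pd.Timestamp('2016-01-04'),
                       pd.Timestamp('2016-06-01')))   # 8.0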
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_adjusted_value
def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None):
    """
    Returns a scalar value representing the value
    of the desired asset's field at the given dt with adjustments applied.

    Parameters
    ----------
    asset : Asset
        The asset whose data is desired.
    field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    perspective_dt : pd.Timestamp
        The timestamp from which the data is being viewed back.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars

    Returns
    -------
    value : float, int, or pd.Timestamp
        The value of the given ``field`` for ``asset`` at ``dt`` with any
        adjustments known by ``perspective_dt`` applied. The return type is
        based on the ``field`` requested. If the field is one of 'open',
        'high', 'low', 'close', or 'price', the value will be a float. If
        the ``field`` is 'volume' the value will be an int. If the
        ``field`` is 'last_traded' the value will be a Timestamp.
    """
    if spot_value is None:
        # if this is a fetcher field, we want to use perspective_dt (not
        # dt) because we want the new value as of midnight (fetcher only
        # works on a daily basis, all timestamps are on midnight)
        if self._is_extra_source(asset, field,
                                 self._augmented_sources_map):
            spot_value = self.get_spot_value(asset, field, perspective_dt,
                                             data_frequency)
        else:
            spot_value = self.get_spot_value(asset, field, dt,
                                             data_frequency)

    if isinstance(asset, Equity):
        ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
        spot_value *= ratio

    return spot_value
python
def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None):
    """
    Returns a scalar value representing the value
    of the desired asset's field at the given dt with adjustments applied.

    Parameters
    ----------
    asset : Asset
        The asset whose data is desired.
    field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
        The desired field of the asset.
    dt : pd.Timestamp
        The timestamp for the desired value.
    perspective_dt : pd.Timestamp
        The timestamp from which the data is being viewed back.
    data_frequency : str
        The frequency of the data to query; i.e. whether the data is
        'daily' or 'minute' bars

    Returns
    -------
    value : float, int, or pd.Timestamp
        The value of the given ``field`` for ``asset`` at ``dt`` with any
        adjustments known by ``perspective_dt`` applied. The return type is
        based on the ``field`` requested. If the field is one of 'open',
        'high', 'low', 'close', or 'price', the value will be a float. If
        the ``field`` is 'volume' the value will be an int. If the
        ``field`` is 'last_traded' the value will be a Timestamp.
    """
    if spot_value is None:
        # if this is a fetcher field, we want to use perspective_dt (not
        # dt) because we want the new value as of midnight (fetcher only
        # works on a daily basis, all timestamps are on midnight)
        if self._is_extra_source(asset, field,
                                 self._augmented_sources_map):
            spot_value = self.get_spot_value(asset, field, perspective_dt,
                                             data_frequency)
        else:
            spot_value = self.get_spot_value(asset, field, dt,
                                             data_frequency)

    if isinstance(asset, Equity):
        ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
        spot_value *= ratio

    return spot_value
[ "def", "get_adjusted_value", "(", "self", ",", "asset", ",", "field", ",", "dt", ",", "perspective_dt", ",", "data_frequency", ",", "spot_value", "=", "None", ")", ":", "if", "spot_value", "is", "None", ":", "# if this a fetcher field, we want to use perspective_dt (not dt)", "# because we want the new value as of midnight (fetcher only works", "# on a daily basis, all timestamps are on midnight)", "if", "self", ".", "_is_extra_source", "(", "asset", ",", "field", ",", "self", ".", "_augmented_sources_map", ")", ":", "spot_value", "=", "self", ".", "get_spot_value", "(", "asset", ",", "field", ",", "perspective_dt", ",", "data_frequency", ")", "else", ":", "spot_value", "=", "self", ".", "get_spot_value", "(", "asset", ",", "field", ",", "dt", ",", "data_frequency", ")", "if", "isinstance", "(", "asset", ",", "Equity", ")", ":", "ratio", "=", "self", ".", "get_adjustments", "(", "asset", ",", "field", ",", "dt", ",", "perspective_dt", ")", "[", "0", "]", "spot_value", "*=", "ratio", "return", "spot_value" ]
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.

Parameters
----------
asset : Asset
    The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
    The desired field of the asset.
dt : pd.Timestamp
    The timestamp for the desired value.
perspective_dt : pd.Timestamp
    The timestamp from which the data is being viewed back.
data_frequency : str
    The frequency of the data to query; i.e. whether the data is
    'daily' or 'minute' bars

Returns
-------
value : float, int, or pd.Timestamp
    The value of the given ``field`` for ``asset`` at ``dt`` with any
    adjustments known by ``perspective_dt`` applied. The return type is
    based on the ``field`` requested. If the field is one of 'open',
    'high', 'low', 'close', or 'price', the value will be a float. If the
    ``field`` is 'volume' the value will be an int. If the ``field`` is
    'last_traded' the value will be a Timestamp.
[ "Returns", "a", "scalar", "value", "representing", "the", "value", "of", "the", "desired", "asset", "s", "field", "at", "the", "given", "dt", "with", "adjustments", "applied", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L640-L689
train
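The adjustment step in get_adjusted_value is a single multiplication; a toy illustration with hypothetical numbers (not the DataPortal API):

# Raw close observed at dt, scaled by the cumulative ratio of all
# adjustments known by perspective_dt (here one 2-for-1 split).
spot_close = 100.0
split_ratio = 0.5
adjusted_close = spot_close * split_ratio
assert adjusted_close == 50.0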
quantopian/zipline
zipline/data/data_portal.py
DataPortal._get_history_daily_window
def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use, data_frequency): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. """ session = self.trading_calendar.minute_to_session_label(end_dt) days_for_window = self._get_days_for_window(session, bar_count) if len(assets) == 0: return pd.DataFrame(None, index=days_for_window, columns=None) data = self._get_history_daily_window_data( assets, days_for_window, end_dt, field_to_use, data_frequency ) return pd.DataFrame( data, index=days_for_window, columns=assets )
python
def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use, data_frequency): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. """ session = self.trading_calendar.minute_to_session_label(end_dt) days_for_window = self._get_days_for_window(session, bar_count) if len(assets) == 0: return pd.DataFrame(None, index=days_for_window, columns=None) data = self._get_history_daily_window_data( assets, days_for_window, end_dt, field_to_use, data_frequency ) return pd.DataFrame( data, index=days_for_window, columns=assets )
[ "def", "_get_history_daily_window", "(", "self", ",", "assets", ",", "end_dt", ",", "bar_count", ",", "field_to_use", ",", "data_frequency", ")", ":", "session", "=", "self", ".", "trading_calendar", ".", "minute_to_session_label", "(", "end_dt", ")", "days_for_window", "=", "self", ".", "_get_days_for_window", "(", "session", ",", "bar_count", ")", "if", "len", "(", "assets", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", "None", ",", "index", "=", "days_for_window", ",", "columns", "=", "None", ")", "data", "=", "self", ".", "_get_history_daily_window_data", "(", "assets", ",", "days_for_window", ",", "end_dt", ",", "field_to_use", ",", "data_frequency", ")", "return", "pd", ".", "DataFrame", "(", "data", ",", "index", "=", "days_for_window", ",", "columns", "=", "assets", ")" ]
Internal method that returns a dataframe containing history bars of daily frequency for the given sids.
[ "Internal", "method", "that", "returns", "a", "dataframe", "containing", "history", "bars", "of", "daily", "frequency", "for", "the", "given", "sids", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L787-L812
train
quantopian/zipline
zipline/data/data_portal.py
DataPortal._get_history_minute_window
def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of minute frequency for the given sids. """ # get all the minutes for this window try: minutes_for_window = self.trading_calendar.minutes_window( end_dt, -bar_count ) except KeyError: self._handle_minute_history_out_of_bounds(bar_count) if minutes_for_window[0] < self._first_trading_minute: self._handle_minute_history_out_of_bounds(bar_count) asset_minute_data = self._get_minute_window_data( assets, field_to_use, minutes_for_window, ) return pd.DataFrame( asset_minute_data, index=minutes_for_window, columns=assets )
python
def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of minute frequency for the given sids. """ # get all the minutes for this window try: minutes_for_window = self.trading_calendar.minutes_window( end_dt, -bar_count ) except KeyError: self._handle_minute_history_out_of_bounds(bar_count) if minutes_for_window[0] < self._first_trading_minute: self._handle_minute_history_out_of_bounds(bar_count) asset_minute_data = self._get_minute_window_data( assets, field_to_use, minutes_for_window, ) return pd.DataFrame( asset_minute_data, index=minutes_for_window, columns=assets )
[ "def", "_get_history_minute_window", "(", "self", ",", "assets", ",", "end_dt", ",", "bar_count", ",", "field_to_use", ")", ":", "# get all the minutes for this window", "try", ":", "minutes_for_window", "=", "self", ".", "trading_calendar", ".", "minutes_window", "(", "end_dt", ",", "-", "bar_count", ")", "except", "KeyError", ":", "self", ".", "_handle_minute_history_out_of_bounds", "(", "bar_count", ")", "if", "minutes_for_window", "[", "0", "]", "<", "self", ".", "_first_trading_minute", ":", "self", ".", "_handle_minute_history_out_of_bounds", "(", "bar_count", ")", "asset_minute_data", "=", "self", ".", "_get_minute_window_data", "(", "assets", ",", "field_to_use", ",", "minutes_for_window", ",", ")", "return", "pd", ".", "DataFrame", "(", "asset_minute_data", ",", "index", "=", "minutes_for_window", ",", "columns", "=", "assets", ")" ]
Internal method that returns a dataframe containing history bars of minute frequency for the given sids.
[ "Internal", "method", "that", "returns", "a", "dataframe", "containing", "history", "bars", "of", "minute", "frequency", "for", "the", "given", "sids", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L886-L913
train
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_history_window
def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): """ Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. data_frequency: string The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data. """ if field not in OHLCVP_FIELDS and field != 'sid': raise ValueError("Invalid field: {0}".format(field)) if bar_count < 1: raise ValueError( "bar_count must be >= 1, but got {}".format(bar_count) ) if frequency == "1d": if field == "price": df = self._get_history_daily_window(assets, end_dt, bar_count, "close", data_frequency) else: df = self._get_history_daily_window(assets, end_dt, bar_count, field, data_frequency) elif frequency == "1m": if field == "price": df = self._get_history_minute_window(assets, end_dt, bar_count, "close") else: df = self._get_history_minute_window(assets, end_dt, bar_count, field) else: raise ValueError("Invalid frequency: {0}".format(frequency)) # forward-fill price if field == "price": if frequency == "1m": ffill_data_frequency = 'minute' elif frequency == "1d": ffill_data_frequency = 'daily' else: raise Exception( "Only 1d and 1m are supported for forward-filling.") assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0] history_start, history_end = df.index[[0, -1]] if ffill_data_frequency == 'daily' and data_frequency == 'minute': # When we're looking for a daily value, but we haven't seen any # volume in today's minute bars yet, we need to use the # previous day's ffilled daily price. Using today's daily price # could yield a value from later today. history_start -= self.trading_calendar.day initial_values = [] for asset in df.columns[assets_with_leading_nan]: last_traded = self.get_last_traded_dt( asset, history_start, ffill_data_frequency, ) if isnull(last_traded): initial_values.append(nan) else: initial_values.append( self.get_adjusted_value( asset, field, dt=last_traded, perspective_dt=history_end, data_frequency=ffill_data_frequency, ) ) # Set leading values for assets that were missing data, then ffill. df.ix[0, assets_with_leading_nan] = np.array( initial_values, dtype=np.float64 ) df.fillna(method='ffill', inplace=True) # forward-filling will incorrectly produce values after the end of # an asset's lifetime, so write NaNs back over the asset's # end_date. normed_index = df.index.normalize() for asset in df.columns: if history_end >= asset.end_date: # if the window extends past the asset's end date, set # all post-end-date values to NaN in that asset's series df.loc[normed_index > asset.end_date, asset] = nan return df
python
def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): """ Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. data_frequency: string The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data. """ if field not in OHLCVP_FIELDS and field != 'sid': raise ValueError("Invalid field: {0}".format(field)) if bar_count < 1: raise ValueError( "bar_count must be >= 1, but got {}".format(bar_count) ) if frequency == "1d": if field == "price": df = self._get_history_daily_window(assets, end_dt, bar_count, "close", data_frequency) else: df = self._get_history_daily_window(assets, end_dt, bar_count, field, data_frequency) elif frequency == "1m": if field == "price": df = self._get_history_minute_window(assets, end_dt, bar_count, "close") else: df = self._get_history_minute_window(assets, end_dt, bar_count, field) else: raise ValueError("Invalid frequency: {0}".format(frequency)) # forward-fill price if field == "price": if frequency == "1m": ffill_data_frequency = 'minute' elif frequency == "1d": ffill_data_frequency = 'daily' else: raise Exception( "Only 1d and 1m are supported for forward-filling.") assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0] history_start, history_end = df.index[[0, -1]] if ffill_data_frequency == 'daily' and data_frequency == 'minute': # When we're looking for a daily value, but we haven't seen any # volume in today's minute bars yet, we need to use the # previous day's ffilled daily price. Using today's daily price # could yield a value from later today. history_start -= self.trading_calendar.day initial_values = [] for asset in df.columns[assets_with_leading_nan]: last_traded = self.get_last_traded_dt( asset, history_start, ffill_data_frequency, ) if isnull(last_traded): initial_values.append(nan) else: initial_values.append( self.get_adjusted_value( asset, field, dt=last_traded, perspective_dt=history_end, data_frequency=ffill_data_frequency, ) ) # Set leading values for assets that were missing data, then ffill. df.ix[0, assets_with_leading_nan] = np.array( initial_values, dtype=np.float64 ) df.fillna(method='ffill', inplace=True) # forward-filling will incorrectly produce values after the end of # an asset's lifetime, so write NaNs back over the asset's # end_date. normed_index = df.index.normalize() for asset in df.columns: if history_end >= asset.end_date: # if the window extends past the asset's end date, set # all post-end-date values to NaN in that asset's series df.loc[normed_index > asset.end_date, asset] = nan return df
[ "def", "get_history_window", "(", "self", ",", "assets", ",", "end_dt", ",", "bar_count", ",", "frequency", ",", "field", ",", "data_frequency", ",", "ffill", "=", "True", ")", ":", "if", "field", "not", "in", "OHLCVP_FIELDS", "and", "field", "!=", "'sid'", ":", "raise", "ValueError", "(", "\"Invalid field: {0}\"", ".", "format", "(", "field", ")", ")", "if", "bar_count", "<", "1", ":", "raise", "ValueError", "(", "\"bar_count must be >= 1, but got {}\"", ".", "format", "(", "bar_count", ")", ")", "if", "frequency", "==", "\"1d\"", ":", "if", "field", "==", "\"price\"", ":", "df", "=", "self", ".", "_get_history_daily_window", "(", "assets", ",", "end_dt", ",", "bar_count", ",", "\"close\"", ",", "data_frequency", ")", "else", ":", "df", "=", "self", ".", "_get_history_daily_window", "(", "assets", ",", "end_dt", ",", "bar_count", ",", "field", ",", "data_frequency", ")", "elif", "frequency", "==", "\"1m\"", ":", "if", "field", "==", "\"price\"", ":", "df", "=", "self", ".", "_get_history_minute_window", "(", "assets", ",", "end_dt", ",", "bar_count", ",", "\"close\"", ")", "else", ":", "df", "=", "self", ".", "_get_history_minute_window", "(", "assets", ",", "end_dt", ",", "bar_count", ",", "field", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid frequency: {0}\"", ".", "format", "(", "frequency", ")", ")", "# forward-fill price", "if", "field", "==", "\"price\"", ":", "if", "frequency", "==", "\"1m\"", ":", "ffill_data_frequency", "=", "'minute'", "elif", "frequency", "==", "\"1d\"", ":", "ffill_data_frequency", "=", "'daily'", "else", ":", "raise", "Exception", "(", "\"Only 1d and 1m are supported for forward-filling.\"", ")", "assets_with_leading_nan", "=", "np", ".", "where", "(", "isnull", "(", "df", ".", "iloc", "[", "0", "]", ")", ")", "[", "0", "]", "history_start", ",", "history_end", "=", "df", ".", "index", "[", "[", "0", ",", "-", "1", "]", "]", "if", "ffill_data_frequency", "==", "'daily'", "and", "data_frequency", "==", "'minute'", ":", "# When we're looking for a daily value, but we haven't seen any", "# volume in today's minute bars yet, we need to use the", "# previous day's ffilled daily price. 
Using today's daily price", "# could yield a value from later today.", "history_start", "-=", "self", ".", "trading_calendar", ".", "day", "initial_values", "=", "[", "]", "for", "asset", "in", "df", ".", "columns", "[", "assets_with_leading_nan", "]", ":", "last_traded", "=", "self", ".", "get_last_traded_dt", "(", "asset", ",", "history_start", ",", "ffill_data_frequency", ",", ")", "if", "isnull", "(", "last_traded", ")", ":", "initial_values", ".", "append", "(", "nan", ")", "else", ":", "initial_values", ".", "append", "(", "self", ".", "get_adjusted_value", "(", "asset", ",", "field", ",", "dt", "=", "last_traded", ",", "perspective_dt", "=", "history_end", ",", "data_frequency", "=", "ffill_data_frequency", ",", ")", ")", "# Set leading values for assets that were missing data, then ffill.", "df", ".", "ix", "[", "0", ",", "assets_with_leading_nan", "]", "=", "np", ".", "array", "(", "initial_values", ",", "dtype", "=", "np", ".", "float64", ")", "df", ".", "fillna", "(", "method", "=", "'ffill'", ",", "inplace", "=", "True", ")", "# forward-filling will incorrectly produce values after the end of", "# an asset's lifetime, so write NaNs back over the asset's", "# end_date.", "normed_index", "=", "df", ".", "index", ".", "normalize", "(", ")", "for", "asset", "in", "df", ".", "columns", ":", "if", "history_end", ">=", "asset", ".", "end_date", ":", "# if the window extends past the asset's end date, set", "# all post-end-date values to NaN in that asset's series", "df", ".", "loc", "[", "normed_index", ">", "asset", ".", "end_date", ",", "asset", "]", "=", "nan", "return", "df" ]
Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. data_frequency: string The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data.
[ "Public", "API", "method", "that", "returns", "a", "dataframe", "containing", "the", "requested", "history", "window", ".", "Data", "is", "fully", "adjusted", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L915-L1034
train
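A self-contained pandas sketch of the forward-fill bookkeeping in get_history_window above: seed a leading NaN from the last traded value, forward-fill, then write NaNs back over bars after a hypothetical asset end date.

import numpy as np
import pandas as pd

idx = pd.date_range('2016-01-04', periods=5)
prices = pd.Series([np.nan, np.nan, 10.0, 11.0, 12.0], index=idx)

last_traded_value = 9.5                   # hypothetical pre-window price
if np.isnan(prices.iloc[0]):
    prices.iloc[0] = last_traded_value    # seed the leading NaN
prices = prices.ffill()                   # fill the remaining gap

end_date = pd.Timestamp('2016-01-07')     # hypothetical delisting date
prices[prices.index > end_date] = np.nan  # don't ffill past end_date

print(prices)
# 2016-01-04     9.5
# 2016-01-05     9.5
# 2016-01-06    10.0
# 2016-01-07    11.0
# 2016-01-08     NaN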
quantopian/zipline
zipline/data/data_portal.py
DataPortal._get_minute_window_data
def _get_minute_window_data(self, assets, field, minutes_for_window):
    """
    Internal method that gets a window of adjusted minute data for an
    asset and specified date range.  Used to support the history API
    method for minute bars.

    Missing bars are filled with NaN.

    Parameters
    ----------
    assets : iterable[Asset]
        The assets whose data is desired.

    field: string
        The specific field to return.  "open", "high", "close_price", etc.

    minutes_for_window: pd.DatetimeIndex
        The list of minutes representing the desired window.  Each minute
        is a pd.Timestamp.

    Returns
    -------
    A numpy array with requested values.
    """
    return self._minute_history_loader.history(assets,
                                               minutes_for_window,
                                               field,
                                               False)
python
def _get_minute_window_data(self, assets, field, minutes_for_window):
    """
    Internal method that gets a window of adjusted minute data for an
    asset and specified date range.  Used to support the history API
    method for minute bars.

    Missing bars are filled with NaN.

    Parameters
    ----------
    assets : iterable[Asset]
        The assets whose data is desired.

    field: string
        The specific field to return.  "open", "high", "close_price", etc.

    minutes_for_window: pd.DatetimeIndex
        The list of minutes representing the desired window.  Each minute
        is a pd.Timestamp.

    Returns
    -------
    A numpy array with requested values.
    """
    return self._minute_history_loader.history(assets,
                                               minutes_for_window,
                                               field,
                                               False)
[ "def", "_get_minute_window_data", "(", "self", ",", "assets", ",", "field", ",", "minutes_for_window", ")", ":", "return", "self", ".", "_minute_history_loader", ".", "history", "(", "assets", ",", "minutes_for_window", ",", "field", ",", "False", ")" ]
Internal method that gets a window of adjusted minute data for an asset
and specified date range.  Used to support the history API method for
minute bars.

Missing bars are filled with NaN.

Parameters
----------
assets : iterable[Asset]
    The assets whose data is desired.

field: string
    The specific field to return.  "open", "high", "close_price", etc.

minutes_for_window: pd.DatetimeIndex
    The list of minutes representing the desired window.  Each minute
    is a pd.Timestamp.

Returns
-------
A numpy array with requested values.
[ "Internal", "method", "that", "gets", "a", "window", "of", "adjusted", "minute", "data", "for", "an", "asset", "and", "specified", "date", "range", ".", "Used", "to", "support", "the", "history", "API", "method", "for", "minute", "bars", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1036-L1063
train
quantopian/zipline
zipline/data/data_portal.py
DataPortal._get_daily_window_data
def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True):
    """
    Internal method that gets a window of adjusted daily data for a sid
    and specified date range.  Used to support the history API method for
    daily bars.

    Parameters
    ----------
    assets : iterable[Asset]
        The assets whose data is desired.

    field: string
        The specific field to return.  "open", "high", "close_price", etc.

    days_in_window: iterable[pd.Timestamp]
        The days whose data is desired.

    extra_slot: boolean
        Whether to allocate an extra slot in the returned numpy array.
        This extra slot will hold the data for the last partial day.  It's
        much better to create it here than to create a copy of the array
        later just to add a slot.

    Returns
    -------
    A numpy array with requested values.  Any missing slots filled with
    nan.
    """
    bar_count = len(days_in_window)
    # create an np.array of size bar_count
    dtype = float64 if field != 'sid' else int64
    if extra_slot:
        return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
    else:
        return_array = np.zeros((bar_count, len(assets)), dtype=dtype)

    if field != "volume":
        # volumes default to 0, so we don't need to put NaNs in the array
        return_array[:] = np.NAN

    if bar_count != 0:
        data = self._history_loader.history(assets,
                                            days_in_window,
                                            field,
                                            extra_slot)
        if extra_slot:
            return_array[:len(return_array) - 1, :] = data
        else:
            return_array[:len(data)] = data
    return return_array
python
def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True):
    """
    Internal method that gets a window of adjusted daily data for a sid
    and specified date range.  Used to support the history API method for
    daily bars.

    Parameters
    ----------
    assets : iterable[Asset]
        The assets whose data is desired.

    field: string
        The specific field to return.  "open", "high", "close_price", etc.

    days_in_window: iterable[pd.Timestamp]
        The days whose data is desired.

    extra_slot: boolean
        Whether to allocate an extra slot in the returned numpy array.
        This extra slot will hold the data for the last partial day.  It's
        much better to create it here than to create a copy of the array
        later just to add a slot.

    Returns
    -------
    A numpy array with requested values.  Any missing slots filled with
    nan.
    """
    bar_count = len(days_in_window)
    # create an np.array of size bar_count
    dtype = float64 if field != 'sid' else int64
    if extra_slot:
        return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
    else:
        return_array = np.zeros((bar_count, len(assets)), dtype=dtype)

    if field != "volume":
        # volumes default to 0, so we don't need to put NaNs in the array
        return_array[:] = np.NAN

    if bar_count != 0:
        data = self._history_loader.history(assets,
                                            days_in_window,
                                            field,
                                            extra_slot)
        if extra_slot:
            return_array[:len(return_array) - 1, :] = data
        else:
            return_array[:len(data)] = data
    return return_array
[ "def", "_get_daily_window_data", "(", "self", ",", "assets", ",", "field", ",", "days_in_window", ",", "extra_slot", "=", "True", ")", ":", "bar_count", "=", "len", "(", "days_in_window", ")", "# create an np.array of size bar_count", "dtype", "=", "float64", "if", "field", "!=", "'sid'", "else", "int64", "if", "extra_slot", ":", "return_array", "=", "np", ".", "zeros", "(", "(", "bar_count", "+", "1", ",", "len", "(", "assets", ")", ")", ",", "dtype", "=", "dtype", ")", "else", ":", "return_array", "=", "np", ".", "zeros", "(", "(", "bar_count", ",", "len", "(", "assets", ")", ")", ",", "dtype", "=", "dtype", ")", "if", "field", "!=", "\"volume\"", ":", "# volumes default to 0, so we don't need to put NaNs in the array", "return_array", "[", ":", "]", "=", "np", ".", "NAN", "if", "bar_count", "!=", "0", ":", "data", "=", "self", ".", "_history_loader", ".", "history", "(", "assets", ",", "days_in_window", ",", "field", ",", "extra_slot", ")", "if", "extra_slot", ":", "return_array", "[", ":", "len", "(", "return_array", ")", "-", "1", ",", ":", "]", "=", "data", "else", ":", "return_array", "[", ":", "len", "(", "data", ")", "]", "=", "data", "return", "return_array" ]
Internal method that gets a window of adjusted daily data for a sid and
specified date range.  Used to support the history API method for daily
bars.

Parameters
----------
assets : iterable[Asset]
    The assets whose data is desired.

field: string
    The specific field to return.  "open", "high", "close_price", etc.

days_in_window: iterable[pd.Timestamp]
    The days whose data is desired.

extra_slot: boolean
    Whether to allocate an extra slot in the returned numpy array.
    This extra slot will hold the data for the last partial day.  It's
    much better to create it here than to create a copy of the array
    later just to add a slot.

Returns
-------
A numpy array with requested values.  Any missing slots filled with
nan.
[ "Internal", "method", "that", "gets", "a", "window", "of", "adjusted", "daily", "data", "for", "a", "sid", "and", "specified", "date", "range", ".", "Used", "to", "support", "the", "history", "API", "method", "for", "daily", "bars", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1065-L1122
train
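A standalone numpy sketch of the allocation pattern used by _get_daily_window_data above: price-like fields start as NaN, volume starts as 0, and extra_slot reserves one trailing row for the current partial day. Shapes and values here are illustrative.

import numpy as np

bar_count, n_assets, extra_slot = 3, 2, True

out = np.zeros((bar_count + extra_slot, n_assets), dtype=np.float64)
field = 'close'
if field != 'volume':
    out[:] = np.nan                    # missing prices must read as NaN

data = np.arange(6, dtype=np.float64).reshape(3, 2)  # stand-in loader output
out[:len(out) - 1, :] = data           # leave the last (partial) row alone

print(out)
# [[ 0.  1.]
#  [ 2.  3.]
#  [ 4.  5.]
#  [nan nan]]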
quantopian/zipline
zipline/data/data_portal.py
DataPortal._get_adjustment_list
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
    """
    Internal method that returns a list of adjustments for the given sid.

    Parameters
    ----------
    asset : Asset
        The asset for which to return adjustments.

    adjustments_dict: dict
        A dictionary of sid -> list that is used as a cache.

    table_name: string
        The table that contains this data in the adjustments db.

    Returns
    -------
    adjustments: list
        A list of (pd.Timestamp, multiplier) pairs, earliest first
    """
    if self._adjustment_reader is None:
        return []

    sid = int(asset)

    try:
        adjustments = adjustments_dict[sid]
    except KeyError:
        adjustments = adjustments_dict[sid] = self._adjustment_reader.\
            get_adjustments_for_sid(table_name, sid)

    return adjustments
python
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
    """
    Internal method that returns a list of adjustments for the given sid.

    Parameters
    ----------
    asset : Asset
        The asset for which to return adjustments.

    adjustments_dict: dict
        A dictionary of sid -> list that is used as a cache.

    table_name: string
        The table that contains this data in the adjustments db.

    Returns
    -------
    adjustments: list
        A list of (pd.Timestamp, multiplier) pairs, earliest first
    """
    if self._adjustment_reader is None:
        return []

    sid = int(asset)

    try:
        adjustments = adjustments_dict[sid]
    except KeyError:
        adjustments = adjustments_dict[sid] = self._adjustment_reader.\
            get_adjustments_for_sid(table_name, sid)

    return adjustments
[ "def", "_get_adjustment_list", "(", "self", ",", "asset", ",", "adjustments_dict", ",", "table_name", ")", ":", "if", "self", ".", "_adjustment_reader", "is", "None", ":", "return", "[", "]", "sid", "=", "int", "(", "asset", ")", "try", ":", "adjustments", "=", "adjustments_dict", "[", "sid", "]", "except", "KeyError", ":", "adjustments", "=", "adjustments_dict", "[", "sid", "]", "=", "self", ".", "_adjustment_reader", ".", "get_adjustments_for_sid", "(", "table_name", ",", "sid", ")", "return", "adjustments" ]
Internal method that returns a list of adjustments for the given sid.

Parameters
----------
asset : Asset
    The asset for which to return adjustments.

adjustments_dict: dict
    A dictionary of sid -> list that is used as a cache.

table_name: string
    The table that contains this data in the adjustments db.

Returns
-------
adjustments: list
    A list of (pd.Timestamp, multiplier) pairs, earliest first
[ "Internal", "method", "that", "returns", "a", "list", "of", "adjustments", "for", "the", "given", "sid", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1124-L1156
train
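The same cache-on-miss idiom _get_adjustment_list uses, reduced to a runnable standalone sketch: look the dict up first, and on a KeyError both store and return the freshly computed value. expensive_lookup is a hypothetical stand-in for the adjustment reader.

_cache = {}

def expensive_lookup(sid):
    print('computing for sid', sid)
    return [(sid, 0.5)]

def cached_adjustments(sid):
    try:
        return _cache[sid]
    except KeyError:
        result = _cache[sid] = expensive_lookup(sid)
        return result

cached_adjustments(24)   # prints 'computing for sid 24'
cached_adjustments(24)   # served from the cache, no print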
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_splits
def get_splits(self, assets, dt): """ Returns any splits for the given sids and the given dt. Parameters ---------- assets : container Assets for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(asset, float)] List of splits, where each split is a (asset, ratio) tuple. """ if self._adjustment_reader is None or not assets: return [] # convert dt to # of seconds since epoch, because that's what we use # in the adjustments db seconds = int(dt.value / 1e9) splits = self._adjustment_reader.conn.execute( "SELECT sid, ratio FROM SPLITS WHERE effective_date = ?", (seconds,)).fetchall() splits = [split for split in splits if split[0] in assets] splits = [(self.asset_finder.retrieve_asset(split[0]), split[1]) for split in splits] return splits
python
def get_splits(self, assets, dt): """ Returns any splits for the given sids and the given dt. Parameters ---------- assets : container Assets for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(asset, float)] List of splits, where each split is a (asset, ratio) tuple. """ if self._adjustment_reader is None or not assets: return [] # convert dt to # of seconds since epoch, because that's what we use # in the adjustments db seconds = int(dt.value / 1e9) splits = self._adjustment_reader.conn.execute( "SELECT sid, ratio FROM SPLITS WHERE effective_date = ?", (seconds,)).fetchall() splits = [split for split in splits if split[0] in assets] splits = [(self.asset_finder.retrieve_asset(split[0]), split[1]) for split in splits] return splits
[ "def", "get_splits", "(", "self", ",", "assets", ",", "dt", ")", ":", "if", "self", ".", "_adjustment_reader", "is", "None", "or", "not", "assets", ":", "return", "[", "]", "# convert dt to # of seconds since epoch, because that's what we use", "# in the adjustments db", "seconds", "=", "int", "(", "dt", ".", "value", "/", "1e9", ")", "splits", "=", "self", ".", "_adjustment_reader", ".", "conn", ".", "execute", "(", "\"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?\"", ",", "(", "seconds", ",", ")", ")", ".", "fetchall", "(", ")", "splits", "=", "[", "split", "for", "split", "in", "splits", "if", "split", "[", "0", "]", "in", "assets", "]", "splits", "=", "[", "(", "self", ".", "asset_finder", ".", "retrieve_asset", "(", "split", "[", "0", "]", ")", ",", "split", "[", "1", "]", ")", "for", "split", "in", "splits", "]", "return", "splits" ]
Returns any splits for the given sids and the given dt. Parameters ---------- assets : container Assets for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(asset, float)] List of splits, where each split is a (asset, ratio) tuple.
[ "Returns", "any", "splits", "for", "the", "given", "sids", "and", "the", "given", "dt", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1158-L1190
train
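get_splits keys the SPLITS table by whole seconds since the epoch; this runnable snippet shows the same conversion from a midnight-UTC pd.Timestamp (the nanosecond .value divided down to seconds).

import pandas as pd

dt = pd.Timestamp('2016-01-05', tz='UTC')
seconds = int(dt.value / 1e9)
print(seconds)                                   # 1451952000
assert pd.Timestamp(seconds, unit='s', tz='UTC') == dt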
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_stock_dividends
def get_stock_dividends(self, sid, trading_days):
    """
    Returns all the stock dividends for a specific sid that occur
    in the given trading range.

    Parameters
    ----------
    sid: int
        The asset whose stock dividends should be returned.

    trading_days: pd.DatetimeIndex
        The trading range.

    Returns
    -------
    list: A list of dicts with all relevant attributes populated.
    All timestamp fields are converted to pd.Timestamps.
    """

    if self._adjustment_reader is None:
        return []

    if len(trading_days) == 0:
        return []

    start_dt = trading_days[0].value / 1e9
    end_dt = trading_days[-1].value / 1e9

    dividends = self._adjustment_reader.conn.execute(
        "SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
        "ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
        fetchall()

    dividend_info = []
    for dividend_tuple in dividends:
        dividend_info.append({
            "declared_date": dividend_tuple[1],
            "ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
            "pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
            "payment_sid": dividend_tuple[4],
            "ratio": dividend_tuple[5],
            "record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
            "sid": dividend_tuple[7]
        })

    return dividend_info
python
def get_stock_dividends(self, sid, trading_days):
    """
    Returns all the stock dividends for a specific sid that occur
    in the given trading range.

    Parameters
    ----------
    sid: int
        The asset whose stock dividends should be returned.

    trading_days: pd.DatetimeIndex
        The trading range.

    Returns
    -------
    list: A list of dicts with all relevant attributes populated.
    All timestamp fields are converted to pd.Timestamps.
    """

    if self._adjustment_reader is None:
        return []

    if len(trading_days) == 0:
        return []

    start_dt = trading_days[0].value / 1e9
    end_dt = trading_days[-1].value / 1e9

    dividends = self._adjustment_reader.conn.execute(
        "SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
        "ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
        fetchall()

    dividend_info = []
    for dividend_tuple in dividends:
        dividend_info.append({
            "declared_date": dividend_tuple[1],
            "ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
            "pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
            "payment_sid": dividend_tuple[4],
            "ratio": dividend_tuple[5],
            "record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
            "sid": dividend_tuple[7]
        })

    return dividend_info
[ "def", "get_stock_dividends", "(", "self", ",", "sid", ",", "trading_days", ")", ":", "if", "self", ".", "_adjustment_reader", "is", "None", ":", "return", "[", "]", "if", "len", "(", "trading_days", ")", "==", "0", ":", "return", "[", "]", "start_dt", "=", "trading_days", "[", "0", "]", ".", "value", "/", "1e9", "end_dt", "=", "trading_days", "[", "-", "1", "]", ".", "value", "/", "1e9", "dividends", "=", "self", ".", "_adjustment_reader", ".", "conn", ".", "execute", "(", "\"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND \"", "\"ex_date > ? AND pay_date < ?\"", ",", "(", "int", "(", "sid", ")", ",", "start_dt", ",", "end_dt", ",", ")", ")", ".", "fetchall", "(", ")", "dividend_info", "=", "[", "]", "for", "dividend_tuple", "in", "dividends", ":", "dividend_info", ".", "append", "(", "{", "\"declared_date\"", ":", "dividend_tuple", "[", "1", "]", ",", "\"ex_date\"", ":", "pd", ".", "Timestamp", "(", "dividend_tuple", "[", "2", "]", ",", "unit", "=", "\"s\"", ")", ",", "\"pay_date\"", ":", "pd", ".", "Timestamp", "(", "dividend_tuple", "[", "3", "]", ",", "unit", "=", "\"s\"", ")", ",", "\"payment_sid\"", ":", "dividend_tuple", "[", "4", "]", ",", "\"ratio\"", ":", "dividend_tuple", "[", "5", "]", ",", "\"record_date\"", ":", "pd", ".", "Timestamp", "(", "dividend_tuple", "[", "6", "]", ",", "unit", "=", "\"s\"", ")", ",", "\"sid\"", ":", "dividend_tuple", "[", "7", "]", "}", ")", "return", "dividend_info" ]
Returns all the stock dividends for a specific sid that occur
in the given trading range.

Parameters
----------
sid: int
    The asset whose stock dividends should be returned.

trading_days: pd.DatetimeIndex
    The trading range.

Returns
-------
list: A list of dicts with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
[ "Returns", "all", "the", "stock", "dividends", "for", "a", "specific", "sid", "that", "occur", "in", "the", "given", "trading", "range", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1192-L1237
train
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_fetcher_assets
def get_fetcher_assets(self, dt): """ Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects. """ # return a list of assets for the current date, as defined by the # fetcher source if self._extra_source_df is None: return [] day = normalize_date(dt) if day in self._extra_source_df.index: assets = self._extra_source_df.loc[day]['sid'] else: return [] if isinstance(assets, pd.Series): return [x for x in assets if isinstance(x, Asset)] else: return [assets] if isinstance(assets, Asset) else []
python
def get_fetcher_assets(self, dt): """ Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects. """ # return a list of assets for the current date, as defined by the # fetcher source if self._extra_source_df is None: return [] day = normalize_date(dt) if day in self._extra_source_df.index: assets = self._extra_source_df.loc[day]['sid'] else: return [] if isinstance(assets, pd.Series): return [x for x in assets if isinstance(x, Asset)] else: return [assets] if isinstance(assets, Asset) else []
[ "def", "get_fetcher_assets", "(", "self", ",", "dt", ")", ":", "# return a list of assets for the current date, as defined by the", "# fetcher source", "if", "self", ".", "_extra_source_df", "is", "None", ":", "return", "[", "]", "day", "=", "normalize_date", "(", "dt", ")", "if", "day", "in", "self", ".", "_extra_source_df", ".", "index", ":", "assets", "=", "self", ".", "_extra_source_df", ".", "loc", "[", "day", "]", "[", "'sid'", "]", "else", ":", "return", "[", "]", "if", "isinstance", "(", "assets", ",", "pd", ".", "Series", ")", ":", "return", "[", "x", "for", "x", "in", "assets", "if", "isinstance", "(", "x", ",", "Asset", ")", "]", "else", ":", "return", "[", "assets", "]", "if", "isinstance", "(", "assets", ",", "Asset", ")", "else", "[", "]" ]
Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects.
[ "Returns", "a", "list", "of", "assets", "for", "the", "current", "date", "as", "defined", "by", "the", "fetcher", "data", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1244-L1268
train
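Why get_fetcher_assets checks isinstance(assets, pd.Series): selecting one label from a DataFrame column yields a Series when the index holds duplicate dates, but a bare scalar when the date appears once. The frame below is a hypothetical stand-in for _extra_source_df.

import pandas as pd

df = pd.DataFrame(
    {'sid': ['A', 'B', 'C']},
    index=pd.to_datetime(['2016-01-04', '2016-01-04', '2016-01-05']),
)

many = df.loc[pd.Timestamp('2016-01-04')]['sid']
one = df.loc[pd.Timestamp('2016-01-05')]['sid']

print(type(many))   # <class 'pandas.core.series.Series'>
print(type(one))    # <class 'str'> -- a scalar, not a Series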
quantopian/zipline
zipline/data/data_portal.py
DataPortal.get_current_future_chain
def get_current_future_chain(self, continuous_future, dt):
    """
    Retrieves the future chain for the contract at the given `dt` according
    to the `continuous_future` specification.

    Returns
    -------
    future_chain : list[Future]
        A list of active futures, where the first index is the current
        contract specified by the continuous future definition, the second
        is the next upcoming contract and so on.
    """
    rf = self._roll_finders[continuous_future.roll_style]
    session = self.trading_calendar.minute_to_session_label(dt)
    contract_center = rf.get_contract_center(
        continuous_future.root_symbol, session,
        continuous_future.offset)
    oc = self.asset_finder.get_ordered_contracts(
        continuous_future.root_symbol)
    chain = oc.active_chain(contract_center, session.value)
    return self.asset_finder.retrieve_all(chain)
python
def get_current_future_chain(self, continuous_future, dt):
    """
    Retrieves the future chain for the contract at the given `dt` according
    to the `continuous_future` specification.

    Returns
    -------
    future_chain : list[Future]
        A list of active futures, where the first index is the current
        contract specified by the continuous future definition, the second
        is the next upcoming contract and so on.
    """
    rf = self._roll_finders[continuous_future.roll_style]
    session = self.trading_calendar.minute_to_session_label(dt)
    contract_center = rf.get_contract_center(
        continuous_future.root_symbol, session,
        continuous_future.offset)
    oc = self.asset_finder.get_ordered_contracts(
        continuous_future.root_symbol)
    chain = oc.active_chain(contract_center, session.value)
    return self.asset_finder.retrieve_all(chain)
[ "def", "get_current_future_chain", "(", "self", ",", "continuous_future", ",", "dt", ")", ":", "rf", "=", "self", ".", "_roll_finders", "[", "continuous_future", ".", "roll_style", "]", "session", "=", "self", ".", "trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "contract_center", "=", "rf", ".", "get_contract_center", "(", "continuous_future", ".", "root_symbol", ",", "session", ",", "continuous_future", ".", "offset", ")", "oc", "=", "self", ".", "asset_finder", ".", "get_ordered_contracts", "(", "continuous_future", ".", "root_symbol", ")", "chain", "=", "oc", ".", "active_chain", "(", "contract_center", ",", "session", ".", "value", ")", "return", "self", ".", "asset_finder", ".", "retrieve_all", "(", "chain", ")" ]
Retrieves the future chain for the contract at the given `dt` according
to the `continuous_future` specification.

Returns
-------
future_chain : list[Future]
    A list of active futures, where the first index is the current
    contract specified by the continuous future definition, the second
    is the next upcoming contract and so on.
[ "Retrieves", "the", "future", "chain", "for", "the", "contract", "at", "the", "given", "dt", "according", "the", "continuous_future", "specification", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/data_portal.py#L1391-L1412
train
quantopian/zipline
zipline/utils/numpy_utils.py
make_kind_check
def make_kind_check(python_types, numpy_kind): """ Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta). """ def check(value): if hasattr(value, 'dtype'): return value.dtype.kind == numpy_kind return isinstance(value, python_types) return check
python
def make_kind_check(python_types, numpy_kind): """ Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta). """ def check(value): if hasattr(value, 'dtype'): return value.dtype.kind == numpy_kind return isinstance(value, python_types) return check
[ "def", "make_kind_check", "(", "python_types", ",", "numpy_kind", ")", ":", "def", "check", "(", "value", ")", ":", "if", "hasattr", "(", "value", ",", "'dtype'", ")", ":", "return", "value", ".", "dtype", ".", "kind", "==", "numpy_kind", "return", "isinstance", "(", "value", ",", "python_types", ")", "return", "check" ]
Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta).
[ "Make", "a", "function", "that", "checks", "whether", "a", "scalar", "or", "array", "is", "of", "a", "given", "kind", "(", "e", ".", "g", ".", "float", "int", "datetime", "timedelta", ")", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L124-L133
train
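A runnable use of make_kind_check (repeated inline from the record above so the snippet is self-contained): build a float checker and apply it to scalars and arrays. 'f' is numpy's kind code for floating types.

import numpy as np

def make_kind_check(python_types, numpy_kind):
    def check(value):
        if hasattr(value, 'dtype'):
            return value.dtype.kind == numpy_kind
        return isinstance(value, python_types)
    return check

is_float = make_kind_check(float, 'f')

assert is_float(3.14)                      # python scalar path
assert is_float(np.array([1.0, 2.0]))      # dtype.kind == 'f' path
assert not is_float(np.array([1, 2]))      # integer array has kind 'i'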
quantopian/zipline
zipline/utils/numpy_utils.py
coerce_to_dtype
def coerce_to_dtype(dtype, value): """ Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. """ name = dtype.name if name.startswith('datetime64'): if name == 'datetime64[D]': return make_datetime64D(value) elif name == 'datetime64[ns]': return make_datetime64ns(value) else: raise TypeError( "Don't know how to coerce values of dtype %s" % dtype ) return dtype.type(value)
python
def coerce_to_dtype(dtype, value): """ Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. """ name = dtype.name if name.startswith('datetime64'): if name == 'datetime64[D]': return make_datetime64D(value) elif name == 'datetime64[ns]': return make_datetime64ns(value) else: raise TypeError( "Don't know how to coerce values of dtype %s" % dtype ) return dtype.type(value)
[ "def", "coerce_to_dtype", "(", "dtype", ",", "value", ")", ":", "name", "=", "dtype", ".", "name", "if", "name", ".", "startswith", "(", "'datetime64'", ")", ":", "if", "name", "==", "'datetime64[D]'", ":", "return", "make_datetime64D", "(", "value", ")", "elif", "name", "==", "'datetime64[ns]'", ":", "return", "make_datetime64ns", "(", "value", ")", "else", ":", "raise", "TypeError", "(", "\"Don't know how to coerce values of dtype %s\"", "%", "dtype", ")", "return", "dtype", ".", "type", "(", "value", ")" ]
Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
[ "Make", "a", "value", "with", "the", "specified", "numpy", "dtype", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L142-L158
train
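The non-datetime branch of coerce_to_dtype reduces to dtype.type(value); this standalone numpy snippet exercises that call directly.

import numpy as np

assert np.dtype(np.float64).type('3.5') == np.float64(3.5)
assert np.dtype(np.int64).type(7) == np.int64(7)
assert np.dtype(bool).type(1) is np.True_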
quantopian/zipline
zipline/utils/numpy_utils.py
repeat_first_axis
def repeat_first_axis(array, count): """ Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """ return as_strided(array, (count,) + array.shape, (0,) + array.strides)
python
def repeat_first_axis(array, count): """ Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """ return as_strided(array, (count,) + array.shape, (0,) + array.strides)
[ "def", "repeat_first_axis", "(", "array", ",", "count", ")", ":", "return", "as_strided", "(", "array", ",", "(", "count", ",", ")", "+", "array", ".", "shape", ",", "(", "0", ",", ")", "+", "array", ".", "strides", ")" ]
Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis
[ "Restride", "array", "to", "repeat", "count", "times", "along", "the", "first", "axis", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L173-L213
train
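The restrided result of repeat_first_axis shares memory with its input (stride 0 on the new axis), so writing through the base array shows up in every "repeated" row; this runnable snippet demonstrates why the Notes section warns about copying first.

import numpy as np
from numpy.lib.stride_tricks import as_strided

a = np.arange(3)
view = as_strided(a, (2,) + a.shape, (0,) + a.strides)  # repeat_first_axis(a, 2)

a[0] = 99
print(view)
# [[99  1  2]
#  [99  1  2]]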
quantopian/zipline
zipline/utils/numpy_utils.py
repeat_last_axis
def repeat_last_axis(array, count):
    """
    Restride `array` to repeat `count` times along the last axis.

    Parameters
    ----------
    array : np.array
        The array to restride.
    count : int
        Number of times to repeat `array`.

    Returns
    -------
    result : array
        Array of shape array.shape + (count,) composed of `array` repeated
        `count` times along the last axis.

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(3); a
    array([0, 1, 2])
    >>> repeat_last_axis(a, 2)
    array([[0, 0],
           [1, 1],
           [2, 2]])
    >>> repeat_last_axis(a, 4)
    array([[0, 0, 0, 0],
           [1, 1, 1, 1],
           [2, 2, 2, 2]])

    Notes
    ----
    The resulting array will share memory with `array`.  If you need
    to assign to the input or output, you should probably make a copy first.

    See Also
    --------
    repeat_first_axis
    """
    return as_strided(array, array.shape + (count,), array.strides + (0,))
python
def repeat_last_axis(array, count):
    """
    Restride `array` to repeat `count` times along the last axis.

    Parameters
    ----------
    array : np.array
        The array to restride.
    count : int
        Number of times to repeat `array`.

    Returns
    -------
    result : array
        Array of shape array.shape + (count,) composed of `array` repeated
        `count` times along the last axis.

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(3); a
    array([0, 1, 2])
    >>> repeat_last_axis(a, 2)
    array([[0, 0],
           [1, 1],
           [2, 2]])
    >>> repeat_last_axis(a, 4)
    array([[0, 0, 0, 0],
           [1, 1, 1, 1],
           [2, 2, 2, 2]])

    Notes
    ----
    The resulting array will share memory with `array`.  If you need
    to assign to the input or output, you should probably make a copy first.

    See Also
    --------
    repeat_first_axis
    """
    return as_strided(array, array.shape + (count,), array.strides + (0,))
[ "def", "repeat_last_axis", "(", "array", ",", "count", ")", ":", "return", "as_strided", "(", "array", ",", "array", ".", "shape", "+", "(", "count", ",", ")", ",", "array", ".", "strides", "+", "(", "0", ",", ")", ")" ]
Restride `array` to repeat `count` times along the last axis.

Parameters
----------
array : np.array
    The array to restride.
count : int
    Number of times to repeat `array`.

Returns
-------
result : array
    Array of shape array.shape + (count,) composed of `array` repeated
    `count` times along the last axis.

Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
       [1, 1],
       [2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
       [1, 1, 1, 1],
       [2, 2, 2, 2]])

Notes
----
The resulting array will share memory with `array`.  If you need
to assign to the input or output, you should probably make a copy first.

See Also
--------
repeat_first_axis
[ "Restride", "array", "to", "repeat", "count", "times", "along", "the", "last", "axis", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L216-L256
train
quantopian/zipline
zipline/utils/numpy_utils.py
rolling_window
def rolling_window(array, length):
    """
    Restride an array of shape

        (X_0, ... X_N)

    into an array of shape

        (X_0 - length + 1, length, X_1, ... X_N)

    where each slice at index i along the first axis is equivalent to

        result[i] = array[i:i + length]

    Parameters
    ----------
    array : np.ndarray
        The base array.
    length : int
        Length of the synthetic first axis to generate.

    Returns
    -------
    out : np.ndarray

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(25).reshape(5, 5)
    >>> a
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19],
           [20, 21, 22, 23, 24]])

    >>> rolling_window(a, 2)
    array([[[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9]],
    <BLANKLINE>
           [[ 5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14]],
    <BLANKLINE>
           [[10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19]],
    <BLANKLINE>
           [[15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24]]])
    """
    orig_shape = array.shape
    if not orig_shape:
        raise IndexError("Can't restride a scalar.")
    elif orig_shape[0] <= length:
        raise IndexError(
            "Can't restride array of shape {shape} with"
            " a window length of {len}".format(
                shape=orig_shape,
                len=length,
            )
        )

    num_windows = (orig_shape[0] - length + 1)
    new_shape = (num_windows, length) + orig_shape[1:]

    new_strides = (array.strides[0],) + array.strides

    return as_strided(array, new_shape, new_strides)
python
def rolling_window(array, length):
    """
    Restride an array of shape

        (X_0, ... X_N)

    into an array of shape

        (X_0 - length + 1, length, X_1, ... X_N)

    where each slice at index i along the first axis is equivalent to

        result[i] = array[i:i + length]

    Parameters
    ----------
    array : np.ndarray
        The base array.
    length : int
        Length of the synthetic first axis to generate.

    Returns
    -------
    out : np.ndarray

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(25).reshape(5, 5)
    >>> a
    array([[ 0,  1,  2,  3,  4],
           [ 5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19],
           [20, 21, 22, 23, 24]])

    >>> rolling_window(a, 2)
    array([[[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9]],
    <BLANKLINE>
           [[ 5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14]],
    <BLANKLINE>
           [[10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19]],
    <BLANKLINE>
           [[15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24]]])
    """
    orig_shape = array.shape
    if not orig_shape:
        raise IndexError("Can't restride a scalar.")
    elif orig_shape[0] <= length:
        raise IndexError(
            "Can't restride array of shape {shape} with"
            " a window length of {len}".format(
                shape=orig_shape,
                len=length,
            )
        )

    num_windows = (orig_shape[0] - length + 1)
    new_shape = (num_windows, length) + orig_shape[1:]

    new_strides = (array.strides[0],) + array.strides

    return as_strided(array, new_shape, new_strides)
[ "def", "rolling_window", "(", "array", ",", "length", ")", ":", "orig_shape", "=", "array", ".", "shape", "if", "not", "orig_shape", ":", "raise", "IndexError", "(", "\"Can't restride a scalar.\"", ")", "elif", "orig_shape", "[", "0", "]", "<=", "length", ":", "raise", "IndexError", "(", "\"Can't restride array of shape {shape} with\"", "\" a window length of {len}\"", ".", "format", "(", "shape", "=", "orig_shape", ",", "len", "=", "length", ",", ")", ")", "num_windows", "=", "(", "orig_shape", "[", "0", "]", "-", "length", "+", "1", ")", "new_shape", "=", "(", "num_windows", ",", "length", ")", "+", "orig_shape", "[", "1", ":", "]", "new_strides", "=", "(", "array", ".", "strides", "[", "0", "]", ",", ")", "+", "array", ".", "strides", "return", "as_strided", "(", "array", ",", "new_shape", ",", "new_strides", ")" ]
Restride an array of shape (X_0, ... X_N) into an array of shape (length, X_0 - length + 1, ... X_N) where each slice at index i along the first axis is equivalent to result[i] = array[length * i:length * (i + 1)] Parameters ---------- array : np.ndarray The base array. length : int Length of the synthetic first axis to generate. Returns ------- out : np.ndarray Example ------- >>> from numpy import arange >>> a = arange(25).reshape(5, 5) >>> a array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) >>> rolling_window(a, 2) array([[[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9]], <BLANKLINE> [[ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], <BLANKLINE> [[10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], <BLANKLINE> [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]])
[ "Restride", "an", "array", "of", "shape" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L259-L325
train
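Because each window returned by `rolling_window` is a zero-copy view onto the base array, reductions over the window axis give cheap rolling statistics. A small sketch (the import path comes from this record's `path` field):

import numpy as np
from zipline.utils.numpy_utils import rolling_window

prices = np.arange(10.0)              # ten "sessions" of prices
windows = rolling_window(prices, 3)   # shape (8, 3), no data copied
rolling_mean = windows.mean(axis=1)   # one mean per 3-session window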
quantopian/zipline
zipline/utils/numpy_utils.py
isnat
def isnat(obj): """ Check if a value is np.NaT. """ if obj.dtype.kind not in ('m', 'M'): raise ValueError("%s is not a numpy datetime or timedelta" % obj) return obj.view(int64_dtype) == iNaT
python
def isnat(obj): """ Check if a value is np.NaT. """ if obj.dtype.kind not in ('m', 'M'): raise ValueError("%s is not a numpy datetime or timedelta" % obj) return obj.view(int64_dtype) == iNaT
[ "def", "isnat", "(", "obj", ")", ":", "if", "obj", ".", "dtype", ".", "kind", "not", "in", "(", "'m'", ",", "'M'", ")", ":", "raise", "ValueError", "(", "\"%s is not a numpy datetime or timedelta\"", ")", "return", "obj", ".", "view", "(", "int64_dtype", ")", "==", "iNaT" ]
Check if a value is np.NaT.
[ "Check", "if", "a", "value", "is", "np", ".", "NaT", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L334-L340
train
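The int64 view trick above works because NaT is stored as the minimum int64 value for every datetime64/timedelta64 unit. A quick sketch:

import numpy as np
from zipline.utils.numpy_utils import isnat

dates = np.array(['2014-01-02', 'NaT', '2014-01-06'], dtype='datetime64[ns]')
isnat(dates)   # -> array([False,  True, False])
# Passing a float array raises ValueError, since its dtype kind is not 'm'/'M'.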
quantopian/zipline
zipline/utils/numpy_utils.py
is_missing
def is_missing(data, missing_value): """ Generic is_missing function that handles NaN and NaT. """ if is_float(data) and isnan(missing_value): return isnan(data) elif is_datetime(data) and isnat(missing_value): return isnat(data) return (data == missing_value)
python
def is_missing(data, missing_value): """ Generic is_missing function that handles NaN and NaT. """ if is_float(data) and isnan(missing_value): return isnan(data) elif is_datetime(data) and isnat(missing_value): return isnat(data) return (data == missing_value)
[ "def", "is_missing", "(", "data", ",", "missing_value", ")", ":", "if", "is_float", "(", "data", ")", "and", "isnan", "(", "missing_value", ")", ":", "return", "isnan", "(", "data", ")", "elif", "is_datetime", "(", "data", ")", "and", "isnat", "(", "missing_value", ")", ":", "return", "isnat", "(", "data", ")", "return", "(", "data", "==", "missing_value", ")" ]
Generic is_missing function that handles NaN and NaT.
[ "Generic", "is_missing", "function", "that", "handles", "NaN", "and", "NaT", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L343-L351
train
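A sketch of the dispatch branches; `NaN != NaN` and `NaT != NaT`, so a plain equality check would be wrong for those sentinels:

import numpy as np
from zipline.utils.numpy_utils import is_missing

is_missing(np.array([1.0, np.nan]), np.nan)
# -> array([False,  True])   (a plain `== np.nan` would be all-False)
is_missing(np.array([5, -1]), -1)
# -> array([False,  True])   (generic equality branch)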
quantopian/zipline
zipline/utils/numpy_utils.py
busday_count_mask_NaT
def busday_count_mask_NaT(begindates, enddates, out=None): """ Simple version of numpy.busday_count that returns `float` arrays rather than int arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`. Doesn't support custom weekdays or calendars, but probably should in the future. See Also -------- np.busday_count """ if out is None: out = empty(broadcast(begindates, enddates).shape, dtype=float) beginmask = isnat(begindates) endmask = isnat(enddates) out = busday_count( # Temporarily fill in non-NaT values. where(beginmask, _notNaT, begindates), where(endmask, _notNaT, enddates), out=out, ) # Fill in entries where either comparison was NaT with nan in the output. out[beginmask | endmask] = nan return out
python
def busday_count_mask_NaT(begindates, enddates, out=None): """ Simple version of numpy.busday_count that returns `float` arrays rather than int arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`. Doesn't support custom weekdays or calendars, but probably should in the future. See Also -------- np.busday_count """ if out is None: out = empty(broadcast(begindates, enddates).shape, dtype=float) beginmask = isnat(begindates) endmask = isnat(enddates) out = busday_count( # Temporarily fill in non-NaT values. where(beginmask, _notNaT, begindates), where(endmask, _notNaT, enddates), out=out, ) # Fill in entries where either comparison was NaT with nan in the output. out[beginmask | endmask] = nan return out
[ "def", "busday_count_mask_NaT", "(", "begindates", ",", "enddates", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "empty", "(", "broadcast", "(", "begindates", ",", "enddates", ")", ".", "shape", ",", "dtype", "=", "float", ")", "beginmask", "=", "isnat", "(", "begindates", ")", "endmask", "=", "isnat", "(", "enddates", ")", "out", "=", "busday_count", "(", "# Temporarily fill in non-NaT values.", "where", "(", "beginmask", ",", "_notNaT", ",", "begindates", ")", ",", "where", "(", "endmask", ",", "_notNaT", ",", "enddates", ")", ",", "out", "=", "out", ",", ")", "# Fill in entries where either comparison was NaT with nan in the output.", "out", "[", "beginmask", "|", "endmask", "]", "=", "nan", "return", "out" ]
Simple version of numpy.busday_count that returns `float` arrays rather than int arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`. Doesn't support custom weekdays or calendars, but probably should in the future. See Also -------- np.busday_count
[ "Simple", "of", "numpy", ".", "busday_count", "that", "returns", "float", "arrays", "rather", "than", "int", "arrays", "and", "handles", "NaT", "s", "by", "returning", "NaN", "s", "where", "the", "inputs", "were", "NaT", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L354-L381
train
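A short sketch; note that `np.busday_count` counts business days in the half-open interval [begin, end):

import numpy as np
from zipline.utils.numpy_utils import busday_count_mask_NaT

starts = np.array(['2014-01-06', 'NaT'], dtype='datetime64[D]')       # Mon, NaT
ends = np.array(['2014-01-10', '2014-01-10'], dtype='datetime64[D]')  # Fri, Fri
busday_count_mask_NaT(starts, ends)
# -> array([ 4., nan]): Mon-Thu counted, NaT input propagated as NaN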
quantopian/zipline
zipline/utils/numpy_utils.py
changed_locations
def changed_locations(a, include_first): """ Compute indices of values in ``a`` that differ from the previous value. Parameters ---------- a : np.ndarray The array on which to compute indices of change. include_first : bool Whether or not to consider the first index of the array as "changed". Example ------- >>> import numpy as np >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False) array([2, 4]) >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True) array([0, 2, 4]) """ if a.ndim > 1: raise ValueError("changed_locations only supports 1D arrays.") indices = flatnonzero(diff(a)) + 1 if not include_first: return indices return hstack([[0], indices])
python
def changed_locations(a, include_first): """ Compute indices of values in ``a`` that differ from the previous value. Parameters ---------- a : np.ndarray The array on which to compute indices of change. include_first : bool Whether or not to consider the first index of the array as "changed". Example ------- >>> import numpy as np >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False) array([2, 4]) >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True) array([0, 2, 4]) """ if a.ndim > 1: raise ValueError("changed_locations only supports 1D arrays.") indices = flatnonzero(diff(a)) + 1 if not include_first: return indices return hstack([[0], indices])
[ "def", "changed_locations", "(", "a", ",", "include_first", ")", ":", "if", "a", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"indices_of_changed_values only supports 1D arrays.\"", ")", "indices", "=", "flatnonzero", "(", "diff", "(", "a", ")", ")", "+", "1", "if", "not", "include_first", ":", "return", "indices", "return", "hstack", "(", "[", "[", "0", "]", ",", "indices", "]", ")" ]
Compute indices of values in ``a`` that differ from the previous value. Parameters ---------- a : np.ndarray The array on which to compute indices of change. include_first : bool Whether or not to consider the first index of the array as "changed". Example ------- >>> import numpy as np >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False) array([2, 4]) >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True) array([0, 2, 4])
[ "Compute", "indices", "of", "values", "in", "a", "that", "differ", "from", "the", "previous", "value", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L469-L496
train
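Beyond the doctests, the change points make a cheap run-length encoding; a sketch:

import numpy as np
from zipline.utils.numpy_utils import changed_locations

codes = np.array([7, 7, 7, 3, 3, 9])
starts = changed_locations(codes, include_first=True)   # array([0, 3, 5])
lengths = np.diff(np.append(starts, len(codes)))        # array([3, 2, 1])
values = codes[starts]                                  # array([7, 3, 9])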
quantopian/zipline
zipline/utils/date_utils.py
compute_date_range_chunks
def compute_date_range_chunks(sessions, start_date, end_date, chunksize): """Compute the start and end dates to run a pipeline for. Parameters ---------- sessions : DatetimeIndex The available dates. start_date : pd.Timestamp The first date in the pipeline. end_date : pd.Timestamp The last date in the pipeline. chunksize : int or None The size of the chunks to run. Setting this to None returns one chunk. Returns ------- ranges : iterable[(np.datetime64, np.datetime64)] A sequence of start and end dates to run the pipeline for. """ if start_date not in sessions: raise KeyError("Start date %s is not found in calendar." % (start_date.strftime("%Y-%m-%d"),)) if end_date not in sessions: raise KeyError("End date %s is not found in calendar." % (end_date.strftime("%Y-%m-%d"),)) if end_date < start_date: raise ValueError("End date %s cannot precede start date %s." % (end_date.strftime("%Y-%m-%d"), start_date.strftime("%Y-%m-%d"))) if chunksize is None: return [(start_date, end_date)] start_ix, end_ix = sessions.slice_locs(start_date, end_date) return ( (r[0], r[-1]) for r in partition_all( chunksize, sessions[start_ix:end_ix] ) )
python
def compute_date_range_chunks(sessions, start_date, end_date, chunksize): """Compute the start and end dates to run a pipeline for. Parameters ---------- sessions : DatetimeIndex The available dates. start_date : pd.Timestamp The first date in the pipeline. end_date : pd.Timestamp The last date in the pipeline. chunksize : int or None The size of the chunks to run. Setting this to None returns one chunk. Returns ------- ranges : iterable[(np.datetime64, np.datetime64)] A sequence of start and end dates to run the pipeline for. """ if start_date not in sessions: raise KeyError("Start date %s is not found in calendar." % (start_date.strftime("%Y-%m-%d"),)) if end_date not in sessions: raise KeyError("End date %s is not found in calendar." % (end_date.strftime("%Y-%m-%d"),)) if end_date < start_date: raise ValueError("End date %s cannot precede start date %s." % (end_date.strftime("%Y-%m-%d"), start_date.strftime("%Y-%m-%d"))) if chunksize is None: return [(start_date, end_date)] start_ix, end_ix = sessions.slice_locs(start_date, end_date) return ( (r[0], r[-1]) for r in partition_all( chunksize, sessions[start_ix:end_ix] ) )
[ "def", "compute_date_range_chunks", "(", "sessions", ",", "start_date", ",", "end_date", ",", "chunksize", ")", ":", "if", "start_date", "not", "in", "sessions", ":", "raise", "KeyError", "(", "\"Start date %s is not found in calendar.\"", "%", "(", "start_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", ",", ")", ")", "if", "end_date", "not", "in", "sessions", ":", "raise", "KeyError", "(", "\"End date %s is not found in calendar.\"", "%", "(", "end_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", ",", ")", ")", "if", "end_date", "<", "start_date", ":", "raise", "ValueError", "(", "\"End date %s cannot precede start date %s.\"", "%", "(", "end_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", ",", "start_date", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", ")", ")", "if", "chunksize", "is", "None", ":", "return", "[", "(", "start_date", ",", "end_date", ")", "]", "start_ix", ",", "end_ix", "=", "sessions", ".", "slice_locs", "(", "start_date", ",", "end_date", ")", "return", "(", "(", "r", "[", "0", "]", ",", "r", "[", "-", "1", "]", ")", "for", "r", "in", "partition_all", "(", "chunksize", ",", "sessions", "[", "start_ix", ":", "end_ix", "]", ")", ")" ]
Compute the start and end dates to run a pipeline for. Parameters ---------- sessions : DatetimeIndex The available dates. start_date : pd.Timestamp The first date in the pipeline. end_date : pd.Timestamp The last date in the pipeline. chunksize : int or None The size of the chunks to run. Setting this to None returns one chunk. Returns ------- ranges : iterable[(np.datetime64, np.datetime64)] A sequence of start and end dates to run the pipeline for.
[ "Compute", "the", "start", "and", "end", "dates", "to", "run", "a", "pipeline", "for", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/date_utils.py#L4-L42
train
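A sketch with a synthetic business-day calendar; passing `chunksize=None` would instead yield the single pair `(start_date, end_date)`:

import pandas as pd
from zipline.utils.date_utils import compute_date_range_chunks

sessions = pd.date_range('2014-01-06', '2014-01-17', freq='B')  # 10 sessions
list(compute_date_range_chunks(sessions, sessions[0], sessions[-1], chunksize=5))
# -> [(Timestamp('2014-01-06'), Timestamp('2014-01-10')),
#     (Timestamp('2014-01-13'), Timestamp('2014-01-17'))]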
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine.run_pipeline
def run_pipeline(self, pipeline, start_date, end_date): """ Compute a pipeline. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` :meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline` """ # See notes at the top of this module for a description of the # algorithm implemented here. if end_date < start_date: raise ValueError( "start_date must be before or equal to end_date \n" "start_date=%s, end_date=%s" % (start_date, end_date) ) domain = self.resolve_domain(pipeline) graph = pipeline.to_execution_plan( domain, self._root_mask_term, start_date, end_date, ) extra_rows = graph.extra_rows[self._root_mask_term] root_mask = self._compute_root_mask( domain, start_date, end_date, extra_rows, ) dates, assets, root_mask_values = explode(root_mask) initial_workspace = self._populate_initial_workspace( { self._root_mask_term: root_mask_values, self._root_mask_dates_term: as_column(dates.values) }, self._root_mask_term, graph, dates, assets, ) results = self.compute_chunk(graph, dates, assets, initial_workspace) return self._to_narrow( graph.outputs, results, results.pop(graph.screen_name), dates[extra_rows:], assets, )
python
def run_pipeline(self, pipeline, start_date, end_date): """ Compute a pipeline. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` :meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline` """ # See notes at the top of this module for a description of the # algorithm implemented here. if end_date < start_date: raise ValueError( "start_date must be before or equal to end_date \n" "start_date=%s, end_date=%s" % (start_date, end_date) ) domain = self.resolve_domain(pipeline) graph = pipeline.to_execution_plan( domain, self._root_mask_term, start_date, end_date, ) extra_rows = graph.extra_rows[self._root_mask_term] root_mask = self._compute_root_mask( domain, start_date, end_date, extra_rows, ) dates, assets, root_mask_values = explode(root_mask) initial_workspace = self._populate_initial_workspace( { self._root_mask_term: root_mask_values, self._root_mask_dates_term: as_column(dates.values) }, self._root_mask_term, graph, dates, assets, ) results = self.compute_chunk(graph, dates, assets, initial_workspace) return self._to_narrow( graph.outputs, results, results.pop(graph.screen_name), dates[extra_rows:], assets, )
[ "def", "run_pipeline", "(", "self", ",", "pipeline", ",", "start_date", ",", "end_date", ")", ":", "# See notes at the top of this module for a description of the", "# algorithm implemented here.", "if", "end_date", "<", "start_date", ":", "raise", "ValueError", "(", "\"start_date must be before or equal to end_date \\n\"", "\"start_date=%s, end_date=%s\"", "%", "(", "start_date", ",", "end_date", ")", ")", "domain", "=", "self", ".", "resolve_domain", "(", "pipeline", ")", "graph", "=", "pipeline", ".", "to_execution_plan", "(", "domain", ",", "self", ".", "_root_mask_term", ",", "start_date", ",", "end_date", ",", ")", "extra_rows", "=", "graph", ".", "extra_rows", "[", "self", ".", "_root_mask_term", "]", "root_mask", "=", "self", ".", "_compute_root_mask", "(", "domain", ",", "start_date", ",", "end_date", ",", "extra_rows", ",", ")", "dates", ",", "assets", ",", "root_mask_values", "=", "explode", "(", "root_mask", ")", "initial_workspace", "=", "self", ".", "_populate_initial_workspace", "(", "{", "self", ".", "_root_mask_term", ":", "root_mask_values", ",", "self", ".", "_root_mask_dates_term", ":", "as_column", "(", "dates", ".", "values", ")", "}", ",", "self", ".", "_root_mask_term", ",", "graph", ",", "dates", ",", "assets", ",", ")", "results", "=", "self", ".", "compute_chunk", "(", "graph", ",", "dates", ",", "assets", ",", "initial_workspace", ")", "return", "self", ".", "_to_narrow", "(", "graph", ".", "outputs", ",", "results", ",", "results", ".", "pop", "(", "graph", ".", "screen_name", ")", ",", "dates", "[", "extra_rows", ":", "]", ",", "assets", ",", ")" ]
Compute a pipeline. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` :meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
[ "Compute", "a", "pipeline", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L265-L336
train
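A hypothetical wiring sketch, not a complete program: `asset_finder`, `my_loader`, `start_date`, and `end_date` are placeholders you would obtain from your own data bundle, and the two-argument engine constructor assumes this post-domain-refactor version of zipline.

from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import SimpleMovingAverage

def choose_loader(column):
    return my_loader          # placeholder: one loader for every column

engine = SimplePipelineEngine(choose_loader, asset_finder)
pipe = Pipeline(columns={
    'sma10': SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10),
})
result = engine.run_pipeline(pipe, start_date, end_date)
# `result` is indexed by the (date, asset) pairs that passed the (absent) screen.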
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine._compute_root_mask
def _compute_root_mask(self, domain, start_date, end_date, extra_rows): """ Compute a lifetimes matrix from our AssetFinder, then drop columns that didn't exist at all during the query dates. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain for which we're computing a pipeline. start_date : pd.Timestamp Base start date for the matrix. end_date : pd.Timestamp End date for the matrix. extra_rows : int Number of extra rows to compute before `start_date`. Extra rows are needed by terms like moving averages that require a trailing window of data. Returns ------- lifetimes : pd.DataFrame Frame of dtype `bool` containing dates from `extra_rows` days before `start_date`, continuing through to `end_date`. The returned frame contains as columns all assets in our AssetFinder that existed for at least one day between `start_date` and `end_date`. """ sessions = domain.all_sessions() if start_date not in sessions: raise ValueError( "Pipeline start date ({}) is not a trading session for " "domain {}.".format(start_date, domain) ) elif end_date not in sessions: raise ValueError( "Pipeline end date {} is not a trading session for " "domain {}.".format(end_date, domain) ) start_idx, end_idx = sessions.slice_locs(start_date, end_date) if start_idx < extra_rows: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", first_date=sessions[0], lookback_start=start_date, lookback_length=extra_rows, ) # NOTE: This logic should probably be delegated to the domain once we # start adding more complex domains. # # Build lifetimes matrix reaching back to `extra_rows` days before # `start_date.` finder = self._finder lifetimes = finder.lifetimes( sessions[start_idx - extra_rows:end_idx], include_start_date=False, country_codes=(domain.country_code,), ) if not lifetimes.columns.is_unique: columns = lifetimes.columns duplicated = columns[columns.duplicated()].unique() raise AssertionError("Duplicated sids: %s" % duplicated) # Filter out columns that didn't exist from the farthest look back # window through the end of the requested dates. existed = lifetimes.any() ret = lifetimes.loc[:, existed] num_assets = ret.shape[1] if num_assets == 0: raise ValueError( "Failed to find any assets with country_code {!r} that traded " "between {} and {}.\n" "This probably means that your asset db is old or that it has " "incorrect country/exchange metadata.".format( domain.country_code, start_date, end_date, ) ) return ret
python
def _compute_root_mask(self, domain, start_date, end_date, extra_rows): """ Compute a lifetimes matrix from our AssetFinder, then drop columns that didn't exist at all during the query dates. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain for which we're computing a pipeline. start_date : pd.Timestamp Base start date for the matrix. end_date : pd.Timestamp End date for the matrix. extra_rows : int Number of extra rows to compute before `start_date`. Extra rows are needed by terms like moving averages that require a trailing window of data. Returns ------- lifetimes : pd.DataFrame Frame of dtype `bool` containing dates from `extra_rows` days before `start_date`, continuing through to `end_date`. The returned frame contains as columns all assets in our AssetFinder that existed for at least one day between `start_date` and `end_date`. """ sessions = domain.all_sessions() if start_date not in sessions: raise ValueError( "Pipeline start date ({}) is not a trading session for " "domain {}.".format(start_date, domain) ) elif end_date not in sessions: raise ValueError( "Pipeline end date {} is not a trading session for " "domain {}.".format(end_date, domain) ) start_idx, end_idx = sessions.slice_locs(start_date, end_date) if start_idx < extra_rows: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", first_date=sessions[0], lookback_start=start_date, lookback_length=extra_rows, ) # NOTE: This logic should probably be delegated to the domain once we # start adding more complex domains. # # Build lifetimes matrix reaching back to `extra_rows` days before # `start_date.` finder = self._finder lifetimes = finder.lifetimes( sessions[start_idx - extra_rows:end_idx], include_start_date=False, country_codes=(domain.country_code,), ) if not lifetimes.columns.is_unique: columns = lifetimes.columns duplicated = columns[columns.duplicated()].unique() raise AssertionError("Duplicated sids: %s" % duplicated) # Filter out columns that didn't exist from the farthest look back # window through the end of the requested dates. existed = lifetimes.any() ret = lifetimes.loc[:, existed] num_assets = ret.shape[1] if num_assets == 0: raise ValueError( "Failed to find any assets with country_code {!r} that traded " "between {} and {}.\n" "This probably means that your asset db is old or that it has " "incorrect country/exchange metadata.".format( domain.country_code, start_date, end_date, ) ) return ret
[ "def", "_compute_root_mask", "(", "self", ",", "domain", ",", "start_date", ",", "end_date", ",", "extra_rows", ")", ":", "sessions", "=", "domain", ".", "all_sessions", "(", ")", "if", "start_date", "not", "in", "sessions", ":", "raise", "ValueError", "(", "\"Pipeline start date ({}) is not a trading session for \"", "\"domain {}.\"", ".", "format", "(", "start_date", ",", "domain", ")", ")", "elif", "end_date", "not", "in", "sessions", ":", "raise", "ValueError", "(", "\"Pipeline end date {} is not a trading session for \"", "\"domain {}.\"", ".", "format", "(", "end_date", ",", "domain", ")", ")", "start_idx", ",", "end_idx", "=", "sessions", ".", "slice_locs", "(", "start_date", ",", "end_date", ")", "if", "start_idx", "<", "extra_rows", ":", "raise", "NoFurtherDataError", ".", "from_lookback_window", "(", "initial_message", "=", "\"Insufficient data to compute Pipeline:\"", ",", "first_date", "=", "sessions", "[", "0", "]", ",", "lookback_start", "=", "start_date", ",", "lookback_length", "=", "extra_rows", ",", ")", "# NOTE: This logic should probably be delegated to the domain once we", "# start adding more complex domains.", "#", "# Build lifetimes matrix reaching back to `extra_rows` days before", "# `start_date.`", "finder", "=", "self", ".", "_finder", "lifetimes", "=", "finder", ".", "lifetimes", "(", "sessions", "[", "start_idx", "-", "extra_rows", ":", "end_idx", "]", ",", "include_start_date", "=", "False", ",", "country_codes", "=", "(", "domain", ".", "country_code", ",", ")", ",", ")", "if", "not", "lifetimes", ".", "columns", ".", "unique", ":", "columns", "=", "lifetimes", ".", "columns", "duplicated", "=", "columns", "[", "columns", ".", "duplicated", "(", ")", "]", ".", "unique", "(", ")", "raise", "AssertionError", "(", "\"Duplicated sids: %d\"", "%", "duplicated", ")", "# Filter out columns that didn't exist from the farthest look back", "# window through the end of the requested dates.", "existed", "=", "lifetimes", ".", "any", "(", ")", "ret", "=", "lifetimes", ".", "loc", "[", ":", ",", "existed", "]", "num_assets", "=", "ret", ".", "shape", "[", "1", "]", "if", "num_assets", "==", "0", ":", "raise", "ValueError", "(", "\"Failed to find any assets with country_code {!r} that traded \"", "\"between {} and {}.\\n\"", "\"This probably means that your asset db is old or that it has \"", "\"incorrect country/exchange metadata.\"", ".", "format", "(", "domain", ".", "country_code", ",", "start_date", ",", "end_date", ",", ")", ")", "return", "ret" ]
Compute a lifetimes matrix from our AssetFinder, then drop columns that didn't exist at all during the query dates. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain for which we're computing a pipeline. start_date : pd.Timestamp Base start date for the matrix. end_date : pd.Timestamp End date for the matrix. extra_rows : int Number of extra rows to compute before `start_date`. Extra rows are needed by terms like moving averages that require a trailing window of data. Returns ------- lifetimes : pd.DataFrame Frame of dtype `bool` containing dates from `extra_rows` days before `start_date`, continuing through to `end_date`. The returned frame contains as columns all assets in our AssetFinder that existed for at least one day between `start_date` and `end_date`.
[ "Compute", "a", "lifetimes", "matrix", "from", "our", "AssetFinder", "then", "drop", "columns", "that", "didn", "t", "exist", "at", "all", "during", "the", "query", "dates", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L356-L439
train
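A toy illustration of the lifetimes filtering step, using plain pandas rather than a real AssetFinder:

import pandas as pd

sessions = pd.date_range('2014-01-06', periods=4, freq='B')
lifetimes = pd.DataFrame(
    {1: [True] * 4, 2: [False, False, True, True], 3: [False] * 4},
    index=sessions,
)
existed = lifetimes.any()              # per asset: traded at least one day?
root_mask = lifetimes.loc[:, existed]  # asset 3 is dropped, as in the code above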
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine.compute_chunk
def compute_chunk(self, graph, dates, sids, initial_workspace): """ Compute the Pipeline terms in the graph for the requested start and end dates. This is where we do the actual work of running a pipeline. Parameters ---------- graph : zipline.pipeline.graph.ExecutionPlan Dependency graph of the terms to be executed. dates : pd.DatetimeIndex Row labels for our root mask. assets : pd.Int64Index Column labels for our root mask. initial_workspace : dict Map from term -> output. Must contain at least entry for `self._root_mask_term` whose shape is `(len(dates), len(assets))`, but may contain additional pre-computed terms for testing or optimization purposes. Returns ------- results : dict Dictionary mapping requested results to outputs. """ self._validate_compute_chunk_params( graph, dates, sids, initial_workspace, ) get_loader = self._get_loader # Copy the supplied initial workspace so we don't mutate it in place. workspace = initial_workspace.copy() refcounts = graph.initial_refcounts(workspace) execution_order = graph.execution_order(refcounts) domain = graph.domain # Many loaders can fetch data more efficiently if we ask them to # retrieve all their inputs at once. For example, a loader backed by a # SQL database can fetch multiple columns from the database in a single # query. # # To enable these loaders to fetch their data efficiently, we group # together requests for LoadableTerms if they are provided by the same # loader and they require the same number of extra rows. # # The extra rows condition is a simplification: we don't currently have # a mechanism for asking a loader to fetch different windows of data # for different terms, so we only batch requests together when they're # going to produce data for the same set of dates. That may change in # the future if we find a loader that can still benefit significantly # from batching unequal-length requests. def loader_group_key(term): loader = get_loader(term) extra_rows = graph.extra_rows[term] return loader, extra_rows # Only produce loader groups for the terms we expect to load. This # ensures that we can run pipelines for graphs where we don't have a # loader registered for an atomic term if all the dependencies of that # term were supplied in the initial workspace. will_be_loaded = graph.loadable_terms - viewkeys(workspace) loader_groups = groupby( loader_group_key, (t for t in execution_order if t in will_be_loaded), ) for term in graph.execution_order(refcounts): # `term` may have been supplied in `initial_workspace`, and in the # future we may pre-compute loadable terms coming from the same # dataset. In either case, we will already have an entry for this # term, which we shouldn't re-compute. if term in workspace: continue # Asset labels are always the same, but date labels vary by how # many extra rows are needed. 
mask, mask_dates = graph.mask_and_dates_for_term( term, self._root_mask_term, workspace, dates, ) if isinstance(term, LoadableTerm): loader = get_loader(term) to_load = sorted( loader_groups[loader_group_key(term)], key=lambda t: t.dataset ) loaded = loader.load_adjusted_array( domain, to_load, mask_dates, sids, mask, ) assert set(loaded) == set(to_load), ( 'loader did not return an AdjustedArray for each column\n' 'expected: %r\n' 'got: %r' % (sorted(to_load), sorted(loaded)) ) workspace.update(loaded) else: workspace[term] = term._compute( self._inputs_for_term(term, workspace, graph, domain), mask_dates, sids, mask, ) if term.ndim == 2: assert workspace[term].shape == mask.shape else: assert workspace[term].shape == (mask.shape[0], 1) # Decref dependencies of ``term``, and clear any terms whose # refcounts hit 0. for garbage_term in graph.decref_dependencies(term, refcounts): del workspace[garbage_term] # At this point, all the output terms are in the workspace. out = {} graph_extra_rows = graph.extra_rows for name, term in iteritems(graph.outputs): # Truncate off extra rows from outputs. out[name] = workspace[term][graph_extra_rows[term]:] return out
python
def compute_chunk(self, graph, dates, sids, initial_workspace): """ Compute the Pipeline terms in the graph for the requested start and end dates. This is where we do the actual work of running a pipeline. Parameters ---------- graph : zipline.pipeline.graph.ExecutionPlan Dependency graph of the terms to be executed. dates : pd.DatetimeIndex Row labels for our root mask. assets : pd.Int64Index Column labels for our root mask. initial_workspace : dict Map from term -> output. Must contain at least entry for `self._root_mask_term` whose shape is `(len(dates), len(assets))`, but may contain additional pre-computed terms for testing or optimization purposes. Returns ------- results : dict Dictionary mapping requested results to outputs. """ self._validate_compute_chunk_params( graph, dates, sids, initial_workspace, ) get_loader = self._get_loader # Copy the supplied initial workspace so we don't mutate it in place. workspace = initial_workspace.copy() refcounts = graph.initial_refcounts(workspace) execution_order = graph.execution_order(refcounts) domain = graph.domain # Many loaders can fetch data more efficiently if we ask them to # retrieve all their inputs at once. For example, a loader backed by a # SQL database can fetch multiple columns from the database in a single # query. # # To enable these loaders to fetch their data efficiently, we group # together requests for LoadableTerms if they are provided by the same # loader and they require the same number of extra rows. # # The extra rows condition is a simplification: we don't currently have # a mechanism for asking a loader to fetch different windows of data # for different terms, so we only batch requests together when they're # going to produce data for the same set of dates. That may change in # the future if we find a loader that can still benefit significantly # from batching unequal-length requests. def loader_group_key(term): loader = get_loader(term) extra_rows = graph.extra_rows[term] return loader, extra_rows # Only produce loader groups for the terms we expect to load. This # ensures that we can run pipelines for graphs where we don't have a # loader registered for an atomic term if all the dependencies of that # term were supplied in the initial workspace. will_be_loaded = graph.loadable_terms - viewkeys(workspace) loader_groups = groupby( loader_group_key, (t for t in execution_order if t in will_be_loaded), ) for term in graph.execution_order(refcounts): # `term` may have been supplied in `initial_workspace`, and in the # future we may pre-compute loadable terms coming from the same # dataset. In either case, we will already have an entry for this # term, which we shouldn't re-compute. if term in workspace: continue # Asset labels are always the same, but date labels vary by how # many extra rows are needed. 
mask, mask_dates = graph.mask_and_dates_for_term( term, self._root_mask_term, workspace, dates, ) if isinstance(term, LoadableTerm): loader = get_loader(term) to_load = sorted( loader_groups[loader_group_key(term)], key=lambda t: t.dataset ) loaded = loader.load_adjusted_array( domain, to_load, mask_dates, sids, mask, ) assert set(loaded) == set(to_load), ( 'loader did not return an AdjustedArray for each column\n' 'expected: %r\n' 'got: %r' % (sorted(to_load), sorted(loaded)) ) workspace.update(loaded) else: workspace[term] = term._compute( self._inputs_for_term(term, workspace, graph, domain), mask_dates, sids, mask, ) if term.ndim == 2: assert workspace[term].shape == mask.shape else: assert workspace[term].shape == (mask.shape[0], 1) # Decref dependencies of ``term``, and clear any terms whose # refcounts hit 0. for garbage_term in graph.decref_dependencies(term, refcounts): del workspace[garbage_term] # At this point, all the output terms are in the workspace. out = {} graph_extra_rows = graph.extra_rows for name, term in iteritems(graph.outputs): # Truncate off extra rows from outputs. out[name] = workspace[term][graph_extra_rows[term]:] return out
[ "def", "compute_chunk", "(", "self", ",", "graph", ",", "dates", ",", "sids", ",", "initial_workspace", ")", ":", "self", ".", "_validate_compute_chunk_params", "(", "graph", ",", "dates", ",", "sids", ",", "initial_workspace", ",", ")", "get_loader", "=", "self", ".", "_get_loader", "# Copy the supplied initial workspace so we don't mutate it in place.", "workspace", "=", "initial_workspace", ".", "copy", "(", ")", "refcounts", "=", "graph", ".", "initial_refcounts", "(", "workspace", ")", "execution_order", "=", "graph", ".", "execution_order", "(", "refcounts", ")", "domain", "=", "graph", ".", "domain", "# Many loaders can fetch data more efficiently if we ask them to", "# retrieve all their inputs at once. For example, a loader backed by a", "# SQL database can fetch multiple columns from the database in a single", "# query.", "#", "# To enable these loaders to fetch their data efficiently, we group", "# together requests for LoadableTerms if they are provided by the same", "# loader and they require the same number of extra rows.", "#", "# The extra rows condition is a simplification: we don't currently have", "# a mechanism for asking a loader to fetch different windows of data", "# for different terms, so we only batch requests together when they're", "# going to produce data for the same set of dates. That may change in", "# the future if we find a loader that can still benefit significantly", "# from batching unequal-length requests.", "def", "loader_group_key", "(", "term", ")", ":", "loader", "=", "get_loader", "(", "term", ")", "extra_rows", "=", "graph", ".", "extra_rows", "[", "term", "]", "return", "loader", ",", "extra_rows", "# Only produce loader groups for the terms we expect to load. This", "# ensures that we can run pipelines for graphs where we don't have a", "# loader registered for an atomic term if all the dependencies of that", "# term were supplied in the initial workspace.", "will_be_loaded", "=", "graph", ".", "loadable_terms", "-", "viewkeys", "(", "workspace", ")", "loader_groups", "=", "groupby", "(", "loader_group_key", ",", "(", "t", "for", "t", "in", "execution_order", "if", "t", "in", "will_be_loaded", ")", ",", ")", "for", "term", "in", "graph", ".", "execution_order", "(", "refcounts", ")", ":", "# `term` may have been supplied in `initial_workspace`, and in the", "# future we may pre-compute loadable terms coming from the same", "# dataset. 
In either case, we will already have an entry for this", "# term, which we shouldn't re-compute.", "if", "term", "in", "workspace", ":", "continue", "# Asset labels are always the same, but date labels vary by how", "# many extra rows are needed.", "mask", ",", "mask_dates", "=", "graph", ".", "mask_and_dates_for_term", "(", "term", ",", "self", ".", "_root_mask_term", ",", "workspace", ",", "dates", ",", ")", "if", "isinstance", "(", "term", ",", "LoadableTerm", ")", ":", "loader", "=", "get_loader", "(", "term", ")", "to_load", "=", "sorted", "(", "loader_groups", "[", "loader_group_key", "(", "term", ")", "]", ",", "key", "=", "lambda", "t", ":", "t", ".", "dataset", ")", "loaded", "=", "loader", ".", "load_adjusted_array", "(", "domain", ",", "to_load", ",", "mask_dates", ",", "sids", ",", "mask", ",", ")", "assert", "set", "(", "loaded", ")", "==", "set", "(", "to_load", ")", ",", "(", "'loader did not return an AdjustedArray for each column\\n'", "'expected: %r\\n'", "'got: %r'", "%", "(", "sorted", "(", "to_load", ")", ",", "sorted", "(", "loaded", ")", ")", ")", "workspace", ".", "update", "(", "loaded", ")", "else", ":", "workspace", "[", "term", "]", "=", "term", ".", "_compute", "(", "self", ".", "_inputs_for_term", "(", "term", ",", "workspace", ",", "graph", ",", "domain", ")", ",", "mask_dates", ",", "sids", ",", "mask", ",", ")", "if", "term", ".", "ndim", "==", "2", ":", "assert", "workspace", "[", "term", "]", ".", "shape", "==", "mask", ".", "shape", "else", ":", "assert", "workspace", "[", "term", "]", ".", "shape", "==", "(", "mask", ".", "shape", "[", "0", "]", ",", "1", ")", "# Decref dependencies of ``term``, and clear any terms whose", "# refcounts hit 0.", "for", "garbage_term", "in", "graph", ".", "decref_dependencies", "(", "term", ",", "refcounts", ")", ":", "del", "workspace", "[", "garbage_term", "]", "# At this point, all the output terms are in the workspace.", "out", "=", "{", "}", "graph_extra_rows", "=", "graph", ".", "extra_rows", "for", "name", ",", "term", "in", "iteritems", "(", "graph", ".", "outputs", ")", ":", "# Truncate off extra rows from outputs.", "out", "[", "name", "]", "=", "workspace", "[", "term", "]", "[", "graph_extra_rows", "[", "term", "]", ":", "]", "return", "out" ]
Compute the Pipeline terms in the graph for the requested start and end dates. This is where we do the actual work of running a pipeline. Parameters ---------- graph : zipline.pipeline.graph.ExecutionPlan Dependency graph of the terms to be executed. dates : pd.DatetimeIndex Row labels for our root mask. sids : pd.Int64Index Column labels for our root mask. initial_workspace : dict Map from term -> output. Must contain at least an entry for `self._root_mask_term` whose shape is `(len(dates), len(sids))`, but may contain additional pre-computed terms for testing or optimization purposes. Returns ------- results : dict Dictionary mapping requested results to outputs.
[ "Compute", "the", "Pipeline", "terms", "in", "the", "graph", "for", "the", "requested", "start", "and", "end", "dates", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L484-L606
train
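The batching rule described in the comments groups loadable terms by `(loader, extra_rows)`; a toy sketch of that grouping with `toolz.groupby` (the term dicts here are illustrative, not real pipeline terms):

from toolz import groupby

terms = [
    {'name': 'close',    'loader': 'pricing',      'extra_rows': 10},
    {'name': 'volume',   'loader': 'pricing',      'extra_rows': 10},
    {'name': 'close_30', 'loader': 'pricing',      'extra_rows': 30},
    {'name': 'eps',      'loader': 'fundamentals', 'extra_rows': 10},
]
groups = groupby(lambda t: (t['loader'], t['extra_rows']), terms)
# close and volume share a group, so one loader call fetches both;
# close_30 needs a longer window and is fetched separately.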
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine._to_narrow
def _to_narrow(self, terms, data, mask, dates, assets): """ Convert raw computed pipeline results into a DataFrame for public APIs. Parameters ---------- terms : dict[str -> Term] Dict mapping column names to terms. data : dict[str -> ndarray[ndim=2]] Dict mapping column names to computed results for those names. mask : ndarray[bool, ndim=2] Mask array of values to keep. dates : ndarray[datetime64, ndim=1] Row index for arrays `data` and `mask` assets : ndarray[int64, ndim=2] Column index for arrays `data` and `mask` Returns ------- results : pd.DataFrame The indices of `results` are as follows: index : two-tiered MultiIndex of (date, asset). Contains an entry for each (date, asset) pair corresponding to a `True` value in `mask`. columns : Index of str One column per entry in `data`. If mask[date, asset] is True, then result.loc[(date, asset), colname] will contain the value of data[colname][date, asset]. """ if not mask.any(): # Manually handle the empty DataFrame case. This is a workaround # to pandas failing to tz_localize an empty dataframe with a # MultiIndex. It also saves us the work of applying a known-empty # mask to each array. # # Slicing `dates` here to preserve pandas metadata. empty_dates = dates[:0] empty_assets = array([], dtype=object) return DataFrame( data={ name: array([], dtype=arr.dtype) for name, arr in iteritems(data) }, index=MultiIndex.from_arrays([empty_dates, empty_assets]), ) resolved_assets = array(self._finder.retrieve_all(assets)) dates_kept = repeat_last_axis(dates.values, len(assets))[mask] assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask] final_columns = {} for name in data: # Each term that computed an output has its postprocess method # called on the filtered result. # # As of Mon May 2 15:38:47 2016, we only use this to convert # LabelArrays into categoricals. final_columns[name] = terms[name].postprocess(data[name][mask]) return DataFrame( data=final_columns, index=MultiIndex.from_arrays([dates_kept, assets_kept]), ).tz_localize('UTC', level=0)
python
def _to_narrow(self, terms, data, mask, dates, assets): """ Convert raw computed pipeline results into a DataFrame for public APIs. Parameters ---------- terms : dict[str -> Term] Dict mapping column names to terms. data : dict[str -> ndarray[ndim=2]] Dict mapping column names to computed results for those names. mask : ndarray[bool, ndim=2] Mask array of values to keep. dates : ndarray[datetime64, ndim=1] Row index for arrays `data` and `mask` assets : ndarray[int64, ndim=2] Column index for arrays `data` and `mask` Returns ------- results : pd.DataFrame The indices of `results` are as follows: index : two-tiered MultiIndex of (date, asset). Contains an entry for each (date, asset) pair corresponding to a `True` value in `mask`. columns : Index of str One column per entry in `data`. If mask[date, asset] is True, then result.loc[(date, asset), colname] will contain the value of data[colname][date, asset]. """ if not mask.any(): # Manually handle the empty DataFrame case. This is a workaround # to pandas failing to tz_localize an empty dataframe with a # MultiIndex. It also saves us the work of applying a known-empty # mask to each array. # # Slicing `dates` here to preserve pandas metadata. empty_dates = dates[:0] empty_assets = array([], dtype=object) return DataFrame( data={ name: array([], dtype=arr.dtype) for name, arr in iteritems(data) }, index=MultiIndex.from_arrays([empty_dates, empty_assets]), ) resolved_assets = array(self._finder.retrieve_all(assets)) dates_kept = repeat_last_axis(dates.values, len(assets))[mask] assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask] final_columns = {} for name in data: # Each term that computed an output has its postprocess method # called on the filtered result. # # As of Mon May 2 15:38:47 2016, we only use this to convert # LabelArrays into categoricals. final_columns[name] = terms[name].postprocess(data[name][mask]) return DataFrame( data=final_columns, index=MultiIndex.from_arrays([dates_kept, assets_kept]), ).tz_localize('UTC', level=0)
[ "def", "_to_narrow", "(", "self", ",", "terms", ",", "data", ",", "mask", ",", "dates", ",", "assets", ")", ":", "if", "not", "mask", ".", "any", "(", ")", ":", "# Manually handle the empty DataFrame case. This is a workaround", "# to pandas failing to tz_localize an empty dataframe with a", "# MultiIndex. It also saves us the work of applying a known-empty", "# mask to each array.", "#", "# Slicing `dates` here to preserve pandas metadata.", "empty_dates", "=", "dates", "[", ":", "0", "]", "empty_assets", "=", "array", "(", "[", "]", ",", "dtype", "=", "object", ")", "return", "DataFrame", "(", "data", "=", "{", "name", ":", "array", "(", "[", "]", ",", "dtype", "=", "arr", ".", "dtype", ")", "for", "name", ",", "arr", "in", "iteritems", "(", "data", ")", "}", ",", "index", "=", "MultiIndex", ".", "from_arrays", "(", "[", "empty_dates", ",", "empty_assets", "]", ")", ",", ")", "resolved_assets", "=", "array", "(", "self", ".", "_finder", ".", "retrieve_all", "(", "assets", ")", ")", "dates_kept", "=", "repeat_last_axis", "(", "dates", ".", "values", ",", "len", "(", "assets", ")", ")", "[", "mask", "]", "assets_kept", "=", "repeat_first_axis", "(", "resolved_assets", ",", "len", "(", "dates", ")", ")", "[", "mask", "]", "final_columns", "=", "{", "}", "for", "name", "in", "data", ":", "# Each term that computed an output has its postprocess method", "# called on the filtered result.", "#", "# As of Mon May 2 15:38:47 2016, we only use this to convert", "# LabelArrays into categoricals.", "final_columns", "[", "name", "]", "=", "terms", "[", "name", "]", ".", "postprocess", "(", "data", "[", "name", "]", "[", "mask", "]", ")", "return", "DataFrame", "(", "data", "=", "final_columns", ",", "index", "=", "MultiIndex", ".", "from_arrays", "(", "[", "dates_kept", ",", "assets_kept", "]", ")", ",", ")", ".", "tz_localize", "(", "'UTC'", ",", "level", "=", "0", ")" ]
Convert raw computed pipeline results into a DataFrame for public APIs. Parameters ---------- terms : dict[str -> Term] Dict mapping column names to terms. data : dict[str -> ndarray[ndim=2]] Dict mapping column names to computed results for those names. mask : ndarray[bool, ndim=2] Mask array of values to keep. dates : ndarray[datetime64, ndim=1] Row index for arrays `data` and `mask`. assets : ndarray[int64, ndim=1] Column index for arrays `data` and `mask`. Returns ------- results : pd.DataFrame The indices of `results` are as follows: index : two-tiered MultiIndex of (date, asset). Contains an entry for each (date, asset) pair corresponding to a `True` value in `mask`. columns : Index of str One column per entry in `data`. If mask[date, asset] is True, then result.loc[(date, asset), colname] will contain the value of data[colname][date, asset].
[ "Convert", "raw", "computed", "pipeline", "results", "into", "a", "DataFrame", "for", "public", "APIs", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L608-L672
train
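A sketch of how the restride helpers pair each kept mask cell with its (date, asset) labels; the import path matches the numpy_utils records above:

import numpy as np
import pandas as pd
from zipline.utils.numpy_utils import repeat_first_axis, repeat_last_axis

dates = pd.date_range('2014-01-06', periods=2).values   # shape (2,)
assets = np.array(['A', 'B', 'C'], dtype=object)        # shape (3,)
mask = np.array([[True, False, True],
                 [True, True, False]])

dates_kept = repeat_last_axis(dates, len(assets))[mask]    # one date per True
assets_kept = repeat_first_axis(assets, len(dates))[mask]  # one asset per True
index = pd.MultiIndex.from_arrays([dates_kept, assets_kept])
# -> (d0, 'A'), (d0, 'C'), (d1, 'A'), (d1, 'B')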
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine._validate_compute_chunk_params
def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace): """ Verify that the values passed to compute_chunk are well-formed. """ root = self._root_mask_term clsname = type(self).__name__ # Writing this out explicitly so this errors in testing if we change # the name without updating this line. compute_chunk_name = self.compute_chunk.__name__ if root not in initial_workspace: raise AssertionError( "root_mask values not supplied to {cls}.{method}".format( cls=clsname, method=compute_chunk_name, ) ) shape = initial_workspace[root].shape implied_shape = len(dates), len(sids) if shape != implied_shape: raise AssertionError( "root_mask shape is {shape}, but received dates/assets " "imply that shape should be {implied}".format( shape=shape, implied=implied_shape, ) ) for term in initial_workspace: if self._is_special_root_term(term): continue if term.domain is GENERIC: # XXX: We really shouldn't allow **any** generic terms to be # populated in the initial workspace. A generic term, by # definition, can't correspond to concrete data until it's # paired with a domain, and populate_initial_workspace isn't # given the domain of execution, so it can't possibly know what # data to use when populating a generic term. # # In our current implementation, however, we don't have a good # way to represent specializations of ComputableTerms that take # only generic inputs, so there's no good way for the initial # workspace to provide data for such terms except by populating # the generic ComputableTerm. # # The right fix for the above is to implement "full # specialization", i.e., implementing ``specialize`` uniformly # across all terms, not just LoadableTerms. Having full # specialization will also remove the need for all of the # remaining ``maybe_specialize`` calls floating around in this # file. # # In the meantime, disallowing ComputableTerms in the initial # workspace would break almost every test in # `test_filter`/`test_factor`/`test_classifier`, and fixing # them would require updating all those tests to compute with # more specialized terms. Once we have full specialization, we # can fix all the tests without a large volume of edits by # simply specializing their workspaces, so for now I'm leaving # this in place as a somewhat sharp edge. if isinstance(term, LoadableTerm): raise ValueError( "Loadable workspace terms must be specialized to a " "domain, but got generic term {}".format(term) ) elif term.domain != graph.domain: raise ValueError( "Initial workspace term {} has domain {}. " "Does not match pipeline domain {}".format( term, term.domain, graph.domain, ) )
python
def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace): """ Verify that the values passed to compute_chunk are well-formed. """ root = self._root_mask_term clsname = type(self).__name__ # Writing this out explicitly so this errors in testing if we change # the name without updating this line. compute_chunk_name = self.compute_chunk.__name__ if root not in initial_workspace: raise AssertionError( "root_mask values not supplied to {cls}.{method}".format( cls=clsname, method=compute_chunk_name, ) ) shape = initial_workspace[root].shape implied_shape = len(dates), len(sids) if shape != implied_shape: raise AssertionError( "root_mask shape is {shape}, but received dates/assets " "imply that shape should be {implied}".format( shape=shape, implied=implied_shape, ) ) for term in initial_workspace: if self._is_special_root_term(term): continue if term.domain is GENERIC: # XXX: We really shouldn't allow **any** generic terms to be # populated in the initial workspace. A generic term, by # definition, can't correspond to concrete data until it's # paired with a domain, and populate_initial_workspace isn't # given the domain of execution, so it can't possibly know what # data to use when populating a generic term. # # In our current implementation, however, we don't have a good # way to represent specializations of ComputableTerms that take # only generic inputs, so there's no good way for the initial # workspace to provide data for such terms except by populating # the generic ComputableTerm. # # The right fix for the above is to implement "full # specialization", i.e., implementing ``specialize`` uniformly # across all terms, not just LoadableTerms. Having full # specialization will also remove the need for all of the # remaining ``maybe_specialize`` calls floating around in this # file. # # In the meantime, disallowing ComputableTerms in the initial # workspace would break almost every test in # `test_filter`/`test_factor`/`test_classifier`, and fixing # them would require updating all those tests to compute with # more specialized terms. Once we have full specialization, we # can fix all the tests without a large volume of edits by # simply specializing their workspaces, so for now I'm leaving # this in place as a somewhat sharp edge. if isinstance(term, LoadableTerm): raise ValueError( "Loadable workspace terms must be specialized to a " "domain, but got generic term {}".format(term) ) elif term.domain != graph.domain: raise ValueError( "Initial workspace term {} has domain {}. " "Does not match pipeline domain {}".format( term, term.domain, graph.domain, ) )
[ "def", "_validate_compute_chunk_params", "(", "self", ",", "graph", ",", "dates", ",", "sids", ",", "initial_workspace", ")", ":", "root", "=", "self", ".", "_root_mask_term", "clsname", "=", "type", "(", "self", ")", ".", "__name__", "# Writing this out explicitly so this errors in testing if we change", "# the name without updating this line.", "compute_chunk_name", "=", "self", ".", "compute_chunk", ".", "__name__", "if", "root", "not", "in", "initial_workspace", ":", "raise", "AssertionError", "(", "\"root_mask values not supplied to {cls}.{method}\"", ".", "format", "(", "cls", "=", "clsname", ",", "method", "=", "compute_chunk_name", ",", ")", ")", "shape", "=", "initial_workspace", "[", "root", "]", ".", "shape", "implied_shape", "=", "len", "(", "dates", ")", ",", "len", "(", "sids", ")", "if", "shape", "!=", "implied_shape", ":", "raise", "AssertionError", "(", "\"root_mask shape is {shape}, but received dates/assets \"", "\"imply that shape should be {implied}\"", ".", "format", "(", "shape", "=", "shape", ",", "implied", "=", "implied_shape", ",", ")", ")", "for", "term", "in", "initial_workspace", ":", "if", "self", ".", "_is_special_root_term", "(", "term", ")", ":", "continue", "if", "term", ".", "domain", "is", "GENERIC", ":", "# XXX: We really shouldn't allow **any** generic terms to be", "# populated in the initial workspace. A generic term, by", "# definition, can't correspond to concrete data until it's", "# paired with a domain, and populate_initial_workspace isn't", "# given the domain of execution, so it can't possibly know what", "# data to use when populating a generic term.", "#", "# In our current implementation, however, we don't have a good", "# way to represent specializations of ComputableTerms that take", "# only generic inputs, so there's no good way for the initial", "# workspace to provide data for such terms except by populating", "# the generic ComputableTerm.", "#", "# The right fix for the above is to implement \"full", "# specialization\", i.e., implementing ``specialize`` uniformly", "# across all terms, not just LoadableTerms. Having full", "# specialization will also remove the need for all of the", "# remaining ``maybe_specialize`` calls floating around in this", "# file.", "#", "# In the meantime, disallowing ComputableTerms in the initial", "# workspace would break almost every test in", "# `test_filter`/`test_factor`/`test_classifier`, and fixing", "# them would require updating all those tests to compute with", "# more specialized terms. Once we have full specialization, we", "# can fix all the tests without a large volume of edits by", "# simply specializing their workspaces, so for now I'm leaving", "# this in place as a somewhat sharp edge.", "if", "isinstance", "(", "term", ",", "LoadableTerm", ")", ":", "raise", "ValueError", "(", "\"Loadable workspace terms must be specialized to a \"", "\"domain, but got generic term {}\"", ".", "format", "(", "term", ")", ")", "elif", "term", ".", "domain", "!=", "graph", ".", "domain", ":", "raise", "ValueError", "(", "\"Initial workspace term {} has domain {}. \"", "\"Does not match pipeline domain {}\"", ".", "format", "(", "term", ",", "term", ".", "domain", ",", "graph", ".", "domain", ",", ")", ")" ]
Verify that the values passed to compute_chunk are well-formed.
[ "Verify", "that", "the", "values", "passed", "to", "compute_chunk", "are", "well", "-", "formed", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L674-L752
train
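The shape check above is easy to demonstrate outside of zipline. Below is a minimal standalone sketch of the same root-mask validation using plain numpy; the names check_root_mask, dates, sids, and workspace are illustrative, not zipline API.

import numpy as np

def check_root_mask(workspace, root, dates, sids):
    # Mirror of the two assertions in _validate_compute_chunk_params: the
    # root mask must be present, and its shape must be (num dates, num sids).
    if root not in workspace:
        raise AssertionError("root_mask values not supplied")
    implied_shape = (len(dates), len(sids))
    if workspace[root].shape != implied_shape:
        raise AssertionError(
            "root_mask shape is %s, but dates/assets imply %s"
            % (workspace[root].shape, implied_shape)
        )

dates = ['2014-01-02', '2014-01-03', '2014-01-06']
sids = [1, 2]
check_root_mask({'root': np.ones((3, 2), dtype=bool)}, 'root', dates, sids)   # passes
# check_root_mask({'root': np.ones((3, 3))}, 'root', dates, sids)  # would raise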
quantopian/zipline
zipline/pipeline/engine.py
SimplePipelineEngine.resolve_domain
def resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """ domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: raise ValueError( "Unable to determine domain for Pipeline.\n" "Pass domain=<desired domain> to your Pipeline to set a " "domain." ) return domain
python
def resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """ domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: raise ValueError( "Unable to determine domain for Pipeline.\n" "Pass domain=<desired domain> to your Pipeline to set a " "domain." ) return domain
[ "def", "resolve_domain", "(", "self", ",", "pipeline", ")", ":", "domain", "=", "pipeline", ".", "domain", "(", "default", "=", "self", ".", "_default_domain", ")", "if", "domain", "is", "GENERIC", ":", "raise", "ValueError", "(", "\"Unable to determine domain for Pipeline.\\n\"", "\"Pass domain=<desired domain> to your Pipeline to set a \"", "\"domain.\"", ")", "return", "domain" ]
Resolve a concrete domain for ``pipeline``.
[ "Resolve", "a", "concrete", "domain", "for", "pipeline", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/engine.py#L754-L764
train
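As a rough sketch of the fallback behavior (in zipline the real work is delegated to ``Pipeline.domain(default=...)``), the resolution order is: the pipeline's own domain if it has one, else the engine default, and a hard error if both are generic. The sentinel and names below are illustrative stand-ins, not zipline API.

GENERIC = object()  # stand-in for zipline.pipeline.domain.GENERIC

def resolve(pipeline_domain, default_domain):
    # Prefer the pipeline's explicit domain; fall back to the engine default.
    domain = pipeline_domain if pipeline_domain is not GENERIC else default_domain
    if domain is GENERIC:
        raise ValueError("Unable to determine domain for Pipeline.")
    return domain

US_EQUITIES = "US_EQUITIES"  # illustrative placeholder
assert resolve(GENERIC, US_EQUITIES) == US_EQUITIES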
quantopian/zipline
zipline/utils/api_support.py
require_initialized
def require_initialized(exception): """ Decorator for API methods that should only be called after TradingAlgorithm.initialize. `exception` will be raised if the method is called before initialize has completed. Examples -------- @require_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed after initialize. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if not self.initialized: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator
python
def require_initialized(exception): """ Decorator for API methods that should only be called after TradingAlgorithm.initialize. `exception` will be raised if the method is called before initialize has completed. Examples -------- @require_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed after initialize. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if not self.initialized: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator
[ "def", "require_initialized", "(", "exception", ")", ":", "def", "decorator", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapped_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "initialized", ":", "raise", "exception", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_method", "return", "decorator" ]
Decorator for API methods that should only be called after TradingAlgorithm.initialize. `exception` will be raised if the method is called before initialize has completed. Examples -------- @require_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed after initialize.
[ "Decorator", "for", "API", "methods", "that", "should", "only", "be", "called", "after", "TradingAlgorithm", ".", "initialize", ".", "exception", "will", "be", "raised", "if", "the", "method", "is", "called", "before", "initialize", "has", "completed", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/api_support.py#L86-L105
train
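A runnable sketch of the decorator in use, assuming ``require_initialized`` as defined above is in scope; MyAlgo, NotInitialized, and rebalance are illustrative names, not zipline API.

class NotInitialized(Exception):
    pass

class MyAlgo(object):
    initialized = False

    @require_initialized(NotInitialized("call initialize() first"))
    def rebalance(self):
        return "rebalanced"

algo = MyAlgo()
try:
    algo.rebalance()          # raises: initialized is still False
except NotInitialized:
    pass
algo.initialized = True
assert algo.rebalance() == "rebalanced"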
quantopian/zipline
zipline/utils/api_support.py
disallowed_in_before_trading_start
def disallowed_in_before_trading_start(exception): """ Decorator for API methods that cannot be called from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called inside `before_trading_start`. Examples -------- @disallowed_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is not allowed inside before_trading_start. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if self._in_before_trading_start: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator
python
def disallowed_in_before_trading_start(exception): """ Decorator for API methods that cannot be called from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called inside `before_trading_start`. Examples -------- @disallowed_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is not allowed inside before_trading_start. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if self._in_before_trading_start: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator
[ "def", "disallowed_in_before_trading_start", "(", "exception", ")", ":", "def", "decorator", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapped_method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_in_before_trading_start", ":", "raise", "exception", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_method", "return", "decorator" ]
Decorator for API methods that cannot be called from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called inside `before_trading_start`. Examples -------- @disallowed_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is not allowed inside before_trading_start.
[ "Decorator", "for", "API", "methods", "that", "cannot", "be", "called", "from", "within", "TradingAlgorithm", ".", "before_trading_start", ".", "exception", "will", "be", "raised", "if", "the", "method", "is", "called", "inside", "before_trading_start", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/api_support.py#L108-L127
train
quantopian/zipline
zipline/lib/normalize.py
naive_grouped_rowwise_apply
def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None): """ Simple implementation of grouped row-wise function application. Parameters ---------- data : ndarray[ndim=2] Input array over which to apply a grouped function. group_labels : ndarray[ndim=2, dtype=int64] Labels to use to bucket inputs from array. Should be the same shape as array. func : function[ndarray[ndim=1]] -> ndarray[ndim=1] Function to apply to pieces of each row in array. func_args : tuple Additional positional arguments to pass to ``func`` for each row. out : ndarray, optional Array into which to write output. If not supplied, a new array of the same shape as ``data`` is allocated and returned. Examples -------- >>> data = np.array([[1., 2., 3.], ... [2., 3., 4.], ... [5., 6., 7.]]) >>> labels = np.array([[0, 0, 1], ... [0, 1, 0], ... [1, 0, 2]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min()) array([[ 0., 1., 0.], [ 0., 0., 2.], [ 0., 0., 0.]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum()) array([[ 0.33333333, 0.66666667, 1. ], [ 0.33333333, 1. , 0.66666667], [ 1. , 1. , 1. ]]) """ if out is None: out = np.empty_like(data) for (row, label_row, out_row) in zip(data, group_labels, out): for label in np.unique(label_row): locs = (label_row == label) out_row[locs] = func(row[locs], *func_args) return out
python
def naive_grouped_rowwise_apply(data, group_labels, func, func_args=(), out=None): """ Simple implementation of grouped row-wise function application. Parameters ---------- data : ndarray[ndim=2] Input array over which to apply a grouped function. group_labels : ndarray[ndim=2, dtype=int64] Labels to use to bucket inputs from array. Should be the same shape as array. func : function[ndarray[ndim=1]] -> ndarray[ndim=1] Function to apply to pieces of each row in array. func_args : tuple Additional positional arguments to pass to ``func`` for each row. out : ndarray, optional Array into which to write output. If not supplied, a new array of the same shape as ``data`` is allocated and returned. Examples -------- >>> data = np.array([[1., 2., 3.], ... [2., 3., 4.], ... [5., 6., 7.]]) >>> labels = np.array([[0, 0, 1], ... [0, 1, 0], ... [1, 0, 2]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min()) array([[ 0., 1., 0.], [ 0., 0., 2.], [ 0., 0., 0.]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum()) array([[ 0.33333333, 0.66666667, 1. ], [ 0.33333333, 1. , 0.66666667], [ 1. , 1. , 1. ]]) """ if out is None: out = np.empty_like(data) for (row, label_row, out_row) in zip(data, group_labels, out): for label in np.unique(label_row): locs = (label_row == label) out_row[locs] = func(row[locs], *func_args) return out
[ "def", "naive_grouped_rowwise_apply", "(", "data", ",", "group_labels", ",", "func", ",", "func_args", "=", "(", ")", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "np", ".", "empty_like", "(", "data", ")", "for", "(", "row", ",", "label_row", ",", "out_row", ")", "in", "zip", "(", "data", ",", "group_labels", ",", "out", ")", ":", "for", "label", "in", "np", ".", "unique", "(", "label_row", ")", ":", "locs", "=", "(", "label_row", "==", "label", ")", "out_row", "[", "locs", "]", "=", "func", "(", "row", "[", "locs", "]", ",", "*", "func_args", ")", "return", "out" ]
Simple implementation of grouped row-wise function application. Parameters ---------- data : ndarray[ndim=2] Input array over which to apply a grouped function. group_labels : ndarray[ndim=2, dtype=int64] Labels to use to bucket inputs from array. Should be the same shape as array. func : function[ndarray[ndim=1]] -> ndarray[ndim=1] Function to apply to pieces of each row in array. func_args : tuple Additional positional arguments to pass to ``func`` for each row. out : ndarray, optional Array into which to write output. If not supplied, a new array of the same shape as ``data`` is allocated and returned. Examples -------- >>> data = np.array([[1., 2., 3.], ... [2., 3., 4.], ... [5., 6., 7.]]) >>> labels = np.array([[0, 0, 1], ... [0, 1, 0], ... [1, 0, 2]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row - row.min()) array([[ 0., 1., 0.], [ 0., 0., 2.], [ 0., 0., 0.]]) >>> naive_grouped_rowwise_apply(data, labels, lambda row: row / row.sum()) array([[ 0.33333333, 0.66666667, 1. ], [ 0.33333333, 1. , 0.66666667], [ 1. , 1. , 1. ]])
[ "Simple", "implementation", "of", "grouped", "row", "-", "wise", "function", "application", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/normalize.py#L4-L51
train
quantopian/zipline
zipline/utils/formatting.py
bulleted_list
def bulleted_list(items, indent=0, bullet_type='-'): """Format a bulleted list of values. Parameters ---------- items : sequence The items to make a list. indent : int, optional The number of spaces to add before each bullet. bullet_type : str, optional The bullet type to use. Returns ------- formatted_list : str The formatted list as a single string. """ format_string = ' ' * indent + bullet_type + ' {}' return "\n".join(map(format_string.format, items))
python
def bulleted_list(items, indent=0, bullet_type='-'): """Format a bulleted list of values. Parameters ---------- items : sequence The items to make a list. indent : int, optional The number of spaces to add before each bullet. bullet_type : str, optional The bullet type to use. Returns ------- formatted_list : str The formatted list as a single string. """ format_string = ' ' * indent + bullet_type + ' {}' return "\n".join(map(format_string.format, items))
[ "def", "bulleted_list", "(", "items", ",", "indent", "=", "0", ",", "bullet_type", "=", "'-'", ")", ":", "format_string", "=", "' '", "*", "indent", "+", "bullet_type", "+", "' {}'", "return", "\"\\n\"", ".", "join", "(", "map", "(", "format_string", ".", "format", ",", "items", ")", ")" ]
Format a bulleted list of values. Parameters ---------- items : sequence The items to make a list. indent : int, optional The number of spaces to add before each bullet. bullet_type : str, optional The bullet type to use. Returns ------- formatted_list : str The formatted list as a single string.
[ "Format", "a", "bulleted", "list", "of", "values", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/formatting.py#L48-L66
train
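A quick usage example, using the function as defined above (output shown as comments):

print(bulleted_list(['apples', 'bananas'], indent=2, bullet_type='*'))
#   * apples
#   * bananas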
quantopian/zipline
zipline/assets/synthetic.py
make_rotating_equity_info
def make_rotating_equity_info(num_assets, first_start, frequency, periods_between_starts, asset_lifetime, exchange='TEST'): """ Create a DataFrame representing lifetimes of assets that are constantly rotating in and out of existence. Parameters ---------- num_assets : int How many assets to create. first_start : pd.Timestamp The start date for the first asset. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret next two arguments. periods_between_starts : int Create a new asset every `frequency` * `periods_between_starts` asset_lifetime : int Each asset exists for `frequency` * `asset_lifetime` days. exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ return pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], # Start a new asset every `periods_between_starts` days. 'start_date': pd.date_range( first_start, freq=(periods_between_starts * frequency), periods=num_assets, ), # Each asset lasts for `asset_lifetime` days. 'end_date': pd.date_range( first_start + (asset_lifetime * frequency), freq=(periods_between_starts * frequency), periods=num_assets, ), 'exchange': exchange, }, index=range(num_assets), )
python
def make_rotating_equity_info(num_assets, first_start, frequency, periods_between_starts, asset_lifetime, exchange='TEST'): """ Create a DataFrame representing lifetimes of assets that are constantly rotating in and out of existence. Parameters ---------- num_assets : int How many assets to create. first_start : pd.Timestamp The start date for the first asset. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret next two arguments. periods_between_starts : int Create a new asset every `frequency` * `periods_between_starts` asset_lifetime : int Each asset exists for `frequency` * `asset_lifetime` days. exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ return pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], # Start a new asset every `periods_between_starts` days. 'start_date': pd.date_range( first_start, freq=(periods_between_starts * frequency), periods=num_assets, ), # Each asset lasts for `asset_lifetime` days. 'end_date': pd.date_range( first_start + (asset_lifetime * frequency), freq=(periods_between_starts * frequency), periods=num_assets, ), 'exchange': exchange, }, index=range(num_assets), )
[ "def", "make_rotating_equity_info", "(", "num_assets", ",", "first_start", ",", "frequency", ",", "periods_between_starts", ",", "asset_lifetime", ",", "exchange", "=", "'TEST'", ")", ":", "return", "pd", ".", "DataFrame", "(", "{", "'symbol'", ":", "[", "chr", "(", "ord", "(", "'A'", ")", "+", "i", ")", "for", "i", "in", "range", "(", "num_assets", ")", "]", ",", "# Start a new asset every `periods_between_starts` days.", "'start_date'", ":", "pd", ".", "date_range", "(", "first_start", ",", "freq", "=", "(", "periods_between_starts", "*", "frequency", ")", ",", "periods", "=", "num_assets", ",", ")", ",", "# Each asset lasts for `asset_lifetime` days.", "'end_date'", ":", "pd", ".", "date_range", "(", "first_start", "+", "(", "asset_lifetime", "*", "frequency", ")", ",", "freq", "=", "(", "periods_between_starts", "*", "frequency", ")", ",", "periods", "=", "num_assets", ",", ")", ",", "'exchange'", ":", "exchange", ",", "}", ",", "index", "=", "range", "(", "num_assets", ")", ",", ")" ]
Create a DataFrame representing lifetimes of assets that are constantly rotating in and out of existence. Parameters ---------- num_assets : int How many assets to create. first_start : pd.Timestamp The start date for the first asset. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret next two arguments. periods_between_starts : int Create a new asset every `frequency` * `periods_between_starts` asset_lifetime : int Each asset exists for `frequency` * `asset_lifetime` days. exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets.
[ "Create", "a", "DataFrame", "representing", "lifetimes", "of", "assets", "that", "are", "constantly", "rotating", "in", "and", "out", "of", "existence", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L11-L59
train
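A small illustrative call, using a plain calendar-day offset for self-containment (zipline's test suite typically passes a trading-day offset here):

import pandas as pd

frame = make_rotating_equity_info(
    num_assets=3,
    first_start=pd.Timestamp('2014-01-01'),
    frequency=pd.offsets.Day(),
    periods_between_starts=5,
    asset_lifetime=10,
)
# Symbols 'A', 'B', 'C'; start dates 5 days apart (Jan 1, 6, 11), and each
# asset's end_date falls 10 days after its start_date.
print(frame[['symbol', 'start_date', 'end_date']])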
quantopian/zipline
zipline/assets/synthetic.py
make_simple_equity_info
def make_simple_equity_info(sids, start_date, end_date, symbols=None, names=None, exchange='TEST'): """ Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`. Parameters ---------- sids : array-like of int start_date : pd.Timestamp end_date : pd.Timestamp symbols : list, optional Symbols to use for the assets. If not provided, symbols are generated from the sequence 'A', 'B', ... names : list, optional Names to use for the assets. If not provided, names are generated by adding " INC." to each of the symbols (which might also be auto-generated). exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ num_assets = len(sids) if symbols is None: symbols = list(ascii_uppercase[:num_assets]) else: symbols = list(symbols) if names is None: names = [str(s) + " INC." for s in symbols] return pd.DataFrame( { 'symbol': symbols, 'start_date': pd.to_datetime([start_date] * num_assets), 'end_date': pd.to_datetime([end_date] * num_assets), 'asset_name': list(names), 'exchange': exchange, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), )
python
def make_simple_equity_info(sids, start_date, end_date, symbols=None, names=None, exchange='TEST'): """ Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`. Parameters ---------- sids : array-like of int start_date : pd.Timestamp end_date : pd.Timestamp symbols : list, optional Symbols to use for the assets. If not provided, symbols are generated from the sequence 'A', 'B', ... names : list, optional Names to use for the assets. If not provided, names are generated by adding " INC." to each of the symbols (which might also be auto-generated). exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ num_assets = len(sids) if symbols is None: symbols = list(ascii_uppercase[:num_assets]) else: symbols = list(symbols) if names is None: names = [str(s) + " INC." for s in symbols] return pd.DataFrame( { 'symbol': symbols, 'start_date': pd.to_datetime([start_date] * num_assets), 'end_date': pd.to_datetime([end_date] * num_assets), 'asset_name': list(names), 'exchange': exchange, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), )
[ "def", "make_simple_equity_info", "(", "sids", ",", "start_date", ",", "end_date", ",", "symbols", "=", "None", ",", "names", "=", "None", ",", "exchange", "=", "'TEST'", ")", ":", "num_assets", "=", "len", "(", "sids", ")", "if", "symbols", "is", "None", ":", "symbols", "=", "list", "(", "ascii_uppercase", "[", ":", "num_assets", "]", ")", "else", ":", "symbols", "=", "list", "(", "symbols", ")", "if", "names", "is", "None", ":", "names", "=", "[", "str", "(", "s", ")", "+", "\" INC.\"", "for", "s", "in", "symbols", "]", "return", "pd", ".", "DataFrame", "(", "{", "'symbol'", ":", "symbols", ",", "'start_date'", ":", "pd", ".", "to_datetime", "(", "[", "start_date", "]", "*", "num_assets", ")", ",", "'end_date'", ":", "pd", ".", "to_datetime", "(", "[", "end_date", "]", "*", "num_assets", ")", ",", "'asset_name'", ":", "list", "(", "names", ")", ",", "'exchange'", ":", "exchange", ",", "}", ",", "index", "=", "sids", ",", "columns", "=", "(", "'start_date'", ",", "'end_date'", ",", "'symbol'", ",", "'exchange'", ",", "'asset_name'", ",", ")", ",", ")" ]
Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`. Parameters ---------- sids : array-like of int start_date : pd.Timestamp end_date : pd.Timestamp symbols : list, optional Symbols to use for the assets. If not provided, symbols are generated from the sequence 'A', 'B', ... names : list, optional Names to use for the assets. If not provided, names are generated by adding " INC." to each of the symbols (which might also be auto-generated). exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets.
[ "Create", "a", "DataFrame", "representing", "assets", "that", "exist", "for", "the", "full", "duration", "between", "start_date", "and", "end_date", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L62-L117
train
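Illustrative usage, relying on the auto-generated symbols and names:

import pandas as pd

frame = make_simple_equity_info(
    sids=[1, 2, 3],
    start_date=pd.Timestamp('2014-01-01'),
    end_date=pd.Timestamp('2014-12-31'),
)
# Symbols default to 'A', 'B', 'C' and names to 'A INC.', 'B INC.',
# 'C INC.'; all three assets share the same start and end dates.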
quantopian/zipline
zipline/assets/synthetic.py
make_simple_multi_country_equity_info
def make_simple_multi_country_equity_info(countries_to_sids, countries_to_exchanges, start_date, end_date): """Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`, from multiple countries. """ sids = [] symbols = [] exchanges = [] for country, country_sids in countries_to_sids.items(): exchange = countries_to_exchanges[country] for i, sid in enumerate(country_sids): sids.append(sid) symbols.append('-'.join([country, str(i)])) exchanges.append(exchange) return pd.DataFrame( { 'symbol': symbols, 'start_date': start_date, 'end_date': end_date, 'asset_name': symbols, 'exchange': exchanges, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), )
python
def make_simple_multi_country_equity_info(countries_to_sids, countries_to_exchanges, start_date, end_date): """Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`, from multiple countries. """ sids = [] symbols = [] exchanges = [] for country, country_sids in countries_to_sids.items(): exchange = countries_to_exchanges[country] for i, sid in enumerate(country_sids): sids.append(sid) symbols.append('-'.join([country, str(i)])) exchanges.append(exchange) return pd.DataFrame( { 'symbol': symbols, 'start_date': start_date, 'end_date': end_date, 'asset_name': symbols, 'exchange': exchanges, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), )
[ "def", "make_simple_multi_country_equity_info", "(", "countries_to_sids", ",", "countries_to_exchanges", ",", "start_date", ",", "end_date", ")", ":", "sids", "=", "[", "]", "symbols", "=", "[", "]", "exchanges", "=", "[", "]", "for", "country", ",", "country_sids", "in", "countries_to_sids", ".", "items", "(", ")", ":", "exchange", "=", "countries_to_exchanges", "[", "country", "]", "for", "i", ",", "sid", "in", "enumerate", "(", "country_sids", ")", ":", "sids", ".", "append", "(", "sid", ")", "symbols", ".", "append", "(", "'-'", ".", "join", "(", "[", "country", ",", "str", "(", "i", ")", "]", ")", ")", "exchanges", ".", "append", "(", "exchange", ")", "return", "pd", ".", "DataFrame", "(", "{", "'symbol'", ":", "symbols", ",", "'start_date'", ":", "start_date", ",", "'end_date'", ":", "end_date", ",", "'asset_name'", ":", "symbols", ",", "'exchange'", ":", "exchanges", ",", "}", ",", "index", "=", "sids", ",", "columns", "=", "(", "'start_date'", ",", "'end_date'", ",", "'symbol'", ",", "'exchange'", ",", "'asset_name'", ",", ")", ",", ")" ]
Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`, from multiple countries.
[ "Create", "a", "DataFrame", "representing", "assets", "that", "exist", "for", "the", "full", "duration", "between", "start_date", "and", "end_date", "from", "multiple", "countries", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L120-L154
train
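Illustrative usage; the exchange names here are placeholders:

import pandas as pd

frame = make_simple_multi_country_equity_info(
    countries_to_sids={'US': [1, 2], 'CA': [100]},
    countries_to_exchanges={'US': 'NYSE', 'CA': 'TSX'},
    start_date=pd.Timestamp('2014-01-01'),
    end_date=pd.Timestamp('2014-12-31'),
)
# Yields symbols 'US-0', 'US-1', 'CA-0', indexed by sid (1, 2, 100).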
quantopian/zipline
zipline/assets/synthetic.py
make_jagged_equity_info
def make_jagged_equity_info(num_assets, start_date, first_end, frequency, periods_between_ends, auto_close_delta): """ Create a DataFrame representing assets that all begin at the same start date, but have cascading end dates. Parameters ---------- num_assets : int How many assets to create. start_date : pd.Timestamp The start date for all the assets. first_end : pd.Timestamp The date at which the first equity will end. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret the next argument. periods_between_ends : int Starting after the first end date, end each asset every `frequency` * `periods_between_ends`. auto_close_delta : pd.Timedelta or offset, or None Delta from each asset's end_date at which it should auto-close. Pass None to disable setting the auto_close_date column. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ frame = pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], 'start_date': start_date, 'end_date': pd.date_range( first_end, freq=(periods_between_ends * frequency), periods=num_assets, ), 'exchange': 'TEST', }, index=range(num_assets), ) # Explicitly pass None to disable setting the auto_close_date column. if auto_close_delta is not None: frame['auto_close_date'] = frame['end_date'] + auto_close_delta return frame
python
def make_jagged_equity_info(num_assets, start_date, first_end, frequency, periods_between_ends, auto_close_delta): """ Create a DataFrame representing assets that all begin at the same start date, but have cascading end dates. Parameters ---------- num_assets : int How many assets to create. start_date : pd.Timestamp The start date for all the assets. first_end : pd.Timestamp The date at which the first equity will end. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret the next argument. periods_between_ends : int Starting after the first end date, end each asset every `frequency` * `periods_between_ends`. auto_close_delta : pd.Timedelta or offset, or None Delta from each asset's end_date at which it should auto-close. Pass None to disable setting the auto_close_date column. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ frame = pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], 'start_date': start_date, 'end_date': pd.date_range( first_end, freq=(periods_between_ends * frequency), periods=num_assets, ), 'exchange': 'TEST', }, index=range(num_assets), ) # Explicitly pass None to disable setting the auto_close_date column. if auto_close_delta is not None: frame['auto_close_date'] = frame['end_date'] + auto_close_delta return frame
[ "def", "make_jagged_equity_info", "(", "num_assets", ",", "start_date", ",", "first_end", ",", "frequency", ",", "periods_between_ends", ",", "auto_close_delta", ")", ":", "frame", "=", "pd", ".", "DataFrame", "(", "{", "'symbol'", ":", "[", "chr", "(", "ord", "(", "'A'", ")", "+", "i", ")", "for", "i", "in", "range", "(", "num_assets", ")", "]", ",", "'start_date'", ":", "start_date", ",", "'end_date'", ":", "pd", ".", "date_range", "(", "first_end", ",", "freq", "=", "(", "periods_between_ends", "*", "frequency", ")", ",", "periods", "=", "num_assets", ",", ")", ",", "'exchange'", ":", "'TEST'", ",", "}", ",", "index", "=", "range", "(", "num_assets", ")", ",", ")", "# Explicitly pass None to disable setting the auto_close_date column.", "if", "auto_close_delta", "is", "not", "None", ":", "frame", "[", "'auto_close_date'", "]", "=", "frame", "[", "'end_date'", "]", "+", "auto_close_delta", "return", "frame" ]
Create a DataFrame representing assets that all begin at the same start date, but have cascading end dates. Parameters ---------- num_assets : int How many assets to create. start_date : pd.Timestamp The start date for all the assets. first_end : pd.Timestamp The date at which the first equity will end. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret the next argument. periods_between_ends : int Starting after the first end date, end each asset every `frequency` * `periods_between_ends`. auto_close_delta : pd.Timedelta or offset, or None Delta from each asset's end_date at which it should auto-close. Pass None to disable setting the auto_close_date column. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets.
[ "Create", "a", "DataFrame", "representing", "assets", "that", "all", "begin", "at", "the", "same", "start", "date", "but", "have", "cascading", "end", "dates", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L157-L204
train
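Illustrative usage, again with a calendar-day offset for self-containment:

import pandas as pd

frame = make_jagged_equity_info(
    num_assets=3,
    start_date=pd.Timestamp('2014-01-01'),
    first_end=pd.Timestamp('2014-01-10'),
    frequency=pd.offsets.Day(),
    periods_between_ends=5,
    auto_close_delta=pd.Timedelta(days=2),
)
# All assets start on 2014-01-01; end dates cascade (Jan 10, 15, 20), and
# each auto_close_date falls two days after the corresponding end_date.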
quantopian/zipline
zipline/assets/synthetic.py
make_future_info
def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500): """ Create a DataFrame representing futures for `root_symbols` during `years`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp Function to generate start dates from first of the month associated with each asset month code. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter. """ if month_codes is None: month_codes = CMES_CODE_TO_MONTH year_strs = list(map(str, years)) years = [pd.Timestamp(s, tz='UTC') for s in year_strs] # Pairs of string/date like ('K06', 2006-05-01) contract_suffix_to_beginning_of_month = tuple( (month_code + year_str[-2:], year + MonthBegin(month_num)) for ((year, year_str), (month_code, month_num)) in product( zip(years, year_strs), iteritems(month_codes), ) ) contracts = [] parts = product(root_symbols, contract_suffix_to_beginning_of_month) for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid): contracts.append({ 'sid': sid, 'root_symbol': root_sym, 'symbol': root_sym + suffix, 'start_date': start_date_func(month_begin), 'notice_date': notice_date_func(month_begin), 'expiration_date': expiration_date_func(month_begin), 'multiplier': multiplier, 'exchange': "TEST", }) return pd.DataFrame.from_records(contracts, index='sid')
python
def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500): """ Create a DataFrame representing futures for `root_symbols` during `years`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp Function to generate start dates from first of the month associated with each asset month code. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter. """ if month_codes is None: month_codes = CMES_CODE_TO_MONTH year_strs = list(map(str, years)) years = [pd.Timestamp(s, tz='UTC') for s in year_strs] # Pairs of string/date like ('K06', 2006-05-01) contract_suffix_to_beginning_of_month = tuple( (month_code + year_str[-2:], year + MonthBegin(month_num)) for ((year, year_str), (month_code, month_num)) in product( zip(years, year_strs), iteritems(month_codes), ) ) contracts = [] parts = product(root_symbols, contract_suffix_to_beginning_of_month) for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid): contracts.append({ 'sid': sid, 'root_symbol': root_sym, 'symbol': root_sym + suffix, 'start_date': start_date_func(month_begin), 'notice_date': notice_date_func(month_begin), 'expiration_date': expiration_date_func(month_begin), 'multiplier': multiplier, 'exchange': "TEST", }) return pd.DataFrame.from_records(contracts, index='sid')
[ "def", "make_future_info", "(", "first_sid", ",", "root_symbols", ",", "years", ",", "notice_date_func", ",", "expiration_date_func", ",", "start_date_func", ",", "month_codes", "=", "None", ",", "multiplier", "=", "500", ")", ":", "if", "month_codes", "is", "None", ":", "month_codes", "=", "CMES_CODE_TO_MONTH", "year_strs", "=", "list", "(", "map", "(", "str", ",", "years", ")", ")", "years", "=", "[", "pd", ".", "Timestamp", "(", "s", ",", "tz", "=", "'UTC'", ")", "for", "s", "in", "year_strs", "]", "# Pairs of string/date like ('K06', 2006-05-01)", "contract_suffix_to_beginning_of_month", "=", "tuple", "(", "(", "month_code", "+", "year_str", "[", "-", "2", ":", "]", ",", "year", "+", "MonthBegin", "(", "month_num", ")", ")", "for", "(", "(", "year", ",", "year_str", ")", ",", "(", "month_code", ",", "month_num", ")", ")", "in", "product", "(", "zip", "(", "years", ",", "year_strs", ")", ",", "iteritems", "(", "month_codes", ")", ",", ")", ")", "contracts", "=", "[", "]", "parts", "=", "product", "(", "root_symbols", ",", "contract_suffix_to_beginning_of_month", ")", "for", "sid", ",", "(", "root_sym", ",", "(", "suffix", ",", "month_begin", ")", ")", "in", "enumerate", "(", "parts", ",", "first_sid", ")", ":", "contracts", ".", "append", "(", "{", "'sid'", ":", "sid", ",", "'root_symbol'", ":", "root_sym", ",", "'symbol'", ":", "root_sym", "+", "suffix", ",", "'start_date'", ":", "start_date_func", "(", "month_begin", ")", ",", "'notice_date'", ":", "notice_date_func", "(", "month_begin", ")", ",", "'expiration_date'", ":", "notice_date_func", "(", "month_begin", ")", ",", "'multiplier'", ":", "multiplier", ",", "'exchange'", ":", "\"TEST\"", ",", "}", ")", "return", "pd", ".", "DataFrame", ".", "from_records", "(", "contracts", ",", "index", "=", "'sid'", ")" ]
Create a DataFrame representing futures for `root_symbols` during `years`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp Function to generate start dates from first of the month associated with each asset month code. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter.
[ "Create", "a", "DataFrame", "representing", "futures", "for", "root_symbols", "during", "year", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L207-L281
train
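An illustrative call with explicit month codes so the example does not depend on CMES_CODE_TO_MONTH; the date functions mirror the commodity-style helper below:

import pandas as pd
from pandas.tseries.offsets import MonthBegin

frame = make_future_info(
    first_sid=1,
    root_symbols=['CL'],
    years=[2014],
    notice_date_func=lambda dt: dt - MonthBegin(2) + pd.Timedelta(days=19),
    expiration_date_func=lambda dt: dt - MonthBegin(1) + pd.Timedelta(days=19),
    start_date_func=lambda dt: dt - pd.Timedelta(days=365),
    month_codes={'F': 1, 'G': 2},  # January and February contracts only
)
# Two contracts, 'CLF14' and 'CLG14', with sids 1 and 2 (row order follows
# the iteration order of month_codes).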
quantopian/zipline
zipline/assets/synthetic.py
make_commodity_future_info
def make_commodity_future_info(first_sid, root_symbols, years, month_codes=None, multiplier=500): """ Make futures testing data that simulates the notice/expiration date behavior of physical commodities like oil. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Expiration dates are on the 20th of the month prior to the month code. Notice dates are on the 20th two months prior to the month code. Start dates are one year before the contract month. See Also -------- make_future_info """ nineteen_days = pd.Timedelta(days=19) one_year = pd.Timedelta(days=365) return make_future_info( first_sid=first_sid, root_symbols=root_symbols, years=years, notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days, expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days, start_date_func=lambda dt: dt - one_year, month_codes=month_codes, multiplier=multiplier, )
python
def make_commodity_future_info(first_sid, root_symbols, years, month_codes=None, multiplier=500): """ Make futures testing data that simulates the notice/expiration date behavior of physical commodities like oil. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Expiration dates are on the 20th of the month prior to the month code. Notice dates are on the 20th two months prior to the month code. Start dates are one year before the contract month. See Also -------- make_future_info """ nineteen_days = pd.Timedelta(days=19) one_year = pd.Timedelta(days=365) return make_future_info( first_sid=first_sid, root_symbols=root_symbols, years=years, notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days, expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days, start_date_func=lambda dt: dt - one_year, month_codes=month_codes, multiplier=multiplier, )
[ "def", "make_commodity_future_info", "(", "first_sid", ",", "root_symbols", ",", "years", ",", "month_codes", "=", "None", ",", "multiplier", "=", "500", ")", ":", "nineteen_days", "=", "pd", ".", "Timedelta", "(", "days", "=", "19", ")", "one_year", "=", "pd", ".", "Timedelta", "(", "days", "=", "365", ")", "return", "make_future_info", "(", "first_sid", "=", "first_sid", ",", "root_symbols", "=", "root_symbols", ",", "years", "=", "years", ",", "notice_date_func", "=", "lambda", "dt", ":", "dt", "-", "MonthBegin", "(", "2", ")", "+", "nineteen_days", ",", "expiration_date_func", "=", "lambda", "dt", ":", "dt", "-", "MonthBegin", "(", "1", ")", "+", "nineteen_days", ",", "start_date_func", "=", "lambda", "dt", ":", "dt", "-", "one_year", ",", "month_codes", "=", "month_codes", ",", "multiplier", "=", "multiplier", ",", ")" ]
Make futures testing data that simulates the notice/expiration date behavior of physical commodities like oil. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Expiration dates are on the 20th of the month prior to the month code. Notice dates are on the 20th two months prior to the month code. Start dates are one year before the contract month. See Also -------- make_future_info
[ "Make", "futures", "testing", "data", "that", "simulates", "the", "notice", "/", "expiration", "date", "behavior", "of", "physical", "commodities", "like", "oil", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/synthetic.py#L284-L327
train
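Illustrative usage; exact dates depend on pandas' MonthBegin rolling behavior, so they are not asserted here:

frame = make_commodity_future_info(
    first_sid=1,
    root_symbols=['CL'],
    years=[2014],
    month_codes={'F': 1},  # January contract only
)
# One 'CLF14' contract whose notice date precedes its expiration date by a
# month, with a start date roughly a year before the contract month.
print(frame[['symbol', 'start_date', 'notice_date', 'expiration_date']])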
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.eq
def eq(self, other): """ Construct a Filter returning True for asset/date pairs where the output of ``self`` matches ``other``. """ # We treat this as an error because missing_values have NaN semantics, # which means this would return an array of all False, which is almost # certainly not what the user wants. if other == self.missing_value: raise ValueError( "Comparison against self.missing_value ({value!r}) in" " {typename}.eq().\n" "Missing values have NaN semantics, so the " "requested comparison would always produce False.\n" "Use the isnull() method to check for missing values.".format( value=other, typename=(type(self).__name__), ) ) if isinstance(other, Number) != (self.dtype == int64_dtype): raise InvalidClassifierComparison(self, other) if isinstance(other, Number): return NumExprFilter.create( "x_0 == {other}".format(other=int(other)), binds=(self,), ) else: return ArrayPredicate( term=self, op=operator.eq, opargs=(other,), )
python
def eq(self, other): """ Construct a Filter returning True for asset/date pairs where the output of ``self`` matches ``other``. """ # We treat this as an error because missing_values have NaN semantics, # which means this would return an array of all False, which is almost # certainly not what the user wants. if other == self.missing_value: raise ValueError( "Comparison against self.missing_value ({value!r}) in" " {typename}.eq().\n" "Missing values have NaN semantics, so the " "requested comparison would always produce False.\n" "Use the isnull() method to check for missing values.".format( value=other, typename=(type(self).__name__), ) ) if isinstance(other, Number) != (self.dtype == int64_dtype): raise InvalidClassifierComparison(self, other) if isinstance(other, Number): return NumExprFilter.create( "x_0 == {other}".format(other=int(other)), binds=(self,), ) else: return ArrayPredicate( term=self, op=operator.eq, opargs=(other,), )
[ "def", "eq", "(", "self", ",", "other", ")", ":", "# We treat this as an error because missing_values have NaN semantics,", "# which means this would return an array of all False, which is almost", "# certainly not what the user wants.", "if", "other", "==", "self", ".", "missing_value", ":", "raise", "ValueError", "(", "\"Comparison against self.missing_value ({value!r}) in\"", "\" {typename}.eq().\\n\"", "\"Missing values have NaN semantics, so the \"", "\"requested comparison would always produce False.\\n\"", "\"Use the isnull() method to check for missing values.\"", ".", "format", "(", "value", "=", "other", ",", "typename", "=", "(", "type", "(", "self", ")", ".", "__name__", ")", ",", ")", ")", "if", "isinstance", "(", "other", ",", "Number", ")", "!=", "(", "self", ".", "dtype", "==", "int64_dtype", ")", ":", "raise", "InvalidClassifierComparison", "(", "self", ",", "other", ")", "if", "isinstance", "(", "other", ",", "Number", ")", ":", "return", "NumExprFilter", ".", "create", "(", "\"x_0 == {other}\"", ".", "format", "(", "other", "=", "int", "(", "other", ")", ")", ",", "binds", "=", "(", "self", ",", ")", ",", ")", "else", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "operator", ".", "eq", ",", "opargs", "=", "(", "other", ",", ")", ",", ")" ]
Construct a Filter returning True for asset/date pairs where the output of ``self`` matches ``other``.
[ "Construct", "a", "Filter", "returning", "True", "for", "asset", "/", "date", "pairs", "where", "the", "output", "of", "self", "matches", "other", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L83-L116
train
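A hedged usage sketch: quantile classifiers produced by factors are int64, so ``eq`` against an integer label yields a Filter. This assumes zipline's built-in ``Returns`` factor and the ``quartiles()`` classifier method.

from zipline.pipeline import Pipeline
from zipline.pipeline.factors import Returns

monthly_returns = Returns(window_length=21)
quartiles = monthly_returns.quartiles()   # int64 Classifier with labels 0-3
top_quartile = quartiles.eq(3)            # Filter: assets in the top quartile
pipe = Pipeline(columns={'returns': monthly_returns}, screen=top_quartile)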
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.startswith
def startswith(self, prefix): """ Construct a Filter matching values starting with ``prefix``. Parameters ---------- prefix : str String prefix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string starting with ``prefix``. """ return ArrayPredicate( term=self, op=LabelArray.startswith, opargs=(prefix,), )
python
def startswith(self, prefix): """ Construct a Filter matching values starting with ``prefix``. Parameters ---------- prefix : str String prefix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string starting with ``prefix``. """ return ArrayPredicate( term=self, op=LabelArray.startswith, opargs=(prefix,), )
[ "def", "startswith", "(", "self", ",", "prefix", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "LabelArray", ".", "startswith", ",", "opargs", "=", "(", "prefix", ",", ")", ",", ")" ]
Construct a Filter matching values starting with ``prefix``. Parameters ---------- prefix : str String prefix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string starting with ``prefix``.
[ "Construct", "a", "Filter", "matching", "values", "starting", "with", "prefix", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L150-L169
train
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.endswith
def endswith(self, suffix): """ Construct a Filter matching values ending with ``suffix``. Parameters ---------- suffix : str String suffix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string ending with ``suffix``. """ return ArrayPredicate( term=self, op=LabelArray.endswith, opargs=(suffix,), )
python
def endswith(self, suffix): """ Construct a Filter matching values ending with ``suffix``. Parameters ---------- suffix : str String suffix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string ending with ``suffix``. """ return ArrayPredicate( term=self, op=LabelArray.endswith, opargs=(suffix,), )
[ "def", "endswith", "(", "self", ",", "suffix", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "LabelArray", ".", "endswith", ",", "opargs", "=", "(", "suffix", ",", ")", ",", ")" ]
Construct a Filter matching values ending with ``suffix``. Parameters ---------- suffix : str String suffix against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string ending with ``suffix``.
[ "Construct", "a", "Filter", "matching", "values", "ending", "with", "suffix", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L173-L192
train
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.has_substring
def has_substring(self, substring): """ Construct a Filter matching values containing ``substring``. Parameters ---------- substring : str Sub-string against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string containing ``substring``. """ return ArrayPredicate( term=self, op=LabelArray.has_substring, opargs=(substring,), )
python
def has_substring(self, substring): """ Construct a Filter matching values containing ``substring``. Parameters ---------- substring : str Sub-string against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string containing ``substring``. """ return ArrayPredicate( term=self, op=LabelArray.has_substring, opargs=(substring,), )
[ "def", "has_substring", "(", "self", ",", "substring", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "LabelArray", ".", "has_substring", ",", "opargs", "=", "(", "substring", ",", ")", ",", ")" ]
Construct a Filter matching values containing ``substring``. Parameters ---------- substring : str Sub-string against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string containing ``substring``.
[ "Construct", "a", "Filter", "matching", "values", "containing", "substring", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L196-L215
train
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.matches
def matches(self, pattern): """ Construct a Filter that checks regex matches against ``pattern``. Parameters ---------- pattern : str Regex pattern against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string matched by ``pattern``. See Also -------- :mod:`Python Regular Expressions <re>` """ return ArrayPredicate( term=self, op=LabelArray.matches, opargs=(pattern,), )
python
def matches(self, pattern): """ Construct a Filter that checks regex matches against ``pattern``. Parameters ---------- pattern : str Regex pattern against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string matched by ``pattern``. See Also -------- :mod:`Python Regular Expressions <re>` """ return ArrayPredicate( term=self, op=LabelArray.matches, opargs=(pattern,), )
[ "def", "matches", "(", "self", ",", "pattern", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "LabelArray", ".", "matches", ",", "opargs", "=", "(", "pattern", ",", ")", ",", ")" ]
Construct a Filter that checks regex matches against ``pattern``. Parameters ---------- pattern : str Regex pattern against which to compare values produced by ``self``. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces a string matched by ``pattern``. See Also -------- :mod:`Python Regular Expressions <re>`
[ "Construct", "a", "Filter", "that", "checks", "regex", "matches", "against", "pattern", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L219-L242
train
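The four string predicates (``startswith``, ``endswith``, ``has_substring``, ``matches``) all build an ArrayPredicate over a categorical classifier. A hedged sketch using a hypothetical string column; no such built-in dataset exists, and a real pipeline would also need a loader registered for it:

from zipline.pipeline.data import Column, DataSet
from zipline.utils.numpy_utils import categorical_dtype

class Fundamentals(DataSet):
    # Hypothetical string-dtype column, for illustration only.
    company_name = Column(categorical_dtype, missing_value=None)

name = Fundamentals.company_name.latest   # categorical Classifier
banks = name.has_substring('Bank')
etfs = name.endswith('ETF')
q_names = name.startswith('Q')
reits = name.matches(r'.*REIT.*')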
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.element_of
def element_of(self, choices): """ Construct a Filter indicating whether values are in ``choices``. Parameters ---------- choices : iterable[str or int] An iterable of choices. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces an entry in ``choices``. """ try: choices = frozenset(choices) except Exception as e: raise TypeError( "Expected `choices` to be an iterable of hashable values," " but got {} instead.\n" "This caused the following error: {!r}.".format(choices, e) ) if self.missing_value in choices: raise ValueError( "Found self.missing_value ({mv!r}) in choices supplied to" " {typename}.{meth_name}().\n" "Missing values have NaN semantics, so the" " requested comparison would always produce False.\n" "Use the isnull() method to check for missing values.\n" "Received choices were {choices}.".format( mv=self.missing_value, typename=(type(self).__name__), choices=sorted(choices), meth_name=self.element_of.__name__, ) ) def only_contains(type_, values): return all(isinstance(v, type_) for v in values) if self.dtype == int64_dtype: if only_contains(int, choices): return ArrayPredicate( term=self, op=vectorized_is_element, opargs=(choices,), ) else: raise TypeError( "Found non-int in choices for {typename}.element_of.\n" "Supplied choices were {choices}.".format( typename=type(self).__name__, choices=choices, ) ) elif self.dtype == categorical_dtype: if only_contains((bytes, unicode), choices): return ArrayPredicate( term=self, op=LabelArray.element_of, opargs=(choices,), ) else: raise TypeError( "Found non-string in choices for {typename}.element_of.\n" "Supplied choices were {choices}.".format( typename=type(self).__name__, choices=choices, ) ) assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
python
def element_of(self, choices): """ Construct a Filter indicating whether values are in ``choices``. Parameters ---------- choices : iterable[str or int] An iterable of choices. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces an entry in ``choices``. """ try: choices = frozenset(choices) except Exception as e: raise TypeError( "Expected `choices` to be an iterable of hashable values," " but got {} instead.\n" "This caused the following error: {!r}.".format(choices, e) ) if self.missing_value in choices: raise ValueError( "Found self.missing_value ({mv!r}) in choices supplied to" " {typename}.{meth_name}().\n" "Missing values have NaN semantics, so the" " requested comparison would always produce False.\n" "Use the isnull() method to check for missing values.\n" "Received choices were {choices}.".format( mv=self.missing_value, typename=(type(self).__name__), choices=sorted(choices), meth_name=self.element_of.__name__, ) ) def only_contains(type_, values): return all(isinstance(v, type_) for v in values) if self.dtype == int64_dtype: if only_contains(int, choices): return ArrayPredicate( term=self, op=vectorized_is_element, opargs=(choices,), ) else: raise TypeError( "Found non-int in choices for {typename}.element_of.\n" "Supplied choices were {choices}.".format( typename=type(self).__name__, choices=choices, ) ) elif self.dtype == categorical_dtype: if only_contains((bytes, unicode), choices): return ArrayPredicate( term=self, op=LabelArray.element_of, opargs=(choices,), ) else: raise TypeError( "Found non-string in choices for {typename}.element_of.\n" "Supplied choices were {choices}.".format( typename=type(self).__name__, choices=choices, ) ) assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
[ "def", "element_of", "(", "self", ",", "choices", ")", ":", "try", ":", "choices", "=", "frozenset", "(", "choices", ")", "except", "Exception", "as", "e", ":", "raise", "TypeError", "(", "\"Expected `choices` to be an iterable of hashable values,\"", "\" but got {} instead.\\n\"", "\"This caused the following error: {!r}.\"", ".", "format", "(", "choices", ",", "e", ")", ")", "if", "self", ".", "missing_value", "in", "choices", ":", "raise", "ValueError", "(", "\"Found self.missing_value ({mv!r}) in choices supplied to\"", "\" {typename}.{meth_name}().\\n\"", "\"Missing values have NaN semantics, so the\"", "\" requested comparison would always produce False.\\n\"", "\"Use the isnull() method to check for missing values.\\n\"", "\"Received choices were {choices}.\"", ".", "format", "(", "mv", "=", "self", ".", "missing_value", ",", "typename", "=", "(", "type", "(", "self", ")", ".", "__name__", ")", ",", "choices", "=", "sorted", "(", "choices", ")", ",", "meth_name", "=", "self", ".", "element_of", ".", "__name__", ",", ")", ")", "def", "only_contains", "(", "type_", ",", "values", ")", ":", "return", "all", "(", "isinstance", "(", "v", ",", "type_", ")", "for", "v", "in", "values", ")", "if", "self", ".", "dtype", "==", "int64_dtype", ":", "if", "only_contains", "(", "int", ",", "choices", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "vectorized_is_element", ",", "opargs", "=", "(", "choices", ",", ")", ",", ")", "else", ":", "raise", "TypeError", "(", "\"Found non-int in choices for {typename}.element_of.\\n\"", "\"Supplied choices were {choices}.\"", ".", "format", "(", "typename", "=", "type", "(", "self", ")", ".", "__name__", ",", "choices", "=", "choices", ",", ")", ")", "elif", "self", ".", "dtype", "==", "categorical_dtype", ":", "if", "only_contains", "(", "(", "bytes", ",", "unicode", ")", ",", "choices", ")", ":", "return", "ArrayPredicate", "(", "term", "=", "self", ",", "op", "=", "LabelArray", ".", "element_of", ",", "opargs", "=", "(", "choices", ",", ")", ",", ")", "else", ":", "raise", "TypeError", "(", "\"Found non-string in choices for {typename}.element_of.\\n\"", "\"Supplied choices were {choices}.\"", ".", "format", "(", "typename", "=", "type", "(", "self", ")", ".", "__name__", ",", "choices", "=", "choices", ",", ")", ")", "assert", "False", ",", "\"Unknown dtype in Classifier.element_of %s.\"", "%", "self", ".", "dtype" ]
Construct a Filter indicating whether values are in ``choices``. Parameters ---------- choices : iterable[str or int] An iterable of choices. Returns ------- matches : Filter Filter returning True for all sid/date pairs for which ``self`` produces an entry in ``choices``.
[ "Construct", "a", "Filter", "indicating", "whether", "values", "are", "in", "choices", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L264-L336
train
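For orientation, a standalone sketch of the membership semantics implemented by the int64 branch above; numpy's np.isin stands in for zipline's vectorized_is_element helper, so this is an illustration rather than the library's own code path.

import numpy as np

# Boolean membership mask over a 2-D int64 array, mirroring what the
# ArrayPredicate built by element_of computes per sid/date cell.
choices = frozenset([1, 3])
data = np.array([[1, 2],
                 [3, 4]], dtype=np.int64)

matches = np.isin(data, list(choices))
print(matches)
# [[ True False]
#  [ True False]]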
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier.to_workspace_value
def to_workspace_value(self, result, assets): """ Called with the result of a pipeline. This needs to return an object which can be put into the workspace to continue doing computations. This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`. """ if self.dtype == int64_dtype: return super(Classifier, self).to_workspace_value(result, assets) assert isinstance(result.values, pd.Categorical), ( 'Expected a Categorical, got %r.' % type(result.values) ) with_missing = pd.Series( data=pd.Categorical( result.values, result.values.categories.union([self.missing_value]), ), index=result.index, ) return LabelArray( super(Classifier, self).to_workspace_value( with_missing, assets, ), self.missing_value, )
python
def to_workspace_value(self, result, assets): """ Called with the result of a pipeline. This needs to return an object which can be put into the workspace to continue doing computations. This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`. """ if self.dtype == int64_dtype: return super(Classifier, self).to_workspace_value(result, assets) assert isinstance(result.values, pd.Categorical), ( 'Expected a Categorical, got %r.' % type(result.values) ) with_missing = pd.Series( data=pd.Categorical( result.values, result.values.categories.union([self.missing_value]), ), index=result.index, ) return LabelArray( super(Classifier, self).to_workspace_value( with_missing, assets, ), self.missing_value, )
[ "def", "to_workspace_value", "(", "self", ",", "result", ",", "assets", ")", ":", "if", "self", ".", "dtype", "==", "int64_dtype", ":", "return", "super", "(", "Classifier", ",", "self", ")", ".", "to_workspace_value", "(", "result", ",", "assets", ")", "assert", "isinstance", "(", "result", ".", "values", ",", "pd", ".", "Categorical", ")", ",", "(", "'Expected a Categorical, got %r.'", "%", "type", "(", "result", ".", "values", ")", ")", "with_missing", "=", "pd", ".", "Series", "(", "data", "=", "pd", ".", "Categorical", "(", "result", ".", "values", ",", "result", ".", "values", ".", "categories", ".", "union", "(", "[", "self", ".", "missing_value", "]", ")", ",", ")", ",", "index", "=", "result", ".", "index", ",", ")", "return", "LabelArray", "(", "super", "(", "Classifier", ",", "self", ")", ".", "to_workspace_value", "(", "with_missing", ",", "assets", ",", ")", ",", "self", ".", "missing_value", ",", ")" ]
Called with the result of a pipeline. This needs to return an object which can be put into the workspace to continue doing computations. This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
[ "Called", "with", "the", "result", "of", "a", "pipeline", ".", "This", "needs", "to", "return", "an", "object", "which", "can", "be", "put", "into", "the", "workspace", "to", "continue", "doing", "computations", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L345-L371
train
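A pandas-only sketch of the category-widening step performed above: the missing value must be a legal category before the result can be re-expressed as a LabelArray. The 'NA' sentinel is a hypothetical missing_value chosen for the demo.

import pandas as pd

missing_value = 'NA'  # hypothetical missing_value for this sketch
result = pd.Series(pd.Categorical(['a', 'b', 'a']))

# Mirror the with_missing construction: same values, widened categories.
with_missing = pd.Series(
    data=pd.Categorical(
        result.values,
        result.values.categories.union([missing_value]),
    ),
    index=result.index,
)
print(with_missing.cat.categories)  # now includes 'NA' alongside 'a' and 'b'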
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
Classifier._to_integral
def _to_integral(self, output_array): """ Convert an array produced by this classifier into an array of integer labels and a missing value label. """ if self.dtype == int64_dtype: group_labels = output_array null_label = self.missing_value elif self.dtype == categorical_dtype: # Coerce LabelArray into an isomorphic array of ints. This is # necessary because np.where doesn't know about LabelArrays or the # void dtype. group_labels = output_array.as_int_array() null_label = output_array.missing_value_code else: raise AssertionError( "Unexpected Classifier dtype: %s." % self.dtype ) return group_labels, null_label
python
def _to_integral(self, output_array): """ Convert an array produced by this classifier into an array of integer labels and a missing value label. """ if self.dtype == int64_dtype: group_labels = output_array null_label = self.missing_value elif self.dtype == categorical_dtype: # Coerce LabelArray into an isomorphic array of ints. This is # necessary because np.where doesn't know about LabelArrays or the # void dtype. group_labels = output_array.as_int_array() null_label = output_array.missing_value_code else: raise AssertionError( "Unexpected Classifier dtype: %s." % self.dtype ) return group_labels, null_label
[ "def", "_to_integral", "(", "self", ",", "output_array", ")", ":", "if", "self", ".", "dtype", "==", "int64_dtype", ":", "group_labels", "=", "output_array", "null_label", "=", "self", ".", "missing_value", "elif", "self", ".", "dtype", "==", "categorical_dtype", ":", "# Coerce LabelArray into an isomorphic array of ints. This is", "# necessary because np.where doesn't know about LabelArrays or the", "# void dtype.", "group_labels", "=", "output_array", ".", "as_int_array", "(", ")", "null_label", "=", "output_array", ".", "missing_value_code", "else", ":", "raise", "AssertionError", "(", "\"Unexpected Classifier dtype: %s.\"", "%", "self", ".", "dtype", ")", "return", "group_labels", ",", "null_label" ]
Convert an array produced by this classifier into an array of integer labels and a missing value label.
[ "Convert", "an", "array", "produced", "by", "this", "classifier", "into", "an", "array", "of", "integer", "labels", "and", "a", "missing", "value", "label", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L381-L399
train
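A standalone sketch of the integral representation described above: string labels become integer codes plus a reserved code for missing entries. pandas reserves -1 for missing categorical values, which plays the role of null_label here; zipline's LabelArray uses its own missing_value_code.

import numpy as np
import pandas as pd

values = pd.Categorical(['a', None, 'b', 'a'])
group_labels = values.codes.astype(np.int64)  # [ 0 -1  1  0]
null_label = -1                               # pandas' code for missing

print(group_labels == null_label)  # [False  True False False]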
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
CustomClassifier._allocate_output
def _allocate_output(self, windows, shape): """ Override the default array allocation to produce a LabelArray when we have a string-like dtype. """ if self.dtype == int64_dtype: return super(CustomClassifier, self)._allocate_output( windows, shape, ) # This is a little bit of a hack. We might not know what the # categories for a LabelArray are until it's actually been loaded, so # we need to look at the underlying data. return windows[0].data.empty_like(shape)
python
def _allocate_output(self, windows, shape): """ Override the default array allocation to produce a LabelArray when we have a string-like dtype. """ if self.dtype == int64_dtype: return super(CustomClassifier, self)._allocate_output( windows, shape, ) # This is a little bit of a hack. We might not know what the # categories for a LabelArray are until it's actually been loaded, so # we need to look at the underlying data. return windows[0].data.empty_like(shape)
[ "def", "_allocate_output", "(", "self", ",", "windows", ",", "shape", ")", ":", "if", "self", ".", "dtype", "==", "int64_dtype", ":", "return", "super", "(", "CustomClassifier", ",", "self", ")", ".", "_allocate_output", "(", "windows", ",", "shape", ",", ")", "# This is a little bit of a hack. We might not know what the", "# categories for a LabelArray are until it's actually been loaded, so", "# we need to look at the underlying data.", "return", "windows", "[", "0", "]", ".", "data", ".", "empty_like", "(", "shape", ")" ]
Override the default array allocation to produce a LabelArray when we have a string-like dtype.
[ "Override", "the", "default", "array", "allocation", "to", "produce", "a", "LabelArray", "when", "we", "have", "a", "string", "-", "like", "dtype", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L517-L531
train
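A plain-numpy sketch (not zipline API) of the allocation idea: size and type the output buffer from the first input window, since some properties are only knowable once data has actually been loaded.

import numpy as np

def allocate_like_first_window(windows, shape):
    # Derive the output dtype from loaded data rather than from the term.
    return np.empty(shape, dtype=windows[0].dtype)

windows = [np.zeros((3, 2), dtype='int64')]
out = allocate_like_first_window(windows, (5, 2))
print(out.dtype)  # int64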
quantopian/zipline
zipline/utils/input_validation.py
verify_indices_all_unique
def verify_indices_all_unique(obj): """ Check that all axes of a pandas object are unique. Parameters ---------- obj : pd.Series / pd.DataFrame / pd.Panel The object to validate. Returns ------- obj : pd.Series / pd.DataFrame / pd.Panel The validated object, unchanged. Raises ------ ValueError If any axis has duplicate entries. """ axis_names = [ ('index',), # Series ('index', 'columns'), # DataFrame ('items', 'major_axis', 'minor_axis') # Panel ][obj.ndim - 1] # ndim = 1 should go to entry 0, for axis_name, index in zip(axis_names, obj.axes): if index.is_unique: continue raise ValueError( "Duplicate entries in {type}.{axis}: {dupes}.".format( type=type(obj).__name__, axis=axis_name, dupes=sorted(index[index.duplicated()]), ) ) return obj
python
def verify_indices_all_unique(obj): """ Check that all axes of a pandas object are unique. Parameters ---------- obj : pd.Series / pd.DataFrame / pd.Panel The object to validate. Returns ------- obj : pd.Series / pd.DataFrame / pd.Panel The validated object, unchanged. Raises ------ ValueError If any axis has duplicate entries. """ axis_names = [ ('index',), # Series ('index', 'columns'), # DataFrame ('items', 'major_axis', 'minor_axis') # Panel ][obj.ndim - 1] # ndim = 1 should go to entry 0, for axis_name, index in zip(axis_names, obj.axes): if index.is_unique: continue raise ValueError( "Duplicate entries in {type}.{axis}: {dupes}.".format( type=type(obj).__name__, axis=axis_name, dupes=sorted(index[index.duplicated()]), ) ) return obj
[ "def", "verify_indices_all_unique", "(", "obj", ")", ":", "axis_names", "=", "[", "(", "'index'", ",", ")", ",", "# Series", "(", "'index'", ",", "'columns'", ")", ",", "# DataFrame", "(", "'items'", ",", "'major_axis'", ",", "'minor_axis'", ")", "# Panel", "]", "[", "obj", ".", "ndim", "-", "1", "]", "# ndim = 1 should go to entry 0,", "for", "axis_name", ",", "index", "in", "zip", "(", "axis_names", ",", "obj", ".", "axes", ")", ":", "if", "index", ".", "is_unique", ":", "continue", "raise", "ValueError", "(", "\"Duplicate entries in {type}.{axis}: {dupes}.\"", ".", "format", "(", "type", "=", "type", "(", "obj", ")", ".", "__name__", ",", "axis", "=", "axis_name", ",", "dupes", "=", "sorted", "(", "index", "[", "index", ".", "duplicated", "(", ")", "]", ")", ",", ")", ")", "return", "obj" ]
Check that all axes of a pandas object are unique. Parameters ---------- obj : pd.Series / pd.DataFrame / pd.Panel The object to validate. Returns ------- obj : pd.Series / pd.DataFrame / pd.Panel The validated object, unchanged. Raises ------ ValueError If any axis has duplicate entries.
[ "Check", "that", "all", "axes", "of", "a", "pandas", "object", "are", "unique", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L50-L86
train
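Usage is direct; for example:

import pandas as pd
from zipline.utils.input_validation import verify_indices_all_unique

ok = pd.Series([1, 2], index=['a', 'b'])
verify_indices_all_unique(ok)     # returns the Series unchanged

dupes = pd.DataFrame([[1], [2]], index=['a', 'a'], columns=['x'])
verify_indices_all_unique(dupes)  # ValueError: Duplicate entries in
                                  # DataFrame.index: ['a'].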
quantopian/zipline
zipline/utils/input_validation.py
optionally
def optionally(preprocessor): """Modify a preprocessor to explicitly allow `None`. Parameters ---------- preprocessor : callable[callable, str, any -> any] A preprocessor to delegate to when `arg is not None`. Returns ------- optional_preprocessor : callable[callable, str, any -> any] A preprocessor that delegates to `preprocessor` when `arg is not None`. Examples -------- >>> def preprocessor(func, argname, arg): ... if not isinstance(arg, int): ... raise TypeError('arg must be int') ... return arg ... >>> @preprocess(a=optionally(preprocessor)) ... def f(a): ... return a ... >>> f(1) # call with int 1 >>> f('a') # call with not int Traceback (most recent call last): ... TypeError: arg must be int >>> f(None) is None # call with explicit None True """ @wraps(preprocessor) def wrapper(func, argname, arg): return arg if arg is None else preprocessor(func, argname, arg) return wrapper
python
def optionally(preprocessor): """Modify a preprocessor to explicitly allow `None`. Parameters ---------- preprocessor : callable[callable, str, any -> any] A preprocessor to delegate to when `arg is not None`. Returns ------- optional_preprocessor : callable[callable, str, any -> any] A preprocessor that delegates to `preprocessor` when `arg is not None`. Examples -------- >>> def preprocessor(func, argname, arg): ... if not isinstance(arg, int): ... raise TypeError('arg must be int') ... return arg ... >>> @preprocess(a=optionally(preprocessor)) ... def f(a): ... return a ... >>> f(1) # call with int 1 >>> f('a') # call with not int Traceback (most recent call last): ... TypeError: arg must be int >>> f(None) is None # call with explicit None True """ @wraps(preprocessor) def wrapper(func, argname, arg): return arg if arg is None else preprocessor(func, argname, arg) return wrapper
[ "def", "optionally", "(", "preprocessor", ")", ":", "@", "wraps", "(", "preprocessor", ")", "def", "wrapper", "(", "func", ",", "argname", ",", "arg", ")", ":", "return", "arg", "if", "arg", "is", "None", "else", "preprocessor", "(", "func", ",", "argname", ",", "arg", ")", "return", "wrapper" ]
Modify a preprocessor to explicitly allow `None`. Parameters ---------- preprocessor : callable[callable, str, any -> any] A preprocessor to delegate to when `arg is not None`. Returns ------- optional_preprocessor : callable[callable, str, any -> any] A preprocessor that delegates to `preprocessor` when `arg is not None`. Examples -------- >>> def preprocessor(func, argname, arg): ... if not isinstance(arg, int): ... raise TypeError('arg must be int') ... return arg ... >>> @preprocess(a=optionally(preprocessor)) ... def f(a): ... return a ... >>> f(1) # call with int 1 >>> f('a') # call with not int Traceback (most recent call last): ... TypeError: arg must be int >>> f(None) is None # call with explicit None True
[ "Modify", "a", "preprocessor", "to", "explicitly", "allow", "None", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L89-L126
train
quantopian/zipline
zipline/utils/input_validation.py
ensure_dtype
def ensure_dtype(func, argname, arg): """ Argument preprocessor that converts the input into a numpy dtype. Examples -------- >>> import numpy as np >>> from zipline.utils.preprocess import preprocess >>> @preprocess(dtype=ensure_dtype) ... def foo(dtype): ... return dtype ... >>> foo(float) dtype('float64') """ try: return dtype(arg) except TypeError: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a numpy dtype.".format( func=_qualified_name(func), argname=argname, arg=arg, ), )
python
def ensure_dtype(func, argname, arg): """ Argument preprocessor that converts the input into a numpy dtype. Examples -------- >>> import numpy as np >>> from zipline.utils.preprocess import preprocess >>> @preprocess(dtype=ensure_dtype) ... def foo(dtype): ... return dtype ... >>> foo(float) dtype('float64') """ try: return dtype(arg) except TypeError: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a numpy dtype.".format( func=_qualified_name(func), argname=argname, arg=arg, ), )
[ "def", "ensure_dtype", "(", "func", ",", "argname", ",", "arg", ")", ":", "try", ":", "return", "dtype", "(", "arg", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"{func}() couldn't convert argument \"", "\"{argname}={arg!r} to a numpy dtype.\"", ".", "format", "(", "func", "=", "_qualified_name", "(", "func", ")", ",", "argname", "=", "argname", ",", "arg", "=", "arg", ",", ")", ",", ")" ]
Argument preprocessor that converts the input into a numpy dtype. Examples -------- >>> import numpy as np >>> from zipline.utils.preprocess import preprocess >>> @preprocess(dtype=ensure_dtype) ... def foo(dtype): ... return dtype ... >>> foo(float) dtype('float64')
[ "Argument", "preprocessor", "that", "converts", "the", "input", "into", "a", "numpy", "dtype", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L143-L168
train
quantopian/zipline
zipline/utils/input_validation.py
ensure_timezone
def ensure_timezone(func, argname, arg): """Argument preprocessor that converts the input into a tzinfo object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(tz=ensure_timezone) ... def foo(tz): ... return tz >>> foo('utc') <UTC> """ if isinstance(arg, tzinfo): return arg if isinstance(arg, string_types): return timezone(arg) raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a timezone.".format( func=_qualified_name(func), argname=argname, arg=arg, ), )
python
def ensure_timezone(func, argname, arg): """Argument preprocessor that converts the input into a tzinfo object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(tz=ensure_timezone) ... def foo(tz): ... return tz >>> foo('utc') <UTC> """ if isinstance(arg, tzinfo): return arg if isinstance(arg, string_types): return timezone(arg) raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a timezone.".format( func=_qualified_name(func), argname=argname, arg=arg, ), )
[ "def", "ensure_timezone", "(", "func", ",", "argname", ",", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "tzinfo", ")", ":", "return", "arg", "if", "isinstance", "(", "arg", ",", "string_types", ")", ":", "return", "timezone", "(", "arg", ")", "raise", "TypeError", "(", "\"{func}() couldn't convert argument \"", "\"{argname}={arg!r} to a timezone.\"", ".", "format", "(", "func", "=", "_qualified_name", "(", "func", ")", ",", "argname", "=", "argname", ",", "arg", "=", "arg", ",", ")", ",", ")" ]
Argument preprocessor that converts the input into a tzinfo object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(tz=ensure_timezone) ... def foo(tz): ... return tz >>> foo('utc') <UTC>
[ "Argument", "preprocessor", "that", "converts", "the", "input", "into", "a", "tzinfo", "object", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L171-L195
train
quantopian/zipline
zipline/utils/input_validation.py
ensure_timestamp
def ensure_timestamp(func, argname, arg): """Argument preprocessor that converts the input into a pandas Timestamp object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(ts=ensure_timestamp) ... def foo(ts): ... return ts >>> foo('2014-01-01') Timestamp('2014-01-01 00:00:00') """ try: return pd.Timestamp(arg) except ValueError as e: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a pandas Timestamp.\n" "Original error was: {t}: {e}".format( func=_qualified_name(func), argname=argname, arg=arg, t=_qualified_name(type(e)), e=e, ), )
python
def ensure_timestamp(func, argname, arg): """Argument preprocessor that converts the input into a pandas Timestamp object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(ts=ensure_timestamp) ... def foo(ts): ... return ts >>> foo('2014-01-01') Timestamp('2014-01-01 00:00:00') """ try: return pd.Timestamp(arg) except ValueError as e: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a pandas Timestamp.\n" "Original error was: {t}: {e}".format( func=_qualified_name(func), argname=argname, arg=arg, t=_qualified_name(type(e)), e=e, ), )
[ "def", "ensure_timestamp", "(", "func", ",", "argname", ",", "arg", ")", ":", "try", ":", "return", "pd", ".", "Timestamp", "(", "arg", ")", "except", "ValueError", "as", "e", ":", "raise", "TypeError", "(", "\"{func}() couldn't convert argument \"", "\"{argname}={arg!r} to a pandas Timestamp.\\n\"", "\"Original error was: {t}: {e}\"", ".", "format", "(", "func", "=", "_qualified_name", "(", "func", ")", ",", "argname", "=", "argname", ",", "arg", "=", "arg", ",", "t", "=", "_qualified_name", "(", "type", "(", "e", ")", ")", ",", "e", "=", "e", ",", ")", ",", ")" ]
Argument preprocessor that converts the input into a pandas Timestamp object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(ts=ensure_timestamp) ... def foo(ts): ... return ts >>> foo('2014-01-01') Timestamp('2014-01-01 00:00:00')
[ "Argument", "preprocessor", "that", "converts", "the", "input", "into", "a", "pandas", "Timestamp", "object", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L198-L224
train
quantopian/zipline
zipline/utils/input_validation.py
expect_dtypes
def expect_dtypes(__funcname=_qualified_name, **named):
    """
    Preprocessing decorator that verifies inputs have expected numpy dtypes.

    Examples
    --------
    >>> from numpy import dtype, arange, int8, float64
    >>> @expect_dtypes(x=dtype(int8))
    ... def foo(x, y):
    ...    return x, y
    ...
    >>> foo(arange(3, dtype=int8), 'foo')
    (array([0, 1, 2], dtype=int8), 'foo')
    >>> foo(arange(3, dtype=float64), 'foo')  # doctest: +NORMALIZE_WHITESPACE
    ...                                       # doctest: +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
    but got 'float64' instead.
    """
    for name, type_ in iteritems(named):
        if not isinstance(type_, (dtype, tuple)):
            raise TypeError(
                "expect_dtypes() expected a numpy dtype or tuple of dtypes"
                " for argument {name!r}, but got {dtype} instead.".format(
                    name=name, dtype=type_,
                )
            )

    if isinstance(__funcname, str):
        def get_funcname(_):
            return __funcname
    else:
        get_funcname = __funcname

    @preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_dtype(dtypes):
        """
        Factory for dtype-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # name. Otherwise just show the value.
            try:
                value_to_show = value.dtype.name
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a value with dtype {dtype_str} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=get_funcname(func),
                dtype_str=' or '.join(repr(d.name) for d in dtypes),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattr(argvalue, 'dtype', object()) not in dtypes:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_dtype, named))
python
def expect_dtypes(__funcname=_qualified_name, **named):
    """
    Preprocessing decorator that verifies inputs have expected numpy dtypes.

    Examples
    --------
    >>> from numpy import dtype, arange, int8, float64
    >>> @expect_dtypes(x=dtype(int8))
    ... def foo(x, y):
    ...    return x, y
    ...
    >>> foo(arange(3, dtype=int8), 'foo')
    (array([0, 1, 2], dtype=int8), 'foo')
    >>> foo(arange(3, dtype=float64), 'foo')  # doctest: +NORMALIZE_WHITESPACE
    ...                                       # doctest: +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
    but got 'float64' instead.
    """
    for name, type_ in iteritems(named):
        if not isinstance(type_, (dtype, tuple)):
            raise TypeError(
                "expect_dtypes() expected a numpy dtype or tuple of dtypes"
                " for argument {name!r}, but got {dtype} instead.".format(
                    name=name, dtype=type_,
                )
            )

    if isinstance(__funcname, str):
        def get_funcname(_):
            return __funcname
    else:
        get_funcname = __funcname

    @preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_dtype(dtypes):
        """
        Factory for dtype-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # name. Otherwise just show the value.
            try:
                value_to_show = value.dtype.name
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a value with dtype {dtype_str} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=get_funcname(func),
                dtype_str=' or '.join(repr(d.name) for d in dtypes),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattr(argvalue, 'dtype', object()) not in dtypes:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_dtype, named))
[ "def", "expect_dtypes", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "named", ")", ":", "for", "name", ",", "type_", "in", "iteritems", "(", "named", ")", ":", "if", "not", "isinstance", "(", "type_", ",", "(", "dtype", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"expect_dtypes() expected a numpy dtype or tuple of dtypes\"", "\" for argument {name!r}, but got {dtype} instead.\"", ".", "format", "(", "name", "=", "name", ",", "dtype", "=", "dtype", ",", ")", ")", "if", "isinstance", "(", "__funcname", ",", "str", ")", ":", "def", "get_funcname", "(", "_", ")", ":", "return", "__funcname", "else", ":", "get_funcname", "=", "__funcname", "@", "preprocess", "(", "dtypes", "=", "call", "(", "lambda", "x", ":", "x", "if", "isinstance", "(", "x", ",", "tuple", ")", "else", "(", "x", ",", ")", ")", ")", "def", "_expect_dtype", "(", "dtypes", ")", ":", "\"\"\"\n Factory for dtype-checking functions that work with the @preprocess\n decorator.\n \"\"\"", "def", "error_message", "(", "func", ",", "argname", ",", "value", ")", ":", "# If the bad value has a dtype, but it's wrong, show the dtype", "# name. Otherwise just show the value.", "try", ":", "value_to_show", "=", "value", ".", "dtype", ".", "name", "except", "AttributeError", ":", "value_to_show", "=", "value", "return", "(", "\"{funcname}() expected a value with dtype {dtype_str} \"", "\"for argument {argname!r}, but got {value!r} instead.\"", ")", ".", "format", "(", "funcname", "=", "get_funcname", "(", "func", ")", ",", "dtype_str", "=", "' or '", ".", "join", "(", "repr", "(", "d", ".", "name", ")", "for", "d", "in", "dtypes", ")", ",", "argname", "=", "argname", ",", "value", "=", "value_to_show", ",", ")", "def", "_actual_preprocessor", "(", "func", ",", "argname", ",", "argvalue", ")", ":", "if", "getattr", "(", "argvalue", ",", "'dtype'", ",", "object", "(", ")", ")", "not", "in", "dtypes", ":", "raise", "TypeError", "(", "error_message", "(", "func", ",", "argname", ",", "argvalue", ")", ")", "return", "argvalue", "return", "_actual_preprocessor", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_dtype", ",", "named", ")", ")" ]
Preprocessing decorator that verifies inputs have expected numpy dtypes. Examples -------- >>> from numpy import dtype, arange, int8, float64 >>> @expect_dtypes(x=dtype(int8)) ... def foo(x, y): ... return x, y ... >>> foo(arange(3, dtype=int8), 'foo') (array([0, 1, 2], dtype=int8), 'foo') >>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value with dtype 'int8' for argument 'x', but got 'float64' instead.
[ "Preprocessing", "decorator", "that", "verifies", "inputs", "have", "expected", "numpy", "dtypes", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L227-L292
train
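The doctest above exercises a single dtype; a tuple of dtypes is also accepted and means "any of these". A short sketch:

import numpy as np
from zipline.utils.input_validation import expect_dtypes

@expect_dtypes(x=(np.dtype('int64'), np.dtype('float64')))
def total(x):
    return x.sum()

total(np.arange(4, dtype='int64'))    # 6
total(np.arange(4, dtype='float64'))  # 6.0
total(np.arange(4, dtype='int8'))     # TypeError: ... expected a value with
                                      # dtype 'int64' or 'float64' ...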
quantopian/zipline
zipline/utils/input_validation.py
expect_kinds
def expect_kinds(**named):
    """
    Preprocessing decorator that verifies inputs have expected dtype kinds.

    Examples
    --------
    >>> from numpy import int64, int32, float32
    >>> @expect_kinds(x='i')
    ... def foo(x):
    ...    return x
    ...
    >>> foo(int64(2))
    2
    >>> foo(int32(2))
    2
    >>> foo(float32(2))  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
    but got 'f' instead.
    """
    for name, kind in iteritems(named):
        if not isinstance(kind, (str, tuple)):
            raise TypeError(
                "expect_kinds() expected a string or tuple of strings"
                " for argument {name!r}, but got {kind} instead.".format(
                    name=name, kind=kind,
                )
            )

    @preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_kind(kinds):
        """
        Factory for kind-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # kind. Otherwise just show the value.
            try:
                value_to_show = value.dtype.kind
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a numpy object of kind {kinds} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=_qualified_name(func),
                kinds=' or '.join(map(repr, kinds)),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_kind, named))
python
def expect_kinds(**named):
    """
    Preprocessing decorator that verifies inputs have expected dtype kinds.

    Examples
    --------
    >>> from numpy import int64, int32, float32
    >>> @expect_kinds(x='i')
    ... def foo(x):
    ...    return x
    ...
    >>> foo(int64(2))
    2
    >>> foo(int32(2))
    2
    >>> foo(float32(2))  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
       ...
    TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
    but got 'f' instead.
    """
    for name, kind in iteritems(named):
        if not isinstance(kind, (str, tuple)):
            raise TypeError(
                "expect_kinds() expected a string or tuple of strings"
                " for argument {name!r}, but got {kind} instead.".format(
                    name=name, kind=kind,
                )
            )

    @preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
    def _expect_kind(kinds):
        """
        Factory for kind-checking functions that work with the @preprocess
        decorator.
        """
        def error_message(func, argname, value):
            # If the bad value has a dtype, but it's wrong, show the dtype
            # kind. Otherwise just show the value.
            try:
                value_to_show = value.dtype.kind
            except AttributeError:
                value_to_show = value
            return (
                "{funcname}() expected a numpy object of kind {kinds} "
                "for argument {argname!r}, but got {value!r} instead."
            ).format(
                funcname=_qualified_name(func),
                kinds=' or '.join(map(repr, kinds)),
                argname=argname,
                value=value_to_show,
            )

        def _actual_preprocessor(func, argname, argvalue):
            if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
                raise TypeError(error_message(func, argname, argvalue))
            return argvalue

        return _actual_preprocessor

    return preprocess(**valmap(_expect_kind, named))
[ "def", "expect_kinds", "(", "*", "*", "named", ")", ":", "for", "name", ",", "kind", "in", "iteritems", "(", "named", ")", ":", "if", "not", "isinstance", "(", "kind", ",", "(", "str", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"expect_dtype_kinds() expected a string or tuple of strings\"", "\" for argument {name!r}, but got {kind} instead.\"", ".", "format", "(", "name", "=", "name", ",", "kind", "=", "dtype", ",", ")", ")", "@", "preprocess", "(", "kinds", "=", "call", "(", "lambda", "x", ":", "x", "if", "isinstance", "(", "x", ",", "tuple", ")", "else", "(", "x", ",", ")", ")", ")", "def", "_expect_kind", "(", "kinds", ")", ":", "\"\"\"\n Factory for kind-checking functions that work the @preprocess\n decorator.\n \"\"\"", "def", "error_message", "(", "func", ",", "argname", ",", "value", ")", ":", "# If the bad value has a dtype, but it's wrong, show the dtype", "# kind. Otherwise just show the value.", "try", ":", "value_to_show", "=", "value", ".", "dtype", ".", "kind", "except", "AttributeError", ":", "value_to_show", "=", "value", "return", "(", "\"{funcname}() expected a numpy object of kind {kinds} \"", "\"for argument {argname!r}, but got {value!r} instead.\"", ")", ".", "format", "(", "funcname", "=", "_qualified_name", "(", "func", ")", ",", "kinds", "=", "' or '", ".", "join", "(", "map", "(", "repr", ",", "kinds", ")", ")", ",", "argname", "=", "argname", ",", "value", "=", "value_to_show", ",", ")", "def", "_actual_preprocessor", "(", "func", ",", "argname", ",", "argvalue", ")", ":", "if", "getattrs", "(", "argvalue", ",", "(", "'dtype'", ",", "'kind'", ")", ",", "object", "(", ")", ")", "not", "in", "kinds", ":", "raise", "TypeError", "(", "error_message", "(", "func", ",", "argname", ",", "argvalue", ")", ")", "return", "argvalue", "return", "_actual_preprocessor", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_kind", ",", "named", ")", ")" ]
Preprocessing decorator that verifies inputs have expected dtype kinds. Examples -------- >>> from numpy import int64, int32, float32 >>> @expect_kinds(x='i') ... def foo(x): ... return x ... >>> foo(int64(2)) 2 >>> foo(int32(2)) 2 >>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x', but got 'f' instead.
[ "Preprocessing", "decorator", "that", "verifies", "inputs", "have", "expected", "dtype", "kinds", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L295-L355
train
quantopian/zipline
zipline/utils/input_validation.py
expect_types
def expect_types(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected types. Examples -------- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value of type int for argument 'x', but got float instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. """ for name, type_ in iteritems(named): if not isinstance(type_, (type, tuple)): raise TypeError( "expect_types() expected a type or tuple of types for " "argument '{name}', but got {type_} instead.".format( name=name, type_=type_, ) ) def _expect_type(type_): # Slightly different messages for type and tuple of types. _template = ( "%(funcname)s() expected a value of type {type_or_types} " "for argument '%(argname)s', but got %(actual)s instead." ) if isinstance(type_, tuple): template = _template.format( type_or_types=' or '.join(map(_qualified_name, type_)) ) else: template = _template.format(type_or_types=_qualified_name(type_)) return make_check( exc_type=TypeError, template=template, pred=lambda v: not isinstance(v, type_), actual=compose(_qualified_name, type), funcname=__funcname, ) return preprocess(**valmap(_expect_type, named))
python
def expect_types(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected types. Examples -------- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value of type int for argument 'x', but got float instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. """ for name, type_ in iteritems(named): if not isinstance(type_, (type, tuple)): raise TypeError( "expect_types() expected a type or tuple of types for " "argument '{name}', but got {type_} instead.".format( name=name, type_=type_, ) ) def _expect_type(type_): # Slightly different messages for type and tuple of types. _template = ( "%(funcname)s() expected a value of type {type_or_types} " "for argument '%(argname)s', but got %(actual)s instead." ) if isinstance(type_, tuple): template = _template.format( type_or_types=' or '.join(map(_qualified_name, type_)) ) else: template = _template.format(type_or_types=_qualified_name(type_)) return make_check( exc_type=TypeError, template=template, pred=lambda v: not isinstance(v, type_), actual=compose(_qualified_name, type), funcname=__funcname, ) return preprocess(**valmap(_expect_type, named))
[ "def", "expect_types", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "named", ")", ":", "for", "name", ",", "type_", "in", "iteritems", "(", "named", ")", ":", "if", "not", "isinstance", "(", "type_", ",", "(", "type", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"expect_types() expected a type or tuple of types for \"", "\"argument '{name}', but got {type_} instead.\"", ".", "format", "(", "name", "=", "name", ",", "type_", "=", "type_", ",", ")", ")", "def", "_expect_type", "(", "type_", ")", ":", "# Slightly different messages for type and tuple of types.", "_template", "=", "(", "\"%(funcname)s() expected a value of type {type_or_types} \"", "\"for argument '%(argname)s', but got %(actual)s instead.\"", ")", "if", "isinstance", "(", "type_", ",", "tuple", ")", ":", "template", "=", "_template", ".", "format", "(", "type_or_types", "=", "' or '", ".", "join", "(", "map", "(", "_qualified_name", ",", "type_", ")", ")", ")", "else", ":", "template", "=", "_template", ".", "format", "(", "type_or_types", "=", "_qualified_name", "(", "type_", ")", ")", "return", "make_check", "(", "exc_type", "=", "TypeError", ",", "template", "=", "template", ",", "pred", "=", "lambda", "v", ":", "not", "isinstance", "(", "v", ",", "type_", ")", ",", "actual", "=", "compose", "(", "_qualified_name", ",", "type", ")", ",", "funcname", "=", "__funcname", ",", ")", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_type", ",", "named", ")", ")" ]
Preprocessing decorator that verifies inputs have expected types. Examples -------- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value of type int for argument 'x', but got float instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name.
[ "Preprocessing", "decorator", "that", "verifies", "inputs", "have", "expected", "types", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L358-L413
train
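A sketch of the __funcname override described in the Notes. It is passed positionally here (it is the first parameter), which also sidesteps Python's name mangling of double-underscore keyword arguments inside class bodies.

from zipline.utils.input_validation import expect_types

class Order(object):
    # Errors now read "Order() expected ..." instead of naming __init__.
    @expect_types('Order', amount=int)
    def __init__(self, amount):
        self.amount = amount

Order(10)    # ok
Order('10')  # TypeError: Order() expected a value of type int for argument
             # 'amount', but got str instead.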
quantopian/zipline
zipline/utils/input_validation.py
make_check
def make_check(exc_type, template, pred, actual, funcname): """ Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name. """ if isinstance(funcname, str): def get_funcname(_): return funcname else: get_funcname = funcname def _check(func, argname, argvalue): if pred(argvalue): raise exc_type( template % { 'funcname': get_funcname(func), 'argname': argname, 'actual': actual(argvalue), }, ) return argvalue return _check
python
def make_check(exc_type, template, pred, actual, funcname): """ Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name. """ if isinstance(funcname, str): def get_funcname(_): return funcname else: get_funcname = funcname def _check(func, argname, argvalue): if pred(argvalue): raise exc_type( template % { 'funcname': get_funcname(func), 'argname': argname, 'actual': actual(argvalue), }, ) return argvalue return _check
[ "def", "make_check", "(", "exc_type", ",", "template", ",", "pred", ",", "actual", ",", "funcname", ")", ":", "if", "isinstance", "(", "funcname", ",", "str", ")", ":", "def", "get_funcname", "(", "_", ")", ":", "return", "funcname", "else", ":", "get_funcname", "=", "funcname", "def", "_check", "(", "func", ",", "argname", ",", "argvalue", ")", ":", "if", "pred", "(", "argvalue", ")", ":", "raise", "exc_type", "(", "template", "%", "{", "'funcname'", ":", "get_funcname", "(", "func", ")", ",", "'argname'", ":", "argname", ",", "'actual'", ":", "actual", "(", "argvalue", ")", ",", "}", ",", ")", "return", "argvalue", "return", "_check" ]
Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name.
[ "Factory", "for", "making", "preprocessing", "functions", "that", "check", "a", "predicate", "on", "the", "input", "value", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L416-L457
train
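make_check is the factory underlying the expect_* decorators above, and its output composes directly with @preprocess. A sketch building a reusable "must be positive" check:

from zipline.utils.input_validation import make_check
from zipline.utils.preprocess import preprocess

check_positive = make_check(
    exc_type=ValueError,
    template="%(funcname)s() expected a positive value for argument"
             " '%(argname)s', but got %(actual)s instead.",
    pred=lambda v: v <= 0,  # True means "reject this value"
    actual=repr,            # how the bad value is rendered in the message
    funcname='scale',       # explicit name shown in the error
)

@preprocess(x=check_positive)
def scale(x):
    return 2 * x

scale(3)   # 6
scale(-1)  # ValueError: scale() expected a positive value for argument 'x',
           # but got -1 instead.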
quantopian/zipline
zipline/utils/input_validation.py
expect_element
def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """ def _expect_element(collection): if isinstance(collection, (set, frozenset)): # Special case the error message for set and frozen set to make it # less verbose. collection_for_error_message = tuple(sorted(collection)) else: collection_for_error_message = collection template = ( "%(funcname)s() expected a value in {collection} " "for argument '%(argname)s', but got %(actual)s instead." ).format(collection=collection_for_error_message) return make_check( ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname, ) return preprocess(**valmap(_expect_element, named))
python
def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """ def _expect_element(collection): if isinstance(collection, (set, frozenset)): # Special case the error message for set and frozen set to make it # less verbose. collection_for_error_message = tuple(sorted(collection)) else: collection_for_error_message = collection template = ( "%(funcname)s() expected a value in {collection} " "for argument '%(argname)s', but got %(actual)s instead." ).format(collection=collection_for_error_message) return make_check( ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname, ) return preprocess(**valmap(_expect_element, named))
[ "def", "expect_element", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "named", ")", ":", "def", "_expect_element", "(", "collection", ")", ":", "if", "isinstance", "(", "collection", ",", "(", "set", ",", "frozenset", ")", ")", ":", "# Special case the error message for set and frozen set to make it", "# less verbose.", "collection_for_error_message", "=", "tuple", "(", "sorted", "(", "collection", ")", ")", "else", ":", "collection_for_error_message", "=", "collection", "template", "=", "(", "\"%(funcname)s() expected a value in {collection} \"", "\"for argument '%(argname)s', but got %(actual)s instead.\"", ")", ".", "format", "(", "collection", "=", "collection_for_error_message", ")", "return", "make_check", "(", "ValueError", ",", "template", ",", "complement", "(", "op", ".", "contains", "(", "collection", ")", ")", ",", "repr", ",", "funcname", "=", "__funcname", ",", ")", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_element", ",", "named", ")", ")" ]
Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol.
[ "Preprocessing", "decorator", "that", "verifies", "inputs", "are", "elements", "of", "some", "expected", "collection", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L484-L535
train
quantopian/zipline
zipline/utils/input_validation.py
expect_bounded
def expect_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value > upper predicate_descr = "less than or equal to " + str(upper) elif upper is None: def should_fail(value): return value < lower predicate_descr = "greater than or equal to " + str(lower) else: def should_fail(value): return not (lower <= value <= upper) predicate_descr = "inclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
python
def expect_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value > upper predicate_descr = "less than or equal to " + str(upper) elif upper is None: def should_fail(value): return value < lower predicate_descr = "greater than or equal to " + str(lower) else: def should_fail(value): return not (lower <= value <= upper) predicate_descr = "inclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
[ "def", "expect_bounded", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "named", ")", ":", "def", "_make_bounded_check", "(", "bounds", ")", ":", "(", "lower", ",", "upper", ")", "=", "bounds", "if", "lower", "is", "None", ":", "def", "should_fail", "(", "value", ")", ":", "return", "value", ">", "upper", "predicate_descr", "=", "\"less than or equal to \"", "+", "str", "(", "upper", ")", "elif", "upper", "is", "None", ":", "def", "should_fail", "(", "value", ")", ":", "return", "value", "<", "lower", "predicate_descr", "=", "\"greater than or equal to \"", "+", "str", "(", "lower", ")", "else", ":", "def", "should_fail", "(", "value", ")", ":", "return", "not", "(", "lower", "<=", "value", "<=", "upper", ")", "predicate_descr", "=", "\"inclusively between %s and %s\"", "%", "bounds", "template", "=", "(", "\"%(funcname)s() expected a value {predicate}\"", "\" for argument '%(argname)s', but got %(actual)s instead.\"", ")", ".", "format", "(", "predicate", "=", "predicate_descr", ")", "return", "make_check", "(", "exc_type", "=", "ValueError", ",", "template", "=", "template", ",", "pred", "=", "should_fail", ",", "actual", "=", "repr", ",", "funcname", "=", "__funcname", ",", ")", "return", "_expect_bounded", "(", "_make_bounded_check", ",", "__funcname", "=", "__funcname", ",", "*", "*", "named", ")" ]
Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead.
[ "Preprocessing", "decorator", "verifying", "that", "inputs", "fall", "INCLUSIVELY", "between", "bounds", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L538-L614
train
quantopian/zipline
zipline/utils/input_validation.py
expect_dimensions
def expect_dimensions(__funcname=_qualified_name, **dimensions): """ Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead. """ if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname def _expect_dimension(expected_ndim): def _check(func, argname, argvalue): actual_ndim = argvalue.ndim if actual_ndim != expected_ndim: if actual_ndim == 0: actual_repr = 'scalar' else: actual_repr = "%d-D array" % actual_ndim raise ValueError( "{func}() expected a {expected:d}-D array" " for argument {argname!r}, but got a {actual}" " instead.".format( func=get_funcname(func), expected=expected_ndim, argname=argname, actual=actual_repr, ) ) return argvalue return _check return preprocess(**valmap(_expect_dimension, dimensions))
python
def expect_dimensions(__funcname=_qualified_name, **dimensions): """ Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead. """ if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname def _expect_dimension(expected_ndim): def _check(func, argname, argvalue): actual_ndim = argvalue.ndim if actual_ndim != expected_ndim: if actual_ndim == 0: actual_repr = 'scalar' else: actual_repr = "%d-D array" % actual_ndim raise ValueError( "{func}() expected a {expected:d}-D array" " for argument {argname!r}, but got a {actual}" " instead.".format( func=get_funcname(func), expected=expected_ndim, argname=argname, actual=actual_repr, ) ) return argvalue return _check return preprocess(**valmap(_expect_dimension, dimensions))
[ "def", "expect_dimensions", "(", "__funcname", "=", "_qualified_name", ",", "*", "*", "dimensions", ")", ":", "if", "isinstance", "(", "__funcname", ",", "str", ")", ":", "def", "get_funcname", "(", "_", ")", ":", "return", "__funcname", "else", ":", "get_funcname", "=", "__funcname", "def", "_expect_dimension", "(", "expected_ndim", ")", ":", "def", "_check", "(", "func", ",", "argname", ",", "argvalue", ")", ":", "actual_ndim", "=", "argvalue", ".", "ndim", "if", "actual_ndim", "!=", "expected_ndim", ":", "if", "actual_ndim", "==", "0", ":", "actual_repr", "=", "'scalar'", "else", ":", "actual_repr", "=", "\"%d-D array\"", "%", "actual_ndim", "raise", "ValueError", "(", "\"{func}() expected a {expected:d}-D array\"", "\" for argument {argname!r}, but got a {actual}\"", "\" instead.\"", ".", "format", "(", "func", "=", "get_funcname", "(", "func", ")", ",", "expected", "=", "expected_ndim", ",", "argname", "=", "argname", ",", "actual", "=", "actual_repr", ",", ")", ")", "return", "argvalue", "return", "_check", "return", "preprocess", "(", "*", "*", "valmap", "(", "_expect_dimension", ",", "dimensions", ")", ")" ]
Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead.
[ "Preprocessing", "decorator", "that", "verifies", "inputs", "are", "numpy", "arrays", "with", "a", "specific", "dimensionality", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L717-L764
train
quantopian/zipline
zipline/utils/input_validation.py
coerce
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        if isinstance(arg, from_):
            return to(arg, **to_kwargs)
        return arg
    return preprocessor
python
def coerce(from_, to, **to_kwargs):
    """
    A preprocessing decorator that coerces inputs of a given type by passing
    them to a callable.

    Parameters
    ----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
    to : function
        Coercion function to call on inputs.
    **to_kwargs
        Additional keywords to forward to every call to ``to``.

    Examples
    --------
    >>> @preprocess(x=coerce(float, int), y=coerce(float, int))
    ... def floordiff(x, y):
    ...     return x - y
    ...
    >>> floordiff(3.2, 2.5)
    1

    >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
    ... def add_binary_strings(x, y):
    ...     return bin(x + y)[2:]
    ...
    >>> add_binary_strings('101', '001')
    '110'
    """
    def preprocessor(func, argname, arg):
        if isinstance(arg, from_):
            return to(arg, **to_kwargs)
        return arg
    return preprocessor
[ "def", "coerce", "(", "from_", ",", "to", ",", "*", "*", "to_kwargs", ")", ":", "def", "preprocessor", "(", "func", ",", "argname", ",", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "from_", ")", ":", "return", "to", "(", "arg", ",", "*", "*", "to_kwargs", ")", "return", "arg", "return", "preprocessor" ]
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.

Parameters
----------
from_ : type or tuple of types
    Input types on which to call ``to``.
to : function
    Coercion function to call on inputs.
**to_kwargs
    Additional keywords to forward to every call to ``to``.

Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
...     return x - y
...
>>> floordiff(3.2, 2.5)
1

>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
...     return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
[ "A", "preprocessing", "decorator", "that", "coerces", "inputs", "of", "a", "given", "type", "by", "passing", "them", "to", "a", "callable", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L767-L801
train
quantopian/zipline
zipline/utils/input_validation.py
coerce_types
def coerce_types(**kwargs): """ Preprocessing decorator that applies type coercions. Parameters ---------- **kwargs : dict[str -> (type, callable)] Keyword arguments mapping function parameter names to pairs of (from_type, to_type). Examples -------- >>> @coerce_types(x=(float, int), y=(int, str)) ... def func(x, y): ... return (x, y) ... >>> func(1.0, 3) (1, '3') """ def _coerce(types): return coerce(*types) return preprocess(**valmap(_coerce, kwargs))
python
def coerce_types(**kwargs): """ Preprocessing decorator that applies type coercions. Parameters ---------- **kwargs : dict[str -> (type, callable)] Keyword arguments mapping function parameter names to pairs of (from_type, to_type). Examples -------- >>> @coerce_types(x=(float, int), y=(int, str)) ... def func(x, y): ... return (x, y) ... >>> func(1.0, 3) (1, '3') """ def _coerce(types): return coerce(*types) return preprocess(**valmap(_coerce, kwargs))
[ "def", "coerce_types", "(", "*", "*", "kwargs", ")", ":", "def", "_coerce", "(", "types", ")", ":", "return", "coerce", "(", "*", "types", ")", "return", "preprocess", "(", "*", "*", "valmap", "(", "_coerce", ",", "kwargs", ")", ")" ]
Preprocessing decorator that applies type coercions. Parameters ---------- **kwargs : dict[str -> (type, callable)] Keyword arguments mapping function parameter names to pairs of (from_type, to_type). Examples -------- >>> @coerce_types(x=(float, int), y=(int, str)) ... def func(x, y): ... return (x, y) ... >>> func(1.0, 3) (1, '3')
[ "Preprocessing", "decorator", "that", "applies", "type", "coercions", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L804-L826
train
quantopian/zipline
zipline/utils/input_validation.py
validate_keys
def validate_keys(dict_, expected, funcname): """Validate that a dictionary has an expected set of keys. """ expected = set(expected) received = set(dict_) missing = expected - received if missing: raise ValueError( "Missing keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) ) unexpected = received - expected if unexpected: raise ValueError( "Unexpected keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) )
python
def validate_keys(dict_, expected, funcname): """Validate that a dictionary has an expected set of keys. """ expected = set(expected) received = set(dict_) missing = expected - received if missing: raise ValueError( "Missing keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) ) unexpected = received - expected if unexpected: raise ValueError( "Unexpected keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) )
[ "def", "validate_keys", "(", "dict_", ",", "expected", ",", "funcname", ")", ":", "expected", "=", "set", "(", "expected", ")", "received", "=", "set", "(", "dict_", ")", "missing", "=", "expected", "-", "received", "if", "missing", ":", "raise", "ValueError", "(", "\"Missing keys in {}:\\n\"", "\"Expected Keys: {}\\n\"", "\"Received Keys: {}\"", ".", "format", "(", "funcname", ",", "sorted", "(", "expected", ")", ",", "sorted", "(", "received", ")", ",", ")", ")", "unexpected", "=", "received", "-", "expected", "if", "unexpected", ":", "raise", "ValueError", "(", "\"Unexpected keys in {}:\\n\"", "\"Expected Keys: {}\\n\"", "\"Received Keys: {}\"", ".", "format", "(", "funcname", ",", "sorted", "(", "expected", ")", ",", "sorted", "(", "received", ")", ",", ")", ")" ]
Validate that a dictionary has an expected set of keys.
[ "Validate", "that", "a", "dictionary", "has", "an", "expected", "set", "of", "keys", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/input_validation.py#L847-L875
train
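A quick usage sketch for validate_keys above, assuming zipline is installed; the sample dicts, key sets, and the 'example' funcname are illustrative only.

from zipline.utils.input_validation import validate_keys

# Matching key sets pass silently.
validate_keys({'a': 1, 'b': 2}, expected=('a', 'b'), funcname='example')

# A missing (or unexpected) key raises ValueError listing both key sets.
try:
    validate_keys({'a': 1}, expected=('a', 'b'), funcname='example')
except ValueError as e:
    print(e)  # Missing keys in example: Expected Keys: ['a', 'b'] ...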
quantopian/zipline
zipline/utils/enum.py
enum
def enum(option, *options): """ Construct a new enum object. Parameters ---------- *options : iterable of str The names of the fields for the enum. Returns ------- enum A new enum collection. Examples -------- >>> e = enum('a', 'b', 'c') >>> e <enum: ('a', 'b', 'c')> >>> e.a 0 >>> e.b 1 >>> e.a in e True >>> tuple(e) (0, 1, 2) Notes ----- Identity checking is not guaranteed to work with enum members, instead equality checks should be used. From CPython's documentation: "The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you actually just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behaviour of Python in this case is undefined. :-)" """ options = (option,) + options rangeob = range(len(options)) try: inttype = _inttypes[int(np.log2(len(options) - 1)) // 8] except IndexError: raise OverflowError( 'Cannot store enums with more than sys.maxsize elements, got %d' % len(options), ) class _enum(Structure): _fields_ = [(o, inttype) for o in options] def __iter__(self): return iter(rangeob) def __contains__(self, value): return 0 <= value < len(options) def __repr__(self): return '<enum: %s>' % ( ('%d fields' % len(options)) if len(options) > 10 else repr(options) ) return _enum(*rangeob)
python
def enum(option, *options): """ Construct a new enum object. Parameters ---------- *options : iterable of str The names of the fields for the enum. Returns ------- enum A new enum collection. Examples -------- >>> e = enum('a', 'b', 'c') >>> e <enum: ('a', 'b', 'c')> >>> e.a 0 >>> e.b 1 >>> e.a in e True >>> tuple(e) (0, 1, 2) Notes ----- Identity checking is not guaranteed to work with enum members, instead equality checks should be used. From CPython's documentation: "The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you actually just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behaviour of Python in this case is undefined. :-)" """ options = (option,) + options rangeob = range(len(options)) try: inttype = _inttypes[int(np.log2(len(options) - 1)) // 8] except IndexError: raise OverflowError( 'Cannot store enums with more than sys.maxsize elements, got %d' % len(options), ) class _enum(Structure): _fields_ = [(o, inttype) for o in options] def __iter__(self): return iter(rangeob) def __contains__(self, value): return 0 <= value < len(options) def __repr__(self): return '<enum: %s>' % ( ('%d fields' % len(options)) if len(options) > 10 else repr(options) ) return _enum(*rangeob)
[ "def", "enum", "(", "option", ",", "*", "options", ")", ":", "options", "=", "(", "option", ",", ")", "+", "options", "rangeob", "=", "range", "(", "len", "(", "options", ")", ")", "try", ":", "inttype", "=", "_inttypes", "[", "int", "(", "np", ".", "log2", "(", "len", "(", "options", ")", "-", "1", ")", ")", "//", "8", "]", "except", "IndexError", ":", "raise", "OverflowError", "(", "'Cannot store enums with more than sys.maxsize elements, got %d'", "%", "len", "(", "options", ")", ",", ")", "class", "_enum", "(", "Structure", ")", ":", "_fields_", "=", "[", "(", "o", ",", "inttype", ")", "for", "o", "in", "options", "]", "def", "__iter__", "(", "self", ")", ":", "return", "iter", "(", "rangeob", ")", "def", "__contains__", "(", "self", ",", "value", ")", ":", "return", "0", "<=", "value", "<", "len", "(", "options", ")", "def", "__repr__", "(", "self", ")", ":", "return", "'<enum: %s>'", "%", "(", "(", "'%d fields'", "%", "len", "(", "options", ")", ")", "if", "len", "(", "options", ")", ">", "10", "else", "repr", "(", "options", ")", ")", "return", "_enum", "(", "*", "rangeob", ")" ]
Construct a new enum object. Parameters ---------- *options : iterable of str The names of the fields for the enum. Returns ------- enum A new enum collection. Examples -------- >>> e = enum('a', 'b', 'c') >>> e <enum: ('a', 'b', 'c')> >>> e.a 0 >>> e.b 1 >>> e.a in e True >>> tuple(e) (0, 1, 2) Notes ----- Identity checking is not guaranteed to work with enum members, instead equality checks should be used. From CPython's documentation: "The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you actually just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behaviour of Python in this case is undefined. :-)"
[ "Construct", "a", "new", "enum", "object", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/enum.py#L48-L114
train
quantopian/zipline
zipline/utils/data.py
RollingPanel.oldest_frame
def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._start_index, :] return self.buffer.iloc[:, self._start_index, :]
python
def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._start_index, :] return self.buffer.iloc[:, self._start_index, :]
[ "def", "oldest_frame", "(", "self", ",", "raw", "=", "False", ")", ":", "if", "raw", ":", "return", "self", ".", "buffer", ".", "values", "[", ":", ",", "self", ".", "_start_index", ",", ":", "]", "return", "self", ".", "buffer", ".", "iloc", "[", ":", ",", "self", ".", "_start_index", ",", ":", "]" ]
Get the oldest frame in the panel.
[ "Get", "the", "oldest", "frame", "in", "the", "panel", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L82-L88
train
quantopian/zipline
zipline/utils/data.py
RollingPanel.extend_back
def extend_back(self, missing_dts):
    """
    Extends the buffer back to hold the given missing dates. The window
    grows by ``len(missing_dts)``; the newly exposed leading slots are
    NaN-filled before the missing dates are written into the date buffer.
    """
    delta = len(missing_dts)

    if not delta:
        raise ValueError(
            'missing_dts must be a non-empty index',
        )

    self._window += delta
    self._pos += delta

    self.date_buf = self.date_buf.copy()
    self.date_buf.resize(self.cap)
    self.date_buf = np.roll(self.date_buf, delta)

    old_vals = self.buffer.values
    shape = old_vals.shape
    nan_arr = np.empty((shape[0], delta, shape[2]))
    nan_arr.fill(np.nan)

    new_vals = np.column_stack(
        (nan_arr,
         old_vals,
         np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
    )

    self.buffer = pd.Panel(
        data=new_vals,
        items=self.items,
        minor_axis=self.minor_axis,
        major_axis=np.arange(self.cap),
        dtype=self.dtype,
    )

    # Fill the delta with the dates we calculated.
    where = slice(self._start_index, self._start_index + delta)
    self.date_buf[where] = missing_dts
python
def extend_back(self, missing_dts):
    """
    Extends the buffer back to hold the given missing dates. The window
    grows by ``len(missing_dts)``; the newly exposed leading slots are
    NaN-filled before the missing dates are written into the date buffer.
    """
    delta = len(missing_dts)

    if not delta:
        raise ValueError(
            'missing_dts must be a non-empty index',
        )

    self._window += delta
    self._pos += delta

    self.date_buf = self.date_buf.copy()
    self.date_buf.resize(self.cap)
    self.date_buf = np.roll(self.date_buf, delta)

    old_vals = self.buffer.values
    shape = old_vals.shape
    nan_arr = np.empty((shape[0], delta, shape[2]))
    nan_arr.fill(np.nan)

    new_vals = np.column_stack(
        (nan_arr,
         old_vals,
         np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
    )

    self.buffer = pd.Panel(
        data=new_vals,
        items=self.items,
        minor_axis=self.minor_axis,
        major_axis=np.arange(self.cap),
        dtype=self.dtype,
    )

    # Fill the delta with the dates we calculated.
    where = slice(self._start_index, self._start_index + delta)
    self.date_buf[where] = missing_dts
[ "def", "extend_back", "(", "self", ",", "missing_dts", ")", ":", "delta", "=", "len", "(", "missing_dts", ")", "if", "not", "delta", ":", "raise", "ValueError", "(", "'missing_dts must be a non-empty index'", ",", ")", "self", ".", "_window", "+=", "delta", "self", ".", "_pos", "+=", "delta", "self", ".", "date_buf", "=", "self", ".", "date_buf", ".", "copy", "(", ")", "self", ".", "date_buf", ".", "resize", "(", "self", ".", "cap", ")", "self", ".", "date_buf", "=", "np", ".", "roll", "(", "self", ".", "date_buf", ",", "delta", ")", "old_vals", "=", "self", ".", "buffer", ".", "values", "shape", "=", "old_vals", ".", "shape", "nan_arr", "=", "np", ".", "empty", "(", "(", "shape", "[", "0", "]", ",", "delta", ",", "shape", "[", "2", "]", ")", ")", "nan_arr", ".", "fill", "(", "np", ".", "nan", ")", "new_vals", "=", "np", ".", "column_stack", "(", "(", "nan_arr", ",", "old_vals", ",", "np", ".", "empty", "(", "(", "shape", "[", "0", "]", ",", "delta", "*", "(", "self", ".", "cap_multiple", "-", "1", ")", ",", "shape", "[", "2", "]", ")", ")", ")", ",", ")", "self", ".", "buffer", "=", "pd", ".", "Panel", "(", "data", "=", "new_vals", ",", "items", "=", "self", ".", "items", ",", "minor_axis", "=", "self", ".", "minor_axis", ",", "major_axis", "=", "np", ".", "arange", "(", "self", ".", "cap", ")", ",", "dtype", "=", "self", ".", "dtype", ",", ")", "# Fill the delta with the dates we calculated.", "where", "=", "slice", "(", "self", ".", "_start_index", ",", "self", ".", "_start_index", "+", "delta", ")", "self", ".", "date_buf", "[", "where", "]", "=", "missing_dts" ]
Extends the buffer back to hold the given missing dates. The window
grows by ``len(missing_dts)``; the newly exposed leading slots are
NaN-filled before the missing dates are written into the date buffer.
[ "Resizes", "the", "buffer", "to", "hold", "a", "new", "window", "with", "a", "new", "cap_multiple", ".", "If", "cap_multiple", "is", "None", "then", "the", "old", "cap_multiple", "is", "used", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L107-L148
train
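The back-extension above boils down to growing the date buffer at the front and NaN-filling the matching value slots. Below is a standalone numpy sketch of that trick; the shapes and dates are illustrative, and np.resize stands in for the in-place ndarray.resize used in extend_back.

import numpy as np

delta = 2  # number of missing dates to prepend
date_buf = np.array(['2014-01-06', '2014-01-07'], dtype='datetime64[D]')
values = np.ones((1, 2, 1))  # (items, dates, sids)

# Grow the date buffer and shift the existing dates right by `delta`.
date_buf = np.roll(np.resize(date_buf, len(date_buf) + delta), delta)

# NaN-fill the matching leading value slots.
nan_block = np.full((values.shape[0], delta, values.shape[2]), np.nan)
values = np.concatenate([nan_block, values], axis=1)

# Write the missing dates into the newly exposed slots.
date_buf[:delta] = np.array(['2014-01-02', '2014-01-03'], dtype='datetime64[D]')
print(date_buf)       # the missing dates now lead the buffer
print(values.shape)   # (1, 4, 1), NaNs in the two new columns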
quantopian/zipline
zipline/utils/data.py
RollingPanel.get_current
def get_current(self, item=None, raw=False, start=None, end=None):
    """
    Get a Panel that is the current data in view. It is not safe to persist
    these objects because internal data might change
    """
    item_indexer = slice(None)
    if item:
        item_indexer = self.items.get_loc(item)

    start_index = self._start_index
    end_index = self._pos

    # get initial date window
    where = slice(start_index, end_index)
    current_dates = self.date_buf[where]

    def convert_datelike_to_long(dt):
        if isinstance(dt, pd.Timestamp):
            return dt.asm8
        if isinstance(dt, datetime.datetime):
            return np.datetime64(dt)
        return dt

    # constrict further by date
    if start:
        start = convert_datelike_to_long(start)
        start_index += current_dates.searchsorted(start)

    if end:
        end = convert_datelike_to_long(end)
        _end = current_dates.searchsorted(end, 'right')
        end_index -= len(current_dates) - _end

    where = slice(start_index, end_index)

    values = self.buffer.values[item_indexer, where, :]
    current_dates = self.date_buf[where]

    if raw:
        # return copy so we can change it without side effects here
        return values.copy()

    major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
    if values.ndim == 3:
        return pd.Panel(values, self.items, major_axis, self.minor_axis,
                        dtype=self.dtype)
    elif values.ndim == 2:
        return pd.DataFrame(values, major_axis, self.minor_axis,
                            dtype=self.dtype)
python
def get_current(self, item=None, raw=False, start=None, end=None):
    """
    Get a Panel that is the current data in view. It is not safe to persist
    these objects because internal data might change
    """
    item_indexer = slice(None)
    if item:
        item_indexer = self.items.get_loc(item)

    start_index = self._start_index
    end_index = self._pos

    # get initial date window
    where = slice(start_index, end_index)
    current_dates = self.date_buf[where]

    def convert_datelike_to_long(dt):
        if isinstance(dt, pd.Timestamp):
            return dt.asm8
        if isinstance(dt, datetime.datetime):
            return np.datetime64(dt)
        return dt

    # constrict further by date
    if start:
        start = convert_datelike_to_long(start)
        start_index += current_dates.searchsorted(start)

    if end:
        end = convert_datelike_to_long(end)
        _end = current_dates.searchsorted(end, 'right')
        end_index -= len(current_dates) - _end

    where = slice(start_index, end_index)

    values = self.buffer.values[item_indexer, where, :]
    current_dates = self.date_buf[where]

    if raw:
        # return copy so we can change it without side effects here
        return values.copy()

    major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
    if values.ndim == 3:
        return pd.Panel(values, self.items, major_axis, self.minor_axis,
                        dtype=self.dtype)
    elif values.ndim == 2:
        return pd.DataFrame(values, major_axis, self.minor_axis,
                            dtype=self.dtype)
[ "def", "get_current", "(", "self", ",", "item", "=", "None", ",", "raw", "=", "False", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "item_indexer", "=", "slice", "(", "None", ")", "if", "item", ":", "item_indexer", "=", "self", ".", "items", ".", "get_loc", "(", "item", ")", "start_index", "=", "self", ".", "_start_index", "end_index", "=", "self", ".", "_pos", "# get inital date window", "where", "=", "slice", "(", "start_index", ",", "end_index", ")", "current_dates", "=", "self", ".", "date_buf", "[", "where", "]", "def", "convert_datelike_to_long", "(", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "pd", ".", "Timestamp", ")", ":", "return", "dt", ".", "asm8", "if", "isinstance", "(", "dt", ",", "datetime", ".", "datetime", ")", ":", "return", "np", ".", "datetime64", "(", "dt", ")", "return", "dt", "# constrict further by date", "if", "start", ":", "start", "=", "convert_datelike_to_long", "(", "start", ")", "start_index", "+=", "current_dates", ".", "searchsorted", "(", "start", ")", "if", "end", ":", "end", "=", "convert_datelike_to_long", "(", "end", ")", "_end", "=", "current_dates", ".", "searchsorted", "(", "end", ",", "'right'", ")", "end_index", "-=", "len", "(", "current_dates", ")", "-", "_end", "where", "=", "slice", "(", "start_index", ",", "end_index", ")", "values", "=", "self", ".", "buffer", ".", "values", "[", "item_indexer", ",", "where", ",", ":", "]", "current_dates", "=", "self", ".", "date_buf", "[", "where", "]", "if", "raw", ":", "# return copy so we can change it without side effects here", "return", "values", ".", "copy", "(", ")", "major_axis", "=", "pd", ".", "DatetimeIndex", "(", "deepcopy", "(", "current_dates", ")", ",", "tz", "=", "'utc'", ")", "if", "values", ".", "ndim", "==", "3", ":", "return", "pd", ".", "Panel", "(", "values", ",", "self", ".", "items", ",", "major_axis", ",", "self", ".", "minor_axis", ",", "dtype", "=", "self", ".", "dtype", ")", "elif", "values", ".", "ndim", "==", "2", ":", "return", "pd", ".", "DataFrame", "(", "values", ",", "major_axis", ",", "self", ".", "minor_axis", ",", "dtype", "=", "self", ".", "dtype", ")" ]
Get a Panel that is the current data in view. It is not safe to persist these objects because internal data might change
[ "Get", "a", "Panel", "that", "is", "the", "current", "data", "in", "view", ".", "It", "is", "not", "safe", "to", "persist", "these", "objects", "because", "internal", "data", "might", "change" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L165-L214
train
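The start/end constriction in get_current leans on numpy's searchsorted with its 'left' and 'right' sides; a minimal standalone sketch follows (the dates are illustrative):

import numpy as np

dates = np.array(['2014-01-02', '2014-01-03', '2014-01-06'],
                 dtype='datetime64[D]')
start_offset = dates.searchsorted(np.datetime64('2014-01-03'))
end_offset = dates.searchsorted(np.datetime64('2014-01-03'), 'right')
print(dates[start_offset:end_offset])  # ['2014-01-03']: inclusive bounds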
quantopian/zipline
zipline/utils/data.py
RollingPanel.set_current
def set_current(self, panel): """ Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current. """ where = slice(self._start_index, self._pos) self.buffer.values[:, where, :] = panel.values
python
def set_current(self, panel): """ Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current. """ where = slice(self._start_index, self._pos) self.buffer.values[:, where, :] = panel.values
[ "def", "set_current", "(", "self", ",", "panel", ")", ":", "where", "=", "slice", "(", "self", ".", "_start_index", ",", "self", ".", "_pos", ")", "self", ".", "buffer", ".", "values", "[", ":", ",", "where", ",", ":", "]", "=", "panel", ".", "values" ]
Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current.
[ "Set", "the", "values", "stored", "in", "our", "current", "in", "-", "view", "data", "to", "be", "values", "of", "the", "passed", "panel", ".", "The", "passed", "panel", "must", "have", "the", "same", "indices", "as", "the", "panel", "that", "would", "be", "returned", "by", "self", ".", "get_current", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L216-L223
train
quantopian/zipline
zipline/utils/data.py
RollingPanel._roll_data
def _roll_data(self): """ Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration """ self.buffer.values[:, :self._window, :] = \ self.buffer.values[:, -self._window:, :] self.date_buf[:self._window] = self.date_buf[-self._window:] self._pos = self._window
python
def _roll_data(self): """ Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration """ self.buffer.values[:, :self._window, :] = \ self.buffer.values[:, -self._window:, :] self.date_buf[:self._window] = self.date_buf[-self._window:] self._pos = self._window
[ "def", "_roll_data", "(", "self", ")", ":", "self", ".", "buffer", ".", "values", "[", ":", ",", ":", "self", ".", "_window", ",", ":", "]", "=", "self", ".", "buffer", ".", "values", "[", ":", ",", "-", "self", ".", "_window", ":", ",", ":", "]", "self", ".", "date_buf", "[", ":", "self", ".", "_window", "]", "=", "self", ".", "date_buf", "[", "-", "self", ".", "_window", ":", "]", "self", ".", "_pos", "=", "self", ".", "_window" ]
Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration
[ "Roll", "window", "worth", "of", "data", "up", "to", "position", "zero", ".", "Save", "the", "effort", "of", "having", "to", "expensively", "roll", "at", "each", "iteration" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L229-L238
train
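The roll in _roll_data is a single slice copy plus a cursor reset; a standalone sketch with a toy buffer:

import numpy as np

buf = np.arange(10.0)  # stand-in for the value/date buffers
window = 3

# Move the trailing `window` slots to the front, then write from `window` on.
buf[:window] = buf[-window:]
pos = window
print(buf[:window], pos)  # [7. 8. 9.] 3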
quantopian/zipline
zipline/utils/data.py
MutableIndexRollingPanel.oldest_frame
def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._oldest_frame_idx(), :] return self.buffer.iloc[:, self._oldest_frame_idx(), :]
python
def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._oldest_frame_idx(), :] return self.buffer.iloc[:, self._oldest_frame_idx(), :]
[ "def", "oldest_frame", "(", "self", ",", "raw", "=", "False", ")", ":", "if", "raw", ":", "return", "self", ".", "buffer", ".", "values", "[", ":", ",", "self", ".", "_oldest_frame_idx", "(", ")", ",", ":", "]", "return", "self", ".", "buffer", ".", "iloc", "[", ":", ",", "self", ".", "_oldest_frame_idx", "(", ")", ",", ":", "]" ]
Get the oldest frame in the panel.
[ "Get", "the", "oldest", "frame", "in", "the", "panel", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L273-L279
train
quantopian/zipline
zipline/utils/data.py
MutableIndexRollingPanel.get_current
def get_current(self): """ Get a Panel that is the current data in view. It is not safe to persist these objects because internal data might change """ where = slice(self._oldest_frame_idx(), self._pos) major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc') return pd.Panel(self.buffer.values[:, where, :], self.items, major_axis, self.minor_axis, dtype=self.dtype)
python
def get_current(self): """ Get a Panel that is the current data in view. It is not safe to persist these objects because internal data might change """ where = slice(self._oldest_frame_idx(), self._pos) major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc') return pd.Panel(self.buffer.values[:, where, :], self.items, major_axis, self.minor_axis, dtype=self.dtype)
[ "def", "get_current", "(", "self", ")", ":", "where", "=", "slice", "(", "self", ".", "_oldest_frame_idx", "(", ")", ",", "self", ".", "_pos", ")", "major_axis", "=", "pd", ".", "DatetimeIndex", "(", "deepcopy", "(", "self", ".", "date_buf", "[", "where", "]", ")", ",", "tz", "=", "'utc'", ")", "return", "pd", ".", "Panel", "(", "self", ".", "buffer", ".", "values", "[", ":", ",", "where", ",", ":", "]", ",", "self", ".", "items", ",", "major_axis", ",", "self", ".", "minor_axis", ",", "dtype", "=", "self", ".", "dtype", ")" ]
Get a Panel that is the current data in view. It is not safe to persist these objects because internal data might change
[ "Get", "a", "Panel", "that", "is", "the", "current", "data", "in", "view", ".", "It", "is", "not", "safe", "to", "persist", "these", "objects", "because", "internal", "data", "might", "change" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/data.py#L294-L303
train
quantopian/zipline
zipline/finance/order.py
Order.check_triggers
def check_triggers(self, price, dt): """ Update internal state based on price triggers and the trade event's price. """ stop_reached, limit_reached, sl_stop_reached = \ self.check_order_triggers(price) if (stop_reached, limit_reached) \ != (self.stop_reached, self.limit_reached): self.dt = dt self.stop_reached = stop_reached self.limit_reached = limit_reached if sl_stop_reached: # Change the STOP LIMIT order into a LIMIT order self.stop = None
python
def check_triggers(self, price, dt): """ Update internal state based on price triggers and the trade event's price. """ stop_reached, limit_reached, sl_stop_reached = \ self.check_order_triggers(price) if (stop_reached, limit_reached) \ != (self.stop_reached, self.limit_reached): self.dt = dt self.stop_reached = stop_reached self.limit_reached = limit_reached if sl_stop_reached: # Change the STOP LIMIT order into a LIMIT order self.stop = None
[ "def", "check_triggers", "(", "self", ",", "price", ",", "dt", ")", ":", "stop_reached", ",", "limit_reached", ",", "sl_stop_reached", "=", "self", ".", "check_order_triggers", "(", "price", ")", "if", "(", "stop_reached", ",", "limit_reached", ")", "!=", "(", "self", ".", "stop_reached", ",", "self", ".", "limit_reached", ")", ":", "self", ".", "dt", "=", "dt", "self", ".", "stop_reached", "=", "stop_reached", "self", ".", "limit_reached", "=", "limit_reached", "if", "sl_stop_reached", ":", "# Change the STOP LIMIT order into a LIMIT order", "self", ".", "stop", "=", "None" ]
Update internal state based on price triggers and the trade event's price.
[ "Update", "internal", "state", "based", "on", "price", "triggers", "and", "the", "trade", "event", "s", "price", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/order.py#L108-L122
train
quantopian/zipline
zipline/finance/order.py
Order.check_order_triggers
def check_order_triggers(self, current_price):
    """
    Given an order and a trade event, return a tuple of
    (stop_reached, limit_reached, sl_stop_reached).
    For market orders, will return (False, False, False).
    For stop orders, limit_reached will always be False.
    For limit orders, stop_reached will always be False.
    For stop limit orders a Boolean is returned to flag
    that the stop has been reached.

    For orders that have already been triggered (price targets reached),
    the order's current values are returned.
    """
    if self.triggered:
        return (self.stop_reached, self.limit_reached, False)

    stop_reached = False
    limit_reached = False
    sl_stop_reached = False

    order_type = 0

    if self.amount > 0:
        order_type |= BUY
    else:
        order_type |= SELL

    if self.stop is not None:
        order_type |= STOP

    if self.limit is not None:
        order_type |= LIMIT

    if order_type == BUY | STOP | LIMIT:
        if current_price >= self.stop:
            sl_stop_reached = True
            if current_price <= self.limit:
                limit_reached = True
    elif order_type == SELL | STOP | LIMIT:
        if current_price <= self.stop:
            sl_stop_reached = True
            if current_price >= self.limit:
                limit_reached = True
    elif order_type == BUY | STOP:
        if current_price >= self.stop:
            stop_reached = True
    elif order_type == SELL | STOP:
        if current_price <= self.stop:
            stop_reached = True
    elif order_type == BUY | LIMIT:
        if current_price <= self.limit:
            limit_reached = True
    elif order_type == SELL | LIMIT:
        # This is a SELL LIMIT order
        if current_price >= self.limit:
            limit_reached = True

    return (stop_reached, limit_reached, sl_stop_reached)
python
def check_order_triggers(self, current_price):
    """
    Given an order and a trade event, return a tuple of
    (stop_reached, limit_reached, sl_stop_reached).
    For market orders, will return (False, False, False).
    For stop orders, limit_reached will always be False.
    For limit orders, stop_reached will always be False.
    For stop limit orders a Boolean is returned to flag
    that the stop has been reached.

    For orders that have already been triggered (price targets reached),
    the order's current values are returned.
    """
    if self.triggered:
        return (self.stop_reached, self.limit_reached, False)

    stop_reached = False
    limit_reached = False
    sl_stop_reached = False

    order_type = 0

    if self.amount > 0:
        order_type |= BUY
    else:
        order_type |= SELL

    if self.stop is not None:
        order_type |= STOP

    if self.limit is not None:
        order_type |= LIMIT

    if order_type == BUY | STOP | LIMIT:
        if current_price >= self.stop:
            sl_stop_reached = True
            if current_price <= self.limit:
                limit_reached = True
    elif order_type == SELL | STOP | LIMIT:
        if current_price <= self.stop:
            sl_stop_reached = True
            if current_price >= self.limit:
                limit_reached = True
    elif order_type == BUY | STOP:
        if current_price >= self.stop:
            stop_reached = True
    elif order_type == SELL | STOP:
        if current_price <= self.stop:
            stop_reached = True
    elif order_type == BUY | LIMIT:
        if current_price <= self.limit:
            limit_reached = True
    elif order_type == SELL | LIMIT:
        # This is a SELL LIMIT order
        if current_price >= self.limit:
            limit_reached = True

    return (stop_reached, limit_reached, sl_stop_reached)
[ "def", "check_order_triggers", "(", "self", ",", "current_price", ")", ":", "if", "self", ".", "triggered", ":", "return", "(", "self", ".", "stop_reached", ",", "self", ".", "limit_reached", ",", "False", ")", "stop_reached", "=", "False", "limit_reached", "=", "False", "sl_stop_reached", "=", "False", "order_type", "=", "0", "if", "self", ".", "amount", ">", "0", ":", "order_type", "|=", "BUY", "else", ":", "order_type", "|=", "SELL", "if", "self", ".", "stop", "is", "not", "None", ":", "order_type", "|=", "STOP", "if", "self", ".", "limit", "is", "not", "None", ":", "order_type", "|=", "LIMIT", "if", "order_type", "==", "BUY", "|", "STOP", "|", "LIMIT", ":", "if", "current_price", ">=", "self", ".", "stop", ":", "sl_stop_reached", "=", "True", "if", "current_price", "<=", "self", ".", "limit", ":", "limit_reached", "=", "True", "elif", "order_type", "==", "SELL", "|", "STOP", "|", "LIMIT", ":", "if", "current_price", "<=", "self", ".", "stop", ":", "sl_stop_reached", "=", "True", "if", "current_price", ">=", "self", ".", "limit", ":", "limit_reached", "=", "True", "elif", "order_type", "==", "BUY", "|", "STOP", ":", "if", "current_price", ">=", "self", ".", "stop", ":", "stop_reached", "=", "True", "elif", "order_type", "==", "SELL", "|", "STOP", ":", "if", "current_price", "<=", "self", ".", "stop", ":", "stop_reached", "=", "True", "elif", "order_type", "==", "BUY", "|", "LIMIT", ":", "if", "current_price", "<=", "self", ".", "limit", ":", "limit_reached", "=", "True", "elif", "order_type", "==", "SELL", "|", "LIMIT", ":", "# This is a SELL LIMIT order", "if", "current_price", ">=", "self", ".", "limit", ":", "limit_reached", "=", "True", "return", "(", "stop_reached", ",", "limit_reached", ",", "sl_stop_reached", ")" ]
Given an order and a trade event, return a tuple of
(stop_reached, limit_reached, sl_stop_reached).
For market orders, will return (False, False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.

For orders that have already been triggered (price targets reached),
the order's current values are returned.
[ "Given", "an", "order", "and", "a", "trade", "event", "return", "a", "tuple", "of", "(", "stop_reached", "limit_reached", ")", ".", "For", "market", "orders", "will", "return", "(", "False", "False", ")", ".", "For", "stop", "orders", "limit_reached", "will", "always", "be", "False", ".", "For", "limit", "orders", "stop_reached", "will", "always", "be", "False", ".", "For", "stop", "limit", "orders", "a", "Boolean", "is", "returned", "to", "flag", "that", "the", "stop", "has", "been", "reached", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/order.py#L124-L181
train
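order_type above is a bitmask built from BUY/SELL/STOP/LIMIT flags. Below is a standalone sketch of the two unambiguous buy branches; the helper names and prices are illustrative, not zipline API:

def buy_stop_triggers(price, stop):
    # Mirrors the BUY | STOP branch: a buy stop trips at or above the stop.
    return price >= stop

def buy_limit_triggers(price, limit):
    # Mirrors the BUY | LIMIT branch: a buy limit fills at or below the limit.
    return price <= limit

print(buy_stop_triggers(10.50, stop=10.0))    # True
print(buy_limit_triggers(10.50, limit=9.0))   # False
print(buy_limit_triggers(8.90, limit=9.0))    # True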
quantopian/zipline
zipline/finance/order.py
Order.triggered
def triggered(self): """ For a market order, True. For a stop order, True IFF stop_reached. For a limit order, True IFF limit_reached. """ if self.stop is not None and not self.stop_reached: return False if self.limit is not None and not self.limit_reached: return False return True
python
def triggered(self): """ For a market order, True. For a stop order, True IFF stop_reached. For a limit order, True IFF limit_reached. """ if self.stop is not None and not self.stop_reached: return False if self.limit is not None and not self.limit_reached: return False return True
[ "def", "triggered", "(", "self", ")", ":", "if", "self", ".", "stop", "is", "not", "None", "and", "not", "self", ".", "stop_reached", ":", "return", "False", "if", "self", ".", "limit", "is", "not", "None", "and", "not", "self", ".", "limit_reached", ":", "return", "False", "return", "True" ]
For a market order, True. For a stop order, True IFF stop_reached. For a limit order, True IFF limit_reached.
[ "For", "a", "market", "order", "True", ".", "For", "a", "stop", "order", "True", "IFF", "stop_reached", ".", "For", "a", "limit", "order", "True", "IFF", "limit_reached", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/order.py#L230-L242
train
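The triggered property reduces to a short truth table: every price leg that exists must have been reached. A standalone sketch (the helper name is illustrative):

def is_triggered(stop, limit, stop_reached, limit_reached):
    # Mirrors Order.triggered's checks.
    if stop is not None and not stop_reached:
        return False
    if limit is not None and not limit_reached:
        return False
    return True

print(is_triggered(None, None, False, False))  # True: market orders always trigger
print(is_triggered(10.0, None, False, False))  # False: stop set, not yet reached
print(is_triggered(None, 9.0, False, True))    # True: limit reached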
quantopian/zipline
zipline/__init__.py
setup
def setup(self, np=np, numpy_version=numpy_version, StrictVersion=StrictVersion, new_pandas=new_pandas): """Lives in zipline.__init__ for doctests.""" if numpy_version >= StrictVersion('1.14'): self.old_opts = np.get_printoptions() np.set_printoptions(legacy='1.13') else: self.old_opts = None if new_pandas: self.old_err = np.geterr() # old pandas has numpy compat that sets this np.seterr(all='ignore') else: self.old_err = None
python
def setup(self, np=np, numpy_version=numpy_version, StrictVersion=StrictVersion, new_pandas=new_pandas): """Lives in zipline.__init__ for doctests.""" if numpy_version >= StrictVersion('1.14'): self.old_opts = np.get_printoptions() np.set_printoptions(legacy='1.13') else: self.old_opts = None if new_pandas: self.old_err = np.geterr() # old pandas has numpy compat that sets this np.seterr(all='ignore') else: self.old_err = None
[ "def", "setup", "(", "self", ",", "np", "=", "np", ",", "numpy_version", "=", "numpy_version", ",", "StrictVersion", "=", "StrictVersion", ",", "new_pandas", "=", "new_pandas", ")", ":", "if", "numpy_version", ">=", "StrictVersion", "(", "'1.14'", ")", ":", "self", ".", "old_opts", "=", "np", ".", "get_printoptions", "(", ")", "np", ".", "set_printoptions", "(", "legacy", "=", "'1.13'", ")", "else", ":", "self", ".", "old_opts", "=", "None", "if", "new_pandas", ":", "self", ".", "old_err", "=", "np", ".", "geterr", "(", ")", "# old pandas has numpy compat that sets this", "np", ".", "seterr", "(", "all", "=", "'ignore'", ")", "else", ":", "self", ".", "old_err", "=", "None" ]
Lives in zipline.__init__ for doctests.
[ "Lives", "in", "zipline", ".", "__init__", "for", "doctests", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__init__.py#L93-L111
train
quantopian/zipline
zipline/__init__.py
teardown
def teardown(self, np=np): """Lives in zipline.__init__ for doctests.""" if self.old_err is not None: np.seterr(**self.old_err) if self.old_opts is not None: np.set_printoptions(**self.old_opts)
python
def teardown(self, np=np): """Lives in zipline.__init__ for doctests.""" if self.old_err is not None: np.seterr(**self.old_err) if self.old_opts is not None: np.set_printoptions(**self.old_opts)
[ "def", "teardown", "(", "self", ",", "np", "=", "np", ")", ":", "if", "self", ".", "old_err", "is", "not", "None", ":", "np", ".", "seterr", "(", "*", "*", "self", ".", "old_err", ")", "if", "self", ".", "old_opts", "is", "not", "None", ":", "np", ".", "set_printoptions", "(", "*", "*", "self", ".", "old_opts", ")" ]
Lives in zipline.__init__ for doctests.
[ "Lives", "in", "zipline", ".", "__init__", "for", "doctests", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__init__.py#L114-L121
train
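setup and teardown above form a save/restore pair around numpy's global print and error state. Below is a standalone sketch of the same pattern; precision is used instead of legacy='1.13' so it runs on any numpy version:

import numpy as np

old_opts = np.get_printoptions()  # setup: stash the current global state
old_err = np.geterr()
np.set_printoptions(precision=2)
np.seterr(all='ignore')

print(np.array([1.23456789]))     # [1.23]

np.seterr(**old_err)              # teardown: restore exactly what was saved
np.set_printoptions(**old_opts)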
quantopian/zipline
zipline/gens/utils.py
hash_args
def hash_args(*args, **kwargs): """Define a unique string for any set of representable args.""" arg_string = '_'.join([str(arg) for arg in args]) kwarg_string = '_'.join([str(key) + '=' + str(value) for key, value in iteritems(kwargs)]) combined = ':'.join([arg_string, kwarg_string]) hasher = md5() hasher.update(b(combined)) return hasher.hexdigest()
python
def hash_args(*args, **kwargs): """Define a unique string for any set of representable args.""" arg_string = '_'.join([str(arg) for arg in args]) kwarg_string = '_'.join([str(key) + '=' + str(value) for key, value in iteritems(kwargs)]) combined = ':'.join([arg_string, kwarg_string]) hasher = md5() hasher.update(b(combined)) return hasher.hexdigest()
[ "def", "hash_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arg_string", "=", "'_'", ".", "join", "(", "[", "str", "(", "arg", ")", "for", "arg", "in", "args", "]", ")", "kwarg_string", "=", "'_'", ".", "join", "(", "[", "str", "(", "key", ")", "+", "'='", "+", "str", "(", "value", ")", "for", "key", ",", "value", "in", "iteritems", "(", "kwargs", ")", "]", ")", "combined", "=", "':'", ".", "join", "(", "[", "arg_string", ",", "kwarg_string", "]", ")", "hasher", "=", "md5", "(", ")", "hasher", ".", "update", "(", "b", "(", "combined", ")", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
Define a unique string for any set of representable args.
[ "Define", "a", "unique", "string", "for", "any", "set", "of", "representable", "args", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L27-L36
train
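A standalone sketch of the digest hash_args builds, with six's b() replaced by a plain .encode(); the sample args are illustrative:

from hashlib import md5

args, kwargs = (1, 'SPY'), {'window': 3}
arg_string = '_'.join(str(arg) for arg in args)
kwarg_string = '_'.join('%s=%s' % kv for kv in kwargs.items())
combined = ':'.join([arg_string, kwarg_string])  # '1_SPY:window=3'
print(md5(combined.encode()).hexdigest())        # 32-char hex digest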
quantopian/zipline
zipline/gens/utils.py
assert_datasource_protocol
def assert_datasource_protocol(event): """Assert that an event meets the protocol for datasource outputs.""" assert event.type in DATASOURCE_TYPE # Done packets have no dt. if not event.type == DATASOURCE_TYPE.DONE: assert isinstance(event.dt, datetime) assert event.dt.tzinfo == pytz.utc
python
def assert_datasource_protocol(event): """Assert that an event meets the protocol for datasource outputs.""" assert event.type in DATASOURCE_TYPE # Done packets have no dt. if not event.type == DATASOURCE_TYPE.DONE: assert isinstance(event.dt, datetime) assert event.dt.tzinfo == pytz.utc
[ "def", "assert_datasource_protocol", "(", "event", ")", ":", "assert", "event", ".", "type", "in", "DATASOURCE_TYPE", "# Done packets have no dt.", "if", "not", "event", ".", "type", "==", "DATASOURCE_TYPE", ".", "DONE", ":", "assert", "isinstance", "(", "event", ".", "dt", ",", "datetime", ")", "assert", "event", ".", "dt", ".", "tzinfo", "==", "pytz", ".", "utc" ]
Assert that an event meets the protocol for datasource outputs.
[ "Assert", "that", "an", "event", "meets", "the", "protocol", "for", "datasource", "outputs", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L39-L47
train
quantopian/zipline
zipline/gens/utils.py
assert_trade_protocol
def assert_trade_protocol(event): """Assert that an event meets the protocol for datasource TRADE outputs.""" assert_datasource_protocol(event) assert event.type == DATASOURCE_TYPE.TRADE assert isinstance(event.price, numbers.Real) assert isinstance(event.volume, numbers.Integral) assert isinstance(event.dt, datetime)
python
def assert_trade_protocol(event): """Assert that an event meets the protocol for datasource TRADE outputs.""" assert_datasource_protocol(event) assert event.type == DATASOURCE_TYPE.TRADE assert isinstance(event.price, numbers.Real) assert isinstance(event.volume, numbers.Integral) assert isinstance(event.dt, datetime)
[ "def", "assert_trade_protocol", "(", "event", ")", ":", "assert_datasource_protocol", "(", "event", ")", "assert", "event", ".", "type", "==", "DATASOURCE_TYPE", ".", "TRADE", "assert", "isinstance", "(", "event", ".", "price", ",", "numbers", ".", "Real", ")", "assert", "isinstance", "(", "event", ".", "volume", ",", "numbers", ".", "Integral", ")", "assert", "isinstance", "(", "event", ".", "dt", ",", "datetime", ")" ]
Assert that an event meets the protocol for datasource TRADE outputs.
[ "Assert", "that", "an", "event", "meets", "the", "protocol", "for", "datasource", "TRADE", "outputs", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/utils.py#L50-L57
train
quantopian/zipline
zipline/gens/composites.py
date_sorted_sources
def date_sorted_sources(*sources): """ Takes an iterable of sources, generating namestrings and piping their output into date_sort. """ sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources)) # Strip out key decoration for _, message in sorted_stream: yield message
python
def date_sorted_sources(*sources): """ Takes an iterable of sources, generating namestrings and piping their output into date_sort. """ sorted_stream = heapq.merge(*(_decorate_source(s) for s in sources)) # Strip out key decoration for _, message in sorted_stream: yield message
[ "def", "date_sorted_sources", "(", "*", "sources", ")", ":", "sorted_stream", "=", "heapq", ".", "merge", "(", "*", "(", "_decorate_source", "(", "s", ")", "for", "s", "in", "sources", ")", ")", "# Strip out key decoration", "for", "_", ",", "message", "in", "sorted_stream", ":", "yield", "message" ]
Takes an iterable of sources, generating namestrings and piping their output into date_sort.
[ "Takes", "an", "iterable", "of", "sources", "generating", "namestrings", "and", "piping", "their", "output", "into", "date_sort", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/gens/composites.py#L24-L33
train
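A standalone sketch of the decorate/merge/strip pattern above, with two pre-sorted toy streams standing in for datasources:

import heapq

a = [(1, 'a@t1'), (3, 'a@t3')]  # (sort key, message), already sorted
b = [(2, 'b@t2'), (4, 'b@t4')]

for _, message in heapq.merge(a, b):  # lazy merge, then strip the key
    print(message)                    # a@t1, b@t2, a@t3, b@t4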
quantopian/zipline
zipline/utils/factory.py
create_daily_trade_source
def create_daily_trade_source(sids,
                              sim_params,
                              asset_finder,
                              trading_calendar):
    """
    Creates one trade per session for each sid in the sids list. The first
    trade will be on sim_params.start_session, and daily thereafter for
    each sid. Thus, two sids should result in two trades per day.
    """
    return create_trade_source(
        sids,
        timedelta(days=1),
        sim_params,
        asset_finder,
        trading_calendar=trading_calendar,
    )
python
def create_daily_trade_source(sids,
                              sim_params,
                              asset_finder,
                              trading_calendar):
    """
    Creates one trade per session for each sid in the sids list. The first
    trade will be on sim_params.start_session, and daily thereafter for
    each sid. Thus, two sids should result in two trades per day.
    """
    return create_trade_source(
        sids,
        timedelta(days=1),
        sim_params,
        asset_finder,
        trading_calendar=trading_calendar,
    )
[ "def", "create_daily_trade_source", "(", "sids", ",", "sim_params", ",", "asset_finder", ",", "trading_calendar", ")", ":", "return", "create_trade_source", "(", "sids", ",", "timedelta", "(", "days", "=", "1", ")", ",", "sim_params", ",", "asset_finder", ",", "trading_calendar", "=", "trading_calendar", ",", ")" ]
Creates one trade per session for each sid in the sids list. The first
trade will be on sim_params.start_session, and daily thereafter for
each sid. Thus, two sids should result in two trades per day.
[ "creates", "trade_count", "trades", "for", "each", "sid", "in", "sids", "list", ".", "first", "trade", "will", "be", "on", "sim_params", ".", "start_session", "and", "daily", "thereafter", "for", "each", "sid", ".", "Thus", "two", "sids", "should", "result", "in", "two", "trades", "per", "day", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/factory.py#L115-L131
train
quantopian/zipline
zipline/data/bundles/quandl.py
load_data_table
def load_data_table(file, index_col, show_progress=False): """ Load data table from zip file provided by Quandl. """ with ZipFile(file) as zip_file: file_names = zip_file.namelist() assert len(file_names) == 1, "Expected a single file from Quandl." wiki_prices = file_names.pop() with zip_file.open(wiki_prices) as table_file: if show_progress: log.info('Parsing raw data.') data_table = pd.read_csv( table_file, parse_dates=['date'], index_col=index_col, usecols=[ 'ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'ex-dividend', 'split_ratio', ], ) data_table.rename( columns={ 'ticker': 'symbol', 'ex-dividend': 'ex_dividend', }, inplace=True, copy=False, ) return data_table
python
def load_data_table(file, index_col, show_progress=False): """ Load data table from zip file provided by Quandl. """ with ZipFile(file) as zip_file: file_names = zip_file.namelist() assert len(file_names) == 1, "Expected a single file from Quandl." wiki_prices = file_names.pop() with zip_file.open(wiki_prices) as table_file: if show_progress: log.info('Parsing raw data.') data_table = pd.read_csv( table_file, parse_dates=['date'], index_col=index_col, usecols=[ 'ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'ex-dividend', 'split_ratio', ], ) data_table.rename( columns={ 'ticker': 'symbol', 'ex-dividend': 'ex_dividend', }, inplace=True, copy=False, ) return data_table
[ "def", "load_data_table", "(", "file", ",", "index_col", ",", "show_progress", "=", "False", ")", ":", "with", "ZipFile", "(", "file", ")", "as", "zip_file", ":", "file_names", "=", "zip_file", ".", "namelist", "(", ")", "assert", "len", "(", "file_names", ")", "==", "1", ",", "\"Expected a single file from Quandl.\"", "wiki_prices", "=", "file_names", ".", "pop", "(", ")", "with", "zip_file", ".", "open", "(", "wiki_prices", ")", "as", "table_file", ":", "if", "show_progress", ":", "log", ".", "info", "(", "'Parsing raw data.'", ")", "data_table", "=", "pd", ".", "read_csv", "(", "table_file", ",", "parse_dates", "=", "[", "'date'", "]", ",", "index_col", "=", "index_col", ",", "usecols", "=", "[", "'ticker'", ",", "'date'", ",", "'open'", ",", "'high'", ",", "'low'", ",", "'close'", ",", "'volume'", ",", "'ex-dividend'", ",", "'split_ratio'", ",", "]", ",", ")", "data_table", ".", "rename", "(", "columns", "=", "{", "'ticker'", ":", "'symbol'", ",", "'ex-dividend'", ":", "'ex_dividend'", ",", "}", ",", "inplace", "=", "True", ",", "copy", "=", "False", ",", ")", "return", "data_table" ]
Load data table from zip file provided by Quandl.
[ "Load", "data", "table", "from", "zip", "file", "provided", "by", "Quandl", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L38-L75
train
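A standalone sketch of the single-member-zip parse above; an in-memory archive stands in for the Quandl download, and the column set is trimmed for brevity:

from io import BytesIO
from zipfile import ZipFile

import pandas as pd

buf = BytesIO()
with ZipFile(buf, 'w') as archive:
    archive.writestr('prices.csv', 'ticker,date,close\nAAPL,2014-01-02,79.02\n')
buf.seek(0)

with ZipFile(buf) as zip_file:
    name, = zip_file.namelist()  # tuple-unpacking asserts a single member
    with zip_file.open(name) as table_file:
        data = pd.read_csv(table_file, parse_dates=['date'])

print(data.rename(columns={'ticker': 'symbol'}))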
quantopian/zipline
zipline/data/bundles/quandl.py
fetch_data_table
def fetch_data_table(api_key, show_progress, retries): """ Fetch WIKI Prices data table from Quandl """ for _ in range(retries): try: if show_progress: log.info('Downloading WIKI metadata.') metadata = pd.read_csv( format_metadata_url(api_key) ) # Extract link from metadata and download zip file. table_url = metadata.loc[0, 'file.link'] if show_progress: raw_file = download_with_progress( table_url, chunk_size=ONE_MEGABYTE, label="Downloading WIKI Prices table from Quandl" ) else: raw_file = download_without_progress(table_url) return load_data_table( file=raw_file, index_col=None, show_progress=show_progress, ) except Exception: log.exception("Exception raised reading Quandl data. Retrying.") else: raise ValueError( "Failed to download Quandl data after %d attempts." % (retries) )
python
def fetch_data_table(api_key, show_progress, retries): """ Fetch WIKI Prices data table from Quandl """ for _ in range(retries): try: if show_progress: log.info('Downloading WIKI metadata.') metadata = pd.read_csv( format_metadata_url(api_key) ) # Extract link from metadata and download zip file. table_url = metadata.loc[0, 'file.link'] if show_progress: raw_file = download_with_progress( table_url, chunk_size=ONE_MEGABYTE, label="Downloading WIKI Prices table from Quandl" ) else: raw_file = download_without_progress(table_url) return load_data_table( file=raw_file, index_col=None, show_progress=show_progress, ) except Exception: log.exception("Exception raised reading Quandl data. Retrying.") else: raise ValueError( "Failed to download Quandl data after %d attempts." % (retries) )
[ "def", "fetch_data_table", "(", "api_key", ",", "show_progress", ",", "retries", ")", ":", "for", "_", "in", "range", "(", "retries", ")", ":", "try", ":", "if", "show_progress", ":", "log", ".", "info", "(", "'Downloading WIKI metadata.'", ")", "metadata", "=", "pd", ".", "read_csv", "(", "format_metadata_url", "(", "api_key", ")", ")", "# Extract link from metadata and download zip file.", "table_url", "=", "metadata", ".", "loc", "[", "0", ",", "'file.link'", "]", "if", "show_progress", ":", "raw_file", "=", "download_with_progress", "(", "table_url", ",", "chunk_size", "=", "ONE_MEGABYTE", ",", "label", "=", "\"Downloading WIKI Prices table from Quandl\"", ")", "else", ":", "raw_file", "=", "download_without_progress", "(", "table_url", ")", "return", "load_data_table", "(", "file", "=", "raw_file", ",", "index_col", "=", "None", ",", "show_progress", "=", "show_progress", ",", ")", "except", "Exception", ":", "log", ".", "exception", "(", "\"Exception raised reading Quandl data. Retrying.\"", ")", "else", ":", "raise", "ValueError", "(", "\"Failed to download Quandl data after %d attempts.\"", "%", "(", "retries", ")", ")" ]
Fetch the WIKI Prices data table from Quandl.
[ "Fetch", "WIKI", "Prices", "data", "table", "from", "Quandl" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L78-L114
train
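A hedged invocation sketch, mirroring how `quandl_bundle` (next record) calls this function with a key from the environment. Note the `for`/`else`: the `else` clause runs only when every retry falls through without returning, which is what raises the final ValueError.

import os

from zipline.data.bundles.quandl import fetch_data_table

# Each failed attempt is logged and retried; after `retries` failures
# the loop's `else` clause raises.
raw_data = fetch_data_table(
    api_key=os.environ['QUANDL_API_KEY'],
    show_progress=True,
    retries=5,
)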
quantopian/zipline
zipline/data/bundles/quandl.py
quandl_bundle
def quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): """ quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication """ api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes adjustment_writer.write( splits=parse_splits( raw_data[[ 'sid', 'date', 'split_ratio', ]].loc[raw_data.split_ratio != 1], show_progress=show_progress ), dividends=parse_dividends( raw_data[[ 'sid', 'date', 'ex_dividend', ]].loc[raw_data.ex_dividend != 0], show_progress=show_progress ) )
python
def quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): """ quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication """ api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes adjustment_writer.write( splits=parse_splits( raw_data[[ 'sid', 'date', 'split_ratio', ]].loc[raw_data.split_ratio != 1], show_progress=show_progress ), dividends=parse_dividends( raw_data[[ 'sid', 'date', 'ex_dividend', ]].loc[raw_data.ex_dividend != 0], show_progress=show_progress ) )
[ "def", "quandl_bundle", "(", "environ", ",", "asset_db_writer", ",", "minute_bar_writer", ",", "daily_bar_writer", ",", "adjustment_writer", ",", "calendar", ",", "start_session", ",", "end_session", ",", "cache", ",", "show_progress", ",", "output_dir", ")", ":", "api_key", "=", "environ", ".", "get", "(", "'QUANDL_API_KEY'", ")", "if", "api_key", "is", "None", ":", "raise", "ValueError", "(", "\"Please set your QUANDL_API_KEY environment variable and retry.\"", ")", "raw_data", "=", "fetch_data_table", "(", "api_key", ",", "show_progress", ",", "environ", ".", "get", "(", "'QUANDL_DOWNLOAD_ATTEMPTS'", ",", "5", ")", ")", "asset_metadata", "=", "gen_asset_metadata", "(", "raw_data", "[", "[", "'symbol'", ",", "'date'", "]", "]", ",", "show_progress", ")", "asset_db_writer", ".", "write", "(", "asset_metadata", ")", "symbol_map", "=", "asset_metadata", ".", "symbol", "sessions", "=", "calendar", ".", "sessions_in_range", "(", "start_session", ",", "end_session", ")", "raw_data", ".", "set_index", "(", "[", "'date'", ",", "'symbol'", "]", ",", "inplace", "=", "True", ")", "daily_bar_writer", ".", "write", "(", "parse_pricing_and_vol", "(", "raw_data", ",", "sessions", ",", "symbol_map", ")", ",", "show_progress", "=", "show_progress", ")", "raw_data", ".", "reset_index", "(", "inplace", "=", "True", ")", "raw_data", "[", "'symbol'", "]", "=", "raw_data", "[", "'symbol'", "]", ".", "astype", "(", "'category'", ")", "raw_data", "[", "'sid'", "]", "=", "raw_data", ".", "symbol", ".", "cat", ".", "codes", "adjustment_writer", ".", "write", "(", "splits", "=", "parse_splits", "(", "raw_data", "[", "[", "'sid'", ",", "'date'", ",", "'split_ratio'", ",", "]", "]", ".", "loc", "[", "raw_data", ".", "split_ratio", "!=", "1", "]", ",", "show_progress", "=", "show_progress", ")", ",", "dividends", "=", "parse_dividends", "(", "raw_data", "[", "[", "'sid'", ",", "'date'", ",", "'ex_dividend'", ",", "]", "]", ".", "loc", "[", "raw_data", ".", "ex_dividend", "!=", "0", "]", ",", "show_progress", "=", "show_progress", ")", ")" ]
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication
[ "quandl_bundle", "builds", "a", "daily", "dataset", "using", "Quandl", "s", "WIKI", "Prices", "dataset", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L183-L250
train
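A hedged registration sketch using zipline's standard `bundles.register` API; the bundle name below is hypothetical (zipline already ships a built-in 'quandl' registration, so this is only needed for a custom name):

from zipline.data.bundles import register
from zipline.data.bundles.quandl import quandl_bundle

# Register under a custom name so the CLI can ingest it.
register('wiki-prices', quandl_bundle, calendar_name='NYSE')
# Then, from the shell:  zipline ingest -b wiki-prices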
quantopian/zipline
zipline/data/bundles/quandl.py
download_with_progress
def download_with_progress(url, chunk_size, **progress_kwargs): """ Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url, stream=True) resp.raise_for_status() total_size = int(resp.headers['content-length']) data = BytesIO() with progressbar(length=total_size, **progress_kwargs) as pbar: for chunk in resp.iter_content(chunk_size=chunk_size): data.write(chunk) pbar.update(len(chunk)) data.seek(0) return data
python
def download_with_progress(url, chunk_size, **progress_kwargs): """ Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url, stream=True) resp.raise_for_status() total_size = int(resp.headers['content-length']) data = BytesIO() with progressbar(length=total_size, **progress_kwargs) as pbar: for chunk in resp.iter_content(chunk_size=chunk_size): data.write(chunk) pbar.update(len(chunk)) data.seek(0) return data
[ "def", "download_with_progress", "(", "url", ",", "chunk_size", ",", "*", "*", "progress_kwargs", ")", ":", "resp", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "resp", ".", "raise_for_status", "(", ")", "total_size", "=", "int", "(", "resp", ".", "headers", "[", "'content-length'", "]", ")", "data", "=", "BytesIO", "(", ")", "with", "progressbar", "(", "length", "=", "total_size", ",", "*", "*", "progress_kwargs", ")", "as", "pbar", ":", "for", "chunk", "in", "resp", ".", "iter_content", "(", "chunk_size", "=", "chunk_size", ")", ":", "data", ".", "write", "(", "chunk", ")", "pbar", ".", "update", "(", "len", "(", "chunk", ")", ")", "data", ".", "seek", "(", "0", ")", "return", "data" ]
Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data.
[ "Download", "streaming", "data", "from", "a", "URL", "printing", "progress", "information", "to", "the", "terminal", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L253-L283
train
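A hedged sketch of calling the helper directly: the URL is a placeholder, and `ONE_MEGABYTE` is the module constant referenced by `fetch_data_table`. Extra keyword arguments flow through to `click.progressbar`, which is how the bundle attaches its label:

from zipline.data.bundles.quandl import ONE_MEGABYTE, download_with_progress

data = download_with_progress(
    'https://example.com/WIKI_PRICES.zip',  # placeholder URL
    chunk_size=ONE_MEGABYTE,
    label='Downloading WIKI Prices table from Quandl',
)
payload = data.read()  # the helper rewinds the BytesIO before returning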
quantopian/zipline
zipline/data/bundles/quandl.py
download_without_progress
def download_without_progress(url): """ Download data from a URL, returning a BytesIO containing the loaded data. Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url) resp.raise_for_status() return BytesIO(resp.content)
python
def download_without_progress(url): """ Download data from a URL, returning a BytesIO containing the loaded data. Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url) resp.raise_for_status() return BytesIO(resp.content)
[ "def", "download_without_progress", "(", "url", ")", ":", "resp", "=", "requests", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "return", "BytesIO", "(", "resp", ".", "content", ")" ]
Download data from a URL, returning a BytesIO containing the loaded data. Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data.
[ "Download", "data", "from", "a", "URL", "returning", "a", "BytesIO", "containing", "the", "loaded", "data", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/quandl.py#L286-L302
train
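The no-progress variant is a thin wrapper over `requests.get` that buffers the whole response in memory. A hedged one-liner (placeholder URL):

from io import BytesIO

from zipline.data.bundles.quandl import download_without_progress

payload = download_without_progress('https://example.com/WIKI_PRICES.zip')
assert isinstance(payload, BytesIO)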
quantopian/zipline
zipline/data/resample.py
minute_frame_to_session_frame
def minute_frame_to_session_frame(minute_frame, calendar): """ Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter. Parameters ---------- minute_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts) calendar : trading_calendars.trading_calendar.TradingCalendar A TradingCalendar on which session labels to resample from minute to session. Return ------ session_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like). """ how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns) labels = calendar.minute_index_to_session_labels(minute_frame.index) return minute_frame.groupby(labels).agg(how)
python
def minute_frame_to_session_frame(minute_frame, calendar): """ Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter. Parameters ---------- minute_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts) calendar : trading_calendars.trading_calendar.TradingCalendar A TradingCalendar on which session labels to resample from minute to session. Return ------ session_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like). """ how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns) labels = calendar.minute_index_to_session_labels(minute_frame.index) return minute_frame.groupby(labels).agg(how)
[ "def", "minute_frame_to_session_frame", "(", "minute_frame", ",", "calendar", ")", ":", "how", "=", "OrderedDict", "(", "(", "c", ",", "_MINUTE_TO_SESSION_OHCLV_HOW", "[", "c", "]", ")", "for", "c", "in", "minute_frame", ".", "columns", ")", "labels", "=", "calendar", ".", "minute_index_to_session_labels", "(", "minute_frame", ".", "index", ")", "return", "minute_frame", ".", "groupby", "(", "labels", ")", ".", "agg", "(", "how", ")" ]
Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter. Parameters ---------- minute_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts). calendar : trading_calendars.trading_calendar.TradingCalendar The TradingCalendar whose session labels are used to resample the minute data into sessions. Returns ------- session_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like).
[ "Resample", "a", "DataFrame", "with", "minute", "data", "into", "the", "frame", "expected", "by", "a", "BcolzDailyBarWriter", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L42-L66
train
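A hedged, self-contained sketch of the resampling: a synthetic one-session minute frame rolled up into a single daily bar. The calendar import follows the `trading_calendars` package named in the docstring; the session date is arbitrary.

import numpy as np
import pandas as pd
from trading_calendars import get_calendar

from zipline.data.resample import minute_frame_to_session_frame

cal = get_calendar('XNYS')
session = pd.Timestamp('2016-01-04', tz='UTC')
minutes = cal.minutes_for_session(session)  # 390 NYSE minutes

frame = pd.DataFrame({
    'open': np.random.rand(len(minutes)),
    'high': np.random.rand(len(minutes)) + 1,
    'low': np.random.rand(len(minutes)) - 1,
    'close': np.random.rand(len(minutes)),
    'volume': np.random.randint(0, 1000, len(minutes)),
}, index=minutes)

daily = minute_frame_to_session_frame(frame, cal)  # one row per session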
quantopian/zipline
zipline/data/resample.py
minute_to_session
def minute_to_session(column, close_locs, data, out): """ Resample an array with minute data into an array with session data. This function assumes that the minute data is the exact length of all minutes in the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session, containing values for all minutes for all sessions. With the last value being the market close of the last session. out : array[float64|uint32] The output array into which to write the sampled sessions. """ if column == 'open': _minute_to_session_open(close_locs, data, out) elif column == 'high': _minute_to_session_high(close_locs, data, out) elif column == 'low': _minute_to_session_low(close_locs, data, out) elif column == 'close': _minute_to_session_close(close_locs, data, out) elif column == 'volume': _minute_to_session_volume(close_locs, data, out) return out
python
def minute_to_session(column, close_locs, data, out): """ Resample an array with minute data into an array with session data. This function assumes that the minute data is the exact length of all minutes in the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session, containing values for all minutes for all sessions. With the last value being the market close of the last session. out : array[float64|uint32] The output array into which to write the sampled sessions. """ if column == 'open': _minute_to_session_open(close_locs, data, out) elif column == 'high': _minute_to_session_high(close_locs, data, out) elif column == 'low': _minute_to_session_low(close_locs, data, out) elif column == 'close': _minute_to_session_close(close_locs, data, out) elif column == 'volume': _minute_to_session_volume(close_locs, data, out) return out
[ "def", "minute_to_session", "(", "column", ",", "close_locs", ",", "data", ",", "out", ")", ":", "if", "column", "==", "'open'", ":", "_minute_to_session_open", "(", "close_locs", ",", "data", ",", "out", ")", "elif", "column", "==", "'high'", ":", "_minute_to_session_high", "(", "close_locs", ",", "data", ",", "out", ")", "elif", "column", "==", "'low'", ":", "_minute_to_session_low", "(", "close_locs", ",", "data", ",", "out", ")", "elif", "column", "==", "'close'", ":", "_minute_to_session_close", "(", "close_locs", ",", "data", ",", "out", ")", "elif", "column", "==", "'volume'", ":", "_minute_to_session_volume", "(", "close_locs", ",", "data", ",", "out", ")", "return", "out" ]
Resample an array with minute data into an array with session data. This function assumes that the minute data covers exactly the minutes of the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session and the last value with the market close of the last session, with values for all minutes of all sessions in between. out : array[float64|uint32] The output array into which to write the sampled sessions.
[ "Resample", "an", "array", "with", "minute", "data", "into", "an", "array", "with", "session", "data", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L69-L100
train
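A toy illustration of the dispatch (hedged: the `_minute_to_session_*` kernels are compiled helpers in this module, and the exact array contract is assumed from the docstring). Two "sessions" of three minutes each, with `close_locs` marking each session's final minute:

import numpy as np

from zipline.data.resample import minute_to_session

data = np.array([1., 2., 3., 4., 5., 6.])      # six minutes, two sessions
close_locs = np.array([2, 5], dtype=np.intp)   # last minute of each session
out = np.empty(2, dtype=np.float64)

minute_to_session('close', close_locs, data, out)
# out is now [3., 6.]: the value at each session's closing minute.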
quantopian/zipline
zipline/data/resample.py
DailyHistoryAggregator.opens
def opens(self, assets, dt): """ The open field's aggregation returns the first value that occurs for the day, if there has been no data on or before the `dt` the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open') opens = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): opens.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'open') entries[asset] = (dt_value, val) opens.append(val) continue else: try: last_visited_dt, first_open = entries[asset] if last_visited_dt == dt_value: opens.append(first_open) continue elif not pd.isnull(first_open): opens.append(first_open) entries[asset] = (dt_value, first_open) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['open'], after_last, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['open'], market_open, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue return np.array(opens)
python
def opens(self, assets, dt): """ The open field's aggregation returns the first value that occurs for the day, if there has been no data on or before the `dt` the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open') opens = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): opens.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'open') entries[asset] = (dt_value, val) opens.append(val) continue else: try: last_visited_dt, first_open = entries[asset] if last_visited_dt == dt_value: opens.append(first_open) continue elif not pd.isnull(first_open): opens.append(first_open) entries[asset] = (dt_value, first_open) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['open'], after_last, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['open'], market_open, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue return np.array(opens)
[ "def", "opens", "(", "self", ",", "assets", ",", "dt", ")", ":", "market_open", ",", "prev_dt", ",", "dt_value", ",", "entries", "=", "self", ".", "_prelude", "(", "dt", ",", "'open'", ")", "opens", "=", "[", "]", "session_label", "=", "self", ".", "_trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "for", "asset", "in", "assets", ":", "if", "not", "asset", ".", "is_alive_for_session", "(", "session_label", ")", ":", "opens", ".", "append", "(", "np", ".", "NaN", ")", "continue", "if", "prev_dt", "is", "None", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'open'", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "opens", ".", "append", "(", "val", ")", "continue", "else", ":", "try", ":", "last_visited_dt", ",", "first_open", "=", "entries", "[", "asset", "]", "if", "last_visited_dt", "==", "dt_value", ":", "opens", ".", "append", "(", "first_open", ")", "continue", "elif", "not", "pd", ".", "isnull", "(", "first_open", ")", ":", "opens", ".", "append", "(", "first_open", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "first_open", ")", "continue", "else", ":", "after_last", "=", "pd", ".", "Timestamp", "(", "last_visited_dt", "+", "self", ".", "_one_min", ",", "tz", "=", "'UTC'", ")", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'open'", "]", ",", "after_last", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", "nonnan", "=", "window", "[", "~", "pd", ".", "isnull", "(", "window", ")", "]", "if", "len", "(", "nonnan", ")", ":", "val", "=", "nonnan", "[", "0", "]", "else", ":", "val", "=", "np", ".", "nan", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "opens", ".", "append", "(", "val", ")", "continue", "except", "KeyError", ":", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'open'", "]", ",", "market_open", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", "nonnan", "=", "window", "[", "~", "pd", ".", "isnull", "(", "window", ")", "]", "if", "len", "(", "nonnan", ")", ":", "val", "=", "nonnan", "[", "0", "]", "else", ":", "val", "=", "np", ".", "nan", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "opens", ".", "append", "(", "val", ")", "continue", "return", "np", ".", "array", "(", "opens", ")" ]
The open field's aggregation returns the first value that occurs for the day. If there has been no data on or before the `dt`, the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter.
[ "The", "open", "field", "s", "aggregation", "returns", "the", "first", "value", "that", "occurs", "for", "the", "day", "if", "there", "has", "been", "no", "data", "on", "or", "before", "the", "dt", "the", "open", "is", "nan", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L167-L237
train
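A hedged sketch of driving the aggregator. The constructor arguments (`market_opens`, a minute bar reader, and a trading calendar) are assumed from the zipline source and are placeholders below, as are `assets` and `dt`. The same per-field cache pattern — `entries[asset] = (dt_value, val)` — backs `highs`, `lows`, `closes`, and `volumes` in the records that follow.

import pandas as pd

from zipline.data.resample import DailyHistoryAggregator

# Placeholder wiring: market_opens, minute_reader, and calendar would
# come from an ingested bundle.
aggregator = DailyHistoryAggregator(market_opens, minute_reader, calendar)

# The first call scans from the market open; later calls in the same
# session reuse the cached (dt_value, val) entry and read only the new
# minutes since the last visit.
opens_now = aggregator.opens(assets, dt)
opens_later = aggregator.opens(assets, dt + pd.Timedelta(minutes=5))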
quantopian/zipline
zipline/data/resample.py
DailyHistoryAggregator.highs
def highs(self, assets, dt): """ The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt` the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high') highs = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): highs.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'high') entries[asset] = (dt_value, val) highs.append(val) continue else: try: last_visited_dt, last_max = entries[asset] if last_visited_dt == dt_value: highs.append(last_max) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'high') if pd.isnull(curr_val): val = last_max elif pd.isnull(last_max): val = curr_val else: val = max(last_max, curr_val) entries[asset] = (dt_value, val) highs.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['high'], after_last, dt, [asset], )[0].T val = np.nanmax(np.append(window, last_max)) entries[asset] = (dt_value, val) highs.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['high'], market_open, dt, [asset], )[0].T val = np.nanmax(window) entries[asset] = (dt_value, val) highs.append(val) continue return np.array(highs)
python
def highs(self, assets, dt): """ The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt` the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high') highs = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): highs.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'high') entries[asset] = (dt_value, val) highs.append(val) continue else: try: last_visited_dt, last_max = entries[asset] if last_visited_dt == dt_value: highs.append(last_max) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'high') if pd.isnull(curr_val): val = last_max elif pd.isnull(last_max): val = curr_val else: val = max(last_max, curr_val) entries[asset] = (dt_value, val) highs.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['high'], after_last, dt, [asset], )[0].T val = np.nanmax(np.append(window, last_max)) entries[asset] = (dt_value, val) highs.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['high'], market_open, dt, [asset], )[0].T val = np.nanmax(window) entries[asset] = (dt_value, val) highs.append(val) continue return np.array(highs)
[ "def", "highs", "(", "self", ",", "assets", ",", "dt", ")", ":", "market_open", ",", "prev_dt", ",", "dt_value", ",", "entries", "=", "self", ".", "_prelude", "(", "dt", ",", "'high'", ")", "highs", "=", "[", "]", "session_label", "=", "self", ".", "_trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "for", "asset", "in", "assets", ":", "if", "not", "asset", ".", "is_alive_for_session", "(", "session_label", ")", ":", "highs", ".", "append", "(", "np", ".", "NaN", ")", "continue", "if", "prev_dt", "is", "None", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'high'", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "highs", ".", "append", "(", "val", ")", "continue", "else", ":", "try", ":", "last_visited_dt", ",", "last_max", "=", "entries", "[", "asset", "]", "if", "last_visited_dt", "==", "dt_value", ":", "highs", ".", "append", "(", "last_max", ")", "continue", "elif", "last_visited_dt", "==", "prev_dt", ":", "curr_val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'high'", ")", "if", "pd", ".", "isnull", "(", "curr_val", ")", ":", "val", "=", "last_max", "elif", "pd", ".", "isnull", "(", "last_max", ")", ":", "val", "=", "curr_val", "else", ":", "val", "=", "max", "(", "last_max", ",", "curr_val", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "highs", ".", "append", "(", "val", ")", "continue", "else", ":", "after_last", "=", "pd", ".", "Timestamp", "(", "last_visited_dt", "+", "self", ".", "_one_min", ",", "tz", "=", "'UTC'", ")", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'high'", "]", ",", "after_last", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", ".", "T", "val", "=", "np", ".", "nanmax", "(", "np", ".", "append", "(", "window", ",", "last_max", ")", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "highs", ".", "append", "(", "val", ")", "continue", "except", "KeyError", ":", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'high'", "]", ",", "market_open", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", ".", "T", "val", "=", "np", ".", "nanmax", "(", "window", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "highs", ".", "append", "(", "val", ")", "continue", "return", "np", ".", "array", "(", "highs", ")" ]
The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt`, the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter.
[ "The", "high", "field", "s", "aggregation", "returns", "the", "largest", "high", "seen", "between", "the", "market", "open", "and", "the", "current", "dt", ".", "If", "there", "has", "been", "no", "data", "on", "or", "before", "the", "dt", "the", "high", "is", "nan", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L239-L306
train
quantopian/zipline
zipline/data/resample.py
DailyHistoryAggregator.lows
def lows(self, assets, dt): """ The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt` the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low') lows = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): lows.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'low') entries[asset] = (dt_value, val) lows.append(val) continue else: try: last_visited_dt, last_min = entries[asset] if last_visited_dt == dt_value: lows.append(last_min) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'low') val = np.nanmin([last_min, curr_val]) entries[asset] = (dt_value, val) lows.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['low'], after_last, dt, [asset], )[0].T val = np.nanmin(np.append(window, last_min)) entries[asset] = (dt_value, val) lows.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['low'], market_open, dt, [asset], )[0].T val = np.nanmin(window) entries[asset] = (dt_value, val) lows.append(val) continue return np.array(lows)
python
def lows(self, assets, dt): """ The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt` the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low') lows = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): lows.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'low') entries[asset] = (dt_value, val) lows.append(val) continue else: try: last_visited_dt, last_min = entries[asset] if last_visited_dt == dt_value: lows.append(last_min) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'low') val = np.nanmin([last_min, curr_val]) entries[asset] = (dt_value, val) lows.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['low'], after_last, dt, [asset], )[0].T val = np.nanmin(np.append(window, last_min)) entries[asset] = (dt_value, val) lows.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['low'], market_open, dt, [asset], )[0].T val = np.nanmin(window) entries[asset] = (dt_value, val) lows.append(val) continue return np.array(lows)
[ "def", "lows", "(", "self", ",", "assets", ",", "dt", ")", ":", "market_open", ",", "prev_dt", ",", "dt_value", ",", "entries", "=", "self", ".", "_prelude", "(", "dt", ",", "'low'", ")", "lows", "=", "[", "]", "session_label", "=", "self", ".", "_trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "for", "asset", "in", "assets", ":", "if", "not", "asset", ".", "is_alive_for_session", "(", "session_label", ")", ":", "lows", ".", "append", "(", "np", ".", "NaN", ")", "continue", "if", "prev_dt", "is", "None", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'low'", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "lows", ".", "append", "(", "val", ")", "continue", "else", ":", "try", ":", "last_visited_dt", ",", "last_min", "=", "entries", "[", "asset", "]", "if", "last_visited_dt", "==", "dt_value", ":", "lows", ".", "append", "(", "last_min", ")", "continue", "elif", "last_visited_dt", "==", "prev_dt", ":", "curr_val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'low'", ")", "val", "=", "np", ".", "nanmin", "(", "[", "last_min", ",", "curr_val", "]", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "lows", ".", "append", "(", "val", ")", "continue", "else", ":", "after_last", "=", "pd", ".", "Timestamp", "(", "last_visited_dt", "+", "self", ".", "_one_min", ",", "tz", "=", "'UTC'", ")", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'low'", "]", ",", "after_last", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", ".", "T", "val", "=", "np", ".", "nanmin", "(", "np", ".", "append", "(", "window", ",", "last_min", ")", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "lows", ".", "append", "(", "val", ")", "continue", "except", "KeyError", ":", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'low'", "]", ",", "market_open", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", ".", "T", "val", "=", "np", ".", "nanmin", "(", "window", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "lows", ".", "append", "(", "val", ")", "continue", "return", "np", ".", "array", "(", "lows", ")" ]
The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt`, the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter.
[ "The", "low", "field", "s", "aggregation", "returns", "the", "smallest", "low", "seen", "between", "the", "market", "open", "and", "the", "current", "dt", ".", "If", "there", "has", "been", "no", "data", "on", "or", "before", "the", "dt", "the", "low", "is", "nan", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L308-L370
train
quantopian/zipline
zipline/data/resample.py
DailyHistoryAggregator.closes
def closes(self, assets, dt): """ The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt` the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close') closes = [] session_label = self._trading_calendar.minute_to_session_label(dt) def _get_filled_close(asset): """ Returns the most recent non-nan close for the asset in this session. If there has been no data in this session on or before the `dt`, returns `nan` """ window = self._minute_reader.load_raw_arrays( ['close'], market_open, dt, [asset], )[0] try: return window[~np.isnan(window)][-1] except IndexError: return np.NaN for asset in assets: if not asset.is_alive_for_session(session_label): closes.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'close') entries[asset] = (dt_value, val) closes.append(val) continue else: try: last_visited_dt, last_close = entries[asset] if last_visited_dt == dt_value: closes.append(last_close) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = last_close entries[asset] = (dt_value, val) closes.append(val) continue else: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue except KeyError: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue return np.array(closes)
python
def closes(self, assets, dt): """ The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt` the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close') closes = [] session_label = self._trading_calendar.minute_to_session_label(dt) def _get_filled_close(asset): """ Returns the most recent non-nan close for the asset in this session. If there has been no data in this session on or before the `dt`, returns `nan` """ window = self._minute_reader.load_raw_arrays( ['close'], market_open, dt, [asset], )[0] try: return window[~np.isnan(window)][-1] except IndexError: return np.NaN for asset in assets: if not asset.is_alive_for_session(session_label): closes.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'close') entries[asset] = (dt_value, val) closes.append(val) continue else: try: last_visited_dt, last_close = entries[asset] if last_visited_dt == dt_value: closes.append(last_close) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = last_close entries[asset] = (dt_value, val) closes.append(val) continue else: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue except KeyError: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue return np.array(closes)
[ "def", "closes", "(", "self", ",", "assets", ",", "dt", ")", ":", "market_open", ",", "prev_dt", ",", "dt_value", ",", "entries", "=", "self", ".", "_prelude", "(", "dt", ",", "'close'", ")", "closes", "=", "[", "]", "session_label", "=", "self", ".", "_trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "def", "_get_filled_close", "(", "asset", ")", ":", "\"\"\"\n Returns the most recent non-nan close for the asset in this\n session. If there has been no data in this session on or before the\n `dt`, returns `nan`\n \"\"\"", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'close'", "]", ",", "market_open", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", "try", ":", "return", "window", "[", "~", "np", ".", "isnan", "(", "window", ")", "]", "[", "-", "1", "]", "except", "IndexError", ":", "return", "np", ".", "NaN", "for", "asset", "in", "assets", ":", "if", "not", "asset", ".", "is_alive_for_session", "(", "session_label", ")", ":", "closes", ".", "append", "(", "np", ".", "NaN", ")", "continue", "if", "prev_dt", "is", "None", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'close'", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "closes", ".", "append", "(", "val", ")", "continue", "else", ":", "try", ":", "last_visited_dt", ",", "last_close", "=", "entries", "[", "asset", "]", "if", "last_visited_dt", "==", "dt_value", ":", "closes", ".", "append", "(", "last_close", ")", "continue", "elif", "last_visited_dt", "==", "prev_dt", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'close'", ")", "if", "pd", ".", "isnull", "(", "val", ")", ":", "val", "=", "last_close", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "closes", ".", "append", "(", "val", ")", "continue", "else", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'close'", ")", "if", "pd", ".", "isnull", "(", "val", ")", ":", "val", "=", "_get_filled_close", "(", "asset", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "closes", ".", "append", "(", "val", ")", "continue", "except", "KeyError", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'close'", ")", "if", "pd", ".", "isnull", "(", "val", ")", ":", "val", "=", "_get_filled_close", "(", "asset", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "closes", ".", "append", "(", "val", ")", "continue", "return", "np", ".", "array", "(", "closes", ")" ]
The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt`, the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter.
[ "The", "close", "field", "s", "aggregation", "returns", "the", "latest", "close", "at", "the", "given", "dt", ".", "If", "the", "close", "for", "the", "given", "dt", "is", "nan", "the", "most", "recent", "non", "-", "nan", "close", "is", "used", ".", "If", "there", "has", "been", "no", "data", "on", "or", "before", "the", "dt", "the", "close", "is", "nan", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L372-L446
train
quantopian/zipline
zipline/data/resample.py
DailyHistoryAggregator.volumes
def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes)
python
def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes)
[ "def", "volumes", "(", "self", ",", "assets", ",", "dt", ")", ":", "market_open", ",", "prev_dt", ",", "dt_value", ",", "entries", "=", "self", ".", "_prelude", "(", "dt", ",", "'volume'", ")", "volumes", "=", "[", "]", "session_label", "=", "self", ".", "_trading_calendar", ".", "minute_to_session_label", "(", "dt", ")", "for", "asset", "in", "assets", ":", "if", "not", "asset", ".", "is_alive_for_session", "(", "session_label", ")", ":", "volumes", ".", "append", "(", "0", ")", "continue", "if", "prev_dt", "is", "None", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'volume'", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "volumes", ".", "append", "(", "val", ")", "continue", "else", ":", "try", ":", "last_visited_dt", ",", "last_total", "=", "entries", "[", "asset", "]", "if", "last_visited_dt", "==", "dt_value", ":", "volumes", ".", "append", "(", "last_total", ")", "continue", "elif", "last_visited_dt", "==", "prev_dt", ":", "val", "=", "self", ".", "_minute_reader", ".", "get_value", "(", "asset", ",", "dt", ",", "'volume'", ")", "val", "+=", "last_total", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "volumes", ".", "append", "(", "val", ")", "continue", "else", ":", "after_last", "=", "pd", ".", "Timestamp", "(", "last_visited_dt", "+", "self", ".", "_one_min", ",", "tz", "=", "'UTC'", ")", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'volume'", "]", ",", "after_last", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", "val", "=", "np", ".", "nansum", "(", "window", ")", "+", "last_total", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "volumes", ".", "append", "(", "val", ")", "continue", "except", "KeyError", ":", "window", "=", "self", ".", "_minute_reader", ".", "load_raw_arrays", "(", "[", "'volume'", "]", ",", "market_open", ",", "dt", ",", "[", "asset", "]", ",", ")", "[", "0", "]", "val", "=", "np", ".", "nansum", "(", "window", ")", "entries", "[", "asset", "]", "=", "(", "dt_value", ",", "val", ")", "volumes", ".", "append", "(", "val", ")", "continue", "return", "np", ".", "array", "(", "volumes", ")" ]
The volume field's aggregation returns the sum of all volumes between the market open and the `dt`. If there has been no data on or before the `dt`, the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter.
[ "The", "volume", "field", "s", "aggregation", "returns", "the", "sum", "of", "all", "volumes", "between", "the", "market", "open", "and", "the", "dt", "If", "there", "has", "been", "no", "data", "on", "or", "before", "the", "dt", "the", "volume", "is", "0", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/resample.py#L448-L510
train
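One behavioral note worth pinning down with a check: the windowed path uses `np.nansum`, so missing minutes contribute 0 rather than poisoning the running total — consistent with the docstring's "no data means volume 0" rule. A self-contained sketch:

import numpy as np

window = np.array([100., np.nan, 250.])  # one minute has no trade data
assert np.nansum(window) == 350.0        # NaN counts as zero volume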