| column | type | range |
|---|---|---|
| repo | string | length 7–55 |
| path | string | length 4–223 |
| func_name | string | length 1–134 |
| original_string | string | length 75–104k |
| language | string | 1 class (`python`) |
| code | string | length 75–104k |
| code_tokens | list | length 19–28.4k |
| docstring | string | length 1–46.9k |
| docstring_tokens | list | length 1–1.97k |
| sha | string | length 40 |
| url | string | length 87–315 |
| partition | string | 1 class (`train`) |
quantopian/zipline
|
zipline/data/benchmarks.py
|
get_benchmark_returns
|
```python
def get_benchmark_returns(symbol):
    """
    Get a Series of benchmark returns from IEX associated with `symbol`.
    Default is `SPY`.

    Parameters
    ----------
    symbol : str
        Benchmark symbol for which we're getting the returns.

    The data is provided by IEX (https://iextrading.com/), and we can
    get up to 5 years worth of data.
    """
    r = requests.get(
        'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol)
    )
    data = r.json()

    df = pd.DataFrame(data)

    df.index = pd.DatetimeIndex(df['date'])
    df = df['close']

    return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/benchmarks.py#L19-L42
|
train
|
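A usage sketch for the `get_benchmark_returns` entry above; illustrative only, since the api.iextrading.com endpoint it depends on was retired in 2019:

```python
from zipline.data.benchmarks import get_benchmark_returns

# When the endpoint was live, this returned a tz-aware (UTC) daily
# pd.Series of close-to-close percent changes, with the first (NaN)
# observation dropped by `.iloc[1:]`.
returns = get_benchmark_returns('SPY')
```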
quantopian/zipline
|
zipline/pipeline/visualize.py
|
delimit
|
```python
def delimit(delimiters, content):
    """
    Surround `content` with the first and last characters of `delimiters`.

    >>> delimit('[]', "foo")  # doctest: +SKIP
    '[foo]'
    >>> delimit('""', "foo")  # doctest: +SKIP
    '"foo"'
    """
    if len(delimiters) != 2:
        raise ValueError(
            "`delimiters` must be of length 2. Got %r" % delimiters
        )
    return ''.join([delimiters[0], content, delimiters[1]])
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L24-L37
|
train
|
quantopian/zipline
|
zipline/pipeline/visualize.py
|
roots
|
```python
def roots(g):
    "Get nodes from graph G with indegree 0"
    return set(n for n, d in iteritems(g.in_degree()) if d == 0)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L73-L75
|
train
|
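A small usage sketch for `roots` above, assuming `six.iteritems` and a networkx 1.x-era graph whose `in_degree()` returns a dict (under networkx 2+ you would need `dict(g.in_degree())` first):

```python
import networkx as nx

from zipline.pipeline.visualize import roots

g = nx.DiGraph()
g.add_edges_from([('a', 'b'), ('a', 'c'), ('d', 'c')])

# 'a' and 'd' have no incoming edges, so they are the roots.
print(roots(g))  # {'a', 'd'}
```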
quantopian/zipline
|
zipline/pipeline/visualize.py
|
_render
|
```python
def _render(g, out, format_, include_asset_exists=False):
    """
    Draw `g` as a graph to `out`, in format `format_`.

    Parameters
    ----------
    g : zipline.pipeline.graph.TermGraph
        Graph to render.
    out : file-like object
    format_ : str {'png', 'svg'}
        Output format.
    include_asset_exists : bool
        Whether to include `AssetExists()` nodes.
    """
    graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
    cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}

    in_nodes = g.loadable_terms
    out_nodes = list(g.outputs.values())

    f = BytesIO()
    with graph(f, "G", **graph_attrs):

        # Write outputs cluster.
        with cluster(f, 'Output', labelloc='b', **cluster_attrs):
            for term in filter_nodes(include_asset_exists, out_nodes):
                add_term_node(f, term)

        # Write inputs cluster.
        with cluster(f, 'Input', **cluster_attrs):
            for term in filter_nodes(include_asset_exists, in_nodes):
                add_term_node(f, term)

        # Write intermediate results.
        for term in filter_nodes(include_asset_exists,
                                 topological_sort(g.graph)):
            if term in in_nodes or term in out_nodes:
                continue
            add_term_node(f, term)

        # Write edges
        for source, dest in g.graph.edges():
            if source is AssetExists() and not include_asset_exists:
                continue
            add_edge(f, id(source), id(dest))

    cmd = ['dot', '-T', format_]
    try:
        proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise RuntimeError(
                "Couldn't find `dot` graph layout program. "
                "Make sure Graphviz is installed and `dot` is on your path."
            )
        else:
            raise

    f.seek(0)
    proc_stdout, proc_stderr = proc.communicate(f.read())
    if proc_stderr:
        raise RuntimeError(
            "Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
        )

    out.write(proc_stdout)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L84-L149
|
train
|
quantopian/zipline
|
zipline/pipeline/visualize.py
|
display_graph
|
```python
def display_graph(g, format='svg', include_asset_exists=False):
    """
    Display a TermGraph interactively from within IPython.
    """
    try:
        import IPython.display as display
    except ImportError:
        raise NoIPython("IPython is not installed. Can't display graph.")

    if format == 'svg':
        display_cls = display.SVG
    elif format in ("jpeg", "png"):
        display_cls = partial(display.Image, format=format, embed=True)

    out = BytesIO()
    _render(g, out, format, include_asset_exists=include_asset_exists)
    return display_cls(data=out.getvalue())
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L152-L168
|
train
|
quantopian/zipline
|
zipline/pipeline/visualize.py
|
format_attrs
|
```python
def format_attrs(attrs):
    """
    Format key, value pairs from attrs into graphviz attrs format

    Examples
    --------
    >>> format_attrs({'key1': 'value1', 'key2': 'value2'})  # doctest: +SKIP
    '[key1=value1, key2=value2]'
    """
    if not attrs:
        return ''
    entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
    return '[' + ', '.join(entries) + ']'
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/visualize.py#L215-L227
|
train
|
quantopian/zipline
|
zipline/utils/pool.py
|
SequentialPool.apply_async
|
```python
def apply_async(f, args=(), kwargs=None, callback=None):
    """Apply a function but emulate the API of an asynchronous call.

    Parameters
    ----------
    f : callable
        The function to call.
    args : tuple, optional
        The positional arguments.
    kwargs : dict, optional
        The keyword arguments.

    Returns
    -------
    future : ApplyAsyncResult
        The result of calling the function boxed in a future-like api.

    Notes
    -----
    This calls the function eagerly but wraps it so that ``SequentialPool``
    can be used where a :class:`multiprocessing.Pool` or
    :class:`gevent.pool.Pool` would be used.
    """
    try:
        value = (identity if callback is None else callback)(
            f(*args, **kwargs or {}),
        )
        successful = True
    except Exception as e:
        value = e
        successful = False

    return ApplyAsyncResult(value, successful)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pool.py#L84-L116
|
train
|
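A hedged usage sketch for `SequentialPool.apply_async` above; it assumes `ApplyAsyncResult` mirrors the `multiprocessing.pool.AsyncResult` surface (`get`/`successful`), which is the API this shim emulates:

```python
from zipline.utils.pool import SequentialPool

res = SequentialPool.apply_async(sum, args=([1, 2, 3],))
print(res.successful())  # True
print(res.get())         # 6 -- computed eagerly, before get() was called
```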
quantopian/zipline
|
zipline/utils/cli.py
|
maybe_show_progress
|
```python
def maybe_show_progress(it, show_progress, **kwargs):
    """Optionally show a progress bar for the given iterator.

    Parameters
    ----------
    it : iterable
        The underlying iterator.
    show_progress : bool
        Should progress be shown.
    **kwargs
        Forwarded to the click progress bar.

    Returns
    -------
    itercontext : context manager
        A context manager whose enter is the actual iterator to use.

    Examples
    --------
    .. code-block:: python

       with maybe_show_progress([1, 2, 3], True) as ns:
            for n in ns:
                ...
    """
    if show_progress:
        return click.progressbar(it, **kwargs)

    # context manager that just returns `it` when we enter it
    return CallbackManager(lambda it=it: it)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/cli.py#L7-L36
|
train
|
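A usage sketch for `maybe_show_progress` above; `label` is a standard `click.progressbar` keyword and is simply forwarded:

```python
from zipline.utils.cli import maybe_show_progress

with maybe_show_progress(range(100), show_progress=True, label='working') as it:
    for _ in it:
        pass  # with show_progress=False the same loop runs, just silently
```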
quantopian/zipline
|
zipline/__main__.py
|
main
|
```python
def main(extension, strict_extensions, default_extension, x):
    """Top level zipline entry point.
    """
    # install a logbook handler before performing any other operations
    logbook.StderrHandler().push_application()
    create_args(x, zipline.extension_args)
    load_extensions(
        default_extension,
        extension,
        strict_extensions,
        os.environ,
    )
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L49-L61
|
train
|
quantopian/zipline
|
zipline/__main__.py
|
ipython_only
|
```python
def ipython_only(option):
    """Mark that an option should only be exposed in IPython.

    Parameters
    ----------
    option : decorator
        A click.option decorator.

    Returns
    -------
    ipython_only_dec : decorator
        A decorator that correctly applies the argument even when not
        using IPython mode.
    """
    if __IPYTHON__:
        return option

    argname = extract_option_object(option).name

    def d(f):
        @wraps(f)
        def _(*args, **kwargs):
            kwargs[argname] = None
            return f(*args, **kwargs)
        return _
    return d
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L84-L109
|
train
|
quantopian/zipline
|
zipline/__main__.py
|
run
|
```python
def run(ctx,
        algofile,
        algotext,
        define,
        data_frequency,
        capital_base,
        bundle,
        bundle_timestamp,
        start,
        end,
        output,
        trading_calendar,
        print_algo,
        metrics_set,
        local_namespace,
        blotter):
    """Run a backtest for the given algorithm.
    """
    # check that the start and end dates are passed correctly
    if start is None and end is None:
        # check both at the same time to avoid the case where a user
        # does not pass either of these and then passes the first only
        # to be told they need to pass the second argument also
        ctx.fail(
            "must specify dates with '-s' / '--start' and '-e' / '--end'",
        )
    if start is None:
        ctx.fail("must specify a start date with '-s' / '--start'")
    if end is None:
        ctx.fail("must specify an end date with '-e' / '--end'")

    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )

    trading_calendar = get_calendar(trading_calendar)

    perf = _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=output,
        trading_calendar=trading_calendar,
        print_algo=print_algo,
        metrics_set=metrics_set,
        local_namespace=local_namespace,
        environ=os.environ,
        blotter=blotter,
        benchmark_returns=None,
    )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L216-L284
|
train
|
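The mutual-exclusion guard in `run` above compares two `is not None` tests: they are equal exactly when both or neither option was supplied. A standalone illustration (not zipline code):

```python
def exactly_one(a, b):
    # Mirrors the guard in `run`: the input is valid only when the
    # two "is not None" tests disagree.
    return (a is not None) != (b is not None)

assert exactly_one('algo.py', None)
assert not exactly_one(None, None)         # neither given
assert not exactly_one('algo.py', 'pass')  # both given
```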
quantopian/zipline
|
zipline/__main__.py
|
zipline_magic
|
```python
def zipline_magic(line, cell=None):
    """The zipline IPython cell magic.
    """
    load_extensions(
        default=True,
        extensions=[],
        strict=True,
        environ=os.environ,
    )
    try:
        return run.main(
            # put our overrides at the start of the parameter list so that
            # users may pass values with higher precedence
            [
                '--algotext', cell,
                '--output', os.devnull,  # don't write the results by default
            ] + ([
                # these options are set when running in line magic mode
                # set a non None algo text to use the ipython user_ns
                '--algotext', '',
                '--local-namespace',
            ] if cell is None else []) + line.split(),
            '%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
            standalone_mode=False,
        )
    except SystemExit as e:
        # https://github.com/mitsuhiko/click/pull/533
        # even in standalone_mode=False `--help` really wants to kill us ;_;
        if e.code:
            raise ValueError('main returned non-zero status code: %d' % e.code)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L287-L317
|
train
|
quantopian/zipline
|
zipline/__main__.py
|
ingest
|
```python
def ingest(bundle, assets_version, show_progress):
    """Ingest the data for the given bundle.
    """
    bundles_module.ingest(
        bundle,
        os.environ,
        pd.Timestamp.utcnow(),
        assets_version,
        show_progress,
    )
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L340-L349
|
train
|
quantopian/zipline
|
zipline/__main__.py
|
clean
|
```python
def clean(bundle, before, after, keep_last):
    """Clean up data downloaded with the ingest command.
    """
    bundles_module.clean(
        bundle,
        before,
        after,
        keep_last,
    )
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L383-L391
|
train
|
quantopian/zipline
|
zipline/__main__.py
|
bundles
|
```python
def bundles():
    """List all of the available data bundles.
    """
    for bundle in sorted(bundles_module.bundles.keys()):
        if bundle.startswith('.'):
            # hide the test data
            continue
        try:
            ingestions = list(
                map(text_type, bundles_module.ingestions_for_bundle(bundle))
            )
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            ingestions = []

        # If we got no ingestions, either because the directory didn't
        # exist or because there were no entries, print a single message
        # indicating that no ingestions have yet been made.
        for timestamp in ingestions or ["<no ingestions>"]:
            click.echo("%s %s" % (bundle, timestamp))
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/__main__.py#L395-L415
|
train
|
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
binary_operator
|
```python
def binary_operator(op):
    """
    Factory function for making binary operator methods on a Filter subclass.

    Returns a function "binary_operator" suitable for implementing functions
    like __and__ or __or__.
    """
    # When combining a Filter with a NumericalExpression, we use this
    # attrgetter instance to defer to the commuted interpretation of the
    # NumericalExpression operator.
    commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))

    def binary_operator(self, other):
        if isinstance(self, NumericalExpression):
            self_expr, other_expr, new_inputs = self.build_binary_op(
                op, other,
            )
            return NumExprFilter.create(
                "({left}) {op} ({right})".format(
                    left=self_expr,
                    op=op,
                    right=other_expr,
                ),
                new_inputs,
            )
        elif isinstance(other, NumericalExpression):
            # NumericalExpression overrides numerical ops to correctly handle
            # merging of inputs. Look up and call the appropriate
            # right-binding operator with ourself as the input.
            return commuted_method_getter(other)(self)
        elif isinstance(other, Term):
            if other.dtype != bool_dtype:
                raise BadBinaryOperator(op, self, other)
            if self is other:
                return NumExprFilter.create(
                    "x_0 {op} x_0".format(op=op),
                    (self,),
                )
            return NumExprFilter.create(
                "x_0 {op} x_1".format(op=op),
                (self, other),
            )
        elif isinstance(other, int):  # Note that this is true for bool as well
            return NumExprFilter.create(
                "x_0 {op} {constant}".format(op=op, constant=int(other)),
                binds=(self,),
            )
        raise BadBinaryOperator(op, self, other)

    binary_operator.__doc__ = "Binary Operator: '%s'" % op
    return binary_operator
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L62-L112
|
train
|
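A self-contained illustration (not zipline code) of the operator-factory pattern used by `binary_operator` above: one outer function manufactures a dunder method per operator string and stamps its docstring:

```python
import operator


class Vec(object):
    def __init__(self, xs):
        self.xs = list(xs)


def make_binop(op):
    fn = {'&': operator.and_, '|': operator.or_}[op]

    def binop(self, other):
        return Vec(fn(a, b) for a, b in zip(self.xs, other.xs))

    binop.__doc__ = "Binary Operator: '%s'" % op
    return binop


Vec.__and__ = make_binop('&')
Vec.__or__ = make_binop('|')

print((Vec([1, 0]) & Vec([1, 1])).xs)  # [1, 0]
```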
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
unary_operator
|
```python
def unary_operator(op):
    """
    Factory function for making unary operator methods for Filters.
    """
    valid_ops = {'~'}
    if op not in valid_ops:
        raise ValueError("Invalid unary operator %s." % op)

    def unary_operator(self):
        # This can't be hoisted up a scope because the types returned by
        # unary_op_return_type aren't defined when the top-level function is
        # invoked.
        if isinstance(self, NumericalExpression):
            return NumExprFilter.create(
                "{op}({expr})".format(op=op, expr=self._expr),
                self.inputs,
            )
        else:
            return NumExprFilter.create("{op}x_0".format(op=op), (self,))

    unary_operator.__doc__ = "Unary Operator: '%s'" % op
    return unary_operator
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L115-L136
|
train
|
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
NumExprFilter.create
|
```python
def create(cls, expr, binds):
    """
    Helper for creating new NumExprFilters.

    This is just a wrapper around NumericalExpression.__new__ that always
    forwards `bool` as the dtype, since Filters can only be of boolean
    dtype.
    """
    return cls(expr=expr, binds=binds, dtype=bool_dtype)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L237-L245
|
train
|
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
NumExprFilter._compute
|
```python
def _compute(self, arrays, dates, assets, mask):
    """
    Compute our result with numexpr, then re-apply `mask`.
    """
    return super(NumExprFilter, self)._compute(
        arrays,
        dates,
        assets,
        mask,
    ) & mask
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L247-L256
|
train
|
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
PercentileFilter._validate
|
```python
def _validate(self):
    """
    Ensure that our percentile bounds are well-formed.
    """
    if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
        raise BadPercentileBounds(
            min_percentile=self._min_percentile,
            max_percentile=self._max_percentile,
            upper_bound=100.0
        )
    return super(PercentileFilter, self)._validate()
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L344-L354
|
train
|
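The bounds check in `_validate` above leans on Python's chained comparisons, which evaluate pairwise and short-circuit. A quick standalone illustration:

```python
def bounds_ok(lo, hi):
    # Same shape as the check in _validate.
    return 0.0 <= lo < hi <= 100.0

assert bounds_ok(25.0, 75.0)
assert not bounds_ok(75.0, 25.0)   # inverted
assert not bounds_ok(50.0, 50.0)   # empty range (strict <)
assert not bounds_ok(-1.0, 50.0)   # below the lower bound
```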
quantopian/zipline
|
zipline/pipeline/filters/filter.py
|
PercentileFilter._compute
|
```python
def _compute(self, arrays, dates, assets, mask):
    """
    For each row in the input, compute a mask of all values falling between
    the given percentiles.
    """
    # TODO: Review whether there's a better way of handling small numbers
    # of columns.
    data = arrays[0].copy().astype(float64)
    data[~mask] = nan

    # FIXME: np.nanpercentile **should** support computing multiple bounds
    # at once, but there's a bug in the logic for multiple bounds in numpy
    # 1.9.2. It will be fixed in 1.10.
    # c.f. https://github.com/numpy/numpy/pull/5981
    lower_bounds = nanpercentile(
        data,
        self._min_percentile,
        axis=1,
        keepdims=True,
    )
    upper_bounds = nanpercentile(
        data,
        self._max_percentile,
        axis=1,
        keepdims=True,
    )
    return (lower_bounds <= data) & (data <= upper_bounds)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/filters/filter.py#L356-L382
|
train
|
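A plain-numpy sketch (not zipline code) of the row-wise percentile masking performed by `_compute` above; previously-masked cells become NaN and therefore fail both comparisons:

```python
import numpy as np

data = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                 [10.0, 20.0, 30.0, 40.0, np.nan]])

lower = np.nanpercentile(data, 25.0, axis=1, keepdims=True)
upper = np.nanpercentile(data, 75.0, axis=1, keepdims=True)

# Each row is compared against its own bounds; the NaN cell is False.
print((lower <= data) & (data <= upper))
```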
quantopian/zipline
|
zipline/data/treasuries.py
|
parse_treasury_csv_column
|
```python
def parse_treasury_csv_column(column):
    """
    Parse a treasury CSV column into a more human-readable format.

    Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
    by a two-digit number signifying number of years/months, followed by _N.B.
    We only care about the middle two entries, which we turn into a string like
    3month or 30year.
    """
    column_re = re.compile(
        r"^(?P<prefix>RIFLGFC)"
        "(?P<unit>[YM])"
        "(?P<periods>[0-9]{2})"
        "(?P<suffix>_N.B)$"
    )

    match = column_re.match(column)
    if match is None:
        raise ValueError("Couldn't parse CSV column %r." % column)
    unit, periods = get_unit_and_periods(match.groupdict())

    # Roundtrip through int to coerce '06' into '6'.
    return str(int(periods)) + ('year' if unit == 'Y' else 'month')
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries.py#L25-L47
|
train
|
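Usage examples for `parse_treasury_csv_column` above, derived directly from the column format its docstring describes ('RIFLGFC' + Y/M + two digits + '_N.B'):

```python
from zipline.data.treasuries import parse_treasury_csv_column

print(parse_treasury_csv_column('RIFLGFCY10_N.B'))  # '10year'
print(parse_treasury_csv_column('RIFLGFCM03_N.B'))  # '3month'
parse_treasury_csv_column('BOGUS')  # raises ValueError
```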
quantopian/zipline
|
zipline/data/treasuries.py
|
get_daily_10yr_treasury_data
|
```python
def get_daily_10yr_treasury_data():
    """Download daily 10 year treasury rates from the Federal Reserve and
    return a pandas.Series."""
    url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
          "&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
          "&filetype=csv&label=include&layout=seriescolumn"
    return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
                       parse_dates=True, converters={1: dataconverter},
                       squeeze=True)
```
|
python
|
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/treasuries.py#L93-L101
|
train
|
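The dataconverter referenced above is a module-level helper that this entry does not include. A plausible stand-in (an assumption, not the verbatim zipline helper) coerces the Fed's rate strings, including its 'ND' no-data markers, into floats:

import numpy as np

def dataconverter(s):
    # Hypothetical converter: percent string -> decimal rate, NaN on 'ND'.
    try:
        return float(s) / 100.0
    except ValueError:
        return np.nan

# Usage sketch:
# rates = get_daily_10yr_treasury_data()
# rates.loc['2016-01-04']  # 10-year rate for that session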
quantopian/zipline
|
zipline/data/minute_bars.py
|
_sid_subdir_path
|
def _sid_subdir_path(sid):
"""
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
|
python
|
def _sid_subdir_path(sid):
"""
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
|
[
"def",
"_sid_subdir_path",
"(",
"sid",
")",
":",
"padded_sid",
"=",
"format",
"(",
"sid",
",",
"'06'",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"# subdir 1 00/XX",
"padded_sid",
"[",
"0",
":",
"2",
"]",
",",
"# subdir 2 XX/00",
"padded_sid",
"[",
"2",
":",
"4",
"]",
",",
"\"{0}.bcolz\"",
".",
"format",
"(",
"str",
"(",
"padded_sid",
")",
")",
")"
] |
Format subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
|
[
"Format",
"subdir",
"path",
"to",
"limit",
"the",
"number",
"directories",
"in",
"any",
"given",
"subdirectory",
"to",
"100",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L85-L113
|
train
|
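A quick doctest-style check of the 2/2/2 split, assuming _sid_subdir_path is in scope as defined above; each directory level holds at most 100 entries:

import os

assert _sid_subdir_path(1) == os.path.join('00', '00', '000001.bcolz')
assert _sid_subdir_path(12345) == os.path.join('01', '23', '012345.bcolz')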
quantopian/zipline
|
zipline/data/minute_bars.py
|
convert_cols
|
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round()
scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round()
scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round()
scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round()
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
|
python
|
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round()
scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round()
scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round()
scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round()
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
|
[
"def",
"convert_cols",
"(",
"cols",
",",
"scale_factor",
",",
"sid",
",",
"invalid_data_behavior",
")",
":",
"scaled_opens",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'open'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_highs",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'high'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_lows",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'low'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"scaled_closes",
"=",
"(",
"np",
".",
"nan_to_num",
"(",
"cols",
"[",
"'close'",
"]",
")",
"*",
"scale_factor",
")",
".",
"round",
"(",
")",
"exclude_mask",
"=",
"np",
".",
"zeros_like",
"(",
"scaled_opens",
",",
"dtype",
"=",
"bool",
")",
"for",
"col_name",
",",
"scaled_col",
"in",
"[",
"(",
"'open'",
",",
"scaled_opens",
")",
",",
"(",
"'high'",
",",
"scaled_highs",
")",
",",
"(",
"'low'",
",",
"scaled_lows",
")",
",",
"(",
"'close'",
",",
"scaled_closes",
")",
",",
"]",
":",
"max_val",
"=",
"scaled_col",
".",
"max",
"(",
")",
"try",
":",
"check_uint32_safe",
"(",
"max_val",
",",
"col_name",
")",
"except",
"ValueError",
":",
"if",
"invalid_data_behavior",
"==",
"'raise'",
":",
"raise",
"if",
"invalid_data_behavior",
"==",
"'warn'",
":",
"logger",
".",
"warn",
"(",
"'Values for sid={}, col={} contain some too large for '",
"'uint32 (max={}), filtering them out'",
",",
"sid",
",",
"col_name",
",",
"max_val",
",",
")",
"# We want to exclude all rows that have an unsafe value in",
"# this column.",
"exclude_mask",
"&=",
"(",
"scaled_col",
">=",
"np",
".",
"iinfo",
"(",
"np",
".",
"uint32",
")",
".",
"max",
")",
"# Convert all cols to uint32.",
"opens",
"=",
"scaled_opens",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"highs",
"=",
"scaled_highs",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"lows",
"=",
"scaled_lows",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"closes",
"=",
"scaled_closes",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"volumes",
"=",
"cols",
"[",
"'volume'",
"]",
".",
"astype",
"(",
"np",
".",
"uint32",
")",
"# Exclude rows with unsafe values by setting to zero.",
"opens",
"[",
"exclude_mask",
"]",
"=",
"0",
"highs",
"[",
"exclude_mask",
"]",
"=",
"0",
"lows",
"[",
"exclude_mask",
"]",
"=",
"0",
"closes",
"[",
"exclude_mask",
"]",
"=",
"0",
"volumes",
"[",
"exclude_mask",
"]",
"=",
"0",
"return",
"opens",
",",
"highs",
",",
"lows",
",",
"closes",
",",
"volumes"
] |
Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
|
[
"Adapt",
"OHLCV",
"columns",
"into",
"uint32",
"columns",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L116-L180
|
train
|
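The scale-then-cast step in isolation, as a self-contained sketch (check_uint32_safe and logger are module helpers not shown in this entry). With an OHLC ratio of 1000, a 123.456 price becomes the uint32 123456 and NaNs become 0:

import numpy as np

prices = np.array([123.456, np.nan])
scaled = (np.nan_to_num(prices) * 1000).round()
# This bound is what check_uint32_safe enforces before the cast.
assert scaled.max() < np.iinfo(np.uint32).max
assert scaled.astype(np.uint32).tolist() == [123456, 0]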
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarMetadata.write
|
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the prices from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
|
python
|
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the prices from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
|
[
"def",
"write",
"(",
"self",
",",
"rootdir",
")",
":",
"calendar",
"=",
"self",
".",
"calendar",
"slicer",
"=",
"calendar",
".",
"schedule",
".",
"index",
".",
"slice_indexer",
"(",
"self",
".",
"start_session",
",",
"self",
".",
"end_session",
",",
")",
"schedule",
"=",
"calendar",
".",
"schedule",
"[",
"slicer",
"]",
"market_opens",
"=",
"schedule",
".",
"market_open",
"market_closes",
"=",
"schedule",
".",
"market_close",
"metadata",
"=",
"{",
"'version'",
":",
"self",
".",
"version",
",",
"'ohlc_ratio'",
":",
"self",
".",
"default_ohlc_ratio",
",",
"'ohlc_ratios_per_sid'",
":",
"self",
".",
"ohlc_ratios_per_sid",
",",
"'minutes_per_day'",
":",
"self",
".",
"minutes_per_day",
",",
"'calendar_name'",
":",
"self",
".",
"calendar",
".",
"name",
",",
"'start_session'",
":",
"str",
"(",
"self",
".",
"start_session",
".",
"date",
"(",
")",
")",
",",
"'end_session'",
":",
"str",
"(",
"self",
".",
"end_session",
".",
"date",
"(",
")",
")",
",",
"# Write these values for backwards compatibility",
"'first_trading_day'",
":",
"str",
"(",
"self",
".",
"start_session",
".",
"date",
"(",
")",
")",
",",
"'market_opens'",
":",
"(",
"market_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
".",
"tolist",
"(",
")",
")",
",",
"'market_closes'",
":",
"(",
"market_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
".",
"tolist",
"(",
")",
")",
",",
"}",
"with",
"open",
"(",
"self",
".",
"metadata_path",
"(",
"rootdir",
")",
",",
"'w+'",
")",
"as",
"fp",
":",
"json",
".",
"dump",
"(",
"metadata",
",",
"fp",
")"
] |
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the prices from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
|
[
"Write",
"the",
"metadata",
"to",
"a",
"JSON",
"file",
"in",
"the",
"rootdir",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L280-L349
|
train
|
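A sketch of reading the file this method writes, assuming the conventional metadata.json filename produced by metadata_path (the exact name is defined elsewhere in the class):

import json
import os

def read_raw_metadata(rootdir):
    # Hypothetical reader for the JSON written above.
    with open(os.path.join(rootdir, 'metadata.json')) as fp:
        return json.load(fp)

# meta = read_raw_metadata('/path/to/minute/bars')
# meta['calendar_name'], meta['start_session'], meta['end_session']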
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.open
|
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
|
python
|
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
|
[
"def",
"open",
"(",
"cls",
",",
"rootdir",
",",
"end_session",
"=",
"None",
")",
":",
"metadata",
"=",
"BcolzMinuteBarMetadata",
".",
"read",
"(",
"rootdir",
")",
"return",
"BcolzMinuteBarWriter",
"(",
"rootdir",
",",
"metadata",
".",
"calendar",
",",
"metadata",
".",
"start_session",
",",
"end_session",
"if",
"end_session",
"is",
"not",
"None",
"else",
"metadata",
".",
"end_session",
",",
"metadata",
".",
"minutes_per_day",
",",
"metadata",
".",
"default_ohlc_ratio",
",",
"metadata",
".",
"ohlc_ratios_per_sid",
",",
"write_metadata",
"=",
"end_session",
"is",
"not",
"None",
")"
] |
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
|
[
"Open",
"an",
"existing",
"rootdir",
"for",
"writing",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L482-L501
|
train
|
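Hedged usage sketch: re-open an existing rootdir to append more sessions; passing end_session makes the constructor rewrite the metadata with the new end date.

# import pandas as pd
# writer = BcolzMinuteBarWriter.open(
#     '/path/to/minute/bars',                  # illustrative path
#     end_session=pd.Timestamp('2016-06-30'),
# )
# writer.write(new_data, show_progress=True)   # new_data: (sid, df) pairs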
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.sidpath
|
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
|
python
|
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
|
[
"def",
"sidpath",
"(",
"self",
",",
"sid",
")",
":",
"sid_subdir",
"=",
"_sid_subdir_path",
"(",
"sid",
")",
"return",
"join",
"(",
"self",
".",
"_rootdir",
",",
"sid_subdir",
")"
] |
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
|
[
"Parameters",
"----------",
"sid",
":",
"int",
"Asset",
"identifier",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L518-L531
|
train
|
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.last_date_in_output_for_sid
|
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
|
python
|
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
|
[
"def",
"last_date_in_output_for_sid",
"(",
"self",
",",
"sid",
")",
":",
"sizes_path",
"=",
"\"{0}/close/meta/sizes\"",
".",
"format",
"(",
"self",
".",
"sidpath",
"(",
"sid",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sizes_path",
")",
":",
"return",
"pd",
".",
"NaT",
"with",
"open",
"(",
"sizes_path",
",",
"mode",
"=",
"'r'",
")",
"as",
"f",
":",
"sizes",
"=",
"f",
".",
"read",
"(",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"sizes",
")",
"# use integer division so that the result is an int",
"# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa",
"num_days",
"=",
"data",
"[",
"'shape'",
"]",
"[",
"0",
"]",
"//",
"self",
".",
"_minutes_per_day",
"if",
"num_days",
"==",
"0",
":",
"# empty container",
"return",
"pd",
".",
"NaT",
"return",
"self",
".",
"_session_labels",
"[",
"num_days",
"-",
"1",
"]"
] |
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written to the output for the
given sid.
|
[
"Parameters",
"----------",
"sid",
":",
"int",
"Asset",
"identifier",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L533-L558
|
train
|
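The arithmetic the method relies on: bcolz records the column length in close/meta/sizes, and whole sessions are always written, so the session count is a single integer division. With 390 minutes per day:

shape0 = 1170             # 'shape'[0] read from close/meta/sizes
num_days = shape0 // 390  # -> 3, so the answer is session label index 2
assert num_days == 3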
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter._init_ctable
|
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
|
python
|
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
|
[
"def",
"_init_ctable",
"(",
"self",
",",
"path",
")",
":",
"# Only create the containing subdir on creation.",
"# This is not to be confused with the `.bcolz` directory, but is the",
"# directory up one level from the `.bcolz` directories.",
"sid_containing_dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sid_containing_dirname",
")",
":",
"# Other sids may have already created the containing directory.",
"os",
".",
"makedirs",
"(",
"sid_containing_dirname",
")",
"initial_array",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"np",
".",
"uint32",
")",
"table",
"=",
"ctable",
"(",
"rootdir",
"=",
"path",
",",
"columns",
"=",
"[",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"initial_array",
",",
"]",
",",
"names",
"=",
"[",
"'open'",
",",
"'high'",
",",
"'low'",
",",
"'close'",
",",
"'volume'",
"]",
",",
"expectedlen",
"=",
"self",
".",
"_expectedlen",
",",
"mode",
"=",
"'w'",
",",
")",
"table",
".",
"flush",
"(",
")",
"return",
"table"
] |
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
|
[
"Create",
"empty",
"ctable",
"for",
"given",
"path",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L560-L597
|
train
|
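The same construction as a standalone sketch (requires the bcolz package; the rootdir below is illustrative):

import numpy as np
from bcolz import ctable

empty = np.empty(0, np.uint32)
table = ctable(
    rootdir='/tmp/000001.bcolz',  # hypothetical path
    columns=[empty] * 5,
    names=['open', 'high', 'low', 'close', 'volume'],
    mode='w',
)
table.flush()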
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter._ensure_ctable
|
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
|
python
|
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
|
[
"def",
"_ensure_ctable",
"(",
"self",
",",
"sid",
")",
":",
"sidpath",
"=",
"self",
".",
"sidpath",
"(",
"sid",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"sidpath",
")",
":",
"return",
"self",
".",
"_init_ctable",
"(",
"sidpath",
")",
"return",
"bcolz",
".",
"ctable",
"(",
"rootdir",
"=",
"sidpath",
",",
"mode",
"=",
"'a'",
")"
] |
Ensure that a ctable exists for ``sid``, then return it.
|
[
"Ensure",
"that",
"a",
"ctable",
"exists",
"for",
"sid",
"then",
"return",
"it",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L599-L604
|
train
|
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.pad
|
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, the `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date is pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
|
python
|
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, the `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if last_date is pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
|
[
"def",
"pad",
"(",
"self",
",",
"sid",
",",
"date",
")",
":",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"last_date",
"=",
"self",
".",
"last_date_in_output_for_sid",
"(",
"sid",
")",
"tds",
"=",
"self",
".",
"_session_labels",
"if",
"date",
"<=",
"last_date",
"or",
"date",
"<",
"tds",
"[",
"0",
"]",
":",
"# No need to pad.",
"return",
"if",
"last_date",
"==",
"pd",
".",
"NaT",
":",
"# If there is no data, determine how many days to add so that",
"# desired days are written to the correct slots.",
"days_to_zerofill",
"=",
"tds",
"[",
"tds",
".",
"slice_indexer",
"(",
"end",
"=",
"date",
")",
"]",
"else",
":",
"days_to_zerofill",
"=",
"tds",
"[",
"tds",
".",
"slice_indexer",
"(",
"start",
"=",
"last_date",
"+",
"tds",
".",
"freq",
",",
"end",
"=",
"date",
")",
"]",
"self",
".",
"_zerofill",
"(",
"table",
",",
"len",
"(",
"days_to_zerofill",
")",
")",
"new_last_date",
"=",
"self",
".",
"last_date_in_output_for_sid",
"(",
"sid",
")",
"assert",
"new_last_date",
"==",
"date",
",",
"\"new_last_date={0} != date={1}\"",
".",
"format",
"(",
"new_last_date",
",",
"date",
")"
] |
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done, the `last_date_in_output_for_sid` will be equal to `date`.
|
[
"Fill",
"sid",
"container",
"with",
"empty",
"data",
"through",
"the",
"specified",
"date",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L618-L659
|
train
|
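How the zero-fill window is computed, sketched on a toy session index (the real tds is the calendar's session labels):

import pandas as pd

tds = pd.date_range('2016-01-04', periods=5, freq='B')
last_date = tds[1]   # data already written through Jan 5
date = tds[3]        # pad through Jan 7
window = tds[tds.slice_indexer(start=last_date + tds.freq, end=date)]
assert list(window) == [tds[2], tds[3]]  # two sessions of zeros appended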
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.set_sid_attrs
|
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
|
python
|
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
|
[
"def",
"set_sid_attrs",
"(",
"self",
",",
"sid",
",",
"*",
"*",
"kwargs",
")",
":",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"table",
".",
"attrs",
"[",
"k",
"]",
"=",
"v"
] |
Write all the supplied kwargs as attributes of the sid's file.
|
[
"Write",
"all",
"the",
"supplied",
"kwargs",
"as",
"attributes",
"of",
"the",
"sid",
"s",
"file",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L661-L666
|
train
|
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.write
|
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
|
python
|
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
|
[
"def",
"write",
"(",
"self",
",",
"data",
",",
"show_progress",
"=",
"False",
",",
"invalid_data_behavior",
"=",
"'warn'",
")",
":",
"ctx",
"=",
"maybe_show_progress",
"(",
"data",
",",
"show_progress",
"=",
"show_progress",
",",
"item_show_func",
"=",
"lambda",
"e",
":",
"e",
"if",
"e",
"is",
"None",
"else",
"str",
"(",
"e",
"[",
"0",
"]",
")",
",",
"label",
"=",
"\"Merging minute equity files:\"",
",",
")",
"write_sid",
"=",
"self",
".",
"write_sid",
"with",
"ctx",
"as",
"it",
":",
"for",
"e",
"in",
"it",
":",
"write_sid",
"(",
"*",
"e",
",",
"invalid_data_behavior",
"=",
"invalid_data_behavior",
")"
] |
Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
|
[
"Write",
"a",
"stream",
"of",
"minute",
"data",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L668-L697
|
train
|
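Hedged usage sketch: data may be any iterable of (sid, DataFrame) pairs, so a generator keeps memory flat while the progress bar still shows each sid:

# def iter_frames(frames_by_sid):          # hypothetical helper
#     for sid, df in sorted(frames_by_sid.items()):
#         yield sid, df
#
# writer.write(iter_frames(frames_by_sid), show_progress=True)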
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.write_sid
|
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
|
python
|
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
|
[
"def",
"write_sid",
"(",
"self",
",",
"sid",
",",
"df",
",",
"invalid_data_behavior",
"=",
"'warn'",
")",
":",
"cols",
"=",
"{",
"'open'",
":",
"df",
".",
"open",
".",
"values",
",",
"'high'",
":",
"df",
".",
"high",
".",
"values",
",",
"'low'",
":",
"df",
".",
"low",
".",
"values",
",",
"'close'",
":",
"df",
".",
"close",
".",
"values",
",",
"'volume'",
":",
"df",
".",
"volume",
".",
"values",
",",
"}",
"dts",
"=",
"df",
".",
"index",
".",
"values",
"# Call internal method, since DataFrame has already ensured matching",
"# index and value lengths.",
"self",
".",
"_write_cols",
"(",
"sid",
",",
"dts",
",",
"cols",
",",
"invalid_data_behavior",
")"
] |
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
|
[
"Write",
"the",
"OHLCV",
"data",
"for",
"the",
"given",
"sid",
".",
"If",
"there",
"is",
"no",
"bcolz",
"ctable",
"yet",
"created",
"for",
"the",
"sid",
"create",
"it",
".",
"If",
"the",
"length",
"of",
"the",
"bcolz",
"ctable",
"is",
"not",
"exactly",
"to",
"the",
"date",
"before",
"the",
"first",
"day",
"provided",
"fill",
"the",
"ctable",
"with",
"0s",
"up",
"to",
"that",
"date",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L699-L730
|
train
|
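A frame in the shape write_sid expects: OHLCV columns indexed by market minutes (all values illustrative):

import pandas as pd

minutes = pd.date_range('2016-01-04 14:31', periods=3, freq='T', tz='UTC')
df = pd.DataFrame(
    {
        'open': [10.0, 10.1, 10.2],
        'high': [10.1, 10.2, 10.3],
        'low': [9.9, 10.0, 10.1],
        'close': [10.05, 10.15, 10.25],
        'volume': [1000, 1500, 1200],
    },
    index=minutes,
)
# writer.write_sid(sid=1, df=df)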
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.write_cols
|
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
|
python
|
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
|
[
"def",
"write_cols",
"(",
"self",
",",
"sid",
",",
"dts",
",",
"cols",
",",
"invalid_data_behavior",
"=",
"'warn'",
")",
":",
"if",
"not",
"all",
"(",
"len",
"(",
"dts",
")",
"==",
"len",
"(",
"cols",
"[",
"name",
"]",
")",
"for",
"name",
"in",
"self",
".",
"COL_NAMES",
")",
":",
"raise",
"BcolzMinuteWriterColumnMismatch",
"(",
"\"Length of dts={0} should match cols: {1}\"",
".",
"format",
"(",
"len",
"(",
"dts",
")",
",",
"\" \"",
".",
"join",
"(",
"\"{0}={1}\"",
".",
"format",
"(",
"name",
",",
"len",
"(",
"cols",
"[",
"name",
"]",
")",
")",
"for",
"name",
"in",
"self",
".",
"COL_NAMES",
")",
")",
")",
"self",
".",
"_write_cols",
"(",
"sid",
",",
"dts",
",",
"cols",
",",
"invalid_data_behavior",
")"
] |
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the length of the bcolz ctable does not extend exactly to the date before
the first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
|
[
"Write",
"the",
"OHLCV",
"data",
"for",
"the",
"given",
"sid",
".",
"If",
"there",
"is",
"no",
"bcolz",
"ctable",
"yet",
"created",
"for",
"the",
"sid",
"create",
"it",
".",
"If",
"the",
"length",
"of",
"the",
"bcolz",
"ctable",
"is",
"not",
"exactly",
"to",
"the",
"date",
"before",
"the",
"first",
"day",
"provided",
"fill",
"the",
"ctable",
"with",
"0s",
"up",
"to",
"that",
"date",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L732-L760
|
train
|
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter._write_cols
|
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
|
python
|
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
|
[
"def",
"_write_cols",
"(",
"self",
",",
"sid",
",",
"dts",
",",
"cols",
",",
"invalid_data_behavior",
")",
":",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"tds",
"=",
"self",
".",
"_session_labels",
"input_first_day",
"=",
"self",
".",
"_calendar",
".",
"minute_to_session_label",
"(",
"pd",
".",
"Timestamp",
"(",
"dts",
"[",
"0",
"]",
")",
",",
"direction",
"=",
"'previous'",
")",
"last_date",
"=",
"self",
".",
"last_date_in_output_for_sid",
"(",
"sid",
")",
"day_before_input",
"=",
"input_first_day",
"-",
"tds",
".",
"freq",
"self",
".",
"pad",
"(",
"sid",
",",
"day_before_input",
")",
"table",
"=",
"self",
".",
"_ensure_ctable",
"(",
"sid",
")",
"# Get the number of minutes already recorded in this sid's ctable",
"num_rec_mins",
"=",
"table",
".",
"size",
"all_minutes",
"=",
"self",
".",
"_minute_index",
"# Get the latest minute we wish to write to the ctable",
"last_minute_to_write",
"=",
"pd",
".",
"Timestamp",
"(",
"dts",
"[",
"-",
"1",
"]",
",",
"tz",
"=",
"'UTC'",
")",
"# In the event that we've already written some minutely data to the",
"# ctable, guard against overwriting that data.",
"if",
"num_rec_mins",
">",
"0",
":",
"last_recorded_minute",
"=",
"all_minutes",
"[",
"num_rec_mins",
"-",
"1",
"]",
"if",
"last_minute_to_write",
"<=",
"last_recorded_minute",
":",
"raise",
"BcolzMinuteOverlappingData",
"(",
"dedent",
"(",
"\"\"\"\n Data with last_date={0} already includes input start={1} for\n sid={2}\"\"\"",
".",
"strip",
"(",
")",
")",
".",
"format",
"(",
"last_date",
",",
"input_first_day",
",",
"sid",
")",
")",
"latest_min_count",
"=",
"all_minutes",
".",
"get_loc",
"(",
"last_minute_to_write",
")",
"# Get all the minutes we wish to write (all market minutes after the",
"# latest currently written, up to and including last_minute_to_write)",
"all_minutes_in_window",
"=",
"all_minutes",
"[",
"num_rec_mins",
":",
"latest_min_count",
"+",
"1",
"]",
"minutes_count",
"=",
"all_minutes_in_window",
".",
"size",
"open_col",
"=",
"np",
".",
"zeros",
"(",
"minutes_count",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"high_col",
"=",
"np",
".",
"zeros",
"(",
"minutes_count",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"low_col",
"=",
"np",
".",
"zeros",
"(",
"minutes_count",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"close_col",
"=",
"np",
".",
"zeros",
"(",
"minutes_count",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"vol_col",
"=",
"np",
".",
"zeros",
"(",
"minutes_count",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"dt_ixs",
"=",
"np",
".",
"searchsorted",
"(",
"all_minutes_in_window",
".",
"values",
",",
"dts",
".",
"astype",
"(",
"'datetime64[ns]'",
")",
")",
"ohlc_ratio",
"=",
"self",
".",
"ohlc_ratio_for_sid",
"(",
"sid",
")",
"(",
"open_col",
"[",
"dt_ixs",
"]",
",",
"high_col",
"[",
"dt_ixs",
"]",
",",
"low_col",
"[",
"dt_ixs",
"]",
",",
"close_col",
"[",
"dt_ixs",
"]",
",",
"vol_col",
"[",
"dt_ixs",
"]",
",",
")",
"=",
"convert_cols",
"(",
"cols",
",",
"ohlc_ratio",
",",
"sid",
",",
"invalid_data_behavior",
")",
"table",
".",
"append",
"(",
"[",
"open_col",
",",
"high_col",
",",
"low_col",
",",
"close_col",
",",
"vol_col",
"]",
")",
"table",
".",
"flush",
"(",
")"
] |
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
|
[
"Internal",
"method",
"for",
"write_cols",
"and",
"write",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L762-L844
|
train
|
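The core scatter step in isolation: searchsorted locates each input minute inside the full market-minute window, and only those slots are overwritten; every other minute in the window stays zero:

import numpy as np
import pandas as pd

window = pd.date_range('2016-01-04 14:31', periods=5, freq='T')
dts = window[[0, 3]].values     # bars exist for only 2 of the 5 minutes
close_col = np.zeros(5, dtype=np.uint32)
ix = np.searchsorted(window.values, dts.astype('datetime64[ns]'))
close_col[ix] = [10050, 10150]  # values already scaled by the ohlc ratio
assert close_col.tolist() == [10050, 0, 0, 10150, 0]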
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.data_len_for_day
|
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
|
python
|
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
|
[
"def",
"data_len_for_day",
"(",
"self",
",",
"day",
")",
":",
"day_ix",
"=",
"self",
".",
"_session_labels",
".",
"get_loc",
"(",
"day",
")",
"# Add one to the 0-indexed day_ix to get the number of days.",
"num_days",
"=",
"day_ix",
"+",
"1",
"return",
"num_days",
"*",
"self",
".",
"_minutes_per_day"
] |
Return the number of data points up to and including the
provided day.
|
[
"Return",
"the",
"number",
"of",
"data",
"points",
"up",
"to",
"and",
"including",
"the",
"provided",
"day",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L846-L854
|
train
|
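A concrete instance of the formula: with 390 minutes per session, the third session label (day_ix == 2) ends after (2 + 1) * 390 data points:

minutes_per_day = 390
day_ix = 2
assert (day_ix + 1) * minutes_per_day == 1170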
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarWriter.truncate
|
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
|
python
|
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
|
[
"def",
"truncate",
"(",
"self",
",",
"date",
")",
":",
"truncate_slice_end",
"=",
"self",
".",
"data_len_for_day",
"(",
"date",
")",
"glob_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_rootdir",
",",
"\"*\"",
",",
"\"*\"",
",",
"\"*.bcolz\"",
")",
"sid_paths",
"=",
"sorted",
"(",
"glob",
"(",
"glob_path",
")",
")",
"for",
"sid_path",
"in",
"sid_paths",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"sid_path",
")",
"try",
":",
"table",
"=",
"bcolz",
".",
"open",
"(",
"rootdir",
"=",
"sid_path",
")",
"except",
"IOError",
":",
"continue",
"if",
"table",
".",
"len",
"<=",
"truncate_slice_end",
":",
"logger",
".",
"info",
"(",
"\"{0} not past truncate date={1}.\"",
",",
"file_name",
",",
"date",
")",
"continue",
"logger",
".",
"info",
"(",
"\"Truncating {0} at end_date={1}\"",
",",
"file_name",
",",
"date",
".",
"date",
"(",
")",
")",
"table",
".",
"resize",
"(",
"truncate_slice_end",
")",
"# Update end session in metadata.",
"metadata",
"=",
"BcolzMinuteBarMetadata",
".",
"read",
"(",
"self",
".",
"_rootdir",
")",
"metadata",
".",
"end_session",
"=",
"date",
"metadata",
".",
"write",
"(",
"self",
".",
"_rootdir",
")"
] |
Truncate data beyond this date in all ctables.
|
[
"Truncate",
"data",
"beyond",
"this",
"date",
"in",
"all",
"ctables",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L856-L883
|
train
|
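A toy illustration of the resize step in truncate above, with a plain list standing in for a bcolz ctable: everything past the last minute of the truncation date is dropped.

minutes_per_day = 390
truncate_slice_end = 2 * minutes_per_day    # i.e. data_len_for_day(date)
table = list(range(3 * minutes_per_day))    # three sessions of fake data
if len(table) > truncate_slice_end:
    table = table[:truncate_slice_end]      # analogue of table.resize(...)
assert len(table) == 780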
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader._minutes_to_exclude
|
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
    List of (market open, early close) pairs of pd.Timestamp representing
    the minutes to exclude because of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
|
python
|
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
    List of (market open, early close) pairs of pd.Timestamp representing
    the minutes to exclude because of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
|
[
"def",
"_minutes_to_exclude",
"(",
"self",
")",
":",
"market_opens",
"=",
"self",
".",
"_market_opens",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
"market_closes",
"=",
"self",
".",
"_market_closes",
".",
"values",
".",
"astype",
"(",
"'datetime64[m]'",
")",
"minutes_per_day",
"=",
"(",
"market_closes",
"-",
"market_opens",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"early_indices",
"=",
"np",
".",
"where",
"(",
"minutes_per_day",
"!=",
"self",
".",
"_minutes_per_day",
"-",
"1",
")",
"[",
"0",
"]",
"early_opens",
"=",
"self",
".",
"_market_opens",
"[",
"early_indices",
"]",
"early_closes",
"=",
"self",
".",
"_market_closes",
"[",
"early_indices",
"]",
"minutes",
"=",
"[",
"(",
"market_open",
",",
"early_close",
")",
"for",
"market_open",
",",
"early_close",
"in",
"zip",
"(",
"early_opens",
",",
"early_closes",
")",
"]",
"return",
"minutes"
] |
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
List of (market open, early close) pairs of pd.Timestamp representing
the minutes to exclude because of early closes.
|
[
"Calculate",
"the",
"minutes",
"which",
"should",
"be",
"excluded",
"when",
"a",
"window",
"occurs",
"on",
"days",
"which",
"had",
"an",
"early",
"close",
"i",
".",
"e",
".",
"days",
"where",
"the",
"close",
"based",
"on",
"the",
"regular",
"period",
"of",
"minutes",
"per",
"day",
"and",
"the",
"market",
"close",
"do",
"not",
"match",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L991-L1013
|
train
|
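A self-contained toy of the early-close detection above, assuming a regular session of 390 minutes (so a full open-to-close span of 389 minutes): Thanksgiving week 2016, where the Friday session closed early.

import numpy as np
import pandas as pd

opens = pd.DatetimeIndex(['2016-11-23 14:31', '2016-11-25 14:31'], tz='UTC')
closes = pd.DatetimeIndex(['2016-11-23 21:00', '2016-11-25 18:00'], tz='UTC')
spans = (closes.values.astype('datetime64[m]') -
         opens.values.astype('datetime64[m]')).astype(np.int64)
early = np.where(spans != 390 - 1)[0]
print(list(zip(opens[early], closes[early])))  # only the 2016-11-25 half-day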
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader._minute_exclusion_tree
|
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
    of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
|
python
|
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
    of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
|
[
"def",
"_minute_exclusion_tree",
"(",
"self",
")",
":",
"itree",
"=",
"IntervalTree",
"(",
")",
"for",
"market_open",
",",
"early_close",
"in",
"self",
".",
"_minutes_to_exclude",
"(",
")",
":",
"start_pos",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"early_close",
")",
"+",
"1",
"end_pos",
"=",
"(",
"self",
".",
"_find_position_of_minute",
"(",
"market_open",
")",
"+",
"self",
".",
"_minutes_per_day",
"-",
"1",
")",
"data",
"=",
"(",
"start_pos",
",",
"end_pos",
")",
"itree",
"[",
"start_pos",
":",
"end_pos",
"+",
"1",
"]",
"=",
"data",
"return",
"itree"
] |
Build an interval tree keyed by the start and end of each range
of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
|
[
"Build",
"an",
"interval",
"tree",
"keyed",
"by",
"the",
"start",
"and",
"end",
"of",
"each",
"range",
"of",
"positions",
"should",
"be",
"dropped",
"from",
"windows",
".",
"(",
"These",
"are",
"the",
"minutes",
"between",
"an",
"early",
"close",
"and",
"the",
"minute",
"which",
"would",
"be",
"the",
"close",
"based",
"on",
"the",
"regular",
"period",
"if",
"there",
"were",
"no",
"early",
"close",
".",
")",
"The",
"value",
"of",
"each",
"node",
"is",
"the",
"same",
"start",
"and",
"end",
"position",
"stored",
"as",
"a",
"tuple",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1016-L1045
|
train
|
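A sketch of the exclusion tree built above, using the third-party intervaltree package; the positions are hypothetical: an early close at global position 1000 on a 390-minute calendar whose notional full-day close would fall at position 1169.

from intervaltree import IntervalTree

itree = IntervalTree()
start_pos, end_pos = 1001, 1169          # minutes after the early close
itree[start_pos:end_pos + 1] = (start_pos, end_pos)
print(itree.overlaps(900, 1100))         # True: this window hits the span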
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader._exclusion_indices_for_range
|
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
|
python
|
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
|
[
"def",
"_exclusion_indices_for_range",
"(",
"self",
",",
"start_idx",
",",
"end_idx",
")",
":",
"itree",
"=",
"self",
".",
"_minute_exclusion_tree",
"if",
"itree",
".",
"overlaps",
"(",
"start_idx",
",",
"end_idx",
")",
":",
"ranges",
"=",
"[",
"]",
"intervals",
"=",
"itree",
"[",
"start_idx",
":",
"end_idx",
"]",
"for",
"interval",
"in",
"intervals",
":",
"ranges",
".",
"append",
"(",
"interval",
".",
"data",
")",
"return",
"sorted",
"(",
"ranges",
")",
"else",
":",
"return",
"None"
] |
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
|
[
"Returns",
"-------",
"List",
"of",
"tuples",
"of",
"(",
"start",
"stop",
")",
"which",
"represent",
"the",
"ranges",
"of",
"minutes",
"which",
"should",
"be",
"excluded",
"when",
"a",
"market",
"minute",
"window",
"is",
"requested",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1047-L1062
|
train
|
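A toy query mirroring the method above, against the same hypothetical tree: collect the (start, stop) payloads of any exclusion spans overlapping the requested window, sorted.

from intervaltree import IntervalTree

itree = IntervalTree()
itree[1001:1170] = (1001, 1169)
start_idx, end_idx = 950, 1100
if itree.overlaps(start_idx, end_idx):
    ranges = sorted(interval.data for interval in itree[start_idx:end_idx])
else:
    ranges = None
print(ranges)                            # [(1001, 1169)]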
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader.get_value
|
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
|
python
|
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
|
[
"def",
"get_value",
"(",
"self",
",",
"sid",
",",
"dt",
",",
"field",
")",
":",
"if",
"self",
".",
"_last_get_value_dt_value",
"==",
"dt",
".",
"value",
":",
"minute_pos",
"=",
"self",
".",
"_last_get_value_dt_position",
"else",
":",
"try",
":",
"minute_pos",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"dt",
")",
"except",
"ValueError",
":",
"raise",
"NoDataOnDate",
"(",
")",
"self",
".",
"_last_get_value_dt_value",
"=",
"dt",
".",
"value",
"self",
".",
"_last_get_value_dt_position",
"=",
"minute_pos",
"try",
":",
"value",
"=",
"self",
".",
"_open_minute_file",
"(",
"field",
",",
"sid",
")",
"[",
"minute_pos",
"]",
"except",
"IndexError",
":",
"value",
"=",
"0",
"if",
"value",
"==",
"0",
":",
"if",
"field",
"==",
"'volume'",
":",
"return",
"0",
"else",
":",
"return",
"np",
".",
"nan",
"if",
"field",
"!=",
"'volume'",
":",
"value",
"*=",
"self",
".",
"_ohlc_ratio_inverse_for_sid",
"(",
"sid",
")",
"return",
"value"
] |
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
|
[
"Retrieve",
"the",
"pricing",
"info",
"for",
"the",
"given",
"sid",
"dt",
"and",
"field",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1098-L1149
|
train
|
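A toy decoding of the zero-means-missing convention used by get_value above, assuming this sid's prices are stored as uint32 in thousandths of a dollar (so the inverse OHLC ratio is 1/1000).

import numpy as np

ohlc_ratio_inverse = 1.0 / 1000
for raw in (np.uint32(0), np.uint32(10250)):
    value = np.nan if raw == 0 else raw * ohlc_ratio_inverse
    print(value)                         # nan (no trade), then 10.25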
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader._find_position_of_minute
|
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
    e.g., this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
|
python
|
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
    e.g., this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
|
[
"def",
"_find_position_of_minute",
"(",
"self",
",",
"minute_dt",
")",
":",
"return",
"find_position_of_minute",
"(",
"self",
".",
"_market_open_values",
",",
"self",
".",
"_market_close_values",
",",
"minute_dt",
".",
"value",
"/",
"NANOS_IN_MINUTE",
",",
"self",
".",
"_minutes_per_day",
",",
"False",
",",
")"
] |
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
e.g., this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
|
[
"Internal",
"method",
"that",
"returns",
"the",
"position",
"of",
"the",
"given",
"minute",
"in",
"the",
"list",
"of",
"every",
"trading",
"minute",
"since",
"market",
"open",
"of",
"the",
"first",
"trading",
"day",
".",
"Adjusts",
"non",
"market",
"minutes",
"to",
"the",
"last",
"close",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1203-L1228
|
train
|
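A sketch of the nanosecond-to-minute arithmetic behind the helper above, for a hypothetical first session opening at 2002-01-02 14:31 UTC (9:31 AM Eastern); the real function also walks the per-session open/close arrays and clamps non-market minutes to the last close.

import pandas as pd

NANOS_IN_MINUTE = 60 * 1_000_000_000
market_open = pd.Timestamp('2002-01-02 14:31', tz='UTC')
minute_dt = pd.Timestamp('2002-01-02 14:32', tz='UTC')
print(minute_dt.value // NANOS_IN_MINUTE -
      market_open.value // NANOS_IN_MINUTE)   # 1, matching the docstring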
quantopian/zipline
|
zipline/data/minute_bars.py
|
BcolzMinuteBarReader.load_raw_arrays
|
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
        A list with an entry per field of ndarrays with shape
        (minutes in range, sids) with a dtype of float64 (uint32 for
        volume), containing the values for the respective field over the
        start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx:excl_stop - start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
else:
out[:len(where), i][where] = values[where]
results.append(out)
return results
|
python
|
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
        A list with an entry per field of ndarrays with shape
        (minutes in range, sids) with a dtype of float64 (uint32 for
        volume), containing the values for the respective field over the
        start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx:excl_stop - start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
else:
out[:len(where), i][where] = values[where]
results.append(out)
return results
|
[
"def",
"load_raw_arrays",
"(",
"self",
",",
"fields",
",",
"start_dt",
",",
"end_dt",
",",
"sids",
")",
":",
"start_idx",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"start_dt",
")",
"end_idx",
"=",
"self",
".",
"_find_position_of_minute",
"(",
"end_dt",
")",
"num_minutes",
"=",
"(",
"end_idx",
"-",
"start_idx",
"+",
"1",
")",
"results",
"=",
"[",
"]",
"indices_to_exclude",
"=",
"self",
".",
"_exclusion_indices_for_range",
"(",
"start_idx",
",",
"end_idx",
")",
"if",
"indices_to_exclude",
"is",
"not",
"None",
":",
"for",
"excl_start",
",",
"excl_stop",
"in",
"indices_to_exclude",
":",
"length",
"=",
"excl_stop",
"-",
"excl_start",
"+",
"1",
"num_minutes",
"-=",
"length",
"shape",
"=",
"num_minutes",
",",
"len",
"(",
"sids",
")",
"for",
"field",
"in",
"fields",
":",
"if",
"field",
"!=",
"'volume'",
":",
"out",
"=",
"np",
".",
"full",
"(",
"shape",
",",
"np",
".",
"nan",
")",
"else",
":",
"out",
"=",
"np",
".",
"zeros",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"uint32",
")",
"for",
"i",
",",
"sid",
"in",
"enumerate",
"(",
"sids",
")",
":",
"carray",
"=",
"self",
".",
"_open_minute_file",
"(",
"field",
",",
"sid",
")",
"values",
"=",
"carray",
"[",
"start_idx",
":",
"end_idx",
"+",
"1",
"]",
"if",
"indices_to_exclude",
"is",
"not",
"None",
":",
"for",
"excl_start",
",",
"excl_stop",
"in",
"indices_to_exclude",
"[",
":",
":",
"-",
"1",
"]",
":",
"excl_slice",
"=",
"np",
".",
"s_",
"[",
"excl_start",
"-",
"start_idx",
":",
"excl_stop",
"-",
"start_idx",
"+",
"1",
"]",
"values",
"=",
"np",
".",
"delete",
"(",
"values",
",",
"excl_slice",
")",
"where",
"=",
"values",
"!=",
"0",
"# first slice down to len(where) because we might not have",
"# written data for all the minutes requested",
"if",
"field",
"!=",
"'volume'",
":",
"out",
"[",
":",
"len",
"(",
"where",
")",
",",
"i",
"]",
"[",
"where",
"]",
"=",
"(",
"values",
"[",
"where",
"]",
"*",
"self",
".",
"_ohlc_ratio_inverse_for_sid",
"(",
"sid",
")",
")",
"else",
":",
"out",
"[",
":",
"len",
"(",
"where",
")",
",",
"i",
"]",
"[",
"where",
"]",
"=",
"values",
"[",
"where",
"]",
"results",
".",
"append",
"(",
"out",
")",
"return",
"results"
] |
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
    A list with an entry per field of ndarrays with shape
    (minutes in range, sids) with a dtype of float64 (uint32 for volume),
    containing the values for the respective field over the start and end
    dt range.
|
[
"Parameters",
"----------",
"fields",
":",
"list",
"of",
"str",
"open",
"high",
"low",
"close",
"or",
"volume",
"start_dt",
":",
"Timestamp",
"Beginning",
"of",
"the",
"window",
"range",
".",
"end_dt",
":",
"Timestamp",
"End",
"of",
"the",
"window",
"range",
".",
"sids",
":",
"list",
"of",
"int",
"The",
"asset",
"identifiers",
"in",
"the",
"window",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1230-L1291
|
train
|
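A toy demonstration of the exclusion handling inside load_raw_arrays above: the (start, stop) pairs are global minute positions, so each is rebased against the window start before np.delete removes it.

import numpy as np

values = np.arange(10, dtype=np.uint32)   # stand-in for carray[start:end+1]
start_idx = 100                           # the window's global start position
indices_to_exclude = [(103, 105)]         # global positions to drop
for excl_start, excl_stop in indices_to_exclude[::-1]:
    excl_slice = np.s_[excl_start - start_idx:excl_stop - start_idx + 1]
    values = np.delete(values, excl_slice)
print(values)                             # [0 1 2 6 7 8 9]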
quantopian/zipline
|
zipline/data/minute_bars.py
|
H5MinuteBarUpdateWriter.write
|
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
    ``pd.Panel.to_hdf``.
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0)
|
python
|
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
    ``pd.Panel.to_hdf``.
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0)
|
[
"def",
"write",
"(",
"self",
",",
"frames",
")",
":",
"with",
"HDFStore",
"(",
"self",
".",
"_path",
",",
"'w'",
",",
"complevel",
"=",
"self",
".",
"_complevel",
",",
"complib",
"=",
"self",
".",
"_complib",
")",
"as",
"store",
":",
"panel",
"=",
"pd",
".",
"Panel",
".",
"from_dict",
"(",
"dict",
"(",
"frames",
")",
")",
"panel",
".",
"to_hdf",
"(",
"store",
",",
"'updates'",
")",
"with",
"tables",
".",
"open_file",
"(",
"self",
".",
"_path",
",",
"mode",
"=",
"'r+'",
")",
"as",
"h5file",
":",
"h5file",
".",
"set_node_attr",
"(",
"'/'",
",",
"'version'",
",",
"0",
")"
] |
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``.
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable or other mapping of sid to the corresponding OHLCV
pricing data.
|
[
"Write",
"the",
"frames",
"to",
"the",
"target",
"HDF5",
"file",
"using",
"the",
"format",
"used",
"by",
"pd",
".",
"Panel",
".",
"to_hdf"
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L1346-L1363
|
train
|
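A hypothetical frames argument for the writer above: a mapping from sid to a minutely OHLCV DataFrame. Note that pd.Panel, which the method relies on, only exists in pandas versions before 0.25, so this sketch assumes such a version.

import pandas as pd

minutes = pd.date_range('2016-01-05 14:31', periods=2, freq='min', tz='UTC')
frames = {
    1: pd.DataFrame(
        {'open': [10.0, 10.1], 'high': [10.2, 10.2], 'low': [9.9, 10.0],
         'close': [10.1, 10.15], 'volume': [100, 200]},
        index=minutes,
    ),
}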
quantopian/zipline
|
zipline/pipeline/loaders/utils.py
|
next_event_indexer
|
def next_event_indexer(all_dates,
data_query_cutoff,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
|
python
|
def next_event_indexer(all_dates,
data_query_cutoff,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
|
[
"def",
"next_event_indexer",
"(",
"all_dates",
",",
"data_query_cutoff",
",",
"all_sids",
",",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
":",
"validate_event_metadata",
"(",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
"out",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"all_dates",
")",
",",
"len",
"(",
"all_sids",
")",
")",
",",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"sid_ixs",
"=",
"all_sids",
".",
"searchsorted",
"(",
"event_sids",
")",
"# side='right' here ensures that we include the event date itself",
"# if it's in all_dates.",
"dt_ixs",
"=",
"all_dates",
".",
"searchsorted",
"(",
"event_dates",
",",
"side",
"=",
"'right'",
")",
"ts_ixs",
"=",
"data_query_cutoff",
".",
"searchsorted",
"(",
"event_timestamps",
",",
"side",
"=",
"'right'",
")",
"# Walk backward through the events, writing the index of the event into",
"# slots ranging from the event's timestamp to its asof. This depends for",
"# correctness on the fact that event_dates is sorted in ascending order,",
"# because we need to overwrite later events with earlier ones if their",
"# eligible windows overlap.",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"event_sids",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"start_ix",
"=",
"ts_ixs",
"[",
"i",
"]",
"end_ix",
"=",
"dt_ixs",
"[",
"i",
"]",
"out",
"[",
"start_ix",
":",
"end_ix",
",",
"sid_ixs",
"[",
"i",
"]",
"]",
"=",
"i",
"return",
"out"
] |
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
data_query_cutoff : pd.DatetimeIndex
The boundaries for the given trading sessions in ``all_dates``.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
    Dates on which each input event occurred/will occur. ``event_dates``
    must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
    Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
    Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
|
[
"Construct",
"an",
"index",
"array",
"that",
"when",
"applied",
"to",
"an",
"array",
"of",
"values",
"produces",
"a",
"2D",
"array",
"containing",
"the",
"values",
"associated",
"with",
"the",
"next",
"event",
"for",
"each",
"sid",
"at",
"each",
"moment",
"in",
"time",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L25-L79
|
train
|
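A self-contained toy of the indexing scheme above (one sid, two sessions, one event), reproducing the searchsorted bounds and the backward write loop rather than importing zipline.

import numpy as np
import pandas as pd

all_dates = pd.DatetimeIndex(['2014-01-06', '2014-01-07']).values
data_query_cutoff = pd.DatetimeIndex(['2014-01-06', '2014-01-07'])
all_sids = np.array([1])
event_dates = np.array(['2014-01-07'], dtype='datetime64[ns]')
event_timestamps = np.array(['2014-01-05'], dtype='datetime64[ns]')
event_sids = np.array([1])

out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right')
for i in range(len(event_sids) - 1, -1, -1):
    out[ts_ixs[i]:dt_ixs[i], sid_ixs[i]] = i
print(out.ravel())   # [0 0] -- event 0 is the next event on both sessions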
quantopian/zipline
|
zipline/pipeline/loaders/utils.py
|
previous_event_indexer
|
def previous_event_indexer(data_query_cutoff_times,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
    data_query_cutoff_times : pd.DatetimeIndex
        The boundaries for the given trading sessions; these are the row
        labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
        An array of shape (len(data_query_cutoff_times), len(all_sids)) of
        indices into ``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
|
python
|
def previous_event_indexer(data_query_cutoff_times,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
    data_query_cutoff_times : pd.DatetimeIndex
        The boundaries for the given trading sessions; these are the row
        labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
        Dates on which each input event occurred/will occur. ``event_dates``
        must be in sorted order, and may not contain any NaT values.
    event_timestamps : ndarray[datetime64[ns], ndim=1]
        Dates on which we learned about each input event.
    event_sids : ndarray[int, ndim=1]
        Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
        An array of shape (len(data_query_cutoff_times), len(all_sids)) of
        indices into ``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full(
(len(data_query_cutoff_times), len(all_sids)),
-1,
dtype=np.int64,
)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
|
[
"def",
"previous_event_indexer",
"(",
"data_query_cutoff_times",
",",
"all_sids",
",",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
":",
"validate_event_metadata",
"(",
"event_dates",
",",
"event_timestamps",
",",
"event_sids",
")",
"out",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"data_query_cutoff_times",
")",
",",
"len",
"(",
"all_sids",
")",
")",
",",
"-",
"1",
",",
"dtype",
"=",
"np",
".",
"int64",
",",
")",
"eff_dts",
"=",
"np",
".",
"maximum",
"(",
"event_dates",
",",
"event_timestamps",
")",
"sid_ixs",
"=",
"all_sids",
".",
"searchsorted",
"(",
"event_sids",
")",
"dt_ixs",
"=",
"data_query_cutoff_times",
".",
"searchsorted",
"(",
"eff_dts",
",",
"side",
"=",
"'right'",
")",
"# Walk backwards through the events, writing the index of the event into",
"# slots ranging from max(event_date, event_timestamp) to the start of the",
"# previously-written event. This depends for correctness on the fact that",
"# event_dates is sorted in ascending order, because we need to have written",
"# later events so we know where to stop forward-filling earlier events.",
"last_written",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"event_dates",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"sid_ix",
"=",
"sid_ixs",
"[",
"i",
"]",
"dt_ix",
"=",
"dt_ixs",
"[",
"i",
"]",
"out",
"[",
"dt_ix",
":",
"last_written",
".",
"get",
"(",
"sid_ix",
",",
"None",
")",
",",
"sid_ix",
"]",
"=",
"i",
"last_written",
"[",
"sid_ix",
"]",
"=",
"dt_ix",
"return",
"out"
] |
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
data_query_cutoff_times : pd.DatetimeIndex
    The boundaries for the given trading sessions; these are the row
    labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
    Dates on which each input event occurred/will occur. ``event_dates``
    must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
    Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
    Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
    An array of shape (len(data_query_cutoff_times), len(all_sids)) of
    indices into ``event_{dates,timestamps,sids}``.
|
[
"Construct",
"an",
"index",
"array",
"that",
"when",
"applied",
"to",
"an",
"array",
"of",
"values",
"produces",
"a",
"2D",
"array",
"containing",
"the",
"values",
"associated",
"with",
"the",
"previous",
"event",
"for",
"each",
"sid",
"at",
"each",
"moment",
"in",
"time",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L82-L138
|
train
|
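The same style of toy for the backward-looking indexer above: the event becomes the previous event from max(event_date, event_timestamp) onward.

import numpy as np
import pandas as pd

data_query_cutoff_times = pd.DatetimeIndex(
    ['2014-01-06', '2014-01-07', '2014-01-08'])
all_sids = np.array([1])
event_dates = np.array(['2014-01-07'], dtype='datetime64[ns]')
event_timestamps = np.array(['2014-01-06'], dtype='datetime64[ns]')
event_sids = np.array([1])

out = np.full((len(data_query_cutoff_times), len(all_sids)), -1,
              dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right')
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
    out[dt_ixs[i]:last_written.get(sid_ixs[i], None), sid_ixs[i]] = i
    last_written[sid_ixs[i]] = dt_ixs[i]
print(out.ravel())   # [-1 -1  0] -- known as previous from 2014-01-08 on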
quantopian/zipline
|
zipline/pipeline/loaders/utils.py
|
last_in_date_group
|
def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group
|
python
|
def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group
|
[
"def",
"last_in_date_group",
"(",
"df",
",",
"data_query_cutoff_times",
",",
"assets",
",",
"reindex",
"=",
"True",
",",
"have_sids",
"=",
"True",
",",
"extra_groupers",
"=",
"None",
")",
":",
"idx",
"=",
"[",
"data_query_cutoff_times",
"[",
"data_query_cutoff_times",
".",
"searchsorted",
"(",
"df",
"[",
"TS_FIELD_NAME",
"]",
".",
"values",
",",
")",
"]",
"]",
"if",
"have_sids",
":",
"idx",
"+=",
"[",
"SID_FIELD_NAME",
"]",
"if",
"extra_groupers",
"is",
"None",
":",
"extra_groupers",
"=",
"[",
"]",
"idx",
"+=",
"extra_groupers",
"last_in_group",
"=",
"df",
".",
"drop",
"(",
"TS_FIELD_NAME",
",",
"axis",
"=",
"1",
")",
".",
"groupby",
"(",
"idx",
",",
"sort",
"=",
"False",
",",
")",
".",
"last",
"(",
")",
"# For the number of things that we're grouping by (except TS), unstack",
"# the df. Done this way because of an unresolved pandas bug whereby",
"# passing a list of levels with mixed dtypes to unstack causes the",
"# resulting DataFrame to have all object-type columns.",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"idx",
")",
"-",
"1",
")",
":",
"last_in_group",
"=",
"last_in_group",
".",
"unstack",
"(",
"-",
"1",
")",
"if",
"reindex",
":",
"if",
"have_sids",
":",
"cols",
"=",
"last_in_group",
".",
"columns",
"last_in_group",
"=",
"last_in_group",
".",
"reindex",
"(",
"index",
"=",
"data_query_cutoff_times",
",",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"tuple",
"(",
"cols",
".",
"levels",
"[",
"0",
":",
"len",
"(",
"extra_groupers",
")",
"+",
"1",
"]",
")",
"+",
"(",
"assets",
",",
")",
",",
"names",
"=",
"cols",
".",
"names",
",",
")",
",",
")",
"else",
":",
"last_in_group",
"=",
"last_in_group",
".",
"reindex",
"(",
"data_query_cutoff_times",
")",
"return",
"last_in_group"
] |
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
|
[
"Determine",
"the",
"last",
"piece",
"of",
"information",
"known",
"on",
"each",
"date",
"in",
"the",
"date"
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L141-L213
|
train
|
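A toy reproduction of the core groupby above, with hypothetical field names: each record's timestamp is mapped forward to its cutoff, and the last record per (cutoff, sid) wins before the unstack.

import pandas as pd

cutoffs = pd.DatetimeIndex(['2014-01-06 08:45', '2014-01-07 08:45'])
df = pd.DataFrame({
    'timestamp': pd.to_datetime(['2014-01-06 07:00', '2014-01-06 08:00']),
    'sid': [1, 1],
    'value': [10.0, 11.0],
})
idx = [cutoffs[cutoffs.searchsorted(df['timestamp'].values)], 'sid']
last = df.drop('timestamp', axis=1).groupby(idx, sort=False).last()
print(last.unstack(-1))   # one row for the 01-06 cutoff; 11.0 wins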
quantopian/zipline
|
zipline/pipeline/loaders/utils.py
|
ffill_across_cols
|
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
                column_name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype)
|
python
|
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
                column_name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype)
|
[
"def",
"ffill_across_cols",
"(",
"df",
",",
"columns",
",",
"name_map",
")",
":",
"df",
".",
"ffill",
"(",
"inplace",
"=",
"True",
")",
"# Fill in missing values specified by each column. This is made",
"# significantly more complex by the fact that we need to work around",
"# two pandas issues:",
"# 1) When we have sids, if there are no records for a given sid for any",
"# dates, pandas will generate a column full of NaNs for that sid.",
"# This means that some of the columns in `dense_output` are now",
"# float instead of the intended dtype, so we have to coerce back to",
"# our expected type and convert NaNs into the desired missing value.",
"# 2) DataFrame.ffill assumes that receiving None as a fill-value means",
"# that no value was passed. Consequently, there's no way to tell",
"# pandas to replace NaNs in an object column with None using fillna,",
"# so we have to roll our own instead using df.where.",
"for",
"column",
"in",
"columns",
":",
"column_name",
"=",
"name_map",
"[",
"column",
".",
"name",
"]",
"# Special logic for strings since `fillna` doesn't work if the",
"# missing value is `None`.",
"if",
"column",
".",
"dtype",
"==",
"categorical_dtype",
":",
"df",
"[",
"column_name",
"]",
"=",
"df",
"[",
"column",
".",
"name",
"]",
".",
"where",
"(",
"pd",
".",
"notnull",
"(",
"df",
"[",
"column_name",
"]",
")",
",",
"column",
".",
"missing_value",
")",
"else",
":",
"# We need to execute `fillna` before `astype` in case the",
"# column contains NaNs and needs to be cast to bool or int.",
"# This is so that the NaNs are replaced first, since pandas",
"# can't convert NaNs for those types.",
"df",
"[",
"column_name",
"]",
"=",
"df",
"[",
"column_name",
"]",
".",
"fillna",
"(",
"column",
".",
"missing_value",
")",
".",
"astype",
"(",
"column",
".",
"dtype",
")"
] |
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map: map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
|
[
"Forward",
"fill",
"values",
"in",
"a",
"DataFrame",
"with",
"special",
"logic",
"to",
"handle",
"cases",
"that",
"pd",
".",
"DataFrame",
".",
"ffill",
"cannot",
"and",
"cast",
"columns",
"to",
"appropriate",
"types",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L216-L264
|
train
|
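A toy of the fill-then-cast branch above for a non-string column: the forward fill leaves a leading NaN, which must be replaced with the column's missing value before the cast to int can succeed.

import numpy as np
import pandas as pd

missing_value = 0                        # hypothetical column.missing_value
df = pd.DataFrame({'shares': [np.nan, 100.0, np.nan]})
df.ffill(inplace=True)                   # [nan, 100.0, 100.0]
df['shares'] = df['shares'].fillna(missing_value).astype('int64')
print(df['shares'].tolist())             # [0, 100, 100]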
quantopian/zipline
|
zipline/pipeline/loaders/utils.py
|
shift_dates
|
def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
|
python
|
def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
|
[
"def",
"shift_dates",
"(",
"dates",
",",
"start_date",
",",
"end_date",
",",
"shift",
")",
":",
"try",
":",
"start",
"=",
"dates",
".",
"get_loc",
"(",
"start_date",
")",
"except",
"KeyError",
":",
"if",
"start_date",
"<",
"dates",
"[",
"0",
"]",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requested data starting on {query_start}, \"",
"\"but first known date is {calendar_start}\"",
")",
".",
"format",
"(",
"query_start",
"=",
"str",
"(",
"start_date",
")",
",",
"calendar_start",
"=",
"str",
"(",
"dates",
"[",
"0",
"]",
")",
",",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Query start %s not in calendar\"",
"%",
"start_date",
")",
"# Make sure that shifting doesn't push us out of the calendar.",
"if",
"start",
"<",
"shift",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requested data from {shift}\"",
"\" days before {query_start}, but first known date is only \"",
"\"{start} days earlier.\"",
")",
".",
"format",
"(",
"shift",
"=",
"shift",
",",
"query_start",
"=",
"start_date",
",",
"start",
"=",
"start",
")",
",",
")",
"try",
":",
"end",
"=",
"dates",
".",
"get_loc",
"(",
"end_date",
")",
"except",
"KeyError",
":",
"if",
"end_date",
">",
"dates",
"[",
"-",
"1",
"]",
":",
"raise",
"NoFurtherDataError",
"(",
"msg",
"=",
"(",
"\"Pipeline Query requesting data up to {query_end}, \"",
"\"but last known date is {calendar_end}\"",
")",
".",
"format",
"(",
"query_end",
"=",
"end_date",
",",
"calendar_end",
"=",
"dates",
"[",
"-",
"1",
"]",
",",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Query end %s not in calendar\"",
"%",
"end_date",
")",
"return",
"dates",
"[",
"start",
"-",
"shift",
"]",
",",
"dates",
"[",
"end",
"-",
"shift",
"]"
] |
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
|
[
"Shift",
"dates",
"of",
"a",
"pipeline",
"query",
"back",
"by",
"shift",
"days",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/utils.py#L267-L330
|
train
|
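A minimal usage sketch for shift_dates, assuming zipline is importable at this revision; the calendar below is invented:

import pandas as pd
from zipline.pipeline.loaders.utils import shift_dates

# Hypothetical 10-day calendar.
dates = pd.date_range('2014-01-02', periods=10, tz='UTC')
start_date, end_date = dates[3], dates[7]

# Shifting by one returns the labels one calendar position earlier.
shifted_start, shifted_end = shift_dates(dates, start_date, end_date, shift=1)
assert shifted_start == dates[2] and shifted_end == dates[6]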
quantopian/zipline
|
zipline/utils/sharedoc.py
|
format_docstring
|
def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
        Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params)
|
python
|
def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
        Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params)
|
[
"def",
"format_docstring",
"(",
"owner_name",
",",
"docstring",
",",
"formatters",
")",
":",
"# Build a dict of parameters to a vanilla format() call by searching for",
"# each entry in **formatters and applying any leading whitespace to each",
"# line in the desired substitution.",
"format_params",
"=",
"{",
"}",
"for",
"target",
",",
"doc_for_target",
"in",
"iteritems",
"(",
"formatters",
")",
":",
"# Search for '{name}', with optional leading whitespace.",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'^(\\s*)'",
"+",
"'({'",
"+",
"target",
"+",
"'})$'",
",",
"re",
".",
"MULTILINE",
")",
"matches",
"=",
"regex",
".",
"findall",
"(",
"docstring",
")",
"if",
"not",
"matches",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find template for parameter {!r} in docstring \"",
"\"for {}.\"",
"\"\\nParameter name must be alone on a line surrounded by \"",
"\"braces.\"",
".",
"format",
"(",
"target",
",",
"owner_name",
")",
",",
")",
"elif",
"len",
"(",
"matches",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Couldn't found multiple templates for parameter {!r}\"",
"\"in docstring for {}.\"",
"\"\\nParameter should only appear once.\"",
".",
"format",
"(",
"target",
",",
"owner_name",
")",
")",
"(",
"leading_whitespace",
",",
"_",
")",
"=",
"matches",
"[",
"0",
"]",
"format_params",
"[",
"target",
"]",
"=",
"pad_lines_after_first",
"(",
"leading_whitespace",
",",
"doc_for_target",
",",
")",
"return",
"docstring",
".",
"format",
"(",
"*",
"*",
"format_params",
")"
] |
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
    Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
|
[
"Template",
"formatters",
"into",
"docstring",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/sharedoc.py#L35-L82
|
train
|
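A minimal usage sketch for format_docstring; the docstring and formatters below are invented:

from zipline.utils.sharedoc import format_docstring

doc = "Do a thing.\n\n    {params}\n"
filled = format_docstring(
    owner_name='my_func',  # used only in error messages
    docstring=doc,
    formatters={'params': "x : int\n    The input."},
)
# '{params}' must sit alone on its line; its leading whitespace is
# applied to every continuation line of the substitution:
# filled == "Do a thing.\n\n    x : int\n        The input.\n"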
quantopian/zipline
|
zipline/utils/sharedoc.py
|
templated_docstring
|
def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator
|
python
|
def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator
|
[
"def",
"templated_docstring",
"(",
"*",
"*",
"docs",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"f",
".",
"__doc__",
"=",
"format_docstring",
"(",
"f",
".",
"__name__",
",",
"f",
".",
"__doc__",
",",
"docs",
")",
"return",
"f",
"return",
"decorator"
] |
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
|
[
"Decorator",
"allowing",
"the",
"use",
"of",
"templated",
"docstrings",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/sharedoc.py#L85-L101
|
train
|
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline.add
|
def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
    term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
|
python
|
def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
    term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
|
[
"def",
"add",
"(",
"self",
",",
"term",
",",
"name",
",",
"overwrite",
"=",
"False",
")",
":",
"self",
".",
"validate_column",
"(",
"name",
",",
"term",
")",
"columns",
"=",
"self",
".",
"columns",
"if",
"name",
"in",
"columns",
":",
"if",
"overwrite",
":",
"self",
".",
"remove",
"(",
"name",
")",
"else",
":",
"raise",
"KeyError",
"(",
"\"Column '{}' already exists.\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"not",
"isinstance",
"(",
"term",
",",
"ComputableTerm",
")",
":",
"raise",
"TypeError",
"(",
"\"{term} is not a valid pipeline column. Did you mean to \"",
"\"append '.latest'?\"",
".",
"format",
"(",
"term",
"=",
"term",
")",
")",
"self",
".",
"_columns",
"[",
"name",
"]",
"=",
"term"
] |
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
|
[
"Add",
"a",
"column",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L80-L112
|
train
|
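A hedged usage sketch for Pipeline.add; the factor used below is one of zipline's built-ins and is an arbitrary choice:

from zipline.pipeline import Pipeline
from zipline.pipeline.factors import Returns

pipe = Pipeline()
pipe.add(Returns(window_length=2), 'daily_returns')

# Re-adding the same name raises KeyError unless overwrite=True.
pipe.add(Returns(window_length=5), 'daily_returns', overwrite=True)

# Raw dataset columns are not ComputableTerms; use '.latest', e.g.:
# pipe.add(USEquityPricing.close.latest, 'close')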
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline.set_screen
|
def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
    screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
|
python
|
def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
    screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
|
[
"def",
"set_screen",
"(",
"self",
",",
"screen",
",",
"overwrite",
"=",
"False",
")",
":",
"if",
"self",
".",
"_screen",
"is",
"not",
"None",
"and",
"not",
"overwrite",
":",
"raise",
"ValueError",
"(",
"\"set_screen() called with overwrite=False and screen already \"",
"\"set.\\n\"",
"\"If you want to apply multiple filters as a screen use \"",
"\"set_screen(filter1 & filter2 & ...).\\n\"",
"\"If you want to replace the previous screen with a new one, \"",
"\"use set_screen(new_filter, overwrite=True).\"",
")",
"self",
".",
"_screen",
"=",
"screen"
] |
Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
|
[
"Set",
"a",
"screen",
"on",
"this",
"Pipeline",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L137-L158
|
train
|
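A hedged usage sketch for set_screen; the liquidity filter is an arbitrary example built from zipline's stock factors:

from zipline.pipeline import Pipeline
from zipline.pipeline.factors import AverageDollarVolume

pipe = Pipeline()
pipe.set_screen(AverageDollarVolume(window_length=30).top(500))

# A second call must either combine filters up front
# (set_screen(f1 & f2)) or pass overwrite=True to replace the screen.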
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline.to_execution_plan
|
def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
)
|
python
|
def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
)
|
[
"def",
"to_execution_plan",
"(",
"self",
",",
"domain",
",",
"default_screen",
",",
"start_date",
",",
"end_date",
")",
":",
"if",
"self",
".",
"_domain",
"is",
"not",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"not",
"domain",
":",
"raise",
"AssertionError",
"(",
"\"Attempted to compile Pipeline with domain {} to execution \"",
"\"plan with different domain {}.\"",
".",
"format",
"(",
"self",
".",
"_domain",
",",
"domain",
")",
")",
"return",
"ExecutionPlan",
"(",
"domain",
"=",
"domain",
",",
"terms",
"=",
"self",
".",
"_prepare_graph_terms",
"(",
"default_screen",
")",
",",
"start_date",
"=",
"start_date",
",",
"end_date",
"=",
"end_date",
",",
")"
] |
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
|
[
"Compile",
"into",
"an",
"ExecutionPlan",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L160-L199
|
train
|
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline._prepare_graph_terms
|
def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns
|
python
|
def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns
|
[
"def",
"_prepare_graph_terms",
"(",
"self",
",",
"default_screen",
")",
":",
"columns",
"=",
"self",
".",
"columns",
".",
"copy",
"(",
")",
"screen",
"=",
"self",
".",
"screen",
"if",
"screen",
"is",
"None",
":",
"screen",
"=",
"default_screen",
"columns",
"[",
"SCREEN_NAME",
"]",
"=",
"screen",
"return",
"columns"
] |
Helper for to_graph and to_execution_plan.
|
[
"Helper",
"for",
"to_graph",
"and",
"to_execution_plan",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L217-L224
|
train
|
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline.show_graph
|
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
|
python
|
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
|
[
"def",
"show_graph",
"(",
"self",
",",
"format",
"=",
"'svg'",
")",
":",
"g",
"=",
"self",
".",
"to_simple_graph",
"(",
"AssetExists",
"(",
")",
")",
"if",
"format",
"==",
"'svg'",
":",
"return",
"g",
".",
"svg",
"elif",
"format",
"==",
"'png'",
":",
"return",
"g",
".",
"png",
"elif",
"format",
"==",
"'jpeg'",
":",
"return",
"g",
".",
"jpeg",
"else",
":",
"# We should never get here because of the expect_element decorator",
"# above.",
"raise",
"AssertionError",
"(",
"\"Unknown graph format %r.\"",
"%",
"format",
")"
] |
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
|
[
"Render",
"this",
"Pipeline",
"as",
"a",
"DAG",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L227-L246
|
train
|
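A hedged sketch of show_graph in a notebook session; rendering relies on the optional graphviz dependency, and the returned object is assumed displayable by IPython:

from zipline.pipeline import Pipeline
from zipline.pipeline.factors import Returns

pipe = Pipeline(columns={'ret': Returns(window_length=2)})
image = pipe.show_graph(format='png')  # 'svg' (default) and 'jpeg' also work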
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline._output_terms
|
def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(six.itervalues(self._columns))
screen = self.screen
if screen is not None:
terms.append(screen)
return terms
|
python
|
def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(six.itervalues(self._columns))
screen = self.screen
if screen is not None:
terms.append(screen)
return terms
|
[
"def",
"_output_terms",
"(",
"self",
")",
":",
"terms",
"=",
"list",
"(",
"six",
".",
"itervalues",
"(",
"self",
".",
"_columns",
")",
")",
"screen",
"=",
"self",
".",
"screen",
"if",
"screen",
"is",
"not",
"None",
":",
"terms",
".",
"append",
"(",
"screen",
")",
"return",
"terms"
] |
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
|
[
"A",
"list",
"of",
"terms",
"that",
"are",
"outputs",
"of",
"this",
"pipeline",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L255-L266
|
train
|
quantopian/zipline
|
zipline/pipeline/pipeline.py
|
Pipeline.domain
|
def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred
|
python
|
def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred
|
[
"def",
"domain",
"(",
"self",
",",
"default",
")",
":",
"# Always compute our inferred domain to ensure that it's compatible",
"# with our explicit domain.",
"inferred",
"=",
"infer_domain",
"(",
"self",
".",
"_output_terms",
")",
"if",
"inferred",
"is",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"GENERIC",
":",
"# Both generic. Fall back to default.",
"return",
"default",
"elif",
"inferred",
"is",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"not",
"GENERIC",
":",
"# Use the non-generic domain.",
"return",
"self",
".",
"_domain",
"elif",
"inferred",
"is",
"not",
"GENERIC",
"and",
"self",
".",
"_domain",
"is",
"GENERIC",
":",
"# Use the non-generic domain.",
"return",
"inferred",
"else",
":",
"# Both non-generic. They have to match.",
"if",
"inferred",
"is",
"not",
"self",
".",
"_domain",
":",
"raise",
"ValueError",
"(",
"\"Conflicting domains in Pipeline. Inferred {}, but {} was \"",
"\"passed at construction.\"",
".",
"format",
"(",
"inferred",
",",
"self",
".",
"_domain",
")",
")",
"return",
"inferred"
] |
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
|
[
"Get",
"the",
"domain",
"for",
"this",
"pipeline",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/pipeline.py#L269-L314
|
train
|
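A hedged sketch of the resolution rules, assuming the zipline.pipeline.domain module at this revision:

from zipline.pipeline import Pipeline
from zipline.pipeline.domain import GENERIC, US_EQUITIES

# An explicit construction-time domain wins over the default:
assert Pipeline(domain=US_EQUITIES).domain(default=GENERIC) is US_EQUITIES

# With no explicit domain and no column to infer from, the default is used:
assert Pipeline().domain(default=US_EQUITIES) is US_EQUITIES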
quantopian/zipline
|
zipline/pipeline/expression.py
|
_ensure_element
|
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
|
python
|
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
|
[
"def",
"_ensure_element",
"(",
"tup",
",",
"elem",
")",
":",
"try",
":",
"return",
"tup",
",",
"tup",
".",
"index",
"(",
"elem",
")",
"except",
"ValueError",
":",
"return",
"tuple",
"(",
"chain",
"(",
"tup",
",",
"(",
"elem",
",",
")",
")",
")",
",",
"len",
"(",
"tup",
")"
] |
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
|
[
"Create",
"a",
"tuple",
"containing",
"all",
"elements",
"of",
"tup",
"plus",
"elem",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L92-L101
|
train
|
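The helper's contract, restated as a self-contained demo (the body is reproduced verbatim so the snippet runs without zipline):

from itertools import chain

def _ensure_element(tup, elem):
    try:
        return tup, tup.index(elem)
    except ValueError:
        return tuple(chain(tup, (elem,))), len(tup)

assert _ensure_element(('a', 'b'), 'b') == (('a', 'b'), 1)       # already present
assert _ensure_element(('a', 'b'), 'c') == (('a', 'b', 'c'), 2)  # appended at the end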
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression._validate
|
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
|
python
|
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
|
[
"def",
"_validate",
"(",
"self",
")",
":",
"variable_names",
",",
"_unused",
"=",
"getExprNames",
"(",
"self",
".",
"_expr",
",",
"{",
"}",
")",
"expr_indices",
"=",
"[",
"]",
"for",
"name",
"in",
"variable_names",
":",
"if",
"name",
"==",
"'inf'",
":",
"continue",
"match",
"=",
"_VARIABLE_NAME_RE",
".",
"match",
"(",
"name",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
"\"%r is not a valid variable name\"",
"%",
"name",
")",
"expr_indices",
".",
"append",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"expr_indices",
".",
"sort",
"(",
")",
"expected_indices",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"inputs",
")",
")",
")",
"if",
"expr_indices",
"!=",
"expected_indices",
":",
"raise",
"ValueError",
"(",
"\"Expected %s for variable indices, but got %s\"",
"%",
"(",
"expected_indices",
",",
"expr_indices",
",",
")",
")",
"super",
"(",
"NumericalExpression",
",",
"self",
")",
".",
"_validate",
"(",
")"
] |
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
|
[
"Ensure",
"that",
"our",
"expression",
"string",
"has",
"variables",
"of",
"the",
"form",
"x_0",
"x_1",
"...",
"x_",
"(",
"N",
"-",
"1",
")",
"where",
"N",
"is",
"the",
"length",
"of",
"our",
"inputs",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L213-L236
|
train
|
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression._compute
|
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out
|
python
|
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out
|
[
"def",
"_compute",
"(",
"self",
",",
"arrays",
",",
"dates",
",",
"assets",
",",
"mask",
")",
":",
"out",
"=",
"full",
"(",
"mask",
".",
"shape",
",",
"self",
".",
"missing_value",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"# This writes directly into our output buffer.",
"numexpr",
".",
"evaluate",
"(",
"self",
".",
"_expr",
",",
"local_dict",
"=",
"{",
"\"x_%d\"",
"%",
"idx",
":",
"array",
"for",
"idx",
",",
"array",
"in",
"enumerate",
"(",
"arrays",
")",
"}",
",",
"global_dict",
"=",
"{",
"'inf'",
":",
"inf",
"}",
",",
"out",
"=",
"out",
",",
")",
"return",
"out"
] |
Compute our stored expression string with numexpr.
|
[
"Compute",
"our",
"stored",
"expression",
"string",
"with",
"numexpr",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L238-L253
|
train
|
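A self-contained sketch of the numexpr evaluation pattern used by _compute, on toy arrays rather than zipline data:

import numexpr
from numpy import array, full, inf

arrays = [array([[1.0, 2.0]]), array([[10.0, 20.0]])]
out = full((1, 2), float('nan'), dtype='float64')
numexpr.evaluate(
    "x_0 + x_1",
    local_dict={"x_%d" % i: arr for i, arr in enumerate(arrays)},
    global_dict={"inf": inf},
    out=out,  # numexpr writes directly into the preallocated buffer
)
# out is now [[11., 22.]]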
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression._rebind_variables
|
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
|
python
|
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
|
[
"def",
"_rebind_variables",
"(",
"self",
",",
"new_inputs",
")",
":",
"expr",
"=",
"self",
".",
"_expr",
"# If we have 11+ variables, some of our variable names may be",
"# substrings of other variable names. For example, we might have x_1,",
"# x_10, and x_100. By enumerating in reverse order, we ensure that",
"# every variable name which is a substring of another variable name is",
"# processed after the variable of which it is a substring. This",
"# guarantees that the substitution of any given variable index only",
"# ever affects exactly its own index. For example, if we have variables",
"# with indices going up to 100, we will process all of the x_1xx names",
"# before x_1x, which will be before x_1, so the substitution of x_1",
"# will not affect x_1x, which will not affect x_1xx.",
"for",
"idx",
",",
"input_",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"self",
".",
"inputs",
")",
")",
")",
":",
"old_varname",
"=",
"\"x_%d\"",
"%",
"idx",
"# Temporarily rebind to x_temp_N so that we don't overwrite the",
"# same value multiple times.",
"temp_new_varname",
"=",
"\"x_temp_%d\"",
"%",
"new_inputs",
".",
"index",
"(",
"input_",
")",
"expr",
"=",
"expr",
".",
"replace",
"(",
"old_varname",
",",
"temp_new_varname",
")",
"# Clear out the temp variables now that we've finished iteration.",
"return",
"expr",
".",
"replace",
"(",
"\"_temp_\"",
",",
"\"_\"",
")"
] |
Return self._expr with all variables rebound to the indices implied by
new_inputs.
|
[
"Return",
"self",
".",
"_expr",
"with",
"all",
"variables",
"rebound",
"to",
"the",
"indices",
"implied",
"by",
"new_inputs",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L255-L279
|
train
|
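A self-contained demonstration of the substring hazard that motivates the reverse iteration and the x_temp_N indirection:

expr = "x_1 + x_10"

# Naive forward substitution mangles longer names that share a prefix:
assert expr.replace("x_1", "x_5") == "x_5 + x_50"  # "x_10" was corrupted

# The two-phase rewrite avoids this: substitute longest-index-first into
# unique temporary names, then strip the "_temp_" marker.
staged = expr.replace("x_10", "x_temp_0").replace("x_1", "x_temp_5")
assert staged.replace("_temp_", "_") == "x_5 + x_0"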
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression._merge_expressions
|
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
|
python
|
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
|
[
"def",
"_merge_expressions",
"(",
"self",
",",
"other",
")",
":",
"new_inputs",
"=",
"tuple",
"(",
"set",
"(",
"self",
".",
"inputs",
")",
".",
"union",
"(",
"other",
".",
"inputs",
")",
")",
"new_self_expr",
"=",
"self",
".",
"_rebind_variables",
"(",
"new_inputs",
")",
"new_other_expr",
"=",
"other",
".",
"_rebind_variables",
"(",
"new_inputs",
")",
"return",
"new_self_expr",
",",
"new_other_expr",
",",
"new_inputs"
] |
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
|
[
"Merge",
"the",
"inputs",
"of",
"two",
"NumericalExpressions",
"into",
"a",
"single",
"input",
"tuple",
"rewriting",
"their",
"respective",
"string",
"expressions",
"to",
"make",
"input",
"names",
"resolve",
"correctly",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L281-L292
|
train
|
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression.build_binary_op
|
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, other)
return self_expr, other_expr, new_inputs
|
python
|
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, other)
return self_expr, other_expr, new_inputs
|
[
"def",
"build_binary_op",
"(",
"self",
",",
"op",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"NumericalExpression",
")",
":",
"self_expr",
",",
"other_expr",
",",
"new_inputs",
"=",
"self",
".",
"_merge_expressions",
"(",
"other",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Term",
")",
":",
"self_expr",
"=",
"self",
".",
"_expr",
"new_inputs",
",",
"other_idx",
"=",
"_ensure_element",
"(",
"self",
".",
"inputs",
",",
"other",
")",
"other_expr",
"=",
"\"x_%d\"",
"%",
"other_idx",
"elif",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"self_expr",
"=",
"self",
".",
"_expr",
"other_expr",
"=",
"str",
"(",
"other",
")",
"new_inputs",
"=",
"self",
".",
"inputs",
"else",
":",
"raise",
"BadBinaryOperator",
"(",
"op",
",",
"other",
")",
"return",
"self_expr",
",",
"other_expr",
",",
"new_inputs"
] |
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
|
[
"Compute",
"new",
"expression",
"strings",
"and",
"a",
"new",
"inputs",
"tuple",
"for",
"combining",
"self",
"and",
"other",
"with",
"a",
"binary",
"operator",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L294-L311
|
train
|
quantopian/zipline
|
zipline/pipeline/expression.py
|
NumericalExpression.graph_repr
|
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
)
|
python
|
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
)
|
[
"def",
"graph_repr",
"(",
"self",
")",
":",
"# Replace any floating point numbers in the expression",
"# with their scientific notation",
"final",
"=",
"re",
".",
"sub",
"(",
"r\"[-+]?\\d*\\.\\d+\"",
",",
"lambda",
"x",
":",
"format",
"(",
"float",
"(",
"x",
".",
"group",
"(",
"0",
")",
")",
",",
"'.2E'",
")",
",",
"self",
".",
"_expr",
")",
"# Graphviz interprets `\\l` as \"divide label into lines, left-justified\"",
"return",
"\"Expression:\\\\l {}\\\\l\"",
".",
"format",
"(",
"final",
",",
")"
] |
Short repr to use when rendering Pipeline graphs.
|
[
"Short",
"repr",
"to",
"use",
"when",
"rendering",
"Pipeline",
"graphs",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/expression.py#L327-L338
|
train
|
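A self-contained check of the scientific-notation rewrite performed above:

import re

expr = "x_0 * 0.008 + x_1 - 12.5"
final = re.sub(r"[-+]?\d*\.\d+",
               lambda m: format(float(m.group(0)), '.2E'),
               expr)
assert final == "x_0 * 8.00E-03 + x_1 - 1.25E+01"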
quantopian/zipline
|
zipline/utils/paths.py
|
last_modified_time
|
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
|
python
|
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
|
[
"def",
"last_modified_time",
"(",
"path",
")",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"path",
")",
",",
"unit",
"=",
"'s'",
",",
"tz",
"=",
"'UTC'",
")"
] |
Get the last modified time of path as a Timestamp.
|
[
"Get",
"the",
"last",
"modified",
"time",
"of",
"path",
"as",
"a",
"Timestamp",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/paths.py#L78-L82
|
train
|
quantopian/zipline
|
zipline/utils/paths.py
|
zipline_root
|
def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root
|
python
|
def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root
|
[
"def",
"zipline_root",
"(",
"environ",
"=",
"None",
")",
":",
"if",
"environ",
"is",
"None",
":",
"environ",
"=",
"os",
".",
"environ",
"root",
"=",
"environ",
".",
"get",
"(",
"'ZIPLINE_ROOT'",
",",
"None",
")",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"expanduser",
"(",
"'~/.zipline'",
")",
"return",
"root"
] |
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
|
[
"Get",
"the",
"root",
"directory",
"for",
"all",
"zipline",
"-",
"managed",
"files",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/paths.py#L107-L131
|
train
|
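A hedged sketch showing why the environ parameter exists, following the testing note in the docstring:

from zipline.utils.paths import zipline_root

assert zipline_root(environ={'ZIPLINE_ROOT': '/tmp/zl'}) == '/tmp/zl'
zipline_root(environ={})  # falls back to os.path.expanduser('~/.zipline')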
quantopian/zipline
|
zipline/pipeline/loaders/frame.py
|
DataFrameLoader.format_adjustments
|
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
            # Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
|
python
|
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
            # Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
|
[
"def",
"format_adjustments",
"(",
"self",
",",
"dates",
",",
"assets",
")",
":",
"make_adjustment",
"=",
"partial",
"(",
"make_adjustment_from_labels",
",",
"dates",
",",
"assets",
")",
"min_date",
",",
"max_date",
"=",
"dates",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"# TODO: Consider porting this to Cython.",
"if",
"len",
"(",
"self",
".",
"adjustments",
")",
"==",
"0",
":",
"return",
"{",
"}",
"# Mask for adjustments whose apply_dates are in the requested window of",
"# dates.",
"date_bounds",
"=",
"self",
".",
"adjustment_apply_dates",
".",
"slice_indexer",
"(",
"min_date",
",",
"max_date",
",",
")",
"dates_filter",
"=",
"zeros",
"(",
"len",
"(",
"self",
".",
"adjustments",
")",
",",
"dtype",
"=",
"'bool'",
")",
"dates_filter",
"[",
"date_bounds",
"]",
"=",
"True",
"# Ignore adjustments whose apply_date is in range, but whose end_date",
"# is out of range.",
"dates_filter",
"&=",
"(",
"self",
".",
"adjustment_end_dates",
">=",
"min_date",
")",
"# Mask for adjustments whose sids are in the requested assets.",
"sids_filter",
"=",
"self",
".",
"adjustment_sids",
".",
"isin",
"(",
"assets",
".",
"values",
")",
"adjustments_to_use",
"=",
"self",
".",
"adjustments",
".",
"loc",
"[",
"dates_filter",
"&",
"sids_filter",
"]",
".",
"set_index",
"(",
"'apply_date'",
")",
"# For each apply_date on which we have an adjustment, compute",
"# the integer index of that adjustment's apply_date in `dates`.",
"# Then build a list of Adjustment objects for that apply_date.",
"# This logic relies on the sorting applied on the previous line.",
"out",
"=",
"{",
"}",
"previous_apply_date",
"=",
"object",
"(",
")",
"for",
"row",
"in",
"adjustments_to_use",
".",
"itertuples",
"(",
")",
":",
"# This expansion depends on the ordering of the DataFrame columns,",
"# defined above.",
"apply_date",
",",
"sid",
",",
"value",
",",
"kind",
",",
"start_date",
",",
"end_date",
"=",
"row",
"if",
"apply_date",
"!=",
"previous_apply_date",
":",
"# Get the next apply date if no exact match.",
"row_loc",
"=",
"dates",
".",
"get_loc",
"(",
"apply_date",
",",
"method",
"=",
"'bfill'",
")",
"current_date_adjustments",
"=",
"out",
"[",
"row_loc",
"]",
"=",
"[",
"]",
"previous_apply_date",
"=",
"apply_date",
"# Look up the approprate Adjustment constructor based on the value",
"# of `kind`.",
"current_date_adjustments",
".",
"append",
"(",
"make_adjustment",
"(",
"start_date",
",",
"end_date",
",",
"sid",
",",
"kind",
",",
"value",
")",
")",
"return",
"out"
] |
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
|
[
"Build",
"a",
"dict",
"of",
"Adjustment",
"objects",
"in",
"the",
"format",
"expected",
"by",
"AdjustedArray",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L83-L147
|
train
|
quantopian/zipline
|
zipline/pipeline/loaders/frame.py
|
DataFrameLoader.load_adjusted_array
|
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
}
|
python
|
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load data from our stored baseline.
"""
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
column = columns[0]
self._validate_input_column(column)
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(sids)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, sids),
missing_value=column.missing_value,
),
}
|
[
"def",
"load_adjusted_array",
"(",
"self",
",",
"domain",
",",
"columns",
",",
"dates",
",",
"sids",
",",
"mask",
")",
":",
"if",
"len",
"(",
"columns",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"Can't load multiple columns with DataFrameLoader\"",
")",
"column",
"=",
"columns",
"[",
"0",
"]",
"self",
".",
"_validate_input_column",
"(",
"column",
")",
"date_indexer",
"=",
"self",
".",
"dates",
".",
"get_indexer",
"(",
"dates",
")",
"assets_indexer",
"=",
"self",
".",
"assets",
".",
"get_indexer",
"(",
"sids",
")",
"# Boolean arrays with True on matched entries",
"good_dates",
"=",
"(",
"date_indexer",
"!=",
"-",
"1",
")",
"good_assets",
"=",
"(",
"assets_indexer",
"!=",
"-",
"1",
")",
"data",
"=",
"self",
".",
"baseline",
"[",
"ix_",
"(",
"date_indexer",
",",
"assets_indexer",
")",
"]",
"mask",
"=",
"(",
"good_assets",
"&",
"as_column",
"(",
"good_dates",
")",
")",
"&",
"mask",
"# Mask out requested columns/rows that didn't match.",
"data",
"[",
"~",
"mask",
"]",
"=",
"column",
".",
"missing_value",
"return",
"{",
"column",
":",
"AdjustedArray",
"(",
"# Pull out requested columns/rows from our baseline data.",
"data",
"=",
"data",
",",
"adjustments",
"=",
"self",
".",
"format_adjustments",
"(",
"dates",
",",
"sids",
")",
",",
"missing_value",
"=",
"column",
".",
"missing_value",
",",
")",
",",
"}"
] |
Load data from our stored baseline.
|
[
"Load",
"data",
"from",
"our",
"stored",
"baseline",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L149-L181
|
train
|
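A hedged usage sketch of wiring a baseline DataFrame into DataFrameLoader. USEquityPricing.close is a real zipline column, but the dates, sids, and values are invented, and in normal use the pipeline engine (not the user) supplies `domain` and `mask` when it calls load_adjusted_array:

import numpy as np
import pandas as pd
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import DataFrameLoader

dates = pd.date_range('2014-01-02', periods=3, tz='UTC')
sids = [1, 2]
# Rows are dates, columns are sids.
baseline = pd.DataFrame(np.arange(6.0).reshape(3, 2), index=dates, columns=sids)
loader = DataFrameLoader(USEquityPricing.close, baseline)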
quantopian/zipline
|
zipline/pipeline/loaders/frame.py
|
DataFrameLoader._validate_input_column
|
def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column)
|
python
|
def _validate_input_column(self, column):
"""Make sure a passed column is our column.
"""
if column != self.column and column.unspecialize() != self.column:
raise ValueError("Can't load unknown column %s" % column)
|
[
"def",
"_validate_input_column",
"(",
"self",
",",
"column",
")",
":",
"if",
"column",
"!=",
"self",
".",
"column",
"and",
"column",
".",
"unspecialize",
"(",
")",
"!=",
"self",
".",
"column",
":",
"raise",
"ValueError",
"(",
"\"Can't load unknown column %s\"",
"%",
"column",
")"
] |
Make sure a passed column is our column.
|
[
"Make",
"sure",
"a",
"passed",
"column",
"is",
"our",
"column",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/frame.py#L183-L187
|
train
|
quantopian/zipline
|
zipline/utils/security_list.py
|
load_from_directory
|
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data
|
python
|
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data
|
[
"def",
"load_from_directory",
"(",
"list_name",
")",
":",
"data",
"=",
"{",
"}",
"dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"SECURITY_LISTS_DIR",
",",
"list_name",
")",
"for",
"kd_name",
"in",
"listdir",
"(",
"dir_path",
")",
":",
"kd",
"=",
"datetime",
".",
"strptime",
"(",
"kd_name",
",",
"DATE_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
"data",
"[",
"kd",
"]",
"=",
"{",
"}",
"kd_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"kd_name",
")",
"for",
"ld_name",
"in",
"listdir",
"(",
"kd_path",
")",
":",
"ld",
"=",
"datetime",
".",
"strptime",
"(",
"ld_name",
",",
"DATE_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
"data",
"[",
"kd",
"]",
"[",
"ld",
"]",
"=",
"{",
"}",
"ld_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"kd_path",
",",
"ld_name",
")",
"for",
"fname",
"in",
"listdir",
"(",
"ld_path",
")",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"ld_path",
",",
"fname",
")",
"with",
"open",
"(",
"fpath",
")",
"as",
"f",
":",
"symbols",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"data",
"[",
"kd",
"]",
"[",
"ld",
"]",
"[",
"fname",
"]",
"=",
"symbols",
"return",
"data"
] |
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
|
[
"To",
"resolve",
"the",
"symbol",
"in",
"the",
"LEVERAGED_ETF",
"list",
"the",
"date",
"on",
"which",
"the",
"symbol",
"was",
"in",
"effect",
"is",
"needed",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/security_list.py#L123-L159
|
train
|
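A sketch of the directory layout this function walks and the nested dict it builds; the list name, dates, and symbols are invented, and DATE_FORMAT is assumed to be '%Y%m%d':

# SECURITY_LISTS_DIR/
#     leveraged_etf_list/
#         20140101/          <- knowledge date
#             20140101/      <- lookup date
#                 add.txt    <- one symbol per line, e.g. "BZQ\nURTY"
#                 delete.txt
#
# load_from_directory('leveraged_etf_list') would then return:
# {datetime(2014, 1, 1, tzinfo=pytz.utc):
#     {datetime(2014, 1, 1, tzinfo=pytz.utc):
#         {'add.txt': ['BZQ', 'URTY'], 'delete.txt': []}}}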
quantopian/zipline
|
zipline/utils/memoize.py
|
_weak_lru_cache
|
def _weak_lru_cache(maxsize=100):
"""
Users should only access the lru_cache through its public API:
cache_info, cache_clear
The internals of the lru_cache are encapsulated for thread safety and
to allow the implementation to change.
"""
def decorating_function(
user_function, tuple=tuple, sorted=sorted, len=len,
KeyError=KeyError):
hits, misses = [0], [0]
kwd_mark = (object(),) # separates positional and keyword args
lock = Lock() # needed because OrderedDict isn't threadsafe
if maxsize is None:
cache = _WeakArgsDict() # cache without ordering or size limit
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
# ordered least recent to most recent
cache = _WeakArgsOrderedDict()
cache_popitem = cache.popitem
cache_renew = cache.move_to_end
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
with lock:
try:
result = cache[key]
cache_renew(key) # record recent use of this key
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
with lock:
cache[key] = result # record recent use of this key
misses[0] += 1
if len(cache) > maxsize:
# purge least recently used cache entry
cache_popitem(False)
return result
def cache_info():
"""Report cache statistics"""
with lock:
return hits[0], misses[0], maxsize, len(cache)
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
hits[0] = misses[0] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
|
python
|
def _weak_lru_cache(maxsize=100):
"""
Users should only access the lru_cache through its public API:
cache_info, cache_clear
The internals of the lru_cache are encapsulated for thread safety and
to allow the implementation to change.
"""
def decorating_function(
user_function, tuple=tuple, sorted=sorted, len=len,
KeyError=KeyError):
hits, misses = [0], [0]
kwd_mark = (object(),) # separates positional and keyword args
lock = Lock() # needed because OrderedDict isn't threadsafe
if maxsize is None:
cache = _WeakArgsDict() # cache without ordering or size limit
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
# ordered least recent to most recent
cache = _WeakArgsOrderedDict()
cache_popitem = cache.popitem
cache_renew = cache.move_to_end
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
with lock:
try:
result = cache[key]
cache_renew(key) # record recent use of this key
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
with lock:
cache[key] = result # record recent use of this key
misses[0] += 1
if len(cache) > maxsize:
# purge least recently used cache entry
cache_popitem(False)
return result
def cache_info():
"""Report cache statistics"""
with lock:
return hits[0], misses[0], maxsize, len(cache)
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
hits[0] = misses[0] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
|
[
"def",
"_weak_lru_cache",
"(",
"maxsize",
"=",
"100",
")",
":",
"def",
"decorating_function",
"(",
"user_function",
",",
"tuple",
"=",
"tuple",
",",
"sorted",
"=",
"sorted",
",",
"len",
"=",
"len",
",",
"KeyError",
"=",
"KeyError",
")",
":",
"hits",
",",
"misses",
"=",
"[",
"0",
"]",
",",
"[",
"0",
"]",
"kwd_mark",
"=",
"(",
"object",
"(",
")",
",",
")",
"# separates positional and keyword args",
"lock",
"=",
"Lock",
"(",
")",
"# needed because OrderedDict isn't threadsafe",
"if",
"maxsize",
"is",
"None",
":",
"cache",
"=",
"_WeakArgsDict",
"(",
")",
"# cache without ordering or size limit",
"@",
"wraps",
"(",
"user_function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"key",
"=",
"args",
"if",
"kwds",
":",
"key",
"+=",
"kwd_mark",
"+",
"tuple",
"(",
"sorted",
"(",
"kwds",
".",
"items",
"(",
")",
")",
")",
"try",
":",
"result",
"=",
"cache",
"[",
"key",
"]",
"hits",
"[",
"0",
"]",
"+=",
"1",
"return",
"result",
"except",
"KeyError",
":",
"pass",
"result",
"=",
"user_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"cache",
"[",
"key",
"]",
"=",
"result",
"misses",
"[",
"0",
"]",
"+=",
"1",
"return",
"result",
"else",
":",
"# ordered least recent to most recent",
"cache",
"=",
"_WeakArgsOrderedDict",
"(",
")",
"cache_popitem",
"=",
"cache",
".",
"popitem",
"cache_renew",
"=",
"cache",
".",
"move_to_end",
"@",
"wraps",
"(",
"user_function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"key",
"=",
"args",
"if",
"kwds",
":",
"key",
"+=",
"kwd_mark",
"+",
"tuple",
"(",
"sorted",
"(",
"kwds",
".",
"items",
"(",
")",
")",
")",
"with",
"lock",
":",
"try",
":",
"result",
"=",
"cache",
"[",
"key",
"]",
"cache_renew",
"(",
"key",
")",
"# record recent use of this key",
"hits",
"[",
"0",
"]",
"+=",
"1",
"return",
"result",
"except",
"KeyError",
":",
"pass",
"result",
"=",
"user_function",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"with",
"lock",
":",
"cache",
"[",
"key",
"]",
"=",
"result",
"# record recent use of this key",
"misses",
"[",
"0",
"]",
"+=",
"1",
"if",
"len",
"(",
"cache",
")",
">",
"maxsize",
":",
"# purge least recently used cache entry",
"cache_popitem",
"(",
"False",
")",
"return",
"result",
"def",
"cache_info",
"(",
")",
":",
"\"\"\"Report cache statistics\"\"\"",
"with",
"lock",
":",
"return",
"hits",
"[",
"0",
"]",
",",
"misses",
"[",
"0",
"]",
",",
"maxsize",
",",
"len",
"(",
"cache",
")",
"def",
"cache_clear",
"(",
")",
":",
"\"\"\"Clear the cache and cache statistics\"\"\"",
"with",
"lock",
":",
"cache",
".",
"clear",
"(",
")",
"hits",
"[",
"0",
"]",
"=",
"misses",
"[",
"0",
"]",
"=",
"0",
"wrapper",
".",
"cache_info",
"=",
"cache_info",
"wrapper",
".",
"cache_clear",
"=",
"cache_clear",
"return",
"wrapper",
"return",
"decorating_function"
] |
Users should only access the lru_cache through its public API:
cache_info, cache_clear
The internals of the lru_cache are encapsulated for thread safety and
to allow the implementation to change.
|
[
"Users",
"should",
"only",
"access",
"the",
"lru_cache",
"through",
"its",
"public",
"API",
":",
"cache_info",
"cache_clear",
"The",
"internals",
"of",
"the",
"lru_cache",
"are",
"encapsulated",
"for",
"thread",
"safety",
"and",
"to",
"allow",
"the",
"implementation",
"to",
"change",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/memoize.py#L44-L120
|
train
|
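The hits/misses bookkeeping above implements the same public contract as the stdlib cache; a minimal illustration with functools.lru_cache, which exposes the identical cache_info()/cache_clear() API:

from functools import lru_cache

@lru_cache(maxsize=100)
def square(x):
    return x * x

square(2)   # miss: computed and stored
square(2)   # hit: served from the cache
print(square.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=100, currsize=1)
square.cache_clear()        # resets both the cache and the counters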
quantopian/zipline
|
zipline/utils/memoize.py
|
weak_lru_cache
|
def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc
|
python
|
def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc
|
[
"def",
"weak_lru_cache",
"(",
"maxsize",
"=",
"100",
")",
":",
"class",
"desc",
"(",
"lazyval",
")",
":",
"def",
"__get__",
"(",
"self",
",",
"instance",
",",
"owner",
")",
":",
"if",
"instance",
"is",
"None",
":",
"return",
"self",
"try",
":",
"return",
"self",
".",
"_cache",
"[",
"instance",
"]",
"except",
"KeyError",
":",
"inst",
"=",
"ref",
"(",
"instance",
")",
"@",
"_weak_lru_cache",
"(",
"maxsize",
")",
"@",
"wraps",
"(",
"self",
".",
"_get",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get",
"(",
"inst",
"(",
")",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_cache",
"[",
"instance",
"]",
"=",
"wrapper",
"return",
"wrapper",
"@",
"_weak_lru_cache",
"(",
"maxsize",
")",
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_get",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"desc"
] |
Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
|
[
"Weak",
"least",
"-",
"recently",
"-",
"used",
"cache",
"decorator",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/memoize.py#L211-L248
|
train
|
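A minimal sketch of why weak-keyed entries disappear, using the stdlib WeakValueDictionary rather than zipline's _WeakArgsDict; the immediate collection shown relies on CPython's reference counting:

import weakref

class Key(object):
    pass

cache = weakref.WeakValueDictionary()
k = Key()
cache['entry'] = k
print('entry' in cache)  # True while `k` is alive
del k
print('entry' in cache)  # False: the value was garbage collected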
quantopian/zipline
|
zipline/utils/final.py
|
is_final
|
def is_final(name, mro):
"""
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
of the classes. Because `final` objects are descriptors, we need to grab
them _BEFORE_ the `__call__` is invoked.
"""
return any(isinstance(getattr(c, '__dict__', {}).get(name), final)
for c in bases_mro(mro))
|
python
|
def is_final(name, mro):
"""
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
of the classes. Because `final` objects are descriptors, we need to grab
them _BEFORE_ the `__call__` is invoked.
"""
return any(isinstance(getattr(c, '__dict__', {}).get(name), final)
for c in bases_mro(mro))
|
[
"def",
"is_final",
"(",
"name",
",",
"mro",
")",
":",
"return",
"any",
"(",
"isinstance",
"(",
"getattr",
"(",
"c",
",",
"'__dict__'",
",",
"{",
"}",
")",
".",
"get",
"(",
"name",
")",
",",
"final",
")",
"for",
"c",
"in",
"bases_mro",
"(",
"mro",
")",
")"
] |
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
of the classes. Because `final` objects are descriptors, we need to grab
them _BEFORE_ the `__call__` is invoked.
|
[
"Checks",
"if",
"name",
"is",
"a",
"final",
"object",
"in",
"the",
"given",
"mro",
".",
"We",
"need",
"to",
"check",
"the",
"mro",
"because",
"we",
"need",
"to",
"directly",
"go",
"into",
"the",
"__dict__",
"of",
"the",
"classes",
".",
"Because",
"final",
"objects",
"are",
"descriptor",
"we",
"need",
"to",
"grab",
"them",
"_BEFORE_",
"the",
"__call__",
"is",
"invoked",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/final.py#L20-L28
|
train
|
quantopian/zipline
|
zipline/pipeline/data/dataset.py
|
Column.bind
|
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
)
|
python
|
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
)
|
[
"def",
"bind",
"(",
"self",
",",
"name",
")",
":",
"return",
"_BoundColumnDescr",
"(",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"name",
"=",
"name",
",",
"doc",
"=",
"self",
".",
"doc",
",",
"metadata",
"=",
"self",
".",
"metadata",
",",
")"
] |
Bind a `Column` object to its name.
|
[
"Bind",
"a",
"Column",
"object",
"to",
"its",
"name",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L49-L59
|
train
|
quantopian/zipline
|
zipline/pipeline/data/dataset.py
|
BoundColumn.specialize
|
def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return type(self)(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset.specialize(domain),
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
)
|
python
|
def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return type(self)(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset.specialize(domain),
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
)
|
[
"def",
"specialize",
"(",
"self",
",",
"domain",
")",
":",
"if",
"domain",
"==",
"self",
".",
"domain",
":",
"return",
"self",
"return",
"type",
"(",
"self",
")",
"(",
"dtype",
"=",
"self",
".",
"dtype",
",",
"missing_value",
"=",
"self",
".",
"missing_value",
",",
"dataset",
"=",
"self",
".",
"_dataset",
".",
"specialize",
"(",
"domain",
")",
",",
"name",
"=",
"self",
".",
"_name",
",",
"doc",
"=",
"self",
".",
"__doc__",
",",
"metadata",
"=",
"self",
".",
"_metadata",
",",
")"
] |
Specialize ``self`` to a concrete domain.
|
[
"Specialize",
"self",
"to",
"a",
"concrete",
"domain",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L177-L190
|
train
|
quantopian/zipline
|
zipline/pipeline/data/dataset.py
|
DataSet.get_column
|
def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls)
|
python
|
def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls)
|
[
"def",
"get_column",
"(",
"cls",
",",
"name",
")",
":",
"clsdict",
"=",
"vars",
"(",
"cls",
")",
"try",
":",
"maybe_column",
"=",
"clsdict",
"[",
"name",
"]",
"if",
"not",
"isinstance",
"(",
"maybe_column",
",",
"_BoundColumnDescr",
")",
":",
"raise",
"KeyError",
"(",
"name",
")",
"except",
"KeyError",
":",
"raise",
"AttributeError",
"(",
"\"{dset} has no column {colname!r}:\\n\\n\"",
"\"Possible choices are:\\n\"",
"\"{choices}\"",
".",
"format",
"(",
"dset",
"=",
"cls",
".",
"qualname",
",",
"colname",
"=",
"name",
",",
"choices",
"=",
"bulleted_list",
"(",
"sorted",
"(",
"cls",
".",
"_column_names",
")",
",",
"max_count",
"=",
"10",
",",
")",
",",
")",
")",
"# Resolve column descriptor into a BoundColumn.",
"return",
"maybe_column",
".",
"__get__",
"(",
"None",
",",
"cls",
")"
] |
Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
|
[
"Look",
"up",
"a",
"column",
"by",
"name",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L502-L540
|
train
|
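A hedged usage sketch of get_column against zipline's built-in USEquityPricing dataset; the misspelled lookup is deliberate, to trigger the suggestion text built above:

from zipline.pipeline.data import USEquityPricing

close = USEquityPricing.get_column('close')
print(close is USEquityPricing.close)  # True

try:
    USEquityPricing.get_column('cloze')  # deliberate typo
except AttributeError as e:
    print(e)  # "... has no column 'cloze': ... Possible choices are: ..."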
quantopian/zipline
|
zipline/pipeline/data/dataset.py
|
DataSetFamily._make_dataset
|
def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice
|
python
|
def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice
|
[
"def",
"_make_dataset",
"(",
"cls",
",",
"coords",
")",
":",
"class",
"Slice",
"(",
"cls",
".",
"_SliceType",
")",
":",
"extra_coords",
"=",
"coords",
"Slice",
".",
"__name__",
"=",
"'%s.slice(%s)'",
"%",
"(",
"cls",
".",
"__name__",
",",
"', '",
".",
"join",
"(",
"'%s=%r'",
"%",
"item",
"for",
"item",
"in",
"coords",
".",
"items",
"(",
")",
")",
",",
")",
"return",
"Slice"
] |
Construct a new dataset given the coordinates.
|
[
"Construct",
"a",
"new",
"dataset",
"given",
"the",
"coordinates",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L813-L823
|
train
|
quantopian/zipline
|
zipline/pipeline/data/dataset.py
|
DataSetFamily.slice
|
def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice
|
python
|
def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice
|
[
"def",
"slice",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"coords",
",",
"hash_key",
"=",
"cls",
".",
"_canonical_key",
"(",
"args",
",",
"kwargs",
")",
"try",
":",
"return",
"cls",
".",
"_slice_cache",
"[",
"hash_key",
"]",
"except",
"KeyError",
":",
"pass",
"Slice",
"=",
"cls",
".",
"_make_dataset",
"(",
"coords",
")",
"cls",
".",
"_slice_cache",
"[",
"hash_key",
"]",
"=",
"Slice",
"return",
"Slice"
] |
Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
|
[
"Take",
"a",
"slice",
"of",
"a",
"DataSetFamily",
"to",
"produce",
"a",
"dataset",
"indexed",
"by",
"asset",
"and",
"date",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/data/dataset.py#L826-L854
|
train
|
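A hedged sketch of defining and slicing a DataSetFamily; the family, its dimension, and its column are invented, following the extra_dims convention from zipline's documentation:

from zipline.pipeline.data import Column, DataSetFamily

class Fundamentals(DataSetFamily):
    extra_dims = [
        ('report_type', {'annual', 'quarterly'}),
    ]
    revenue = Column(float)

# Fixing the extra dimension yields a regular asset/date-indexed DataSet.
Annual = Fundamentals.slice(report_type='annual')
print(Annual.extra_coords)  # ordered mapping: {'report_type': 'annual'}
# Repeated slices with the same coordinates hit the _slice_cache.
print(Annual is Fundamentals.slice(report_type='annual'))  # True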
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
expected_bar_value
|
def expected_bar_value(asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
|
python
|
def expected_bar_value(asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
|
[
"def",
"expected_bar_value",
"(",
"asset_id",
",",
"date",
",",
"colname",
")",
":",
"from_asset",
"=",
"asset_id",
"*",
"100000",
"from_colname",
"=",
"OHLCV",
".",
"index",
"(",
"colname",
")",
"*",
"1000",
"from_date",
"=",
"(",
"date",
"-",
"PSEUDO_EPOCH",
")",
".",
"days",
"return",
"from_asset",
"+",
"from_colname",
"+",
"from_date"
] |
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
|
[
"Check",
"that",
"the",
"raw",
"value",
"for",
"an",
"asset",
"/",
"date",
"/",
"column",
"triple",
"is",
"as",
"expected",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L319-L329
|
train
|
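A worked instance of the encoding above; the OHLCV ordering and the PSEUDO_EPOCH value are assumptions for illustration (zipline defines both elsewhere in this module):

from datetime import datetime

OHLCV = ('open', 'high', 'low', 'close', 'volume')  # assumed ordering
PSEUDO_EPOCH = datetime(2015, 1, 1)                 # assumed epoch

asset_id, date, colname = 2, datetime(2015, 1, 5), 'close'
value = asset_id * 100000 + OHLCV.index(colname) * 1000 + (date - PSEUDO_EPOCH).days
print(value)  # 203004 = asset 2 * 100000 + column 3 * 1000 + day offset 4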
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
expected_bar_values_2d
|
def expected_bar_values_2d(dates,
assets,
asset_info,
colname,
holes=None):
"""
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
if holes is not None:
expected = expected_bar_value_with_holes(
asset,
date,
colname,
holes,
missing,
)
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected
return data
|
python
|
def expected_bar_values_2d(dates,
assets,
asset_info,
colname,
holes=None):
"""
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
# Use missing values when asset_id is not contained in asset_info.
if asset not in asset_info.index:
continue
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
if holes is not None:
expected = expected_bar_value_with_holes(
asset,
date,
colname,
holes,
missing,
)
else:
expected = expected_bar_value(asset, date, colname)
data[i, j] = expected
return data
|
[
"def",
"expected_bar_values_2d",
"(",
"dates",
",",
"assets",
",",
"asset_info",
",",
"colname",
",",
"holes",
"=",
"None",
")",
":",
"if",
"colname",
"==",
"'volume'",
":",
"dtype",
"=",
"uint32",
"missing",
"=",
"0",
"else",
":",
"dtype",
"=",
"float64",
"missing",
"=",
"float",
"(",
"'nan'",
")",
"data",
"=",
"full",
"(",
"(",
"len",
"(",
"dates",
")",
",",
"len",
"(",
"assets",
")",
")",
",",
"missing",
",",
"dtype",
"=",
"dtype",
")",
"for",
"j",
",",
"asset",
"in",
"enumerate",
"(",
"assets",
")",
":",
"# Use missing values when asset_id is not contained in asset_info.",
"if",
"asset",
"not",
"in",
"asset_info",
".",
"index",
":",
"continue",
"start",
"=",
"asset_start",
"(",
"asset_info",
",",
"asset",
")",
"end",
"=",
"asset_end",
"(",
"asset_info",
",",
"asset",
")",
"for",
"i",
",",
"date",
"in",
"enumerate",
"(",
"dates",
")",
":",
"# No value expected for dates outside the asset's start/end",
"# date.",
"if",
"not",
"(",
"start",
"<=",
"date",
"<=",
"end",
")",
":",
"continue",
"if",
"holes",
"is",
"not",
"None",
":",
"expected",
"=",
"expected_bar_value_with_holes",
"(",
"asset",
",",
"date",
",",
"colname",
",",
"holes",
",",
"missing",
",",
")",
"else",
":",
"expected",
"=",
"expected_bar_value",
"(",
"asset",
",",
"date",
",",
"colname",
")",
"data",
"[",
"i",
",",
"j",
"]",
"=",
"expected",
"return",
"data"
] |
Return a 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Missing locs are filled with 0 for volume and NaN for price columns:
- Values before/after an asset's lifetime.
- Values for asset_ids not contained in asset_info.
- Locs defined in `holes`.
|
[
"Return",
"an",
"2D",
"array",
"containing",
"cls",
".",
"expected_value",
"(",
"asset_id",
"date",
"colname",
")",
"for",
"each",
"date",
"/",
"asset",
"pair",
"in",
"the",
"inputs",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L344-L392
|
train
|
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
PrecomputedLoader.load_adjusted_array
|
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders.get(col)
if loader is None:
loader = self._loaders[col.unspecialize()]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array(domain, [col], dates, sids, mask)
)
return out
|
python
|
def load_adjusted_array(self, domain, columns, dates, sids, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders.get(col)
if loader is None:
loader = self._loaders[col.unspecialize()]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array(domain, [col], dates, sids, mask)
)
return out
|
[
"def",
"load_adjusted_array",
"(",
"self",
",",
"domain",
",",
"columns",
",",
"dates",
",",
"sids",
",",
"mask",
")",
":",
"out",
"=",
"{",
"}",
"for",
"col",
"in",
"columns",
":",
"try",
":",
"loader",
"=",
"self",
".",
"_loaders",
".",
"get",
"(",
"col",
")",
"if",
"loader",
"is",
"None",
":",
"loader",
"=",
"self",
".",
"_loaders",
"[",
"col",
".",
"unspecialize",
"(",
")",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Couldn't find loader for %s\"",
"%",
"col",
")",
"out",
".",
"update",
"(",
"loader",
".",
"load_adjusted_array",
"(",
"domain",
",",
"[",
"col",
"]",
",",
"dates",
",",
"sids",
",",
"mask",
")",
")",
"return",
"out"
] |
Load by delegating to sub-loaders.
|
[
"Load",
"by",
"delegating",
"to",
"sub",
"-",
"loaders",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L82-L97
|
train
|
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
SeededRandomLoader.values
|
def values(self, dtype, dates, sids):
"""
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
"""
shape = (len(dates), len(sids))
return {
datetime64ns_dtype: self._datetime_values,
float64_dtype: self._float_values,
int64_dtype: self._int_values,
bool_dtype: self._bool_values,
object_dtype: self._object_values,
}[dtype](shape)
|
python
|
def values(self, dtype, dates, sids):
"""
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
"""
shape = (len(dates), len(sids))
return {
datetime64ns_dtype: self._datetime_values,
float64_dtype: self._float_values,
int64_dtype: self._int_values,
bool_dtype: self._bool_values,
object_dtype: self._object_values,
}[dtype](shape)
|
[
"def",
"values",
"(",
"self",
",",
"dtype",
",",
"dates",
",",
"sids",
")",
":",
"shape",
"=",
"(",
"len",
"(",
"dates",
")",
",",
"len",
"(",
"sids",
")",
")",
"return",
"{",
"datetime64ns_dtype",
":",
"self",
".",
"_datetime_values",
",",
"float64_dtype",
":",
"self",
".",
"_float_values",
",",
"int64_dtype",
":",
"self",
".",
"_int_values",
",",
"bool_dtype",
":",
"self",
".",
"_bool_values",
",",
"object_dtype",
":",
"self",
".",
"_object_values",
",",
"}",
"[",
"dtype",
"]",
"(",
"shape",
")"
] |
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
|
[
"Make",
"a",
"random",
"array",
"of",
"shape",
"(",
"len",
"(",
"dates",
")",
"len",
"(",
"sids",
"))",
"with",
"dtype",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L147-L158
|
train
|
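A minimal sketch of the dtype-keyed dispatch used by values(), with plain numpy dtypes standing in for zipline's dtype constants:

import numpy as np

state = np.random.RandomState(42)

generators = {
    np.dtype('float64'): lambda shape: state.uniform(0.0, 100.0, size=shape),
    np.dtype('int64'): lambda shape: state.randint(0, 100, size=shape).astype('int64'),
    np.dtype('bool'): lambda shape: state.uniform(size=shape) > 0.5,
}

def values(dtype, dates, sids):
    # Same shape convention as above: (len(dates), len(sids)).
    return generators[np.dtype(dtype)]((len(dates), len(sids)))

print(values('float64', range(3), range(2)).shape)  # (3, 2)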
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
SeededRandomLoader._float_values
|
def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape)
|
python
|
def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape)
|
[
"def",
"_float_values",
"(",
"self",
",",
"shape",
")",
":",
"return",
"self",
".",
"state",
".",
"uniform",
"(",
"low",
"=",
"0.0",
",",
"high",
"=",
"100.0",
",",
"size",
"=",
"shape",
")"
] |
Return uniformly-distributed floats between 0.0 and 100.0.
|
[
"Return",
"uniformly",
"-",
"distributed",
"floats",
"between",
"-",
"0",
".",
"0",
"and",
"100",
".",
"0",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L170-L174
|
train
|
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
SeededRandomLoader._int_values
|
def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64'))
|
python
|
def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64'))
|
[
"def",
"_int_values",
"(",
"self",
",",
"shape",
")",
":",
"return",
"(",
"self",
".",
"state",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"100",
",",
"size",
"=",
"shape",
")",
".",
"astype",
"(",
"'int64'",
")",
")"
] |
Return uniformly-distributed integers between 0 and 100.
|
[
"Return",
"uniformly",
"-",
"distributed",
"integers",
"between",
"0",
"and",
"100",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L176-L181
|
train
|
quantopian/zipline
|
zipline/pipeline/loaders/synthetic.py
|
SeededRandomLoader._datetime_values
|
def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets
|
python
|
def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets
|
[
"def",
"_datetime_values",
"(",
"self",
",",
"shape",
")",
":",
"start",
"=",
"Timestamp",
"(",
"'2014'",
",",
"tz",
"=",
"'UTC'",
")",
".",
"asm8",
"offsets",
"=",
"self",
".",
"state",
".",
"randint",
"(",
"low",
"=",
"0",
",",
"high",
"=",
"364",
",",
"size",
"=",
"shape",
",",
")",
".",
"astype",
"(",
"'timedelta64[D]'",
")",
"return",
"start",
"+",
"offsets"
] |
Return uniformly-distributed dates in 2014.
|
[
"Return",
"uniformly",
"-",
"distributed",
"dates",
"in",
"2014",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/synthetic.py#L183-L193
|
train
|
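A minimal sketch of the timestamp-plus-offsets arithmetic in _datetime_values; numpy upcasts the day-resolution offsets to nanoseconds when adding them to the datetime64[ns] start:

import numpy as np
from pandas import Timestamp

start = Timestamp('2014', tz='UTC').asm8  # numpy.datetime64 for 2014-01-01
offsets = np.random.RandomState(0).randint(0, 364, size=3).astype('timedelta64[D]')
print(start + offsets)  # three datetime64 values, all inside 2014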
quantopian/zipline
|
zipline/lib/quantiles.py
|
quantiles
|
def quantiles(data, nbins_or_partition_bounds):
"""
Compute rowwise array quantiles on an input.
"""
return apply_along_axis(
qcut,
1,
data,
q=nbins_or_partition_bounds, labels=False,
)
|
python
|
def quantiles(data, nbins_or_partition_bounds):
"""
Compute rowwise array quantiles on an input.
"""
return apply_along_axis(
qcut,
1,
data,
q=nbins_or_partition_bounds, labels=False,
)
|
[
"def",
"quantiles",
"(",
"data",
",",
"nbins_or_partition_bounds",
")",
":",
"return",
"apply_along_axis",
"(",
"qcut",
",",
"1",
",",
"data",
",",
"q",
"=",
"nbins_or_partition_bounds",
",",
"labels",
"=",
"False",
",",
")"
] |
Compute rowwise array quantiles on an input.
|
[
"Compute",
"rowwise",
"array",
"quantiles",
"on",
"an",
"input",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/quantiles.py#L8-L17
|
train
|
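A worked example of the rowwise qcut dispatch above; apply_along_axis forwards the q= and labels= keywords to each row's qcut call:

import numpy as np
from numpy import apply_along_axis
from pandas import qcut

data = np.array([[1.0, 2.0, 3.0, 4.0],
                 [40.0, 30.0, 20.0, 10.0]])
print(apply_along_axis(qcut, 1, data, q=2, labels=False))
# [[0 0 1 1]
#  [1 1 0 0]]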
quantopian/zipline
|
zipline/finance/metrics/tracker.py
|
MetricsTracker.handle_minute_close
|
def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
"""
self.sync_last_sale_prices(dt, data_portal)
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'minute_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_bar(self._session_count)
self.end_of_bar(
packet,
ledger,
dt,
self._session_count,
data_portal,
)
return packet
|
python
|
def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
"""
self.sync_last_sale_prices(dt, data_portal)
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'minute_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_bar(self._session_count)
self.end_of_bar(
packet,
ledger,
dt,
self._session_count,
data_portal,
)
return packet
|
[
"def",
"handle_minute_close",
"(",
"self",
",",
"dt",
",",
"data_portal",
")",
":",
"self",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"data_portal",
")",
"packet",
"=",
"{",
"'period_start'",
":",
"self",
".",
"_first_session",
",",
"'period_end'",
":",
"self",
".",
"_last_session",
",",
"'capital_base'",
":",
"self",
".",
"_capital_base",
",",
"'minute_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_market_open",
",",
"'period_close'",
":",
"dt",
",",
"}",
",",
"'cumulative_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_first_session",
",",
"'period_close'",
":",
"self",
".",
"_last_session",
",",
"}",
",",
"'progress'",
":",
"self",
".",
"_progress",
"(",
"self",
")",
",",
"'cumulative_risk_metrics'",
":",
"{",
"}",
",",
"}",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"end_of_bar",
"(",
"self",
".",
"_session_count",
")",
"self",
".",
"end_of_bar",
"(",
"packet",
",",
"ledger",
",",
"dt",
",",
"self",
".",
"_session_count",
",",
"data_portal",
",",
")",
"return",
"packet"
] |
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
|
[
"Handles",
"the",
"close",
"of",
"the",
"given",
"minute",
"in",
"minute",
"emission",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L204-L243
|
train
|
quantopian/zipline
|
zipline/finance/metrics/tracker.py
|
MetricsTracker.handle_market_open
|
def handle_market_open(self, session_label, data_portal):
"""Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
"""
ledger = self._ledger
ledger.start_of_session(session_label)
adjustment_reader = data_portal.adjustment_reader
if adjustment_reader is not None:
# this is None when running with a dataframe source
ledger.process_dividends(
session_label,
self._asset_finder,
adjustment_reader,
)
self._current_session = session_label
cal = self._trading_calendar
self._market_open, self._market_close = self._execution_open_and_close(
cal,
session_label,
)
self.start_of_session(ledger, session_label, data_portal)
|
python
|
def handle_market_open(self, session_label, data_portal):
"""Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
"""
ledger = self._ledger
ledger.start_of_session(session_label)
adjustment_reader = data_portal.adjustment_reader
if adjustment_reader is not None:
# this is None when running with a dataframe source
ledger.process_dividends(
session_label,
self._asset_finder,
adjustment_reader,
)
self._current_session = session_label
cal = self._trading_calendar
self._market_open, self._market_close = self._execution_open_and_close(
cal,
session_label,
)
self.start_of_session(ledger, session_label, data_portal)
|
[
"def",
"handle_market_open",
"(",
"self",
",",
"session_label",
",",
"data_portal",
")",
":",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"start_of_session",
"(",
"session_label",
")",
"adjustment_reader",
"=",
"data_portal",
".",
"adjustment_reader",
"if",
"adjustment_reader",
"is",
"not",
"None",
":",
"# this is None when running with a dataframe source",
"ledger",
".",
"process_dividends",
"(",
"session_label",
",",
"self",
".",
"_asset_finder",
",",
"adjustment_reader",
",",
")",
"self",
".",
"_current_session",
"=",
"session_label",
"cal",
"=",
"self",
".",
"_trading_calendar",
"self",
".",
"_market_open",
",",
"self",
".",
"_market_close",
"=",
"self",
".",
"_execution_open_and_close",
"(",
"cal",
",",
"session_label",
",",
")",
"self",
".",
"start_of_session",
"(",
"ledger",
",",
"session_label",
",",
"data_portal",
")"
] |
Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
|
[
"Handles",
"the",
"start",
"of",
"each",
"session",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L245-L275
|
train
|
quantopian/zipline
|
zipline/finance/metrics/tracker.py
|
MetricsTracker.handle_market_close
|
def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'daily_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet
|
python
|
def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
'period_start': self._first_session,
'period_end': self._last_session,
'capital_base': self._capital_base,
'daily_perf': {
'period_open': self._market_open,
'period_close': dt,
},
'cumulative_perf': {
'period_open': self._first_session,
'period_close': self._last_session,
},
'progress': self._progress(self),
'cumulative_risk_metrics': {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet
|
[
"def",
"handle_market_close",
"(",
"self",
",",
"dt",
",",
"data_portal",
")",
":",
"completed_session",
"=",
"self",
".",
"_current_session",
"if",
"self",
".",
"emission_rate",
"==",
"'daily'",
":",
"# this method is called for both minutely and daily emissions, but",
"# this chunk of code here only applies for daily emissions. (since",
"# it's done every minute, elsewhere, for minutely emission).",
"self",
".",
"sync_last_sale_prices",
"(",
"dt",
",",
"data_portal",
")",
"session_ix",
"=",
"self",
".",
"_session_count",
"# increment the day counter before we move markers forward.",
"self",
".",
"_session_count",
"+=",
"1",
"packet",
"=",
"{",
"'period_start'",
":",
"self",
".",
"_first_session",
",",
"'period_end'",
":",
"self",
".",
"_last_session",
",",
"'capital_base'",
":",
"self",
".",
"_capital_base",
",",
"'daily_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_market_open",
",",
"'period_close'",
":",
"dt",
",",
"}",
",",
"'cumulative_perf'",
":",
"{",
"'period_open'",
":",
"self",
".",
"_first_session",
",",
"'period_close'",
":",
"self",
".",
"_last_session",
",",
"}",
",",
"'progress'",
":",
"self",
".",
"_progress",
"(",
"self",
")",
",",
"'cumulative_risk_metrics'",
":",
"{",
"}",
",",
"}",
"ledger",
"=",
"self",
".",
"_ledger",
"ledger",
".",
"end_of_session",
"(",
"session_ix",
")",
"self",
".",
"end_of_session",
"(",
"packet",
",",
"ledger",
",",
"completed_session",
",",
"session_ix",
",",
"data_portal",
",",
")",
"return",
"packet"
] |
Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
|
[
"Handles",
"the",
"close",
"of",
"the",
"given",
"day",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L277-L328
|
train
|
quantopian/zipline
|
zipline/finance/metrics/tracker.py
|
MetricsTracker.handle_simulation_end
|
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet
|
python
|
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
'Simulated {} trading days\n'
'first open: {}\n'
'last close: {}',
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
)
return packet
|
[
"def",
"handle_simulation_end",
"(",
"self",
",",
"data_portal",
")",
":",
"log",
".",
"info",
"(",
"'Simulated {} trading days\\n'",
"'first open: {}\\n'",
"'last close: {}'",
",",
"self",
".",
"_session_count",
",",
"self",
".",
"_trading_calendar",
".",
"session_open",
"(",
"self",
".",
"_first_session",
")",
",",
"self",
".",
"_trading_calendar",
".",
"session_close",
"(",
"self",
".",
"_last_session",
")",
",",
")",
"packet",
"=",
"{",
"}",
"self",
".",
"end_of_simulation",
"(",
"packet",
",",
"self",
".",
"_ledger",
",",
"self",
".",
"_trading_calendar",
",",
"self",
".",
"_sessions",
",",
"data_portal",
",",
"self",
".",
"_benchmark_source",
",",
")",
"return",
"packet"
] |
When the simulation is complete, run the full period risk report
and send it out on the results socket.
|
[
"When",
"the",
"simulation",
"is",
"complete",
"run",
"the",
"full",
"period",
"risk",
"report",
"and",
"send",
"it",
"out",
"on",
"the",
"results",
"socket",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/tracker.py#L330-L353
|
train
|
quantopian/zipline
|
zipline/extensions.py
|
create_args
|
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level.
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name])
|
python
|
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name])
|
[
"def",
"create_args",
"(",
"args",
",",
"root",
")",
":",
"extension_args",
"=",
"{",
"}",
"for",
"arg",
"in",
"args",
":",
"parse_extension_arg",
"(",
"arg",
",",
"extension_args",
")",
"for",
"name",
"in",
"sorted",
"(",
"extension_args",
",",
"key",
"=",
"len",
")",
":",
"path",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"update_namespace",
"(",
"root",
",",
"path",
",",
"extension_args",
"[",
"name",
"]",
")"
] |
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
|
[
"Encapsulates",
"a",
"set",
"of",
"custom",
"command",
"line",
"arguments",
"in",
"key",
"=",
"value",
"or",
"key",
".",
"namespace",
"=",
"value",
"form",
"into",
"a",
"chain",
"of",
"Namespace",
"objects",
"where",
"each",
"next",
"level",
"is",
"an",
"attribute",
"of",
"the",
"Namespace",
"object",
"on",
"the",
"current",
"level"
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L6-L28
|
train
|
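A usage sketch for create_args, assuming the Namespace class referenced by update_namespace (defined in the same zipline/extensions.py module) is importable alongside it:

from zipline.extensions import Namespace, create_args

root = Namespace()
create_args(['color=red', 'size.width=10', 'size.height=20'], root)

print(root.color)        # 'red'
print(root.size.width)   # '10' -- values are kept as strings
print(root.size.height)  # '20' -- 'size' is a single shared Namespace level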
quantopian/zipline
|
zipline/extensions.py
|
parse_extension_arg
|
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value
|
python
|
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value
|
[
"def",
"parse_extension_arg",
"(",
"arg",
",",
"arg_dict",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"r'^(([^\\d\\W]\\w*)(\\.[^\\d\\W]\\w*)*)=(.*)$'",
",",
"arg",
")",
"if",
"match",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"invalid extension argument '%s', must be in key=value form\"",
"%",
"arg",
")",
"name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"value",
"=",
"match",
".",
"group",
"(",
"4",
")",
"arg_dict",
"[",
"name",
"]",
"=",
"value"
] |
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
|
[
"Converts",
"argument",
"strings",
"in",
"key",
"=",
"value",
"or",
"key",
".",
"namespace",
"=",
"value",
"form",
"to",
"dictionary",
"entries"
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L31-L53
|
train
|
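The parser above can be exercised directly; because the dotted-name group cannot contain '=', the regex splits on the first '=' and keeps the full dotted key intact:

from zipline.extensions import parse_extension_arg

args = {}
parse_extension_arg('broker.retries=3', args)
parse_extension_arg('color=red', args)
print(args)  # {'broker.retries': '3', 'color': 'red'}

try:
    parse_extension_arg('not-a-pair', args)
except ValueError as e:
    print(e)  # invalid extension argument 'not-a-pair', must be in key=value form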
quantopian/zipline
|
zipline/extensions.py
|
update_namespace
|
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name)
|
python
|
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name)
|
[
"def",
"update_namespace",
"(",
"namespace",
",",
"path",
",",
"name",
")",
":",
"if",
"len",
"(",
"path",
")",
"==",
"1",
":",
"setattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
",",
"name",
")",
"else",
":",
"if",
"hasattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
":",
"if",
"isinstance",
"(",
"getattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
",",
"six",
".",
"string_types",
")",
":",
"raise",
"ValueError",
"(",
"\"Conflicting assignments at namespace\"",
"\" level '%s'\"",
"%",
"path",
"[",
"0",
"]",
")",
"else",
":",
"a",
"=",
"Namespace",
"(",
")",
"setattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
",",
"a",
")",
"update_namespace",
"(",
"getattr",
"(",
"namespace",
",",
"path",
"[",
"0",
"]",
")",
",",
"path",
"[",
"1",
":",
"]",
",",
"name",
")"
] |
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
|
[
"A",
"recursive",
"function",
"that",
"takes",
"a",
"root",
"element",
"list",
"of",
"namespaces",
"and",
"the",
"value",
"being",
"stored",
"and",
"assigns",
"namespaces",
"to",
"the",
"root",
"object",
"via",
"a",
"chain",
"of",
"Namespace",
"objects",
"connected",
"through",
"attributes"
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L56-L83
|
train
|
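The recursion above can also be driven directly; note the guard that refuses to extend a path through a level that already holds a string value:

from zipline.extensions import Namespace, update_namespace

ns = Namespace()
update_namespace(ns, ['a', 'b', 'c'], 'value')
print(ns.a.b.c)  # 'value'

try:
    # 'c' already holds a string, so it cannot become a namespace level.
    update_namespace(ns, ['a', 'b', 'c', 'd'], 'deeper')
except ValueError as e:
    print(e)  # Conflicting assignments at namespace level 'c'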
quantopian/zipline
|
zipline/extensions.py
|
create_registry
|
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface
|
python
|
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface
|
[
"def",
"create_registry",
"(",
"interface",
")",
":",
"if",
"interface",
"in",
"custom_types",
":",
"raise",
"ValueError",
"(",
"'there is already a Registry instance '",
"'for the specified type'",
")",
"custom_types",
"[",
"interface",
"]",
"=",
"Registry",
"(",
"interface",
")",
"return",
"interface"
] |
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
|
[
"Create",
"a",
"new",
"registry",
"for",
"an",
"extensible",
"interface",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L244-L263
|
train
|
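Because create_registry returns the interface unaltered, it composes naturally as a class decorator. DataBackend here is a hypothetical interface invented for this sketch, not a real zipline type:

from zipline.extensions import create_registry

@create_registry
class DataBackend(object):
    """A hypothetical extensible interface."""

# Calling create_registry a second time for the same type raises:
# ValueError: there is already a Registry instance for the specified type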
quantopian/zipline
|
zipline/extensions.py
|
Registry.load
|
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
)
|
python
|
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
)
|
[
"def",
"load",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"self",
".",
"_factories",
"[",
"name",
"]",
"(",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"no %s factory registered under name %r, options are: %r\"",
"%",
"(",
"self",
".",
"interface",
".",
"__name__",
",",
"name",
",",
"sorted",
"(",
"self",
".",
"_factories",
")",
")",
",",
")"
] |
Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
|
[
"Construct",
"an",
"object",
"from",
"a",
"registered",
"factory",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/extensions.py#L110-L124
|
train
|
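A sketch of the load/miss behavior. The register(name, factory) call is an assumption of this sketch (only load and the _factories dict appear in this excerpt), and Blotter is a stand-in interface:

from zipline.extensions import Registry

class Blotter(object):
    pass

registry = Registry(Blotter)
registry.register('simulation', Blotter)   # assumed registration API
print(type(registry.load('simulation')))   # the factory is called: a Blotter instance

try:
    registry.load('missing')
except ValueError as e:
    print(e)  # no Blotter factory registered under name 'missing', options are: ['simulation']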
quantopian/zipline
|
zipline/finance/commission.py
|
calculate_per_unit_commission
|
def calculate_per_unit_commission(order,
transaction,
cost_per_unit,
initial_commission,
min_trade_cost):
"""
If there is a minimum commission:
If the order hasn't had a commission paid yet, pay the minimum
commission.
If the order has paid a commission, start paying additional
commission once the minimum commission has been reached.
If there is no minimum commission:
Pay commission based on number of units in the transaction.
"""
additional_commission = abs(transaction.amount * cost_per_unit)
if order.commission == 0:
# no commission paid yet, pay at least the minimum plus a one-time
# exchange fee.
return max(min_trade_cost, additional_commission + initial_commission)
else:
# we've already paid some commission, so figure out how much we
# would be paying if we only counted per unit.
per_unit_total = \
abs(order.filled * cost_per_unit) + \
additional_commission + \
initial_commission
if per_unit_total < min_trade_cost:
# if we haven't hit the minimum threshold yet, don't pay
# additional commission
return 0
else:
# we've exceeded the threshold, so pay more commission.
return per_unit_total - order.commission
|
python
|
def calculate_per_unit_commission(order,
transaction,
cost_per_unit,
initial_commission,
min_trade_cost):
"""
If there is a minimum commission:
If the order hasn't had a commission paid yet, pay the minimum
commission.
If the order has paid a commission, start paying additional
commission once the minimum commission has been reached.
If there is no minimum commission:
Pay commission based on number of units in the transaction.
"""
additional_commission = abs(transaction.amount * cost_per_unit)
if order.commission == 0:
# no commission paid yet, pay at least the minimum plus a one-time
# exchange fee.
return max(min_trade_cost, additional_commission + initial_commission)
else:
# we've already paid some commission, so figure out how much we
# would be paying if we only counted per unit.
per_unit_total = \
abs(order.filled * cost_per_unit) + \
additional_commission + \
initial_commission
if per_unit_total < min_trade_cost:
# if we haven't hit the minimum threshold yet, don't pay
# additional commission
return 0
else:
# we've exceeded the threshold, so pay more commission.
return per_unit_total - order.commission
|
[
"def",
"calculate_per_unit_commission",
"(",
"order",
",",
"transaction",
",",
"cost_per_unit",
",",
"initial_commission",
",",
"min_trade_cost",
")",
":",
"additional_commission",
"=",
"abs",
"(",
"transaction",
".",
"amount",
"*",
"cost_per_unit",
")",
"if",
"order",
".",
"commission",
"==",
"0",
":",
"# no commission paid yet, pay at least the minimum plus a one-time",
"# exchange fee.",
"return",
"max",
"(",
"min_trade_cost",
",",
"additional_commission",
"+",
"initial_commission",
")",
"else",
":",
"# we've already paid some commission, so figure out how much we",
"# would be paying if we only counted per unit.",
"per_unit_total",
"=",
"abs",
"(",
"order",
".",
"filled",
"*",
"cost_per_unit",
")",
"+",
"additional_commission",
"+",
"initial_commission",
"if",
"per_unit_total",
"<",
"min_trade_cost",
":",
"# if we haven't hit the minimum threshold yet, don't pay",
"# additional commission",
"return",
"0",
"else",
":",
"# we've exceeded the threshold, so pay more commission.",
"return",
"per_unit_total",
"-",
"order",
".",
"commission"
] |
If there is a minimum commission:
If the order hasn't had a commission paid yet, pay the minimum
commission.
If the order has paid a commission, start paying additional
commission once the minimum commission has been reached.
If there is no minimum commission:
Pay commission based on number of units in the transaction.
|
[
"If",
"there",
"is",
"a",
"minimum",
"commission",
":",
"If",
"the",
"order",
"hasn",
"t",
"had",
"a",
"commission",
"paid",
"yet",
"pay",
"the",
"minimum",
"commission",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/commission.py#L102-L138
|
train
|
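A worked sketch of the minimum-commission branches above, using namedtuple stand-ins for zipline's real Order and Transaction objects (only the attributes the function reads are modeled):

from collections import namedtuple
from zipline.finance.commission import calculate_per_unit_commission

Order = namedtuple('Order', ['commission', 'filled'])  # stand-in for the real Order
Txn = namedtuple('Txn', ['amount'])                    # stand-in for the real Transaction

# First fill of 100 shares at $0.005/share with a $1.00 minimum:
# per-unit cost is $0.50, so the $1.00 minimum is charged instead.
first = calculate_per_unit_commission(
    Order(commission=0, filled=0), Txn(amount=100),
    cost_per_unit=0.005, initial_commission=0, min_trade_cost=1.0,
)
print(first)   # 1.0

# Second fill of 400 shares: the cumulative per-unit total is
# abs(100 * 0.005) + abs(400 * 0.005) = 2.50, minus the 1.00 already paid.
second = calculate_per_unit_commission(
    Order(commission=1.0, filled=100), Txn(amount=400),
    cost_per_unit=0.005, initial_commission=0, min_trade_cost=1.0,
)
print(second)  # 1.5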
quantopian/zipline
|
zipline/finance/commission.py
|
PerDollar.calculate
|
def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share
|
python
|
def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share
|
[
"def",
"calculate",
"(",
"self",
",",
"order",
",",
"transaction",
")",
":",
"cost_per_share",
"=",
"transaction",
".",
"price",
"*",
"self",
".",
"cost_per_dollar",
"return",
"abs",
"(",
"transaction",
".",
"amount",
")",
"*",
"cost_per_share"
] |
Pay commission based on dollar value of shares.
|
[
"Pay",
"commission",
"based",
"on",
"dollar",
"value",
"of",
"shares",
"."
] |
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
|
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/commission.py#L364-L369
|
train
|
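A quick arithmetic check of the dollar-based model above. The Txn stand-in models only the two attributes calculate reads, and the `cost` constructor argument name is an assumption of this sketch:

from collections import namedtuple
from zipline.finance.commission import PerDollar

Txn = namedtuple('Txn', ['price', 'amount'])  # stand-in for the real Transaction

model = PerDollar(cost=0.0015)  # assumed kwarg: $0.0015 of commission per dollar traded
# 100 shares at $50.00: 50.0 * 0.0015 = $0.075 per share, times 100 shares = $7.50.
print(model.calculate(None, Txn(price=50.0, amount=100)))  # 7.5 (order is unused)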