Dataset schema (per-record fields, with length statistics from the source dump):

| field | type | min | max |
|---|---|---|---|
| repo | string | 7 | 55 |
| path | string | 4 | 223 |
| func_name | string | 1 | 134 |
| original_string | string | 75 | 104k |
| language | string (1 distinct value) | | |
| code | string | 75 | 104k |
| code_tokens | list | 19 | 28.4k |
| docstring | string | 1 | 46.9k |
| docstring_tokens | list | 1 | 1.97k |
| sha | string | 40 | 40 |
| url | string | 87 | 315 |
| partition | string (1 distinct value) | | |
repo: quantopian/zipline
path: zipline/finance/metrics/metric.py
func_name: _ClassicRiskMetrics.risk_metric_period

```python
def risk_metric_period(cls,
                       start_session,
                       end_session,
                       algorithm_returns,
                       benchmark_returns,
                       algorithm_leverages):
    """
    Creates a dictionary representing the state of the risk report.

    Parameters
    ----------
    start_session : pd.Timestamp
        Start of period (inclusive) to produce metrics on
    end_session : pd.Timestamp
        End of period (inclusive) to produce metrics on
    algorithm_returns : pd.Series(pd.Timestamp -> float)
        Series of algorithm returns as of the end of each session
    benchmark_returns : pd.Series(pd.Timestamp -> float)
        Series of benchmark returns as of the end of each session
    algorithm_leverages : pd.Series(pd.Timestamp -> float)
        Series of algorithm leverages as of the end of each session

    Returns
    -------
    risk_metric : dict[str, any]
        Dict of metrics with fields like:
            {
                'algorithm_period_return': 0.0,
                'benchmark_period_return': 0.0,
                'treasury_period_return': 0,
                'excess_return': 0.0,
                'alpha': 0.0,
                'beta': 0.0,
                'sharpe': 0.0,
                'sortino': 0.0,
                'period_label': '1970-01',
                'trading_days': 0,
                'algo_volatility': 0.0,
                'benchmark_volatility': 0.0,
                'max_drawdown': 0.0,
                'max_leverage': 0.0,
            }
    """
    algorithm_returns = algorithm_returns[
        (algorithm_returns.index >= start_session) &
        (algorithm_returns.index <= end_session)
    ]

    # Benchmark needs to be masked to the same dates as the algo returns
    benchmark_returns = benchmark_returns[
        (benchmark_returns.index >= start_session) &
        (benchmark_returns.index <= algorithm_returns.index[-1])
    ]

    benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
    algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]

    alpha, beta = ep.alpha_beta_aligned(
        algorithm_returns.values,
        benchmark_returns.values,
    )

    sharpe = ep.sharpe_ratio(algorithm_returns)

    # The consumer currently expects a 0.0 value for sharpe in period,
    # this differs from cumulative which was np.nan.
    # When factoring out the sharpe_ratio, the different return types
    # were collapsed into `np.nan`.
    # TODO: Either fix consumer to accept `np.nan` or make the
    # `sharpe_ratio` return type configurable.
    # In the meantime, convert nan values to 0.0
    if pd.isnull(sharpe):
        sharpe = 0.0

    sortino = ep.sortino_ratio(
        algorithm_returns.values,
        _downside_risk=ep.downside_risk(algorithm_returns.values),
    )

    rval = {
        'algorithm_period_return': algorithm_period_returns,
        'benchmark_period_return': benchmark_period_returns,
        'treasury_period_return': 0,
        'excess_return': algorithm_period_returns,
        'alpha': alpha,
        'beta': beta,
        'sharpe': sharpe,
        'sortino': sortino,
        'period_label': end_session.strftime("%Y-%m"),
        'trading_days': len(benchmark_returns),
        'algo_volatility': ep.annual_volatility(algorithm_returns),
        'benchmark_volatility': ep.annual_volatility(benchmark_returns),
        'max_drawdown': ep.max_drawdown(algorithm_returns.values),
        'max_leverage': algorithm_leverages.max(),
    }

    # check if a field in rval is nan or inf, and replace it with None
    # except period_label which is always a str
    return {
        k: (
            None
            if k != 'period_label' and not np.isfinite(v) else
            v
        )
        for k, v in iteritems(rval)
    }
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/metric.py#L559-L666
partition: train
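For context, a minimal standalone sketch (not part of the dataset record) of the masking and empyrical calls used above, on made-up daily returns; `ep` in the function is the `empyrical` package and `iteritems` comes from `six`:

```python
import numpy as np
import pandas as pd
import empyrical as ep  # the `ep` used in the function above

# Toy daily return series over five business days (invented numbers).
sessions = pd.date_range('2014-01-02', periods=5, freq='B')
algo = pd.Series([0.01, -0.02, 0.015, 0.0, 0.005], index=sessions)
bench = pd.Series([0.008, -0.01, 0.01, 0.002, 0.004], index=sessions)

start, end = sessions[0], sessions[-1]
algo = algo[(algo.index >= start) & (algo.index <= end)]
# Mask the benchmark to the same dates as the algo returns.
bench = bench[(bench.index >= start) & (bench.index <= algo.index[-1])]

alpha, beta = ep.alpha_beta_aligned(algo.values, bench.values)
print(ep.cum_returns(algo).iloc[-1], ep.sharpe_ratio(algo), alpha, beta)
```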
repo: quantopian/zipline
path: zipline/assets/roll_finder.py
func_name: RollFinder._get_active_contract_at_offset

```python
def _get_active_contract_at_offset(self, root_symbol, dt, offset):
    """
    For the given root symbol, find the contract that is considered active
    on a specific date at a specific offset.
    """
    oc = self.asset_finder.get_ordered_contracts(root_symbol)
    session = self.trading_calendar.minute_to_session_label(dt)
    front = oc.contract_before_auto_close(session.value)
    back = oc.contract_at_offset(front, 1, dt.value)
    if back is None:
        return front
    primary = self._active_contract(oc, front, back, session)
    return oc.contract_at_offset(primary, offset, session.value)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L33-L45
partition: train
repo: quantopian/zipline
path: zipline/assets/roll_finder.py
func_name: RollFinder.get_contract_center

```python
def get_contract_center(self, root_symbol, dt, offset):
    """
    Parameters
    ----------
    root_symbol : str
        The root symbol for the contract chain.
    dt : Timestamp
        The datetime for which to retrieve the current contract.
    offset : int
        The offset from the primary contract.
        0 is the primary, 1 is the secondary, etc.

    Returns
    -------
    Future
        The active future contract at the given dt.
    """
    return self._get_active_contract_at_offset(root_symbol, dt, offset)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L47-L64
partition: train
repo: quantopian/zipline
path: zipline/assets/roll_finder.py
func_name: RollFinder.get_rolls

```python
def get_rolls(self, root_symbol, start, end, offset):
    """
    Get the rolls, i.e. the session at which to hop from contract to
    contract in the chain.

    Parameters
    ----------
    root_symbol : str
        The root symbol for which to calculate rolls.
    start : Timestamp
        Start of the date range.
    end : Timestamp
        End of the date range.
    offset : int
        Offset from the primary.

    Returns
    -------
    rolls - list[tuple(sid, roll_date)]
        A list of rolls, where the first value is the first active `sid`,
        and the `roll_date` is the session on which to hop to the next
        contract. The last pair in the chain has a value of `None` since
        the roll is after the range.
    """
    oc = self.asset_finder.get_ordered_contracts(root_symbol)
    front = self._get_active_contract_at_offset(root_symbol, end, 0)
    back = oc.contract_at_offset(front, 1, end.value)
    if back is not None:
        end_session = self.trading_calendar.minute_to_session_label(end)
        first = self._active_contract(oc, front, back, end_session)
    else:
        first = front
    first_contract = oc.sid_to_contract[first]
    rolls = [((first_contract >> offset).contract.sid, None)]
    tc = self.trading_calendar
    sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
                                    tc.minute_to_session_label(end))
    freq = sessions.freq
    if first == front:
        # This is a bit tricky to grasp. Once we have the active contract
        # on the given end date, we want to start walking backwards towards
        # the start date and checking for rolls. For this, we treat the
        # previous month's contract as the 'first' contract, and the
        # contract we just found to be active as the 'back'. As we walk
        # towards the start date, if the 'back' is no longer active, we add
        # that date as a roll.
        curr = first_contract << 1
    else:
        curr = first_contract << 2
    session = sessions[-1]

    while session > start and curr is not None:
        front = curr.contract.sid
        back = rolls[0][0]
        prev_c = curr.prev
        while session > start:
            prev = session - freq
            if prev_c is not None:
                if prev < prev_c.contract.auto_close_date:
                    break
            if back != self._active_contract(oc, front, back, prev):
                # TODO: Instead of listing each contract with its roll date
                # as tuples, create a series which maps every day to the
                # active contract on that day.
                rolls.insert(0, ((curr >> offset).contract.sid, session))
                break
            session = prev
        curr = curr.prev
        if curr is not None:
            session = min(session, curr.contract.auto_close_date + freq)

    return rolls
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L66-L137
partition: train
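The TODO inside `get_rolls` suggests mapping every session to its active contract. A hypothetical sketch of doing that from the `[(sid, roll_date), ...]` output; the sids, roll date, and sessions below are invented:

```python
import pandas as pd

# Pretend output of get_rolls: sid 1001 rolls into 1002 on 2014-01-06;
# the final pair carries None because its roll falls after the range.
sessions = pd.date_range('2014-01-02', periods=6, freq='B')
rolls = [(1001, pd.Timestamp('2014-01-06')), (1002, None)]

active = []
i = 0
for session in sessions:
    sid, roll_date = rolls[i]
    # Hop to the next contract once we reach its roll session.
    if roll_date is not None and session >= roll_date:
        i += 1
        sid = rolls[i][0]
    active.append(sid)

print(pd.Series(active, index=sessions))  # session -> active sid
```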
repo: quantopian/zipline
path: zipline/assets/roll_finder.py
func_name: VolumeRollFinder._active_contract

```python
def _active_contract(self, oc, front, back, dt):
    r"""
    Return the active contract based on the previous trading day's volume.

    In the rare case that a double volume switch occurs we treat the
    first switch as the roll. Take the following case for example:

    | +++++             _____
    |      +   __      /       <--- 'G'
    |       ++/++\++++/++
    |       _/    \__/   +
    |      /              +
    | ____/                +   <--- 'F'
    |_________|__|___|________
              a  b  c          <--- Switches

    We should treat 'a' as the roll date rather than 'c' because from the
    perspective of 'a', if a switch happens and we are pretty close to
    the auto-close date, we would probably assume it is time to roll.
    This means that for every date after 'a', `data.current(cf,
    'contract')` should return the 'G' contract.
    """
    front_contract = oc.sid_to_contract[front].contract
    back_contract = oc.sid_to_contract[back].contract

    tc = self.trading_calendar
    trading_day = tc.day
    prev = dt - trading_day
    get_value = self.session_reader.get_value

    # If the front contract is past its auto close date it cannot be the
    # active contract, so return the back contract. Similarly, if the back
    # contract has not even started yet, just return the front contract.
    # The reason for using 'prev' to see if the contracts are alive instead
    # of using 'dt' is because we need to get each contract's volume on the
    # previous day, so we need to make sure that each contract exists on
    # 'prev' in order to call 'get_value' below.
    if dt > min(front_contract.auto_close_date, front_contract.end_date):
        return back
    elif front_contract.start_date > prev:
        return back
    elif dt > min(back_contract.auto_close_date, back_contract.end_date):
        return front
    elif back_contract.start_date > prev:
        return front

    front_vol = get_value(front, prev, 'volume')
    back_vol = get_value(back, prev, 'volume')
    if back_vol > front_vol:
        return back

    gap_start = max(
        back_contract.start_date,
        front_contract.auto_close_date - (trading_day * self.GRACE_DAYS),
    )
    gap_end = prev - trading_day
    if dt < gap_start:
        return front

    # If we are within `self.GRACE_DAYS` of the front contract's auto close
    # date, and a volume flip happened during that period, return the back
    # contract as the active one.
    sessions = tc.sessions_in_range(
        tc.minute_to_session_label(gap_start),
        tc.minute_to_session_label(gap_end),
    )
    for session in sessions:
        front_vol = get_value(front, session, 'volume')
        back_vol = get_value(back, session, 'volume')
        if back_vol > front_vol:
            return back
    return front
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L170-L241
partition: train
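A toy illustration, with invented volume numbers, of the double-switch rule described in the docstring: the first session on which back volume overtakes front volume is treated as the roll, even if volume later flips back:

```python
import pandas as pd

# Invented daily volumes inside a grace window near the front
# contract's auto-close date.
window = pd.date_range('2014-01-02', periods=5, freq='B')
front_vol = pd.Series([100, 90, 40, 95, 30], index=window)
back_vol = pd.Series([20, 30, 60, 50, 80], index=window)

def first_flip(front_vol, back_vol):
    for session in front_vol.index:
        if back_vol[session] > front_vol[session]:
            return session  # treat the first flip as the roll
    return None

print(first_flip(front_vol, back_vol))  # 2014-01-06, not the later flip
```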
repo: quantopian/zipline
path: zipline/assets/roll_finder.py
func_name: VolumeRollFinder.get_contract_center

```python
def get_contract_center(self, root_symbol, dt, offset):
    """
    Parameters
    ----------
    root_symbol : str
        The root symbol for the contract chain.
    dt : Timestamp
        The datetime for which to retrieve the current contract.
    offset : int
        The offset from the primary contract.
        0 is the primary, 1 is the secondary, etc.

    Returns
    -------
    Future
        The active future contract at the given dt.
    """
    # When determining the center contract on a specific day using volume
    # rolls, simply picking the contract with the highest volume could
    # cause flip-flopping between active contracts each day if the front
    # and back contracts are close in volume. Therefore, information about
    # the surrounding rolls is required. The `get_rolls` logic prevents
    # contracts from being considered active once they have rolled, so
    # incorporating that logic here prevents flip-flopping.
    day = self.trading_calendar.day
    end_date = min(
        dt + (ROLL_DAYS_FOR_CURRENT_CONTRACT * day),
        self.session_reader.last_available_dt,
    )
    rolls = self.get_rolls(
        root_symbol=root_symbol, start=dt, end=end_date, offset=offset,
    )
    sid, acd = rolls[0]
    return self.asset_finder.retrieve_asset(sid)
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/roll_finder.py#L243-L276
partition: train
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: _normalize_array

```python
def _normalize_array(data, missing_value):
    """
    Coerce buffer data for an AdjustedArray into a standard scalar
    representation, returning the coerced array and a dict of arguments to
    pass to np.view to use when providing a user-facing view of the
    underlying data.

    - float* data is coerced to float64 with viewtype float64.
    - int32, int64, and uint32 are converted to int64 with viewtype int64.
    - datetime[*] data is coerced to int64 with a viewtype of
      datetime64[ns].
    - bool_ data is coerced to uint8 with a viewtype of bool_.

    Parameters
    ----------
    data : np.ndarray

    Returns
    -------
    coerced, view_kwargs : (np.ndarray, np.dtype)
    """
    if isinstance(data, LabelArray):
        return data, {}

    data_dtype = data.dtype
    if data_dtype in BOOL_DTYPES:
        return data.astype(uint8), {'dtype': dtype(bool_)}
    elif data_dtype in FLOAT_DTYPES:
        return data.astype(float64), {'dtype': dtype(float64)}
    elif data_dtype in INT_DTYPES:
        return data.astype(int64), {'dtype': dtype(int64)}
    elif is_categorical(data_dtype):
        if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
            raise TypeError(
                "Invalid missing_value for categorical array.\n"
                "Expected None, bytes or unicode. Got %r." % missing_value,
            )
        return LabelArray(data, missing_value), {}
    elif data_dtype.kind == 'M':
        try:
            outarray = data.astype('datetime64[ns]').view('int64')
            return outarray, {'dtype': datetime64ns_dtype}
        except OverflowError:
            raise ValueError(
                "AdjustedArray received a datetime array "
                "not representable as datetime64[ns].\n"
                "Min Date: %s\n"
                "Max Date: %s\n"
                % (data.min(), data.max())
            )
    else:
        raise TypeError(
            "Don't know how to construct AdjustedArray "
            "on data of type %s." % data_dtype
        )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L84-L136
partition: train
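The datetime branch above relies on a numpy property worth seeing in isolation: a `datetime64[ns]` array views losslessly as `int64` nanosecond ticks and back, with no copy. A self-contained sketch:

```python
import numpy as np

dates = np.array(['2014-01-02', '2014-01-03'], dtype='datetime64[D]')
# Coerce to nanosecond precision, then reinterpret the buffer as int64.
as_ints = dates.astype('datetime64[ns]').view('int64')
print(as_ints)                          # nanoseconds since the epoch
print(as_ints.view('datetime64[ns]'))   # round-trips to the same dates
```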
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: _merge_simple

```python
def _merge_simple(adjustment_lists, front_idx, back_idx):
    """
    Merge lists of new and existing adjustments for a given index by
    appending or prepending new adjustments to existing adjustments.

    Notes
    -----
    This method is meant to be used with ``toolz.merge_with`` to merge
    adjustment mappings. In case of a collision ``adjustment_lists``
    contains two lists, existing adjustments at index 0 and new
    adjustments at index 1. When there are no collisions,
    ``adjustment_lists`` contains a single list.

    Parameters
    ----------
    adjustment_lists : list[list[Adjustment]]
        List(s) of new and/or existing adjustments for a given index.
    front_idx : int
        Index of list in ``adjustment_lists`` that should be used as
        baseline in case of a collision.
    back_idx : int
        Index of list in ``adjustment_lists`` that should extend baseline
        list in case of a collision.

    Returns
    -------
    adjustments : list[Adjustment]
        List of merged adjustments for a given index.
    """
    if len(adjustment_lists) == 1:
        return list(adjustment_lists[0])
    else:
        return adjustment_lists[front_idx] + adjustment_lists[back_idx]
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L139-L170
partition: train
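Per the Notes, `_merge_simple` is designed to be partially applied and passed to ``toolz.merge_with``, which hands it a one-element list of value-lists for non-colliding keys and a two-element list on collisions. A runnable sketch with toy keys and values:

```python
from functools import partial
from toolz import merge_with

def _merge_simple(adjustment_lists, front_idx, back_idx):
    if len(adjustment_lists) == 1:
        return list(adjustment_lists[0])
    else:
        return adjustment_lists[front_idx] + adjustment_lists[back_idx]

existing = {3: ['a'], 5: ['b']}  # row index -> adjustments
new = {5: ['c'], 7: ['d']}

# 'append' semantics: existing adjustments first, then new ones.
merged = merge_with(partial(_merge_simple, front_idx=0, back_idx=1),
                    existing, new)
print(merged)  # {3: ['a'], 5: ['b', 'c'], 7: ['d']}
```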
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: ensure_ndarray

```python
def ensure_ndarray(ndarray_or_adjusted_array):
    """
    Return the input as a numpy ndarray.

    This is a no-op if the input is already an ndarray. If the input is an
    adjusted_array, this extracts a read-only view of its internal data
    buffer.

    Parameters
    ----------
    ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array

    Returns
    -------
    out : The input, converted to an ndarray.
    """
    if isinstance(ndarray_or_adjusted_array, ndarray):
        return ndarray_or_adjusted_array
    elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
        return ndarray_or_adjusted_array.data
    else:
        raise TypeError(
            "Can't convert %s to ndarray"
            % type(ndarray_or_adjusted_array).__name__
        )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L345-L368
partition: train
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: _check_window_params

```python
def _check_window_params(data, window_length):
    """
    Check that a window of length `window_length` is well-defined on
    `data`.

    Parameters
    ----------
    data : np.ndarray[ndim=2]
        The array of data to check.
    window_length : int
        Length of the desired window.

    Returns
    -------
    None

    Raises
    ------
    WindowLengthNotPositive
        If window_length < 1.
    WindowLengthTooLong
        If window_length is greater than the number of rows in `data`.
    """
    if window_length < 1:
        raise WindowLengthNotPositive(window_length=window_length)

    if window_length > data.shape[0]:
        raise WindowLengthTooLong(
            nrows=data.shape[0],
            window_length=window_length,
        )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L371-L400
partition: train
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: AdjustedArray.update_adjustments

```python
def update_adjustments(self, adjustments, method):
    """
    Merge ``adjustments`` with existing adjustments, handling index
    collisions according to ``method``.

    Parameters
    ----------
    adjustments : dict[int -> list[Adjustment]]
        The mapping of row indices to lists of adjustments that should be
        appended to existing adjustments.
    method : {'append', 'prepend'}
        How to handle index collisions. If 'append', new adjustments will
        be applied after previously-existing adjustments. If 'prepend',
        new adjustments will be applied before previously-existing
        adjustments.
    """
    try:
        merge_func = _merge_methods[method]
    except KeyError:
        raise ValueError(
            "Invalid merge method %s\n"
            "Valid methods are: %s" % (method, ', '.join(_merge_methods))
        )

    self.adjustments = merge_with(
        merge_func,
        self.adjustments,
        adjustments,
    )
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L209-L236
partition: train
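`_merge_methods` itself is not included in this dump; given `_merge_simple` above, it is presumably a mapping along these lines (treat this as an assumption, not the source):

```python
from functools import partial

# Assumed definition: 'append' keeps existing adjustments (index 0) as
# the baseline, 'prepend' keeps the new ones (index 1) as the baseline.
# `_merge_simple` is the function from the previous record.
_merge_methods = {
    'append': partial(_merge_simple, front_idx=0, back_idx=1),
    'prepend': partial(_merge_simple, front_idx=1, back_idx=0),
}
```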
repo: quantopian/zipline
path: zipline/lib/adjusted_array.py
func_name: AdjustedArray._iterator_type

```python
def _iterator_type(self):
    """
    The iterator produced when `traverse` is called on this Array.
    """
    if isinstance(self._data, LabelArray):
        return LabelWindow
    return CONCRETE_WINDOW_TYPES[self._data.dtype]
```

sha: 77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
url: https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L253-L259
partition: train
quantopian/zipline
zipline/lib/adjusted_array.py
AdjustedArray.traverse
def traverse(self, window_length, offset=0, perspective_offset=0): """ Produce an iterator rolling windows rows over our data. Each emitted window will have `window_length` rows. Parameters ---------- window_length : int The number of rows in each emitted window. offset : int, optional Number of rows to skip before the first window. Default is 0. perspective_offset : int, optional Number of rows past the end of the current window from which to "view" the underlying data. """ data = self._data.copy() _check_window_params(data, window_length) return self._iterator_type( data, self._view_kwargs, self.adjustments, offset, window_length, perspective_offset, rounding_places=None, )
python
def traverse(self, window_length, offset=0, perspective_offset=0): """ Produce an iterator rolling windows rows over our data. Each emitted window will have `window_length` rows. Parameters ---------- window_length : int The number of rows in each emitted window. offset : int, optional Number of rows to skip before the first window. Default is 0. perspective_offset : int, optional Number of rows past the end of the current window from which to "view" the underlying data. """ data = self._data.copy() _check_window_params(data, window_length) return self._iterator_type( data, self._view_kwargs, self.adjustments, offset, window_length, perspective_offset, rounding_places=None, )
[ "def", "traverse", "(", "self", ",", "window_length", ",", "offset", "=", "0", ",", "perspective_offset", "=", "0", ")", ":", "data", "=", "self", ".", "_data", ".", "copy", "(", ")", "_check_window_params", "(", "data", ",", "window_length", ")", "return", "self", ".", "_iterator_type", "(", "data", ",", "self", ".", "_view_kwargs", ",", "self", ".", "adjustments", ",", "offset", ",", "window_length", ",", "perspective_offset", ",", "rounding_places", "=", "None", ",", ")" ]
Produce an iterator of rolling windows over our data. Each emitted window will have `window_length` rows. Parameters ---------- window_length : int The number of rows in each emitted window. offset : int, optional Number of rows to skip before the first window. Default is 0. perspective_offset : int, optional Number of rows past the end of the current window from which to "view" the underlying data.
[ "Produce", "an", "iterator", "of", "rolling", "windows", "over", "our", "data", ".", "Each", "emitted", "window", "will", "have", "window_length", "rows", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L261-L289
train
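A hedged sketch of ``traverse`` on the same (assumed) constructor; windows whose anchor row is at or past the adjustment index see the multiplied history:

import numpy as np
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import Float64Multiply

data = np.arange(12, dtype=np.float64).reshape(6, 2)
adjustments = {3: [Float64Multiply(first_row=0, last_row=2, first_col=0, last_col=1, value=2.0)]}
arr = AdjustedArray(data, adjustments, missing_value=np.nan)

for window in arr.traverse(window_length=2):
    print(window)  # 2-row windows; rows 0-2 print doubled once row 3 is reached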
quantopian/zipline
zipline/lib/adjusted_array.py
AdjustedArray.inspect
def inspect(self): """ Return a string representation of the data stored in this array. """ return dedent( """\ Adjusted Array ({dtype}): Data: {data!r} Adjustments: {adjustments} """ ).format( dtype=self.dtype.name, data=self.data, adjustments=self.adjustments, )
python
def inspect(self): """ Return a string representation of the data stored in this array. """ return dedent( """\ Adjusted Array ({dtype}): Data: {data!r} Adjustments: {adjustments} """ ).format( dtype=self.dtype.name, data=self.data, adjustments=self.adjustments, )
[ "def", "inspect", "(", "self", ")", ":", "return", "dedent", "(", "\"\"\"\\\n Adjusted Array ({dtype}):\n\n Data:\n {data!r}\n\n Adjustments:\n {adjustments}\n \"\"\"", ")", ".", "format", "(", "dtype", "=", "self", ".", "dtype", ".", "name", ",", "data", "=", "self", ".", "data", ",", "adjustments", "=", "self", ".", "adjustments", ",", ")" ]
Return a string representation of the data stored in this array.
[ "Return", "a", "string", "representation", "of", "the", "data", "stored", "in", "this", "array", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L291-L309
train
quantopian/zipline
zipline/lib/adjusted_array.py
AdjustedArray.update_labels
def update_labels(self, func): """ Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray. """ if not isinstance(self.data, LabelArray): raise TypeError( 'update_labels only supported if data is of type LabelArray.' ) # Map the baseline values. self._data = self._data.map(func) # Map each of the adjustments. for _, row_adjustments in iteritems(self.adjustments): for adjustment in row_adjustments: adjustment.value = func(adjustment.value)
python
def update_labels(self, func): """ Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray. """ if not isinstance(self.data, LabelArray): raise TypeError( 'update_labels only supported if data is of type LabelArray.' ) # Map the baseline values. self._data = self._data.map(func) # Map each of the adjustments. for _, row_adjustments in iteritems(self.adjustments): for adjustment in row_adjustments: adjustment.value = func(adjustment.value)
[ "def", "update_labels", "(", "self", ",", "func", ")", ":", "if", "not", "isinstance", "(", "self", ".", "data", ",", "LabelArray", ")", ":", "raise", "TypeError", "(", "'update_labels only supported if data is of type LabelArray.'", ")", "# Map the baseline values.", "self", ".", "_data", "=", "self", ".", "_data", ".", "map", "(", "func", ")", "# Map each of the adjustments.", "for", "_", ",", "row_adjustments", "in", "iteritems", "(", "self", ".", "adjustments", ")", ":", "for", "adjustment", "in", "row_adjustments", ":", "adjustment", ".", "value", "=", "func", "(", "adjustment", ".", "value", ")" ]
Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray.
[ "Map", "a", "function", "over", "baseline", "and", "adjustment", "values", "in", "place", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L311-L328
train
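A sketch of ``update_labels``, assuming ``LabelArray(values, missing_value)`` as the baseline container; the mapping function must return strings:

import numpy as np
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.labelarray import LabelArray

labels = LabelArray(np.array([['aa', 'bb'], ['cc', 'dd']], dtype=object), missing_value=None)
arr = AdjustedArray(labels, {}, missing_value=None)
arr.update_labels(lambda s: s.upper())  # uppercases baseline and adjustment values in place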
quantopian/zipline
zipline/finance/controls.py
TradingControl.handle_violation
def handle_violation(self, asset, amount, datetime, metadata=None): """ Handle a TradingControlViolation, either by raising or logging an error with information about the failure. If dynamic information should be displayed as well, pass it in via `metadata`. """ constraint = self._constraint_msg(metadata) if self.on_error == 'fail': raise TradingControlViolation( asset=asset, amount=amount, datetime=datetime, constraint=constraint) elif self.on_error == 'log': log.error("Order for {amount} shares of {asset} at {dt} " "violates trading constraint {constraint}", amount=amount, asset=asset, dt=datetime, constraint=constraint)
python
def handle_violation(self, asset, amount, datetime, metadata=None): """ Handle a TradingControlViolation, either by raising or logging an error with information about the failure. If dynamic information should be displayed as well, pass it in via `metadata`. """ constraint = self._constraint_msg(metadata) if self.on_error == 'fail': raise TradingControlViolation( asset=asset, amount=amount, datetime=datetime, constraint=constraint) elif self.on_error == 'log': log.error("Order for {amount} shares of {asset} at {dt} " "violates trading constraint {constraint}", amount=amount, asset=asset, dt=datetime, constraint=constraint)
[ "def", "handle_violation", "(", "self", ",", "asset", ",", "amount", ",", "datetime", ",", "metadata", "=", "None", ")", ":", "constraint", "=", "self", ".", "_constraint_msg", "(", "metadata", ")", "if", "self", ".", "on_error", "==", "'fail'", ":", "raise", "TradingControlViolation", "(", "asset", "=", "asset", ",", "amount", "=", "amount", ",", "datetime", "=", "datetime", ",", "constraint", "=", "constraint", ")", "elif", "self", ".", "on_error", "==", "'log'", ":", "log", ".", "error", "(", "\"Order for {amount} shares of {asset} at {dt} \"", "\"violates trading constraint {constraint}\"", ",", "amount", "=", "amount", ",", "asset", "=", "asset", ",", "dt", "=", "datetime", ",", "constraint", "=", "constraint", ")" ]
Handle a TradingControlViolation, either by raising or logging an error with information about the failure. If dynamic information should be displayed as well, pass it in via `metadata`.
[ "Handle", "a", "TradingControlViolation", "either", "by", "raising", "or", "logging", "an", "error", "with", "information", "about", "the", "failure", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L79-L99
train
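A hypothetical subclass showing where ``handle_violation`` fits, assuming ``TradingControl.__init__(self, on_error, **kwargs)``; with ``on_error='fail'`` the violation raises, with ``'log'`` it only logs:

from zipline.finance.controls import TradingControl

class MaxAbsOrder(TradingControl):  # hypothetical control, not part of zipline
    def __init__(self, on_error, max_shares):
        super(MaxAbsOrder, self).__init__(on_error, max_shares=max_shares)
        self.max_shares = max_shares

    def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
        if abs(amount) > self.max_shares:
            self.handle_violation(asset, amount, algo_datetime)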
quantopian/zipline
zipline/finance/controls.py
MaxOrderCount.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we've already placed self.max_count orders today. """ algo_date = algo_datetime.date() # Reset order count if it's a new day. if self.current_date and self.current_date != algo_date: self.orders_placed = 0 self.current_date = algo_date if self.orders_placed >= self.max_count: self.handle_violation(asset, amount, algo_datetime) self.orders_placed += 1
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we've already placed self.max_count orders today. """ algo_date = algo_datetime.date() # Reset order count if it's a new day. if self.current_date and self.current_date != algo_date: self.orders_placed = 0 self.current_date = algo_date if self.orders_placed >= self.max_count: self.handle_violation(asset, amount, algo_datetime) self.orders_placed += 1
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "algo_date", "=", "algo_datetime", ".", "date", "(", ")", "# Reset order count if it's a new day.", "if", "self", ".", "current_date", "and", "self", ".", "current_date", "!=", "algo_date", ":", "self", ".", "orders_placed", "=", "0", "self", ".", "current_date", "=", "algo_date", "if", "self", ".", "orders_placed", ">=", "self", ".", "max_count", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")", "self", ".", "orders_placed", "+=", "1" ]
Fail if we've already placed self.max_count orders today.
[ "Fail", "if", "we", "ve", "already", "placed", "self", ".", "max_count", "orders", "today", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L119-L137
train
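In algorithm code this control is normally installed through the API; a sketch, assuming ``set_max_order_count(max_count, on_error)``:

from zipline.api import order, set_max_order_count, symbol

def initialize(context):
    # Log (rather than raise) once more than 10 orders have been placed in a day.
    set_max_order_count(10, on_error='log')

def handle_data(context, data):
    order(symbol('AAPL'), 1)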
quantopian/zipline
zipline/finance/controls.py
RestrictedListOrder.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the asset is in the restricted_list. """ if self.restrictions.is_restricted(asset, algo_datetime): self.handle_violation(asset, amount, algo_datetime)
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the asset is in the restricted_list. """ if self.restrictions.is_restricted(asset, algo_datetime): self.handle_violation(asset, amount, algo_datetime)
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "if", "self", ".", "restrictions", ".", "is_restricted", "(", "asset", ",", "algo_datetime", ")", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")" ]
Fail if the asset is in the restricted_list.
[ "Fail", "if", "the", "asset", "is", "in", "the", "restricted_list", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L154-L164
train
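A sketch of wiring a restricted list into an algorithm, assuming ``StaticRestrictions`` and ``set_asset_restrictions(restrictions, on_error)``:

from zipline.api import set_asset_restrictions, symbol
from zipline.finance.asset_restrictions import StaticRestrictions

def initialize(context):
    # Any order for AAPL now violates RestrictedListOrder.
    set_asset_restrictions(StaticRestrictions([symbol('AAPL')]), on_error='fail')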
quantopian/zipline
zipline/finance/controls.py
MaxOrderSize.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the magnitude of the given order exceeds either self.max_shares or self.max_notional. """ if self.asset is not None and self.asset != asset: return if self.max_shares is not None and abs(amount) > self.max_shares: self.handle_violation(asset, amount, algo_datetime) current_asset_price = algo_current_data.current(asset, "price") order_value = amount * current_asset_price too_much_value = (self.max_notional is not None and abs(order_value) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime)
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the magnitude of the given order exceeds either self.max_shares or self.max_notional. """ if self.asset is not None and self.asset != asset: return if self.max_shares is not None and abs(amount) > self.max_shares: self.handle_violation(asset, amount, algo_datetime) current_asset_price = algo_current_data.current(asset, "price") order_value = amount * current_asset_price too_much_value = (self.max_notional is not None and abs(order_value) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime)
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "if", "self", ".", "asset", "is", "not", "None", "and", "self", ".", "asset", "!=", "asset", ":", "return", "if", "self", ".", "max_shares", "is", "not", "None", "and", "abs", "(", "amount", ")", ">", "self", ".", "max_shares", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")", "current_asset_price", "=", "algo_current_data", ".", "current", "(", "asset", ",", "\"price\"", ")", "order_value", "=", "amount", "*", "current_asset_price", "too_much_value", "=", "(", "self", ".", "max_notional", "is", "not", "None", "and", "abs", "(", "order_value", ")", ">", "self", ".", "max_notional", ")", "if", "too_much_value", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")" ]
Fail if the magnitude of the given order exceeds either self.max_shares or self.max_notional.
[ "Fail", "if", "the", "magnitude", "of", "the", "given", "order", "exceeds", "either", "self", ".", "max_shares", "or", "self", ".", "max_notional", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L199-L223
train
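A usage sketch, assuming ``set_max_order_size(asset, max_shares, max_notional)``; either bound may be omitted:

from zipline.api import set_max_order_size, symbol

def initialize(context):
    # Reject any single AAPL order above 1000 shares or $100,000 notional.
    set_max_order_size(asset=symbol('AAPL'), max_shares=1000, max_notional=100000.0)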
quantopian/zipline
zipline/finance/controls.py
MaxPositionSize.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the given order would cause the magnitude of our position to be greater in shares than self.max_shares or greater in dollar value than self.max_notional. """ if self.asset is not None and self.asset != asset: return current_share_count = portfolio.positions[asset].amount shares_post_order = current_share_count + amount too_many_shares = (self.max_shares is not None and abs(shares_post_order) > self.max_shares) if too_many_shares: self.handle_violation(asset, amount, algo_datetime) current_price = algo_current_data.current(asset, "price") value_post_order = shares_post_order * current_price too_much_value = (self.max_notional is not None and abs(value_post_order) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime)
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the given order would cause the magnitude of our position to be greater in shares than self.max_shares or greater in dollar value than self.max_notional. """ if self.asset is not None and self.asset != asset: return current_share_count = portfolio.positions[asset].amount shares_post_order = current_share_count + amount too_many_shares = (self.max_shares is not None and abs(shares_post_order) > self.max_shares) if too_many_shares: self.handle_violation(asset, amount, algo_datetime) current_price = algo_current_data.current(asset, "price") value_post_order = shares_post_order * current_price too_much_value = (self.max_notional is not None and abs(value_post_order) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime)
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "if", "self", ".", "asset", "is", "not", "None", "and", "self", ".", "asset", "!=", "asset", ":", "return", "current_share_count", "=", "portfolio", ".", "positions", "[", "asset", "]", ".", "amount", "shares_post_order", "=", "current_share_count", "+", "amount", "too_many_shares", "=", "(", "self", ".", "max_shares", "is", "not", "None", "and", "abs", "(", "shares_post_order", ")", ">", "self", ".", "max_shares", ")", "if", "too_many_shares", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")", "current_price", "=", "algo_current_data", ".", "current", "(", "asset", ",", "\"price\"", ")", "value_post_order", "=", "shares_post_order", "*", "current_price", "too_much_value", "=", "(", "self", ".", "max_notional", "is", "not", "None", "and", "abs", "(", "value_post_order", ")", ">", "self", ".", "max_notional", ")", "if", "too_much_value", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")" ]
Fail if the given order would cause the magnitude of our position to be greater in shares than self.max_shares or greater in dollar value than self.max_notional.
[ "Fail", "if", "the", "given", "order", "would", "cause", "the", "magnitude", "of", "our", "position", "to", "be", "greater", "in", "shares", "than", "self", ".", "max_shares", "or", "greater", "in", "dollar", "value", "than", "self", ".", "max_notional", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L257-L287
train
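The position-size analogue caps the post-order position rather than the order itself; a sketch, assuming ``set_max_position_size``:

from zipline.api import set_max_position_size, symbol

def initialize(context):
    # Violations trigger when the resulting position would exceed either bound.
    set_max_position_size(asset=symbol('AAPL'), max_shares=5000, max_notional=500000.0)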
quantopian/zipline
zipline/finance/controls.py
LongOnly.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we would hold negative shares of asset after completing this order. """ if portfolio.positions[asset].amount + amount < 0: self.handle_violation(asset, amount, algo_datetime)
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we would hold negative shares of asset after completing this order. """ if portfolio.positions[asset].amount + amount < 0: self.handle_violation(asset, amount, algo_datetime)
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "if", "portfolio", ".", "positions", "[", "asset", "]", ".", "amount", "+", "amount", "<", "0", ":", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ")" ]
Fail if we would hold negative shares of asset after completing this order.
[ "Fail", "if", "we", "would", "hold", "negative", "shares", "of", "asset", "after", "completing", "this", "order", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L298-L309
train
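A sketch, assuming the zero-argument ``set_long_only`` API:

from zipline.api import order, set_long_only, symbol

def initialize(context):
    set_long_only()

def handle_data(context, data):
    order(symbol('AAPL'), -1)  # violates unless at least one share is already held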
quantopian/zipline
zipline/finance/controls.py
AssetDateBounds.validate
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the algo has passed this Asset's end_date, or is before the Asset's start date. """ # If the order is for 0 shares, then silently pass through. if amount == 0: return normalized_algo_dt = pd.Timestamp(algo_datetime).normalize() # Fail if the algo is before this Asset's start_date if asset.start_date: normalized_start = pd.Timestamp(asset.start_date).normalize() if normalized_algo_dt < normalized_start: metadata = { 'asset_start_date': normalized_start } self.handle_violation( asset, amount, algo_datetime, metadata=metadata) # Fail if the algo has passed this Asset's end_date if asset.end_date: normalized_end = pd.Timestamp(asset.end_date).normalize() if normalized_algo_dt > normalized_end: metadata = { 'asset_end_date': normalized_end } self.handle_violation( asset, amount, algo_datetime, metadata=metadata)
python
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the algo has passed this Asset's end_date, or is before the Asset's start date. """ # If the order is for 0 shares, then silently pass through. if amount == 0: return normalized_algo_dt = pd.Timestamp(algo_datetime).normalize() # Fail if the algo is before this Asset's start_date if asset.start_date: normalized_start = pd.Timestamp(asset.start_date).normalize() if normalized_algo_dt < normalized_start: metadata = { 'asset_start_date': normalized_start } self.handle_violation( asset, amount, algo_datetime, metadata=metadata) # Fail if the algo has passed this Asset's end_date if asset.end_date: normalized_end = pd.Timestamp(asset.end_date).normalize() if normalized_algo_dt > normalized_end: metadata = { 'asset_end_date': normalized_end } self.handle_violation( asset, amount, algo_datetime, metadata=metadata)
[ "def", "validate", "(", "self", ",", "asset", ",", "amount", ",", "portfolio", ",", "algo_datetime", ",", "algo_current_data", ")", ":", "# If the order is for 0 shares, then silently pass through.", "if", "amount", "==", "0", ":", "return", "normalized_algo_dt", "=", "pd", ".", "Timestamp", "(", "algo_datetime", ")", ".", "normalize", "(", ")", "# Fail if the algo is before this Asset's start_date", "if", "asset", ".", "start_date", ":", "normalized_start", "=", "pd", ".", "Timestamp", "(", "asset", ".", "start_date", ")", ".", "normalize", "(", ")", "if", "normalized_algo_dt", "<", "normalized_start", ":", "metadata", "=", "{", "'asset_start_date'", ":", "normalized_start", "}", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ",", "metadata", "=", "metadata", ")", "# Fail if the algo has passed this Asset's end_date", "if", "asset", ".", "end_date", ":", "normalized_end", "=", "pd", ".", "Timestamp", "(", "asset", ".", "end_date", ")", ".", "normalize", "(", ")", "if", "normalized_algo_dt", ">", "normalized_end", ":", "metadata", "=", "{", "'asset_end_date'", ":", "normalized_end", "}", "self", ".", "handle_violation", "(", "asset", ",", "amount", ",", "algo_datetime", ",", "metadata", "=", "metadata", ")" ]
Fail if the algo has passed this Asset's end_date, or is before the Asset's start date.
[ "Fail", "if", "the", "algo", "has", "passed", "this", "Asset", "s", "end_date", "or", "is", "before", "the", "Asset", "s", "start", "date", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L321-L354
train
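The bounds comparison uses normalized (midnight) timestamps, so an intraday order datetime still compares as its session date:

import pandas as pd

dt = pd.Timestamp('2014-01-02 14:31', tz='UTC')
print(dt.normalize())  # 2014-01-02 00:00:00+00:00, the value compared to start/end dates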
quantopian/zipline
zipline/finance/controls.py
MaxLeverage.validate
def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data): """ Fail if the leverage is greater than the allowed leverage. """ if _account.leverage > self.max_leverage: self.fail()
python
def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data): """ Fail if the leverage is greater than the allowed leverage. """ if _account.leverage > self.max_leverage: self.fail()
[ "def", "validate", "(", "self", ",", "_portfolio", ",", "_account", ",", "_algo_datetime", ",", "_algo_current_data", ")", ":", "if", "_account", ".", "leverage", ">", "self", ".", "max_leverage", ":", "self", ".", "fail", "(", ")" ]
Fail if the leverage is greater than the allowed leverage.
[ "Fail", "if", "the", "leverage", "is", "greater", "than", "the", "allowed", "leverage", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L424-L433
train
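A sketch, assuming ``set_max_leverage(max_leverage)``; this is an account-level control, so a breach raises rather than logs:

from zipline.api import set_max_leverage

def initialize(context):
    set_max_leverage(2.0)  # fail once account leverage exceeds 2x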
quantopian/zipline
zipline/finance/controls.py
MinLeverage.validate
def validate(self, _portfolio, account, algo_datetime, _algo_current_data): """ Make validation checks if we are after the deadline. Fail if the leverage is less than the min leverage. """ if (algo_datetime > self.deadline and account.leverage < self.min_leverage): self.fail()
python
def validate(self, _portfolio, account, algo_datetime, _algo_current_data): """ Make validation checks if we are after the deadline. Fail if the leverage is less than the min leverage. """ if (algo_datetime > self.deadline and account.leverage < self.min_leverage): self.fail()
[ "def", "validate", "(", "self", ",", "_portfolio", ",", "account", ",", "algo_datetime", ",", "_algo_current_data", ")", ":", "if", "(", "algo_datetime", ">", "self", ".", "deadline", "and", "account", ".", "leverage", "<", "self", ".", "min_leverage", ")", ":", "self", ".", "fail", "(", ")" ]
Make validation checks if we are after the deadline. Fail if the leverage is less than the min leverage.
[ "Make", "validation", "checks", "if", "we", "are", "after", "the", "deadline", ".", "Fail", "if", "the", "leverage", "is", "less", "than", "the", "min", "leverage", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/controls.py#L463-L474
train
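A sketch, assuming ``set_min_leverage(min_leverage, grace_period)`` with a pandas Timedelta grace period that sets the deadline:

import pandas as pd
from zipline.api import set_min_leverage

def initialize(context):
    # Require at least 0.5x leverage once 30 calendar days have elapsed.
    set_min_leverage(0.5, grace_period=pd.Timedelta('30 days'))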
quantopian/zipline
zipline/assets/asset_db_migrations.py
alter_columns
def alter_columns(op, name, *columns, **kwargs): """Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. """ selection_string = kwargs.pop('selection_string', None) if kwargs: raise TypeError( 'alter_columns received extra arguments: %r' % sorted(kwargs), ) if selection_string is None: selection_string = ', '.join(column.name for column in columns) tmp_name = '_alter_columns_' + name op.rename_table(name, tmp_name) for column in columns: # Clear any indices that already exist on this table, otherwise we will # fail to create the table because the indices will already be present. # When we create the table below, the indices that we want to preserve # will just get recreated. for table in name, tmp_name: try: op.drop_index('ix_%s_%s' % (table, column.name)) except sa.exc.OperationalError: pass op.create_table(name, *columns) op.execute( 'insert into %s select %s from %s' % ( name, selection_string, tmp_name, ), ) op.drop_table(tmp_name)
python
def alter_columns(op, name, *columns, **kwargs): """Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. """ selection_string = kwargs.pop('selection_string', None) if kwargs: raise TypeError( 'alter_columns received extra arguments: %r' % sorted(kwargs), ) if selection_string is None: selection_string = ', '.join(column.name for column in columns) tmp_name = '_alter_columns_' + name op.rename_table(name, tmp_name) for column in columns: # Clear any indices that already exist on this table, otherwise we will # fail to create the table because the indices will already be present. # When we create the table below, the indices that we want to preserve # will just get recreated. for table in name, tmp_name: try: op.drop_index('ix_%s_%s' % (table, column.name)) except sa.exc.OperationalError: pass op.create_table(name, *columns) op.execute( 'insert into %s select %s from %s' % ( name, selection_string, tmp_name, ), ) op.drop_table(tmp_name)
[ "def", "alter_columns", "(", "op", ",", "name", ",", "*", "columns", ",", "*", "*", "kwargs", ")", ":", "selection_string", "=", "kwargs", ".", "pop", "(", "'selection_string'", ",", "None", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "'alter_columns received extra arguments: %r'", "%", "sorted", "(", "kwargs", ")", ",", ")", "if", "selection_string", "is", "None", ":", "selection_string", "=", "', '", ".", "join", "(", "column", ".", "name", "for", "column", "in", "columns", ")", "tmp_name", "=", "'_alter_columns_'", "+", "name", "op", ".", "rename_table", "(", "name", ",", "tmp_name", ")", "for", "column", "in", "columns", ":", "# Clear any indices that already exist on this table, otherwise we will", "# fail to create the table because the indices will already be present.", "# When we create the table below, the indices that we want to preserve", "# will just get recreated.", "for", "table", "in", "name", ",", "tmp_name", ":", "try", ":", "op", ".", "drop_index", "(", "'ix_%s_%s'", "%", "(", "table", ",", "column", ".", "name", ")", ")", "except", "sa", ".", "exc", ".", "OperationalError", ":", "pass", "op", ".", "create_table", "(", "name", ",", "*", "columns", ")", "op", ".", "execute", "(", "'insert into %s select %s from %s'", "%", "(", "name", ",", "selection_string", ",", "tmp_name", ",", ")", ",", ")", "op", ".", "drop_table", "(", "tmp_name", ")" ]
Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change.
[ "Alter", "columns", "from", "a", "table", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L13-L61
train
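A hedged sketch of how a downgrade step might call ``alter_columns``; the table and column list are illustrative, and any column omitted from the new definition is dropped when the table is rebuilt:

import sqlalchemy as sa

def example_downgrade(op):  # hypothetical migration step
    alter_columns(
        op,
        'example_table',
        sa.Column('sid', sa.Integer, unique=True, nullable=False, primary_key=True),
        sa.Column('symbol', sa.Text),
    )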
quantopian/zipline
zipline/assets/asset_db_migrations.py
downgrade
def downgrade(engine, desired_version): """Downgrades the assets db at the given engine to the desired version. Parameters ---------- engine : Engine An SQLAlchemy engine to the assets database. desired_version : int The desired resulting version for the assets database. """ # Check the version of the db at the engine with engine.begin() as conn: metadata = sa.MetaData(conn) metadata.reflect() version_info_table = metadata.tables['version_info'] starting_version = sa.select((version_info_table.c.version,)).scalar() # Check for accidental upgrade if starting_version < desired_version: raise AssetDBImpossibleDowngrade(db_version=starting_version, desired_version=desired_version) # Check if the desired version is already the db version if starting_version == desired_version: # No downgrade needed return # Create alembic context ctx = MigrationContext.configure(conn) op = Operations(ctx) # Integer keys of downgrades to run # E.g.: [5, 4, 3, 2] would downgrade v6 to v2 downgrade_keys = range(desired_version, starting_version)[::-1] # Disable foreign keys until all downgrades are complete _pragma_foreign_keys(conn, False) # Execute the downgrades in order for downgrade_key in downgrade_keys: _downgrade_methods[downgrade_key](op, conn, version_info_table) # Re-enable foreign keys _pragma_foreign_keys(conn, True)
python
def downgrade(engine, desired_version): """Downgrades the assets db at the given engine to the desired version. Parameters ---------- engine : Engine An SQLAlchemy engine to the assets database. desired_version : int The desired resulting version for the assets database. """ # Check the version of the db at the engine with engine.begin() as conn: metadata = sa.MetaData(conn) metadata.reflect() version_info_table = metadata.tables['version_info'] starting_version = sa.select((version_info_table.c.version,)).scalar() # Check for accidental upgrade if starting_version < desired_version: raise AssetDBImpossibleDowngrade(db_version=starting_version, desired_version=desired_version) # Check if the desired version is already the db version if starting_version == desired_version: # No downgrade needed return # Create alembic context ctx = MigrationContext.configure(conn) op = Operations(ctx) # Integer keys of downgrades to run # E.g.: [5, 4, 3, 2] would downgrade v6 to v2 downgrade_keys = range(desired_version, starting_version)[::-1] # Disable foreign keys until all downgrades are complete _pragma_foreign_keys(conn, False) # Execute the downgrades in order for downgrade_key in downgrade_keys: _downgrade_methods[downgrade_key](op, conn, version_info_table) # Re-enable foreign keys _pragma_foreign_keys(conn, True)
[ "def", "downgrade", "(", "engine", ",", "desired_version", ")", ":", "# Check the version of the db at the engine", "with", "engine", ".", "begin", "(", ")", "as", "conn", ":", "metadata", "=", "sa", ".", "MetaData", "(", "conn", ")", "metadata", ".", "reflect", "(", ")", "version_info_table", "=", "metadata", ".", "tables", "[", "'version_info'", "]", "starting_version", "=", "sa", ".", "select", "(", "(", "version_info_table", ".", "c", ".", "version", ",", ")", ")", ".", "scalar", "(", ")", "# Check for accidental upgrade", "if", "starting_version", "<", "desired_version", ":", "raise", "AssetDBImpossibleDowngrade", "(", "db_version", "=", "starting_version", ",", "desired_version", "=", "desired_version", ")", "# Check if the desired version is already the db version", "if", "starting_version", "==", "desired_version", ":", "# No downgrade needed", "return", "# Create alembic context", "ctx", "=", "MigrationContext", ".", "configure", "(", "conn", ")", "op", "=", "Operations", "(", "ctx", ")", "# Integer keys of downgrades to run", "# E.g.: [5, 4, 3, 2] would downgrade v6 to v2", "downgrade_keys", "=", "range", "(", "desired_version", ",", "starting_version", ")", "[", ":", ":", "-", "1", "]", "# Disable foreign keys until all downgrades are complete", "_pragma_foreign_keys", "(", "conn", ",", "False", ")", "# Execute the downgrades in order", "for", "downgrade_key", "in", "downgrade_keys", ":", "_downgrade_methods", "[", "downgrade_key", "]", "(", "op", ",", "conn", ",", "version_info_table", ")", "# Re-enable foreign keys", "_pragma_foreign_keys", "(", "conn", ",", "True", ")" ]
Downgrades the assets db at the given engine to the desired version. Parameters ---------- engine : Engine An SQLAlchemy engine to the assets database. desired_version : int The desired resulting version for the assets database.
[ "Downgrades", "the", "assets", "db", "at", "the", "given", "engine", "to", "the", "desired", "version", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L65-L109
train
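Typical invocation, with a hypothetical database path:

import sqlalchemy as sa

engine = sa.create_engine('sqlite:///assets.sqlite')  # hypothetical path
downgrade(engine, 4)  # no-op at v4; raises AssetDBImpossibleDowngrade if already below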
quantopian/zipline
zipline/assets/asset_db_migrations.py
downgrades
def downgrades(src): """Decorator for marking that a method is a downgrade from a version to the previous version. Parameters ---------- src : int The version this downgrades from. Returns ------- decorator : callable[(callable) -> callable] The decorator to apply. """ def _(f): destination = src - 1 @do(operator.setitem(_downgrade_methods, destination)) @wraps(f) def wrapper(op, conn, version_info_table): conn.execute(version_info_table.delete()) # clear the version f(op) write_version_info(conn, version_info_table, destination) return wrapper return _
python
def downgrades(src): """Decorator for marking that a method is a downgrade from a version to the previous version. Parameters ---------- src : int The version this downgrades from. Returns ------- decorator : callable[(callable) -> callable] The decorator to apply. """ def _(f): destination = src - 1 @do(operator.setitem(_downgrade_methods, destination)) @wraps(f) def wrapper(op, conn, version_info_table): conn.execute(version_info_table.delete()) # clear the version f(op) write_version_info(conn, version_info_table, destination) return wrapper return _
[ "def", "downgrades", "(", "src", ")", ":", "def", "_", "(", "f", ")", ":", "destination", "=", "src", "-", "1", "@", "do", "(", "operator", ".", "setitem", "(", "_downgrade_methods", ",", "destination", ")", ")", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "op", ",", "conn", ",", "version_info_table", ")", ":", "conn", ".", "execute", "(", "version_info_table", ".", "delete", "(", ")", ")", "# clear the version", "f", "(", "op", ")", "write_version_info", "(", "conn", ",", "version_info_table", ",", "destination", ")", "return", "wrapper", "return", "_" ]
Decorator for marking that a method is a downgrade from a version to the previous version. Parameters ---------- src : int The version this downgrades from. Returns ------- decorator : callable[(callable) -> callable] The decorator to apply.
[ "Decorator", "for", "marking", "that", "a", "method", "is", "a", "downgrade", "from", "a", "version", "to", "the", "previous", "version", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L133-L158
train
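A hypothetical registration showing the decorator's contract: the wrapped function receives only ``op``, and the version bookkeeping is handled by the wrapper:

@downgrades(5)  # registers this step as the v5 -> v4 downgrade
def _downgrade_v5_to_v4(op):  # hypothetical migration
    op.drop_column('equities', 'some_column')  # illustrative body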
quantopian/zipline
zipline/assets/asset_db_migrations.py
_downgrade_v1
def _downgrade_v1(op): """ Downgrade assets db by removing the 'tick_size' column and renaming the 'multiplier' column. """ # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_futures_contracts_root_symbol') op.drop_index('ix_futures_contracts_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('futures_contracts') as batch_op: # Rename 'multiplier' batch_op.alter_column(column_name='multiplier', new_column_name='contract_multiplier') # Delete 'tick_size' batch_op.drop_column('tick_size') # Recreate indices after batch op.create_index('ix_futures_contracts_root_symbol', table_name='futures_contracts', columns=['root_symbol']) op.create_index('ix_futures_contracts_symbol', table_name='futures_contracts', columns=['symbol'], unique=True)
python
def _downgrade_v1(op): """ Downgrade assets db by removing the 'tick_size' column and renaming the 'multiplier' column. """ # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_futures_contracts_root_symbol') op.drop_index('ix_futures_contracts_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('futures_contracts') as batch_op: # Rename 'multiplier' batch_op.alter_column(column_name='multiplier', new_column_name='contract_multiplier') # Delete 'tick_size' batch_op.drop_column('tick_size') # Recreate indices after batch op.create_index('ix_futures_contracts_root_symbol', table_name='futures_contracts', columns=['root_symbol']) op.create_index('ix_futures_contracts_symbol', table_name='futures_contracts', columns=['symbol'], unique=True)
[ "def", "_downgrade_v1", "(", "op", ")", ":", "# Drop indices before batch", "# This is to prevent index collision when creating the temp table", "op", ".", "drop_index", "(", "'ix_futures_contracts_root_symbol'", ")", "op", ".", "drop_index", "(", "'ix_futures_contracts_symbol'", ")", "# Execute batch op to allow column modification in SQLite", "with", "op", ".", "batch_alter_table", "(", "'futures_contracts'", ")", "as", "batch_op", ":", "# Rename 'multiplier'", "batch_op", ".", "alter_column", "(", "column_name", "=", "'multiplier'", ",", "new_column_name", "=", "'contract_multiplier'", ")", "# Delete 'tick_size'", "batch_op", ".", "drop_column", "(", "'tick_size'", ")", "# Recreate indices after batch", "op", ".", "create_index", "(", "'ix_futures_contracts_root_symbol'", ",", "table_name", "=", "'futures_contracts'", ",", "columns", "=", "[", "'root_symbol'", "]", ")", "op", ".", "create_index", "(", "'ix_futures_contracts_symbol'", ",", "table_name", "=", "'futures_contracts'", ",", "columns", "=", "[", "'symbol'", "]", ",", "unique", "=", "True", ")" ]
Downgrade assets db by removing the 'tick_size' column and renaming the 'multiplier' column.
[ "Downgrade", "assets", "db", "by", "removing", "the", "tick_size", "column", "and", "renaming", "the", "multiplier", "column", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L162-L189
train
quantopian/zipline
zipline/assets/asset_db_migrations.py
_downgrade_v2
def _downgrade_v2(op): """ Downgrade assets db by removing the 'auto_close_date' column. """ # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('auto_close_date') # Recreate indices after batch op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
python
def _downgrade_v2(op): """ Downgrade assets db by removing the 'auto_close_date' column. """ # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('auto_close_date') # Recreate indices after batch op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
[ "def", "_downgrade_v2", "(", "op", ")", ":", "# Drop indices before batch", "# This is to prevent index collision when creating the temp table", "op", ".", "drop_index", "(", "'ix_equities_fuzzy_symbol'", ")", "op", ".", "drop_index", "(", "'ix_equities_company_symbol'", ")", "# Execute batch op to allow column modification in SQLite", "with", "op", ".", "batch_alter_table", "(", "'equities'", ")", "as", "batch_op", ":", "batch_op", ".", "drop_column", "(", "'auto_close_date'", ")", "# Recreate indices after batch", "op", ".", "create_index", "(", "'ix_equities_fuzzy_symbol'", ",", "table_name", "=", "'equities'", ",", "columns", "=", "[", "'fuzzy_symbol'", "]", ")", "op", ".", "create_index", "(", "'ix_equities_company_symbol'", ",", "table_name", "=", "'equities'", ",", "columns", "=", "[", "'company_symbol'", "]", ")" ]
Downgrade assets db by removing the 'auto_close_date' column.
[ "Downgrade", "assets", "db", "by", "removing", "the", "auto_close_date", "column", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L193-L212
train
quantopian/zipline
zipline/assets/asset_db_migrations.py
_downgrade_v3
def _downgrade_v3(op): """ Downgrade assets db by adding a not null constraint on ``equities.first_traded`` """ op.create_table( '_new_equities', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text), sa.Column('company_symbol', sa.Text), sa.Column('share_class_symbol', sa.Text), sa.Column('fuzzy_symbol', sa.Text), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer), sa.Column('exchange', sa.Text), ) op.execute( """ insert into _new_equities select * from equities where equities.first_traded is not null """, ) op.drop_table('equities') op.rename_table('_new_equities', 'equities') # we need to make sure the indices have the proper names after the rename op.create_index( 'ix_equities_company_symbol', 'equities', ['company_symbol'], ) op.create_index( 'ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], )
python
def _downgrade_v3(op): """ Downgrade assets db by adding a not null constraint on ``equities.first_traded`` """ op.create_table( '_new_equities', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text), sa.Column('company_symbol', sa.Text), sa.Column('share_class_symbol', sa.Text), sa.Column('fuzzy_symbol', sa.Text), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer), sa.Column('exchange', sa.Text), ) op.execute( """ insert into _new_equities select * from equities where equities.first_traded is not null """, ) op.drop_table('equities') op.rename_table('_new_equities', 'equities') # we need to make sure the indices have the proper names after the rename op.create_index( 'ix_equities_company_symbol', 'equities', ['company_symbol'], ) op.create_index( 'ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], )
[ "def", "_downgrade_v3", "(", "op", ")", ":", "op", ".", "create_table", "(", "'_new_equities'", ",", "sa", ".", "Column", "(", "'sid'", ",", "sa", ".", "Integer", ",", "unique", "=", "True", ",", "nullable", "=", "False", ",", "primary_key", "=", "True", ",", ")", ",", "sa", ".", "Column", "(", "'symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'company_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'share_class_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'fuzzy_symbol'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'asset_name'", ",", "sa", ".", "Text", ")", ",", "sa", ".", "Column", "(", "'start_date'", ",", "sa", ".", "Integer", ",", "default", "=", "0", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'end_date'", ",", "sa", ".", "Integer", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'first_traded'", ",", "sa", ".", "Integer", ",", "nullable", "=", "False", ")", ",", "sa", ".", "Column", "(", "'auto_close_date'", ",", "sa", ".", "Integer", ")", ",", "sa", ".", "Column", "(", "'exchange'", ",", "sa", ".", "Text", ")", ",", ")", "op", ".", "execute", "(", "\"\"\"\n insert into _new_equities\n select * from equities\n where equities.first_traded is not null\n \"\"\"", ",", ")", "op", ".", "drop_table", "(", "'equities'", ")", "op", ".", "rename_table", "(", "'_new_equities'", ",", "'equities'", ")", "# we need to make sure the indices have the proper names after the rename", "op", ".", "create_index", "(", "'ix_equities_company_symbol'", ",", "'equities'", ",", "[", "'company_symbol'", "]", ",", ")", "op", ".", "create_index", "(", "'ix_equities_fuzzy_symbol'", ",", "'equities'", ",", "[", "'fuzzy_symbol'", "]", ",", ")" ]
Downgrade assets db by adding a not null constraint on ``equities.first_traded``
[ "Downgrade", "assets", "db", "by", "adding", "a", "not", "null", "constraint", "on", "equities", ".", "first_traded" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L216-L260
train
quantopian/zipline
zipline/assets/asset_db_migrations.py
_downgrade_v4
def _downgrade_v4(op): """ Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column. """ op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') op.execute("UPDATE equities SET exchange = exchange_full") with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('exchange_full') op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
python
def _downgrade_v4(op): """ Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column. """ op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') op.execute("UPDATE equities SET exchange = exchange_full") with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('exchange_full') op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol'])
[ "def", "_downgrade_v4", "(", "op", ")", ":", "op", ".", "drop_index", "(", "'ix_equities_fuzzy_symbol'", ")", "op", ".", "drop_index", "(", "'ix_equities_company_symbol'", ")", "op", ".", "execute", "(", "\"UPDATE equities SET exchange = exchange_full\"", ")", "with", "op", ".", "batch_alter_table", "(", "'equities'", ")", "as", "batch_op", ":", "batch_op", ".", "drop_column", "(", "'exchange_full'", ")", "op", ".", "create_index", "(", "'ix_equities_fuzzy_symbol'", ",", "table_name", "=", "'equities'", ",", "columns", "=", "[", "'fuzzy_symbol'", "]", ")", "op", ".", "create_index", "(", "'ix_equities_company_symbol'", ",", "table_name", "=", "'equities'", ",", "columns", "=", "[", "'company_symbol'", "]", ")" ]
Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column.
[ "Downgrades", "assets", "db", "by", "copying", "the", "exchange_full", "column", "to", "exchange", "then", "dropping", "the", "exchange_full", "column", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/assets/asset_db_migrations.py#L264-L282
train
quantopian/zipline
zipline/finance/metrics/core.py
_make_metrics_set_core
def _make_metrics_set_core(): """Create a family of metrics sets functions that read from the same metrics set mapping. Returns ------- metrics_sets : mappingproxy The mapping of metrics sets to load functions. register : callable The function which registers new metrics sets in the ``metrics_sets`` mapping. unregister : callable The function which deregisters metrics sets from the ``metrics_sets`` mapping. load : callable The function which loads the ingested metrics sets back into memory. """ _metrics_sets = {} # Expose _metrics_sets through a proxy so that users cannot mutate this # accidentally. Users may go through `register` to update this which will # warn when trampling another metrics set. metrics_sets = mappingproxy(_metrics_sets) def register(name, function=None): """Register a new metrics set. Parameters ---------- name : str The name of the metrics set function : callable The callable which produces the metrics set. Notes ----- This may be used as a decorator if only ``name`` is passed. See Also -------- zipline.finance.metrics.get_metrics_set zipline.finance.metrics.unregister_metrics_set """ if function is None: # allow as decorator with just name. return partial(register, name) if name in _metrics_sets: raise ValueError('metrics set %r is already registered' % name) _metrics_sets[name] = function return function def unregister(name): """Unregister an existing metrics set. Parameters ---------- name : str The name of the metrics set See Also -------- zipline.finance.metrics.register_metrics_set """ try: del _metrics_sets[name] except KeyError: raise ValueError( 'metrics set %r was not already registered' % name, ) def load(name): """Return an instance of the metrics set registered with the given name. Returns ------- metrics : set[Metric] A new instance of the metrics set. Raises ------ ValueError Raised when no metrics set is registered to ``name`` """ try: function = _metrics_sets[name] except KeyError: raise ValueError( 'no metrics set registered as %r, options are: %r' % ( name, sorted(_metrics_sets), ), ) return function() return metrics_sets, register, unregister, load
python
def _make_metrics_set_core(): """Create a family of metrics sets functions that read from the same metrics set mapping. Returns ------- metrics_sets : mappingproxy The mapping of metrics sets to load functions. register : callable The function which registers new metrics sets in the ``metrics_sets`` mapping. unregister : callable The function which deregisters metrics sets from the ``metrics_sets`` mapping. load : callable The function which loads the ingested metrics sets back into memory. """ _metrics_sets = {} # Expose _metrics_sets through a proxy so that users cannot mutate this # accidentally. Users may go through `register` to update this which will # warn when trampling another metrics set. metrics_sets = mappingproxy(_metrics_sets) def register(name, function=None): """Register a new metrics set. Parameters ---------- name : str The name of the metrics set function : callable The callable which produces the metrics set. Notes ----- This may be used as a decorator if only ``name`` is passed. See Also -------- zipline.finance.metrics.get_metrics_set zipline.finance.metrics.unregister_metrics_set """ if function is None: # allow as decorator with just name. return partial(register, name) if name in _metrics_sets: raise ValueError('metrics set %r is already registered' % name) _metrics_sets[name] = function return function def unregister(name): """Unregister an existing metrics set. Parameters ---------- name : str The name of the metrics set See Also -------- zipline.finance.metrics.register_metrics_set """ try: del _metrics_sets[name] except KeyError: raise ValueError( 'metrics set %r was not already registered' % name, ) def load(name): """Return an instance of the metrics set registered with the given name. Returns ------- metrics : set[Metric] A new instance of the metrics set. Raises ------ ValueError Raised when no metrics set is registered to ``name`` """ try: function = _metrics_sets[name] except KeyError: raise ValueError( 'no metrics set registered as %r, options are: %r' % ( name, sorted(_metrics_sets), ), ) return function() return metrics_sets, register, unregister, load
[ "def", "_make_metrics_set_core", "(", ")", ":", "_metrics_sets", "=", "{", "}", "# Expose _metrics_sets through a proxy so that users cannot mutate this", "# accidentally. Users may go through `register` to update this which will", "# warn when trampling another metrics set.", "metrics_sets", "=", "mappingproxy", "(", "_metrics_sets", ")", "def", "register", "(", "name", ",", "function", "=", "None", ")", ":", "\"\"\"Register a new metrics set.\n\n Parameters\n ----------\n name : str\n The name of the metrics set\n function : callable\n The callable which produces the metrics set.\n\n Notes\n -----\n This may be used as a decorator if only ``name`` is passed.\n\n See Also\n --------\n zipline.finance.metrics.get_metrics_set\n zipline.finance.metrics.unregister_metrics_set\n \"\"\"", "if", "function", "is", "None", ":", "# allow as decorator with just name.", "return", "partial", "(", "register", ",", "name", ")", "if", "name", "in", "_metrics_sets", ":", "raise", "ValueError", "(", "'metrics set %r is already registered'", "%", "name", ")", "_metrics_sets", "[", "name", "]", "=", "function", "return", "function", "def", "unregister", "(", "name", ")", ":", "\"\"\"Unregister an existing metrics set.\n\n Parameters\n ----------\n name : str\n The name of the metrics set\n\n See Also\n --------\n zipline.finance.metrics.register_metrics_set\n \"\"\"", "try", ":", "del", "_metrics_sets", "[", "name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'metrics set %r was not already registered'", "%", "name", ",", ")", "def", "load", "(", "name", ")", ":", "\"\"\"Return an instance of the metrics set registered with the given name.\n\n Returns\n -------\n metrics : set[Metric]\n A new instance of the metrics set.\n\n Raises\n ------\n ValueError\n Raised when no metrics set is registered to ``name``\n \"\"\"", "try", ":", "function", "=", "_metrics_sets", "[", "name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'no metrics set registered as %r, options are: %r'", "%", "(", "name", ",", "sorted", "(", "_metrics_sets", ")", ",", ")", ",", ")", "return", "function", "(", ")", "return", "metrics_sets", ",", "register", ",", "unregister", ",", "load" ]
Create a family of metrics sets functions that read from the same metrics set mapping. Returns ------- metrics_sets : mappingproxy The mapping of metrics sets to load functions. register : callable The function which registers new metrics sets in the ``metrics_sets`` mapping. unregister : callable The function which deregisters metrics sets from the ``metrics_sets`` mapping. load : callable The function which loads the ingested metrics sets back into memory.
[ "Create", "a", "family", "of", "metrics", "sets", "functions", "that", "read", "from", "the", "same", "metrics", "set", "mapping", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/metrics/core.py#L6-L103
train
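The returned closures can be exercised directly; a sketch against the factory itself, using ``register`` as a decorator:

from zipline.finance.metrics.core import _make_metrics_set_core

metrics_sets, register, unregister, load = _make_metrics_set_core()

@register('empty')
def empty_metrics_set():
    return set()

assert load('empty') == set()
unregister('empty')
assert 'empty' not in metrics_sets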
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
validate_column_specs
def validate_column_specs(events, columns): """ Verify that the columns of ``events`` can be used by an EarningsEstimatesLoader to serve the BoundColumns described by `columns`. """ required = required_estimates_fields(columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EarningsEstimatesLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) )
python
def validate_column_specs(events, columns): """ Verify that the columns of ``events`` can be used by an EarningsEstimatesLoader to serve the BoundColumns described by `columns`. """ required = required_estimates_fields(columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EarningsEstimatesLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) )
[ "def", "validate_column_specs", "(", "events", ",", "columns", ")", ":", "required", "=", "required_estimates_fields", "(", "columns", ")", "received", "=", "set", "(", "events", ".", "columns", ")", "missing", "=", "required", "-", "received", "if", "missing", ":", "raise", "ValueError", "(", "\"EarningsEstimatesLoader missing required columns {missing}.\\n\"", "\"Got Columns: {received}\\n\"", "\"Expected Columns: {required}\"", ".", "format", "(", "missing", "=", "sorted", "(", "missing", ")", ",", "received", "=", "sorted", "(", "received", ")", ",", "required", "=", "sorted", "(", "required", ")", ",", ")", ")" ]
Verify that the columns of ``events`` can be used by a EarningsEstimatesLoader to serve the BoundColumns described by `columns`.
[ "Verify", "that", "the", "columns", "of", "events", "can", "be", "used", "by", "a", "EarningsEstimatesLoader", "to", "serve", "the", "BoundColumns", "described", "by", "columns", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L74-L92
train
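The validation above is plain set arithmetic over column names. A toy re-creation follows; the field names and the `REQUIRED` set are invented stand-ins for `required_estimates_fields(columns)`.

```python
# Toy version of the required/received/missing check.
import pandas as pd

REQUIRED = {'sid', 'event_date', 'fiscal_year', 'fiscal_quarter'}

events = pd.DataFrame(columns=['sid', 'event_date'])
missing = REQUIRED - set(events.columns)
if missing:
    # The loader raises ValueError here; we just show the message parts.
    print('missing:', sorted(missing))   # ['fiscal_quarter', 'fiscal_year']
```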
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.get_requested_quarter_data
def get_requested_quarter_data(self, zero_qtr_data, zeroth_quarter_idx, stacked_last_per_qtr, num_announcements, dates): """ Selects the requested data for each date. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. zeroth_quarter_idx : pd.Index An index of calendar dates, sid, and normalized quarters, for only the rows that have a next or previous earnings estimate. stacked_last_per_qtr : pd.DataFrame The latest estimate known with the dates, normalized quarter, and sid as the index. num_announcements : int The number of annoucements out the user requested relative to each date in the calendar dates. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. Returns -------- requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns; `dates` are the index and columns are a MultiIndex with sids at the top level and the dataset columns on the bottom. """ zero_qtr_data_idx = zero_qtr_data.index requested_qtr_idx = pd.MultiIndex.from_arrays( [ zero_qtr_data_idx.get_level_values(0), zero_qtr_data_idx.get_level_values(1), self.get_shifted_qtrs( zeroth_quarter_idx.get_level_values( NORMALIZED_QUARTERS, ), num_announcements, ), ], names=[ zero_qtr_data_idx.names[0], zero_qtr_data_idx.names[1], SHIFTED_NORMALIZED_QTRS, ], ) requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx] requested_qtr_data = requested_qtr_data.reset_index( SHIFTED_NORMALIZED_QTRS, ) # Calculate the actual year/quarter being requested and add those in # as columns. (requested_qtr_data[FISCAL_YEAR_FIELD_NAME], requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \ split_normalized_quarters( requested_qtr_data[SHIFTED_NORMALIZED_QTRS] ) # Once we're left with just dates as the index, we can reindex by all # dates so that we have a value for each calendar date. return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
python
def get_requested_quarter_data(self, zero_qtr_data, zeroth_quarter_idx, stacked_last_per_qtr, num_announcements, dates): """ Selects the requested data for each date. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. zeroth_quarter_idx : pd.Index An index of calendar dates, sid, and normalized quarters, for only the rows that have a next or previous earnings estimate. stacked_last_per_qtr : pd.DataFrame The latest estimate known with the dates, normalized quarter, and sid as the index. num_announcements : int The number of annoucements out the user requested relative to each date in the calendar dates. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. Returns -------- requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns; `dates` are the index and columns are a MultiIndex with sids at the top level and the dataset columns on the bottom. """ zero_qtr_data_idx = zero_qtr_data.index requested_qtr_idx = pd.MultiIndex.from_arrays( [ zero_qtr_data_idx.get_level_values(0), zero_qtr_data_idx.get_level_values(1), self.get_shifted_qtrs( zeroth_quarter_idx.get_level_values( NORMALIZED_QUARTERS, ), num_announcements, ), ], names=[ zero_qtr_data_idx.names[0], zero_qtr_data_idx.names[1], SHIFTED_NORMALIZED_QTRS, ], ) requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx] requested_qtr_data = requested_qtr_data.reset_index( SHIFTED_NORMALIZED_QTRS, ) # Calculate the actual year/quarter being requested and add those in # as columns. (requested_qtr_data[FISCAL_YEAR_FIELD_NAME], requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \ split_normalized_quarters( requested_qtr_data[SHIFTED_NORMALIZED_QTRS] ) # Once we're left with just dates as the index, we can reindex by all # dates so that we have a value for each calendar date. return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
[ "def", "get_requested_quarter_data", "(", "self", ",", "zero_qtr_data", ",", "zeroth_quarter_idx", ",", "stacked_last_per_qtr", ",", "num_announcements", ",", "dates", ")", ":", "zero_qtr_data_idx", "=", "zero_qtr_data", ".", "index", "requested_qtr_idx", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "zero_qtr_data_idx", ".", "get_level_values", "(", "0", ")", ",", "zero_qtr_data_idx", ".", "get_level_values", "(", "1", ")", ",", "self", ".", "get_shifted_qtrs", "(", "zeroth_quarter_idx", ".", "get_level_values", "(", "NORMALIZED_QUARTERS", ",", ")", ",", "num_announcements", ",", ")", ",", "]", ",", "names", "=", "[", "zero_qtr_data_idx", ".", "names", "[", "0", "]", ",", "zero_qtr_data_idx", ".", "names", "[", "1", "]", ",", "SHIFTED_NORMALIZED_QTRS", ",", "]", ",", ")", "requested_qtr_data", "=", "stacked_last_per_qtr", ".", "loc", "[", "requested_qtr_idx", "]", "requested_qtr_data", "=", "requested_qtr_data", ".", "reset_index", "(", "SHIFTED_NORMALIZED_QTRS", ",", ")", "# Calculate the actual year/quarter being requested and add those in", "# as columns.", "(", "requested_qtr_data", "[", "FISCAL_YEAR_FIELD_NAME", "]", ",", "requested_qtr_data", "[", "FISCAL_QUARTER_FIELD_NAME", "]", ")", "=", "split_normalized_quarters", "(", "requested_qtr_data", "[", "SHIFTED_NORMALIZED_QTRS", "]", ")", "# Once we're left with just dates as the index, we can reindex by all", "# dates so that we have a value for each calendar date.", "return", "requested_qtr_data", ".", "unstack", "(", "SID_FIELD_NAME", ")", ".", "reindex", "(", "dates", ")" ]
Selects the requested data for each date.

        Parameters
        ----------
        zero_qtr_data : pd.DataFrame
            The 'time zero' data for each calendar date per sid.
        zeroth_quarter_idx : pd.Index
            An index of calendar dates, sid, and normalized quarters, for only
            the rows that have a next or previous earnings estimate.
        stacked_last_per_qtr : pd.DataFrame
            The latest estimate known with the dates, normalized quarter, and
            sid as the index.
        num_announcements : int
            The number of announcements out the user requested relative to
            each date in the calendar dates.
        dates : pd.DatetimeIndex
            The calendar dates for which estimates data is requested.

        Returns
        -------
        requested_qtr_data : pd.DataFrame
            The DataFrame with the latest values for the requested quarter
            for all columns; `dates` are the index and columns are a MultiIndex
            with sids at the top level and the dataset columns on the bottom.
[ "Selects", "the", "requested", "data", "for", "each", "date", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L190-L253
train
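The core move in the function above is rebuilding one level of a MultiIndex (shifting the quarter by `num_announcements`) and using the result with `.loc`. A hedged toy example with invented values:

```python
# Invented data: two quarters of estimates for one sid on one date.
import pandas as pd

stacked = pd.Series(
    [10.0, 11.0],
    index=pd.MultiIndex.from_arrays(
        [pd.to_datetime(['2015-01-05', '2015-01-05']), [1, 1], [56.0, 57.0]],
        names=['dates', 'sid', 'normalized_quarters'],
    ),
)
zero_idx = stacked.index[:1]              # the 'time zero' row
shifted = pd.MultiIndex.from_arrays(
    [zero_idx.get_level_values(0),
     zero_idx.get_level_values(1),
     zero_idx.get_level_values(2) + 1],   # num_announcements == 1
    names=['dates', 'sid', 'shifted_normalized_quarters'],
)
print(stacked.loc[shifted].values)        # [11.]: the next quarter's value
```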
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.get_split_adjusted_asof_idx
def get_split_adjusted_asof_idx(self, dates): """ Compute the index in `dates` where the split-adjusted-asof-date falls. This is the date up to which, and including which, we will need to unapply all adjustments for and then re-apply them as they come in. After this date, adjustments are applied as normal. Parameters ---------- dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. Returns ------- split_adjusted_asof_idx : int The index in `dates` at which the data should be split. """ split_adjusted_asof_idx = dates.searchsorted( self._split_adjusted_asof ) # The split-asof date is after the date index. if split_adjusted_asof_idx == len(dates): split_adjusted_asof_idx = len(dates) - 1 elif self._split_adjusted_asof < dates[0].tz_localize(None): split_adjusted_asof_idx = -1 return split_adjusted_asof_idx
python
def get_split_adjusted_asof_idx(self, dates): """ Compute the index in `dates` where the split-adjusted-asof-date falls. This is the date up to which, and including which, we will need to unapply all adjustments for and then re-apply them as they come in. After this date, adjustments are applied as normal. Parameters ---------- dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. Returns ------- split_adjusted_asof_idx : int The index in `dates` at which the data should be split. """ split_adjusted_asof_idx = dates.searchsorted( self._split_adjusted_asof ) # The split-asof date is after the date index. if split_adjusted_asof_idx == len(dates): split_adjusted_asof_idx = len(dates) - 1 elif self._split_adjusted_asof < dates[0].tz_localize(None): split_adjusted_asof_idx = -1 return split_adjusted_asof_idx
[ "def", "get_split_adjusted_asof_idx", "(", "self", ",", "dates", ")", ":", "split_adjusted_asof_idx", "=", "dates", ".", "searchsorted", "(", "self", ".", "_split_adjusted_asof", ")", "# The split-asof date is after the date index.", "if", "split_adjusted_asof_idx", "==", "len", "(", "dates", ")", ":", "split_adjusted_asof_idx", "=", "len", "(", "dates", ")", "-", "1", "elif", "self", ".", "_split_adjusted_asof", "<", "dates", "[", "0", "]", ".", "tz_localize", "(", "None", ")", ":", "split_adjusted_asof_idx", "=", "-", "1", "return", "split_adjusted_asof_idx" ]
Compute the index in `dates` where the split-adjusted-asof-date falls. This is the date up to which, and including which, we will need to unapply all adjustments for and then re-apply them as they come in. After this date, adjustments are applied as normal. Parameters ---------- dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. Returns ------- split_adjusted_asof_idx : int The index in `dates` at which the data should be split.
[ "Compute", "the", "index", "in", "dates", "where", "the", "split", "-", "adjusted", "-", "asof", "-", "date", "falls", ".", "This", "is", "the", "date", "up", "to", "which", "and", "including", "which", "we", "will", "need", "to", "unapply", "all", "adjustments", "for", "and", "then", "re", "-", "apply", "them", "as", "they", "come", "in", ".", "After", "this", "date", "adjustments", "are", "applied", "as", "normal", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L255-L280
train
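`searchsorted` returns `len(dates)` for a timestamp past the end of the index, so both out-of-range cases need explicit clamping, as the code above does. A small runnable illustration (dates and timestamps invented; time-zone handling omitted):

```python
import pandas as pd

dates = pd.date_range('2015-01-05', periods=5)   # indices 0..4

def asof_idx(asof):
    idx = dates.searchsorted(asof)
    if idx == len(dates):        # asof falls after the whole window
        idx = len(dates) - 1
    elif asof < dates[0]:        # asof falls before the whole window
        idx = -1
    return int(idx)

print(asof_idx(pd.Timestamp('2015-01-07')))  # 2
print(asof_idx(pd.Timestamp('2015-02-01')))  # 4 (clamped to last index)
print(asof_idx(pd.Timestamp('2014-12-01')))  # -1 (before all dates)
```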
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.collect_overwrites_for_sid
def collect_overwrites_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_idx, columns, all_adjustments_for_sid, sid): """ Given a sid, collect all overwrites that should be applied for this sid at each quarter boundary. Parameters ---------- group : pd.DataFrame The data for `sid`. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. sid_idx : int The sid's index in the asset index. columns : list of BoundColumn The columns for which the overwrites should be computed. all_adjustments_for_sid : dict[int -> AdjustedArray] A dictionary of the integer index of each timestamp into the date index, mapped to adjustments that should be applied at that index for the given sid (`sid`). This dictionary is modified as adjustments are collected. sid : int The sid for which overwrites should be computed. """ # If data was requested for only 1 date, there can never be any # overwrites, so skip the extra work. if len(dates) == 1: return next_qtr_start_indices = dates.searchsorted( group[EVENT_DATE_FIELD_NAME].values, side=self.searchsorted_side, ) qtrs_with_estimates = group.index.get_level_values( NORMALIZED_QUARTERS ).values for idx in next_qtr_start_indices: if 0 < idx < len(dates): # Find the quarter being requested in the quarter we're # crossing into. requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid, ].iloc[idx] # Only add adjustments if the next quarter starts somewhere # in our date index for this sid. Our 'next' quarter can # never start at index 0; a starting index of 0 means that # the next quarter's event date was NaT. self.create_overwrites_for_quarter( all_adjustments_for_sid, idx, last_per_qtr, qtrs_with_estimates, requested_quarter, sid, sid_idx, columns )
python
def collect_overwrites_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_idx, columns, all_adjustments_for_sid, sid): """ Given a sid, collect all overwrites that should be applied for this sid at each quarter boundary. Parameters ---------- group : pd.DataFrame The data for `sid`. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. sid_idx : int The sid's index in the asset index. columns : list of BoundColumn The columns for which the overwrites should be computed. all_adjustments_for_sid : dict[int -> AdjustedArray] A dictionary of the integer index of each timestamp into the date index, mapped to adjustments that should be applied at that index for the given sid (`sid`). This dictionary is modified as adjustments are collected. sid : int The sid for which overwrites should be computed. """ # If data was requested for only 1 date, there can never be any # overwrites, so skip the extra work. if len(dates) == 1: return next_qtr_start_indices = dates.searchsorted( group[EVENT_DATE_FIELD_NAME].values, side=self.searchsorted_side, ) qtrs_with_estimates = group.index.get_level_values( NORMALIZED_QUARTERS ).values for idx in next_qtr_start_indices: if 0 < idx < len(dates): # Find the quarter being requested in the quarter we're # crossing into. requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid, ].iloc[idx] # Only add adjustments if the next quarter starts somewhere # in our date index for this sid. Our 'next' quarter can # never start at index 0; a starting index of 0 means that # the next quarter's event date was NaT. self.create_overwrites_for_quarter( all_adjustments_for_sid, idx, last_per_qtr, qtrs_with_estimates, requested_quarter, sid, sid_idx, columns )
[ "def", "collect_overwrites_for_sid", "(", "self", ",", "group", ",", "dates", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "sid_idx", ",", "columns", ",", "all_adjustments_for_sid", ",", "sid", ")", ":", "# If data was requested for only 1 date, there can never be any", "# overwrites, so skip the extra work.", "if", "len", "(", "dates", ")", "==", "1", ":", "return", "next_qtr_start_indices", "=", "dates", ".", "searchsorted", "(", "group", "[", "EVENT_DATE_FIELD_NAME", "]", ".", "values", ",", "side", "=", "self", ".", "searchsorted_side", ",", ")", "qtrs_with_estimates", "=", "group", ".", "index", ".", "get_level_values", "(", "NORMALIZED_QUARTERS", ")", ".", "values", "for", "idx", "in", "next_qtr_start_indices", ":", "if", "0", "<", "idx", "<", "len", "(", "dates", ")", ":", "# Find the quarter being requested in the quarter we're", "# crossing into.", "requested_quarter", "=", "requested_qtr_data", "[", "SHIFTED_NORMALIZED_QTRS", ",", "sid", ",", "]", ".", "iloc", "[", "idx", "]", "# Only add adjustments if the next quarter starts somewhere", "# in our date index for this sid. Our 'next' quarter can", "# never start at index 0; a starting index of 0 means that", "# the next quarter's event date was NaT.", "self", ".", "create_overwrites_for_quarter", "(", "all_adjustments_for_sid", ",", "idx", ",", "last_per_qtr", ",", "qtrs_with_estimates", ",", "requested_quarter", ",", "sid", ",", "sid_idx", ",", "columns", ")" ]
Given a sid, collect all overwrites that should be applied for this sid at each quarter boundary. Parameters ---------- group : pd.DataFrame The data for `sid`. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. sid_idx : int The sid's index in the asset index. columns : list of BoundColumn The columns for which the overwrites should be computed. all_adjustments_for_sid : dict[int -> AdjustedArray] A dictionary of the integer index of each timestamp into the date index, mapped to adjustments that should be applied at that index for the given sid (`sid`). This dictionary is modified as adjustments are collected. sid : int The sid for which overwrites should be computed.
[ "Given", "a", "sid", "collect", "all", "overwrites", "that", "should", "be", "applied", "for", "this", "sid", "at", "each", "quarter", "boundary", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L282-L353
train
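The boundary detection above hinges on `dates.searchsorted(event_dates)` plus the `0 < idx < len(dates)` guard. A toy run with invented dates, using `side='right'` (one of the two sides the loaders use):

```python
import numpy as np
import pandas as pd

dates = pd.date_range('2015-01-05', periods=6)           # indices 0..5
event_dates = np.array(['2015-01-07', '2015-03-01'],
                       dtype='datetime64[ns]')

for idx in dates.searchsorted(event_dates, side='right'):
    # idx == 0 would mean a NaT/too-early event date; idx == len(dates)
    # means the quarter boundary never lands in our window.
    if 0 < idx < len(dates):
        print('overwrite up to index', idx)               # prints 3
```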
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.merge_into_adjustments_for_all_sids
def merge_into_adjustments_for_all_sids(self, all_adjustments_for_sid, col_to_all_adjustments): """ Merge adjustments for a particular sid into a dictionary containing adjustments for all sids. Parameters ---------- all_adjustments_for_sid : dict[int -> AdjustedArray] All adjustments for a particular sid. col_to_all_adjustments : dict[int -> AdjustedArray] All adjustments for all sids. """ for col_name in all_adjustments_for_sid: if col_name not in col_to_all_adjustments: col_to_all_adjustments[col_name] = {} for ts in all_adjustments_for_sid[col_name]: adjs = all_adjustments_for_sid[col_name][ts] add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts)
python
def merge_into_adjustments_for_all_sids(self, all_adjustments_for_sid, col_to_all_adjustments): """ Merge adjustments for a particular sid into a dictionary containing adjustments for all sids. Parameters ---------- all_adjustments_for_sid : dict[int -> AdjustedArray] All adjustments for a particular sid. col_to_all_adjustments : dict[int -> AdjustedArray] All adjustments for all sids. """ for col_name in all_adjustments_for_sid: if col_name not in col_to_all_adjustments: col_to_all_adjustments[col_name] = {} for ts in all_adjustments_for_sid[col_name]: adjs = all_adjustments_for_sid[col_name][ts] add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts)
[ "def", "merge_into_adjustments_for_all_sids", "(", "self", ",", "all_adjustments_for_sid", ",", "col_to_all_adjustments", ")", ":", "for", "col_name", "in", "all_adjustments_for_sid", ":", "if", "col_name", "not", "in", "col_to_all_adjustments", ":", "col_to_all_adjustments", "[", "col_name", "]", "=", "{", "}", "for", "ts", "in", "all_adjustments_for_sid", "[", "col_name", "]", ":", "adjs", "=", "all_adjustments_for_sid", "[", "col_name", "]", "[", "ts", "]", "add_new_adjustments", "(", "col_to_all_adjustments", ",", "adjs", ",", "col_name", ",", "ts", ")" ]
Merge adjustments for a particular sid into a dictionary containing adjustments for all sids. Parameters ---------- all_adjustments_for_sid : dict[int -> AdjustedArray] All adjustments for a particular sid. col_to_all_adjustments : dict[int -> AdjustedArray] All adjustments for all sids.
[ "Merge", "adjustments", "for", "a", "particular", "sid", "into", "a", "dictionary", "containing", "adjustments", "for", "all", "sids", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L406-L429
train
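Stripped of zipline types, the merge above is a two-level dict merge in which lists of adjustments at the same timestamp are concatenated. A plain-Python sketch, with `setdefault` standing in for `add_new_adjustments`:

```python
def merge(per_sid, combined):
    # combined: {column_name -> {date_index -> [adjustments]}}
    for col, by_ts in per_sid.items():
        target = combined.setdefault(col, {})
        for ts, adjs in by_ts.items():
            target.setdefault(ts, []).extend(adjs)

combined = {'estimate': {3: ['adj_a']}}
merge({'estimate': {3: ['adj_b'], 7: ['adj_c']}}, combined)
print(combined)  # {'estimate': {3: ['adj_a', 'adj_b'], 7: ['adj_c']}}
```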
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.get_adjustments
def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Creates an AdjustedArray from the given estimates data for the given dates. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. assets : pd.Int64Index An index of all the assets from the raw data. columns : list of BoundColumn The columns for which adjustments need to be calculated. kwargs : Additional keyword arguments that should be forwarded to `get_adjustments_for_sid` and to be used in computing adjustments for each sid. Returns ------- col_to_all_adjustments : dict[int -> AdjustedArray] A dictionary of all adjustments that should be applied. """ zero_qtr_data.sort_index(inplace=True) # Here we want to get the LAST record from each group of records # corresponding to a single quarter. This is to ensure that we select # the most up-to-date event date in case the event date changes. quarter_shifts = zero_qtr_data.groupby( level=[SID_FIELD_NAME, NORMALIZED_QUARTERS] ).nth(-1) col_to_all_adjustments = {} sid_to_idx = dict(zip(assets, range(len(assets)))) quarter_shifts.groupby(level=SID_FIELD_NAME).apply( self.get_adjustments_for_sid, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, **kwargs ) return col_to_all_adjustments
python
def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Creates an AdjustedArray from the given estimates data for the given dates. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. assets : pd.Int64Index An index of all the assets from the raw data. columns : list of BoundColumn The columns for which adjustments need to be calculated. kwargs : Additional keyword arguments that should be forwarded to `get_adjustments_for_sid` and to be used in computing adjustments for each sid. Returns ------- col_to_all_adjustments : dict[int -> AdjustedArray] A dictionary of all adjustments that should be applied. """ zero_qtr_data.sort_index(inplace=True) # Here we want to get the LAST record from each group of records # corresponding to a single quarter. This is to ensure that we select # the most up-to-date event date in case the event date changes. quarter_shifts = zero_qtr_data.groupby( level=[SID_FIELD_NAME, NORMALIZED_QUARTERS] ).nth(-1) col_to_all_adjustments = {} sid_to_idx = dict(zip(assets, range(len(assets)))) quarter_shifts.groupby(level=SID_FIELD_NAME).apply( self.get_adjustments_for_sid, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, **kwargs ) return col_to_all_adjustments
[ "def", "get_adjustments", "(", "self", ",", "zero_qtr_data", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "dates", ",", "assets", ",", "columns", ",", "*", "*", "kwargs", ")", ":", "zero_qtr_data", ".", "sort_index", "(", "inplace", "=", "True", ")", "# Here we want to get the LAST record from each group of records", "# corresponding to a single quarter. This is to ensure that we select", "# the most up-to-date event date in case the event date changes.", "quarter_shifts", "=", "zero_qtr_data", ".", "groupby", "(", "level", "=", "[", "SID_FIELD_NAME", ",", "NORMALIZED_QUARTERS", "]", ")", ".", "nth", "(", "-", "1", ")", "col_to_all_adjustments", "=", "{", "}", "sid_to_idx", "=", "dict", "(", "zip", "(", "assets", ",", "range", "(", "len", "(", "assets", ")", ")", ")", ")", "quarter_shifts", ".", "groupby", "(", "level", "=", "SID_FIELD_NAME", ")", ".", "apply", "(", "self", ".", "get_adjustments_for_sid", ",", "dates", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "sid_to_idx", ",", "columns", ",", "col_to_all_adjustments", ",", "*", "*", "kwargs", ")", "return", "col_to_all_adjustments" ]
Creates an AdjustedArray from the given estimates data for the given dates. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. assets : pd.Int64Index An index of all the assets from the raw data. columns : list of BoundColumn The columns for which adjustments need to be calculated. kwargs : Additional keyword arguments that should be forwarded to `get_adjustments_for_sid` and to be used in computing adjustments for each sid. Returns ------- col_to_all_adjustments : dict[int -> AdjustedArray] A dictionary of all adjustments that should be applied.
[ "Creates", "an", "AdjustedArray", "from", "the", "given", "estimates", "data", "for", "the", "given", "dates", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L431-L490
train
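The `groupby(...).nth(-1)` step deserves a concrete picture: within each (sid, quarter) group it keeps only the last row, i.e. the most recent revision of the event date. With invented data:

```python
import pandas as pd

df = pd.DataFrame(
    {'event_date': ['2015-01-10', '2015-01-20', '2015-04-15']},
    index=pd.MultiIndex.from_tuples(
        [(1, 56.0), (1, 56.0), (1, 57.0)],
        names=['sid', 'normalized_quarters'],
    ),
)
last = df.groupby(level=['sid', 'normalized_quarters']).nth(-1)
print(last)   # quarter 56.0 keeps only the revised 2015-01-20 row
```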
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.create_overwrites_for_quarter
def create_overwrites_for_quarter(self, col_to_overwrites, next_qtr_start_idx, last_per_qtr, quarters_with_estimates_for_sid, requested_quarter, sid, sid_idx, columns): """ Add entries to the dictionary of columns to adjustments for the given sid and the given quarter. Parameters ---------- col_to_overwrites : dict [column_name -> list of ArrayAdjustment] A dictionary mapping column names to all overwrites for those columns. next_qtr_start_idx : int The index of the first day of the next quarter in the calendar dates. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter; this is particularly useful for getting adjustments for 'next' estimates. quarters_with_estimates_for_sid : np.array An array of all quarters for which there are estimates for the given sid. requested_quarter : float The quarter for which the overwrite should be created. sid : int The sid for which to create overwrites. sid_idx : int The index of the sid in `assets`. columns : list of BoundColumn The columns for which to create overwrites. """ for col in columns: column_name = self.name_map[col.name] if column_name not in col_to_overwrites: col_to_overwrites[column_name] = {} # If there are estimates for the requested quarter, # overwrite all values going up to the starting index of # that quarter with estimates for that quarter. if requested_quarter in quarters_with_estimates_for_sid: adjs = self.create_overwrite_for_estimate( col, column_name, last_per_qtr, next_qtr_start_idx, requested_quarter, sid, sid_idx, ) add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx) # There are no estimates for the quarter. Overwrite all # values going up to the starting index of that quarter # with the missing value for this column. else: adjs = [self.overwrite_with_null( col, next_qtr_start_idx, sid_idx)] add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx)
python
def create_overwrites_for_quarter(self, col_to_overwrites, next_qtr_start_idx, last_per_qtr, quarters_with_estimates_for_sid, requested_quarter, sid, sid_idx, columns): """ Add entries to the dictionary of columns to adjustments for the given sid and the given quarter. Parameters ---------- col_to_overwrites : dict [column_name -> list of ArrayAdjustment] A dictionary mapping column names to all overwrites for those columns. next_qtr_start_idx : int The index of the first day of the next quarter in the calendar dates. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter; this is particularly useful for getting adjustments for 'next' estimates. quarters_with_estimates_for_sid : np.array An array of all quarters for which there are estimates for the given sid. requested_quarter : float The quarter for which the overwrite should be created. sid : int The sid for which to create overwrites. sid_idx : int The index of the sid in `assets`. columns : list of BoundColumn The columns for which to create overwrites. """ for col in columns: column_name = self.name_map[col.name] if column_name not in col_to_overwrites: col_to_overwrites[column_name] = {} # If there are estimates for the requested quarter, # overwrite all values going up to the starting index of # that quarter with estimates for that quarter. if requested_quarter in quarters_with_estimates_for_sid: adjs = self.create_overwrite_for_estimate( col, column_name, last_per_qtr, next_qtr_start_idx, requested_quarter, sid, sid_idx, ) add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx) # There are no estimates for the quarter. Overwrite all # values going up to the starting index of that quarter # with the missing value for this column. else: adjs = [self.overwrite_with_null( col, next_qtr_start_idx, sid_idx)] add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx)
[ "def", "create_overwrites_for_quarter", "(", "self", ",", "col_to_overwrites", ",", "next_qtr_start_idx", ",", "last_per_qtr", ",", "quarters_with_estimates_for_sid", ",", "requested_quarter", ",", "sid", ",", "sid_idx", ",", "columns", ")", ":", "for", "col", "in", "columns", ":", "column_name", "=", "self", ".", "name_map", "[", "col", ".", "name", "]", "if", "column_name", "not", "in", "col_to_overwrites", ":", "col_to_overwrites", "[", "column_name", "]", "=", "{", "}", "# If there are estimates for the requested quarter,", "# overwrite all values going up to the starting index of", "# that quarter with estimates for that quarter.", "if", "requested_quarter", "in", "quarters_with_estimates_for_sid", ":", "adjs", "=", "self", ".", "create_overwrite_for_estimate", "(", "col", ",", "column_name", ",", "last_per_qtr", ",", "next_qtr_start_idx", ",", "requested_quarter", ",", "sid", ",", "sid_idx", ",", ")", "add_new_adjustments", "(", "col_to_overwrites", ",", "adjs", ",", "column_name", ",", "next_qtr_start_idx", ")", "# There are no estimates for the quarter. Overwrite all", "# values going up to the starting index of that quarter", "# with the missing value for this column.", "else", ":", "adjs", "=", "[", "self", ".", "overwrite_with_null", "(", "col", ",", "next_qtr_start_idx", ",", "sid_idx", ")", "]", "add_new_adjustments", "(", "col_to_overwrites", ",", "adjs", ",", "column_name", ",", "next_qtr_start_idx", ")" ]
Add entries to the dictionary of columns to adjustments for the given sid and the given quarter. Parameters ---------- col_to_overwrites : dict [column_name -> list of ArrayAdjustment] A dictionary mapping column names to all overwrites for those columns. next_qtr_start_idx : int The index of the first day of the next quarter in the calendar dates. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter; this is particularly useful for getting adjustments for 'next' estimates. quarters_with_estimates_for_sid : np.array An array of all quarters for which there are estimates for the given sid. requested_quarter : float The quarter for which the overwrite should be created. sid : int The sid for which to create overwrites. sid_idx : int The index of the sid in `assets`. columns : list of BoundColumn The columns for which to create overwrites.
[ "Add", "entries", "to", "the", "dictionary", "of", "columns", "to", "adjustments", "for", "the", "given", "sid", "and", "the", "given", "quarter", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L492-L563
train
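Both branches above overwrite every row before the quarter boundary for one sid's column, either with a known estimate or with the column's missing value. A pure-NumPy stand-in for that effect (zipline expresses it as Adjustment objects rather than mutating arrays):

```python
import numpy as np

values = np.arange(12, dtype=float).reshape(6, 2)  # rows: dates, cols: sids
next_qtr_start_idx, sid_idx = 3, 1

have_estimate = False                    # flip to take the estimate branch
fill = 7.5 if have_estimate else np.nan
values[:next_qtr_start_idx, sid_idx] = fill   # rows before the boundary
print(values)
```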
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
EarningsEstimatesLoader.get_last_data_per_qtr
def get_last_data_per_qtr(self, assets_with_data, columns, dates, data_query_cutoff_times): """ Determine the last piece of information we know for each column on each date in the index for each sid and quarter. Parameters ---------- assets_with_data : pd.Index Index of all assets that appear in the raw data given to the loader. columns : iterable of BoundColumn The columns that need to be loaded from the raw data. data_query_cutoff_times : pd.DatetimeIndex The calendar of dates for which data should be loaded. Returns ------- stacked_last_per_qtr : pd.DataFrame A DataFrame indexed by [dates, sid, normalized_quarters] that has the latest information for each row of the index, sorted by event date. last_per_qtr : pd.DataFrame A DataFrame with columns that are a MultiIndex of [ self.estimates.columns, normalized_quarters, sid]. """ # Get a DataFrame indexed by date with a MultiIndex of columns of # [self.estimates.columns, normalized_quarters, sid], where each cell # contains the latest data for that day. last_per_qtr = last_in_date_group( self.estimates, data_query_cutoff_times, assets_with_data, reindex=True, extra_groupers=[NORMALIZED_QUARTERS], ) last_per_qtr.index = dates # Forward fill values for each quarter/sid/dataset column. ffill_across_cols(last_per_qtr, columns, self.name_map) # Stack quarter and sid into the index. stacked_last_per_qtr = last_per_qtr.stack( [SID_FIELD_NAME, NORMALIZED_QUARTERS], ) # Set date index name for ease of reference stacked_last_per_qtr.index.set_names( SIMULATION_DATES, level=0, inplace=True, ) stacked_last_per_qtr = stacked_last_per_qtr.sort_values( EVENT_DATE_FIELD_NAME, ) stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime( stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] ) return last_per_qtr, stacked_last_per_qtr
python
def get_last_data_per_qtr(self, assets_with_data, columns, dates, data_query_cutoff_times): """ Determine the last piece of information we know for each column on each date in the index for each sid and quarter. Parameters ---------- assets_with_data : pd.Index Index of all assets that appear in the raw data given to the loader. columns : iterable of BoundColumn The columns that need to be loaded from the raw data. data_query_cutoff_times : pd.DatetimeIndex The calendar of dates for which data should be loaded. Returns ------- stacked_last_per_qtr : pd.DataFrame A DataFrame indexed by [dates, sid, normalized_quarters] that has the latest information for each row of the index, sorted by event date. last_per_qtr : pd.DataFrame A DataFrame with columns that are a MultiIndex of [ self.estimates.columns, normalized_quarters, sid]. """ # Get a DataFrame indexed by date with a MultiIndex of columns of # [self.estimates.columns, normalized_quarters, sid], where each cell # contains the latest data for that day. last_per_qtr = last_in_date_group( self.estimates, data_query_cutoff_times, assets_with_data, reindex=True, extra_groupers=[NORMALIZED_QUARTERS], ) last_per_qtr.index = dates # Forward fill values for each quarter/sid/dataset column. ffill_across_cols(last_per_qtr, columns, self.name_map) # Stack quarter and sid into the index. stacked_last_per_qtr = last_per_qtr.stack( [SID_FIELD_NAME, NORMALIZED_QUARTERS], ) # Set date index name for ease of reference stacked_last_per_qtr.index.set_names( SIMULATION_DATES, level=0, inplace=True, ) stacked_last_per_qtr = stacked_last_per_qtr.sort_values( EVENT_DATE_FIELD_NAME, ) stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime( stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] ) return last_per_qtr, stacked_last_per_qtr
[ "def", "get_last_data_per_qtr", "(", "self", ",", "assets_with_data", ",", "columns", ",", "dates", ",", "data_query_cutoff_times", ")", ":", "# Get a DataFrame indexed by date with a MultiIndex of columns of", "# [self.estimates.columns, normalized_quarters, sid], where each cell", "# contains the latest data for that day.", "last_per_qtr", "=", "last_in_date_group", "(", "self", ".", "estimates", ",", "data_query_cutoff_times", ",", "assets_with_data", ",", "reindex", "=", "True", ",", "extra_groupers", "=", "[", "NORMALIZED_QUARTERS", "]", ",", ")", "last_per_qtr", ".", "index", "=", "dates", "# Forward fill values for each quarter/sid/dataset column.", "ffill_across_cols", "(", "last_per_qtr", ",", "columns", ",", "self", ".", "name_map", ")", "# Stack quarter and sid into the index.", "stacked_last_per_qtr", "=", "last_per_qtr", ".", "stack", "(", "[", "SID_FIELD_NAME", ",", "NORMALIZED_QUARTERS", "]", ",", ")", "# Set date index name for ease of reference", "stacked_last_per_qtr", ".", "index", ".", "set_names", "(", "SIMULATION_DATES", ",", "level", "=", "0", ",", "inplace", "=", "True", ",", ")", "stacked_last_per_qtr", "=", "stacked_last_per_qtr", ".", "sort_values", "(", "EVENT_DATE_FIELD_NAME", ",", ")", "stacked_last_per_qtr", "[", "EVENT_DATE_FIELD_NAME", "]", "=", "pd", ".", "to_datetime", "(", "stacked_last_per_qtr", "[", "EVENT_DATE_FIELD_NAME", "]", ")", "return", "last_per_qtr", ",", "stacked_last_per_qtr" ]
Determine the last piece of information we know for each column on each date in the index for each sid and quarter. Parameters ---------- assets_with_data : pd.Index Index of all assets that appear in the raw data given to the loader. columns : iterable of BoundColumn The columns that need to be loaded from the raw data. data_query_cutoff_times : pd.DatetimeIndex The calendar of dates for which data should be loaded. Returns ------- stacked_last_per_qtr : pd.DataFrame A DataFrame indexed by [dates, sid, normalized_quarters] that has the latest information for each row of the index, sorted by event date. last_per_qtr : pd.DataFrame A DataFrame with columns that are a MultiIndex of [ self.estimates.columns, normalized_quarters, sid].
[ "Determine", "the", "last", "piece", "of", "information", "we", "know", "for", "each", "column", "on", "each", "date", "in", "the", "index", "for", "each", "sid", "and", "quarter", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L667-L725
train
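The pivotal reshape above is `stack`, which moves the quarter and sid column levels into the row index so each row becomes one (date, sid, quarter) observation. A toy sketch with invented values:

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('estimate', 56.0, 1), ('estimate', 57.0, 1)],
    names=[None, 'normalized_quarters', 'sid'],
)
last_per_qtr = pd.DataFrame(
    [[1.0, 2.0]], index=pd.to_datetime(['2015-01-05']), columns=cols,
)
stacked = last_per_qtr.stack(['sid', 'normalized_quarters'])
print(stacked)   # row index: (date, sid, quarter); one 'estimate' column
```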
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
PreviousEarningsEstimatesLoader.get_zeroth_quarter_idx
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
        """
        Filters for releases that are on or before each simulation date and
        determines the previous quarter by picking out the most recent
        release relative to each date in the index.

        Parameters
        ----------
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame with index of calendar dates, sid, and normalized
            quarters with each row being the latest estimate for the row's
            index values, sorted by event date.

        Returns
        -------
        previous_releases_per_date_index : pd.MultiIndex
            An index of calendar dates, sid, and normalized quarters, for only
            the rows that have a previous event.
        """
        previous_releases_per_date = stacked_last_per_qtr.loc[
            stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
            stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
        ].groupby(
            level=[SIMULATION_DATES, SID_FIELD_NAME],
            as_index=False,
            # Here we take advantage of the fact that `stacked_last_per_qtr` is
            # sorted by event date.
        ).nth(-1)
        return previous_releases_per_date.index
python
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
        """
        Filters for releases that are on or before each simulation date and
        determines the previous quarter by picking out the most recent
        release relative to each date in the index.

        Parameters
        ----------
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame with index of calendar dates, sid, and normalized
            quarters with each row being the latest estimate for the row's
            index values, sorted by event date.

        Returns
        -------
        previous_releases_per_date_index : pd.MultiIndex
            An index of calendar dates, sid, and normalized quarters, for only
            the rows that have a previous event.
        """
        previous_releases_per_date = stacked_last_per_qtr.loc[
            stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
            stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
        ].groupby(
            level=[SIMULATION_DATES, SID_FIELD_NAME],
            as_index=False,
            # Here we take advantage of the fact that `stacked_last_per_qtr` is
            # sorted by event date.
        ).nth(-1)
        return previous_releases_per_date.index
[ "def", "get_zeroth_quarter_idx", "(", "self", ",", "stacked_last_per_qtr", ")", ":", "previous_releases_per_date", "=", "stacked_last_per_qtr", ".", "loc", "[", "stacked_last_per_qtr", "[", "EVENT_DATE_FIELD_NAME", "]", "<=", "stacked_last_per_qtr", ".", "index", ".", "get_level_values", "(", "SIMULATION_DATES", ")", "]", ".", "groupby", "(", "level", "=", "[", "SIMULATION_DATES", ",", "SID_FIELD_NAME", "]", ",", "as_index", "=", "False", ",", "# Here we take advantage of the fact that `stacked_last_per_qtr` is", "# sorted by event date.", ")", ".", "nth", "(", "-", "1", ")", "return", "previous_releases_per_date", ".", "index" ]
Filters for releases that are on or before each simulation date and
        determines the previous quarter by picking out the most recent
        release relative to each date in the index.

        Parameters
        ----------
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame with index of calendar dates, sid, and normalized
            quarters with each row being the latest estimate for the row's
            index values, sorted by event date.

        Returns
        -------
        previous_releases_per_date_index : pd.MultiIndex
            An index of calendar dates, sid, and normalized quarters, for only
            the rows that have a previous event.
[ "Filters", "for", "releases", "that", "are", "on", "or", "after", "each", "simulation", "date", "and", "determines", "the", "previous", "quarter", "by", "picking", "out", "the", "most", "recent", "release", "relative", "to", "each", "date", "in", "the", "index", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L810-L838
train
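Concretely, the filter keeps only rows whose release has already happened as of each simulation date, and `nth(-1)` then selects the most recent of them per (date, sid). A toy run with invented data:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(pd.Timestamp('2015-02-02'), 1, 56.0),
     (pd.Timestamp('2015-02-02'), 1, 57.0)],
    names=['dates', 'sid', 'normalized_quarters'],
)
df = pd.DataFrame(
    {'event_date': pd.to_datetime(['2015-01-20', '2015-04-15'])}, index=idx,
)
prev = df.loc[
    df['event_date'] <= df.index.get_level_values('dates')
].groupby(level=['dates', 'sid'], as_index=False).nth(-1)
print(prev.index)   # only quarter 56.0 survives: the previous release
```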
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.get_adjustments_for_sid
def get_adjustments_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, split_adjusted_asof_idx=None, split_adjusted_cols_for_group=None): """ Collects both overwrites and adjustments for a particular sid. Parameters ---------- split_adjusted_asof_idx : int The integer index of the date on which the data was split-adjusted. split_adjusted_cols_for_group : list of str The names of requested columns that should also be split-adjusted. """ all_adjustments_for_sid = {} sid = int(group.name) self.collect_overwrites_for_sid(group, dates, requested_qtr_data, last_per_qtr, sid_to_idx[sid], columns, all_adjustments_for_sid, sid) (pre_adjustments, post_adjustments) = self.retrieve_split_adjustment_data_for_sid( dates, sid, split_adjusted_asof_idx ) sid_estimates = self.estimates[ self.estimates[SID_FIELD_NAME] == sid ] # We might not have any overwrites but still have # adjustments, and we will need to manually add columns if # that is the case. for col_name in split_adjusted_cols_for_group: if col_name not in all_adjustments_for_sid: all_adjustments_for_sid[col_name] = {} self.collect_split_adjustments( all_adjustments_for_sid, requested_qtr_data, dates, sid, sid_to_idx[sid], sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, split_adjusted_cols_for_group ) self.merge_into_adjustments_for_all_sids( all_adjustments_for_sid, col_to_all_adjustments )
python
def get_adjustments_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, split_adjusted_asof_idx=None, split_adjusted_cols_for_group=None): """ Collects both overwrites and adjustments for a particular sid. Parameters ---------- split_adjusted_asof_idx : int The integer index of the date on which the data was split-adjusted. split_adjusted_cols_for_group : list of str The names of requested columns that should also be split-adjusted. """ all_adjustments_for_sid = {} sid = int(group.name) self.collect_overwrites_for_sid(group, dates, requested_qtr_data, last_per_qtr, sid_to_idx[sid], columns, all_adjustments_for_sid, sid) (pre_adjustments, post_adjustments) = self.retrieve_split_adjustment_data_for_sid( dates, sid, split_adjusted_asof_idx ) sid_estimates = self.estimates[ self.estimates[SID_FIELD_NAME] == sid ] # We might not have any overwrites but still have # adjustments, and we will need to manually add columns if # that is the case. for col_name in split_adjusted_cols_for_group: if col_name not in all_adjustments_for_sid: all_adjustments_for_sid[col_name] = {} self.collect_split_adjustments( all_adjustments_for_sid, requested_qtr_data, dates, sid, sid_to_idx[sid], sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, split_adjusted_cols_for_group ) self.merge_into_adjustments_for_all_sids( all_adjustments_for_sid, col_to_all_adjustments )
[ "def", "get_adjustments_for_sid", "(", "self", ",", "group", ",", "dates", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "sid_to_idx", ",", "columns", ",", "col_to_all_adjustments", ",", "split_adjusted_asof_idx", "=", "None", ",", "split_adjusted_cols_for_group", "=", "None", ")", ":", "all_adjustments_for_sid", "=", "{", "}", "sid", "=", "int", "(", "group", ".", "name", ")", "self", ".", "collect_overwrites_for_sid", "(", "group", ",", "dates", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "sid_to_idx", "[", "sid", "]", ",", "columns", ",", "all_adjustments_for_sid", ",", "sid", ")", "(", "pre_adjustments", ",", "post_adjustments", ")", "=", "self", ".", "retrieve_split_adjustment_data_for_sid", "(", "dates", ",", "sid", ",", "split_adjusted_asof_idx", ")", "sid_estimates", "=", "self", ".", "estimates", "[", "self", ".", "estimates", "[", "SID_FIELD_NAME", "]", "==", "sid", "]", "# We might not have any overwrites but still have", "# adjustments, and we will need to manually add columns if", "# that is the case.", "for", "col_name", "in", "split_adjusted_cols_for_group", ":", "if", "col_name", "not", "in", "all_adjustments_for_sid", ":", "all_adjustments_for_sid", "[", "col_name", "]", "=", "{", "}", "self", ".", "collect_split_adjustments", "(", "all_adjustments_for_sid", ",", "requested_qtr_data", ",", "dates", ",", "sid", ",", "sid_to_idx", "[", "sid", "]", ",", "sid_estimates", ",", "split_adjusted_asof_idx", ",", "pre_adjustments", ",", "post_adjustments", ",", "split_adjusted_cols_for_group", ")", "self", ".", "merge_into_adjustments_for_all_sids", "(", "all_adjustments_for_sid", ",", "col_to_all_adjustments", ")" ]
Collects both overwrites and adjustments for a particular sid. Parameters ---------- split_adjusted_asof_idx : int The integer index of the date on which the data was split-adjusted. split_adjusted_cols_for_group : list of str The names of requested columns that should also be split-adjusted.
[ "Collects", "both", "overwrites", "and", "adjustments", "for", "a", "particular", "sid", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L907-L965
train
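One detail worth a sketch: `get_adjustments_for_sid` is invoked through `groupby(...).apply`, so each call sees one sid's sub-frame and reads the sid back from `group.name`. A minimal illustration with invented data:

```python
import pandas as pd

df = pd.DataFrame({'sid': [1, 1, 2], 'estimate': [1.0, 2.0, 3.0]})
df = df.set_index('sid', drop=False)

def per_sid(group):
    # group.name is the group key, i.e. the sid being processed
    print('sid', int(group.name), '->', len(group), 'rows')

df.groupby(level='sid').apply(per_sid)   # sid 1 -> 2 rows, sid 2 -> 1 row
```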
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.get_adjustments
def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Calculates both split adjustments and overwrites for all sids. """ split_adjusted_cols_for_group = [ self.name_map[col.name] for col in columns if self.name_map[col.name] in self._split_adjusted_column_names ] # Add all splits to the adjustment dict for this sid. split_adjusted_asof_idx = self.get_split_adjusted_asof_idx( dates ) return super(SplitAdjustedEstimatesLoader, self).get_adjustments( zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, split_adjusted_cols_for_group=split_adjusted_cols_for_group, split_adjusted_asof_idx=split_adjusted_asof_idx )
python
def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Calculates both split adjustments and overwrites for all sids. """ split_adjusted_cols_for_group = [ self.name_map[col.name] for col in columns if self.name_map[col.name] in self._split_adjusted_column_names ] # Add all splits to the adjustment dict for this sid. split_adjusted_asof_idx = self.get_split_adjusted_asof_idx( dates ) return super(SplitAdjustedEstimatesLoader, self).get_adjustments( zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, split_adjusted_cols_for_group=split_adjusted_cols_for_group, split_adjusted_asof_idx=split_adjusted_asof_idx )
[ "def", "get_adjustments", "(", "self", ",", "zero_qtr_data", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "dates", ",", "assets", ",", "columns", ",", "*", "*", "kwargs", ")", ":", "split_adjusted_cols_for_group", "=", "[", "self", ".", "name_map", "[", "col", ".", "name", "]", "for", "col", "in", "columns", "if", "self", ".", "name_map", "[", "col", ".", "name", "]", "in", "self", ".", "_split_adjusted_column_names", "]", "# Add all splits to the adjustment dict for this sid.", "split_adjusted_asof_idx", "=", "self", ".", "get_split_adjusted_asof_idx", "(", "dates", ")", "return", "super", "(", "SplitAdjustedEstimatesLoader", ",", "self", ")", ".", "get_adjustments", "(", "zero_qtr_data", ",", "requested_qtr_data", ",", "last_per_qtr", ",", "dates", ",", "assets", ",", "columns", ",", "split_adjusted_cols_for_group", "=", "split_adjusted_cols_for_group", ",", "split_adjusted_asof_idx", "=", "split_adjusted_asof_idx", ")" ]
Calculates both split adjustments and overwrites for all sids.
[ "Calculates", "both", "split", "adjustments", "and", "overwrites", "for", "all", "sids", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L967-L996
train
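The subclass's only added work before delegating is deciding which requested columns are split-adjusted and where the asof date falls. The column filtering is a simple comprehension; a toy version with invented names standing in for `self.name_map` and `self._split_adjusted_column_names`:

```python
name_map = {'estimate': 'estimate', 'fiscal_year': 'fiscal_year'}
split_adjusted_column_names = {'estimate'}

requested = ['estimate', 'fiscal_year']
split_adjusted_cols = [
    name_map[c] for c in requested
    if name_map[c] in split_adjusted_column_names
]
print(split_adjusted_cols)   # ['estimate']: only this column gets splits
```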
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.determine_end_idx_for_adjustment
def determine_end_idx_for_adjustment(self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates): """ Determines the date until which the adjustment at the given date index should be applied for the given quarter. Parameters ---------- adjustment_ts : pd.Timestamp The timestamp at which the adjustment occurs. dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. upper_bound : int The index of the upper bound in the calendar dates. This is the index until which the adjusment will be applied unless there is information for the requested quarter that comes in on or before that date. requested_quarter : float The quarter for which we are determining how the adjustment should be applied. sid_estimates : pd.DataFrame The DataFrame of estimates data for the sid for which we're applying the given adjustment. Returns ------- end_idx : int The last index to which the adjustment should be applied for the given quarter/sid. """ end_idx = upper_bound # Find the next newest kd that happens on or after # the date of this adjustment newest_kd_for_qtr = sid_estimates[ (sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) & (sid_estimates[TS_FIELD_NAME] >= adjustment_ts) ][TS_FIELD_NAME].min() if pd.notnull(newest_kd_for_qtr): newest_kd_idx = dates.searchsorted( newest_kd_for_qtr ) # We have fresh information that comes in # before the end of the overwrite and # presumably is already split-adjusted to the # current split. We should stop applying the # adjustment the day before this new # information comes in. if newest_kd_idx <= upper_bound: end_idx = newest_kd_idx - 1 return end_idx
python
def determine_end_idx_for_adjustment(self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates): """ Determines the date until which the adjustment at the given date index should be applied for the given quarter. Parameters ---------- adjustment_ts : pd.Timestamp The timestamp at which the adjustment occurs. dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. upper_bound : int The index of the upper bound in the calendar dates. This is the index until which the adjusment will be applied unless there is information for the requested quarter that comes in on or before that date. requested_quarter : float The quarter for which we are determining how the adjustment should be applied. sid_estimates : pd.DataFrame The DataFrame of estimates data for the sid for which we're applying the given adjustment. Returns ------- end_idx : int The last index to which the adjustment should be applied for the given quarter/sid. """ end_idx = upper_bound # Find the next newest kd that happens on or after # the date of this adjustment newest_kd_for_qtr = sid_estimates[ (sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) & (sid_estimates[TS_FIELD_NAME] >= adjustment_ts) ][TS_FIELD_NAME].min() if pd.notnull(newest_kd_for_qtr): newest_kd_idx = dates.searchsorted( newest_kd_for_qtr ) # We have fresh information that comes in # before the end of the overwrite and # presumably is already split-adjusted to the # current split. We should stop applying the # adjustment the day before this new # information comes in. if newest_kd_idx <= upper_bound: end_idx = newest_kd_idx - 1 return end_idx
[ "def", "determine_end_idx_for_adjustment", "(", "self", ",", "adjustment_ts", ",", "dates", ",", "upper_bound", ",", "requested_quarter", ",", "sid_estimates", ")", ":", "end_idx", "=", "upper_bound", "# Find the next newest kd that happens on or after", "# the date of this adjustment", "newest_kd_for_qtr", "=", "sid_estimates", "[", "(", "sid_estimates", "[", "NORMALIZED_QUARTERS", "]", "==", "requested_quarter", ")", "&", "(", "sid_estimates", "[", "TS_FIELD_NAME", "]", ">=", "adjustment_ts", ")", "]", "[", "TS_FIELD_NAME", "]", ".", "min", "(", ")", "if", "pd", ".", "notnull", "(", "newest_kd_for_qtr", ")", ":", "newest_kd_idx", "=", "dates", ".", "searchsorted", "(", "newest_kd_for_qtr", ")", "# We have fresh information that comes in", "# before the end of the overwrite and", "# presumably is already split-adjusted to the", "# current split. We should stop applying the", "# adjustment the day before this new", "# information comes in.", "if", "newest_kd_idx", "<=", "upper_bound", ":", "end_idx", "=", "newest_kd_idx", "-", "1", "return", "end_idx" ]
Determines the date until which the adjustment at the given date
        index should be applied for the given quarter.

        Parameters
        ----------
        adjustment_ts : pd.Timestamp
            The timestamp at which the adjustment occurs.
        dates : pd.DatetimeIndex
            The calendar dates over which the Pipeline is being computed.
        upper_bound : int
            The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
            information for the requested quarter that comes in on or before
            that date.
        requested_quarter : float
            The quarter for which we are determining how the adjustment
            should be applied.
        sid_estimates : pd.DataFrame
            The DataFrame of estimates data for the sid for which we're
            applying the given adjustment.

        Returns
        -------
        end_idx : int
            The last index to which the adjustment should be applied for the
            given quarter/sid.
[ "Determines", "the", "date", "until", "which", "the", "adjustment", "at", "the", "given", "date", "index", "should", "be", "applied", "for", "the", "given", "quarter", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L998-L1051
train
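A minimal sketch of the end-index logic above, using plain pandas. The column names `normalized_quarters` and `timestamp` are illustrative stand-ins for zipline's `NORMALIZED_QUARTERS` and `TS_FIELD_NAME` constants, and the toy data is invented:

.. code-block:: python

    import pandas as pd

    def end_idx_for_adjustment(adjustment_ts, dates, upper_bound,
                               requested_quarter, sid_estimates):
        # Default: apply the adjustment all the way through the upper bound.
        end_idx = upper_bound
        # Earliest knowledge date for the requested quarter on/after the split.
        newest_kd = sid_estimates[
            (sid_estimates['normalized_quarters'] == requested_quarter)
            & (sid_estimates['timestamp'] >= adjustment_ts)
        ]['timestamp'].min()
        if pd.notnull(newest_kd):
            newest_kd_idx = dates.searchsorted(newest_kd)
            # Fresh (presumably already split-adjusted) data arrives before
            # the end of the window, so stop the adjustment the day before.
            if newest_kd_idx <= upper_bound:
                end_idx = newest_kd_idx - 1
        return end_idx

    dates = pd.date_range('2015-01-05', periods=5)  # indexes 0..4
    estimates = pd.DataFrame({
        'normalized_quarters': [8.0, 8.0],
        'timestamp': pd.to_datetime(['2015-01-05', '2015-01-08']),
    })
    # New knowledge for quarter 8 lands at index 3, so the adjustment
    # stops at index 2 rather than running through the upper bound of 4.
    print(end_idx_for_adjustment(pd.Timestamp('2015-01-06'),
                                 dates, 4, 8.0, estimates))  # 2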
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.collect_pre_split_asof_date_adjustments
def collect_pre_split_asof_date_adjustments( self, split_adjusted_asof_date_idx, sid_idx, pre_adjustments, requested_split_adjusted_columns ): """ Collect split adjustments that occur before the split-adjusted-asof-date. All those adjustments must first be UN-applied at the first date index and then re-applied on the appropriate dates in order to match point in time share pricing data. Parameters ---------- split_adjusted_asof_date_idx : int The index in the calendar dates as-of which all data was split-adjusted. sid_idx : int The index of the sid for which adjustments should be collected in the adjusted array. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred on or before the split-asof-date. """ col_to_split_adjustments = {} if len(pre_adjustments[0]): adjustment_values, date_indexes = pre_adjustments for column_name in requested_split_adjusted_columns: col_to_split_adjustments[column_name] = {} # We need to undo all adjustments that happen before the # split_asof_date here by reversing the split ratio. col_to_split_adjustments[column_name][0] = [Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, 1 / future_adjustment ) for future_adjustment in adjustment_values] for adjustment, date_index in zip(adjustment_values, date_indexes): adj = Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments(col_to_split_adjustments, [adj], column_name, date_index) return col_to_split_adjustments
python
def collect_pre_split_asof_date_adjustments( self, split_adjusted_asof_date_idx, sid_idx, pre_adjustments, requested_split_adjusted_columns ): """ Collect split adjustments that occur before the split-adjusted-asof-date. All those adjustments must first be UN-applied at the first date index and then re-applied on the appropriate dates in order to match point in time share pricing data. Parameters ---------- split_adjusted_asof_date_idx : int The index in the calendar dates as-of which all data was split-adjusted. sid_idx : int The index of the sid for which adjustments should be collected in the adjusted array. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred on or before the split-asof-date. """ col_to_split_adjustments = {} if len(pre_adjustments[0]): adjustment_values, date_indexes = pre_adjustments for column_name in requested_split_adjusted_columns: col_to_split_adjustments[column_name] = {} # We need to undo all adjustments that happen before the # split_asof_date here by reversing the split ratio. col_to_split_adjustments[column_name][0] = [Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, 1 / future_adjustment ) for future_adjustment in adjustment_values] for adjustment, date_index in zip(adjustment_values, date_indexes): adj = Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments(col_to_split_adjustments, [adj], column_name, date_index) return col_to_split_adjustments
[ "def", "collect_pre_split_asof_date_adjustments", "(", "self", ",", "split_adjusted_asof_date_idx", ",", "sid_idx", ",", "pre_adjustments", ",", "requested_split_adjusted_columns", ")", ":", "col_to_split_adjustments", "=", "{", "}", "if", "len", "(", "pre_adjustments", "[", "0", "]", ")", ":", "adjustment_values", ",", "date_indexes", "=", "pre_adjustments", "for", "column_name", "in", "requested_split_adjusted_columns", ":", "col_to_split_adjustments", "[", "column_name", "]", "=", "{", "}", "# We need to undo all adjustments that happen before the", "# split_asof_date here by reversing the split ratio.", "col_to_split_adjustments", "[", "column_name", "]", "[", "0", "]", "=", "[", "Float64Multiply", "(", "0", ",", "split_adjusted_asof_date_idx", ",", "sid_idx", ",", "sid_idx", ",", "1", "/", "future_adjustment", ")", "for", "future_adjustment", "in", "adjustment_values", "]", "for", "adjustment", ",", "date_index", "in", "zip", "(", "adjustment_values", ",", "date_indexes", ")", ":", "adj", "=", "Float64Multiply", "(", "0", ",", "split_adjusted_asof_date_idx", ",", "sid_idx", ",", "sid_idx", ",", "adjustment", ")", "add_new_adjustments", "(", "col_to_split_adjustments", ",", "[", "adj", "]", ",", "column_name", ",", "date_index", ")", "return", "col_to_split_adjustments" ]
Collect split adjustments that occur before the split-adjusted-asof-date. All those adjustments must first be UN-applied at the first date index and then re-applied on the appropriate dates in order to match point in time share pricing data. Parameters ---------- split_adjusted_asof_date_idx : int The index in the calendar dates as-of which all data was split-adjusted. sid_idx : int The index of the sid for which adjustments should be collected in the adjusted array. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred on or before the split-asof-date.
[ "Collect", "split", "adjustments", "that", "occur", "before", "the", "split", "-", "adjusted", "-", "asof", "-", "date", ".", "All", "those", "adjustments", "must", "first", "be", "UN", "-", "applied", "at", "the", "first", "date", "index", "and", "then", "re", "-", "applied", "on", "the", "appropriate", "dates", "in", "order", "to", "match", "point", "in", "time", "share", "pricing", "data", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1053-L1115
train
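The un-apply/re-apply bookkeeping above can be shown with plain tuples `(first_row, last_row, multiplier)` standing in for `Float64Multiply`, the sid dimension dropped, and a `defaultdict` assuming the append behavior of `add_new_adjustments` (whose implementation is not shown in this section):

.. code-block:: python

    from collections import defaultdict

    def collect_pre_asof(asof_idx, pre_adjustments, columns):
        ratios, date_indexes = pre_adjustments
        out = {}
        for col in columns:
            out[col] = defaultdict(list)
            # Un-apply every pre-asof split at the first date index...
            out[col][0] = [(0, asof_idx, 1.0 / r) for r in ratios]
            # ...then re-apply each split on the date it became known.
            for r, idx in zip(ratios, date_indexes):
                out[col][idx].append((0, asof_idx, r))
        return out

    adjs = collect_pre_asof(3, ([2.0, 4.0], [1, 2]), ['estimate'])
    print(dict(adjs['estimate']))
    # {0: [(0, 3, 0.5), (0, 3, 0.25)], 1: [(0, 3, 2.0)], 2: [(0, 3, 4.0)]}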
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.collect_post_asof_split_adjustments
def collect_post_asof_split_adjustments(self, post_adjustments, requested_qtr_data, sid, sid_idx, sid_estimates, requested_split_adjusted_columns): """ Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date. """ col_to_split_adjustments = {} if post_adjustments: # Get an integer index requested_qtr_timeline = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS ][sid].reset_index() requested_qtr_timeline = requested_qtr_timeline[ requested_qtr_timeline[sid].notnull() ] # Split the data into range by quarter and determine which quarter # was being requested in each range. # Split integer indexes up by quarter range qtr_ranges_idxs = np.split( requested_qtr_timeline.index, np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1 ) requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs] # Try to apply each adjustment to each quarter range. for i, qtr_range in enumerate(qtr_ranges_idxs): for adjustment, date_index, timestamp in zip( *post_adjustments ): # In the default case, apply through the end of the quarter upper_bound = qtr_range[-1] # Find the smallest KD in estimates that is on or after the # date of the given adjustment. Apply the given adjustment # until that KD. end_idx = self.determine_end_idx_for_adjustment( timestamp, requested_qtr_data.index, upper_bound, requested_quarters_per_range[i], sid_estimates ) # In the default case, apply adjustment on the first day of # the quarter. start_idx = qtr_range[0] # If the adjustment happens during this quarter, apply the # adjustment on the day it happens. if date_index > start_idx: start_idx = date_index # We only want to apply the adjustment if we have any stale # data to apply it to. if qtr_range[0] <= end_idx: for column_name in requested_split_adjusted_columns: if column_name not in col_to_split_adjustments: col_to_split_adjustments[column_name] = {} adj = Float64Multiply( # Always apply from first day of qtr qtr_range[0], end_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments( col_to_split_adjustments, [adj], column_name, start_idx ) return col_to_split_adjustments
python
def collect_post_asof_split_adjustments(self, post_adjustments, requested_qtr_data, sid, sid_idx, sid_estimates, requested_split_adjusted_columns): """ Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date. """ col_to_split_adjustments = {} if post_adjustments: # Get an integer index requested_qtr_timeline = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS ][sid].reset_index() requested_qtr_timeline = requested_qtr_timeline[ requested_qtr_timeline[sid].notnull() ] # Split the data into range by quarter and determine which quarter # was being requested in each range. # Split integer indexes up by quarter range qtr_ranges_idxs = np.split( requested_qtr_timeline.index, np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1 ) requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs] # Try to apply each adjustment to each quarter range. for i, qtr_range in enumerate(qtr_ranges_idxs): for adjustment, date_index, timestamp in zip( *post_adjustments ): # In the default case, apply through the end of the quarter upper_bound = qtr_range[-1] # Find the smallest KD in estimates that is on or after the # date of the given adjustment. Apply the given adjustment # until that KD. end_idx = self.determine_end_idx_for_adjustment( timestamp, requested_qtr_data.index, upper_bound, requested_quarters_per_range[i], sid_estimates ) # In the default case, apply adjustment on the first day of # the quarter. start_idx = qtr_range[0] # If the adjustment happens during this quarter, apply the # adjustment on the day it happens. if date_index > start_idx: start_idx = date_index # We only want to apply the adjustment if we have any stale # data to apply it to. if qtr_range[0] <= end_idx: for column_name in requested_split_adjusted_columns: if column_name not in col_to_split_adjustments: col_to_split_adjustments[column_name] = {} adj = Float64Multiply( # Always apply from first day of qtr qtr_range[0], end_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments( col_to_split_adjustments, [adj], column_name, start_idx ) return col_to_split_adjustments
[ "def", "collect_post_asof_split_adjustments", "(", "self", ",", "post_adjustments", ",", "requested_qtr_data", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "requested_split_adjusted_columns", ")", ":", "col_to_split_adjustments", "=", "{", "}", "if", "post_adjustments", ":", "# Get an integer index", "requested_qtr_timeline", "=", "requested_qtr_data", "[", "SHIFTED_NORMALIZED_QTRS", "]", "[", "sid", "]", ".", "reset_index", "(", ")", "requested_qtr_timeline", "=", "requested_qtr_timeline", "[", "requested_qtr_timeline", "[", "sid", "]", ".", "notnull", "(", ")", "]", "# Split the data into range by quarter and determine which quarter", "# was being requested in each range.", "# Split integer indexes up by quarter range", "qtr_ranges_idxs", "=", "np", ".", "split", "(", "requested_qtr_timeline", ".", "index", ",", "np", ".", "where", "(", "np", ".", "diff", "(", "requested_qtr_timeline", "[", "sid", "]", ")", "!=", "0", ")", "[", "0", "]", "+", "1", ")", "requested_quarters_per_range", "=", "[", "requested_qtr_timeline", "[", "sid", "]", "[", "r", "[", "0", "]", "]", "for", "r", "in", "qtr_ranges_idxs", "]", "# Try to apply each adjustment to each quarter range.", "for", "i", ",", "qtr_range", "in", "enumerate", "(", "qtr_ranges_idxs", ")", ":", "for", "adjustment", ",", "date_index", ",", "timestamp", "in", "zip", "(", "*", "post_adjustments", ")", ":", "# In the default case, apply through the end of the quarter", "upper_bound", "=", "qtr_range", "[", "-", "1", "]", "# Find the smallest KD in estimates that is on or after the", "# date of the given adjustment. Apply the given adjustment", "# until that KD.", "end_idx", "=", "self", ".", "determine_end_idx_for_adjustment", "(", "timestamp", ",", "requested_qtr_data", ".", "index", ",", "upper_bound", ",", "requested_quarters_per_range", "[", "i", "]", ",", "sid_estimates", ")", "# In the default case, apply adjustment on the first day of", "# the quarter.", "start_idx", "=", "qtr_range", "[", "0", "]", "# If the adjustment happens during this quarter, apply the", "# adjustment on the day it happens.", "if", "date_index", ">", "start_idx", ":", "start_idx", "=", "date_index", "# We only want to apply the adjustment if we have any stale", "# data to apply it to.", "if", "qtr_range", "[", "0", "]", "<=", "end_idx", ":", "for", "column_name", "in", "requested_split_adjusted_columns", ":", "if", "column_name", "not", "in", "col_to_split_adjustments", ":", "col_to_split_adjustments", "[", "column_name", "]", "=", "{", "}", "adj", "=", "Float64Multiply", "(", "# Always apply from first day of qtr", "qtr_range", "[", "0", "]", ",", "end_idx", ",", "sid_idx", ",", "sid_idx", ",", "adjustment", ")", "add_new_adjustments", "(", "col_to_split_adjustments", ",", "[", "adj", "]", ",", "column_name", ",", "start_idx", ")", "return", "col_to_split_adjustments" ]
Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date.
[ "Collect", "split", "adjustments", "that", "occur", "after", "the", "split", "-", "adjusted", "-", "asof", "-", "date", ".", "Each", "adjustment", "needs", "to", "be", "applied", "to", "all", "dates", "on", "which", "knowledge", "for", "the", "requested", "quarter", "was", "older", "than", "the", "date", "of", "the", "adjustment", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1117-L1215
train
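The quarter-range split above leans on a small numpy idiom: break an integer index wherever the requested-quarter series changes value. Isolated from the loader, with made-up quarter numbers:

.. code-block:: python

    import numpy as np

    requested = np.array([4.0, 4.0, 5.0, 5.0, 5.0, 6.0])  # quarter per date
    idx = np.arange(len(requested))
    # Cut the index after every position where the quarter changes.
    ranges = np.split(idx, np.where(np.diff(requested) != 0)[0] + 1)
    print([r.tolist() for r in ranges])               # [[0, 1], [2, 3, 4], [5]]
    print([float(requested[r[0]]) for r in ranges])   # [4.0, 5.0, 6.0]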
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.retrieve_split_adjustment_data_for_sid
def retrieve_split_adjustment_data_for_sid(self, dates, sid, split_adjusted_asof_idx): """ Retrieve the split adjustments for the given sid and partition them into those that occurred on or before the split-adjusted-asof-date and those that occurred after. Parameters ---------- dates : pd.DatetimeIndex The calendar dates. sid : int The sid for which we want to retrieve adjustments. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. Returns ------- pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. """ adjustments = self._split_adjustments.get_adjustments_for_sid( 'splits', sid ) adjustments = sorted(adjustments, key=lambda adj: adj[0]) # Get rid of any adjustments that happen outside of our date index. adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments)) adjustment_values = np.array([adj[1] for adj in adjustments]) timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments]) # We need the first date on which we would have known about each # adjustment. date_indexes = dates.searchsorted(timestamps) pre_adjustment_idxs = np.where( date_indexes <= split_adjusted_asof_idx )[0] last_adjustment_split_asof_idx = -1 if len(pre_adjustment_idxs): last_adjustment_split_asof_idx = pre_adjustment_idxs.max() pre_adjustments = ( adjustment_values[:last_adjustment_split_asof_idx + 1], date_indexes[:last_adjustment_split_asof_idx + 1] ) post_adjustments = ( adjustment_values[last_adjustment_split_asof_idx + 1:], date_indexes[last_adjustment_split_asof_idx + 1:], timestamps[last_adjustment_split_asof_idx + 1:] ) return pre_adjustments, post_adjustments
python
def retrieve_split_adjustment_data_for_sid(self, dates, sid, split_adjusted_asof_idx): """ Retrieve the split adjustments for the given sid and partition them into those that occurred on or before the split-adjusted-asof-date and those that occurred after. Parameters ---------- dates : pd.DatetimeIndex The calendar dates. sid : int The sid for which we want to retrieve adjustments. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. Returns ------- pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. """ adjustments = self._split_adjustments.get_adjustments_for_sid( 'splits', sid ) adjustments = sorted(adjustments, key=lambda adj: adj[0]) # Get rid of any adjustments that happen outside of our date index. adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments)) adjustment_values = np.array([adj[1] for adj in adjustments]) timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments]) # We need the first date on which we would have known about each # adjustment. date_indexes = dates.searchsorted(timestamps) pre_adjustment_idxs = np.where( date_indexes <= split_adjusted_asof_idx )[0] last_adjustment_split_asof_idx = -1 if len(pre_adjustment_idxs): last_adjustment_split_asof_idx = pre_adjustment_idxs.max() pre_adjustments = ( adjustment_values[:last_adjustment_split_asof_idx + 1], date_indexes[:last_adjustment_split_asof_idx + 1] ) post_adjustments = ( adjustment_values[last_adjustment_split_asof_idx + 1:], date_indexes[last_adjustment_split_asof_idx + 1:], timestamps[last_adjustment_split_asof_idx + 1:] ) return pre_adjustments, post_adjustments
[ "def", "retrieve_split_adjustment_data_for_sid", "(", "self", ",", "dates", ",", "sid", ",", "split_adjusted_asof_idx", ")", ":", "adjustments", "=", "self", ".", "_split_adjustments", ".", "get_adjustments_for_sid", "(", "'splits'", ",", "sid", ")", "adjustments", "=", "sorted", "(", "adjustments", ",", "key", "=", "lambda", "adj", ":", "adj", "[", "0", "]", ")", "# Get rid of any adjustments that happen outside of our date index.", "adjustments", "=", "list", "(", "filter", "(", "lambda", "x", ":", "dates", "[", "0", "]", "<=", "x", "[", "0", "]", "<=", "dates", "[", "-", "1", "]", ",", "adjustments", ")", ")", "adjustment_values", "=", "np", ".", "array", "(", "[", "adj", "[", "1", "]", "for", "adj", "in", "adjustments", "]", ")", "timestamps", "=", "pd", ".", "DatetimeIndex", "(", "[", "adj", "[", "0", "]", "for", "adj", "in", "adjustments", "]", ")", "# We need the first date on which we would have known about each", "# adjustment.", "date_indexes", "=", "dates", ".", "searchsorted", "(", "timestamps", ")", "pre_adjustment_idxs", "=", "np", ".", "where", "(", "date_indexes", "<=", "split_adjusted_asof_idx", ")", "[", "0", "]", "last_adjustment_split_asof_idx", "=", "-", "1", "if", "len", "(", "pre_adjustment_idxs", ")", ":", "last_adjustment_split_asof_idx", "=", "pre_adjustment_idxs", ".", "max", "(", ")", "pre_adjustments", "=", "(", "adjustment_values", "[", ":", "last_adjustment_split_asof_idx", "+", "1", "]", ",", "date_indexes", "[", ":", "last_adjustment_split_asof_idx", "+", "1", "]", ")", "post_adjustments", "=", "(", "adjustment_values", "[", "last_adjustment_split_asof_idx", "+", "1", ":", "]", ",", "date_indexes", "[", "last_adjustment_split_asof_idx", "+", "1", ":", "]", ",", "timestamps", "[", "last_adjustment_split_asof_idx", "+", "1", ":", "]", ")", "return", "pre_adjustments", ",", "post_adjustments" ]
Retrieve the split adjustments for the given sid and partition them into those that occurred on or before the split-adjusted-asof-date and those that occurred after. Parameters ---------- dates : pd.DatetimeIndex The calendar dates. sid : int The sid for which we want to retrieve adjustments. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. Returns ------- pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date.
[ "Retrieve", "the", "split", "adjustments", "for", "the", "given", "sid", "and", "partition", "them", "into", "those", "that", "occurred", "on", "or", "before", "the", "split", "-", "adjusted", "-", "asof", "-", "date", "and", "those", "that", "occurred", "after", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1217-L1265
train
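The pre/post partition reduces to a `searchsorted` over knowledge dates. A standalone sketch with invented split data; it uses `np.searchsorted(..., side='right')` in place of the loader's `np.where(...).max()` bookkeeping, which is equivalent here because the date indexes are sorted:

.. code-block:: python

    import numpy as np
    import pandas as pd

    dates = pd.date_range('2015-01-05', periods=6)
    asof_idx = 2  # data is split-adjusted as of dates[2]

    # (timestamp, ratio) pairs, as returned by an adjustments reader.
    splits = [(pd.Timestamp('2015-01-06'), 2.0),
              (pd.Timestamp('2015-01-09'), 3.0)]
    values = np.array([ratio for _, ratio in splits])
    timestamps = pd.DatetimeIndex([ts for ts, _ in splits])

    # First date index on which each split would have been known.
    date_indexes = dates.searchsorted(timestamps)
    cut = np.searchsorted(date_indexes, asof_idx, side='right')
    pre = (values[:cut], date_indexes[:cut])
    post = (values[cut:], date_indexes[cut:], timestamps[cut:])
    print(pre)   # (array([2.]), array([1]))
    print(post)  # (array([3.]), array([4]), DatetimeIndex(['2015-01-09'], ...))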
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
SplitAdjustedEstimatesLoader.merge_split_adjustments_with_overwrites
def merge_split_adjustments_with_overwrites( self, pre, post, overwrites, requested_split_adjusted_columns ): """ Merge split adjustments with the dict containing overwrites. Parameters ---------- pre : dict[str -> dict[int -> list]] The adjustments that occur before the split-adjusted-asof-date. post : dict[str -> dict[int -> list]] The adjustments that occur after the split-adjusted-asof-date. overwrites : dict[str -> dict[int -> list]] The overwrites across all time. Adjustments will be merged into this dictionary. requested_split_adjusted_columns : list of str List of names of split adjusted columns that are being requested. """ for column_name in requested_split_adjusted_columns: # We can do a merge here because the timestamps in 'pre' and # 'post' are guaranteed to not overlap. if pre: # Either empty or contains all columns. for ts in pre[column_name]: add_new_adjustments( overwrites, pre[column_name][ts], column_name, ts ) if post: # Either empty or contains all columns. for ts in post[column_name]: add_new_adjustments( overwrites, post[column_name][ts], column_name, ts )
python
def merge_split_adjustments_with_overwrites( self, pre, post, overwrites, requested_split_adjusted_columns ): """ Merge split adjustments with the dict containing overwrites. Parameters ---------- pre : dict[str -> dict[int -> list]] The adjustments that occur before the split-adjusted-asof-date. post : dict[str -> dict[int -> list]] The adjustments that occur after the split-adjusted-asof-date. overwrites : dict[str -> dict[int -> list]] The overwrites across all time. Adjustments will be merged into this dictionary. requested_split_adjusted_columns : list of str List of names of split adjusted columns that are being requested. """ for column_name in requested_split_adjusted_columns: # We can do a merge here because the timestamps in 'pre' and # 'post' are guaranteed to not overlap. if pre: # Either empty or contains all columns. for ts in pre[column_name]: add_new_adjustments( overwrites, pre[column_name][ts], column_name, ts ) if post: # Either empty or contains all columns. for ts in post[column_name]: add_new_adjustments( overwrites, post[column_name][ts], column_name, ts )
[ "def", "merge_split_adjustments_with_overwrites", "(", "self", ",", "pre", ",", "post", ",", "overwrites", ",", "requested_split_adjusted_columns", ")", ":", "for", "column_name", "in", "requested_split_adjusted_columns", ":", "# We can do a merge here because the timestamps in 'pre' and", "# 'post' are guaranteed to not overlap.", "if", "pre", ":", "# Either empty or contains all columns.", "for", "ts", "in", "pre", "[", "column_name", "]", ":", "add_new_adjustments", "(", "overwrites", ",", "pre", "[", "column_name", "]", "[", "ts", "]", ",", "column_name", ",", "ts", ")", "if", "post", ":", "# Either empty or contains all columns.", "for", "ts", "in", "post", "[", "column_name", "]", ":", "add_new_adjustments", "(", "overwrites", ",", "post", "[", "column_name", "]", "[", "ts", "]", ",", "column_name", ",", "ts", ")" ]
Merge split adjustments with the dict containing overwrites. Parameters ---------- pre : dict[str -> dict[int -> list]] The adjustments that occur before the split-adjusted-asof-date. post : dict[str -> dict[int -> list]] The adjustments that occur after the split-adjusted-asof-date. overwrites : dict[str -> dict[int -> list]] The overwrites across all time. Adjustments will be merged into this dictionary. requested_split_adjusted_columns : list of str List of names of split adjusted columns that are being requested.
[ "Merge", "split", "adjustments", "with", "the", "dict", "containing", "overwrites", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1294-L1336
train
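`add_new_adjustments` is not reproduced in this section; the merge above only needs it to append a list of adjustments under a (column, date-index) key, creating the key if necessary. A minimal stand-in with strings in place of real Adjustment objects makes the non-overlapping merge concrete:

.. code-block:: python

    def add_new_adjustments(col_to_adjs, adjs, column_name, ts):
        # Assumed behavior: append under (column, date-index), creating keys.
        col_to_adjs.setdefault(column_name, {}).setdefault(ts, []).extend(adjs)

    overwrites = {'estimate': {5: ['overwrite@5']}}
    pre = {'estimate': {0: ['unapply@0'], 2: ['reapply@2']}}
    post = {'estimate': {7: ['split@7']}}

    for column_name in ['estimate']:
        if pre:  # either empty or contains all requested columns
            for ts in pre[column_name]:
                add_new_adjustments(overwrites, pre[column_name][ts],
                                    column_name, ts)
        if post:
            for ts in post[column_name]:
                add_new_adjustments(overwrites, post[column_name][ts],
                                    column_name, ts)

    print(sorted(overwrites['estimate'].items()))
    # [(0, ['unapply@0']), (2, ['reapply@2']),
    #  (5, ['overwrite@5']), (7, ['split@7'])]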
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
PreviousSplitAdjustedEarningsEstimatesLoader.collect_split_adjustments
def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for previous quarters and apply them to the given dictionary of splits for the given sid. Since overwrites just replace all estimates before the new quarter with NaN, we don't need to worry about re-applying split adjustments. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns )
python
def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for previous quarters and apply them to the given dictionary of splits for the given sid. Since overwrites just replace all estimates before the new quarter with NaN, we don't need to worry about re-applying split adjustments. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns )
[ "def", "collect_split_adjustments", "(", "self", ",", "adjustments_for_sid", ",", "requested_qtr_data", ",", "dates", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "split_adjusted_asof_idx", ",", "pre_adjustments", ",", "post_adjustments", ",", "requested_split_adjusted_columns", ")", ":", "(", "pre_adjustments_dict", ",", "post_adjustments_dict", ")", "=", "self", ".", "_collect_adjustments", "(", "requested_qtr_data", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "split_adjusted_asof_idx", ",", "pre_adjustments", ",", "post_adjustments", ",", "requested_split_adjusted_columns", ")", "self", ".", "merge_split_adjustments_with_overwrites", "(", "pre_adjustments_dict", ",", "post_adjustments_dict", ",", "adjustments_for_sid", ",", "requested_split_adjusted_columns", ")" ]
Collect split adjustments for previous quarters and apply them to the given dictionary of splits for the given sid. Since overwrites just replace all estimates before the new quarter with NaN, we don't need to worry about re-applying split adjustments. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names.
[ "Collect", "split", "adjustments", "for", "previous", "quarters", "and", "apply", "them", "to", "the", "given", "dictionary", "of", "splits", "for", "the", "given", "sid", ".", "Since", "overwrites", "just", "replace", "all", "estimates", "before", "the", "new", "quarter", "with", "NaN", "we", "don", "t", "need", "to", "worry", "about", "re", "-", "applying", "split", "adjustments", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1342-L1401
train
quantopian/zipline
zipline/pipeline/loaders/earnings_estimates.py
NextSplitAdjustedEarningsEstimatesLoader.collect_split_adjustments
def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for future quarters. Re-apply adjustments that would be overwritten by overwrites. Merge split adjustments with overwrites into the given dictionary of splits for the given sid. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns, ) for column_name in requested_split_adjusted_columns: for overwrite_ts in adjustments_for_sid[column_name]: # We need to cumulatively re-apply all adjustments up to the # split-adjusted-asof-date. We might not have any # pre-adjustments, so we should check for that. if overwrite_ts <= split_adjusted_asof_idx \ and pre_adjustments_dict: for split_ts in pre_adjustments_dict[column_name]: # The split has to have occurred during the span of # the overwrite. if split_ts < overwrite_ts: # Create new adjustments here so that we can # re-apply all applicable adjustments to ONLY # the dates being overwritten. adjustments_for_sid[ column_name ][overwrite_ts].extend([ Float64Multiply( 0, overwrite_ts - 1, sid_idx, sid_idx, adjustment.value ) for adjustment in pre_adjustments_dict[ column_name ][split_ts] ]) # After the split-adjusted-asof-date, we need to re-apply all # adjustments that occur after that date and within the # bounds of the overwrite. They need to be applied starting # from the first date and until an end date. The end date is # the date of the newest information we get about # `requested_quarter` that is >= `split_ts`, or if there is no # new knowledge before `overwrite_ts`, then it is the date # before `overwrite_ts`. else: # Overwrites happen at the first index of a new quarter, # so determine here which quarter that is.
requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid ].iloc[overwrite_ts] for adjustment_value, date_index, timestamp in zip( *post_adjustments ): if split_adjusted_asof_idx < date_index < overwrite_ts: # Assume the entire overwrite contains stale data upper_bound = overwrite_ts - 1 end_idx = self.determine_end_idx_for_adjustment( timestamp, dates, upper_bound, requested_quarter, sid_estimates ) adjustments_for_sid[ column_name ][overwrite_ts].append( Float64Multiply( 0, end_idx, sid_idx, sid_idx, adjustment_value ) ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns )
python
def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for future quarters. Re-apply adjustments that would be overwritten by overwrites. Merge split adjustments with overwrites into the given dictionary of splits for the given sid. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns, ) for column_name in requested_split_adjusted_columns: for overwrite_ts in adjustments_for_sid[column_name]: # We need to cumulatively re-apply all adjustments up to the # split-adjusted-asof-date. We might not have any # pre-adjustments, so we should check for that. if overwrite_ts <= split_adjusted_asof_idx \ and pre_adjustments_dict: for split_ts in pre_adjustments_dict[column_name]: # The split has to have occurred during the span of # the overwrite. if split_ts < overwrite_ts: # Create new adjustments here so that we can # re-apply all applicable adjustments to ONLY # the dates being overwritten. adjustments_for_sid[ column_name ][overwrite_ts].extend([ Float64Multiply( 0, overwrite_ts - 1, sid_idx, sid_idx, adjustment.value ) for adjustment in pre_adjustments_dict[ column_name ][split_ts] ]) # After the split-adjusted-asof-date, we need to re-apply all # adjustments that occur after that date and within the # bounds of the overwrite. They need to be applied starting # from the first date and until an end date. The end date is # the date of the newest information we get about # `requested_quarter` that is >= `split_ts`, or if there is no # new knowledge before `overwrite_ts`, then it is the date # before `overwrite_ts`. else: # Overwrites happen at the first index of a new quarter, # so determine here which quarter that is.
requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid ].iloc[overwrite_ts] for adjustment_value, date_index, timestamp in zip( *post_adjustments ): if split_adjusted_asof_idx < date_index < overwrite_ts: # Assume the entire overwrite contains stale data upper_bound = overwrite_ts - 1 end_idx = self.determine_end_idx_for_adjustment( timestamp, dates, upper_bound, requested_quarter, sid_estimates ) adjustments_for_sid[ column_name ][overwrite_ts].append( Float64Multiply( 0, end_idx, sid_idx, sid_idx, adjustment_value ) ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns )
[ "def", "collect_split_adjustments", "(", "self", ",", "adjustments_for_sid", ",", "requested_qtr_data", ",", "dates", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "split_adjusted_asof_idx", ",", "pre_adjustments", ",", "post_adjustments", ",", "requested_split_adjusted_columns", ")", ":", "(", "pre_adjustments_dict", ",", "post_adjustments_dict", ")", "=", "self", ".", "_collect_adjustments", "(", "requested_qtr_data", ",", "sid", ",", "sid_idx", ",", "sid_estimates", ",", "split_adjusted_asof_idx", ",", "pre_adjustments", ",", "post_adjustments", ",", "requested_split_adjusted_columns", ",", ")", "for", "column_name", "in", "requested_split_adjusted_columns", ":", "for", "overwrite_ts", "in", "adjustments_for_sid", "[", "column_name", "]", ":", "# We need to cumulatively re-apply all adjustments up to the", "# split-adjusted-asof-date. We might not have any", "# pre-adjustments, so we should check for that.", "if", "overwrite_ts", "<=", "split_adjusted_asof_idx", "and", "pre_adjustments_dict", ":", "for", "split_ts", "in", "pre_adjustments_dict", "[", "column_name", "]", ":", "# The split has to have occurred during the span of", "# the overwrite.", "if", "split_ts", "<", "overwrite_ts", ":", "# Create new adjustments here so that we can", "# re-apply all applicable adjustments to ONLY", "# the dates being overwritten.", "adjustments_for_sid", "[", "column_name", "]", "[", "overwrite_ts", "]", ".", "extend", "(", "[", "Float64Multiply", "(", "0", ",", "overwrite_ts", "-", "1", ",", "sid_idx", ",", "sid_idx", ",", "adjustment", ".", "value", ")", "for", "adjustment", "in", "pre_adjustments_dict", "[", "column_name", "]", "[", "split_ts", "]", "]", ")", "# After the split-adjusted-asof-date, we need to re-apply all", "# adjustments that occur after that date and within the", "# bounds of the overwrite. They need to be applied starting", "# from the first date and until an end date. The end date is", "# the date of the newest information we get about", "# `requested_quarter` that is >= `split_ts`, or if there is no", "# new knowledge before `overwrite_ts`, then it is the date", "# before `overwrite_ts`.", "else", ":", "# Overwrites happen at the first index of a new quarter,", "# so determine here which quarter that is.", "requested_quarter", "=", "requested_qtr_data", "[", "SHIFTED_NORMALIZED_QTRS", ",", "sid", "]", ".", "iloc", "[", "overwrite_ts", "]", "for", "adjustment_value", ",", "date_index", ",", "timestamp", "in", "zip", "(", "*", "post_adjustments", ")", ":", "if", "split_adjusted_asof_idx", "<", "date_index", "<", "overwrite_ts", ":", "# Assume the entire overwrite contains stale data", "upper_bound", "=", "overwrite_ts", "-", "1", "end_idx", "=", "self", ".", "determine_end_idx_for_adjustment", "(", "timestamp", ",", "dates", ",", "upper_bound", ",", "requested_quarter", ",", "sid_estimates", ")", "adjustments_for_sid", "[", "column_name", "]", "[", "overwrite_ts", "]", ".", "append", "(", "Float64Multiply", "(", "0", ",", "end_idx", ",", "sid_idx", ",", "sid_idx", ",", "adjustment_value", ")", ")", "self", ".", "merge_split_adjustments_with_overwrites", "(", "pre_adjustments_dict", ",", "post_adjustments_dict", ",", "adjustments_for_sid", ",", "requested_split_adjusted_columns", ")" ]
Collect split adjustments for future quarters. Re-apply adjustments that would be overwritten by overwrites. Merge split adjustments with overwrites into the given dictionary of splits for the given sid. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int)) The adjustment values and indexes in `dates` for adjustments that happened on or before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names.
[ "Collect", "split", "adjustments", "for", "future", "quarters", ".", "Re", "-", "apply", "adjustments", "that", "would", "be", "overwritten", "by", "overwrites", ".", "Merge", "split", "adjustments", "with", "overwrites", "into", "the", "given", "dictionary", "of", "splits", "for", "the", "given", "sid", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/earnings_estimates.py#L1407-L1534
train
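The pre-asof branch above (re-applying splits that were already known when an overwrite lands, over only the rows the overwrite blanks out) can be reduced to a few lines. Tuples `(first_row, last_row, multiplier)` and strings stand in for `Float64Multiply` objects and overwrites; all data is invented:

.. code-block:: python

    asof_idx = 4
    overwrites = {'estimate': {3: ['overwrite rows 0-2']}}
    pre = {'estimate': {1: [(0, asof_idx, 2.0)]}}  # 2:1 split known at index 1

    for column_name in overwrites:
        for overwrite_ts in overwrites[column_name]:
            # Re-apply, over ONLY the overwritten rows, every split that
            # was already known when this pre-asof overwrite takes effect.
            if overwrite_ts <= asof_idx and pre:
                for split_ts in pre[column_name]:
                    if split_ts < overwrite_ts:
                        overwrites[column_name][overwrite_ts].extend(
                            (0, overwrite_ts - 1, mult)
                            for _, _, mult in pre[column_name][split_ts]
                        )

    print(overwrites['estimate'][3])
    # ['overwrite rows 0-2', (0, 2, 2.0)]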
quantopian/zipline
zipline/pipeline/factors/basic.py
_ExponentialWeightedFactor.from_span
def from_span(cls, inputs, window_length, span, **kwargs): """ Convenience constructor for passing `decay_rate` in terms of `span`. Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the behavior equivalent to passing `span` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (2.0 / (1 + 15.0))), # ) my_ewma = EWMA.from_span( inputs=[EquityPricing.close], window_length=30, span=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ if span <= 1: raise ValueError( "`span` must be greater than 1. %s was passed." % span ) decay_rate = (1.0 - (2.0 / (1.0 + span))) assert 0.0 < decay_rate <= 1.0 return cls( inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs )
python
def from_span(cls, inputs, window_length, span, **kwargs): """ Convenience constructor for passing `decay_rate` in terms of `span`. Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the behavior equivalent to passing `span` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (2.0 / (1 + 15.0))), # ) my_ewma = EWMA.from_span( inputs=[EquityPricing.close], window_length=30, span=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ if span <= 1: raise ValueError( "`span` must be greater than 1. %s was passed." % span ) decay_rate = (1.0 - (2.0 / (1.0 + span))) assert 0.0 < decay_rate <= 1.0 return cls( inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs )
[ "def", "from_span", "(", "cls", ",", "inputs", ",", "window_length", ",", "span", ",", "*", "*", "kwargs", ")", ":", "if", "span", "<=", "1", ":", "raise", "ValueError", "(", "\"`span` must be greater than 1. %s was passed.\"", "%", "span", ")", "decay_rate", "=", "(", "1.0", "-", "(", "2.0", "/", "(", "1.0", "+", "span", ")", ")", ")", "assert", "0.0", "<", "decay_rate", "<=", "1.0", "return", "cls", "(", "inputs", "=", "inputs", ",", "window_length", "=", "window_length", ",", "decay_rate", "=", "decay_rate", ",", "*", "*", "kwargs", ")" ]
Convenience constructor for passing `decay_rate` in terms of `span`. Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the behavior equivalent to passing `span` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (2.0 / (1 + 15.0))), # ) my_ewma = EWMA.from_span( inputs=[EquityPricing.close], window_length=30, span=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`.
[ "Convenience", "constructor", "for", "passing", "decay_rate", "in", "terms", "of", "span", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L198-L240
train
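The span-to-decay-rate mapping can be sanity-checked against pandas directly, using the modern `Series.ewm` rather than the long-deprecated `pandas.ewma` named in the docstring: pandas' span parameterization uses `alpha = 2 / (1 + span)`, and `decay_rate` is simply `1 - alpha`:

.. code-block:: python

    import numpy as np
    import pandas as pd

    span = 15.0
    decay_rate = 1.0 - (2.0 / (1.0 + span))
    s = pd.Series(np.arange(30.0))
    # ewm(span=...) and ewm(alpha=1 - decay_rate) weight data identically.
    assert np.allclose(
        s.ewm(span=span).mean(),
        s.ewm(alpha=1.0 - decay_rate).mean(),
    )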
quantopian/zipline
zipline/pipeline/factors/basic.py
_ExponentialWeightedFactor.from_halflife
def from_halflife(cls, inputs, window_length, halflife, **kwargs): """ Convenience constructor for passing ``decay_rate`` in terms of half life. Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides the behavior equivalent to passing `halflife` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=np.exp(np.log(0.5) / 15), # ) my_ewma = EWMA.from_halflife( inputs=[EquityPricing.close], window_length=30, halflife=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ if halflife <= 0: raise ValueError( "`halflife` must be a positive number. %s was passed." % halflife ) decay_rate = exp(log(.5) / halflife) assert 0.0 < decay_rate <= 1.0 return cls( inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs )
python
def from_halflife(cls, inputs, window_length, halflife, **kwargs): """ Convenience constructor for passing ``decay_rate`` in terms of half life. Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides the behavior equivalent to passing `halflife` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=np.exp(np.log(0.5) / 15), # ) my_ewma = EWMA.from_halflife( inputs=[EquityPricing.close], window_length=30, halflife=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ if halflife <= 0: raise ValueError( "`halflife` must be a positive number. %s was passed." % halflife ) decay_rate = exp(log(.5) / halflife) assert 0.0 < decay_rate <= 1.0 return cls( inputs=inputs, window_length=window_length, decay_rate=decay_rate, **kwargs )
[ "def", "from_halflife", "(", "cls", ",", "inputs", ",", "window_length", ",", "halflife", ",", "*", "*", "kwargs", ")", ":", "if", "halflife", "<=", "0", ":", "raise", "ValueError", "(", "\"`halflife` must be a positive number. %s was passed.\"", "%", "halflife", ")", "decay_rate", "=", "exp", "(", "log", "(", ".5", ")", "/", "halflife", ")", "assert", "0.0", "<", "decay_rate", "<=", "1.0", "return", "cls", "(", "inputs", "=", "inputs", ",", "window_length", "=", "window_length", ",", "decay_rate", "=", "decay_rate", ",", "*", "*", "kwargs", ")" ]
Convenience constructor for passing ``decay_rate`` in terms of half life. Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides the behavior equivalent to passing `halflife` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=np.exp(np.log(0.5) / 15), # ) my_ewma = EWMA.from_halflife( inputs=[EquityPricing.close], window_length=30, halflife=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`.
[ "Convenience", "constructor", "for", "passing", "decay_rate", "in", "terms", "of", "half", "life", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L244-L286
train
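The half-life mapping satisfies a simple identity worth keeping in mind: after `halflife` periods the weight on an observation has exactly halved, since `decay_rate ** halflife == exp(log(0.5)) == 0.5`:

.. code-block:: python

    from math import exp, log

    halflife = 15.0
    decay_rate = exp(log(0.5) / halflife)
    # An observation's weight halves every `halflife` periods.
    print(round(decay_rate ** halflife, 12))  # 0.5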
quantopian/zipline
zipline/pipeline/factors/basic.py
_ExponentialWeightedFactor.from_center_of_mass
def from_center_of_mass(cls, inputs, window_length, center_of_mass, **kwargs): """ Convenience constructor for passing `decay_rate` in terms of center of mass. Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides behavior equivalent to passing `center_of_mass` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (1 / 15.0)), # ) my_ewma = EWMA.from_center_of_mass( inputs=[EquityPricing.close], window_length=30, center_of_mass=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ return cls( inputs=inputs, window_length=window_length, decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))), **kwargs )
python
def from_center_of_mass(cls, inputs, window_length, center_of_mass, **kwargs): """ Convenience constructor for passing `decay_rate` in terms of center of mass. Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides behavior equivalent to passing `center_of_mass` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (1 / 15.0)), # ) my_ewma = EWMA.from_center_of_mass( inputs=[EquityPricing.close], window_length=30, center_of_mass=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`. """ return cls( inputs=inputs, window_length=window_length, decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))), **kwargs )
[ "def", "from_center_of_mass", "(", "cls", ",", "inputs", ",", "window_length", ",", "center_of_mass", ",", "*", "*", "kwargs", ")", ":", "return", "cls", "(", "inputs", "=", "inputs", ",", "window_length", "=", "window_length", ",", "decay_rate", "=", "(", "1.0", "-", "(", "1.0", "/", "(", "1.0", "+", "center_of_mass", ")", ")", ")", ",", "*", "*", "kwargs", ")" ]
Convenience constructor for passing `decay_rate` in terms of center of mass. Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides behavior equivalent to passing `center_of_mass` to pandas.ewma. Examples -------- .. code-block:: python # Equivalent to: # my_ewma = EWMA( # inputs=[EquityPricing.close], # window_length=30, # decay_rate=(1 - (1 / 15.0)), # ) my_ewma = EWMA.from_center_of_mass( inputs=[EquityPricing.close], window_length=30, center_of_mass=15, ) Notes ----- This classmethod is provided by both :class:`ExponentialWeightedMovingAverage` and :class:`ExponentialWeightedMovingStdDev`.
[ "Convenience", "constructor", "for", "passing", "decay_rate", "in", "terms", "of", "center", "of", "mass", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/basic.py#L289-L328
train
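The center-of-mass identity can be sanity-checked against pandas directly. A sketch, assuming ``ewm(com=..., adjust=True)``, whose weights are ``(1 - alpha) ** i`` with ``alpha = 1 / (1 + com)`` -- i.e. exactly ``decay_rate ** i``:

.. code-block:: python

    import numpy as np
    import pandas as pd

    com = 15
    decay_rate = 1.0 - 1.0 / (1.0 + com)  # alpha = 1 / (1 + com)

    s = pd.Series(np.random.RandomState(0).randn(30))
    # Oldest observation gets the smallest weight.
    weights = decay_rate ** np.arange(len(s) - 1, -1, -1)
    manual = (s * weights).sum() / weights.sum()

    assert np.isclose(manual, s.ewm(com=com, adjust=True).mean().iloc[-1])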
quantopian/zipline
zipline/utils/math_utils.py
tolerant_equals
def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False): """Check if a and b are equal with some tolerance. Parameters ---------- a, b : float The floats to check for equality. atol : float, optional The absolute tolerance. rtol : float, optional The relative tolerance. equal_nan : bool, optional Should NaN compare equal? See Also -------- numpy.isclose Notes ----- This function is just a scalar version of numpy.isclose for performance. See the docstring of ``isclose`` for more information about ``atol`` and ``rtol``. """ if equal_nan and isnan(a) and isnan(b): return True return math.fabs(a - b) <= (atol + rtol * math.fabs(b))
python
def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False): """Check if a and b are equal with some tolerance. Parameters ---------- a, b : float The floats to check for equality. atol : float, optional The absolute tolerance. rtol : float, optional The relative tolerance. equal_nan : bool, optional Should NaN compare equal? See Also -------- numpy.isclose Notes ----- This function is just a scalar version of numpy.isclose for performance. See the docstring of ``isclose`` for more information about ``atol`` and ``rtol``. """ if equal_nan and isnan(a) and isnan(b): return True return math.fabs(a - b) <= (atol + rtol * math.fabs(b))
[ "def", "tolerant_equals", "(", "a", ",", "b", ",", "atol", "=", "10e-7", ",", "rtol", "=", "10e-7", ",", "equal_nan", "=", "False", ")", ":", "if", "equal_nan", "and", "isnan", "(", "a", ")", "and", "isnan", "(", "b", ")", ":", "return", "True", "return", "math", ".", "fabs", "(", "a", "-", "b", ")", "<=", "(", "atol", "+", "rtol", "*", "math", ".", "fabs", "(", "b", ")", ")" ]
Check if a and b are equal with some tolerance. Parameters ---------- a, b : float The floats to check for equality. atol : float, optional The absolute tolerance. rtol : float, optional The relative tolerance. equal_nan : bool, optional Should NaN compare equal? See Also -------- numpy.isclose Notes ----- This function is just a scalar version of numpy.isclose for performance. See the docstring of ``isclose`` for more information about ``atol`` and ``rtol``.
[ "Check", "if", "a", "and", "b", "are", "equal", "with", "some", "tolerance", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/math_utils.py#L21-L47
train
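Example usage. With the defaults, ``atol`` and ``rtol`` are both ``10e-7`` (i.e. ``1e-6``), and, as with ``numpy.isclose``, the relative term scales with ``|b|``, so the check is not symmetric in its arguments. The import path below is taken from this record:

.. code-block:: python

    from zipline.utils.math_utils import tolerant_equals

    assert tolerant_equals(1.0, 1.0 + 5e-8)   # within atol + rtol * |b|
    assert not tolerant_equals(1.0, 1.001)    # well outside the tolerance
    assert tolerant_equals(float('nan'), float('nan'), equal_nan=True)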
quantopian/zipline
zipline/utils/math_utils.py
round_if_near_integer
def round_if_near_integer(a, epsilon=1e-4): """ Round a to the nearest integer if that integer is within an epsilon of a. """ if abs(a - round(a)) <= epsilon: return round(a) else: return a
python
def round_if_near_integer(a, epsilon=1e-4): """ Round a to the nearest integer if that integer is within an epsilon of a. """ if abs(a - round(a)) <= epsilon: return round(a) else: return a
[ "def", "round_if_near_integer", "(", "a", ",", "epsilon", "=", "1e-4", ")", ":", "if", "abs", "(", "a", "-", "round", "(", "a", ")", ")", "<=", "epsilon", ":", "return", "round", "(", "a", ")", "else", ":", "return", "a" ]
Round a to the nearest integer if that integer is within an epsilon of a.
[ "Round", "a", "to", "the", "nearest", "integer", "if", "that", "integer", "is", "within", "an", "epsilon", "of", "a", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/math_utils.py#L72-L80
train
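Example usage, again importing from the path in this record. With the default ``epsilon`` of ``1e-4``, values within a ten-thousandth of an integer snap to it, and everything else passes through unchanged:

.. code-block:: python

    from zipline.utils.math_utils import round_if_near_integer

    assert round_if_near_integer(3.00005) == 3    # |3.00005 - 3| <= 1e-4
    assert round_if_near_integer(2.99995) == 3    # also snaps from below
    assert round_if_near_integer(3.2) == 3.2      # left untouched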
quantopian/zipline
zipline/pipeline/factors/factor.py
coerce_numbers_to_my_dtype
def coerce_numbers_to_my_dtype(f): """ A decorator for methods whose signature is f(self, other) that coerces ``other`` to ``self.dtype``. This is used to make comparison operations between numbers and `Factor` instances work independently of whether the user supplies a float or integer literal. For example, if I write:: my_filter = my_factor > 3 my_factor probably has dtype float64, but 3 is an int, so we want to coerce to float64 before doing the comparison. """ @wraps(f) def method(self, other): if isinstance(other, Number): other = coerce_to_dtype(self.dtype, other) return f(self, other) return method
python
def coerce_numbers_to_my_dtype(f): """ A decorator for methods whose signature is f(self, other) that coerces ``other`` to ``self.dtype``. This is used to make comparison operations between numbers and `Factor` instances work independently of whether the user supplies a float or integer literal. For example, if I write:: my_filter = my_factor > 3 my_factor probably has dtype float64, but 3 is an int, so we want to coerce to float64 before doing the comparison. """ @wraps(f) def method(self, other): if isinstance(other, Number): other = coerce_to_dtype(self.dtype, other) return f(self, other) return method
[ "def", "coerce_numbers_to_my_dtype", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "method", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "Number", ")", ":", "other", "=", "coerce_to_dtype", "(", "self", ".", "dtype", ",", "other", ")", "return", "f", "(", "self", ",", "other", ")", "return", "method" ]
A decorator for methods whose signature is f(self, other) that coerces ``other`` to ``self.dtype``. This is used to make comparison operations between numbers and `Factor` instances work independently of whether the user supplies a float or integer literal. For example, if I write:: my_filter = my_factor > 3 my_factor probably has dtype float64, but 3 is an int, so we want to coerce to float64 before doing the comparison.
[ "A", "decorator", "for", "methods", "whose", "signature", "is", "f", "(", "self", "other", ")", "that", "coerces", "other", "to", "self", ".", "dtype", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L70-L91
train
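The decorator pattern above can be demonstrated outside zipline. In this sketch, ``self.dtype.type`` stands in for zipline's internal ``coerce_to_dtype`` helper, and the ``Example`` class is hypothetical:

.. code-block:: python

    from functools import wraps
    from numbers import Number

    import numpy as np

    def coerce_numbers_to_my_dtype(f):
        @wraps(f)
        def method(self, other):
            if isinstance(other, Number):
                # Stand-in for zipline's coerce_to_dtype(self.dtype, other).
                other = self.dtype.type(other)
            return f(self, other)
        return method

    class Example(object):
        dtype = np.dtype('float64')

        @coerce_numbers_to_my_dtype
        def compare(self, other):
            return type(other)

    assert Example().compare(3) is np.float64  # int literal coerced to float64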
quantopian/zipline
zipline/pipeline/factors/factor.py
binop_return_dtype
def binop_return_dtype(op, left, right): """ Compute the expected return dtype for the given binary operator. Parameters ---------- op : str Operator symbol, (e.g. '+', '-', ...). left : numpy.dtype Dtype of left hand side. right : numpy.dtype Dtype of right hand side. Returns ------- outdtype : numpy.dtype The dtype of the result of `left <op> right`. """ if is_comparison(op): if left != right: raise TypeError( "Don't know how to compute {left} {op} {right}.\n" "Comparisons are only supported between Factors of equal " "dtypes.".format(left=left, op=op, right=right) ) return bool_dtype elif left != float64_dtype or right != float64_dtype: raise TypeError( "Don't know how to compute {left} {op} {right}.\n" "Arithmetic operators are only supported between Factors of " "dtype 'float64'.".format( left=left.name, op=op, right=right.name, ) ) return float64_dtype
python
def binop_return_dtype(op, left, right): """ Compute the expected return dtype for the given binary operator. Parameters ---------- op : str Operator symbol, (e.g. '+', '-', ...). left : numpy.dtype Dtype of left hand side. right : numpy.dtype Dtype of right hand side. Returns ------- outdtype : numpy.dtype The dtype of the result of `left <op> right`. """ if is_comparison(op): if left != right: raise TypeError( "Don't know how to compute {left} {op} {right}.\n" "Comparisons are only supported between Factors of equal " "dtypes.".format(left=left, op=op, right=right) ) return bool_dtype elif left != float64_dtype or right != float64_dtype: raise TypeError( "Don't know how to compute {left} {op} {right}.\n" "Arithmetic operators are only supported between Factors of " "dtype 'float64'.".format( left=left.name, op=op, right=right.name, ) ) return float64_dtype
[ "def", "binop_return_dtype", "(", "op", ",", "left", ",", "right", ")", ":", "if", "is_comparison", "(", "op", ")", ":", "if", "left", "!=", "right", ":", "raise", "TypeError", "(", "\"Don't know how to compute {left} {op} {right}.\\n\"", "\"Comparisons are only supported between Factors of equal \"", "\"dtypes.\"", ".", "format", "(", "left", "=", "left", ",", "op", "=", "op", ",", "right", "=", "right", ")", ")", "return", "bool_dtype", "elif", "left", "!=", "float64_dtype", "or", "right", "!=", "float64_dtype", ":", "raise", "TypeError", "(", "\"Don't know how to compute {left} {op} {right}.\\n\"", "\"Arithmetic operators are only supported between Factors of \"", "\"dtype 'float64'.\"", ".", "format", "(", "left", "=", "left", ".", "name", ",", "op", "=", "op", ",", "right", "=", "right", ".", "name", ",", ")", ")", "return", "float64_dtype" ]
Compute the expected return dtype for the given binary operator. Parameters ---------- op : str Operator symbol, (e.g. '+', '-', ...). left : numpy.dtype Dtype of left hand side. right : numpy.dtype Dtype of right hand side. Returns ------- outdtype : numpy.dtype The dtype of the result of `left <op> right`.
[ "Compute", "the", "expected", "return", "dtype", "for", "the", "given", "binary", "operator", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L101-L138
train
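A usage sketch of the dtype rules this function encodes, assuming the module-level name imports from the path in this record and that ``'>'`` counts as a comparison operator:

.. code-block:: python

    import numpy as np
    from zipline.pipeline.factors.factor import binop_return_dtype

    f8 = np.dtype('float64')

    assert binop_return_dtype('>', f8, f8) == np.dtype('bool')  # comparison
    assert binop_return_dtype('+', f8, f8) == f8                # arithmetic

    try:
        binop_return_dtype('+', f8, np.dtype('int64'))  # mixed-dtype arithmetic
    except TypeError as e:
        print(e)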
quantopian/zipline
zipline/pipeline/factors/factor.py
binary_operator
def binary_operator(op): """ Factory function for making binary operator methods on a Factor subclass. Returns a function, "binary_operator" suitable for implementing functions like __add__. """ # When combining a Factor with a NumericalExpression, we use this # attrgetter instance to defer to the commuted implementation of the # NumericalExpression operator. commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) @with_doc("Binary Operator: '%s'" % op) @with_name(method_name_for_op(op)) @coerce_numbers_to_my_dtype def binary_operator(self, other): # This can't be hoisted up a scope because the types returned by # binop_return_type aren't defined when the top-level function is # invoked in the class body of Factor. return_type = binop_return_type(op) if isinstance(self, NumExprFactor): self_expr, other_expr, new_inputs = self.build_binary_op( op, other, ) return return_type( "({left}) {op} ({right})".format( left=self_expr, op=op, right=other_expr, ), new_inputs, dtype=binop_return_dtype(op, self.dtype, other.dtype), ) elif isinstance(other, NumExprFactor): # NumericalExpression overrides ops to correctly handle merging of # inputs. Look up and call the appropriate reflected operator with # ourself as the input. return commuted_method_getter(other)(self) elif isinstance(other, Term): if self is other: return return_type( "x_0 {op} x_0".format(op=op), (self,), dtype=binop_return_dtype(op, self.dtype, other.dtype), ) return return_type( "x_0 {op} x_1".format(op=op), (self, other), dtype=binop_return_dtype(op, self.dtype, other.dtype), ) elif isinstance(other, Number): return return_type( "x_0 {op} ({constant})".format(op=op, constant=other), binds=(self,), # .dtype access is safe here because coerce_numbers_to_my_dtype # will convert any input numbers to numpy equivalents. dtype=binop_return_dtype(op, self.dtype, other.dtype) ) raise BadBinaryOperator(op, self, other) return binary_operator
python
def binary_operator(op): """ Factory function for making binary operator methods on a Factor subclass. Returns a function, "binary_operator" suitable for implementing functions like __add__. """ # When combining a Factor with a NumericalExpression, we use this # attrgetter instance to defer to the commuted implementation of the # NumericalExpression operator. commuted_method_getter = attrgetter(method_name_for_op(op, commute=True)) @with_doc("Binary Operator: '%s'" % op) @with_name(method_name_for_op(op)) @coerce_numbers_to_my_dtype def binary_operator(self, other): # This can't be hoisted up a scope because the types returned by # binop_return_type aren't defined when the top-level function is # invoked in the class body of Factor. return_type = binop_return_type(op) if isinstance(self, NumExprFactor): self_expr, other_expr, new_inputs = self.build_binary_op( op, other, ) return return_type( "({left}) {op} ({right})".format( left=self_expr, op=op, right=other_expr, ), new_inputs, dtype=binop_return_dtype(op, self.dtype, other.dtype), ) elif isinstance(other, NumExprFactor): # NumericalExpression overrides ops to correctly handle merging of # inputs. Look up and call the appropriate reflected operator with # ourself as the input. return commuted_method_getter(other)(self) elif isinstance(other, Term): if self is other: return return_type( "x_0 {op} x_0".format(op=op), (self,), dtype=binop_return_dtype(op, self.dtype, other.dtype), ) return return_type( "x_0 {op} x_1".format(op=op), (self, other), dtype=binop_return_dtype(op, self.dtype, other.dtype), ) elif isinstance(other, Number): return return_type( "x_0 {op} ({constant})".format(op=op, constant=other), binds=(self,), # .dtype access is safe here because coerce_numbers_to_my_dtype # will convert any input numbers to numpy equivalents. dtype=binop_return_dtype(op, self.dtype, other.dtype) ) raise BadBinaryOperator(op, self, other) return binary_operator
[ "def", "binary_operator", "(", "op", ")", ":", "# When combining a Factor with a NumericalExpression, we use this", "# attrgetter instance to defer to the commuted implementation of the", "# NumericalExpression operator.", "commuted_method_getter", "=", "attrgetter", "(", "method_name_for_op", "(", "op", ",", "commute", "=", "True", ")", ")", "@", "with_doc", "(", "\"Binary Operator: '%s'\"", "%", "op", ")", "@", "with_name", "(", "method_name_for_op", "(", "op", ")", ")", "@", "coerce_numbers_to_my_dtype", "def", "binary_operator", "(", "self", ",", "other", ")", ":", "# This can't be hoisted up a scope because the types returned by", "# binop_return_type aren't defined when the top-level function is", "# invoked in the class body of Factor.", "return_type", "=", "binop_return_type", "(", "op", ")", "if", "isinstance", "(", "self", ",", "NumExprFactor", ")", ":", "self_expr", ",", "other_expr", ",", "new_inputs", "=", "self", ".", "build_binary_op", "(", "op", ",", "other", ",", ")", "return", "return_type", "(", "\"({left}) {op} ({right})\"", ".", "format", "(", "left", "=", "self_expr", ",", "op", "=", "op", ",", "right", "=", "other_expr", ",", ")", ",", "new_inputs", ",", "dtype", "=", "binop_return_dtype", "(", "op", ",", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ",", ")", "elif", "isinstance", "(", "other", ",", "NumExprFactor", ")", ":", "# NumericalExpression overrides ops to correctly handle merging of", "# inputs. Look up and call the appropriate reflected operator with", "# ourself as the input.", "return", "commuted_method_getter", "(", "other", ")", "(", "self", ")", "elif", "isinstance", "(", "other", ",", "Term", ")", ":", "if", "self", "is", "other", ":", "return", "return_type", "(", "\"x_0 {op} x_0\"", ".", "format", "(", "op", "=", "op", ")", ",", "(", "self", ",", ")", ",", "dtype", "=", "binop_return_dtype", "(", "op", ",", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ",", ")", "return", "return_type", "(", "\"x_0 {op} x_1\"", ".", "format", "(", "op", "=", "op", ")", ",", "(", "self", ",", "other", ")", ",", "dtype", "=", "binop_return_dtype", "(", "op", ",", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ",", ")", "elif", "isinstance", "(", "other", ",", "Number", ")", ":", "return", "return_type", "(", "\"x_0 {op} ({constant})\"", ".", "format", "(", "op", "=", "op", ",", "constant", "=", "other", ")", ",", "binds", "=", "(", "self", ",", ")", ",", "# .dtype access is safe here because coerce_numbers_to_my_dtype", "# will convert any input numbers to numpy equivalents.", "dtype", "=", "binop_return_dtype", "(", "op", ",", "self", ".", "dtype", ",", "other", ".", "dtype", ")", ")", "raise", "BadBinaryOperator", "(", "op", ",", "self", ",", "other", ")", "return", "binary_operator" ]
Factory function for making binary operator methods on a Factor subclass. Returns a function, "binary_operator" suitable for implementing functions like __add__.
[ "Factory", "function", "for", "making", "binary", "operator", "methods", "on", "a", "Factor", "subclass", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L141-L201
train
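The factory pattern above -- generating dunder methods from a table of operator symbols rather than writing each by hand -- can be illustrated without zipline's ``NumericalExpression`` machinery. A stripped-down sketch with a hypothetical ``Wrapped`` class:

.. code-block:: python

    import operator

    _OPS = {'add': operator.add, 'sub': operator.sub, 'mul': operator.mul}

    def make_binary_operator(name):
        op = _OPS[name]

        def method(self, other):
            # Unwrap the right-hand side if it is another Wrapped instance.
            other = other.value if isinstance(other, Wrapped) else other
            return Wrapped(op(self.value, other))

        method.__name__ = '__%s__' % name
        return method

    class Wrapped(object):
        def __init__(self, value):
            self.value = value

    # Install __add__, __sub__, and __mul__ generated from the table.
    for _name in _OPS:
        setattr(Wrapped, '__%s__' % _name, make_binary_operator(_name))

    assert (Wrapped(2) + Wrapped(3)).value == 5
    assert (Wrapped(2) * 4).value == 8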
quantopian/zipline
zipline/pipeline/factors/factor.py
reflected_binary_operator
def reflected_binary_operator(op): """ Factory function for making binary operator methods on a Factor. Returns a function, "reflected_binary_operator" suitable for implementing functions like __radd__. """ assert not is_comparison(op) @with_name(method_name_for_op(op, commute=True)) @coerce_numbers_to_my_dtype def reflected_binary_operator(self, other): if isinstance(self, NumericalExpression): self_expr, other_expr, new_inputs = self.build_binary_op( op, other ) return NumExprFactor( "({left}) {op} ({right})".format( left=other_expr, right=self_expr, op=op, ), new_inputs, dtype=binop_return_dtype(op, other.dtype, self.dtype) ) # Only have to handle the numeric case because in all other valid cases # the corresponding left-binding method will be called. elif isinstance(other, Number): return NumExprFactor( "{constant} {op} x_0".format(op=op, constant=other), binds=(self,), dtype=binop_return_dtype(op, other.dtype, self.dtype), ) raise BadBinaryOperator(op, other, self) return reflected_binary_operator
python
def reflected_binary_operator(op): """ Factory function for making binary operator methods on a Factor. Returns a function, "reflected_binary_operator" suitable for implementing functions like __radd__. """ assert not is_comparison(op) @with_name(method_name_for_op(op, commute=True)) @coerce_numbers_to_my_dtype def reflected_binary_operator(self, other): if isinstance(self, NumericalExpression): self_expr, other_expr, new_inputs = self.build_binary_op( op, other ) return NumExprFactor( "({left}) {op} ({right})".format( left=other_expr, right=self_expr, op=op, ), new_inputs, dtype=binop_return_dtype(op, other.dtype, self.dtype) ) # Only have to handle the numeric case because in all other valid cases # the corresponding left-binding method will be called. elif isinstance(other, Number): return NumExprFactor( "{constant} {op} x_0".format(op=op, constant=other), binds=(self,), dtype=binop_return_dtype(op, other.dtype, self.dtype), ) raise BadBinaryOperator(op, other, self) return reflected_binary_operator
[ "def", "reflected_binary_operator", "(", "op", ")", ":", "assert", "not", "is_comparison", "(", "op", ")", "@", "with_name", "(", "method_name_for_op", "(", "op", ",", "commute", "=", "True", ")", ")", "@", "coerce_numbers_to_my_dtype", "def", "reflected_binary_operator", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "self", ",", "NumericalExpression", ")", ":", "self_expr", ",", "other_expr", ",", "new_inputs", "=", "self", ".", "build_binary_op", "(", "op", ",", "other", ")", "return", "NumExprFactor", "(", "\"({left}) {op} ({right})\"", ".", "format", "(", "left", "=", "other_expr", ",", "right", "=", "self_expr", ",", "op", "=", "op", ",", ")", ",", "new_inputs", ",", "dtype", "=", "binop_return_dtype", "(", "op", ",", "other", ".", "dtype", ",", "self", ".", "dtype", ")", ")", "# Only have to handle the numeric case because in all other valid cases", "# the corresponding left-binding method will be called.", "elif", "isinstance", "(", "other", ",", "Number", ")", ":", "return", "NumExprFactor", "(", "\"{constant} {op} x_0\"", ".", "format", "(", "op", "=", "op", ",", "constant", "=", "other", ")", ",", "binds", "=", "(", "self", ",", ")", ",", "dtype", "=", "binop_return_dtype", "(", "op", ",", "other", ".", "dtype", ",", "self", ".", "dtype", ")", ",", ")", "raise", "BadBinaryOperator", "(", "op", ",", "other", ",", "self", ")", "return", "reflected_binary_operator" ]
Factory function for making binary operator methods on a Factor. Returns a function, "reflected_binary_operator" suitable for implementing functions like __radd__.
[ "Factory", "function", "for", "making", "binary", "operator", "methods", "on", "a", "Factor", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L204-L240
train
quantopian/zipline
zipline/pipeline/factors/factor.py
unary_operator
def unary_operator(op): """ Factory function for making unary operator methods for Factors. """ # Only negate is currently supported. valid_ops = {'-'} if op not in valid_ops: raise ValueError("Invalid unary operator %s." % op) @with_doc("Unary Operator: '%s'" % op) @with_name(unary_op_name(op)) def unary_operator(self): if self.dtype != float64_dtype: raise TypeError( "Can't apply unary operator {op!r} to instance of " "{typename!r} with dtype {dtypename!r}.\n" "{op!r} is only supported for Factors of dtype " "'float64'.".format( op=op, typename=type(self).__name__, dtypename=self.dtype.name, ) ) # This can't be hoisted up a scope because the types returned by # unary_op_return_type aren't defined when the top-level function is # invoked. if isinstance(self, NumericalExpression): return NumExprFactor( "{op}({expr})".format(op=op, expr=self._expr), self.inputs, dtype=float64_dtype, ) else: return NumExprFactor( "{op}x_0".format(op=op), (self,), dtype=float64_dtype, ) return unary_operator
python
def unary_operator(op): """ Factory function for making unary operator methods for Factors. """ # Only negate is currently supported. valid_ops = {'-'} if op not in valid_ops: raise ValueError("Invalid unary operator %s." % op) @with_doc("Unary Operator: '%s'" % op) @with_name(unary_op_name(op)) def unary_operator(self): if self.dtype != float64_dtype: raise TypeError( "Can't apply unary operator {op!r} to instance of " "{typename!r} with dtype {dtypename!r}.\n" "{op!r} is only supported for Factors of dtype " "'float64'.".format( op=op, typename=type(self).__name__, dtypename=self.dtype.name, ) ) # This can't be hoisted up a scope because the types returned by # unary_op_return_type aren't defined when the top-level function is # invoked. if isinstance(self, NumericalExpression): return NumExprFactor( "{op}({expr})".format(op=op, expr=self._expr), self.inputs, dtype=float64_dtype, ) else: return NumExprFactor( "{op}x_0".format(op=op), (self,), dtype=float64_dtype, ) return unary_operator
[ "def", "unary_operator", "(", "op", ")", ":", "# Only negate is currently supported.", "valid_ops", "=", "{", "'-'", "}", "if", "op", "not", "in", "valid_ops", ":", "raise", "ValueError", "(", "\"Invalid unary operator %s.\"", "%", "op", ")", "@", "with_doc", "(", "\"Unary Operator: '%s'\"", "%", "op", ")", "@", "with_name", "(", "unary_op_name", "(", "op", ")", ")", "def", "unary_operator", "(", "self", ")", ":", "if", "self", ".", "dtype", "!=", "float64_dtype", ":", "raise", "TypeError", "(", "\"Can't apply unary operator {op!r} to instance of \"", "\"{typename!r} with dtype {dtypename!r}.\\n\"", "\"{op!r} is only supported for Factors of dtype \"", "\"'float64'.\"", ".", "format", "(", "op", "=", "op", ",", "typename", "=", "type", "(", "self", ")", ".", "__name__", ",", "dtypename", "=", "self", ".", "dtype", ".", "name", ",", ")", ")", "# This can't be hoisted up a scope because the types returned by", "# unary_op_return_type aren't defined when the top-level function is", "# invoked.", "if", "isinstance", "(", "self", ",", "NumericalExpression", ")", ":", "return", "NumExprFactor", "(", "\"{op}({expr})\"", ".", "format", "(", "op", "=", "op", ",", "expr", "=", "self", ".", "_expr", ")", ",", "self", ".", "inputs", ",", "dtype", "=", "float64_dtype", ",", ")", "else", ":", "return", "NumExprFactor", "(", "\"{op}x_0\"", ".", "format", "(", "op", "=", "op", ")", ",", "(", "self", ",", ")", ",", "dtype", "=", "float64_dtype", ",", ")", "return", "unary_operator" ]
Factory function for making unary operator methods for Factors.
[ "Factory", "function", "for", "making", "unary", "operator", "methods", "for", "Factors", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L243-L282
train
quantopian/zipline
zipline/pipeline/factors/factor.py
function_application
def function_application(func): """ Factory function for producing function application methods for Factor subclasses. """ if func not in NUMEXPR_MATH_FUNCS: raise ValueError("Unsupported mathematical function '%s'" % func) @with_doc(func) @with_name(func) def mathfunc(self): if isinstance(self, NumericalExpression): return NumExprFactor( "{func}({expr})".format(func=func, expr=self._expr), self.inputs, dtype=float64_dtype, ) else: return NumExprFactor( "{func}(x_0)".format(func=func), (self,), dtype=float64_dtype, ) return mathfunc
python
def function_application(func): """ Factory function for producing function application methods for Factor subclasses. """ if func not in NUMEXPR_MATH_FUNCS: raise ValueError("Unsupported mathematical function '%s'" % func) @with_doc(func) @with_name(func) def mathfunc(self): if isinstance(self, NumericalExpression): return NumExprFactor( "{func}({expr})".format(func=func, expr=self._expr), self.inputs, dtype=float64_dtype, ) else: return NumExprFactor( "{func}(x_0)".format(func=func), (self,), dtype=float64_dtype, ) return mathfunc
[ "def", "function_application", "(", "func", ")", ":", "if", "func", "not", "in", "NUMEXPR_MATH_FUNCS", ":", "raise", "ValueError", "(", "\"Unsupported mathematical function '%s'\"", "%", "func", ")", "@", "with_doc", "(", "func", ")", "@", "with_name", "(", "func", ")", "def", "mathfunc", "(", "self", ")", ":", "if", "isinstance", "(", "self", ",", "NumericalExpression", ")", ":", "return", "NumExprFactor", "(", "\"{func}({expr})\"", ".", "format", "(", "func", "=", "func", ",", "expr", "=", "self", ".", "_expr", ")", ",", "self", ".", "inputs", ",", "dtype", "=", "float64_dtype", ",", ")", "else", ":", "return", "NumExprFactor", "(", "\"{func}(x_0)\"", ".", "format", "(", "func", "=", "func", ")", ",", "(", "self", ",", ")", ",", "dtype", "=", "float64_dtype", ",", ")", "return", "mathfunc" ]
Factory function for producing function application methods for Factor subclasses.
[ "Factory", "function", "for", "producing", "function", "application", "methods", "for", "Factor", "subclasses", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L285-L308
train
quantopian/zipline
zipline/pipeline/factors/factor.py
winsorize
def winsorize(row, min_percentile, max_percentile): """ This implementation is based on scipy.stats.mstats.winsorize """ a = row.copy() nan_count = isnan(row).sum() nonnan_count = a.size - nan_count # NOTE: argsort() sorts nans to the end of the array. idx = a.argsort() # Set values at indices below the min percentile to the value of the entry # at the cutoff. if min_percentile > 0: lower_cutoff = int(min_percentile * nonnan_count) a[idx[:lower_cutoff]] = a[idx[lower_cutoff]] # Set values at indices above the max percentile to the value of the entry # at the cutoff. if max_percentile < 1: upper_cutoff = int(ceil(nonnan_count * max_percentile)) # if max_percentile is close to 1, then upper_cutoff might not # remove any values. if upper_cutoff < nonnan_count: start_of_nans = (-nan_count) if nan_count else None a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]] return a
python
def winsorize(row, min_percentile, max_percentile): """ This implementation is based on scipy.stats.mstats.winsorize """ a = row.copy() nan_count = isnan(row).sum() nonnan_count = a.size - nan_count # NOTE: argsort() sorts nans to the end of the array. idx = a.argsort() # Set values at indices below the min percentile to the value of the entry # at the cutoff. if min_percentile > 0: lower_cutoff = int(min_percentile * nonnan_count) a[idx[:lower_cutoff]] = a[idx[lower_cutoff]] # Set values at indices above the max percentile to the value of the entry # at the cutoff. if max_percentile < 1: upper_cutoff = int(ceil(nonnan_count * max_percentile)) # if max_percentile is close to 1, then upper_cutoff might not # remove any values. if upper_cutoff < nonnan_count: start_of_nans = (-nan_count) if nan_count else None a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]] return a
[ "def", "winsorize", "(", "row", ",", "min_percentile", ",", "max_percentile", ")", ":", "a", "=", "row", ".", "copy", "(", ")", "nan_count", "=", "isnan", "(", "row", ")", ".", "sum", "(", ")", "nonnan_count", "=", "a", ".", "size", "-", "nan_count", "# NOTE: argsort() sorts nans to the end of the array.", "idx", "=", "a", ".", "argsort", "(", ")", "# Set values at indices below the min percentile to the value of the entry", "# at the cutoff.", "if", "min_percentile", ">", "0", ":", "lower_cutoff", "=", "int", "(", "min_percentile", "*", "nonnan_count", ")", "a", "[", "idx", "[", ":", "lower_cutoff", "]", "]", "=", "a", "[", "idx", "[", "lower_cutoff", "]", "]", "# Set values at indices above the max percentile to the value of the entry", "# at the cutoff.", "if", "max_percentile", "<", "1", ":", "upper_cutoff", "=", "int", "(", "ceil", "(", "nonnan_count", "*", "max_percentile", ")", ")", "# if max_percentile is close to 1, then upper_cutoff might not", "# remove any values.", "if", "upper_cutoff", "<", "nonnan_count", ":", "start_of_nans", "=", "(", "-", "nan_count", ")", "if", "nan_count", "else", "None", "a", "[", "idx", "[", "upper_cutoff", ":", "start_of_nans", "]", "]", "=", "a", "[", "idx", "[", "upper_cutoff", "-", "1", "]", "]", "return", "a" ]
This implementation is based on scipy.stats.mstats.winsorize
[ "This", "implementation", "is", "based", "on", "scipy", ".", "stats", ".", "mstats", ".", "winsorize" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1671-L1698
train
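A worked example of the clamping behavior, importing from the path in this record. With five values and ``max_percentile=0.8``, ``ceil(5 * 0.8) == 4``, so everything at or above sorted index 4 is clamped to the value at sorted index 3:

.. code-block:: python

    import numpy as np
    from zipline.pipeline.factors.factor import winsorize

    row = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
    out = winsorize(row, min_percentile=0.0, max_percentile=0.8)

    print(out)  # [1. 2. 3. 4. 4.] -- the outlier is clamped, not dropped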
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.demean
def demean(self, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Factor that computes ``self`` and subtracts the mean from
    each row of the result.

    If ``mask`` is supplied, ignore values where ``mask`` returns False
    when computing row means, and output NaN anywhere the mask is False.

    If ``groupby`` is supplied, compute by partitioning each row based on
    the values produced by ``groupby``, de-meaning the partitioned arrays,
    and stitching the sub-results back together.

    Parameters
    ----------
    mask : zipline.pipeline.Filter, optional
        A Filter defining values to ignore when computing means.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to compute means.

    Examples
    --------
    Let ``f`` be a Factor which would produce the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13    1.0    2.0    3.0    4.0
        2017-03-14    1.5    2.5    3.5    1.0
        2017-03-15    2.0    3.0    4.0    1.5
        2017-03-16    2.5    3.5    1.0    2.0

    Let ``c`` be a Classifier producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13      1      1      2      2
        2017-03-14      1      1      2      2
        2017-03-15      1      1      2      2
        2017-03-16      1      1      2      2

    Let ``m`` be a Filter producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13  False   True   True   True
        2017-03-14   True  False   True   True
        2017-03-15   True   True  False   True
        2017-03-16   True   True   True  False

    Then ``f.demean()`` will subtract the mean from each row produced by
    ``f``.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -1.500 -0.500  0.500  1.500
        2017-03-14 -0.625  0.375  1.375 -1.125
        2017-03-15 -0.625  0.375  1.375 -1.125
        2017-03-16  0.250  1.250 -1.250 -0.250

    ``f.demean(mask=m)`` will subtract the mean from each row, but means
    will be calculated ignoring values on the diagonal, and NaNs will be
    written to the diagonal in the output. Diagonal values are ignored
    because they are the locations where the mask ``m`` produced False.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN -1.000  0.000  1.000
        2017-03-14 -0.500    NaN  1.500 -1.000
        2017-03-15 -0.166  0.833    NaN -0.666
        2017-03-16  0.166  1.166 -1.333    NaN

    ``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
    MCD/BK from their respective entries. AAPL/MSFT are grouped together
    because both assets always produce 1 in the output of the classifier
    ``c``. Similarly, MCD/BK are grouped together because they always
    produce 2.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -0.500  0.500 -0.500  0.500
        2017-03-14 -0.500  0.500  1.250 -1.250
        2017-03-15 -0.500  0.500  1.250 -1.250
        2017-03-16 -0.500  0.500 -0.500  0.500

    ``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
    AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
    the diagonal, and NaNs will be written to the diagonal in the output.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN  0.000 -0.500  0.500
        2017-03-14  0.000    NaN  1.250 -1.250
        2017-03-15 -0.500  0.500    NaN  0.000
        2017-03-16 -0.500  0.500  0.000    NaN

    Notes
    -----
    Mean is sensitive to the magnitudes of outliers. When working with a
    factor that can potentially produce large outliers, it is often useful
    to use the ``mask`` parameter to discard values at the extremes of the
    distribution::

        >>> base = MyFactor(...)  # doctest: +SKIP
        >>> normalized = base.demean(
        ...     mask=base.percentile_between(1, 99),
        ... )  # doctest: +SKIP

    ``demean()`` is only supported on Factors of dtype float64.

    See Also
    --------
    :meth:`pandas.DataFrame.groupby`
    """
    return GroupedRowTransform(
        transform=demean,
        transform_args=(),
        factor=self,
        groupby=groupby,
        dtype=self.dtype,
        missing_value=self.missing_value,
        window_safe=self.window_safe,
        mask=mask,
    )
python
def demean(self, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Factor that computes ``self`` and subtracts the mean from
    each row of the result.

    If ``mask`` is supplied, ignore values where ``mask`` returns False
    when computing row means, and output NaN anywhere the mask is False.

    If ``groupby`` is supplied, compute by partitioning each row based on
    the values produced by ``groupby``, de-meaning the partitioned arrays,
    and stitching the sub-results back together.

    Parameters
    ----------
    mask : zipline.pipeline.Filter, optional
        A Filter defining values to ignore when computing means.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to compute means.

    Examples
    --------
    Let ``f`` be a Factor which would produce the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13    1.0    2.0    3.0    4.0
        2017-03-14    1.5    2.5    3.5    1.0
        2017-03-15    2.0    3.0    4.0    1.5
        2017-03-16    2.5    3.5    1.0    2.0

    Let ``c`` be a Classifier producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13      1      1      2      2
        2017-03-14      1      1      2      2
        2017-03-15      1      1      2      2
        2017-03-16      1      1      2      2

    Let ``m`` be a Filter producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13  False   True   True   True
        2017-03-14   True  False   True   True
        2017-03-15   True   True  False   True
        2017-03-16   True   True   True  False

    Then ``f.demean()`` will subtract the mean from each row produced by
    ``f``.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -1.500 -0.500  0.500  1.500
        2017-03-14 -0.625  0.375  1.375 -1.125
        2017-03-15 -0.625  0.375  1.375 -1.125
        2017-03-16  0.250  1.250 -1.250 -0.250

    ``f.demean(mask=m)`` will subtract the mean from each row, but means
    will be calculated ignoring values on the diagonal, and NaNs will be
    written to the diagonal in the output. Diagonal values are ignored
    because they are the locations where the mask ``m`` produced False.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN -1.000  0.000  1.000
        2017-03-14 -0.500    NaN  1.500 -1.000
        2017-03-15 -0.166  0.833    NaN -0.666
        2017-03-16  0.166  1.166 -1.333    NaN

    ``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
    MCD/BK from their respective entries. AAPL/MSFT are grouped together
    because both assets always produce 1 in the output of the classifier
    ``c``. Similarly, MCD/BK are grouped together because they always
    produce 2.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -0.500  0.500 -0.500  0.500
        2017-03-14 -0.500  0.500  1.250 -1.250
        2017-03-15 -0.500  0.500  1.250 -1.250
        2017-03-16 -0.500  0.500 -0.500  0.500

    ``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
    AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
    the diagonal, and NaNs will be written to the diagonal in the output.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN  0.000 -0.500  0.500
        2017-03-14  0.000    NaN  1.250 -1.250
        2017-03-15 -0.500  0.500    NaN  0.000
        2017-03-16 -0.500  0.500  0.000    NaN

    Notes
    -----
    Mean is sensitive to the magnitudes of outliers. When working with a
    factor that can potentially produce large outliers, it is often useful
    to use the ``mask`` parameter to discard values at the extremes of the
    distribution::

        >>> base = MyFactor(...)  # doctest: +SKIP
        >>> normalized = base.demean(
        ...     mask=base.percentile_between(1, 99),
        ... )  # doctest: +SKIP

    ``demean()`` is only supported on Factors of dtype float64.

    See Also
    --------
    :meth:`pandas.DataFrame.groupby`
    """
    return GroupedRowTransform(
        transform=demean,
        transform_args=(),
        factor=self,
        groupby=groupby,
        dtype=self.dtype,
        missing_value=self.missing_value,
        window_safe=self.window_safe,
        mask=mask,
    )
[ "def", "demean", "(", "self", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "return", "GroupedRowTransform", "(", "transform", "=", "demean", ",", "transform_args", "=", "(", ")", ",", "factor", "=", "self", ",", "groupby", "=", "groupby", ",", "dtype", "=", "self", ".", "dtype", ",", "missing_value", "=", "self", ".", "missing_value", ",", "window_safe", "=", "self", ".", "window_safe", ",", "mask", "=", "mask", ",", ")" ]
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.

If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.

If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.

Parameters
----------
mask : zipline.pipeline.Filter, optional
    A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
    A classifier defining partitions over which to compute means.

Examples
--------
Let ``f`` be a Factor which would produce the following output::

                 AAPL   MSFT    MCD     BK
    2017-03-13    1.0    2.0    3.0    4.0
    2017-03-14    1.5    2.5    3.5    1.0
    2017-03-15    2.0    3.0    4.0    1.5
    2017-03-16    2.5    3.5    1.0    2.0

Let ``c`` be a Classifier producing the following output::

                 AAPL   MSFT    MCD     BK
    2017-03-13      1      1      2      2
    2017-03-14      1      1      2      2
    2017-03-15      1      1      2      2
    2017-03-16      1      1      2      2

Let ``m`` be a Filter producing the following output::

                 AAPL   MSFT    MCD     BK
    2017-03-13  False   True   True   True
    2017-03-14   True  False   True   True
    2017-03-15   True   True  False   True
    2017-03-16   True   True   True  False

Then ``f.demean()`` will subtract the mean from each row produced by
``f``.

::

                 AAPL   MSFT    MCD     BK
    2017-03-13 -1.500 -0.500  0.500  1.500
    2017-03-14 -0.625  0.375  1.375 -1.125
    2017-03-15 -0.625  0.375  1.375 -1.125
    2017-03-16  0.250  1.250 -1.250 -0.250

``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.

::

                 AAPL   MSFT    MCD     BK
    2017-03-13    NaN -1.000  0.000  1.000
    2017-03-14 -0.500    NaN  1.500 -1.000
    2017-03-15 -0.166  0.833    NaN -0.666
    2017-03-16  0.166  1.166 -1.333    NaN

``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. AAPL/MSFT are grouped together
because both assets always produce 1 in the output of the classifier
``c``. Similarly, MCD/BK are grouped together because they always
produce 2.

::

                 AAPL   MSFT    MCD     BK
    2017-03-13 -0.500  0.500 -0.500  0.500
    2017-03-14 -0.500  0.500  1.250 -1.250
    2017-03-15 -0.500  0.500  1.250 -1.250
    2017-03-16 -0.500  0.500 -0.500  0.500

``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.

::

                 AAPL   MSFT    MCD     BK
    2017-03-13    NaN  0.000 -0.500  0.500
    2017-03-14  0.000    NaN  1.250 -1.250
    2017-03-15 -0.500  0.500    NaN  0.000
    2017-03-16 -0.500  0.500  0.000    NaN

Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with a
factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::

    >>> base = MyFactor(...)  # doctest: +SKIP
    >>> normalized = base.demean(
    ...     mask=base.percentile_between(1, 99),
    ... )  # doctest: +SKIP

``demean()`` is only supported on Factors of dtype float64.

See Also
--------
:meth:`pandas.DataFrame.groupby`
[ "Construct", "a", "Factor", "that", "computes", "self", "and", "subtracts", "the", "mean", "from", "row", "of", "the", "result", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L402-L524
train
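The row-wise and grouped semantics described in the docstring can be reproduced in plain pandas. A sketch using the first two rows of the ``f`` and ``c`` values from the example above, with the group labels supplied as an explicit mapping:

.. code-block:: python

    import pandas as pd

    f = pd.DataFrame(
        {'AAPL': [1.0, 1.5], 'MSFT': [2.0, 2.5],
         'MCD': [3.0, 3.5], 'BK': [4.0, 1.0]},
        index=pd.to_datetime(['2017-03-13', '2017-03-14']),
    )

    # f.demean(): subtract each row's mean.
    print(f.sub(f.mean(axis=1), axis=0))

    # f.demean(groupby=c): demean within {AAPL, MSFT} and {MCD, BK} separately.
    group_of = pd.Series({'AAPL': 1, 'MSFT': 1, 'MCD': 2, 'BK': 2})
    print(f.T.groupby(group_of).transform(lambda g: g - g.mean()).T)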
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.zscore
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Factor that Z-Scores each day's results.

    The Z-Score of a row is defined as::

        (row - row.mean()) / row.stddev()

    If ``mask`` is supplied, ignore values where ``mask`` returns False
    when computing row means and standard deviations, and output NaN
    anywhere the mask is False.

    If ``groupby`` is supplied, compute by partitioning each row based on
    the values produced by ``groupby``, z-scoring the partitioned arrays,
    and stitching the sub-results back together.

    Parameters
    ----------
    mask : zipline.pipeline.Filter, optional
        A Filter defining values to ignore when Z-Scoring.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to compute Z-Scores.

    Returns
    -------
    zscored : zipline.pipeline.Factor
        A Factor that z-scores the output of self.

    Notes
    -----
    Mean and standard deviation are sensitive to the magnitudes of
    outliers. When working with a factor that can potentially produce
    large outliers, it is often useful to use the ``mask`` parameter to
    discard values at the extremes of the distribution::

        >>> base = MyFactor(...)  # doctest: +SKIP
        >>> normalized = base.zscore(
        ...    mask=base.percentile_between(1, 99),
        ... )  # doctest: +SKIP

    ``zscore()`` is only supported on Factors of dtype float64.

    Examples
    --------
    See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
    example of the semantics for ``mask`` and ``groupby``.

    See Also
    --------
    :meth:`pandas.DataFrame.groupby`
    """
    return GroupedRowTransform(
        transform=zscore,
        transform_args=(),
        factor=self,
        groupby=groupby,
        dtype=self.dtype,
        missing_value=self.missing_value,
        mask=mask,
        window_safe=True,
    )
python
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Factor that Z-Scores each day's results.

    The Z-Score of a row is defined as::

        (row - row.mean()) / row.stddev()

    If ``mask`` is supplied, ignore values where ``mask`` returns False
    when computing row means and standard deviations, and output NaN
    anywhere the mask is False.

    If ``groupby`` is supplied, compute by partitioning each row based on
    the values produced by ``groupby``, z-scoring the partitioned arrays,
    and stitching the sub-results back together.

    Parameters
    ----------
    mask : zipline.pipeline.Filter, optional
        A Filter defining values to ignore when Z-Scoring.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to compute Z-Scores.

    Returns
    -------
    zscored : zipline.pipeline.Factor
        A Factor that z-scores the output of self.

    Notes
    -----
    Mean and standard deviation are sensitive to the magnitudes of
    outliers. When working with a factor that can potentially produce
    large outliers, it is often useful to use the ``mask`` parameter to
    discard values at the extremes of the distribution::

        >>> base = MyFactor(...)  # doctest: +SKIP
        >>> normalized = base.zscore(
        ...    mask=base.percentile_between(1, 99),
        ... )  # doctest: +SKIP

    ``zscore()`` is only supported on Factors of dtype float64.

    Examples
    --------
    See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
    example of the semantics for ``mask`` and ``groupby``.

    See Also
    --------
    :meth:`pandas.DataFrame.groupby`
    """
    return GroupedRowTransform(
        transform=zscore,
        transform_args=(),
        factor=self,
        groupby=groupby,
        dtype=self.dtype,
        missing_value=self.missing_value,
        mask=mask,
        window_safe=True,
    )
[ "def", "zscore", "(", "self", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "return", "GroupedRowTransform", "(", "transform", "=", "zscore", ",", "transform_args", "=", "(", ")", ",", "factor", "=", "self", ",", "groupby", "=", "groupby", ",", "dtype", "=", "self", ".", "dtype", ",", "missing_value", "=", "self", ".", "missing_value", ",", "mask", "=", "mask", ",", "window_safe", "=", "True", ",", ")" ]
Construct a Factor that Z-Scores each day's results.

The Z-Score of a row is defined as::

    (row - row.mean()) / row.stddev()

If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.

If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.

Parameters
----------
mask : zipline.pipeline.Filter, optional
    A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
    A classifier defining partitions over which to compute Z-Scores.

Returns
-------
zscored : zipline.pipeline.Factor
    A Factor that z-scores the output of self.

Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce
large outliers, it is often useful to use the ``mask`` parameter to
discard values at the extremes of the distribution::

    >>> base = MyFactor(...)  # doctest: +SKIP
    >>> normalized = base.zscore(
    ...    mask=base.percentile_between(1, 99),
    ... )  # doctest: +SKIP

``zscore()`` is only supported on Factors of dtype float64.

Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.

See Also
--------
:meth:`pandas.DataFrame.groupby`
[ "Construct", "a", "Factor", "that", "Z", "-", "Scores", "each", "day", "s", "results", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L531-L591
train
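The row definition ``(row - row.mean()) / row.stddev()`` in plain pandas. The docstring does not say whether ``stddev`` is the population or sample standard deviation; the sketch below assumes population (``ddof=0``), matching numpy's default:

.. code-block:: python

    import pandas as pd

    f = pd.DataFrame(
        {'AAPL': [1.0, 1.5], 'MSFT': [2.0, 2.5],
         'MCD': [3.0, 3.5], 'BK': [4.0, 1.0]},
        index=pd.to_datetime(['2017-03-13', '2017-03-14']),
    )

    zscored = f.sub(f.mean(axis=1), axis=0).div(f.std(axis=1, ddof=0), axis=0)
    print(zscored)  # each row now has mean 0 and (population) stddev 1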
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.rank
def rank(self, method='ordinal', ascending=True, mask=NotSpecified, groupby=NotSpecified): """ Construct a new Factor representing the sorted rank of each column within each row. Parameters ---------- method : str, {'ordinal', 'min', 'max', 'dense', 'average'} The method used to assign ranks to tied elements. See `scipy.stats.rankdata` for a full description of the semantics for each ranking method. Default is 'ordinal'. ascending : bool, optional Whether to return sorted rank in ascending or descending order. Default is True. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, ranks are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- ranks : zipline.pipeline.factors.Rank A new factor that will compute the ranking of the data produced by `self`. Notes ----- The default value for `method` is different from the default for `scipy.stats.rankdata`. See that function's documentation for a full description of the valid inputs to `method`. Missing or non-existent data on a given day will cause an asset to be given a rank of NaN for that day. See Also -------- :func:`scipy.stats.rankdata` :class:`zipline.pipeline.factors.factor.Rank` """ if groupby is NotSpecified: return Rank(self, method=method, ascending=ascending, mask=mask) return GroupedRowTransform( transform=rankdata if ascending else rankdata_1d_descending, transform_args=(method,), factor=self, groupby=groupby, dtype=float64_dtype, missing_value=nan, mask=mask, window_safe=True, )
python
def rank(self, method='ordinal', ascending=True, mask=NotSpecified, groupby=NotSpecified): """ Construct a new Factor representing the sorted rank of each column within each row. Parameters ---------- method : str, {'ordinal', 'min', 'max', 'dense', 'average'} The method used to assign ranks to tied elements. See `scipy.stats.rankdata` for a full description of the semantics for each ranking method. Default is 'ordinal'. ascending : bool, optional Whether to return sorted rank in ascending or descending order. Default is True. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, ranks are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- ranks : zipline.pipeline.factors.Rank A new factor that will compute the ranking of the data produced by `self`. Notes ----- The default value for `method` is different from the default for `scipy.stats.rankdata`. See that function's documentation for a full description of the valid inputs to `method`. Missing or non-existent data on a given day will cause an asset to be given a rank of NaN for that day. See Also -------- :func:`scipy.stats.rankdata` :class:`zipline.pipeline.factors.factor.Rank` """ if groupby is NotSpecified: return Rank(self, method=method, ascending=ascending, mask=mask) return GroupedRowTransform( transform=rankdata if ascending else rankdata_1d_descending, transform_args=(method,), factor=self, groupby=groupby, dtype=float64_dtype, missing_value=nan, mask=mask, window_safe=True, )
[ "def", "rank", "(", "self", ",", "method", "=", "'ordinal'", ",", "ascending", "=", "True", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "if", "groupby", "is", "NotSpecified", ":", "return", "Rank", "(", "self", ",", "method", "=", "method", ",", "ascending", "=", "ascending", ",", "mask", "=", "mask", ")", "return", "GroupedRowTransform", "(", "transform", "=", "rankdata", "if", "ascending", "else", "rankdata_1d_descending", ",", "transform_args", "=", "(", "method", ",", ")", ",", "factor", "=", "self", ",", "groupby", "=", "groupby", ",", "dtype", "=", "float64_dtype", ",", "missing_value", "=", "nan", ",", "mask", "=", "mask", ",", "window_safe", "=", "True", ",", ")" ]
Construct a new Factor representing the sorted rank of each column within each row. Parameters ---------- method : str, {'ordinal', 'min', 'max', 'dense', 'average'} The method used to assign ranks to tied elements. See `scipy.stats.rankdata` for a full description of the semantics for each ranking method. Default is 'ordinal'. ascending : bool, optional Whether to return sorted rank in ascending or descending order. Default is True. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, ranks are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- ranks : zipline.pipeline.factors.Rank A new factor that will compute the ranking of the data produced by `self`. Notes ----- The default value for `method` is different from the default for `scipy.stats.rankdata`. See that function's documentation for a full description of the valid inputs to `method`. Missing or non-existent data on a given day will cause an asset to be given a rank of NaN for that day. See Also -------- :func:`scipy.stats.rankdata` :class:`zipline.pipeline.factors.factor.Rank`
[ "Construct", "a", "new", "Factor", "representing", "the", "sorted", "rank", "of", "each", "column", "within", "each", "row", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L593-L651
train
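How the ``method`` choices differ on a single row with ties, using ``scipy.stats.rankdata`` directly (the function the grouped path defers to):

.. code-block:: python

    import numpy as np
    from scipy.stats import rankdata

    row = np.array([3.0, 1.0, 4.0, 1.0])

    print(rankdata(row, method='ordinal'))  # ties broken by position: 3, 1, 4, 2
    print(rankdata(row, method='average'))  # ties share a rank: 3.0, 1.5, 4.0, 1.5
    print(rankdata(-row, method='ordinal'))  # descending ranks, cf. ascending=False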
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.pearsonr
def pearsonr(self, target, correlation_length, mask=NotSpecified): """ Construct a new Factor that computes rolling pearson correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingPearson A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.pearsonr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingPearsonOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.pearsonr` :class:`zipline.pipeline.factors.RollingPearsonOfReturns` :meth:`Factor.spearmanr` """ from .statistical import RollingPearson return RollingPearson( base_factor=self, target=target, correlation_length=correlation_length, mask=mask, )
python
def pearsonr(self, target, correlation_length, mask=NotSpecified): """ Construct a new Factor that computes rolling pearson correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingPearson A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.pearsonr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingPearsonOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.pearsonr` :class:`zipline.pipeline.factors.RollingPearsonOfReturns` :meth:`Factor.spearmanr` """ from .statistical import RollingPearson return RollingPearson( base_factor=self, target=target, correlation_length=correlation_length, mask=mask, )
[ "def", "pearsonr", "(", "self", ",", "target", ",", "correlation_length", ",", "mask", "=", "NotSpecified", ")", ":", "from", ".", "statistical", "import", "RollingPearson", "return", "RollingPearson", "(", "base_factor", "=", "self", ",", "target", "=", "target", ",", "correlation_length", "=", "correlation_length", ",", "mask", "=", "mask", ",", ")" ]
Construct a new Factor that computes rolling pearson correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingPearson A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.pearsonr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingPearsonOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.pearsonr` :class:`zipline.pipeline.factors.RollingPearsonOfReturns` :meth:`Factor.spearmanr`
[ "Construct", "a", "new", "Factor", "that", "computes", "rolling", "pearson", "correlation", "coefficients", "between", "target", "and", "the", "columns", "of", "self", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L656-L716
train
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.spearmanr
def spearmanr(self, target, correlation_length, mask=NotSpecified): """ Construct a new Factor that computes rolling spearman rank correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingSpearman A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.spearmanr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingSpearmanOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.spearmanr` :class:`zipline.pipeline.factors.RollingSpearmanOfReturns` :meth:`Factor.pearsonr` """ from .statistical import RollingSpearman return RollingSpearman( base_factor=self, target=target, correlation_length=correlation_length, mask=mask, )
python
def spearmanr(self, target, correlation_length, mask=NotSpecified): """ Construct a new Factor that computes rolling spearman rank correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingSpearman A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.spearmanr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingSpearmanOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.spearmanr` :class:`zipline.pipeline.factors.RollingSpearmanOfReturns` :meth:`Factor.pearsonr` """ from .statistical import RollingSpearman return RollingSpearman( base_factor=self, target=target, correlation_length=correlation_length, mask=mask, )
[ "def", "spearmanr", "(", "self", ",", "target", ",", "correlation_length", ",", "mask", "=", "NotSpecified", ")", ":", "from", ".", "statistical", "import", "RollingSpearman", "return", "RollingSpearman", "(", "base_factor", "=", "self", ",", "target", "=", "target", ",", "correlation_length", "=", "correlation_length", ",", "mask", "=", "mask", ",", ")" ]
Construct a new Factor that computes rolling spearman rank correlation coefficients between `target` and the columns of `self`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term used to compute correlations against each column of data produced by `self`. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, correlations are computed asset-wise. correlation_length : int Length of the lookback window over which to compute each correlation coefficient. mask : zipline.pipeline.Filter, optional A Filter describing which assets should have their correlation with the target slice computed each day. Returns ------- correlations : zipline.pipeline.factors.RollingSpearman A new Factor that will compute correlations between `target` and the columns of `self`. Examples -------- Suppose we want to create a factor that computes the correlation between AAPL's 10-day returns and the 10-day returns of all other assets, computing each correlation over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_correlations = returns.spearmanr( target=returns_slice, correlation_length=30, ) This is equivalent to doing:: aapl_correlations = RollingSpearmanOfReturns( target=sid(24), returns_length=10, correlation_length=30, ) See Also -------- :func:`scipy.stats.spearmanr` :class:`zipline.pipeline.factors.RollingSpearmanOfReturns` :meth:`Factor.pearsonr`
[ "Construct", "a", "new", "Factor", "that", "computes", "rolling", "spearman", "rank", "correlation", "coefficients", "between", "target", "and", "the", "columns", "of", "self", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L721-L781
train
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.linear_regression
def linear_regression(self, target, regression_length, mask=NotSpecified): """ Construct a new Factor that performs an ordinary least-squares regression predicting the columns of `self` from `target`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term to use as the predictor/independent variable in each regression. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, regressions are computed asset-wise. regression_length : int Length of the lookback window over which to compute each regression. mask : zipline.pipeline.Filter, optional A Filter describing which assets should be regressed with the target slice each day. Returns ------- regressions : zipline.pipeline.factors.RollingLinearRegression A new Factor that will compute linear regressions of `target` against the columns of `self`. Examples -------- Suppose we want to create a factor that regresses AAPL's 10-day returns against the 10-day returns of all other assets, computing each regression over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_regressions = returns.linear_regression( target=returns_slice, regression_length=30, ) This is equivalent to doing:: aapl_regressions = RollingLinearRegressionOfReturns( target=sid(24), returns_length=10, regression_length=30, ) See Also -------- :func:`scipy.stats.linregress` :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns` """ from .statistical import RollingLinearRegression return RollingLinearRegression( dependent=self, independent=target, regression_length=regression_length, mask=mask, )
python
def linear_regression(self, target, regression_length, mask=NotSpecified): """ Construct a new Factor that performs an ordinary least-squares regression predicting the columns of `self` from `target`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term to use as the predictor/independent variable in each regression. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, regressions are computed asset-wise. regression_length : int Length of the lookback window over which to compute each regression. mask : zipline.pipeline.Filter, optional A Filter describing which assets should be regressed with the target slice each day. Returns ------- regressions : zipline.pipeline.factors.RollingLinearRegression A new Factor that will compute linear regressions of `target` against the columns of `self`. Examples -------- Suppose we want to create a factor that regresses AAPL's 10-day returns against the 10-day returns of all other assets, computing each regression over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_regressions = returns.linear_regression( target=returns_slice, regression_length=30, ) This is equivalent to doing:: aapl_regressions = RollingLinearRegressionOfReturns( target=sid(24), returns_length=10, regression_length=30, ) See Also -------- :func:`scipy.stats.linregress` :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns` """ from .statistical import RollingLinearRegression return RollingLinearRegression( dependent=self, independent=target, regression_length=regression_length, mask=mask, )
[ "def", "linear_regression", "(", "self", ",", "target", ",", "regression_length", ",", "mask", "=", "NotSpecified", ")", ":", "from", ".", "statistical", "import", "RollingLinearRegression", "return", "RollingLinearRegression", "(", "dependent", "=", "self", ",", "independent", "=", "target", ",", "regression_length", "=", "regression_length", ",", "mask", "=", "mask", ",", ")" ]
Construct a new Factor that performs an ordinary least-squares regression predicting the columns of `self` from `target`. This method can only be called on factors which are deemed safe for use as inputs to other factors. This includes `Returns` and any factors created from `Factor.rank` or `Factor.zscore`. Parameters ---------- target : zipline.pipeline.Term with a numeric dtype The term to use as the predictor/independent variable in each regression. This may be a Factor, a BoundColumn or a Slice. If `target` is two-dimensional, regressions are computed asset-wise. regression_length : int Length of the lookback window over which to compute each regression. mask : zipline.pipeline.Filter, optional A Filter describing which assets should be regressed with the target slice each day. Returns ------- regressions : zipline.pipeline.factors.RollingLinearRegression A new Factor that will compute linear regressions of `target` against the columns of `self`. Examples -------- Suppose we want to create a factor that regresses AAPL's 10-day returns against the 10-day returns of all other assets, computing each regression over 30 days. This can be achieved by doing the following:: returns = Returns(window_length=10) returns_slice = returns[sid(24)] aapl_regressions = returns.linear_regression( target=returns_slice, regression_length=30, ) This is equivalent to doing:: aapl_regressions = RollingLinearRegressionOfReturns( target=sid(24), returns_length=10, regression_length=30, ) See Also -------- :func:`scipy.stats.linregress` :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
[ "Construct", "a", "new", "Factor", "that", "performs", "an", "ordinary", "least", "-", "squares", "regression", "predicting", "the", "columns", "of", "self", "from", "target", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L786-L843
train
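The `linear_regression` record above already shows how to build the factor; as a hedged sketch of how such a factor is typically wired into a full `Pipeline`, assuming the returned `RollingLinearRegression` exposes `alpha` and `beta` outputs (as documented for `RollingLinearRegressionOfReturns`) and that `target_asset` is a hypothetical `Asset` looked up elsewhere (e.g. via `sid(24)` inside an algorithm):

```python
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import Returns

def make_pipeline(target_asset):
    # Slicing `returns` by a single asset yields the 1D regression target.
    returns = Returns(window_length=10)
    regressions = returns.linear_regression(
        target=returns[target_asset],
        regression_length=30,
    )
    # The rolling regression exposes its coefficients as separate outputs.
    return Pipeline(columns={
        'alpha': regressions.alpha,
        'beta': regressions.beta,
    })
```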
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.winsorize
def winsorize(self, min_percentile, max_percentile, mask=NotSpecified, groupby=NotSpecified): """ Construct a new factor that winsorizes the result of this factor. Winsorizing changes values ranked less than the minimum percentile to the value at the minimum percentile. Similarly, values ranking above the maximum percentile are changed to the value at the maximum percentile. Winsorizing is useful for limiting the impact of extreme data points without completely removing those points. If ``mask`` is supplied, ignore values where ``mask`` returns False when computing percentile cutoffs, and output NaN anywhere the mask is False. If ``groupby`` is supplied, winsorization is applied separately to each group defined by ``groupby``. Parameters ---------- min_percentile: float, int Entries with values at or below this percentile will be replaced with the (len(input) * min_percentile)th lowest value. If low values should not be clipped, use 0. max_percentile: float, int Entries with values at or above this percentile will be replaced with the (len(input) * max_percentile)th lowest value. If high values should not be clipped, use 1. mask : zipline.pipeline.Filter, optional A Filter defining values to ignore when winsorizing. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to winsorize. Returns ------- winsorized : zipline.pipeline.Factor A Factor producing a winsorized version of self. Examples -------- .. code-block:: python price = USEquityPricing.close.latest columns={ 'PRICE': price, 'WINSOR_1': price.winsorize( min_percentile=0.25, max_percentile=0.75 ), 'WINSOR_2': price.winsorize( min_percentile=0.50, max_percentile=1.0 ), 'WINSOR_3': price.winsorize( min_percentile=0.0, max_percentile=0.5 ), } Given a pipeline with columns, defined above, the result for a given day could look like: :: 'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3' Asset_1 1 2 4 3 Asset_2 2 2 4 3 Asset_3 3 3 4 3 Asset_4 4 4 4 4 Asset_5 5 5 5 4 Asset_6 6 5 5 4 See Also -------- :func:`scipy.stats.mstats.winsorize` :meth:`pandas.DataFrame.groupby` """ if not 0.0 <= min_percentile < max_percentile <= 1.0: raise BadPercentileBounds( min_percentile=min_percentile, max_percentile=max_percentile, upper_bound=1.0, ) return GroupedRowTransform( transform=winsorize, transform_args=(min_percentile, max_percentile), factor=self, groupby=groupby, dtype=self.dtype, missing_value=self.missing_value, mask=mask, window_safe=self.window_safe, )
python
def winsorize(self, min_percentile, max_percentile, mask=NotSpecified, groupby=NotSpecified): """ Construct a new factor that winsorizes the result of this factor. Winsorizing changes values ranked less than the minimum percentile to the value at the minimum percentile. Similarly, values ranking above the maximum percentile are changed to the value at the maximum percentile. Winsorizing is useful for limiting the impact of extreme data points without completely removing those points. If ``mask`` is supplied, ignore values where ``mask`` returns False when computing percentile cutoffs, and output NaN anywhere the mask is False. If ``groupby`` is supplied, winsorization is applied separately to each group defined by ``groupby``. Parameters ---------- min_percentile: float, int Entries with values at or below this percentile will be replaced with the (len(input) * min_percentile)th lowest value. If low values should not be clipped, use 0. max_percentile: float, int Entries with values at or above this percentile will be replaced with the (len(input) * max_percentile)th lowest value. If high values should not be clipped, use 1. mask : zipline.pipeline.Filter, optional A Filter defining values to ignore when winsorizing. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to winsorize. Returns ------- winsorized : zipline.pipeline.Factor A Factor producing a winsorized version of self. Examples -------- .. code-block:: python price = USEquityPricing.close.latest columns={ 'PRICE': price, 'WINSOR_1': price.winsorize( min_percentile=0.25, max_percentile=0.75 ), 'WINSOR_2': price.winsorize( min_percentile=0.50, max_percentile=1.0 ), 'WINSOR_3': price.winsorize( min_percentile=0.0, max_percentile=0.5 ), } Given a pipeline with columns, defined above, the result for a given day could look like: :: 'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3' Asset_1 1 2 4 3 Asset_2 2 2 4 3 Asset_3 3 3 4 3 Asset_4 4 4 4 4 Asset_5 5 5 5 4 Asset_6 6 5 5 4 See Also -------- :func:`scipy.stats.mstats.winsorize` :meth:`pandas.DataFrame.groupby` """ if not 0.0 <= min_percentile < max_percentile <= 1.0: raise BadPercentileBounds( min_percentile=min_percentile, max_percentile=max_percentile, upper_bound=1.0, ) return GroupedRowTransform( transform=winsorize, transform_args=(min_percentile, max_percentile), factor=self, groupby=groupby, dtype=self.dtype, missing_value=self.missing_value, mask=mask, window_safe=self.window_safe, )
[ "def", "winsorize", "(", "self", ",", "min_percentile", ",", "max_percentile", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "if", "not", "0.0", "<=", "min_percentile", "<", "max_percentile", "<=", "1.0", ":", "raise", "BadPercentileBounds", "(", "min_percentile", "=", "min_percentile", ",", "max_percentile", "=", "max_percentile", ",", "upper_bound", "=", "1.0", ",", ")", "return", "GroupedRowTransform", "(", "transform", "=", "winsorize", ",", "transform_args", "=", "(", "min_percentile", ",", "max_percentile", ")", ",", "factor", "=", "self", ",", "groupby", "=", "groupby", ",", "dtype", "=", "self", ".", "dtype", ",", "missing_value", "=", "self", ".", "missing_value", ",", "mask", "=", "mask", ",", "window_safe", "=", "self", ".", "window_safe", ",", ")" ]
Construct a new factor that winsorizes the result of this factor. Winsorizing changes values ranked less than the minimum percentile to the value at the minimum percentile. Similarly, values ranking above the maximum percentile are changed to the value at the maximum percentile. Winsorizing is useful for limiting the impact of extreme data points without completely removing those points. If ``mask`` is supplied, ignore values where ``mask`` returns False when computing percentile cutoffs, and output NaN anywhere the mask is False. If ``groupby`` is supplied, winsorization is applied separately to each group defined by ``groupby``. Parameters ---------- min_percentile: float, int Entries with values at or below this percentile will be replaced with the (len(input) * min_percentile)th lowest value. If low values should not be clipped, use 0. max_percentile: float, int Entries with values at or above this percentile will be replaced with the (len(input) * max_percentile)th lowest value. If high values should not be clipped, use 1. mask : zipline.pipeline.Filter, optional A Filter defining values to ignore when winsorizing. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to winsorize. Returns ------- winsorized : zipline.pipeline.Factor A Factor producing a winsorized version of self. Examples -------- .. code-block:: python price = USEquityPricing.close.latest columns={ 'PRICE': price, 'WINSOR_1': price.winsorize( min_percentile=0.25, max_percentile=0.75 ), 'WINSOR_2': price.winsorize( min_percentile=0.50, max_percentile=1.0 ), 'WINSOR_3': price.winsorize( min_percentile=0.0, max_percentile=0.5 ), } Given a pipeline with columns, defined above, the result for a given day could look like: :: 'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3' Asset_1 1 2 4 3 Asset_2 2 2 4 3 Asset_3 3 3 4 3 Asset_4 4 4 4 4 Asset_5 5 5 5 4 Asset_6 6 5 5 4 See Also -------- :func:`scipy.stats.mstats.winsorize` :meth:`pandas.DataFrame.groupby`
[ "Construct", "a", "new", "factor", "that", "winsorizes", "the", "result", "of", "this", "factor", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L852-L947
train
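The docstring above points to `scipy.stats.mstats.winsorize` in its See Also section; a minimal standalone demonstration of that scipy function (an illustration of the clipping behaviour, not zipline's grouped implementation):

```python
import numpy as np
from scipy.stats.mstats import winsorize

# Clip the bottom 20% and top 20% of values to the nearest retained value:
# the two lowest entries become 3, the two highest become 8.
data = np.arange(1, 11)  # [1, 2, ..., 10]
print(winsorize(data, limits=(0.2, 0.2)))
# [3 3 3 4 5 6 7 8 8 8]
```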
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.quantiles
def quantiles(self, bins, mask=NotSpecified): """ Construct a Classifier computing quantiles of the output of ``self``. Every non-NaN data point in the output is labelled with an integer value from 0 to (bins - 1). NaNs are labelled with -1. If ``mask`` is supplied, ignore data points in locations for which ``mask`` produces False, and emit a label of -1 at those locations. Parameters ---------- bins : int Number of bin labels to compute. mask : zipline.pipeline.Filter, optional Mask of values to ignore when computing quantiles. Returns ------- quantiles : zipline.pipeline.classifiers.Quantiles A Classifier producing integer labels ranging from 0 to (bins - 1). """ if mask is NotSpecified: mask = self.mask return Quantiles(inputs=(self,), bins=bins, mask=mask)
python
def quantiles(self, bins, mask=NotSpecified): """ Construct a Classifier computing quantiles of the output of ``self``. Every non-NaN data point in the output is labelled with an integer value from 0 to (bins - 1). NaNs are labelled with -1. If ``mask`` is supplied, ignore data points in locations for which ``mask`` produces False, and emit a label of -1 at those locations. Parameters ---------- bins : int Number of bin labels to compute. mask : zipline.pipeline.Filter, optional Mask of values to ignore when computing quantiles. Returns ------- quantiles : zipline.pipeline.classifiers.Quantiles A Classifier producing integer labels ranging from 0 to (bins - 1). """ if mask is NotSpecified: mask = self.mask return Quantiles(inputs=(self,), bins=bins, mask=mask)
[ "def", "quantiles", "(", "self", ",", "bins", ",", "mask", "=", "NotSpecified", ")", ":", "if", "mask", "is", "NotSpecified", ":", "mask", "=", "self", ".", "mask", "return", "Quantiles", "(", "inputs", "=", "(", "self", ",", ")", ",", "bins", "=", "bins", ",", "mask", "=", "mask", ")" ]
Construct a Classifier computing quantiles of the output of ``self``. Every non-NaN data point in the output is labelled with an integer value from 0 to (bins - 1). NaNs are labelled with -1. If ``mask`` is supplied, ignore data points in locations for which ``mask`` produces False, and emit a label of -1 at those locations. Parameters ---------- bins : int Number of bin labels to compute. mask : zipline.pipeline.Filter, optional Mask of values to ignore when computing quantiles. Returns ------- quantiles : zipline.pipeline.classifiers.Quantiles A Classifier producing integer labels ranging from 0 to (bins - 1).
[ "Construct", "a", "Classifier", "computing", "quantiles", "of", "the", "output", "of", "self", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L950-L974
train
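The labelling contract above (integers in [0, bins - 1], NaN mapped to -1) can be illustrated with plain pandas; this is a standalone sketch of the semantics, not the `Quantiles` classifier itself:

```python
import numpy as np
import pandas as pd

# pd.qcut with labels=False mimics the label scheme: non-NaN values get
# bin labels in [0, bins - 1]; we then map NaN slots to -1 by hand.
values = pd.Series([1.0, 5.0, np.nan, 9.0, 3.0])
labels = pd.qcut(values, q=4, labels=False)  # NaN input -> NaN label
print(labels.fillna(-1).astype(int).tolist())
# [0, 2, -1, 3, 1]
```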
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.top
def top(self, N, mask=NotSpecified, groupby=NotSpecified): """ Construct a Filter matching the top N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the top N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, top values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.filters.Filter """ if N == 1: # Special case: if N == 1, we can avoid doing a full sort on every # group, which is a big win. return self._maximum(mask=mask, groupby=groupby) return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
python
def top(self, N, mask=NotSpecified, groupby=NotSpecified): """ Construct a Filter matching the top N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the top N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, top values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.filters.Filter """ if N == 1: # Special case: if N == 1, we can avoid doing a full sort on every # group, which is a big win. return self._maximum(mask=mask, groupby=groupby) return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
[ "def", "top", "(", "self", ",", "N", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "if", "N", "==", "1", ":", "# Special case: if N == 1, we can avoid doing a full sort on every", "# group, which is a big win.", "return", "self", ".", "_maximum", "(", "mask", "=", "mask", ",", "groupby", "=", "groupby", ")", "return", "self", ".", "rank", "(", "ascending", "=", "False", ",", "mask", "=", "mask", ",", "groupby", "=", "groupby", ")", "<=", "N" ]
Construct a Filter matching the top N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the top N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, top values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.filters.Filter
[ "Construct", "a", "Filter", "matching", "the", "top", "N", "asset", "values", "of", "self", "each", "day", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1048-L1074
train
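Aside from the N == 1 fast path, `top` is just a descending rank comparison. A per-row pandas analogue of that comparison, as a standalone illustration:

```python
import pandas as pd

# One day's cross-section: rank values descending and keep ranks <= N.
row = pd.Series({'A': 3.0, 'B': 9.0, 'C': 1.0, 'D': 7.0})
N = 2
in_top_n = row.rank(ascending=False) <= N
print(in_top_n.to_dict())
# {'A': False, 'B': True, 'C': False, 'D': True}
```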
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.bottom
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified): """ Construct a Filter matching the bottom N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the bottom N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, bottom values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.Filter """ return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
python
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified): """ Construct a Filter matching the bottom N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the bottom N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, bottom values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.Filter """ return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
[ "def", "bottom", "(", "self", ",", "N", ",", "mask", "=", "NotSpecified", ",", "groupby", "=", "NotSpecified", ")", ":", "return", "self", ".", "rank", "(", "ascending", "=", "True", ",", "mask", "=", "mask", ",", "groupby", "=", "groupby", ")", "<=", "N" ]
Construct a Filter matching the bottom N asset values of self each day. If ``groupby`` is supplied, returns a Filter matching the bottom N asset values for each group. Parameters ---------- N : int Number of assets passing the returned filter each day. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when computing ranks. If mask is supplied, bottom values are computed ignoring any asset/date pairs for which `mask` produces a value of False. groupby : zipline.pipeline.Classifier, optional A classifier defining partitions over which to perform ranking. Returns ------- filter : zipline.pipeline.Filter
[ "Construct", "a", "Filter", "matching", "the", "bottom", "N", "asset", "values", "of", "self", "each", "day", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1076-L1098
train
quantopian/zipline
zipline/pipeline/factors/factor.py
Factor.percentile_between
def percentile_between(self, min_percentile, max_percentile, mask=NotSpecified): """ Construct a new Filter representing entries from the output of this Factor that fall within the percentile range defined by min_percentile and max_percentile. Parameters ---------- min_percentile : float [0.0, 100.0] Return True for assets falling above this percentile in the data. max_percentile : float [0.0, 100.0] Return True for assets falling below this percentile in the data. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when calculating percentile thresholds. If mask is supplied, percentile cutoffs are computed each day using only assets for which ``mask`` returns True. Assets for which ``mask`` produces False will produce False in the output of this Factor as well. Returns ------- out : zipline.pipeline.filters.PercentileFilter A new filter that will compute the specified percentile-range mask. See Also -------- zipline.pipeline.filters.filter.PercentileFilter """ return PercentileFilter( self, min_percentile=min_percentile, max_percentile=max_percentile, mask=mask, )
python
def percentile_between(self, min_percentile, max_percentile, mask=NotSpecified): """ Construct a new Filter representing entries from the output of this Factor that fall within the percentile range defined by min_percentile and max_percentile. Parameters ---------- min_percentile : float [0.0, 100.0] Return True for assets falling above this percentile in the data. max_percentile : float [0.0, 100.0] Return True for assets falling below this percentile in the data. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when calculating percentile thresholds. If mask is supplied, percentile cutoffs are computed each day using only assets for which ``mask`` returns True. Assets for which ``mask`` produces False will produce False in the output of this Factor as well. Returns ------- out : zipline.pipeline.filters.PercentileFilter A new filter that will compute the specified percentile-range mask. See Also -------- zipline.pipeline.filters.filter.PercentileFilter """ return PercentileFilter( self, min_percentile=min_percentile, max_percentile=max_percentile, mask=mask, )
[ "def", "percentile_between", "(", "self", ",", "min_percentile", ",", "max_percentile", ",", "mask", "=", "NotSpecified", ")", ":", "return", "PercentileFilter", "(", "self", ",", "min_percentile", "=", "min_percentile", ",", "max_percentile", "=", "max_percentile", ",", "mask", "=", "mask", ",", ")" ]
Construct a new Filter representing entries from the output of this Factor that fall within the percentile range defined by min_percentile and max_percentile. Parameters ---------- min_percentile : float [0.0, 100.0] Return True for assets falling above this percentile in the data. max_percentile : float [0.0, 100.0] Return True for assets falling below this percentile in the data. mask : zipline.pipeline.Filter, optional A Filter representing assets to consider when calculating percentile thresholds. If mask is supplied, percentile cutoffs are computed each day using only assets for which ``mask`` returns True. Assets for which ``mask`` produces False will produce False in the output of this Factor as well. Returns ------- out : zipline.pipeline.filters.PercentileFilter A new filter that will compute the specified percentile-range mask. See Also -------- zipline.pipeline.filters.filter.PercentileFilter
[ "Construct", "a", "new", "Filter", "representing", "entries", "from", "the", "output", "of", "this", "Factor", "that", "fall", "within", "the", "percentile", "range", "defined", "by", "min_percentile", "and", "max_percentile", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1103-L1139
train
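A minimal numpy sketch of the percentile-range semantics described above; this mirrors the documented behaviour for one day's row, but `PercentileFilter`'s exact boundary handling and masking logic live in zipline itself:

```python
import numpy as np

# Keep values whose rank falls between the 25th and 75th percentile
# cutoffs of the row; NaN-aware via np.nanpercentile.
row = np.array([10.0, 20.0, 30.0, 40.0, 50.0])
lo = np.nanpercentile(row, 25)  # 20.0
hi = np.nanpercentile(row, 75)  # 40.0
print((row >= lo) & (row <= hi))
# [False  True  True  True False]
```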
quantopian/zipline
zipline/pipeline/factors/factor.py
Rank._validate
def _validate(self): """ Verify that the stored rank method is valid. """ if self._method not in _RANK_METHODS: raise UnknownRankMethod( method=self._method, choices=set(_RANK_METHODS), ) return super(Rank, self)._validate()
python
def _validate(self): """ Verify that the stored rank method is valid. """ if self._method not in _RANK_METHODS: raise UnknownRankMethod( method=self._method, choices=set(_RANK_METHODS), ) return super(Rank, self)._validate()
[ "def", "_validate", "(", "self", ")", ":", "if", "self", ".", "_method", "not", "in", "_RANK_METHODS", ":", "raise", "UnknownRankMethod", "(", "method", "=", "self", ".", "_method", ",", "choices", "=", "set", "(", "_RANK_METHODS", ")", ",", ")", "return", "super", "(", "Rank", ",", "self", ")", ".", "_validate", "(", ")" ]
Verify that the stored rank method is valid.
[ "Verify", "that", "the", "stored", "rank", "method", "is", "valid", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1382-L1391
train
quantopian/zipline
zipline/pipeline/factors/factor.py
Rank._compute
def _compute(self, arrays, dates, assets, mask): """ For each row in the input, compute a like-shaped array of per-row ranks. """ return masked_rankdata_2d( arrays[0], mask, self.inputs[0].missing_value, self._method, self._ascending, )
python
def _compute(self, arrays, dates, assets, mask): """ For each row in the input, compute a like-shaped array of per-row ranks. """ return masked_rankdata_2d( arrays[0], mask, self.inputs[0].missing_value, self._method, self._ascending, )
[ "def", "_compute", "(", "self", ",", "arrays", ",", "dates", ",", "assets", ",", "mask", ")", ":", "return", "masked_rankdata_2d", "(", "arrays", "[", "0", "]", ",", "mask", ",", "self", ".", "inputs", "[", "0", "]", ".", "missing_value", ",", "self", ".", "_method", ",", "self", ".", "_ascending", ",", ")" ]
For each row in the input, compute a like-shaped array of per-row ranks.
[ "For", "each", "row", "in", "the", "input", "compute", "a", "like", "-", "shaped", "array", "of", "per", "-", "row", "ranks", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L1393-L1404
train
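For intuition about what `Rank._compute` produces for a single day, here is what scipy's underlying ranking primitive does on one row; zipline's `masked_rankdata_2d` applies this idea row-by-row with masking and missing-value handling layered on top:

```python
import numpy as np
from scipy.stats import rankdata

# Per-row ordinal ranking: smallest value gets rank 1.
row = np.array([3.0, 1.0, 2.0])
print(rankdata(row, method='ordinal'))  # ranks: 3, 1, 2
```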
quantopian/zipline
zipline/utils/pandas_utils.py
_time_to_micros
def _time_to_micros(time): """Convert a time into microseconds since midnight. Parameters ---------- time : datetime.time The time to convert. Returns ------- us : int The number of microseconds since midnight. Notes ----- This does not account for leap seconds or daylight savings. """ seconds = time.hour * 60 * 60 + time.minute * 60 + time.second return 1000000 * seconds + time.microsecond
python
def _time_to_micros(time): """Convert a time into microseconds since midnight. Parameters ---------- time : datetime.time The time to convert. Returns ------- us : int The number of microseconds since midnight. Notes ----- This does not account for leap seconds or daylight savings. """ seconds = time.hour * 60 * 60 + time.minute * 60 + time.second return 1000000 * seconds + time.microsecond
[ "def", "_time_to_micros", "(", "time", ")", ":", "seconds", "=", "time", ".", "hour", "*", "60", "*", "60", "+", "time", ".", "minute", "*", "60", "+", "time", ".", "second", "return", "1000000", "*", "seconds", "+", "time", ".", "microsecond" ]
Convert a time into microseconds since midnight. Parameters ---------- time : datetime.time The time to convert. Returns ------- us : int The number of microseconds since midnight. Notes ----- This does not account for leap seconds or daylight savings.
[ "Convert", "a", "time", "into", "microseconds", "since", "midnight", ".", "Parameters", "----------", "time", ":", "datetime", ".", "time", "The", "time", "to", "convert", ".", "Returns", "-------", "us", ":", "int", "The", "number", "of", "microseconds", "since", "midnight", ".", "Notes", "-----", "This", "does", "not", "account", "for", "leap", "seconds", "or", "daylight", "savings", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L48-L63
train
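A worked example of the arithmetic in `_time_to_micros` (the function is private, so this just inlines the same computation):

```python
import datetime

# 09:30:00.500000 is 9 * 3600 + 30 * 60 = 34200 seconds after midnight,
# so 34200 * 1,000,000 + 500,000 microseconds.
t = datetime.time(9, 30, 0, 500000)
seconds = t.hour * 60 * 60 + t.minute * 60 + t.second
print(1000000 * seconds + t.microsecond)  # 34200500000
```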
quantopian/zipline
zipline/utils/pandas_utils.py
mask_between_time
def mask_between_time(dts, start, end, include_start=True, include_end=True): """Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time` """ # This function is adapted from # `pandas.DatetimeIndex.indexer_between_time` which was originally # written by Wes McKinney, Chang She, and Grant Roch. time_micros = dts._get_time_micros() start_micros = _time_to_micros(start) end_micros = _time_to_micros(end) left_op, right_op, join_op = _opmap[ bool(include_start), bool(include_end), start_micros <= end_micros, ] return join_op( left_op(start_micros, time_micros), right_op(time_micros, end_micros), )
python
def mask_between_time(dts, start, end, include_start=True, include_end=True): """Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time` """ # This function is adapted from # `pandas.DatetimeIndex.indexer_between_time` which was originally # written by Wes McKinney, Chang She, and Grant Roch. time_micros = dts._get_time_micros() start_micros = _time_to_micros(start) end_micros = _time_to_micros(end) left_op, right_op, join_op = _opmap[ bool(include_start), bool(include_end), start_micros <= end_micros, ] return join_op( left_op(start_micros, time_micros), right_op(time_micros, end_micros), )
[ "def", "mask_between_time", "(", "dts", ",", "start", ",", "end", ",", "include_start", "=", "True", ",", "include_end", "=", "True", ")", ":", "# This function is adapted from", "# `pandas.Datetime.Index.indexer_between_time` which was originally", "# written by Wes McKinney, Chang She, and Grant Roch.", "time_micros", "=", "dts", ".", "_get_time_micros", "(", ")", "start_micros", "=", "_time_to_micros", "(", "start", ")", "end_micros", "=", "_time_to_micros", "(", "end", ")", "left_op", ",", "right_op", ",", "join_op", "=", "_opmap", "[", "bool", "(", "include_start", ")", ",", "bool", "(", "include_end", ")", ",", "start_micros", "<=", "end_micros", ",", "]", "return", "join_op", "(", "left_op", "(", "start_micros", ",", "time_micros", ")", ",", "right_op", "(", "time_micros", ",", "end_micros", ")", ",", ")" ]
Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time`
[ "Return", "a", "mask", "of", "all", "of", "the", "datetimes", "in", "dts", "that", "are", "between", "start", "and", "end", ".", "Parameters", "----------", "dts", ":", "pd", ".", "DatetimeIndex", "The", "index", "to", "mask", ".", "start", ":", "time", "Mask", "away", "times", "less", "than", "the", "start", ".", "end", ":", "time", "Mask", "away", "times", "greater", "than", "the", "end", ".", "include_start", ":", "bool", "optional", "Inclusive", "on", "start", ".", "include_end", ":", "bool", "optional", "Inclusive", "on", "end", ".", "Returns", "-------", "mask", ":", "np", ".", "ndarray", "[", "bool", "]", "A", "bool", "array", "masking", "dts", ".", "See", "Also", "--------", ":", "meth", ":", "pandas", ".", "DatetimeIndex", ".", "indexer_between_time" ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L72-L111
train
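The pandas method referenced in the See Also section above shows the same selection semantics; a small runnable demonstration of it:

```python
import pandas as pd

# Select minutes between 09:30 and 09:32, inclusive on both ends,
# from a minutely index.
dts = pd.date_range('2014-01-06 09:28', periods=8, freq='min')
print(dts[dts.indexer_between_time('09:30', '09:32')])
# ['2014-01-06 09:30:00', '2014-01-06 09:31:00', '2014-01-06 09:32:00']
```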
quantopian/zipline
zipline/utils/pandas_utils.py
find_in_sorted_index
def find_in_sorted_index(dts, dt): """ Find the index of ``dt`` in ``dts``. This function should be used instead of `dts.get_loc(dt)` if the index is large enough that we don't want to initialize a hash table in ``dts``. In particular, this should always be used on minutely trading calendars. Parameters ---------- dts : pd.DatetimeIndex Index in which to look up ``dt``. **Must be sorted**. dt : pd.Timestamp ``dt`` to be looked up. Returns ------- ix : int Integer index such that dts[ix] == dt. Raises ------ LookupError If dt is not in ``dts``. """ ix = dts.searchsorted(dt) if ix == len(dts) or dts[ix] != dt: raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts)) return ix
python
def find_in_sorted_index(dts, dt): """ Find the index of ``dt`` in ``dts``. This function should be used instead of `dts.get_loc(dt)` if the index is large enough that we don't want to initialize a hash table in ``dts``. In particular, this should always be used on minutely trading calendars. Parameters ---------- dts : pd.DatetimeIndex Index in which to look up ``dt``. **Must be sorted**. dt : pd.Timestamp ``dt`` to be looked up. Returns ------- ix : int Integer index such that dts[ix] == dt. Raises ------ LookupError If dt is not in ``dts``. """ ix = dts.searchsorted(dt) if ix == len(dts) or dts[ix] != dt: raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts)) return ix
[ "def", "find_in_sorted_index", "(", "dts", ",", "dt", ")", ":", "ix", "=", "dts", ".", "searchsorted", "(", "dt", ")", "if", "ix", "==", "len", "(", "dts", ")", "or", "dts", "[", "ix", "]", "!=", "dt", ":", "raise", "LookupError", "(", "\"{dt} is not in {dts}\"", ".", "format", "(", "dt", "=", "dt", ",", "dts", "=", "dts", ")", ")", "return", "ix" ]
Find the index of ``dt`` in ``dts``. This function should be used instead of `dts.get_loc(dt)` if the index is large enough that we don't want to initialize a hash table in ``dts``. In particular, this should always be used on minutely trading calendars. Parameters ---------- dts : pd.DatetimeIndex Index in which to look up ``dt``. **Must be sorted**. dt : pd.Timestamp ``dt`` to be looked up. Returns ------- ix : int Integer index such that dts[ix] == dt. Raises ------ LookupError If dt is not in ``dts``.
[ "Find", "the", "index", "of", "dt", "in", "dts", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L114-L142
train
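A short usage sketch of the function above, assuming zipline is importable in the environment:

```python
import pandas as pd
from zipline.utils.pandas_utils import find_in_sorted_index

dts = pd.DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'])
print(find_in_sorted_index(dts, pd.Timestamp('2014-01-06')))  # 2

# A missing date raises LookupError rather than silently returning the
# insertion point that a bare searchsorted would give:
# find_in_sorted_index(dts, pd.Timestamp('2014-01-04'))  -> LookupError
```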
quantopian/zipline
zipline/utils/pandas_utils.py
nearest_unequal_elements
def nearest_unequal_elements(dts, dt): """ Find values in ``dts`` closest but not equal to ``dt``. Returns a pair of (last_before, first_after). When ``dt`` is less than any element in ``dts``, ``last_before`` is None. When ``dt`` is greater than any element in ``dts``, ``first_after`` is None. ``dts`` must be unique and sorted in increasing order. Parameters ---------- dts : pd.DatetimeIndex Dates in which to search. dt : pd.Timestamp Date for which to find bounds. """ if not dts.is_unique: raise ValueError("dts must be unique") if not dts.is_monotonic_increasing: raise ValueError("dts must be sorted in increasing order") if not len(dts): return None, None sortpos = dts.searchsorted(dt, side='left') try: sortval = dts[sortpos] except IndexError: # dt is greater than any value in the array. return dts[-1], None if dt < sortval: lower_ix = sortpos - 1 upper_ix = sortpos elif dt == sortval: lower_ix = sortpos - 1 upper_ix = sortpos + 1 else: lower_ix = sortpos upper_ix = sortpos + 1 lower_value = dts[lower_ix] if lower_ix >= 0 else None upper_value = dts[upper_ix] if upper_ix < len(dts) else None return lower_value, upper_value
python
def nearest_unequal_elements(dts, dt): """ Find values in ``dts`` closest but not equal to ``dt``. Returns a pair of (last_before, first_after). When ``dt`` is less than any element in ``dts``, ``last_before`` is None. When ``dt`` is greater than any element in ``dts``, ``first_after`` is None. ``dts`` must be unique and sorted in increasing order. Parameters ---------- dts : pd.DatetimeIndex Dates in which to search. dt : pd.Timestamp Date for which to find bounds. """ if not dts.is_unique: raise ValueError("dts must be unique") if not dts.is_monotonic_increasing: raise ValueError("dts must be sorted in increasing order") if not len(dts): return None, None sortpos = dts.searchsorted(dt, side='left') try: sortval = dts[sortpos] except IndexError: # dt is greater than any value in the array. return dts[-1], None if dt < sortval: lower_ix = sortpos - 1 upper_ix = sortpos elif dt == sortval: lower_ix = sortpos - 1 upper_ix = sortpos + 1 else: lower_ix = sortpos upper_ix = sortpos + 1 lower_value = dts[lower_ix] if lower_ix >= 0 else None upper_value = dts[upper_ix] if upper_ix < len(dts) else None return lower_value, upper_value
[ "def", "nearest_unequal_elements", "(", "dts", ",", "dt", ")", ":", "if", "not", "dts", ".", "is_unique", ":", "raise", "ValueError", "(", "\"dts must be unique\"", ")", "if", "not", "dts", ".", "is_monotonic_increasing", ":", "raise", "ValueError", "(", "\"dts must be sorted in increasing order\"", ")", "if", "not", "len", "(", "dts", ")", ":", "return", "None", ",", "None", "sortpos", "=", "dts", ".", "searchsorted", "(", "dt", ",", "side", "=", "'left'", ")", "try", ":", "sortval", "=", "dts", "[", "sortpos", "]", "except", "IndexError", ":", "# dt is greater than any value in the array.", "return", "dts", "[", "-", "1", "]", ",", "None", "if", "dt", "<", "sortval", ":", "lower_ix", "=", "sortpos", "-", "1", "upper_ix", "=", "sortpos", "elif", "dt", "==", "sortval", ":", "lower_ix", "=", "sortpos", "-", "1", "upper_ix", "=", "sortpos", "+", "1", "else", ":", "lower_ix", "=", "sortpos", "upper_ix", "=", "sortpos", "+", "1", "lower_value", "=", "dts", "[", "lower_ix", "]", "if", "lower_ix", ">=", "0", "else", "None", "upper_value", "=", "dts", "[", "upper_ix", "]", "if", "upper_ix", "<", "len", "(", "dts", ")", "else", "None", "return", "lower_value", ",", "upper_value" ]
Find values in ``dts`` closest but not equal to ``dt``. Returns a pair of (last_before, first_after). When ``dt`` is less than any element in ``dts``, ``last_before`` is None. When ``dt`` is greater than any element in ``dts``, ``first_after`` is None. ``dts`` must be unique and sorted in increasing order. Parameters ---------- dts : pd.DatetimeIndex Dates in which to search. dt : pd.Timestamp Date for which to find bounds.
[ "Find", "values", "in", "dts", "closest", "but", "not", "equal", "to", "dt", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L145-L192
train
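A usage sketch of the bounds-finding behaviour above, assuming zipline is importable:

```python
import pandas as pd
from zipline.utils.pandas_utils import nearest_unequal_elements

dts = pd.DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'])

# dt strictly between two elements: both bounds exist.
print(nearest_unequal_elements(dts, pd.Timestamp('2014-01-04')))
# (Timestamp('2014-01-03 00:00:00'), Timestamp('2014-01-06 00:00:00'))

# dt equal to an element: that element itself is excluded.
print(nearest_unequal_elements(dts, pd.Timestamp('2014-01-03')))
# (Timestamp('2014-01-02 00:00:00'), Timestamp('2014-01-06 00:00:00'))
```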
quantopian/zipline
zipline/utils/pandas_utils.py
categorical_df_concat
def categorical_df_concat(df_list, inplace=False): """ Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list. """ if not inplace: df_list = deepcopy(df_list) # Assert each dataframe has the same columns/dtypes df = df_list[0] if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]): raise ValueError("Input DataFrames must have the same columns/dtypes.") categorical_columns = df.columns[df.dtypes == 'category'] for col in categorical_columns: new_categories = sorted( set().union( *(frame[col].cat.categories for frame in df_list) ) ) with ignore_pandas_nan_categorical_warning(): for df in df_list: df[col].cat.set_categories(new_categories, inplace=True) return pd.concat(df_list)
python
def categorical_df_concat(df_list, inplace=False): """ Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list. """ if not inplace: df_list = deepcopy(df_list) # Assert each dataframe has the same columns/dtypes df = df_list[0] if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]): raise ValueError("Input DataFrames must have the same columns/dtypes.") categorical_columns = df.columns[df.dtypes == 'category'] for col in categorical_columns: new_categories = sorted( set().union( *(frame[col].cat.categories for frame in df_list) ) ) with ignore_pandas_nan_categorical_warning(): for df in df_list: df[col].cat.set_categories(new_categories, inplace=True) return pd.concat(df_list)
[ "def", "categorical_df_concat", "(", "df_list", ",", "inplace", "=", "False", ")", ":", "if", "not", "inplace", ":", "df_list", "=", "deepcopy", "(", "df_list", ")", "# Assert each dataframe has the same columns/dtypes", "df", "=", "df_list", "[", "0", "]", "if", "not", "all", "(", "[", "(", "df", ".", "dtypes", ".", "equals", "(", "df_i", ".", "dtypes", ")", ")", "for", "df_i", "in", "df_list", "[", "1", ":", "]", "]", ")", ":", "raise", "ValueError", "(", "\"Input DataFrames must have the same columns/dtypes.\"", ")", "categorical_columns", "=", "df", ".", "columns", "[", "df", ".", "dtypes", "==", "'category'", "]", "for", "col", "in", "categorical_columns", ":", "new_categories", "=", "sorted", "(", "set", "(", ")", ".", "union", "(", "*", "(", "frame", "[", "col", "]", ".", "cat", ".", "categories", "for", "frame", "in", "df_list", ")", ")", ")", "with", "ignore_pandas_nan_categorical_warning", "(", ")", ":", "for", "df", "in", "df_list", ":", "df", "[", "col", "]", ".", "cat", ".", "set_categories", "(", "new_categories", ",", "inplace", "=", "True", ")", "return", "pd", ".", "concat", "(", "df_list", ")" ]
Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list.
[ "Prepare", "list", "of", "pandas", "DataFrames", "to", "be", "used", "as", "input", "to", "pd", ".", "concat", ".", "Ensure", "any", "columns", "of", "type", "category", "have", "the", "same", "categories", "across", "each", "dataframe", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L247-L287
train
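A standalone illustration of why the category-union step above matters; note this sketch assigns the realigned columns back rather than using the deprecated `inplace=True` form that the original code relies on:

```python
import pandas as pd

# Concatenating frames whose 'category' columns disagree on categories
# falls back to object dtype. Aligning on the union first (what
# categorical_df_concat does) preserves the categorical dtype.
a = pd.DataFrame({'label': pd.Categorical(['x', 'y'])})
b = pd.DataFrame({'label': pd.Categorical(['y', 'z'])})

union = sorted(set(a['label'].cat.categories) | set(b['label'].cat.categories))
a['label'] = a['label'].cat.set_categories(union)
b['label'] = b['label'].cat.set_categories(union)
print(pd.concat([a, b])['label'].dtype)  # category
```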
quantopian/zipline
zipline/utils/pandas_utils.py
check_indexes_all_same
def check_indexes_all_same(indexes, message="Indexes are not equal."): """Check that a list of Index objects are all equal. Parameters ---------- indexes : iterable[pd.Index] Iterable of indexes to check. Raises ------ ValueError If the indexes are not all the same. """ iterator = iter(indexes) first = next(iterator) for other in iterator: same = (first == other) if not same.all(): bad_loc = np.flatnonzero(~same)[0] raise ValueError( "{}\nFirst difference is at index {}: " "{} != {}".format( message, bad_loc, first[bad_loc], other[bad_loc] ), )
python
def check_indexes_all_same(indexes, message="Indexes are not equal."): """Check that a list of Index objects are all equal. Parameters ---------- indexes : iterable[pd.Index] Iterable of indexes to check. Raises ------ ValueError If the indexes are not all the same. """ iterator = iter(indexes) first = next(iterator) for other in iterator: same = (first == other) if not same.all(): bad_loc = np.flatnonzero(~same)[0] raise ValueError( "{}\nFirst difference is at index {}: " "{} != {}".format( message, bad_loc, first[bad_loc], other[bad_loc] ), )
[ "def", "check_indexes_all_same", "(", "indexes", ",", "message", "=", "\"Indexes are not equal.\"", ")", ":", "iterator", "=", "iter", "(", "indexes", ")", "first", "=", "next", "(", "iterator", ")", "for", "other", "in", "iterator", ":", "same", "=", "(", "first", "==", "other", ")", "if", "not", "same", ".", "all", "(", ")", ":", "bad_loc", "=", "np", ".", "flatnonzero", "(", "~", "same", ")", "[", "0", "]", "raise", "ValueError", "(", "\"{}\\nFirst difference is at index {}: \"", "\"{} != {}\"", ".", "format", "(", "message", ",", "bad_loc", ",", "first", "[", "bad_loc", "]", ",", "other", "[", "bad_loc", "]", ")", ",", ")" ]
Check that a list of Index objects are all equal. Parameters ---------- indexes : iterable[pd.Index] Iterable of indexes to check. Raises ------ ValueError If the indexes are not all the same.
[ "Check", "that", "a", "list", "of", "Index", "objects", "are", "all", "equal", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/pandas_utils.py#L325-L349
train
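A usage sketch of the check above, assuming zipline is importable; the error message format follows the function body in the record:

```python
import pandas as pd
from zipline.utils.pandas_utils import check_indexes_all_same

check_indexes_all_same([pd.Index([1, 2, 3]), pd.Index([1, 2, 3])])  # passes

check_indexes_all_same(
    [pd.Index([1, 2, 3]), pd.Index([1, 9, 3])],
    message="Mismatched indexes.",
)
# ValueError: Mismatched indexes.
# First difference is at index 1: 2 != 9
```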
quantopian/zipline
zipline/pipeline/loaders/events.py
required_event_fields
def required_event_fields(next_value_columns, previous_value_columns): """ Compute the set of resource columns required to serve ``next_value_columns`` and ``previous_value_columns``. """ # These metadata columns are used to align event indexers. return { TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME, }.union( # We also expect any of the field names that our loadable columns # are mapped to. viewvalues(next_value_columns), viewvalues(previous_value_columns), )
python
def required_event_fields(next_value_columns, previous_value_columns): """ Compute the set of resource columns required to serve ``next_value_columns`` and ``previous_value_columns``. """ # These metadata columns are used to align event indexers. return { TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME, }.union( # We also expect any of the field names that our loadable columns # are mapped to. viewvalues(next_value_columns), viewvalues(previous_value_columns), )
[ "def", "required_event_fields", "(", "next_value_columns", ",", "previous_value_columns", ")", ":", "# These metadata columns are used to align event indexers.", "return", "{", "TS_FIELD_NAME", ",", "SID_FIELD_NAME", ",", "EVENT_DATE_FIELD_NAME", ",", "}", ".", "union", "(", "# We also expect any of the field names that our loadable columns", "# are mapped to.", "viewvalues", "(", "next_value_columns", ")", ",", "viewvalues", "(", "previous_value_columns", ")", ",", ")" ]
Compute the set of resource columns required to serve ``next_value_columns`` and ``previous_value_columns``.
[ "Compute", "the", "set", "of", "resource", "columns", "required", "to", "serve", "next_value_columns", "and", "previous_value_columns", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L21-L36
train
quantopian/zipline
zipline/pipeline/loaders/events.py
validate_column_specs
def validate_column_specs(events, next_value_columns, previous_value_columns): """ Verify that the columns of ``events`` can be used by an EventsLoader to serve the BoundColumns described by ``next_value_columns`` and ``previous_value_columns``. """ required = required_event_fields(next_value_columns, previous_value_columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EventsLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) )
python
def validate_column_specs(events, next_value_columns, previous_value_columns): """ Verify that the columns of ``events`` can be used by an EventsLoader to serve the BoundColumns described by ``next_value_columns`` and ``previous_value_columns``. """ required = required_event_fields(next_value_columns, previous_value_columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EventsLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) )
[ "def", "validate_column_specs", "(", "events", ",", "next_value_columns", ",", "previous_value_columns", ")", ":", "required", "=", "required_event_fields", "(", "next_value_columns", ",", "previous_value_columns", ")", "received", "=", "set", "(", "events", ".", "columns", ")", "missing", "=", "required", "-", "received", "if", "missing", ":", "raise", "ValueError", "(", "\"EventsLoader missing required columns {missing}.\\n\"", "\"Got Columns: {received}\\n\"", "\"Expected Columns: {required}\"", ".", "format", "(", "missing", "=", "sorted", "(", "missing", ")", ",", "received", "=", "sorted", "(", "received", ")", ",", "required", "=", "sorted", "(", "required", ")", ",", ")", ")" ]
Verify that the columns of ``events`` can be used by an EventsLoader to serve the BoundColumns described by ``next_value_columns`` and ``previous_value_columns``.
[ "Verify", "that", "the", "columns", "of", "events", "can", "be", "used", "by", "an", "EventsLoader", "to", "serve", "the", "BoundColumns", "described", "by", "next_value_columns", "and", "previous_value_columns", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L39-L58
train
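A hedged sketch of the failure mode this validator guards against. It assumes the metadata constants resolve to the strings 'timestamp', 'sid', and 'event_date' (their values in zipline.pipeline.common); the dict key is a hypothetical stand-in for a BoundColumn:

import pandas as pd
from zipline.pipeline.loaders.events import validate_column_specs

# An events frame that carries a loadable 'value' field but no event_date.
events = pd.DataFrame(columns=['timestamp', 'sid', 'value'])

try:
    validate_column_specs(
        events,
        next_value_columns={'EventDataSet.value': 'value'},  # hypothetical key
        previous_value_columns={},
    )
except ValueError as e:
    print(e)  # reports ['event_date'] as the missing required column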
quantopian/zipline
zipline/pipeline/loaders/events.py
EventsLoader.split_next_and_previous_event_columns
def split_next_and_previous_event_columns(self, requested_columns): """ Split requested columns into columns that should load the next known value and columns that should load the previous known value. Parameters ---------- requested_columns : iterable[BoundColumn] Returns ------- next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn] ``requested_columns``, partitioned into sub-sequences based on whether the column should produce values from the next event or the previous event """ def next_or_previous(c): if c in self.next_value_columns: return 'next' elif c in self.previous_value_columns: return 'previous' raise ValueError( "{c} not found in next_value_columns " "or previous_value_columns".format(c=c) ) groups = groupby(next_or_previous, requested_columns) return groups.get('next', ()), groups.get('previous', ())
python
def split_next_and_previous_event_columns(self, requested_columns): """ Split requested columns into columns that should load the next known value and columns that should load the previous known value. Parameters ---------- requested_columns : iterable[BoundColumn] Returns ------- next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn] ``requested_columns``, partitioned into sub-sequences based on whether the column should produce values from the next event or the previous event """ def next_or_previous(c): if c in self.next_value_columns: return 'next' elif c in self.previous_value_columns: return 'previous' raise ValueError( "{c} not found in next_value_columns " "or previous_value_columns".format(c=c) ) groups = groupby(next_or_previous, requested_columns) return groups.get('next', ()), groups.get('previous', ())
[ "def", "split_next_and_previous_event_columns", "(", "self", ",", "requested_columns", ")", ":", "def", "next_or_previous", "(", "c", ")", ":", "if", "c", "in", "self", ".", "next_value_columns", ":", "return", "'next'", "elif", "c", "in", "self", ".", "previous_value_columns", ":", "return", "'previous'", "raise", "ValueError", "(", "\"{c} not found in next_value_columns \"", "\"or previous_value_columns\"", ".", "format", "(", "c", "=", "c", ")", ")", "groups", "=", "groupby", "(", "next_or_previous", ",", "requested_columns", ")", "return", "groups", ".", "get", "(", "'next'", ",", "(", ")", ")", ",", "groups", ".", "get", "(", "'previous'", ",", "(", ")", ")" ]
Split requested columns into columns that should load the next known value and columns that should load the previous known value. Parameters ---------- requested_columns : iterable[BoundColumn] Returns ------- next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn] ``requested_columns``, partitioned into sub-sequences based on whether the column should produce values from the next event or the previous event
[ "Split", "requested", "columns", "into", "columns", "that", "should", "load", "the", "next", "known", "value", "and", "columns", "that", "should", "load", "the", "previous", "known", "value", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/loaders/events.py#L119-L146
train
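The non-obvious piece of this method is toolz.groupby, which returns a dict of lists keyed by the classifier's result, so .get with a default lets an absent group degrade to an empty tuple. A standalone sketch of that pattern, with hypothetical column names in place of zipline BoundColumns:

from toolz import groupby

next_cols = {'next_announcement'}          # hypothetical membership sets
previous_cols = {'prev_announcement'}

def next_or_previous(c):
    if c in next_cols:
        return 'next'
    if c in previous_cols:
        return 'previous'
    raise ValueError("%s not found" % c)

groups = groupby(next_or_previous, ['next_announcement', 'prev_announcement'])
print(groups.get('next', ()), groups.get('previous', ()))
# ['next_announcement'] ['prev_announcement']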
quantopian/zipline
zipline/lib/labelarray.py
compare_arrays
def compare_arrays(left, right): "Eq check with a short-circuit for identical objects." return ( left is right or ((left.shape == right.shape) and (left == right).all()) )
python
def compare_arrays(left, right): "Eq check with a short-circuit for identical objects." return ( left is right or ((left.shape == right.shape) and (left == right).all()) )
[ "def", "compare_arrays", "(", "left", ",", "right", ")", ":", "return", "(", "left", "is", "right", "or", "(", "(", "left", ".", "shape", "==", "right", ".", "shape", ")", "and", "(", "left", "==", "right", ")", ".", "all", "(", ")", ")", ")" ]
Eq check with a short-circuit for identical objects.
[ "Eq", "check", "with", "a", "short", "-", "circuit", "for", "identical", "objects", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L38-L43
train
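A quick sketch of the three paths through compare_arrays (not from the source record):

import numpy as np
from zipline.lib.labelarray import compare_arrays

left = np.array(['a', 'b', 'c'], dtype=object)
compare_arrays(left, left)                   # True via the identity short-circuit
compare_arrays(left, left.copy())            # True via shape + element-wise check
compare_arrays(left, np.array(['a', 'b']))   # False: shapes differ, so == never runs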
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.from_codes_and_metadata
def from_codes_and_metadata(cls, codes, categories, reverse_categories, missing_value): """ Rehydrate a LabelArray from the codes and metadata. Parameters ---------- codes : np.ndarray[integral] The codes for the label array. categories : np.ndarray[object] The unique string categories. reverse_categories : dict[str, int] The mapping from category to its code-index. missing_value : any The value used to represent missing data. """ ret = codes.view(type=cls, dtype=np.void) ret._categories = categories ret._reverse_categories = reverse_categories ret._missing_value = missing_value return ret
python
def from_codes_and_metadata(cls, codes, categories, reverse_categories, missing_value): """ Rehydrate a LabelArray from the codes and metadata. Parameters ---------- codes : np.ndarray[integral] The codes for the label array. categories : np.ndarray[object] The unique string categories. reverse_categories : dict[str, int] The mapping from category to its code-index. missing_value : any The value used to represent missing data. """ ret = codes.view(type=cls, dtype=np.void) ret._categories = categories ret._reverse_categories = reverse_categories ret._missing_value = missing_value return ret
[ "def", "from_codes_and_metadata", "(", "cls", ",", "codes", ",", "categories", ",", "reverse_categories", ",", "missing_value", ")", ":", "ret", "=", "codes", ".", "view", "(", "type", "=", "cls", ",", "dtype", "=", "np", ".", "void", ")", "ret", ".", "_categories", "=", "categories", "ret", ".", "_reverse_categories", "=", "reverse_categories", "ret", ".", "_missing_value", "=", "missing_value", "return", "ret" ]
Rehydrate a LabelArray from the codes and metadata. Parameters ---------- codes : np.ndarray[integral] The codes for the label array. categories : np.ndarray[object] The unique string categories. reverse_categories : dict[str, int] The mapping from category to its code-index. missing_value : any The value used to represent missing data.
[ "Rehydrate", "a", "LabelArray", "from", "the", "codes", "and", "metadata", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L194-L217
train
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.as_int_array
def as_int_array(self): """ Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data. """ return self.view( type=ndarray, dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), )
python
def as_int_array(self): """ Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data. """ return self.view( type=ndarray, dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), )
[ "def", "as_int_array", "(", "self", ")", ":", "return", "self", ".", "view", "(", "type", "=", "ndarray", ",", "dtype", "=", "unsigned_int_dtype_with_size_in_bytes", "(", "self", ".", "itemsize", ")", ",", ")" ]
Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data.
[ "Convert", "self", "into", "a", "regular", "ndarray", "of", "ints", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L303-L312
train
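A hedged sketch of the O(1) view, assuming the constructor signature LabelArray(values, missing_value) from zipline/lib/labelarray.py:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b', 'a'], dtype=object), missing_value=None)
codes = arr.as_int_array()
print(codes)  # unsigned integer codes, one per label
# The result is a view over arr's own buffer, so no data is copied.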
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.as_categorical
def as_categorical(self): """ Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports. """ if len(self.shape) > 1: raise ValueError("Can't convert a 2D array to a categorical.") with ignore_pandas_nan_categorical_warning(): return pd.Categorical.from_codes( self.as_int_array(), # We need to make a copy because pandas >= 0.17 fails if this # buffer isn't writeable. self.categories.copy(), ordered=False, )
python
def as_categorical(self): """ Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports. """ if len(self.shape) > 1: raise ValueError("Can't convert a 2D array to a categorical.") with ignore_pandas_nan_categorical_warning(): return pd.Categorical.from_codes( self.as_int_array(), # We need to make a copy because pandas >= 0.17 fails if this # buffer isn't writeable. self.categories.copy(), ordered=False, )
[ "def", "as_categorical", "(", "self", ")", ":", "if", "len", "(", "self", ".", "shape", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Can't convert a 2D array to a categorical.\"", ")", "with", "ignore_pandas_nan_categorical_warning", "(", ")", ":", "return", "pd", ".", "Categorical", ".", "from_codes", "(", "self", ".", "as_int_array", "(", ")", ",", "# We need to make a copy because pandas >= 0.17 fails if this", "# buffer isn't writeable.", "self", ".", "categories", ".", "copy", "(", ")", ",", "ordered", "=", "False", ",", ")" ]
Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports.
[ "Coerce", "self", "into", "a", "pandas", "categorical", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L322-L338
train
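A hedged sketch of the 1D restriction, again assuming the LabelArray(values, missing_value) constructor:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b', 'a'], dtype=object), missing_value=None)
cat = arr.as_categorical()  # a pandas.Categorical built from arr's codes and categories

arr2d = LabelArray(np.array([['a', 'b'], ['a', 'a']], dtype=object),
                   missing_value=None)
try:
    arr2d.as_categorical()
except ValueError as e:
    print(e)  # "Can't convert a 2D array to a categorical."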
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.as_categorical_frame
def as_categorical_frame(self, index, columns, name=None): """ Coerce self into a pandas DataFrame of Categoricals. """ if len(self.shape) != 2: raise ValueError( "Can't convert a non-2D LabelArray into a DataFrame." ) expected_shape = (len(index), len(columns)) if expected_shape != self.shape: raise ValueError( "Can't construct a DataFrame with provided indices:\n\n" "LabelArray shape is {actual}, but index and columns imply " "that shape should be {expected}.".format( actual=self.shape, expected=expected_shape, ) ) return pd.Series( index=pd.MultiIndex.from_product([index, columns]), data=self.ravel().as_categorical(), name=name, ).unstack()
python
def as_categorical_frame(self, index, columns, name=None): """ Coerce self into a pandas DataFrame of Categoricals. """ if len(self.shape) != 2: raise ValueError( "Can't convert a non-2D LabelArray into a DataFrame." ) expected_shape = (len(index), len(columns)) if expected_shape != self.shape: raise ValueError( "Can't construct a DataFrame with provided indices:\n\n" "LabelArray shape is {actual}, but index and columns imply " "that shape should be {expected}.".format( actual=self.shape, expected=expected_shape, ) ) return pd.Series( index=pd.MultiIndex.from_product([index, columns]), data=self.ravel().as_categorical(), name=name, ).unstack()
[ "def", "as_categorical_frame", "(", "self", ",", "index", ",", "columns", ",", "name", "=", "None", ")", ":", "if", "len", "(", "self", ".", "shape", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"Can't convert a non-2D LabelArray into a DataFrame.\"", ")", "expected_shape", "=", "(", "len", "(", "index", ")", ",", "len", "(", "columns", ")", ")", "if", "expected_shape", "!=", "self", ".", "shape", ":", "raise", "ValueError", "(", "\"Can't construct a DataFrame with provided indices:\\n\\n\"", "\"LabelArray shape is {actual}, but index and columns imply \"", "\"that shape should be {expected}.\"", ".", "format", "(", "actual", "=", "self", ".", "shape", ",", "expected", "=", "expected_shape", ",", ")", ")", "return", "pd", ".", "Series", "(", "index", "=", "pd", ".", "MultiIndex", ".", "from_product", "(", "[", "index", ",", "columns", "]", ")", ",", "data", "=", "self", ".", "ravel", "(", ")", ".", "as_categorical", "(", ")", ",", "name", "=", "name", ",", ")", ".", "unstack", "(", ")" ]
Coerce self into a pandas DataFrame of Categoricals.
[ "Coerce", "self", "into", "a", "pandas", "DataFrame", "of", "Categoricals", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L340-L364
train
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.set_scalar
def set_scalar(self, indexer, value):
    """
    Set scalar value into the array.

    Parameters
    ----------
    indexer : any
        The indexer to set the value at.
    value : str
        The value to assign at the given locations.

    Raises
    ------
    ValueError
        Raised when ``value`` is not a valid element of this label array.
    """
    try:
        value_code = self.reverse_categories[value]
    except KeyError:
        raise ValueError("%r is not in LabelArray categories." % value)

    self.as_int_array()[indexer] = value_code
python
def set_scalar(self, indexer, value):
    """
    Set scalar value into the array.

    Parameters
    ----------
    indexer : any
        The indexer to set the value at.
    value : str
        The value to assign at the given locations.

    Raises
    ------
    ValueError
        Raised when ``value`` is not a valid element of this label array.
    """
    try:
        value_code = self.reverse_categories[value]
    except KeyError:
        raise ValueError("%r is not in LabelArray categories." % value)

    self.as_int_array()[indexer] = value_code
[ "def", "set_scalar", "(", "self", ",", "indexer", ",", "value", ")", ":", "try", ":", "value_code", "=", "self", ".", "reverse_categories", "[", "value", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"%r is not in LabelArray categories.\"", "%", "value", ")", "self", ".", "as_int_array", "(", ")", "[", "indexer", "]", "=", "value_code" ]
Set scalar value into the array.

Parameters
----------
indexer : any
    The indexer to set the value at.
value : str
    The value to assign at the given locations.

Raises
------
ValueError
    Raised when ``value`` is not a valid element of this label array.
[ "Set", "scalar", "value", "into", "the", "array", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L400-L422
train
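A hedged sketch showing why the KeyError is converted to a ValueError: assignment is only possible for values that already have a code (same LabelArray constructor assumption as above):

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b', 'a'], dtype=object), missing_value=None)
arr.set_scalar(0, 'b')       # fine: 'b' already has a code
try:
    arr.set_scalar(1, 'zz')  # 'zz' was never seen, so no code exists for it
except ValueError as e:
    print(e)                 # "'zz' is not in LabelArray categories."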
quantopian/zipline
zipline/lib/labelarray.py
LabelArray._equality_check
def _equality_check(op): """ Shared code for __eq__ and __ne__, parameterized on the actual comparison operator to use. """ def method(self, other): if isinstance(other, LabelArray): self_mv = self.missing_value other_mv = other.missing_value if self_mv != other_mv: raise MissingValueMismatch(self_mv, other_mv) self_categories = self.categories other_categories = other.categories if not compare_arrays(self_categories, other_categories): raise CategoryMismatch(self_categories, other_categories) return ( op(self.as_int_array(), other.as_int_array()) & self.not_missing() & other.not_missing() ) elif isinstance(other, ndarray): # Compare to ndarrays as though we were an array of strings. # This is fairly expensive, and should generally be avoided. return op(self.as_string_array(), other) & self.not_missing() elif isinstance(other, self.SUPPORTED_SCALAR_TYPES): i = self._reverse_categories.get(other, -1) return op(self.as_int_array(), i) & self.not_missing() return op(super(LabelArray, self), other) return method
python
def _equality_check(op): """ Shared code for __eq__ and __ne__, parameterized on the actual comparison operator to use. """ def method(self, other): if isinstance(other, LabelArray): self_mv = self.missing_value other_mv = other.missing_value if self_mv != other_mv: raise MissingValueMismatch(self_mv, other_mv) self_categories = self.categories other_categories = other.categories if not compare_arrays(self_categories, other_categories): raise CategoryMismatch(self_categories, other_categories) return ( op(self.as_int_array(), other.as_int_array()) & self.not_missing() & other.not_missing() ) elif isinstance(other, ndarray): # Compare to ndarrays as though we were an array of strings. # This is fairly expensive, and should generally be avoided. return op(self.as_string_array(), other) & self.not_missing() elif isinstance(other, self.SUPPORTED_SCALAR_TYPES): i = self._reverse_categories.get(other, -1) return op(self.as_int_array(), i) & self.not_missing() return op(super(LabelArray, self), other) return method
[ "def", "_equality_check", "(", "op", ")", ":", "def", "method", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "LabelArray", ")", ":", "self_mv", "=", "self", ".", "missing_value", "other_mv", "=", "other", ".", "missing_value", "if", "self_mv", "!=", "other_mv", ":", "raise", "MissingValueMismatch", "(", "self_mv", ",", "other_mv", ")", "self_categories", "=", "self", ".", "categories", "other_categories", "=", "other", ".", "categories", "if", "not", "compare_arrays", "(", "self_categories", ",", "other_categories", ")", ":", "raise", "CategoryMismatch", "(", "self_categories", ",", "other_categories", ")", "return", "(", "op", "(", "self", ".", "as_int_array", "(", ")", ",", "other", ".", "as_int_array", "(", ")", ")", "&", "self", ".", "not_missing", "(", ")", "&", "other", ".", "not_missing", "(", ")", ")", "elif", "isinstance", "(", "other", ",", "ndarray", ")", ":", "# Compare to ndarrays as though we were an array of strings.", "# This is fairly expensive, and should generally be avoided.", "return", "op", "(", "self", ".", "as_string_array", "(", ")", ",", "other", ")", "&", "self", ".", "not_missing", "(", ")", "elif", "isinstance", "(", "other", ",", "self", ".", "SUPPORTED_SCALAR_TYPES", ")", ":", "i", "=", "self", ".", "_reverse_categories", ".", "get", "(", "other", ",", "-", "1", ")", "return", "op", "(", "self", ".", "as_int_array", "(", ")", ",", "i", ")", "&", "self", ".", "not_missing", "(", ")", "return", "op", "(", "super", "(", "LabelArray", ",", "self", ")", ",", "other", ")", "return", "method" ]
Shared code for __eq__ and __ne__, parameterized on the actual comparison operator to use.
[ "Shared", "code", "for", "__eq__", "and", "__ne__", "parameterized", "on", "the", "actual", "comparison", "operator", "to", "use", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L462-L496
train
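A hedged sketch of the comparison semantics (same constructor assumption). Note that missing slots never compare equal, even against themselves:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['x', None, 'x'], dtype=object), missing_value=None)
print(arr == 'x')  # [ True False  True]: scalar compares mask out missing slots
print(arr == arr)  # [ True False  True]: a missing value is not equal to itself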
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.empty_like
def empty_like(self, shape): """ Make an empty LabelArray with the same categories as ``self``, filled with ``self.missing_value``. """ return type(self).from_codes_and_metadata( codes=np.full( shape, self.reverse_categories[self.missing_value], dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), ), categories=self.categories, reverse_categories=self.reverse_categories, missing_value=self.missing_value, )
python
def empty_like(self, shape): """ Make an empty LabelArray with the same categories as ``self``, filled with ``self.missing_value``. """ return type(self).from_codes_and_metadata( codes=np.full( shape, self.reverse_categories[self.missing_value], dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), ), categories=self.categories, reverse_categories=self.reverse_categories, missing_value=self.missing_value, )
[ "def", "empty_like", "(", "self", ",", "shape", ")", ":", "return", "type", "(", "self", ")", ".", "from_codes_and_metadata", "(", "codes", "=", "np", ".", "full", "(", "shape", ",", "self", ".", "reverse_categories", "[", "self", ".", "missing_value", "]", ",", "dtype", "=", "unsigned_int_dtype_with_size_in_bytes", "(", "self", ".", "itemsize", ")", ",", ")", ",", "categories", "=", "self", ".", "categories", ",", "reverse_categories", "=", "self", ".", "reverse_categories", ",", "missing_value", "=", "self", ".", "missing_value", ",", ")" ]
Make an empty LabelArray with the same categories as ``self``, filled with ``self.missing_value``.
[ "Make", "an", "empty", "LabelArray", "with", "the", "same", "categories", "as", "self", "filled", "with", "self", ".", "missing_value", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L605-L619
train
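A hedged sketch (same constructor assumption); not_missing is the same accessor the equality check above relies on:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(np.array(['a', 'b'], dtype=object), missing_value=None)
blank = arr.empty_like((2, 3))
print(blank.shape)                # (2, 3)
print(blank.not_missing().any())  # False: every slot holds the missing value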
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.map_predicate
def map_predicate(self, f):
    """
    Map a function from str -> bool element-wise over ``self``.

    ``f`` will be applied exactly once to each non-missing unique value in
    ``self``. Missing values will always return False.
    """
    # Functions passed to this are of type str -> bool. Don't ever call
    # them on None, which is the only non-str value we ever store in
    # categories.
    if self.missing_value is None:
        def f_to_use(x):
            return False if x is None else f(x)
    else:
        f_to_use = f

    # Call f on each unique value in our categories.
    results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)

    # missing_value should produce False no matter what
    results[self.reverse_categories[self.missing_value]] = False

    # unpack the results from each unique value into their corresponding
    # locations in our indices.
    return results[self.as_int_array()]
python
def map_predicate(self, f):
    """
    Map a function from str -> bool element-wise over ``self``.

    ``f`` will be applied exactly once to each non-missing unique value in
    ``self``. Missing values will always return False.
    """
    # Functions passed to this are of type str -> bool. Don't ever call
    # them on None, which is the only non-str value we ever store in
    # categories.
    if self.missing_value is None:
        def f_to_use(x):
            return False if x is None else f(x)
    else:
        f_to_use = f

    # Call f on each unique value in our categories.
    results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)

    # missing_value should produce False no matter what
    results[self.reverse_categories[self.missing_value]] = False

    # unpack the results from each unique value into their corresponding
    # locations in our indices.
    return results[self.as_int_array()]
[ "def", "map_predicate", "(", "self", ",", "f", ")", ":", "# Functions passed to this are of type str -> bool. Don't ever call", "# them on None, which is the only non-str value we ever store in", "# categories.", "if", "self", ".", "missing_value", "is", "None", ":", "def", "f_to_use", "(", "x", ")", ":", "return", "False", "if", "x", "is", "None", "else", "f", "(", "x", ")", "else", ":", "f_to_use", "=", "f", "# Call f on each unique value in our categories.", "results", "=", "np", ".", "vectorize", "(", "f_to_use", ",", "otypes", "=", "[", "bool_dtype", "]", ")", "(", "self", ".", "categories", ")", "# missing_value should produce False no matter what", "results", "[", "self", ".", "reverse_categories", "[", "self", ".", "missing_value", "]", "]", "=", "False", "# unpack the results form each unique value into their corresponding", "# locations in our indices.", "return", "results", "[", "self", ".", "as_int_array", "(", ")", "]" ]
Map a function from str -> bool element-wise over ``self``. ``f`` will be applied exactly once to each non-missing unique value in ``self``. Missing values will always return False.
[ "Map", "a", "function", "from", "str", "-", ">", "bool", "element", "-", "wise", "over", "self", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L621-L645
train
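A hedged sketch (same constructor assumption): the predicate runs once per unique category, and missing slots are always False:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(
    np.array(['aapl', 'amzn', None, 'msft'], dtype=object),
    missing_value=None,
)
print(arr.map_predicate(lambda s: s.startswith('a')))
# [ True  True False False]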
quantopian/zipline
zipline/lib/labelarray.py
LabelArray.map
def map(self, f):
    """
    Map a function from str -> str element-wise over ``self``.

    ``f`` will be applied exactly once to each non-missing unique value in
    ``self``. Missing values will always map to ``self.missing_value``.
    """
    # f() should only return None if None is our missing value.
    if self.missing_value is None:
        allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
    else:
        allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES

    def f_to_use(x,
                 missing_value=self.missing_value,
                 otypes=allowed_outtypes):

        # Don't call f on the missing value; those locations don't exist
        # semantically. We return _sortable_sentinel rather than None
        # because the np.unique call below sorts the categories array,
        # which raises an error on Python 3 because None and str aren't
        # comparable.
        if x == missing_value:
            return _sortable_sentinel

        ret = f(x)

        if not isinstance(ret, otypes):
            raise TypeError(
                "LabelArray.map expected function {f} to return a string"
                " or None, but got {type} instead.\n"
                "Value was {value}.".format(
                    f=f.__name__,
                    type=type(ret).__name__,
                    value=ret,
                )
            )

        if ret == missing_value:
            return _sortable_sentinel

        return ret

    new_categories_with_duplicates = (
        np.vectorize(f_to_use, otypes=[object])(self.categories)
    )

    # If f() maps multiple inputs to the same output, then we can end up
    # with the same code duplicated multiple times. Compress the categories
    # by running them through np.unique, and then use the reverse lookup
    # table to compress codes as well.
    new_categories, bloated_inverse_index = np.unique(
        new_categories_with_duplicates,
        return_inverse=True
    )

    if new_categories[0] is _sortable_sentinel:
        # f_to_use returned _sortable_sentinel for locations that should be
        # missing values in our output. Since np.unique returns the uniques
        # in sorted order, and since _sortable_sentinel sorts before any
        # string, we only need to check the first array entry.
        new_categories[0] = self.missing_value

    # `reverse_index` will always be a 64 bit integer even if we can hold a
    # smaller array.
    reverse_index = bloated_inverse_index.astype(
        smallest_uint_that_can_hold(len(new_categories))
    )
    new_codes = np.take(reverse_index, self.as_int_array())

    return self.from_codes_and_metadata(
        new_codes,
        new_categories,
        dict(zip(new_categories, range(len(new_categories)))),
        missing_value=self.missing_value,
    )
python
def map(self, f):
    """
    Map a function from str -> str element-wise over ``self``.

    ``f`` will be applied exactly once to each non-missing unique value in
    ``self``. Missing values will always map to ``self.missing_value``.
    """
    # f() should only return None if None is our missing value.
    if self.missing_value is None:
        allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
    else:
        allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES

    def f_to_use(x,
                 missing_value=self.missing_value,
                 otypes=allowed_outtypes):

        # Don't call f on the missing value; those locations don't exist
        # semantically. We return _sortable_sentinel rather than None
        # because the np.unique call below sorts the categories array,
        # which raises an error on Python 3 because None and str aren't
        # comparable.
        if x == missing_value:
            return _sortable_sentinel

        ret = f(x)

        if not isinstance(ret, otypes):
            raise TypeError(
                "LabelArray.map expected function {f} to return a string"
                " or None, but got {type} instead.\n"
                "Value was {value}.".format(
                    f=f.__name__,
                    type=type(ret).__name__,
                    value=ret,
                )
            )

        if ret == missing_value:
            return _sortable_sentinel

        return ret

    new_categories_with_duplicates = (
        np.vectorize(f_to_use, otypes=[object])(self.categories)
    )

    # If f() maps multiple inputs to the same output, then we can end up
    # with the same code duplicated multiple times. Compress the categories
    # by running them through np.unique, and then use the reverse lookup
    # table to compress codes as well.
    new_categories, bloated_inverse_index = np.unique(
        new_categories_with_duplicates,
        return_inverse=True
    )

    if new_categories[0] is _sortable_sentinel:
        # f_to_use returned _sortable_sentinel for locations that should be
        # missing values in our output. Since np.unique returns the uniques
        # in sorted order, and since _sortable_sentinel sorts before any
        # string, we only need to check the first array entry.
        new_categories[0] = self.missing_value

    # `reverse_index` will always be a 64 bit integer even if we can hold a
    # smaller array.
    reverse_index = bloated_inverse_index.astype(
        smallest_uint_that_can_hold(len(new_categories))
    )
    new_codes = np.take(reverse_index, self.as_int_array())

    return self.from_codes_and_metadata(
        new_codes,
        new_categories,
        dict(zip(new_categories, range(len(new_categories)))),
        missing_value=self.missing_value,
    )
[ "def", "map", "(", "self", ",", "f", ")", ":", "# f() should only return None if None is our missing value.", "if", "self", ".", "missing_value", "is", "None", ":", "allowed_outtypes", "=", "self", ".", "SUPPORTED_SCALAR_TYPES", "else", ":", "allowed_outtypes", "=", "self", ".", "SUPPORTED_NON_NONE_SCALAR_TYPES", "def", "f_to_use", "(", "x", ",", "missing_value", "=", "self", ".", "missing_value", ",", "otypes", "=", "allowed_outtypes", ")", ":", "# Don't call f on the missing value; those locations don't exist", "# semantically. We return _sortable_sentinel rather than None", "# because the np.unique call below sorts the categories array,", "# which raises an error on Python 3 because None and str aren't", "# comparable.", "if", "x", "==", "missing_value", ":", "return", "_sortable_sentinel", "ret", "=", "f", "(", "x", ")", "if", "not", "isinstance", "(", "ret", ",", "otypes", ")", ":", "raise", "TypeError", "(", "\"LabelArray.map expected function {f} to return a string\"", "\" or None, but got {type} instead.\\n\"", "\"Value was {value}.\"", ".", "format", "(", "f", "=", "f", ".", "__name__", ",", "type", "=", "type", "(", "ret", ")", ".", "__name__", ",", "value", "=", "ret", ",", ")", ")", "if", "ret", "==", "missing_value", ":", "return", "_sortable_sentinel", "return", "ret", "new_categories_with_duplicates", "=", "(", "np", ".", "vectorize", "(", "f_to_use", ",", "otypes", "=", "[", "object", "]", ")", "(", "self", ".", "categories", ")", ")", "# If f() maps multiple inputs to the same output, then we can end up", "# with the same code duplicated multiple times. Compress the categories", "# by running them through np.unique, and then use the reverse lookup", "# table to compress codes as well.", "new_categories", ",", "bloated_inverse_index", "=", "np", ".", "unique", "(", "new_categories_with_duplicates", ",", "return_inverse", "=", "True", ")", "if", "new_categories", "[", "0", "]", "is", "_sortable_sentinel", ":", "# f_to_use return _sortable_sentinel for locations that should be", "# missing values in our output. Since np.unique returns the uniques", "# in sorted order, and since _sortable_sentinel sorts before any", "# string, we only need to check the first array entry.", "new_categories", "[", "0", "]", "=", "self", ".", "missing_value", "# `reverse_index` will always be a 64 bit integer even if we can hold a", "# smaller array.", "reverse_index", "=", "bloated_inverse_index", ".", "astype", "(", "smallest_uint_that_can_hold", "(", "len", "(", "new_categories", ")", ")", ")", "new_codes", "=", "np", ".", "take", "(", "reverse_index", ",", "self", ".", "as_int_array", "(", ")", ")", "return", "self", ".", "from_codes_and_metadata", "(", "new_codes", ",", "new_categories", ",", "dict", "(", "zip", "(", "new_categories", ",", "range", "(", "len", "(", "new_categories", ")", ")", ")", ")", ",", "missing_value", "=", "self", ".", "missing_value", ",", ")" ]
Map a function from str -> str element-wise over ``self``. ``f`` will be applied exactly once to each non-missing unique value in ``self``. Missing values will always map to ``self.missing_value``.
[ "Map", "a", "function", "from", "str", "-", ">", "str", "element", "-", "wise", "over", "self", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/labelarray.py#L647-L722
train
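A hedged sketch (same constructor assumption): f is applied once per unique non-missing category, and missing slots survive the mapping untouched; as_string_array is the accessor the equality check above uses for string views:

import numpy as np
from zipline.lib.labelarray import LabelArray

arr = LabelArray(
    np.array(['buy', 'sell', None, 'buy'], dtype=object),
    missing_value=None,
)
upper = arr.map(str.upper)
print(upper.as_string_array())  # ['BUY' 'SELL' None 'BUY']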
quantopian/zipline
zipline/finance/execution.py
asymmetric_round_price
def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95): """ Asymmetric rounding function for adjusting prices to the specified number of places in a way that "improves" the price. For limit prices, this means preferring to round down on buys and preferring to round up on sells. For stop prices, it means the reverse. If prefer_round_down == True: When .05 below to .95 above a specified decimal place, use it. If prefer_round_down == False: When .95 below to .05 above a specified decimal place, use it. In math-speak: If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01. If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01. """ precision = zp_math.number_of_decimal_places(tick_size) multiplier = int(tick_size * (10 ** precision)) diff -= 0.5 # shift the difference down diff *= (10 ** -precision) # adjust diff to precision of tick size diff *= multiplier # adjust diff to value of tick_size # Subtracting an epsilon from diff to enforce the open-ness of the upper # bound on buys and the lower bound on sells. Using the actual system # epsilon doesn't quite get there, so use a slightly less epsilon-ey value. epsilon = float_info.epsilon * 10 diff = diff - epsilon # relies on rounding half away from zero, unlike numpy's bankers' rounding rounded = tick_size * consistent_round( (price - (diff if prefer_round_down else -diff)) / tick_size ) if zp_math.tolerant_equals(rounded, 0.0): return 0.0 return rounded
python
def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95): """ Asymmetric rounding function for adjusting prices to the specified number of places in a way that "improves" the price. For limit prices, this means preferring to round down on buys and preferring to round up on sells. For stop prices, it means the reverse. If prefer_round_down == True: When .05 below to .95 above a specified decimal place, use it. If prefer_round_down == False: When .95 below to .05 above a specified decimal place, use it. In math-speak: If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01. If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01. """ precision = zp_math.number_of_decimal_places(tick_size) multiplier = int(tick_size * (10 ** precision)) diff -= 0.5 # shift the difference down diff *= (10 ** -precision) # adjust diff to precision of tick size diff *= multiplier # adjust diff to value of tick_size # Subtracting an epsilon from diff to enforce the open-ness of the upper # bound on buys and the lower bound on sells. Using the actual system # epsilon doesn't quite get there, so use a slightly less epsilon-ey value. epsilon = float_info.epsilon * 10 diff = diff - epsilon # relies on rounding half away from zero, unlike numpy's bankers' rounding rounded = tick_size * consistent_round( (price - (diff if prefer_round_down else -diff)) / tick_size ) if zp_math.tolerant_equals(rounded, 0.0): return 0.0 return rounded
[ "def", "asymmetric_round_price", "(", "price", ",", "prefer_round_down", ",", "tick_size", ",", "diff", "=", "0.95", ")", ":", "precision", "=", "zp_math", ".", "number_of_decimal_places", "(", "tick_size", ")", "multiplier", "=", "int", "(", "tick_size", "*", "(", "10", "**", "precision", ")", ")", "diff", "-=", "0.5", "# shift the difference down", "diff", "*=", "(", "10", "**", "-", "precision", ")", "# adjust diff to precision of tick size", "diff", "*=", "multiplier", "# adjust diff to value of tick_size", "# Subtracting an epsilon from diff to enforce the open-ness of the upper", "# bound on buys and the lower bound on sells. Using the actual system", "# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.", "epsilon", "=", "float_info", ".", "epsilon", "*", "10", "diff", "=", "diff", "-", "epsilon", "# relies on rounding half away from zero, unlike numpy's bankers' rounding", "rounded", "=", "tick_size", "*", "consistent_round", "(", "(", "price", "-", "(", "diff", "if", "prefer_round_down", "else", "-", "diff", ")", ")", "/", "tick_size", ")", "if", "zp_math", ".", "tolerant_equals", "(", "rounded", ",", "0.0", ")", ":", "return", "0.0", "return", "rounded" ]
Asymmetric rounding function for adjusting prices to the specified number of places in a way that "improves" the price. For limit prices, this means preferring to round down on buys and preferring to round up on sells. For stop prices, it means the reverse. If prefer_round_down == True: When .05 below to .95 above a specified decimal place, use it. If prefer_round_down == False: When .95 below to .05 above a specified decimal place, use it. In math-speak: If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01. If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
[ "Asymmetric", "rounding", "function", "for", "adjusting", "prices", "to", "the", "specified", "number", "of", "places", "in", "a", "way", "that", "improves", "the", "price", ".", "For", "limit", "prices", "this", "means", "preferring", "to", "round", "down", "on", "buys", "and", "preferring", "to", "round", "up", "on", "sells", ".", "For", "stop", "prices", "it", "means", "the", "reverse", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/execution.py#L159-L193
train
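A short sketch of the asymmetry, using interior values that sit safely away from the interval boundaries stated in the docstring:

from zipline.finance.execution import asymmetric_round_price

asymmetric_round_price(3.012, True, 0.01)   # ~3.01: buys prefer rounding down
asymmetric_round_price(3.012, False, 0.01)  # ~3.02: sells prefer rounding up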
quantopian/zipline
zipline/finance/execution.py
check_stoplimit_prices
def check_stoplimit_prices(price, label): """ Check to make sure the stop/limit prices are reasonable and raise a BadOrderParameters exception if not. """ try: if not isfinite(price): raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, price) ) # This catches arbitrary objects except TypeError: raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, type(price)) ) if price < 0: raise BadOrderParameters( msg="Can't place a {} order with a negative price.".format(label) )
python
def check_stoplimit_prices(price, label): """ Check to make sure the stop/limit prices are reasonable and raise a BadOrderParameters exception if not. """ try: if not isfinite(price): raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, price) ) # This catches arbitrary objects except TypeError: raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, type(price)) ) if price < 0: raise BadOrderParameters( msg="Can't place a {} order with a negative price.".format(label) )
[ "def", "check_stoplimit_prices", "(", "price", ",", "label", ")", ":", "try", ":", "if", "not", "isfinite", "(", "price", ")", ":", "raise", "BadOrderParameters", "(", "msg", "=", "\"Attempted to place an order with a {} price \"", "\"of {}.\"", ".", "format", "(", "label", ",", "price", ")", ")", "# This catches arbitrary objects", "except", "TypeError", ":", "raise", "BadOrderParameters", "(", "msg", "=", "\"Attempted to place an order with a {} price \"", "\"of {}.\"", ".", "format", "(", "label", ",", "type", "(", "price", ")", ")", ")", "if", "price", "<", "0", ":", "raise", "BadOrderParameters", "(", "msg", "=", "\"Can't place a {} order with a negative price.\"", ".", "format", "(", "label", ")", ")" ]
Check to make sure the stop/limit prices are reasonable and raise a BadOrderParameters exception if not.
[ "Check", "to", "make", "sure", "the", "stop", "/", "limit", "prices", "are", "reasonable", "and", "raise", "a", "BadOrderParameters", "exception", "if", "not", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/execution.py#L196-L217
train
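A hedged sketch of the three rejection paths; BadOrderParameters is assumed to live in zipline.errors:

from zipline.errors import BadOrderParameters
from zipline.finance.execution import check_stoplimit_prices

check_stoplimit_prices(10.25, 'limit')  # finite and non-negative: passes

for bad in (float('nan'), -1.0, '10'):
    try:
        check_stoplimit_prices(bad, 'stop')
    except BadOrderParameters as e:
        print(e)  # non-finite, negative, and non-numeric prices all rejected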
quantopian/zipline
zipline/data/bundles/csvdir.py
csvdir_bundle
def csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, tframes=None, csvdir=None): """ Build a zipline data bundle from the directory with csv files. """ if not csvdir: csvdir = environ.get('CSVDIR') if not csvdir: raise ValueError("CSVDIR environment variable is not set") if not os.path.isdir(csvdir): raise ValueError("%s is not a directory" % csvdir) if not tframes: tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir)) if not tframes: raise ValueError("'daily' and 'minute' directories " "not found in '%s'" % csvdir) divs_splits = {'divs': DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date', 'declared_date', 'pay_date']), 'splits': DataFrame(columns=['sid', 'ratio', 'effective_date'])} for tframe in tframes: ddir = os.path.join(csvdir, tframe) symbols = sorted(item.split('.csv')[0] for item in os.listdir(ddir) if '.csv' in item) if not symbols: raise ValueError("no <symbol>.csv* files found in %s" % ddir) dtype = [('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('symbol', 'object')] metadata = DataFrame(empty(len(symbols), dtype=dtype)) if tframe == 'minute': writer = minute_bar_writer else: writer = daily_bar_writer writer.write(_pricing_iter(ddir, symbols, metadata, divs_splits, show_progress), show_progress=show_progress) # Hardcode the exchange to "CSVDIR" for all assets and (elsewhere) # register "CSVDIR" to resolve to the NYSE calendar, because these # are all equities and thus can use the NYSE calendar. metadata['exchange'] = "CSVDIR" asset_db_writer.write(equities=metadata) divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int) divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int) adjustment_writer.write(splits=divs_splits['splits'], dividends=divs_splits['divs'])
python
def csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, tframes=None, csvdir=None): """ Build a zipline data bundle from the directory with csv files. """ if not csvdir: csvdir = environ.get('CSVDIR') if not csvdir: raise ValueError("CSVDIR environment variable is not set") if not os.path.isdir(csvdir): raise ValueError("%s is not a directory" % csvdir) if not tframes: tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir)) if not tframes: raise ValueError("'daily' and 'minute' directories " "not found in '%s'" % csvdir) divs_splits = {'divs': DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date', 'declared_date', 'pay_date']), 'splits': DataFrame(columns=['sid', 'ratio', 'effective_date'])} for tframe in tframes: ddir = os.path.join(csvdir, tframe) symbols = sorted(item.split('.csv')[0] for item in os.listdir(ddir) if '.csv' in item) if not symbols: raise ValueError("no <symbol>.csv* files found in %s" % ddir) dtype = [('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('symbol', 'object')] metadata = DataFrame(empty(len(symbols), dtype=dtype)) if tframe == 'minute': writer = minute_bar_writer else: writer = daily_bar_writer writer.write(_pricing_iter(ddir, symbols, metadata, divs_splits, show_progress), show_progress=show_progress) # Hardcode the exchange to "CSVDIR" for all assets and (elsewhere) # register "CSVDIR" to resolve to the NYSE calendar, because these # are all equities and thus can use the NYSE calendar. metadata['exchange'] = "CSVDIR" asset_db_writer.write(equities=metadata) divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int) divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int) adjustment_writer.write(splits=divs_splits['splits'], dividends=divs_splits['divs'])
[ "def", "csvdir_bundle", "(", "environ", ",", "asset_db_writer", ",", "minute_bar_writer", ",", "daily_bar_writer", ",", "adjustment_writer", ",", "calendar", ",", "start_session", ",", "end_session", ",", "cache", ",", "show_progress", ",", "output_dir", ",", "tframes", "=", "None", ",", "csvdir", "=", "None", ")", ":", "if", "not", "csvdir", ":", "csvdir", "=", "environ", ".", "get", "(", "'CSVDIR'", ")", "if", "not", "csvdir", ":", "raise", "ValueError", "(", "\"CSVDIR environment variable is not set\"", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "csvdir", ")", ":", "raise", "ValueError", "(", "\"%s is not a directory\"", "%", "csvdir", ")", "if", "not", "tframes", ":", "tframes", "=", "set", "(", "[", "\"daily\"", ",", "\"minute\"", "]", ")", ".", "intersection", "(", "os", ".", "listdir", "(", "csvdir", ")", ")", "if", "not", "tframes", ":", "raise", "ValueError", "(", "\"'daily' and 'minute' directories \"", "\"not found in '%s'\"", "%", "csvdir", ")", "divs_splits", "=", "{", "'divs'", ":", "DataFrame", "(", "columns", "=", "[", "'sid'", ",", "'amount'", ",", "'ex_date'", ",", "'record_date'", ",", "'declared_date'", ",", "'pay_date'", "]", ")", ",", "'splits'", ":", "DataFrame", "(", "columns", "=", "[", "'sid'", ",", "'ratio'", ",", "'effective_date'", "]", ")", "}", "for", "tframe", "in", "tframes", ":", "ddir", "=", "os", ".", "path", ".", "join", "(", "csvdir", ",", "tframe", ")", "symbols", "=", "sorted", "(", "item", ".", "split", "(", "'.csv'", ")", "[", "0", "]", "for", "item", "in", "os", ".", "listdir", "(", "ddir", ")", "if", "'.csv'", "in", "item", ")", "if", "not", "symbols", ":", "raise", "ValueError", "(", "\"no <symbol>.csv* files found in %s\"", "%", "ddir", ")", "dtype", "=", "[", "(", "'start_date'", ",", "'datetime64[ns]'", ")", ",", "(", "'end_date'", ",", "'datetime64[ns]'", ")", ",", "(", "'auto_close_date'", ",", "'datetime64[ns]'", ")", ",", "(", "'symbol'", ",", "'object'", ")", "]", "metadata", "=", "DataFrame", "(", "empty", "(", "len", "(", "symbols", ")", ",", "dtype", "=", "dtype", ")", ")", "if", "tframe", "==", "'minute'", ":", "writer", "=", "minute_bar_writer", "else", ":", "writer", "=", "daily_bar_writer", "writer", ".", "write", "(", "_pricing_iter", "(", "ddir", ",", "symbols", ",", "metadata", ",", "divs_splits", ",", "show_progress", ")", ",", "show_progress", "=", "show_progress", ")", "# Hardcode the exchange to \"CSVDIR\" for all assets and (elsewhere)", "# register \"CSVDIR\" to resolve to the NYSE calendar, because these", "# are all equities and thus can use the NYSE calendar.", "metadata", "[", "'exchange'", "]", "=", "\"CSVDIR\"", "asset_db_writer", ".", "write", "(", "equities", "=", "metadata", ")", "divs_splits", "[", "'divs'", "]", "[", "'sid'", "]", "=", "divs_splits", "[", "'divs'", "]", "[", "'sid'", "]", ".", "astype", "(", "int", ")", "divs_splits", "[", "'splits'", "]", "[", "'sid'", "]", "=", "divs_splits", "[", "'splits'", "]", "[", "'sid'", "]", ".", "astype", "(", "int", ")", "adjustment_writer", ".", "write", "(", "splits", "=", "divs_splits", "[", "'splits'", "]", ",", "dividends", "=", "divs_splits", "[", "'divs'", "]", ")" ]
Build a zipline data bundle from the directory with csv files.
[ "Build", "a", "zipline", "data", "bundle", "from", "the", "directory", "with", "csv", "files", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/bundles/csvdir.py#L98-L168
train
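A hedged registration sketch following zipline's documented csvdir workflow; the bundle name and path are placeholders, and csvdir_equities is the wrapper exported by the same module:

from zipline.data.bundles import register
from zipline.data.bundles.csvdir import csvdir_equities

# The directory is expected to contain daily/ and/or minute/ subdirectories
# of <symbol>.csv files.
register(
    'custom-csvdir-bundle',
    csvdir_equities(['daily'], '/path/to/your/csvs'),  # placeholder path
    calendar_name='NYSE',
)
# Then ingest from the shell:  zipline ingest -b custom-csvdir-bundle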
quantopian/zipline
zipline/pipeline/api_utils.py
restrict_to_dtype
def restrict_to_dtype(dtype, message_template):
    """
    A factory for decorators that restrict Term methods to only be callable on
    Terms with a specific dtype.

    This is conceptually similar to
    zipline.utils.input_validation.expect_dtypes, but provides more flexibility
    for providing error messages that are specifically targeting Term methods.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype on which the decorated method may be called.
    message_template : str
        A template for the error message to be raised.
        `message_template.format` will be called with keyword arguments
        `method_name`, `expected_dtype`, and `received_dtype`.

    Examples
    --------
    @restrict_to_dtype(
        dtype=float64_dtype,
        message_template=(
            "{method_name}() was called on a factor of dtype {received_dtype}. "
            "{method_name}() requires factors of dtype {expected_dtype}."
        ),
    )
    def some_factor_method(self, ...):
        self.stuff_that_requires_being_float64(...)
    """
    def processor(term_method, _, term_instance):
        term_dtype = term_instance.dtype
        if term_dtype != dtype:
            raise TypeError(
                message_template.format(
                    method_name=term_method.__name__,
                    expected_dtype=dtype.name,
                    received_dtype=term_dtype,
                )
            )
        return term_instance
    return preprocess(self=processor)
python
def restrict_to_dtype(dtype, message_template):
    """
    A factory for decorators that restrict Term methods to only be callable on
    Terms with a specific dtype.

    This is conceptually similar to
    zipline.utils.input_validation.expect_dtypes, but provides more flexibility
    for providing error messages that are specifically targeting Term methods.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype on which the decorated method may be called.
    message_template : str
        A template for the error message to be raised.
        `message_template.format` will be called with keyword arguments
        `method_name`, `expected_dtype`, and `received_dtype`.

    Examples
    --------
    @restrict_to_dtype(
        dtype=float64_dtype,
        message_template=(
            "{method_name}() was called on a factor of dtype {received_dtype}. "
            "{method_name}() requires factors of dtype {expected_dtype}."
        ),
    )
    def some_factor_method(self, ...):
        self.stuff_that_requires_being_float64(...)
    """
    def processor(term_method, _, term_instance):
        term_dtype = term_instance.dtype
        if term_dtype != dtype:
            raise TypeError(
                message_template.format(
                    method_name=term_method.__name__,
                    expected_dtype=dtype.name,
                    received_dtype=term_dtype,
                )
            )
        return term_instance
    return preprocess(self=processor)
[ "def", "restrict_to_dtype", "(", "dtype", ",", "message_template", ")", ":", "def", "processor", "(", "term_method", ",", "_", ",", "term_instance", ")", ":", "term_dtype", "=", "term_instance", ".", "dtype", "if", "term_dtype", "!=", "dtype", ":", "raise", "TypeError", "(", "message_template", ".", "format", "(", "method_name", "=", "term_method", ".", "__name__", ",", "expected_dtype", "=", "dtype", ".", "name", ",", "received_dtype", "=", "term_dtype", ",", ")", ")", "return", "term_instance", "return", "preprocess", "(", "self", "=", "processor", ")" ]
A factory for decorators that restrict Term methods to only be callable on
Terms with a specific dtype.

This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Term methods.

Parameters
----------
dtype : numpy.dtype
    The dtype on which the decorated method may be called.
message_template : str
    A template for the error message to be raised.
    `message_template.format` will be called with keyword arguments
    `method_name`, `expected_dtype`, and `received_dtype`.

Examples
--------
@restrict_to_dtype(
    dtype=float64_dtype,
    message_template=(
        "{method_name}() was called on a factor of dtype {received_dtype}. "
        "{method_name}() requires factors of dtype {expected_dtype}."
    ),
)
def some_factor_method(self, ...):
    self.stuff_that_requires_being_float64(...)
[ "A", "factory", "for", "decorators", "that", "restrict", "Term", "methods", "to", "only", "be", "callable", "on", "Terms", "with", "a", "specific", "dtype", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/api_utils.py#L7-L49
train
quantopian/zipline
zipline/sources/benchmark_source.py
BenchmarkSource.daily_returns
def daily_returns(self, start, end=None): """Returns the daily returns for the given period. Parameters ---------- start : datetime The inclusive starting session label. end : datetime, optional The inclusive ending session label. If not provided, treat ``start`` as a scalar key. Returns ------- returns : pd.Series or float The returns in the given period. The index will be the trading calendar in the range [start, end]. If just ``start`` is provided, return the scalar value on that day. """ if end is None: return self._daily_returns[start] return self._daily_returns[start:end]
python
def daily_returns(self, start, end=None): """Returns the daily returns for the given period. Parameters ---------- start : datetime The inclusive starting session label. end : datetime, optional The inclusive ending session label. If not provided, treat ``start`` as a scalar key. Returns ------- returns : pd.Series or float The returns in the given period. The index will be the trading calendar in the range [start, end]. If just ``start`` is provided, return the scalar value on that day. """ if end is None: return self._daily_returns[start] return self._daily_returns[start:end]
[ "def", "daily_returns", "(", "self", ",", "start", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "return", "self", ".", "_daily_returns", "[", "start", "]", "return", "self", ".", "_daily_returns", "[", "start", ":", "end", "]" ]
Returns the daily returns for the given period. Parameters ---------- start : datetime The inclusive starting session label. end : datetime, optional The inclusive ending session label. If not provided, treat ``start`` as a scalar key. Returns ------- returns : pd.Series or float The returns in the given period. The index will be the trading calendar in the range [start, end]. If just ``start`` is provided, return the scalar value on that day.
[ "Returns", "the", "daily", "returns", "for", "the", "given", "period", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/sources/benchmark_source.py#L124-L145
train
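The scalar-versus-slice behavior comes straight from pandas label indexing, where a label slice includes both endpoints. A standalone sketch of that mechanism:

import pandas as pd

daily = pd.Series(
    [0.010, -0.002, 0.005],
    index=pd.date_range('2014-01-02', periods=3, tz='utc'),
)
daily[pd.Timestamp('2014-01-03', tz='utc')]  # scalar, like daily_returns(start)
daily[pd.Timestamp('2014-01-02', tz='utc'):
      pd.Timestamp('2014-01-03', tz='utc')]  # inclusive slice, like daily_returns(start, end)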
quantopian/zipline
zipline/sources/benchmark_source.py
BenchmarkSource._initialize_precalculated_series
def _initialize_precalculated_series(self,
                                     asset,
                                     trading_calendar,
                                     trading_days,
                                     data_portal):
    """
    Internal method that pre-calculates the benchmark return series for
    use in the simulation.

    Parameters
    ----------
    asset: Asset to use

    trading_calendar: TradingCalendar

    trading_days: pd.DatetimeIndex

    data_portal: DataPortal

    Notes
    -----
    If the benchmark asset started trading after the simulation start,
    or finished trading before the simulation end, exceptions are raised.

    If the benchmark asset started trading the same day as the simulation
    start, the first available minute price on that day is used instead
    of the previous close.

    We use history to get an adjusted price history for each day's close,
    as of the look-back date (the last day of the simulation).  Prices are
    fully adjusted for dividends, splits, and mergers.

    Returns
    -------
    returns : pd.Series
        indexed by trading day, whose values represent the % change from
        close to close.
    daily_returns : pd.Series
        the partial daily returns for each minute
    """
    if self.emission_rate == "minute":
        minutes = trading_calendar.minutes_for_sessions_in_range(
            self.sessions[0], self.sessions[-1]
        )
        benchmark_series = data_portal.get_history_window(
            [asset],
            minutes[-1],
            bar_count=len(minutes) + 1,
            frequency="1m",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        return (
            benchmark_series.pct_change()[1:],
            self.downsample_minute_return_series(
                trading_calendar,
                benchmark_series,
            ),
        )

    start_date = asset.start_date
    if start_date < trading_days[0]:
        # get the window of close prices for benchmark_asset from the
        # last trading day of the simulation, going up to one day
        # before the simulation start day (so that we can get the %
        # change on day 1)
        benchmark_series = data_portal.get_history_window(
            [asset],
            trading_days[-1],
            bar_count=len(trading_days) + 1,
            frequency="1d",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        returns = benchmark_series.pct_change()[1:]
        return returns, returns
    elif start_date == trading_days[0]:
        # Attempt to handle the case where stock data starts on the first
        # day; in this case, use the open-to-close return.
        benchmark_series = data_portal.get_history_window(
            [asset],
            trading_days[-1],
            bar_count=len(trading_days),
            frequency="1d",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        # get a minute history window of the first day
        first_open = data_portal.get_spot_value(
            asset,
            'open',
            trading_days[0],
            'daily',
        )
        first_close = data_portal.get_spot_value(
            asset,
            'close',
            trading_days[0],
            'daily',
        )

        first_day_return = (first_close - first_open) / first_open

        returns = benchmark_series.pct_change()[:]
        returns[0] = first_day_return
        return returns, returns
    else:
        raise ValueError(
            'cannot set benchmark to asset that does not exist during'
            ' the simulation period (asset start date=%r)' % start_date
        )
python
def _initialize_precalculated_series(self,
                                     asset,
                                     trading_calendar,
                                     trading_days,
                                     data_portal):
    """
    Internal method that pre-calculates the benchmark return series for
    use in the simulation.

    Parameters
    ----------
    asset: Asset to use

    trading_calendar: TradingCalendar

    trading_days: pd.DatetimeIndex

    data_portal: DataPortal

    Notes
    -----
    If the benchmark asset started trading after the simulation start,
    or finished trading before the simulation end, exceptions are raised.

    If the benchmark asset started trading the same day as the simulation
    start, the first available minute price on that day is used instead
    of the previous close.

    We use history to get an adjusted price history for each day's close,
    as of the look-back date (the last day of the simulation). Prices are
    fully adjusted for dividends, splits, and mergers.

    Returns
    -------
    returns : pd.Series
        indexed by trading day, whose values represent the % change from
        close to close.
    daily_returns : pd.Series
        the partial daily returns for each minute
    """
    if self.emission_rate == "minute":
        minutes = trading_calendar.minutes_for_sessions_in_range(
            self.sessions[0], self.sessions[-1]
        )
        benchmark_series = data_portal.get_history_window(
            [asset],
            minutes[-1],
            bar_count=len(minutes) + 1,
            frequency="1m",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        return (
            benchmark_series.pct_change()[1:],
            self.downsample_minute_return_series(
                trading_calendar,
                benchmark_series,
            ),
        )

    start_date = asset.start_date
    if start_date < trading_days[0]:
        # get the window of close prices for benchmark_asset from the
        # last trading day of the simulation, going up to one day
        # before the simulation start day (so that we can get the %
        # change on day 1)
        benchmark_series = data_portal.get_history_window(
            [asset],
            trading_days[-1],
            bar_count=len(trading_days) + 1,
            frequency="1d",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        returns = benchmark_series.pct_change()[1:]
        return returns, returns
    elif start_date == trading_days[0]:
        # Attempt to handle case where stock data starts on first
        # day, in this case use the open to close return.
        benchmark_series = data_portal.get_history_window(
            [asset],
            trading_days[-1],
            bar_count=len(trading_days),
            frequency="1d",
            field="price",
            data_frequency=self.emission_rate,
            ffill=True
        )[asset]

        # get the first day's daily open and close so we can compute an
        # open-to-close return (pct_change leaves the first value NaN)
        first_open = data_portal.get_spot_value(
            asset,
            'open',
            trading_days[0],
            'daily',
        )
        first_close = data_portal.get_spot_value(
            asset,
            'close',
            trading_days[0],
            'daily',
        )

        first_day_return = (first_close - first_open) / first_open

        returns = benchmark_series.pct_change()[:]
        returns[0] = first_day_return
        return returns, returns
    else:
        raise ValueError(
            'cannot set benchmark to asset that does not exist during'
            ' the simulation period (asset start date=%r)' % start_date
        )
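A compact, self-contained illustration of the two daily-return paths in this function, using plain pandas (the prices, dates, and variable names are made up for the example; no zipline calls are used). It shows why fetching `len(trading_days) + 1` bars lets `pct_change()[1:]` yield exactly one close-to-close return per session, and how the first value is patched with an open-to-close return when the asset lists on the simulation's first day:

```python
import pandas as pd

sessions = pd.date_range("2014-01-02", periods=3, freq="B")

# Case 1: the asset trades before the simulation starts. Fetch one
# extra close before day 1 so pct_change() has a prior close for the
# first session; dropping the leading NaN leaves one return per day.
closes_with_lookback = pd.Series(
    [100.0, 102.0, 101.0, 103.0],
    index=pd.date_range("2014-01-01", periods=4, freq="B"),
)
returns = closes_with_lookback.pct_change()[1:]
assert len(returns) == len(sessions)

# Case 2: the asset lists on the first session, so there is no prior
# close. pct_change() leaves NaN in slot 0; replace it with the
# open-to-close return of that day.
closes = pd.Series([102.0, 101.0, 103.0], index=sessions)
first_open, first_close = 100.0, 102.0
returns2 = closes.pct_change()
returns2.iloc[0] = (first_close - first_open) / first_open
print(returns2)
```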
[ "def", "_initialize_precalculated_series", "(", "self", ",", "asset", ",", "trading_calendar", ",", "trading_days", ",", "data_portal", ")", ":", "if", "self", ".", "emission_rate", "==", "\"minute\"", ":", "minutes", "=", "trading_calendar", ".", "minutes_for_sessions_in_range", "(", "self", ".", "sessions", "[", "0", "]", ",", "self", ".", "sessions", "[", "-", "1", "]", ")", "benchmark_series", "=", "data_portal", ".", "get_history_window", "(", "[", "asset", "]", ",", "minutes", "[", "-", "1", "]", ",", "bar_count", "=", "len", "(", "minutes", ")", "+", "1", ",", "frequency", "=", "\"1m\"", ",", "field", "=", "\"price\"", ",", "data_frequency", "=", "self", ".", "emission_rate", ",", "ffill", "=", "True", ")", "[", "asset", "]", "return", "(", "benchmark_series", ".", "pct_change", "(", ")", "[", "1", ":", "]", ",", "self", ".", "downsample_minute_return_series", "(", "trading_calendar", ",", "benchmark_series", ",", ")", ",", ")", "start_date", "=", "asset", ".", "start_date", "if", "start_date", "<", "trading_days", "[", "0", "]", ":", "# get the window of close prices for benchmark_asset from the", "# last trading day of the simulation, going up to one day", "# before the simulation start day (so that we can get the %", "# change on day 1)", "benchmark_series", "=", "data_portal", ".", "get_history_window", "(", "[", "asset", "]", ",", "trading_days", "[", "-", "1", "]", ",", "bar_count", "=", "len", "(", "trading_days", ")", "+", "1", ",", "frequency", "=", "\"1d\"", ",", "field", "=", "\"price\"", ",", "data_frequency", "=", "self", ".", "emission_rate", ",", "ffill", "=", "True", ")", "[", "asset", "]", "returns", "=", "benchmark_series", ".", "pct_change", "(", ")", "[", "1", ":", "]", "return", "returns", ",", "returns", "elif", "start_date", "==", "trading_days", "[", "0", "]", ":", "# Attempt to handle case where stock data starts on first", "# day, in this case use the open to close return.", "benchmark_series", "=", "data_portal", ".", "get_history_window", "(", "[", "asset", "]", ",", "trading_days", "[", "-", "1", "]", ",", "bar_count", "=", "len", "(", "trading_days", ")", ",", "frequency", "=", "\"1d\"", ",", "field", "=", "\"price\"", ",", "data_frequency", "=", "self", ".", "emission_rate", ",", "ffill", "=", "True", ")", "[", "asset", "]", "# get a minute history window of the first day", "first_open", "=", "data_portal", ".", "get_spot_value", "(", "asset", ",", "'open'", ",", "trading_days", "[", "0", "]", ",", "'daily'", ",", ")", "first_close", "=", "data_portal", ".", "get_spot_value", "(", "asset", ",", "'close'", ",", "trading_days", "[", "0", "]", ",", "'daily'", ",", ")", "first_day_return", "=", "(", "first_close", "-", "first_open", ")", "/", "first_open", "returns", "=", "benchmark_series", ".", "pct_change", "(", ")", "[", ":", "]", "returns", "[", "0", "]", "=", "first_day_return", "return", "returns", ",", "returns", "else", ":", "raise", "ValueError", "(", "'cannot set benchmark to asset that does not exist during'", "' the simulation period (asset start date=%r)'", "%", "start_date", ")" ]
Internal method that pre-calculates the benchmark return series for
use in the simulation.

Parameters
----------
asset: Asset to use

trading_calendar: TradingCalendar

trading_days: pd.DatetimeIndex

data_portal: DataPortal

Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.

If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.

We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.

Returns
-------
returns : pd.Series
    indexed by trading day, whose values represent the % change from
    close to close.
daily_returns : pd.Series
    the partial daily returns for each minute
[ "Internal", "method", "that", "pre", "-", "calculates", "the", "benchmark", "return", "series", "for", "use", "in", "the", "simulation", "." ]
77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/sources/benchmark_source.py#L196-L312
train
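For the minute-emission path, the second returned series comes from `downsample_minute_return_series`, whose body is not shown in this record. A rough sketch of one plausible reading, "the partial daily returns for each minute" as each minute's cumulative return relative to the prior session's close; the grouping rule below is an assumption for illustration, not the actual zipline implementation:

```python
import pandas as pd

# two sessions of three "minutes" each, plus one lookback bar
idx = pd.to_datetime([
    "2014-01-02 09:30", "2014-01-02 09:31", "2014-01-02 09:32",
    "2014-01-03 09:30", "2014-01-03 09:31", "2014-01-03 09:32",
])
prices = pd.Series([100.0, 101.0, 102.0, 102.0, 103.0, 104.0], index=idx)
prev_bar = pd.Series([99.0], index=pd.to_datetime(["2014-01-01 16:00"]))
series = pd.concat([prev_bar, prices])

# partial return at each minute = price / previous session's close - 1
sessions = series.index.normalize()
prior_close = series.groupby(sessions).last().shift(1)
partial = series / prior_close.reindex(sessions).to_numpy() - 1
print(partial)  # NaN for the lookback bar, then intraday partial returns
```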