Back to Community
Value template long-only w/ trend filter

Hi Quantopians,

As a follow-up to the long-short value template, here's the simpler long-only version, including the famous 200-day simple moving average trend filter. The code also shows how to easily implement a composite fundamentals score using different metrics. (see lines 90+)

This code template is in part inspired by digging through "What Works on Wall Street, Fourth Edition" by J. O'Shaughnessy (and some of the related implementations here on Quantopian) and getting mixed results reproducing the exact outcomes. The three metrics I'm using here are pretty arbitrary and I'd be glad to hear from you: which ratios do a great job for long-only value investing?

--- Origin

Previous quantapolis.com templates:
* Volatility Selling w/ Constant Proportion Portfolio Insurance
* SPY constant volatility w/ dynamic leverage
* Value Example (long-short)

Clone Algorithm
149
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20    # number of equal-weight positions to hold
MARKETCAP_LIMIT    = 5e9   # minimum market cap (USD) to count as large-cap

SYMBOLS_LIMIT      = 500   # cap on securities queried / tracked in the universe
OBSOLETE_LOOKAHEAD = 60    # days ahead to check end_date (delisting filter)

HISTORY_PERIODS    = 200   # lookback bars for the trend filter (200-day SMA)
HISTORY_MODE       = '1d'  # daily bars
SYMBOL_MARKET      = sid(8554)  # market proxy for the trend filter (sid 8554 — presumably SPY; confirm)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up portfolio state and schedule the monthly rebalancing pipeline."""
    context.alloc = pd.Series()   # target weights chosen by allocate()
    context.score = pd.Series()   # composite value scores from the last query
    context.last_month = -1       # month of the last fundamentals refresh

    # allocate() is registered before execute() so that, on the first trading
    # day of each month (one hour before the close), target weights are
    # computed before orders are submitted.
    for step in (allocate, execute):
        schedule_function(
            step,
            date_rule=date_rules.month_start(),
            time_rule=time_rules.market_close(hours=1)
        )

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context):
    """Monthly universe refresh.

    Queries the fundamentals database for the largest NYSE companies above
    the market-cap limit, builds a composite value score (sum of per-metric
    ranks; a lower summed score is better) and registers the tracked
    universe.  Runs daily, but the expensive query is only issued once per
    calendar month.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # select the top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: one row per security, one column per metric
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    # NOTE: Series.order() is the old pandas spelling of sort_values()
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    # (current holdings, the market proxy and the last allocation must stay
    # tracked even if they drop out of the fundamentals query)
    #####
    must_have = set(context.portfolio.positions.keys()) | \
                set([SYMBOL_MARKET]) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Filter candidates and store equal weights for the top-k value names
    in context.alloc (consumed by execute() later the same day)."""
    value = context.score

    #####
    # filters
    #####
    candidates = set(value.index)
    
    # ignore symbols without current price data
    ignore_data = set([s for s in candidates if s not in data])
    
    # ignore symbols becoming obsolete soon (lookahead bias)
    # NOTE(review): end_date is only knowable in a backtest; this trades a
    # small lookahead bias for not ordering soon-to-be-delisted stocks
    ignore_obsolete = set([s for s in candidates if s.end_date <= get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)])
    
    # ignore market below 200-day sma
    # (trend filter: when the market proxy is below its 200-day mean, every
    # candidate is excluded, i.e. the portfolio goes to cash this rebalance)
    h = history(HISTORY_PERIODS, HISTORY_MODE, 'price')
    ignore_trend = candidates if h[SYMBOL_MARKET][-1] < h[SYMBOL_MARKET].mean() else set()
    
    candidates = candidates - ignore_data - ignore_obsolete - ignore_trend
    
    print "selecting top %d among %d candidates" % (POSITION_COUNT, len(candidates))
    
    # sort by composite score, lower is better
    topk = list(value[candidates].order().index[:POSITION_COUNT])
    
    context.alloc = pd.Series(1.0 / POSITION_COUNT, index=topk)
    
    print "Allocation:\n%s" % context.alloc
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute(context, data):
    """Submit orders: liquidate drop-outs first, then size the target names."""
    targets = context.alloc
    # Close out anything held that is no longer in the target allocation.
    for asset in context.portfolio.positions:
        if asset not in targets.index:
            order_target_percent(asset, 0)
    # Open / rebalance every target position to its equal weight.
    for asset in targets.index:
        order_target_percent(asset, targets[asset])
    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Per-bar bookkeeping: chart account leverage and net exposure."""
    record(
        leverage=context.account.leverage,
        exposure=context.account.net_leverage,
    )
There was a runtime error.
43 responses

For experimentation, here's the "clean" value strategy without trend filter.

Clone Algorithm
149
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20    # number of equal-weight positions to hold
MARKETCAP_LIMIT    = 5e9   # minimum market cap (USD) to count as large-cap

SYMBOLS_LIMIT      = 500   # cap on securities queried / tracked in the universe
OBSOLETE_LOOKAHEAD = 60    # days ahead to check end_date (delisting filter)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up portfolio state and schedule the monthly rebalancing pipeline."""
    context.alloc = pd.Series()   # target weights chosen by allocate()
    context.score = pd.Series()   # composite value scores from the last query
    context.last_month = -1       # month of the last fundamentals refresh

    # allocate() is registered before execute() so that, on the first trading
    # day of each month (one hour before the close), target weights are
    # computed before orders are submitted.
    for step in (allocate, execute):
        schedule_function(
            step,
            date_rule=date_rules.month_start(),
            time_rule=time_rules.market_close(hours=1)
        )

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context):
    """Monthly universe refresh.

    Queries the fundamentals database for the largest NYSE companies above
    the market-cap limit, builds a composite value score (sum of per-metric
    ranks; a lower summed score is better) and registers the tracked
    universe.  Runs daily, but the expensive query is only issued once per
    calendar month.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # select the top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: one row per security, one column per metric
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    # NOTE: Series.order() is the old pandas spelling of sort_values()
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    # (current holdings and the last allocation must stay tracked even if
    # they drop out of the fundamentals query)
    #####
    must_have = set(context.portfolio.positions.keys()) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Filter candidates and store equal weights for the top-k value names
    in context.alloc (consumed by execute() later the same day).

    This variant has no market-trend filter: the portfolio stays fully
    invested regardless of market direction.
    """
    value = context.score

    #####
    # filters
    #####
    candidates = set(value.index)
    
    # ignore symbols without current price data
    ignore_data = set([s for s in candidates if s not in data])
    
    # ignore symbols becoming obsolete soon (lookahead bias)
    # NOTE(review): end_date is only knowable in a backtest; this trades a
    # small lookahead bias for not ordering soon-to-be-delisted stocks
    ignore_obsolete = set([s for s in candidates if s.end_date <= get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)])
    
    candidates = candidates - ignore_data - ignore_obsolete
    
    print "selecting top %d among %d candidates" % (POSITION_COUNT, len(candidates))
    
    # sort by composite score, lower is better
    topk = list(value[candidates].order().index[:POSITION_COUNT])
    
    context.alloc = pd.Series(1.0 / POSITION_COUNT, index=topk)
    
    print "Allocation:\n%s" % context.alloc
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute(context, data):
    """Submit orders: liquidate drop-outs first, then size the target names."""
    targets = context.alloc
    # Close out anything held that is no longer in the target allocation.
    for asset in context.portfolio.positions:
        if asset not in targets.index:
            order_target_percent(asset, 0)
    # Open / rebalance every target position to its equal weight.
    for asset in targets.index:
        order_target_percent(asset, targets[asset])
    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Per-bar bookkeeping: chart account leverage and net exposure."""
    record(
        leverage=context.account.leverage,
        exposure=context.account.net_leverage,
    )
There was a runtime error.

Nice example strategy — thank you kindly for sharing.

Agreed.
Meanwhile, I checked into those and despite the leverage appearing to sit around 1:
Margin: -$58,961 on 2014-09-02     (in the first algo only)

If sell positions not in top k chews on any, give those a minute for the sells to complete before buy and adjust top k, and that might help.

def execute(context, data):  
    # sell positions not in top k  
    liquidate = []  
    for s in context.portfolio.positions:  
        if not s in context.alloc.index:  
            order_target_percent(s, 0)  
            liquidate.append(s.symbol)  
    if liquidate:  
        #log.info(str(liquidate))    # Show them  
        return  # Higher output vs use of get_open_orders() below

    # Lower output vs use of return above.  
    #if get_open_orders(): return

    # buy and adjust top k  
    for s in context.alloc.index:  
        order_target_percent(s, context.alloc[s])  

That's a useful adjustment to make - it comes out to about a 10% rounding error at that point. It's probably also a good idea to trade stocks with passive VWAP at first and avoid making the spread before getting aggressive towards the end of the trading day.

With that particular effort for reeling in margin returns wound up around 420% instead of 700%.

Clone Algorithm
31
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

def info(context, data):
    ''' Custom chart and/or log of maxspent returns and related information

    "Maxspent return" is profit divided by the largest amount of cash ever
    deployed (starting cash minus the lowest cash level seen), as opposed to
    the standard return on starting capital.  The record_* flags below
    toggle the individual custom-chart series.
    '''
    record_max_lvrg = 1      # maximum leverage encountered
    record_q_return = 1      # Quantopian return value (percentage)
    record_maxspent = 1      # maxspent returns (percentage)
    record_pnl      = 0      # profit-n-loss
    record_cash_low = 1      # new lowest cash levels

    # Also log to the logging window conditionally
    log_method = 'cash_low'  # 'daily' or 'cash_low' for only when new cash low

    c = context                          # For brevity
    new_cash_low = 0                     # To trigger logging in cash_low case
    date = str(get_datetime().date())    # To trigger logging in daily case

    # Lazy one-time init of the tracking state kept on the context object.
    # NOTE(review): relies on the platform context supporting 'in' checks.
    if 'cash_low' not in c:  # init these instead in initialize for better efficiency
        c.cash_low = c.portfolio.starting_cash
        c.date_prv = date
        c.max_lvrg = 0

    if c.portfolio.cash < c.cash_low:    # New cash low
        new_cash_low = 1
        c.cash_low   = c.portfolio.cash
        if record_cash_low:
            record(CashLow = int(c.cash_low))

    maxspent_rtrn = 0        # Returns based on maximum spent
    q_rtrn        = 0        # Returns by Quantopian
    profit_loss   = 0        # Profit-n-loss
    cash_max_used = c.portfolio.starting_cash - c.cash_low

    if record_max_lvrg:      # Maximum leverage
        if c.account.leverage > c.max_lvrg:
            c.max_lvrg = c.account.leverage
            record(MaxLvrg = c.max_lvrg)
        
    if record_q_return:      # Quantopian returns to compare to maxspent returns curve
        start  = context.portfolio.starting_cash  # starting_cash is king
        q_rtrn = 100 * (context.portfolio.portfolio_value - start) / start
        record(QRet = q_rtrn)
        
    if record_maxspent:      # Returns based on amount actually spent
        if cash_max_used != 0:     # Avoid zero-divide
            maxspent_rtrn = 100 * context.portfolio.pnl / cash_max_used
            record(MaxSpntRet = maxspent_rtrn)

    if record_pnl:           # "Profit and Loss" in dollars
        profit_loss = context.portfolio.pnl
        record(PnL = profit_loss)

    # Log a one-line summary either on every new trading day ('daily')
    # or only when a new cash low was just hit ('cash_low').
    if log_method == 'cash_low' and new_cash_low \
      or log_method == 'daily' and c.date_prv != date:
        mxlv  = 'MaxLvrg '    + '%.1f' % c.max_lvrg    if record_max_lvrg else ''
        qret  = 'QRet '       + '%.2f' % q_rtrn        if record_q_return else ''
        mxspt = 'MaxSpntRet ' + '%.2f' % maxspent_rtrn if record_maxspent else ''
        pnl   = 'PnL '        + '%.0f' % profit_loss   if record_pnl      else ''
        csh   = 'CashLow '    + '%.0f' % c.cash_low    if record_cash_low else ''
        log.info('{} {} {} {} {}'.format(mxlv, qret, mxspt, pnl, csh))

    c.date_prv = date
    
    '''
    w_shorting = 0  
    # Think of shorting like an expense to be able to include it? 
    #   Valid? You could help solve the puzzle of shorting wrt maxspent_rtrn.
    if context.portfolio.positions_value < 0:
        w_shorting = c.cash_low + context.portfolio.positions_value
        if w_shorting < c.cash_low:
            c.cash_low = w_shorting
    '''

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20    # number of equal-weight positions to hold
MARKETCAP_LIMIT    = 5e9   # minimum market cap (USD) to count as large-cap

SYMBOLS_LIMIT      = 500   # cap on securities queried / tracked in the universe
OBSOLETE_LOOKAHEAD = 60    # days ahead to check end_date (delisting filter)

HISTORY_PERIODS    = 200   # lookback bars for the trend filter (200-day SMA)
HISTORY_MODE       = '1d'  # daily bars
SYMBOL_MARKET      = sid(8554)  # market proxy for the trend filter (sid 8554 — presumably SPY; confirm)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up portfolio state and schedule the monthly pipeline + logging."""
    context.alloc = pd.Series()   # target weights chosen by allocate()
    context.score = pd.Series()   # composite value scores from the last query
    context.last_month = -1       # month of the last fundamentals refresh

    # All steps fire on the first trading day of the month.  allocate() is
    # registered before execute() so weights exist before orders go out;
    # info() runs at the close to chart/log the day's results.
    for step, when in ((allocate, time_rules.market_close(hours=1)),
                       (execute,  time_rules.market_close(hours=1)),
                       (info,     time_rules.market_close())):
        schedule_function(step, date_rule=date_rules.month_start(), time_rule=when)

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context):
    """Monthly universe refresh.

    Queries the fundamentals database for the largest NYSE companies above
    the market-cap limit, builds a composite value score (sum of per-metric
    ranks; a lower summed score is better) and registers the tracked
    universe.  Runs daily, but the expensive query is only issued once per
    calendar month.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # select the top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: one row per security, one column per metric
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    # NOTE: Series.order() is the old pandas spelling of sort_values()
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    # (current holdings, the market proxy and the last allocation must stay
    # tracked even if they drop out of the fundamentals query)
    #####
    must_have = set(context.portfolio.positions.keys()) | \
                set([SYMBOL_MARKET]) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    #print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Filter candidates and store equal weights for the top-k value names
    in context.alloc (consumed by execute() later the same day)."""
    value = context.score

    #####
    # filters
    #####
    candidates = set(value.index)
    
    # ignore symbols without current price data
    ignore_data = set([s for s in candidates if s not in data])
    
    # ignore symbols becoming obsolete soon (lookahead bias)
    # NOTE(review): end_date is only knowable in a backtest; this trades a
    # small lookahead bias for not ordering soon-to-be-delisted stocks
    ignore_obsolete = set([s for s in candidates if s.end_date <= get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)])
    
    # ignore market below 200-day sma
    # (trend filter: when the market proxy is below its 200-day mean, every
    # candidate is excluded, i.e. the portfolio goes to cash this rebalance)
    h = history(HISTORY_PERIODS, HISTORY_MODE, 'price')
    ignore_trend = candidates if h[SYMBOL_MARKET][-1] < h[SYMBOL_MARKET].mean() else set()
    
    candidates = candidates - ignore_data - ignore_obsolete - ignore_trend
    
    #print "selecting top %d among %d candidates" % (POSITION_COUNT, len(candidates))
    
    # sort by composite score, lower is better
    topk = list(value[candidates].order().index[:POSITION_COUNT])
    
    context.alloc = pd.Series(1.0 / POSITION_COUNT, index=topk)
    
    #print "Allocation:\n%s" % context.alloc
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute(context, data):
    """Submit orders, but never sells and buys in the same call.

    If any position must be liquidated, only the sell orders are placed and
    the function returns early; the buys wait for a later invocation so the
    sale proceeds have settled (avoids dipping into margin).
    """
    sold_any = False
    for asset in context.portfolio.positions:
        if asset not in context.alloc.index:
            order_target_percent(asset, 0)
            sold_any = True
    if sold_any:
        # Defer buying until no sells were needed.
        # (Alternative: check get_open_orders() instead of returning.)
        return

    # Open / rebalance every target position to its equal weight.
    for asset in context.alloc.index:
        order_target_percent(asset, context.alloc[asset])

    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Every bar: delegate charting/logging to info().

    The plain leverage/exposure record() calls were retired in favour of
    the richer maxspent/leverage chart produced by info().
    """
    info(context, data)
    
    
    
    
There was a runtime error.

Inviting custom data chart mouseover above and on this trading equivalent of the original.

To avoid the margin (negative cash, borrowing), have to make sure selling is complete before spending that money.

I'll toss an idea out there that might be pretty clean, a separate schedule_function for any selloffs that might be necessary, right before the scheduled allocate.

Clone Algorithm
31
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

def info(context, data):
    ''' Custom chart and/or log of maxspent returns and related information

    "Maxspent return" is profit divided by the largest amount of cash ever
    deployed (starting cash minus the lowest cash level seen), as opposed to
    the standard return on starting capital.  The record_* flags below
    toggle the individual custom-chart series.
    '''
    record_max_lvrg = 1      # maximum leverage encountered
    record_q_return = 1      # Quantopian return value (percentage)
    record_maxspent = 1      # maxspent returns (percentage)
    record_pnl      = 0      # profit-n-loss
    record_cash_low = 1      # new lowest cash levels

    # Also log to the logging window conditionally
    log_method = 'cash_low'  # 'daily' or 'cash_low' for only when new cash low

    c = context                          # For brevity
    new_cash_low = 0                     # To trigger logging in cash_low case
    date = str(get_datetime().date())    # To trigger logging in daily case

    # Lazy one-time init of the tracking state kept on the context object.
    # NOTE(review): relies on the platform context supporting 'in' checks.
    if 'cash_low' not in c:  # init these instead in initialize for better efficiency
        c.cash_low = c.portfolio.starting_cash
        c.date_prv = date
        c.max_lvrg = 0

    if c.portfolio.cash < c.cash_low:    # New cash low
        new_cash_low = 1
        c.cash_low   = c.portfolio.cash
        if record_cash_low:
            record(CashLow = int(c.cash_low))

    maxspent_rtrn = 0        # Returns based on maximum spent
    q_rtrn        = 0        # Returns by Quantopian
    profit_loss   = 0        # Profit-n-loss
    cash_max_used = c.portfolio.starting_cash - c.cash_low

    if record_max_lvrg:      # Maximum leverage
        if c.account.leverage > c.max_lvrg:
            c.max_lvrg = c.account.leverage
            record(MaxLvrg = c.max_lvrg)
        
    if record_q_return:      # Quantopian returns to compare to maxspent returns curve
        start  = context.portfolio.starting_cash  # starting_cash is king
        q_rtrn = 100 * (context.portfolio.portfolio_value - start) / start
        record(QRet = q_rtrn)
        
    if record_maxspent:      # Returns based on amount actually spent
        if cash_max_used != 0:     # Avoid zero-divide
            maxspent_rtrn = 100 * context.portfolio.pnl / cash_max_used
            record(MaxSpntRet = maxspent_rtrn)

    if record_pnl:           # "Profit and Loss" in dollars
        profit_loss = context.portfolio.pnl
        record(PnL = profit_loss)

    # Log a one-line summary either on every new trading day ('daily')
    # or only when a new cash low was just hit ('cash_low').
    if log_method == 'cash_low' and new_cash_low \
      or log_method == 'daily' and c.date_prv != date:
        mxlv  = 'MaxLvrg '    + '%.1f' % c.max_lvrg    if record_max_lvrg else ''
        qret  = 'QRet '       + '%.2f' % q_rtrn        if record_q_return else ''
        mxspt = 'MaxSpntRet ' + '%.2f' % maxspent_rtrn if record_maxspent else ''
        pnl   = 'PnL '        + '%.0f' % profit_loss   if record_pnl      else ''
        csh   = 'CashLow '    + '%.0f' % c.cash_low    if record_cash_low else ''
        log.info('{} {} {} {} {}'.format(mxlv, qret, mxspt, pnl, csh))

    c.date_prv = date
    
    '''
    w_shorting = 0  
    # Think of shorting like an expense to be able to include it? 
    #   Valid? You could help solve the puzzle of shorting wrt maxspent_rtrn.
    if context.portfolio.positions_value < 0:
        w_shorting = c.cash_low + context.portfolio.positions_value
        if w_shorting < c.cash_low:
            c.cash_low = w_shorting
    '''

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20    # number of equal-weight positions to hold
MARKETCAP_LIMIT    = 5e9   # minimum market cap (USD) to count as large-cap

SYMBOLS_LIMIT      = 500   # cap on securities queried / tracked in the universe
OBSOLETE_LOOKAHEAD = 60    # days ahead to check end_date (delisting filter)

HISTORY_PERIODS    = 200   # lookback bars for the trend filter (200-day SMA)
HISTORY_MODE       = '1d'  # daily bars
SYMBOL_MARKET      = sid(8554)  # market proxy for the trend filter (sid 8554 — presumably SPY; confirm)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up portfolio state and schedule the monthly pipeline + logging."""
    context.alloc = pd.Series()   # target weights chosen by allocate()
    context.score = pd.Series()   # composite value scores from the last query
    context.last_month = -1       # month of the last fundamentals refresh

    # All steps fire on the first trading day of the month.  allocate() is
    # registered before execute() so weights exist before orders go out;
    # info() runs at the close to chart/log the day's results.
    for step, when in ((allocate, time_rules.market_close(hours=1)),
                       (execute,  time_rules.market_close(hours=1)),
                       (info,     time_rules.market_close())):
        schedule_function(step, date_rule=date_rules.month_start(), time_rule=when)

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context):
    """Monthly universe refresh.

    Queries the fundamentals database for the largest NYSE companies above
    the market-cap limit, builds a composite value score (sum of per-metric
    ranks; a lower summed score is better) and registers the tracked
    universe.  Runs daily, but the expensive query is only issued once per
    calendar month.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # select the top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: one row per security, one column per metric
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    # NOTE: Series.order() is the old pandas spelling of sort_values()
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    # (current holdings, the market proxy and the last allocation must stay
    # tracked even if they drop out of the fundamentals query)
    #####
    must_have = set(context.portfolio.positions.keys()) | \
                set([SYMBOL_MARKET]) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    #print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Filter candidates and store equal weights for the top-k value names
    in context.alloc (consumed by execute() later the same day)."""
    value = context.score

    #####
    # filters
    #####
    candidates = set(value.index)
    
    # ignore symbols without current price data
    ignore_data = set([s for s in candidates if s not in data])
    
    # ignore symbols becoming obsolete soon (lookahead bias)
    # NOTE(review): end_date is only knowable in a backtest; this trades a
    # small lookahead bias for not ordering soon-to-be-delisted stocks
    ignore_obsolete = set([s for s in candidates if s.end_date <= get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)])
    
    # ignore market below 200-day sma
    # (trend filter: when the market proxy is below its 200-day mean, every
    # candidate is excluded, i.e. the portfolio goes to cash this rebalance)
    h = history(HISTORY_PERIODS, HISTORY_MODE, 'price')
    ignore_trend = candidates if h[SYMBOL_MARKET][-1] < h[SYMBOL_MARKET].mean() else set()
    
    candidates = candidates - ignore_data - ignore_obsolete - ignore_trend
    
    #print "selecting top %d among %d candidates" % (POSITION_COUNT, len(candidates))
    
    # sort by composite score, lower is better
    topk = list(value[candidates].order().index[:POSITION_COUNT])
    
    context.alloc = pd.Series(1.0 / POSITION_COUNT, index=topk)
    
    #print "Allocation:\n%s" % context.alloc
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute(context, data):
    """Submit orders: liquidate drop-outs, then size the target names.

    The earlier "wait for sells to settle" early-return was disabled in this
    version, so sell and buy orders are submitted in the same call.
    """
    # Close out anything held that is no longer in the target allocation.
    for asset in context.portfolio.positions:
        if asset not in context.alloc.index:
            order_target_percent(asset, 0)

    # Open / rebalance every target position to its equal weight.
    for asset in context.alloc.index:
        order_target_percent(asset, context.alloc[asset])

    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Minute-bar hook.

    All live tracking is delegated to info(); uncomment the record()
    calls below to chart account leverage / net exposure directly.
    """
    # record(leverage=context.account.leverage)
    # record(exposure=context.account.net_leverage)
    info(context, data)
    
    
    
    
There was a runtime error.

Brilliant.

@garyha fixed a minor bug in the proposed execution logic that left money sitting around. Took the opportunity to split execution into two separate parts for buying and selling and also added leverage logging back in - you can see it's now stable at 1.0 again.

Clone Algorithm
76
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

def info(context, data):
    ''' Custom chart and/or log of maxspent returns and related information.

    Tracks, via record() and/or log.info():
      MaxLvrg    - highest account leverage seen so far
      QRet       - Quantopian-style return as a percent of starting cash
      MaxSpntRet - return relative to the maximum cash actually spent
                   (starting cash minus the lowest cash level reached)
      PnL        - profit and loss in dollars
      CashLow    - new lowest cash level reached

    Toggle the record_* flags to choose what is charted; log_method picks
    when a log line is emitted ('daily', or 'cash_low' for only when a
    new cash low occurs).
    '''
    record_max_lvrg = 1      # maximum leverage encountered
    record_q_return = 1      # Quantopian return value (percentage)
    record_maxspent = 1      # maxspent returns (percentage)
    record_pnl      = 0      # profit-n-loss
    record_cash_low = 1      # new lowest cash levels

    # Also log to the logging window conditionally
    log_method = 'cash_low'  # 'daily' or 'cash_low' for only when new cash low

    c = context                          # For brevity
    new_cash_low = 0                     # To trigger logging in cash_low case
    date = str(get_datetime().date())    # To trigger logging in daily case

    # Lazy one-time init of the persistent trackers on the context object.
    if 'cash_low' not in c:  # init these instead in initialize for better efficiency
        c.cash_low = c.portfolio.starting_cash
        c.date_prv = date
        c.max_lvrg = 0

    if c.portfolio.cash < c.cash_low:    # New cash low
        new_cash_low = 1
        c.cash_low   = c.portfolio.cash
        if record_cash_low:
            record(CashLow = int(c.cash_low))

    maxspent_rtrn = 0        # Returns based on maximum spent
    q_rtrn        = 0        # Returns by Quantopian
    profit_loss   = 0        # Profit-n-loss
    cash_max_used = c.portfolio.starting_cash - c.cash_low

    if record_max_lvrg:      # Maximum leverage
        if c.account.leverage > c.max_lvrg:
            c.max_lvrg = c.account.leverage
            record(MaxLvrg = c.max_lvrg)

    if record_q_return:      # Quantopian returns to compare to maxspent returns curve
        start  = context.portfolio.starting_cash  # starting_cash is king
        q_rtrn = 100 * (context.portfolio.portfolio_value - start) / start
        record(QRet = q_rtrn)

    if record_maxspent:      # Returns based on amount actually spent
        if cash_max_used != 0:     # Avoid zero-divide
            maxspent_rtrn = 100 * context.portfolio.pnl / cash_max_used
            record(MaxSpntRet = maxspent_rtrn)

    if record_pnl:           # "Profit and Loss" in dollars
        profit_loss = context.portfolio.pnl
        record(PnL = profit_loss)

    # Emit one log line per trigger; each field is blank when its
    # record_* flag is off.
    if log_method == 'cash_low' and new_cash_low \
      or log_method == 'daily' and c.date_prv != date:
        mxlv  = 'MaxLvrg '    + '%.1f' % c.max_lvrg    if record_max_lvrg else ''
        qret  = 'QRet '       + '%.2f' % q_rtrn        if record_q_return else ''
        mxspt = 'MaxSpntRet ' + '%.2f' % maxspent_rtrn if record_maxspent else ''
        pnl   = 'PnL '        + '%.0f' % profit_loss   if record_pnl      else ''
        csh   = 'CashLow '    + '%.0f' % c.cash_low    if record_cash_low else ''
        log.info('{} {} {} {} {}'.format(mxlv, qret, mxspt, pnl, csh))

    c.date_prv = date

    # Disabled experiment from the original author (kept for reference).
    '''
    w_shorting = 0  
    # Think of shorting like an expense to be able to include it? 
    #   Valid? You could help solve the puzzle of shorting wrt maxspent_rtrn.
    if context.portfolio.positions_value < 0:
        w_shorting = c.cash_low + context.portfolio.positions_value
        if w_shorting < c.cash_low:
            c.cash_low = w_shorting
    '''

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20          # number of equal-weight positions held
MARKETCAP_LIMIT    = 5e9         # minimum market cap (USD) for the universe

SYMBOLS_LIMIT      = 500         # max symbols queried / tracked per month
OBSOLETE_LOOKAHEAD = 60          # drop symbols delisting within this many days

HISTORY_PERIODS    = 200         # lookback bars for the trend-filter SMA
HISTORY_MODE       = '1d'        # daily bars
SYMBOL_MARKET      = sid(8554)   # market proxy for the trend filter (presumably SPY — confirm sid)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up state and the monthly schedule: allocate -> sell -> buy -> log.

    Steps fire on the first trading day of each month, staggered so sells
    complete an hour before buys and logging runs at the close.
    """
    context.alloc = pd.Series()   # target weights, produced by allocate()
    context.score = pd.Series()   # composite value scores, refreshed monthly
    context.last_month = -1       # month guard used by before_trading_start

    schedule_function(allocate,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=2))
    schedule_function(execute_sell,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=2))
    schedule_function(execute_buy,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=1))
    schedule_function(info,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close())

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context):
    """Monthly universe refresh and composite score computation.

    Queries Morningstar fundamentals for the SYMBOLS_LIMIT largest NYSE
    primary common shares above MARKETCAP_LIMIT (no ADRs/GDRs), sums
    three metric ranks — EV/EBITDA ascending, sales yield descending,
    ROIC descending — into context.score (lower is better), and tracks a
    universe of current positions, the market proxy, the current
    allocation, plus the best-scoring remaining names.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # selected top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: securities as rows, metrics as columns
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    #####
    # must_have: symbols the algo cannot lose price data for; the rest of
    # the universe budget goes to the best-scoring remaining names.
    must_have = set(context.portfolio.positions.keys()) | \
                set([SYMBOL_MARKET]) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    #print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Build the monthly target allocation.

    Equal-weights the top POSITION_COUNT candidates by composite value
    score (lower is better). Candidates are dropped when they have no
    current price data, are about to be delisted, or when the market
    proxy trades below its 200-day SMA (in which case every candidate is
    dropped and the book goes to cash). Result lands in context.alloc.
    """
    scores = context.score
    keep = set(scores.index)

    # Drop symbols with no price in the current data snapshot.
    no_price = {s for s in keep if s not in data}

    # Drop symbols delisting within OBSOLETE_LOOKAHEAD days, so the
    # end_date attribute does not introduce lookahead bias.
    cutoff = get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)
    delisting = {s for s in keep if s.end_date <= cutoff}

    # Trend filter: below the long SMA, veto every candidate.
    prices = history(HISTORY_PERIODS, HISTORY_MODE, 'price')[SYMBOL_MARKET]
    bearish = prices[-1] < prices.mean()
    trend_veto = keep if bearish else set()

    keep = keep - no_price - delisting - trend_veto

    # Lowest composite score first; take the best POSITION_COUNT names.
    winners = list(scores[keep].order().index[:POSITION_COUNT])

    context.alloc = pd.Series(1.0 / POSITION_COUNT, index=winners)
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute_sell(context, data):
    """Close every held position that is absent from the target allocation."""
    targets = context.alloc.index
    for held in context.portfolio.positions:
        if held not in targets:
            order_target_percent(held, 0)
    
def execute_buy(context, data):
    """Size each position in the target allocation to its weight."""
    for asset, weight in context.alloc.iteritems():
        order_target_percent(asset, weight)
    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Minute-bar hook: chart account leverage and run info() tracking."""
    record(leverage=context.account.leverage)
    # record(exposure=context.account.net_leverage)
    info(context, data)
    
    
    
    
There was a runtime error.

I would just like to highlight what I think are some truly important take-home messages in this thread. I think Gary’s posts here, here, and within this thread reveal serious deficiencies in the Quantopian backtester. And it looks to me like Gary’s assertions are irrefutable. In many cases, what is displayed in Q’s cumulative returns plot is inaccurate - therefore misleading and untrustworthy.

Here’s what I derive from Gary’s work:

  1. Negative cash flows are frequently obscure and are not accounted for in the returns value and metrics.
  2. The Quantopian returns value in the IDE uses initial cash (instead of drawdown) and ignores any negative cash.
  3. Backtests are using money they don’t have.
  4. The backtest returns and metrics will only be accurate to the degree the algorithm spends 100% of initial capital at some point and no more.
  5. Negative cash can increase in magnitude while leverage remains around 1. (The leverage value can’t be trusted either.)
  6. For many algorithms, true performance will be worse than what appears in the GUI; for others, the user’s code would actually outperform what the chart indicates. That the UI can reflect returns much better than live trading would produce should concern every developer.
  7. The ability to follow the algorithm’s use of capital and monitor unused cash is critical.
  8. The “MaxSpentReturns” value in the custom chart is the one we should rely on while developing code, not the value in the backtester.
  9. All algorithms should take margin into account.

For simulations, trading profits and the trading decision model tend to capture most of our attention. But experimental results - in terms of real portfolio end values - must accurately account for margin, drawdown, and leverage. Quantopian's backtester apparently doesn't.

This is not a small issue.


Bringing the margin usage to light and addressing it resulted in a profit increase of more than 300 percentage points.

Is there a way to move the portfolio into a specific ETF (VGLT, for example) or stock, instead of going to cash, when the SMA sell is triggered?

Here is the last version of the algo from a couple of messages above with the move to bonds implemented instead of going into cash when the overall market signal is negative.

The return is higher compared with the original version and the beta is lower, but the max drawdown is also significantly higher.

Perhaps one can try experimenting with a different bond instead of the one used (TLT), or even another ETF or stock, as suggested by @William Spratt.

Clone Algorithm
36
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
#
# Quantapolis - Value Example (long only)
# (http://www.quantapolis.com)
#
# This is a simple implementation of a long-only value strategy for a
# concentrated portfolio of US large-cap stocks on NYSE, based on the acquirers
# multiple (EV/EBITDA) with monthly rebalancing.
#
# Version:
#   1.0 (2015-08-18)
#
# Implementation credit:
#   Origin ([email protected])
#
#

def info(context, data):
    ''' Custom chart and/or log of maxspent returns and related information.

    Tracks, via record() and/or log.info():
      MaxLvrg    - highest account leverage seen so far
      QRet       - Quantopian-style return as a percent of starting cash
      MaxSpntRet - return relative to the maximum cash actually spent
                   (starting cash minus the lowest cash level reached)
      PnL        - profit and loss in dollars
      CashLow    - new lowest cash level reached

    Toggle the record_* flags to choose what is charted; log_method picks
    when a log line is emitted ('daily', or 'cash_low' for only when a
    new cash low occurs).
    '''
    record_max_lvrg = 1      # maximum leverage encountered
    record_q_return = 1      # Quantopian return value (percentage)
    record_maxspent = 1      # maxspent returns (percentage)
    record_pnl      = 0      # profit-n-loss
    record_cash_low = 1      # new lowest cash levels

    # Also log to the logging window conditionally
    log_method = 'cash_low'  # 'daily' or 'cash_low' for only when new cash low

    c = context                          # For brevity
    new_cash_low = 0                     # To trigger logging in cash_low case
    date = str(get_datetime().date())    # To trigger logging in daily case

    # Lazy one-time init of the persistent trackers on the context object.
    if 'cash_low' not in c:  # init these instead in initialize for better efficiency
        c.cash_low = c.portfolio.starting_cash
        c.date_prv = date
        c.max_lvrg = 0

    if c.portfolio.cash < c.cash_low:    # New cash low
        new_cash_low = 1
        c.cash_low   = c.portfolio.cash
        if record_cash_low:
            record(CashLow = int(c.cash_low))

    maxspent_rtrn = 0        # Returns based on maximum spent
    q_rtrn        = 0        # Returns by Quantopian
    profit_loss   = 0        # Profit-n-loss
    cash_max_used = c.portfolio.starting_cash - c.cash_low

    if record_max_lvrg:      # Maximum leverage
        if c.account.leverage > c.max_lvrg:
            c.max_lvrg = c.account.leverage
            record(MaxLvrg = c.max_lvrg)

    if record_q_return:      # Quantopian returns to compare to maxspent returns curve
        start  = context.portfolio.starting_cash  # starting_cash is king
        q_rtrn = 100 * (context.portfolio.portfolio_value - start) / start
        record(QRet = q_rtrn)

    if record_maxspent:      # Returns based on amount actually spent
        if cash_max_used != 0:     # Avoid zero-divide
            maxspent_rtrn = 100 * context.portfolio.pnl / cash_max_used
            record(MaxSpntRet = maxspent_rtrn)

    if record_pnl:           # "Profit and Loss" in dollars
        profit_loss = context.portfolio.pnl
        record(PnL = profit_loss)

    # Emit one log line per trigger; each field is blank when its
    # record_* flag is off.
    if log_method == 'cash_low' and new_cash_low \
      or log_method == 'daily' and c.date_prv != date:
        mxlv  = 'MaxLvrg '    + '%.1f' % c.max_lvrg    if record_max_lvrg else ''
        qret  = 'QRet '       + '%.2f' % q_rtrn        if record_q_return else ''
        mxspt = 'MaxSpntRet ' + '%.2f' % maxspent_rtrn if record_maxspent else ''
        pnl   = 'PnL '        + '%.0f' % profit_loss   if record_pnl      else ''
        csh   = 'CashLow '    + '%.0f' % c.cash_low    if record_cash_low else ''
        log.info('{} {} {} {} {}'.format(mxlv, qret, mxspt, pnl, csh))

    c.date_prv = date

    # Disabled experiment from the original author (kept for reference).
    '''
    w_shorting = 0  
    # Think of shorting like an expense to be able to include it? 
    #   Valid? You could help solve the puzzle of shorting wrt maxspent_rtrn.
    if context.portfolio.positions_value < 0:
        w_shorting = c.cash_low + context.portfolio.positions_value
        if w_shorting < c.cash_low:
            c.cash_low = w_shorting
    '''

import pandas as pd
import datetime

#
# Configuration
#

POSITION_COUNT     = 20          # number of equal-weight positions held
MARKETCAP_LIMIT    = 5e9         # minimum market cap (USD) for the universe

SYMBOLS_LIMIT      = 500         # max symbols queried / tracked per month
OBSOLETE_LOOKAHEAD = 60          # drop symbols delisting within this many days

HISTORY_PERIODS    = 200         # lookback bars for the trend-filter SMA
HISTORY_MODE       = '1d'        # daily bars

SYMBOL_MARKET      = sid(8554)   # market proxy for the trend filter (presumably SPY — confirm sid)
SYMBOL_BOND        = sid(23921)  # bond ETF held in downtrends (TLT, per the thread)

#
# Setup
# - Schedule allocation and execution
#
def initialize(context):
    """Set up state and the monthly schedule: allocate -> sell -> buy -> log.

    Steps fire on the first trading day of each month, staggered so sells
    complete an hour before buys and logging runs at the close.
    """
    context.alloc = pd.Series()   # target weights, produced by allocate()
    context.score = pd.Series()   # composite value scores, refreshed monthly
    context.last_month = -1       # month guard used by before_trading_start

    schedule_function(allocate,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=2))
    schedule_function(execute_sell,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=2))
    schedule_function(execute_buy,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close(hours=1))
    schedule_function(info,
                      date_rule=date_rules.month_start(),
                      time_rule=time_rules.market_close())

#
# Symbol selection
# - Only query once a month (for backtest performance)
# - Query fundamentals database for largest companies above market cap limit
# - Ensure data available for all active positions
#
def before_trading_start(context, data):
    """Monthly universe refresh and composite score computation.

    Queries Morningstar fundamentals for the SYMBOLS_LIMIT largest NYSE
    primary common shares above MARKETCAP_LIMIT (no ADRs/GDRs), sums
    three metric ranks — EV/EBITDA ascending, sales yield descending,
    ROIC descending — into context.score (lower is better), and tracks a
    universe of current positions, the market proxy, the current
    allocation, plus the best-scoring remaining names.
    """
    # only query database at the beginning of the month
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month
    
    # selected top K largest companies on NYSE
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic
        )
        .filter(fundamentals.valuation.market_cap >= MARKETCAP_LIMIT)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(SYMBOLS_LIMIT)
    )
    
    # transpose: securities as rows, metrics as columns
    dft = fundamental_df.T
    
    #####
    # calculate composite value score from 3 metrics
    #####
    score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    score += dft['roic'].rank(ascending=False, na_option='top')
    
    # composite, lower is better, ignore nan
    score = score.dropna().order()
    
    context.score = score
    
    #####
    # ensure data available for all active positions
    #####
    # must_have: symbols the algo cannot lose price data for; the rest of
    # the universe budget goes to the best-scoring remaining names.
    must_have = set(context.portfolio.positions.keys()) | \
                set([SYMBOL_MARKET]) | \
                set(context.alloc.index)
    nice_to_have = set(fundamental_df.columns.values) - must_have
    selected = (list(must_have) + \
                list(score[nice_to_have].order().index))[:SYMBOLS_LIMIT]

    #print "tracking %d symbols" % len(selected)
    
    update_universe(selected)
    
#
# Allocation 
# - Retrieve acquirers multiple for all tracked stocks
# - Filter candidates for missing data and end date
# - Equal allocation to top k candidates
#
def allocate(context, data):
    """Build the monthly target allocation (stocks-or-bond version).

    In an uptrend (market proxy strictly above its 200-day SMA) the book
    is split equally across the POSITION_COUNT best-scoring candidates
    that pass the data/delisting filters; otherwise the whole allocation
    goes to SYMBOL_BOND. Result lands in context.alloc.

    Fix: the SMA comparison was previously evaluated three times with
    mixed operators (`<` for the candidate filter, `>` twice for the
    selection), which was redundant and inconsistent at exact equality.
    It is now computed once; the resulting allocation is unchanged —
    whenever the market is not strictly above its SMA, the bond is held.
    """
    value = context.score
    candidates = set(value.index)

    # Drop symbols without current price data.
    ignore_data = set([s for s in candidates if s not in data])

    # Drop symbols delisting within OBSOLETE_LOOKAHEAD days, so the
    # end_date attribute does not introduce lookahead bias.
    cutoff = get_datetime() + datetime.timedelta(days=OBSOLETE_LOOKAHEAD)
    ignore_obsolete = set([s for s in candidates if s.end_date <= cutoff])

    candidates = candidates - ignore_data - ignore_obsolete

    # Single SMA comparison decides stocks vs. bond.
    h = history(HISTORY_PERIODS, HISTORY_MODE, 'price')[SYMBOL_MARKET]
    uptrend = h[-1] > h.mean()

    if uptrend:
        # Lowest composite score is best; equal weight across the top k.
        topk = list(value[candidates].order().index[:POSITION_COUNT])
        n = POSITION_COUNT
    else:
        topk = [SYMBOL_BOND]
        n = 1

    context.alloc = pd.Series(1.0 / n, index=topk)
    
#
# Execution
# - Sell positions dropping out of top k
# - Buy and rebalance positions in top k
#
def execute_sell(context, data):
    """Close every held position that is absent from the target allocation."""
    targets = context.alloc.index
    for held in context.portfolio.positions:
        if held not in targets:
            order_target_percent(held, 0)
    
def execute_buy(context, data):
    """Size each position in the target allocation to its weight."""
    for asset, weight in context.alloc.iteritems():
        order_target_percent(asset, weight)
    
#
# Logging
# - Track live account leverage
#
def handle_data(context, data):
    """Minute-bar hook: chart account leverage and run info() tracking."""
    record(leverage=context.account.leverage)
    # record(exposure=context.account.net_leverage)
    info(context, data)
    
    
    
    
There was a runtime error.

That worked great thank you Tim.

Does anyone have a version of the above algorithm that has been updated to work with the new API?

@William,

Here's a (modified) version for Q2.

Clone Algorithm
126
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month

import pandas as pd
import datetime

        
def initialize(context):
    """Set up state and the monthly rebalance schedule (Q2 API)."""
    set_do_not_order_list(security_lists.leveraged_etf_list)

    schedule_function(
        rebalance,
        date_rule=date_rules.month_start(),
        time_rule=time_rules.market_open(minutes=30),
    )

    context.m = sid(24744)   # market proxy for the trend test
    context.f = sid(23921)   # fallback instrument held in downtrends

    context.last_month = -1  # month guard used by before_trading_start
    context.start = True     # forces an immediate rebalance on the first bar
    
    
def before_trading_start(context, data):
    """Monthly universe/score refresh (Q2 API, runs once per month).

    Pulls the 500 largest NYSE-listed primary common shares with market
    cap >= $5B — excluding OTC listings, ADRs/GDRs, when-issued shares,
    sub-$1 stocks and limited partnerships — then sums three metric ranks
    into context.score (lower is better): EV/EBITDA ascending, sales
    yield descending, ROIC descending.
    """
    # Skip the expensive fundamentals query until the month changes.
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month

    df = get_fundamentals(
        query(fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)  
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(fundamentals.valuation.market_cap >= 5.0E+09)
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(500))
    
    # transpose: securities as rows, metrics as columns
    dft = df.T
    
    context.score = pd.Series(0, index=dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    context.score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    context.score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    context.score += dft['roic'].rank(ascending=False, na_option='top')

    
def rebalance(context, data):
    """Monthly rebalance (Q2 API).

    Keeps only names whose 100-day median dollar volume tops $1M, goes
    long the ten best composite scores equally weighted, and swaps the
    whole book into the fallback asset when the market proxy's 10-day
    median price sits below its 100-day median.
    """
    # Liquidity screen: median(price * volume) over 100 days must exceed $1M.
    prices = data.history(context.score.index, 'price', 100, '1d')
    volumes = data.history(context.score.index, 'volume', 100, '1d')
    dollar_volume = (prices * volumes).median()
    liquid = dollar_volume[dollar_volume > 1.0E+06]

    context.score = context.score[liquid.index]
    longs = context.score.dropna().order().head(10).index

    # Trend test: recent median below the long median -> downtrend,
    # hold the fallback asset instead of stocks.
    proxy = data.history([context.m], 'price', 100, '1d')[context.m]
    if proxy.tail(10).median() < proxy.median():
        longs = [context.f]

    # Close anything no longer wanted (skip restricted / untradable).
    for asset in context.portfolio.positions:
        if asset in security_lists.leveraged_etf_list or asset in longs:
            continue
        if data.can_trade(asset):
            order_target(asset, 0)

    # Equal-weight the long book (skip restricted / untradable).
    weight = 1.0 / len(longs)
    for asset in longs:
        if asset in security_lists.leveraged_etf_list:
            continue
        if data.can_trade(asset):
            order_target_percent(asset, weight)
                             
                             
def handle_data(context, data):
    """Per-bar hook: run the first rebalance immediately, then chart
    account leverage and net exposure every bar.

    The context.start flag triggers one rebalance on the very first bar
    (presumably so a backtest launched mid-month does not idle until the
    next month_start — confirm against the schedule in initialize).

    Fix: removed the dead `else: pass` branch, which had no effect.
    """
    if context.start:
        rebalance(context, data)
        context.start = False

    record(leverage = context.account.leverage,
          exposure = context.account.net_leverage)
    
    
There was a runtime error.

@Tim

This is incredible, can't thank you enough. Excellent code, really appreciate it.

Thanks, William, I am glad you like the code.

If you could find what fundamentals-based criteria (ranking) to use for shorting, so that the strategy could be made long/short , i.e. properly hedged, it would be really great ... :-)

A variation on the theme that shows good returns for small initial capital.

Clone Algorithm
74
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month

import numpy as np
import pandas as pd

        
def initialize(context):
    """Set up state and schedules for the small-capital variation.

    Selection runs monthly; order placement runs every day ten minutes
    later, continually pulling positions back to target.
    """
    set_asset_restrictions(security_lists.restrict_leveraged_etfs)

    schedule_function(
        rebalance,
        date_rule=date_rules.month_start(),
        time_rule=time_rules.market_open(minutes=20),
    )
    schedule_function(
        buy,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_open(minutes=30),
    )

    context.m = sid(24744)   # market proxy for the trend test
    context.f = sid(23921)   # fallback instrument held in downtrends

    context.longs = None     # long list, set by rebalance()

    context.last_month = -1  # month guard used by before_trading_start
    context.start = True     # forces selection + orders on the first bar
    
    
def before_trading_start(context, data):
    """Monthly universe/score refresh (runs once per calendar month).

    Pulls the 1500 largest NYSE-listed primary common shares — excluding
    OTC listings, ADRs/GDRs, when-issued shares, sub-$1 stocks and
    limited partnerships; note there is no explicit market-cap floor in
    this variation — then sums three metric ranks into context.score
    (lower is better): EV/EBITDA ascending, sales yield descending,
    ROIC descending.
    """
    # Skip the expensive fundamentals query until the month changes.
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month

    df = get_fundamentals(
        query(fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)  
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(1500))
    
    # transpose: securities as rows, metrics as columns
    dft = df.T
    
    context.score = pd.Series(0, index = dft.index)
    
    # EV/EBITDA, in-order (lower is better), nan goes last
    context.score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')
    
    # sales yield, inverse (higher is better), nan goes last
    context.score += dft['sales_yield'].rank(ascending=False, na_option='top')
    
    # return on invested capital, inverse (higher is better), nan goes last
    context.score += dft['roic'].rank(ascending=False, na_option='top')

    
def rebalance(context, data):

    """
    
    P = data.history(context.score.index, 'price', 100, '1d')
    V = data.history(context.score.index, 'volume', 100, '1d')
    
    w = (P * V).median()
    w = w[w > 10E+06]
    
    context.score = context.score[w.index]
    
    """
    
    context.longs = context.score.dropna().order().head(10).index

    P = data.history(context.m, 'price', 100, '1d')
    if P.tail(10).median() < P.median():
        context.longs = [context.f] 

        
def buy(context, data):
    """Daily order pass: close positions outside context.longs and
    equal-weight everything in it, then chart leverage and exposure.

    Fix: guard against context.longs still being None (its value from
    initialize, before the first rebalance has run) — iterating or
    testing membership on None raised a TypeError in that case.
    """
    if context.longs is None:
        return

    # Close anything not on the long list (skip untradable assets).
    for s in context.portfolio.positions:
        if s in context.longs:
            continue
        if not data.can_trade(s):
            continue        
        order_target(s, 0)    

    # Equal-weight the long book (skip untradable assets).
    for s in context.longs:
        if not data.can_trade(s):
            continue
        order_target_percent(s, 1.0 / len(context.longs))

    record(leverage = context.account.leverage,
          exposure = context.account.net_leverage)
    
    
def handle_data(context, data):
    """First-bar bootstrap: run selection and ordering immediately via
    the context.start flag, then hand control back to the schedules."""
    if not context.start:
        return
    rebalance(context, data)
    buy(context, data)
    context.start = False



    
    
There was a runtime error.

If we reduce the number of securities held at any one time to 5, we get the following interesting, but highly volatile result.

Clone Algorithm
74
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month

import numpy as np
import pandas as pd

        
def initialize(context):
    """One-time setup: restrictions, reference assets, state, and schedules."""
    set_asset_restrictions(security_lists.restrict_leveraged_etfs)

    context.m = sid(24744)  # market proxy driving the trend filter
    context.f = sid(23921)  # bond ETF (TLT per the thread) held defensively

    context.longs = None
    context.last_month = -1  # forces the first before_trading_start to query
    context.start = True     # handle_data performs one immediate rebalance

    # Monthly stock selection, 20 minutes after the open.
    schedule_function(rebalance,
        date_rule=date_rules.month_start(),
        time_rule=time_rules.market_open(minutes = 20))

    # Daily order maintenance, 30 minutes after the open.
    schedule_function(buy,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_open(minutes = 30))
    
    
def before_trading_start(context, data):
    """Recompute the composite value score, once per calendar month.

    Quantopian calls this hook daily; the month guard limits the expensive
    fundamentals query to the first trading day of each month.  The result
    is ``context.score``: the sum of three per-metric ranks, lower = better.
    """
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month

    # Universe: top 1500 NYSE names by market cap, excluding pink sheets,
    # ADRs/GDRs, LPs, non-primary share classes, and sub-$1 stocks.
    df = get_fundamentals(
        query(fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(1500))

    # get_fundamentals returns metrics as rows and securities as columns;
    # transpose so each row is one security.
    dft = df.T

    context.score = pd.Series(0, index = dft.index)

    # EV/EBITDA, in-order (lower is better), nan goes last
    context.score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')

    # sales yield, inverse (higher is better), nan goes last
    # BUG FIX: na_option='top' assigns NaN the smallest (best) rank regardless
    # of 'ascending'; 'bottom' pushes missing values last, matching the stated
    # intent and the EV/EBITDA line above.
    context.score += dft['sales_yield'].rank(ascending=False, na_option='bottom')

    # return on invested capital, inverse (higher is better), nan goes last
    context.score += dft['roic'].rank(ascending=False, na_option='bottom')

    
def rebalance(context, data):
    """Select the 3 best-scoring stocks, or fall back to bonds in a downtrend.

    ``context.score`` is a sum of ranks (lower = better), so sort ascending
    and keep the head.  If the market proxy's 10-day median price is below
    its 100-day median, hold only the bond ETF ``context.f`` instead.
    """
    # FIX: pandas removed Series.order(); sort_values() is the replacement.
    context.longs = context.score.dropna().sort_values().head(3).index

    P = data.history(context.m, 'price', 100, '1d')
    if P.tail(10).median() < P.median():
        context.longs = [context.f]

        
def buy(context, data):
    """Daily order maintenance: close non-targets, equal-weight the targets."""
    # First pass: sell holdings that are no longer on the long list.
    for asset in context.portfolio.positions:
        if asset in context.longs or not data.can_trade(asset):
            continue
        order_target(asset, 0)

    # Second pass: target an equal slice of the portfolio in each long.
    slice_pct = 1.0 / len(context.longs)
    for asset in context.longs:
        if data.can_trade(asset):
            order_target_percent(asset, slice_pct)

    record(leverage = context.account.leverage,
          exposure = context.account.net_leverage)
    
    
def handle_data(context, data):
    """Kick off one immediate rebalance-and-buy on the very first bar."""
    if not context.start:
        return
    rebalance(context, data)
    buy(context, data)
    context.start = False


    
    
There was a runtime error.

If I understand the code correctly, the last ones have a bug — when the short-term median is above the long-term median (the "trend filter", if I understand correctly), the algo just stops rebalancing and keeps the old position. I would guess that this is not the original point of the trend filter.

Tim, I've been trading this one in a demo account and its results are really good. The new variation is fun to play with, too. I have a question for you: since we set an equal target weight every day, I think it would be interesting to apply a minimum-variance principle to determine the weights instead. I was thinking of borrowing the idea from Grant's MVP-with-constraints approach for this.

The code rebalances monthly within the "rebalance" procedure, which runs 20 min after the opening of the exchange.

On a monthly basis, only at the beginning of the trading day, before_trading_start also runs (by default, it tries to run every day) and delivers a fresh set of ranked securities. "Rebalance" then sorts them and keeps the top few (actually, the ones with the minimum value of the combined ranks).

If, however, the short term median is below the long-term one, the resulting list of securities to be ordered is replaced by a single bond ETF.

Throughout the month the selected securities are then ordered in the "buy" procedure, which runs daily (at 30 mins after open).

@Mikko,

I checked the "Daily positions and gains" list in the backtest data and the positions do change over time, as intended.

No Borrowing.

Clone Algorithm
121
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month

import numpy as np
import pandas as pd

        
def initialize(context):
    """Algorithm setup: asset restrictions, reference sids, state, schedules."""
    set_asset_restrictions(security_lists.restrict_leveraged_etfs)

    context.m = sid(24744)  # market proxy used by the trend filter
    context.f = sid(23921)  # defensive bond ETF (TLT per the thread)

    context.longs = None
    context.last_month = -1  # ensures the first fundamentals query runs
    context.start = True     # triggers one immediate cycle in handle_data

    # Stock selection on the first trading day of each month.
    schedule_function(rebalance,
        date_rule=date_rules.month_start(),
        time_rule=time_rules.market_open(minutes = 20))

    # Order maintenance every day, shortly after the open.
    schedule_function(buy,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_open(minutes = 30))
    
    
def before_trading_start(context, data):
    """Recompute the composite value score, once per calendar month.

    Called daily by the platform; the month guard restricts the expensive
    fundamentals query to the first trading day of each month and stores
    the result in ``context.score`` (sum of three ranks, lower = better).
    """
    month = get_datetime().month
    if context.last_month == month:
        return
    context.last_month = month

    # Universe: top 1500 NYSE names by market cap, excluding pink sheets,
    # ADRs/GDRs, LPs, non-primary share classes, and sub-$1 stocks.
    df = get_fundamentals(
        query(fundamentals.valuation_ratios.ev_to_ebitda,
            fundamentals.valuation_ratios.sales_yield,
            fundamentals.operation_ratios.roic)
        .filter(fundamentals.company_reference.primary_exchange_id.in_(["NYSE", "NYS"]))
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(1500))

    # Transpose: metrics become columns, securities become rows.
    dft = df.T

    context.score = pd.Series(0, index = dft.index)

    # EV/EBITDA, in-order (lower is better), nan goes last
    context.score += dft['ev_to_ebitda'].rank(ascending=True, na_option='bottom')

    # sales yield, inverse (higher is better), nan goes last
    # BUG FIX: na_option='top' gave NaN the smallest (best) rank; 'bottom'
    # pushes missing values last, as the comment and the EV/EBITDA line intend.
    context.score += dft['sales_yield'].rank(ascending=False, na_option='bottom')

    # return on invested capital, inverse (higher is better), nan goes last
    context.score += dft['roic'].rank(ascending=False, na_option='bottom')

    
def rebalance(context, data):
    """Select the 5 best-scoring stocks, or fall back to bonds in a downtrend.

    ``context.score`` sums per-metric ranks (lower = better).  If the market
    proxy's 10-day median price is below its 100-day median, the long list
    collapses to the single bond ETF ``context.f``.
    """
    # FIX: pandas removed Series.order(); sort_values() is the replacement.
    context.longs = context.score.dropna().sort_values().head(5).index

    P = data.history(context.m, 'price', 100, '1d')
    if P.tail(10).median() < P.median():
        context.longs = [context.f]

        
def buy(context, data):
    """Sell non-targets first; defer all buys while sell orders remain open.

    Waiting for the sells to fill before buying is what keeps this version
    from borrowing (no margin use).
    """
    for asset in context.portfolio.positions:
        if asset in context.longs or not data.can_trade(asset):
            continue
        order_target(asset, 0)
        print (" Selling stocks: " + str(asset))

    # Buys (and the record call) wait until every pending order has filled.
    if get_open_orders():
        return

    slice_pct = 1.0 / len(context.longs)
    for asset in context.longs:
        if not data.can_trade(asset):
            continue
        order_target_percent(asset, slice_pct)
        print(" Buying stocks: "+str(asset) + " shares: "+ str(context.portfolio.positions[asset].amount))

    record(leverage = context.account.leverage,
          exposure = context.account.net_leverage)
    
    
def handle_data(context, data):
    """Perform a single immediate rebalance-and-buy on the first bar."""
    if not context.start:
        return
    rebalance(context, data)
    buy(context, data)
    context.start = False


    
    
There was a runtime error.

Nguyen,

Many thanks for the no-borrowing version, it is essential, I suppose, for trading on Robinhood, for example.

As for minimum variance weighting of the positions (or inverse volatility weighting), you can of course try it, but with a small number of positions held it won't make much of a difference. In fact, much wiser men than me have said (sorry, cannot give you the exact quote) that with stocks, equal weighting is usually perfectly adequate. This is borne out, for example, by the fact that an equal-weight S&P 500 portfolio will outperform a capital-weighted one (at least in the long run).

The story is different with futures, mind you, according to my sources and my own experience on Quantiacs. Equal risk weighting is essential in this case.

I should point out that using a universe of the largest 1500 stocks by market capitalization is essential for a good performance of the algorithm -- not 500, not 1000 and not 2000 or 2500. This may point at the instability of the approach, but it can also be a legitimate feature, I think.

I've been running this for the past 4 days. The backtest is purchasing stocks, but the live paper trade is only trading TLT? Why the difference?

line 74 is deprecated - how can you update to sort_values?

I wonder why changing the initial capital of the last algo to 100000 halves the return...?
Also, why are the results of the initial algos' backtests so different from now (Sharpe > 5 etc. before, while now all are < 1.4), and why do some drawdowns look positive and others negative? I apologize if I'm asking something obvious — I'm a newbie here. Thanks.

For Tyler: line 74 just change order() to sort_values() Ciao!!!

It seems that starting with 100000 instead of 10000, after a while the algo doesn't fill all orders (less capital invested means less gain).
Any ideas??

@Michele Vianello

Liquidity issues, since the algo invest in only 5 assets at any given moment.

With 100000 of initial capital, try increasing their number to 10 or 20.

@Tyler Wilson

If the algo is staying fully invested in TLT, it's because the market momentum filter is telling it to do so (i.e., the filter value is negative).

@ Tim Vidmar

Tim, I have been trading this algorithm happily since you responded last year in May to my question thank you again for that, I was wondering if you would be interested in coding up a strategy based on your above code as well as a momentum based code combined similar to the strategies used by Wes Gray at Alpha Architects. I unfortunately have no coding skill myself but love markets and seeing how various strategies perform.

The idea would be to have one single code that placed 50% of capital in your value focused strategy above here and then 50% of capital in a momentum strategy based on the theories and research here: http://blog.alphaarchitect.com/2016/03/22/why-investors-should-combine-value-and-momentum/#gs.SUvoKXk

I have found other momentum codes here that may work well that even could be "glued" on if that was possible. https://www.quantopian.com/posts/value-momentum-strategy

That is interesting. Currently , strategy is doing well in live performance.

I'm a newcomer to algo trading so this may be a newbie question, but looking at the algorithm and the returns suggested in the backtesting here why are we not all putting money into this? Is there a serious risk that I am missing here?

  1. There is an issue with rebalance where you get into negative cash — it would require margin.
  2. Risk - this algo has higher risk than others that utilize ETFs as this one trades between 5-3 stocks depending on how you set it up. If one goes bust thats a 33% drawdown in and of itself.
  3. The use of TLT as a "safe haven" - This is an unknown in the future as interest rates have been zero, or near zero from the beginning of the backtest
  • Just some insights from my POV, I'm sure there are smarter people here that might have other views.

Interesting, thank you for your thoughts on this strategy. I thought the negative cash issue was resolved with the "no borrowing" version? Thats the one I was looking at.

Has anyone been trading this strategy live that has any other insight or thoughts?

The issue with the no borrowing is that the leverage is calculated at the end of the day. I ran Blue's PvR routine, which captures every minute data for max leverage.

The algorithm issues buy and sell (rebalance) orders at the same time - this would not be allowed by Robinhood/IB unless you had margin - as the sale hasn't happened yet - thus causing leverage which is then rectified by the close of the day. Making that backtest look like there is no leverage.

I see. I guess without a margin account you would have to wait for funds to clear anyway so backtesting this without margin doesn't make much sense. Is that right? Is there even a version of this possible without margin?

Can someone explain this line:

  context.longs = context.score.dropna().order().head(5).index

    P = data.history(context.m, 'price', 100, '1d')  
    if P.tail(10).median() < P.median():  
        context.longs = [context.f]  

Not sure what a lot of these components are doing, and would really appreciate some more line comments.

@adam

--this is defining which stocks to go long based on top 5 of the score. check the code above to see how the score is defined
context.longs = context.score.dropna().order().head(5).index
--this is basically checking to see if the SPY 10 day median is less than the 100 day median. If so, change the longs to context.f (TLT)
P = data.history(context.m, 'price', 100, '1d')
if P.tail(10).median() < P.median():
context.longs = [context.f]

Is this easily updated now that get fundamentals is depricated? Can someone take a stab at it?

Here's an updated version.

Clone Algorithm
69
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import pandas as pd
import numpy as np
import scipy as sp

from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline import factors, filters, classifiers
from quantopian.pipeline.data import Fundamentals  
from quantopian.pipeline.filters import QTradableStocksUS
from quantopian.pipeline.factors.fundamentals import MarketCap


def initialize(context):
    """Set up cost models, schedules, reference assets, and the score pipeline."""
    set_slippage(slippage.FixedBasisPointsSlippage())
    set_commission(commission.PerShare(cost = 0.001, min_trade_cost = 0))

    # Monthly selection, daily order maintenance, end-of-day recording.
    schedule_function(rebalance,
        date_rules.month_start(),
        time_rules.market_open(minutes = 20))
    schedule_function(buy,
        date_rules.every_day(),
        time_rules.market_open(minutes = 30))
    schedule_function(display,
        date_rules.every_day(),
        time_rules.market_close())

    context.m = sid(24744)  # market proxy for the trend filter
    context.f = sid(23921)  # defensive bond ETF (TLT per the thread)

    context.score = None
    context.longs = []
    context.start = True

    # Composite value score: cheap EV/EBITDA, high sales yield, high ROIC.
    # Each .rank() is a pipeline factor rank; lower sum = more attractive.
    ev_ebitda = Fundamentals.ev_to_ebitda.latest
    sales_yield = Fundamentals.sales_yield.latest
    roic = Fundamentals.roic.latest
    composite = (ev_ebitda.rank(ascending=True)
                 + sales_yield.rank(ascending=False)
                 + roic.rank(ascending=False))

    # Universe: 1500 largest tradable US stocks by market cap.
    universe = MarketCap().top(1500, mask=QTradableStocksUS())

    pipe = attach_pipeline(Pipeline(), name='pipe')
    pipe.add(composite, 'score')
    pipe.set_screen(universe)


def before_trading_start(context, data):
    """Pull today's composite value scores from the attached pipeline."""
    context.score = pipeline_output('pipe')['score']

    
def rebalance(context, data):
    """Refresh the long list (top 20 by composite rank) with a trend override.

    Lower score = better value, so sort ascending and keep the head.  When
    the market proxy's 10-day median price falls below its 100-day median,
    hold only the defensive bond ETF instead.
    """
    ranked = context.score.dropna().sort_values()
    context.longs = ranked.head(20).index

    prices = data.history(context.m, 'price', 100, '1d')
    if prices.tail(10).median() < prices.median():
        context.longs = [context.f]

        
def buy(context, data):
    """Daily maintenance: exit non-targets, then deploy free cash evenly.

    Uses order_value against remaining cash (rather than target percents),
    and skips any asset that already has an open order.
    """
    # Unwind holdings that dropped out of the target list.
    for asset in context.portfolio.positions:
        if asset in context.longs or not data.can_trade(asset):
            continue
        if get_open_orders(asset):
            continue
        order_target(asset, 0)

    # Spread available cash across the targets.
    for asset in context.longs:
        if not data.can_trade(asset) or get_open_orders(asset):
            continue
        order_value(asset, context.portfolio.cash / len(context.longs))
       
    
def display(context, data):
    """Record leverage and net exposure for the backtest chart at each close."""
    acct = context.account
    record(leverage = acct.leverage, exposure = acct.net_leverage)
    
    
def handle_data(context, data):
    """On the first bar only, run one selection/order/record cycle."""
    if not context.start:
        return
    rebalance(context, data)
    buy(context, data)
    display(context, data)
    context.start = False
There was a runtime error.

And a long-term backtest, for what it's worth.

Clone Algorithm
69
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
import pandas as pd
import numpy as np
import scipy as sp

from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline import factors, filters, classifiers
from quantopian.pipeline.data import Fundamentals  
from quantopian.pipeline.filters import QTradableStocksUS
from quantopian.pipeline.factors.fundamentals import MarketCap


def initialize(context):
    """Configure costs, scheduling, reference sids, state, and the pipeline."""
    set_slippage(slippage.FixedBasisPointsSlippage())
    set_commission(commission.PerShare(cost = 0.001, min_trade_cost = 0))

    # Selection monthly; order maintenance daily; recording at the close.
    schedule_function(rebalance,
        date_rules.month_start(),
        time_rules.market_open(minutes = 20))
    schedule_function(buy,
        date_rules.every_day(),
        time_rules.market_open(minutes = 30))
    schedule_function(display,
        date_rules.every_day(),
        time_rules.market_close())

    context.m = sid(24744)  # trend-filter market proxy
    context.f = sid(23921)  # defensive bond ETF (TLT per the thread)

    context.score = None
    context.longs = []
    context.start = True

    # Composite value factor: sum of three ranks, lower = cheaper/better.
    value_rank = Fundamentals.ev_to_ebitda.latest.rank(ascending=True)
    yield_rank = Fundamentals.sales_yield.latest.rank(ascending=False)
    quality_rank = Fundamentals.roic.latest.rank(ascending=False)
    composite = value_rank + yield_rank + quality_rank

    pipe = attach_pipeline(Pipeline(), name='pipe')
    pipe.add(composite, 'score')
    pipe.set_screen(MarketCap().top(1500, mask=QTradableStocksUS()))


def before_trading_start(context, data):
    """Cache the pipeline's composite value score for today's session."""
    context.score = pipeline_output('pipe')['score']

    
def rebalance(context, data):
    """Take the 20 lowest (best) composite scores; go defensive in a downtrend."""
    best = context.score.dropna().sort_values().head(20)
    context.longs = best.index

    # 10-day vs 100-day median price of the market proxy.
    prices = data.history(context.m, 'price', 100, '1d')
    if prices.tail(10).median() < prices.median():
        context.longs = [context.f]

        
def buy(context, data):
    """Close positions outside the long list, then split free cash across it.

    Assets with open orders are skipped in both passes to avoid stacking
    duplicate orders.
    """
    for asset in context.portfolio.positions:
        if asset in context.longs:
            continue
        if not data.can_trade(asset) or get_open_orders(asset):
            continue
        order_target(asset, 0)

    for asset in context.longs:
        if not data.can_trade(asset) or get_open_orders(asset):
            continue
        order_value(asset, context.portfolio.cash / len(context.longs))
       
    
def display(context, data):
    """Plot the account's leverage and net exposure once per day."""
    acct = context.account
    record(leverage = acct.leverage, exposure = acct.net_leverage)
    
    
def handle_data(context, data):
    """Bootstrap: one immediate selection, order, and record on the first bar."""
    if not context.start:
        return
    rebalance(context, data)
    buy(context, data)
    display(context, data)
    context.start = False
There was a runtime error.