Back to Community
Equity Long-Short

This is the result of a couple of hours playing around trying to make an equity long-short algorithm. The returns do not survive costs/slippage, and it seems very sensitive to rebalancing frequency, but perhaps it's helpful to someone.

Clone Algorithm
886
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
from pandas import Series, DataFrame
import pandas as pd
import statsmodels
import statsmodels.api 
import datetime as dt
import datetime as datetime
import numpy as np
from brokers.ib import VWAPBestEffort

def initialize(context):
    """Set up the algorithm: benchmark security, weekly rebalance schedule,
    and frictionless cost models (zero slippage/commission)."""
    # IWM (Russell 2000 ETF) is used as the beta benchmark, not SPY.
    context.benchmarkSecurity = symbol('IWM')
    # Rebalance once a week, one minute after the first open of the week.
    schedule_function(func=regular_allocation,
                      date_rule=date_rules.week_start(),
                      time_rule=time_rules.market_open(minutes=1),
                      half_days=True
                      )
    # Default schedule: record bookkeeping stats every trading day.
    schedule_function(bookkeeping)
    # Zero slippage and commission -- the quoted returns do NOT survive
    # realistic trading costs (see the post text).
    set_slippage(slippage.FixedSlippage(spread=0.00))
    set_commission(commission.PerShare(cost=0, min_trade_cost=None))

def bookkeeping(context, data):
    """Record the daily number of long and short positions and gross leverage."""
    positions = context.portfolio.positions
    amounts = [positions[s].amount for s in positions]
    long_count = sum(1 for amt in amounts if amt > 0.0)
    short_count = sum(1 for amt in amounts if amt < 0.0)
    record(long_count=long_count)
    record(short_count=short_count)
    # gross leverage should be 2, net leverage should be 0!
    record(leverage=context.account.leverage)
    
def handle_data(context, data):
    # All trading happens in scheduled functions; nothing to do per bar.
    pass
    
def add_ebit_ev(df):
    """Append an 'ebit_ev' (EBIT / enterprise value) column to df.

    Negative enterprise values are clamped to 1.0 first: a negative EV
    would flip the sign of the ratio and corrupt the ranking.  The frame
    is modified in place and returned.
    """
    # Copy before mutating: assigning into the raw column object is
    # chained assignment (SettingWithCopy territory) in pandas.
    ev = df['enterprise_value'].copy()
    ev[ev < 0.0] = 1.0
    df['enterprise_value'] = ev
    df['ebit_ev'] = df['ebit'] / ev
    return df

def before_trading_start(context):         
    """Daily pre-market screen.

    Queries Morningstar fundamentals for up to 2500 of the largest common
    stocks (after liquidity/listing filters), computes EBIT/EV, and keeps
    the top 100 as context.longs and the bottom 100 as context.shorts.
    The union of both becomes the trading universe.
    """
    df = get_fundamentals(
        query(fundamentals.valuation.market_cap,
              fundamentals. valuation.shares_outstanding,
              fundamentals.income_statement.ebit,
              fundamentals.income_statement.ebit_as_of,
              fundamentals.valuation.market_cap,
              fundamentals.valuation.enterprise_value,
              fundamentals.valuation.enterprise_value_as_of,
              fundamentals.share_class_reference.symbol,
              fundamentals.company_reference.standard_name,
              fundamentals.operation_ratios.total_debt_equity_ratio
              )
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)  
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(fundamentals.valuation.market_cap > 30000000) # cap > $30MM
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc()) 
        .offset(0)
        .limit(2500) 
        ).T
    # .T transposes so the frame is indexed by security, columns by field.
    df = add_ebit_ev(df)
    # Highest EBIT/EV ("cheapest") -> longs; lowest -> shorts.
    context.longs = df.sort(['ebit_ev'],ascending=False)[0:100]
    context.shorts = df.sort(['ebit_ev'],ascending=True)[0:100]
    context.universe = np.union1d(context.longs.index.values, context.shorts.index.values)
    update_universe(context.universe)
    
def regular_allocation(context, data):
    """Weekly rebalance: equal-weight longs and shorts, with the short side
    scaled by the ratio of ex-ante betas so the combined book has an
    expected beta of ~0 versus the benchmark."""
    prices = history(500,'1d','price')
    longs = context.longs.index
    shorts = context.shorts.index
    # now allocate our longs and shorts
    # first sell anything we no longer want
    desiredSids = longs.union(shorts)
    holdingSids = Series(context.portfolio.positions.keys())
    gettingTheBoot = holdingSids[holdingSids.isin(desiredSids) == False]
    for (ix,sid) in gettingTheBoot.iteritems():
        order_target_percent(sid, 0.0)
        
    # calculate naive "beta" of each portfolio
    prices_longs = prices[longs.intersection(prices.columns)]
    prices_shorts = prices[shorts.intersection(prices.columns)]
    prices_spy = prices[context.benchmarkSecurity]
    # Note: a plain sum of per-stock returns across columns (not
    # value-weighted); any constant scale cancels in the beta ratio below.
    rets_long_port = prices_longs.pct_change().sum(axis=1)
    rets_short_port = prices_shorts.pct_change().sum(axis=1)
    rets_spy_port = prices_spy.pct_change()
    beta_span = 250
    # EW beta = ewm cov(portfolio, bench) / ewm var(bench); the first
    # beta_span rows are dropped while the estimator warms up.
    benchVar = pd.stats.moments.ewmvar(rets_spy_port, span=beta_span)[beta_span:]
    long_cov = pd.stats.moments.ewmcov(rets_long_port, rets_spy_port, span=beta_span)[beta_span:]
    short_cov = pd.stats.moments.ewmcov(rets_short_port, rets_spy_port, span=beta_span)[beta_span:]
    long_beta = (long_cov / benchVar).iloc[-1]
    short_beta = (short_cov / benchVar).iloc[-1]
    beta_ratio = long_beta / short_beta
    target_lev_per_side = 2.0
    scale = target_lev_per_side / (1 + beta_ratio)
    long_each = (scale * 1.0) / len(longs)
    short_each = (scale * beta_ratio) / len(shorts)
    # now buy our longs, scaled by ex ante beta
    for sid in longs:
        if sid in data: # skip sids with no data yet this bar (avoids exception)
            order_target_percent(sid, long_each)
    # sell our shorts, scaled by ex ante beta
    for sid in shorts:
        if sid in data: # same guard as above
            order_target_percent(sid, -short_each)
    # our long-short portfolio might now have more gross leverage than 2.0, but 
    # should have an expected beta of 0
           
           
    
We have migrated this algorithm to work with a new version of the Quantopian API. The code is different than the original version, but the investment rationale of the algorithm has not changed. We've put everything you need to know here on one page.
There was a runtime error.
17 responses

Simon,

Perhaps you could provide some context:

  1. Is a long-short strategy a standard approach? Skimming over your code, it appears that you end up with lists of long stocks and short stocks, but for it to be considered a "long-short" strategy by someone skilled in the art, would they be picked in a certain way?
  2. What was your recipe for picking the stocks, and why might it work?

And why the comments "this is so stupid" and "stupid" in the code?

Grant

  1. Yes, it's the original "hedge fund" approach. The magic is in how you pick them, and here I've not put much effort into that. People spend their lives working on that.
  2. Just best and worst of the "acquirer's multiple". It might work if the "value" effect is real, but even if it is, probably needs more refinement to isolate vs all the noise.

re: stupid, I forgot I left that in - every time I get a code exception because I placed an order for a stock that wasn't yet in data[] I bonk my head against the wall.

Simon.

Hey Grant, hey Simon.

The upcoming lecture and webinar will cover factor modeling for long-short equity. Long-short equity is quite common in my understanding, and factor modeling is a very common way to pick the longs and shorts -- come up with a fundamental factor model, much like Simon did here, and then use that to predict future returns and rank stocks. You can sign up for the webinar here, it will also eventually become available on the lectures page. This also all interfaces nicely with our upcoming FFC feature set, which will allow factor modeling.

Long-short equity does tend to be a very high minimum capacity strategy, but also a very high max capacity strategy. You will be destroyed by trading fees with a small amount of capital, but can successfully trade factor model long-short strategies up to tens of millions of dollars or potentially more.

I just finished attending the European Financial Association annual meeting, and am currently experiencing a little bit of factor model overload :)

Disclaimer

The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances.

Also nice algorithm, Simon. We definitely want to see more factor modeling and long-short principle driven strategies in the community as we set up the fund, so this comes at a great time.

So what is the time scale for your typical long-short strategy? How often would it trade?

And is it basically "Hmm? I think these M stocks will go up, and these N stocks will go down. I'll put $10M long on the M, and $10M short on the N. And maybe something in an ETF, to achieve perfect market neutrality."

Long-short equity does tend to be a very high minimum capacity strategy

So how is a contest at $100K capital gonna work? It sounds like it needs to be $25M and 50-100 stocks or so, no?

Passes the “anti-fragile” test. The algo returns +20.2% against SPY @ -53.1%: peak-trough for the sub-prime crisis (Oct 7, 2007 to Mar 2, 2009). Nice. Since hedge funds aren't the best model to emulate, a risk rotation model that limits the universe to SPY and IEF might be worth considering.

I'm not really sure what you are trying to accomplish, this algo was designed to create a beta-neutral long-short portfolio of stocks based on some fundamental criteria - SPY nor IEF have EBIT nor EV... trading those two alone would require some completely different algo, I would think?

@Simon Appreciate your input. Yeah, I think you’re right. My objective was simply to see if your algorithm could translate into a simple SPY IEF market neutral strategy. Bad idea, I guess.

(Whoops, what happened to my post?)

Cool. Thank you for sharing, Simon. I can only wonder why I've never noticed pandas' smoothened var and covar functions before. I pinned it on the wiki.

Did you have time to look into a sector-neutral version of the strategy? Issues with metrics like P/S and financials come to mind.

You mean where you allocate to the top and bottom decile of each sector, beta-neutralized vs each sector ETF? That would probably work well.

EDIT: It does work well.

Responding to your points, Grant:

Time scale is another part of the art I would say. I think you would need to do a robust parameter optimization looking at returns vs. trading costs as you varied the rebalancing frequency. David Edwards showed some of these approaches in this webinar as I recall. More frequent rebalancing will eat more transaction costs, but may be more responsive to market changes. As a rule of thumb I think weekly to monthly sounds about right.

Your intuition here is largely right. What it is doing is rather than betting on stocks in M to go up, or stocks in N to go down, you are betting on the predictive quality of your ranking. An equal dollar-volume long-short portfolio will make money based on r_M - r_N, where r_M is the returns of M and r_N is the returns of N. You make money when your ranking successfully predicts for increased returns, regardless of whether r_M and r_N are positive or negative. The spearman correlation lecture has an example of determining the predictive capacity of some ranking scheme.

The profitability of a strategy is determined by excess returns - trading costs. If you had a particularly good ranking scheme or incurred few enough trading costs due to infrequent rebalancing, then you might be able to run a long-short on $100,000. However, I suspect yes by and large most long-short strategies will not be profitable on $100,000. That said, you can submit your algorithm for consideration for the manager's program even if it's not appropriate for the contest. We understand that the contest is far from the be-all and end-all of which algorithms are good, and we are actively looking for algorithms outside those that do well in the contest. We also run contest submissions over larger capital allocations to check for these cases. You can always let us know about a strategy you'd like us to consider at [email protected], and we'll definitely take a look.

I ran the algo above with the default slippage and commission models, by commenting out these lines:

    # set_slippage(slippage.FixedSlippage(spread=0.00))  
    # set_commission(commission.PerShare(cost=0, min_trade_cost=None))  

and $1M in capital.

Kinda went bonkers. Is this expected?

Clone Algorithm
5
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
from pandas import Series, DataFrame
import pandas as pd
import statsmodels
import statsmodels.api 
import datetime as dt
import datetime as datetime
import numpy as np
from brokers.ib import VWAPBestEffort

def initialize(context):
    """Set up benchmark and weekly rebalance schedule.  Unlike the original
    post, the zero-cost models are commented out, so Quantopian's default
    slippage/commission models apply."""
    context.benchmarkSecurity = symbol('IWM')
    # Rebalance weekly, one minute after the first open of the week.
    schedule_function(func=regular_allocation,
                      date_rule=date_rules.week_start(),
                      time_rule=time_rules.market_open(minutes=1),
                      half_days=True
                      )
    # Record bookkeeping stats on the default (daily) schedule.
    schedule_function(bookkeeping)
    # set_slippage(slippage.FixedSlippage(spread=0.00))
    # set_commission(commission.PerShare(cost=0, min_trade_cost=None))

def bookkeeping(context, data):
    """Track how many positions are long vs. short, plus account leverage."""
    counts = {'long': 0, 'short': 0}
    for sid in context.portfolio.positions:
        amount = context.portfolio.positions[sid].amount
        if amount > 0.0:
            counts['long'] += 1
        elif amount < 0.0:
            counts['short'] += 1
    record(long_count=counts['long'])
    record(short_count=counts['short'])
    # gross leverage should be 2, net leverage should be 0!
    record(leverage=context.account.leverage)
    
def handle_data(context, data):
    # Trading is driven entirely by scheduled functions; no per-bar logic.
    pass
    
def add_ebit_ev(df):
    """Append an 'ebit_ev' (EBIT / enterprise value) column to df.

    Negative enterprise values are clamped to 1.0 first, since a negative
    EV would flip the sign of the ratio and corrupt the ranking.  The
    frame is modified in place and returned.
    """
    # Copy before mutating: writing into the raw column object is chained
    # assignment (SettingWithCopy territory) in pandas.
    ev = df['enterprise_value'].copy()
    ev[ev < 0.0] = 1.0
    df['enterprise_value'] = ev
    df['ebit_ev'] = df['ebit'] / ev
    return df

def before_trading_start(context):         
    """Daily pre-market screen.

    Queries Morningstar fundamentals for up to 2500 of the largest common
    stocks (after liquidity/listing filters), computes EBIT/EV, and keeps
    the top 100 as context.longs and the bottom 100 as context.shorts.
    The union of both becomes the trading universe.
    """
    df = get_fundamentals(
        query(fundamentals.valuation.market_cap,
              fundamentals. valuation.shares_outstanding,
              fundamentals.income_statement.ebit,
              fundamentals.income_statement.ebit_as_of,
              fundamentals.valuation.market_cap,
              fundamentals.valuation.enterprise_value,
              fundamentals.valuation.enterprise_value_as_of,
              fundamentals.share_class_reference.symbol,
              fundamentals.company_reference.standard_name,
              fundamentals.operation_ratios.total_debt_equity_ratio
              )
        .filter(fundamentals.operation_ratios.total_debt_equity_ratio != None)
        .filter(fundamentals.valuation.market_cap != None)
        .filter(fundamentals.valuation.shares_outstanding != None)  
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK") # no pink sheets
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCBB") # no pink sheets
        .filter(fundamentals.asset_classification.morningstar_sector_code != None) # require sector
        .filter(fundamentals.share_class_reference.security_type == 'ST00000001') # common stock only
        .filter(~fundamentals.share_class_reference.symbol.contains('_WI')) # drop when-issued
        .filter(fundamentals.share_class_reference.is_primary_share == True) # remove ancillary classes
        .filter(((fundamentals.valuation.market_cap*1.0) / (fundamentals.valuation.shares_outstanding*1.0)) > 1.0)  # stock price > $1
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False) # !ADR/GDR
        .filter(fundamentals.valuation.market_cap > 30000000) # cap > $30MM
        .filter(~fundamentals.company_reference.standard_name.contains(' LP')) # exclude LPs
        .filter(~fundamentals.company_reference.standard_name.contains(' L P'))
        .filter(~fundamentals.company_reference.standard_name.contains(' L.P'))
        .filter(fundamentals.balance_sheet.limited_partnership == None) # exclude LPs
        .order_by(fundamentals.valuation.market_cap.desc()) 
        .offset(0)
        .limit(2500) 
        ).T
    # .T transposes so the frame is indexed by security, columns by field.
    df = add_ebit_ev(df)
    # Highest EBIT/EV ("cheapest") -> longs; lowest -> shorts.
    context.longs = df.sort(['ebit_ev'],ascending=False)[0:100]
    context.shorts = df.sort(['ebit_ev'],ascending=True)[0:100]
    context.universe = np.union1d(context.longs.index.values, context.shorts.index.values)
    update_universe(context.universe)
    
def regular_allocation(context, data):
    """Weekly rebalance: equal-weight longs and shorts, with the short side
    scaled by the ratio of ex-ante betas so the combined book has an
    expected beta of ~0 versus the benchmark."""
    prices = history(500,'1d','price')
    longs = context.longs.index
    shorts = context.shorts.index
    # now allocate our longs and shorts
    # first sell anything we no longer want
    desiredSids = longs.union(shorts)
    holdingSids = Series(context.portfolio.positions.keys())
    gettingTheBoot = holdingSids[holdingSids.isin(desiredSids) == False]
    for (ix,sid) in gettingTheBoot.iteritems():
        order_target_percent(sid, 0.0)
        
    # calculate naive "beta" of each portfolio
    prices_longs = prices[longs.intersection(prices.columns)]
    prices_shorts = prices[shorts.intersection(prices.columns)]
    prices_spy = prices[context.benchmarkSecurity]
    # Note: a plain sum of per-stock returns across columns (not
    # value-weighted); any constant scale cancels in the beta ratio below.
    rets_long_port = prices_longs.pct_change().sum(axis=1)
    rets_short_port = prices_shorts.pct_change().sum(axis=1)
    rets_spy_port = prices_spy.pct_change()
    beta_span = 250
    # EW beta = ewm cov(portfolio, bench) / ewm var(bench); the first
    # beta_span rows are dropped while the estimator warms up.
    benchVar = pd.stats.moments.ewmvar(rets_spy_port, span=beta_span)[beta_span:]
    long_cov = pd.stats.moments.ewmcov(rets_long_port, rets_spy_port, span=beta_span)[beta_span:]
    short_cov = pd.stats.moments.ewmcov(rets_short_port, rets_spy_port, span=beta_span)[beta_span:]
    long_beta = (long_cov / benchVar).iloc[-1]
    short_beta = (short_cov / benchVar).iloc[-1]
    beta_ratio = long_beta / short_beta
    target_lev_per_side = 2.0
    scale = target_lev_per_side / (1 + beta_ratio)
    long_each = (scale * 1.0) / len(longs)
    short_each = (scale * beta_ratio) / len(shorts)
    # now buy our longs, scaled by ex ante beta
    for sid in longs:
        if sid in data: # skip sids with no data yet this bar (avoids exception)
            order_target_percent(sid, long_each)
    # sell our shorts, scaled by ex ante beta
    for sid in shorts:
        if sid in data: # same guard as above
            order_target_percent(sid, -short_each)
    # our long-short portfolio might now have more gross leverage than 2.0, but 
    # should have an expected beta of 0
           
           
    
There was a runtime error.

It seems like there are some issues backtesting over 2008 due to security delistings and therefore strange effects on leverage. It's something I will look into addressing. This falls into the 'execution' category, so the ordering logic could probably be a little smarter.

Here's an example of a long-short algorithm that survives transactions costs. It uses the following rules:

1) Trades only on Wednesdays
2) Risk based position sizing, with risk resizing each week
3) Universe is top 500 stocks by volume (includes ETFs and other weird stuff)
4) Sorts by 6 months' momentum (price / SMA over 126 days)
5) Top 100 are long pool, bottom 100 are short pool
6) Buy/short from top/bottom of list then hold each position until they leave the relevant pool
7) Buy/short as many positions as cash available, given the position sizing (long up to 100% of account value, short to -100%)

I've taken lots of ideas from Andreas Clenow's recent book, "Stocks on the Move", but reduced the weighting rules down to just the 6 month's momentum. I really liked his positioning sizing techniques and tricks to reduce turnover.

I am new to Q/Python, so please give me feedback on the code as well as the strategy. I was determined to use Pipeline, and NO for-loops.

Clone Algorithm
173
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Long-Short Momentum
# 
# Inspired by Andreas Clenow's Stocks on the Move, but much different from the system in the book.
#
# Work in progress:
# - Fixed number of positions, to properly balance long and short
# - Resize SPY accordingly
# - Stocks with huge share prices e.g. YRCW. Won't buy and other them, but check logic... does it skip well
# - Penny stocks
#
# Still to do: 
# - Test correlation of different lookback lengths (63,126,252) - scuppered because of timeouts
# - Test TR (i.e. ATR(1)) to see if the averaging and parameter really matters
# - Earnings call avoidance?
# - Mid-cap and small-cap
#   https://www.quantopian.com/posts/simulating-s-and-p-500-russell-1000-russell-3000-in-research


import numpy as np
import pandas as pd
import time
from quantopian.pipeline import Pipeline
from quantopian.pipeline import CustomFactor
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import Latest, SimpleMovingAverage, Returns
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.algorithm import attach_pipeline, pipeline_output


UniverseSize = 500
DailyRangePerStock = 0.001 # targeting 10bp of account value per day
RebalanceThreshold = 0.25 # don't resize if the difference in exposure is less than this
Lookback = 126
Leverage = 1
Collateral = None #sid(8554) # Where to hold the cash we get from shorts: SPY to equitise returns, or None


# APR is ATR / Price, where ATR is SMA(20) of TR.
#
# It works round different backadjustment paradigms used by Quantopian:
#     https://www.quantopian.com/posts/stocks-on-the-move-by-andreas-clenow
# Uses a SMA(20) rather than the conventional Wilder exponential smoothing: 
#     http://www.macroption.com/average-true-range-calculator/
#
class APR(CustomFactor):
    """Average true range expressed as a fraction of the latest close.

    ATR here is a 20-day simple average of the true range (SMA rather
    than Wilder smoothing); dividing by the last close normalizes it
    across price levels so stocks are comparable.
    """
    inputs = [USEquityPricing.close,USEquityPricing.high,USEquityPricing.low]
    window_length = 21
    def compute(self, today, assets, out, close, high, low):
        prior_close = np.roll(close, 1, axis=0)
        true_range = np.maximum.reduce([high - low,
                                        np.abs(high - prior_close),
                                        np.abs(low - prior_close)])
        # Row 0 is invalid: np.roll wrapped the final close around to the
        # front, so average only rows 1..window_length-1.
        out[:] = np.mean(true_range[1:], axis=0) / close[-1]
        
        
class AvgDailyDollarVolumeTraded(CustomFactor):
    """Mean daily dollar volume (close * volume) over the lookback window."""
    inputs = [USEquityPricing.close, USEquityPricing.volume]
    # No default window_length here; it is supplied at construction time.
    def compute(self, today, assets, out, close_price, volume):
        out[:] = np.mean(close_price * volume, axis=0)


def initialize(context):
    """Build the momentum pipeline and schedule weekly (Wednesday) trading."""
    
    context.spy = sid(8554)
    set_benchmark(context.spy)

    # define momentum as latest/SMA, same as to market (SPY) filter
    momentum        = (Latest(inputs=[USEquityPricing.close]) /
                       SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=Lookback)) - 1
    #mkt_cap         = Latest(inputs=[morningstar.valuation.market_cap]) #very slow
    #universe        = mkt_cap.top(UniverseSize)
    dollar_volume   = AvgDailyDollarVolumeTraded(window_length=100)
    universe        = dollar_volume.top(UniverseSize)
    # Rank 1 = strongest momentum; the top and bottom 20% of the universe
    # become the long and short candidate pools.
    momentum_rank   = momentum.rank(mask=universe, ascending=False)
    long_filter     = momentum_rank <= 0.2*UniverseSize
    short_filter    = momentum_rank > 0.8*UniverseSize
    apr             = APR()
    # Require ATR/price above 0.5% -- assumes very low-range names are not
    # worth sizing; TODO confirm intent.
    apr_filter      = apr > 0.005
    
    pipe = Pipeline()
    #pipe.add(momentum, 'momentum') # include for debugging
    pipe.add(momentum_rank, 'momentum_rank')
    pipe.add(apr, 'apr')
    pipe.add(long_filter, 'long')
    pipe.add(short_filter, 'short')
    
    pipe.set_screen( universe & apr_filter & (long_filter | short_filter) )
    pipe = attach_pipeline(pipe, name='equitylongshort')
    
    # Trade on Wednesdays (week_start + 2 days), two hours after the open...
    schedule_function(func=rebalance_positions, 
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_open(hours=2),
                      half_days=True)
    # ...and cancel anything still unfilled at that day's close.
    schedule_function(func=cancel_all,
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_close(),
                      half_days=True)
    
    set_slippage(slippage.FixedSlippage(spread=0.01))
    set_commission(commission.PerShare(cost=0.0035, min_trade_cost=0.35))

    
def before_trading_start(context, data):
    """Fetch the day's pipeline output, sorted best-momentum-first, and
    register the resulting securities as the trading universe."""
    context.pool = pipeline_output('equitylongshort').sort('momentum_rank')
    update_universe(context.pool.index)
    

def rebalance_positions(context, data):
    """Weekly (Wednesday) rebalance.

    Sizes each candidate so that one ATR of daily movement is roughly
    DailyRangePerStock of account value, skips resizes within
    RebalanceThreshold, fills each side up to Leverage * account value
    (current holdings kept preferentially, then highest momentum), and
    exits anything no longer in the pipeline pool.
    """

    # There shouldn't be any open orders, as we sold at yesterday's close
    assert(not get_open_orders())   
    
    pool          = context.pool
    positions     = context.portfolio.positions
    account_value = context.portfolio.portfolio_value
    
    # Append current price and current share holding as column
    # (This works around pipeline backadjustment issue.)
    pool['price'] = [data[sid].price if sid in data else np.NaN for sid in pool.index]
    pool['current_shares'] = [positions[sid].amount if sid in positions else 0 for sid in pool.index]
    pool['is_current'] = pool.current_shares != 0
    pool['sign'] = pool.long*2-1 # +1 for longs, -1 for shorts
    
    # Calculate target number of shares (integer, can be rounded down to zero)
    pool['atr'] = (pool.apr * pool.price)
    pool['atr'].replace(0, np.NaN, inplace=True)
    # BUG FIX: fill NaNs *before* the int cast -- .astype(int) on NaN is
    # undefined (garbage values or an exception, depending on pandas), so
    # the original post-cast replace(NaN, 0) came too late.
    pool['target_shares'] = (pool.sign * account_value * DailyRangePerStock
                             / pool.atr).fillna(0).astype(int)
    # BUG FIX: '|' binds tighter than '==' in Python, so the original
    # 'pool.price==0 | pool.price.isnull()' compared price against
    # (0 | isnull).  Parenthesize both sides of the OR.
    pool.loc[(pool.price == 0) | pool.price.isnull(), 'target_shares'] = 0
        
    # Save trading costs by not resizing current holdings if within tolerance
    pool.loc[abs(pool.current_shares - pool.target_shares) < 
             abs(pool.target_shares * RebalanceThreshold), 'target_shares'] = pool.current_shares
        
    # Sort so current holdings first, then highest momentum second
    longs  = pool[pool.long].copy()
    shorts = pool[pool.short].copy()   
    longs.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,True])  
    shorts.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,False])  

    # Only hold just enough to use the allowance
    longs.loc[(longs.target_shares*longs.price).cumsum() > Leverage * account_value,
                  'target_shares'] = 0
    shorts.loc[(shorts.target_shares*shorts.price).cumsum() < -Leverage * account_value,
                  'target_shares'] = 0
    '''
    # Fixed number of positions to give fixed risk (ignoring cross setional correlations)
    longs.loc[20:,'target_shares'] = 0
    shorts.loc[20:,'target_shares'] = 0'''
    
    record(LongValue=(sum(longs.target_shares*longs.price)))
    record(ShortValue=(sum(shorts.target_shares*shorts.price)))
    record(CashValue=(context.portfolio.cash))
        
    # Sell/cover positions that are no longer in the pool
    map(lambda sid: order_target(sid,0), [sid for sid in positions if (sid not in pool.index) & (sid != Collateral)])
    
    # Buy/short positions to target number of shares
    map(lambda sid: order_target(sid,longs.target_shares[sid]), [sid for sid in longs.index])
    map(lambda sid: order_target(sid,shorts.target_shares[sid]), [sid for sid in shorts.index])
    if Collateral: 
        order_target_percent(Collateral, Leverage) 
    
                   
# Called at market close, as there shouldn't be any order still active (they are "good till close")
# Called at market close, as there shouldn't be any order still active (they are "good till close")
def cancel_all(context, data):
    """Cancel every open order, logging a warning per affected security."""
    open_orders = get_open_orders()
    for security in open_orders:
        for open_order in open_orders[security]:
            log.warn("Cancelled %s order" % security.symbol)
            cancel_order(open_order)


def handle_data(context, data):
    # All logic lives in the scheduled functions; nothing to do per bar.
    pass


There was a runtime error.

Here's the same strategy, but "equitised". It holds the cash rebate from the shorts in SPY, thus layering the normal equity return onto the long-short return. I think you would normally want to use stock index futures, rather than SPY, as this would keep more cash handy. It's super volatile, but I guess the point is to show long-short plays nicely with equities. I suspect I would need to use half the long-short to SPY, to maximise the combined Sharpe.

Clone Algorithm
173
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Long-Short Momentum
# 
# Inspired by Andreas Clenow's Stocks on the Move, but much different from the system in the book.
#
# Work in progress:
# - Fixed number of positions, to properly balance long and short
# - Resize SPY accordingly
# - Stocks with huge share prices e.g. YRCW. Won't buy and other them, but check logic... does it skip well
# - Penny stocks
#
# Still to do: 
# - Test correlation of different lookback lengths (63,126,252) - scuppered because of timeouts
# - Test TR (i.e. ATR(1)) to see if the averaging and parameter really matters
# - Earnings call avoidance?
# - Mid-cap and small-cap
#   https://www.quantopian.com/posts/simulating-s-and-p-500-russell-1000-russell-3000-in-research


import numpy as np
import pandas as pd
import time
from quantopian.pipeline import Pipeline
from quantopian.pipeline import CustomFactor
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import Latest, SimpleMovingAverage, Returns
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.algorithm import attach_pipeline, pipeline_output


UniverseSize = 500 # number of most-liquid names (by dollar volume) in the tradeable pool
DailyRangePerStock = 0.001 # targeting 10bp of account value per day
RebalanceThreshold = 0.25 # don't resize if the difference in exposure is less than this
Lookback = 126 # momentum lookback window in trading days (~6 months)
Leverage = 1 # per-side exposure cap as a multiple of account value
Collateral = sid(8554) # Where to hold the cash we get from shorts: SPY to equitise returns, or None


# APR is ATR / Price, where ATR is SMA(20) of TR.
#
# It works round different backadjustment paradigms used by Quantopian:
#     https://www.quantopian.com/posts/stocks-on-the-move-by-andreas-clenow
# Uses a SMA(20) rather than the conventional Wilder exponential smoothing: 
#     http://www.macroption.com/average-true-range-calculator/
#
class APR(CustomFactor):
    """Average true range expressed as a fraction of the latest close.

    A day's true range is the largest of: high - low, |high - prev close|,
    |low - prev close|.  ATR is a 20-day simple average of TR (the window is
    21 days so every TR value has a previous close available), and the
    output is ATR / latest close.  SMA is used instead of Wilder smoothing.
    """
    inputs = [USEquityPricing.close, USEquityPricing.high, USEquityPricing.low]
    window_length = 21

    def compute(self, today, assets, out, close, high, low):
        # Drop the first bar (no prior close); align yesterday's close
        # with today's high/low instead of using np.roll.
        prev_close = close[:-1]
        hi, lo = high[1:], low[1:]
        true_range = np.maximum.reduce([hi - lo,
                                        np.abs(hi - prev_close),
                                        np.abs(lo - prev_close)])
        out[:] = np.mean(true_range, axis=0) / close[-1]
        
        
class AvgDailyDollarVolumeTraded(CustomFactor):
    """Mean daily dollar volume (close * volume) over the factor window.

    No default window_length is set here; callers supply it via the
    constructor, e.g. AvgDailyDollarVolumeTraded(window_length=100).
    """
    inputs = [USEquityPricing.close, USEquityPricing.volume]

    def compute(self, today, assets, out, close_price, volume):
        out[:] = (close_price * volume).mean(axis=0)


def initialize(context):
    """Build the long/short momentum pipeline and schedule weekly trading."""
    context.spy = sid(8554)
    set_benchmark(context.spy)

    # Momentum = latest close relative to its SMA, the same construction a
    # market (SPY) filter would use.
    latest_close = Latest(inputs=[USEquityPricing.close])
    close_sma = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                    window_length=Lookback)
    momentum = (latest_close / close_sma) - 1

    # Universe: the most liquid names by 100-day average dollar volume.
    # (Ranking on morningstar market cap was tried but is very slow.)
    universe = AvgDailyDollarVolumeTraded(window_length=100).top(UniverseSize)

    momentum_rank = momentum.rank(mask=universe, ascending=False)
    long_filter = momentum_rank <= 0.2 * UniverseSize   # top quintile
    short_filter = momentum_rank > 0.8 * UniverseSize   # bottom quintile

    apr = APR()
    apr_filter = apr > 0.005  # drop names that barely move (ATR sizing blows up)

    pipe = Pipeline()
    #pipe.add(momentum, 'momentum') # include for debugging
    pipe.add(momentum_rank, 'momentum_rank')
    pipe.add(apr, 'apr')
    pipe.add(long_filter, 'long')
    pipe.add(short_filter, 'short')
    pipe.set_screen(universe & apr_filter & (long_filter | short_filter))
    attach_pipeline(pipe, name='equitylongshort')

    # Trade two hours into Wednesday's session; cancel stragglers at the close.
    schedule_function(func=rebalance_positions,
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_open(hours=2),
                      half_days=True)
    schedule_function(func=cancel_all,
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_close(),
                      half_days=True)

    set_slippage(slippage.FixedSlippage(spread=0.01))
    set_commission(commission.PerShare(cost=0.0035, min_trade_cost=0.35))

    
def before_trading_start(context, data):
    # Fetch today's pipeline output, strongest momentum first, and register
    # the securities so data[] lookups work during rebalancing.
    ranked = pipeline_output('equitylongshort')
    context.pool = ranked.sort('momentum_rank')
    update_universe(context.pool.index)
    

def rebalance_positions(context, data):
    """Weekly rebalance.

    Sizes every long/short so it moves ~DailyRangePerStock of account value
    per day (shares = risk budget / dollar ATR), skips resizes inside the
    RebalanceThreshold tolerance, and caps each side's gross exposure at
    Leverage * account value.  Cash from shorts is parked in Collateral.
    """
    # There shouldn't be any open orders, as we sold at yesterday's close
    assert(not get_open_orders())

    pool          = context.pool
    positions     = context.portfolio.positions
    account_value = context.portfolio.portfolio_value

    # Append current price and current share holding as columns.
    # (This works around the pipeline backadjustment issue.)
    pool['price'] = [data[sid].price if sid in data else np.NaN for sid in pool.index]
    pool['current_shares'] = [positions[sid].amount if sid in positions else 0 for sid in pool.index]
    pool['is_current'] = pool.current_shares != 0
    pool['sign'] = pool.long*2-1 # +1 for longs, -1 for shorts

    # Target integer share count: risk budget divided by dollar ATR.
    # BUG FIX: NaNs must be cleared *before* the int cast -- the original
    # called .astype(int) on a NaN-bearing series and only replaced NaN
    # afterwards, which raises (or yields garbage) rather than zeroing.
    pool['atr'] = (pool.apr * pool.price)
    pool['atr'].replace(0, np.NaN, inplace=True)
    raw_target = pool.sign * account_value * DailyRangePerStock / pool.atr
    pool['target_shares'] = raw_target.fillna(0).astype(int)
    # BUG FIX: '|' binds tighter than '==', so the original expression
    # `pool.price==0 | pool.price.isnull()` compared price against
    # (0 | isnull) instead of OR-ing the two conditions.
    pool.loc[(pool.price == 0) | pool.price.isnull(), 'target_shares'] = 0

    # Save trading costs by not resizing current holdings if within tolerance
    pool.loc[abs(pool.current_shares - pool.target_shares) <
             abs(pool.target_shares * RebalanceThreshold), 'target_shares'] = pool.current_shares

    # Sort so current holdings come first, then by momentum strength
    longs  = pool[pool.long].copy()
    shorts = pool[pool.short].copy()
    longs.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,True])
    shorts.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,False])

    # Only hold just enough names to use the per-side exposure allowance
    longs.loc[(longs.target_shares*longs.price).cumsum() > Leverage * account_value,
                  'target_shares'] = 0
    shorts.loc[(shorts.target_shares*shorts.price).cumsum() < -Leverage * account_value,
                  'target_shares'] = 0

    record(LongValue=(sum(longs.target_shares*longs.price)))
    record(ShortValue=(sum(shorts.target_shares*shorts.price)))
    record(CashValue=(context.portfolio.cash))

    # Sell/cover positions that are no longer in the pool (keep collateral).
    # Explicit loops instead of side-effecting map(lambda ...): clearer, and
    # still correct if ever run under Python 3 where map is lazy.
    for sid in positions:
        if sid not in pool.index and sid != Collateral:
            order_target(sid, 0)

    # Buy/short positions to target number of shares
    for sid in longs.index:
        order_target(sid, longs.target_shares[sid])
    for sid in shorts.index:
        order_target(sid, shorts.target_shares[sid])
    if Collateral:
        order_target_percent(Collateral, Leverage)
    
                   
# Called at market close, as there shouldn't be any order still active (they are "good till close")
def cancel_all(context, data):
    # Safety net: cancel every remaining open order, logging each so stuck
    # orders are visible in the backtest output.
    for security, orders in get_open_orders().iteritems():
        for oo in orders:
            log.warn("Cancelled %s order" % security.symbol)
            cancel_order(oo)


def handle_data(context, data):
    # Required per-minute hook; all trading is done in scheduled functions,
    # so this is deliberately a no-op.
    pass


There was a runtime error.

Here's the vanilla long-short strategy again, this time with an absolute momentum filter. Long pool is only stocks with momentum > 0, and vice versa for shorts. It doesn't make much difference to the stats. The only divergence I can see from the original is a reduction in longs in late 2008, because most stocks will have been in decline. There was plenty to short however, so the system would have lost its market neutrality for that period. I don't think the complication and added risk is worth it.

Clone Algorithm
173
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Long-Short Momentum
# 
# Inspired by Andreas Clenow's Stocks on the Move, but much different from the system in the book.
#
# Work in progress:
# - Fixed number of positions, to properly balance long and short
# - Resize SPY accordingly
# - Stocks with huge share prices e.g. YRCW. Won't buy or short them, but check the logic... does it skip them cleanly?
# - Penny stocks
#
# Still to do: 
# - Test correlation of different lookback lengths (63,126,252) - scuppered because of timeouts
# - Test TR (i.e. ATR(1)) to see if the averaging and parameter really matters
# - Earnings call avoidance?
# - Mid-cap and small-cap
#   https://www.quantopian.com/posts/simulating-s-and-p-500-russell-1000-russell-3000-in-research


import numpy as np
import pandas as pd
import time
from quantopian.pipeline import Pipeline
from quantopian.pipeline import CustomFactor
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import Latest, SimpleMovingAverage, Returns
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.algorithm import attach_pipeline, pipeline_output


UniverseSize = 500 # number of most-liquid names (by dollar volume) in the tradeable pool
DailyRangePerStock = 0.001 # targeting 10bp of account value per day
RebalanceThreshold = 0.25 # don't resize if the difference in exposure is less than this
Lookback = 126 # momentum lookback window in trading days (~6 months)
Leverage = 1 # per-side exposure cap as a multiple of account value
Collateral = None #sid(8554) # Where to hold the cash we get from shorts: SPY to equitise returns, or None


# APR is ATR / Price, where ATR is SMA(20) of TR.
#
# It works round different backadjustment paradigms used by Quantopian:
#     https://www.quantopian.com/posts/stocks-on-the-move-by-andreas-clenow
# Uses a SMA(20) rather than the conventional Wilder exponential smoothing: 
#     http://www.macroption.com/average-true-range-calculator/
#
class APR(CustomFactor):
    """Average true range expressed as a fraction of the latest close.

    A day's true range is the largest of: high - low, |high - prev close|,
    |low - prev close|.  ATR is a 20-day simple average of TR (the window is
    21 days so every TR value has a previous close available), and the
    output is ATR / latest close.  SMA is used instead of Wilder smoothing.
    """
    inputs = [USEquityPricing.close, USEquityPricing.high, USEquityPricing.low]
    window_length = 21

    def compute(self, today, assets, out, close, high, low):
        # Drop the first bar (no prior close); align yesterday's close
        # with today's high/low instead of using np.roll.
        prev_close = close[:-1]
        hi, lo = high[1:], low[1:]
        true_range = np.maximum.reduce([hi - lo,
                                        np.abs(hi - prev_close),
                                        np.abs(lo - prev_close)])
        out[:] = np.mean(true_range, axis=0) / close[-1]
        
        
class AvgDailyDollarVolumeTraded(CustomFactor):
    """Mean daily dollar volume (close * volume) over the factor window.

    No default window_length is set here; callers supply it via the
    constructor, e.g. AvgDailyDollarVolumeTraded(window_length=100).
    """
    inputs = [USEquityPricing.close, USEquityPricing.volume]

    def compute(self, today, assets, out, close_price, volume):
        out[:] = (close_price * volume).mean(axis=0)


def initialize(context):
    """Build the long/short momentum pipeline (with an absolute-momentum
    filter: longs must have positive momentum, shorts negative) and
    schedule weekly trading."""
    context.spy = sid(8554)
    set_benchmark(context.spy)

    # Momentum = latest close relative to its SMA, the same construction a
    # market (SPY) filter would use.
    latest_close = Latest(inputs=[USEquityPricing.close])
    close_sma = SimpleMovingAverage(inputs=[USEquityPricing.close],
                                    window_length=Lookback)
    momentum = (latest_close / close_sma) - 1

    # Universe: the most liquid names by 100-day average dollar volume.
    # (Ranking on morningstar market cap was tried but is very slow.)
    universe = AvgDailyDollarVolumeTraded(window_length=100).top(UniverseSize)

    momentum_rank = momentum.rank(mask=universe, ascending=False)
    # Quintile membership plus the absolute-momentum requirement.
    long_filter = (momentum_rank <= 0.2 * UniverseSize) & (momentum > 0)
    short_filter = (momentum_rank > 0.8 * UniverseSize) & (momentum < 0)

    apr = APR()
    apr_filter = apr > 0.005  # drop names that barely move (ATR sizing blows up)

    pipe = Pipeline()
    #pipe.add(momentum, 'momentum') # include for debugging
    pipe.add(momentum_rank, 'momentum_rank')
    pipe.add(apr, 'apr')
    pipe.add(long_filter, 'long')
    pipe.add(short_filter, 'short')
    pipe.set_screen(universe & apr_filter & (long_filter | short_filter))
    attach_pipeline(pipe, name='equitylongshort')

    # Trade two hours into Wednesday's session; cancel stragglers at the close.
    schedule_function(func=rebalance_positions,
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_open(hours=2),
                      half_days=True)
    schedule_function(func=cancel_all,
                      date_rule=date_rules.week_start(days_offset=2),
                      time_rule=time_rules.market_close(),
                      half_days=True)

    set_slippage(slippage.FixedSlippage(spread=0.01))
    set_commission(commission.PerShare(cost=0.0035, min_trade_cost=0.35))

    
def before_trading_start(context, data):
    # Fetch today's pipeline output, strongest momentum first, and register
    # the securities so data[] lookups work during rebalancing.
    ranked = pipeline_output('equitylongshort')
    context.pool = ranked.sort('momentum_rank')
    update_universe(context.pool.index)
    

def rebalance_positions(context, data):
    """Weekly rebalance.

    Sizes every long/short so it moves ~DailyRangePerStock of account value
    per day (shares = risk budget / dollar ATR), skips resizes inside the
    RebalanceThreshold tolerance, and caps each side's gross exposure at
    Leverage * account value.  Cash from shorts is parked in Collateral
    (disabled in this variant: Collateral is None).
    """
    # There shouldn't be any open orders, as we sold at yesterday's close
    assert(not get_open_orders())

    pool          = context.pool
    positions     = context.portfolio.positions
    account_value = context.portfolio.portfolio_value

    # Append current price and current share holding as columns.
    # (This works around the pipeline backadjustment issue.)
    pool['price'] = [data[sid].price if sid in data else np.NaN for sid in pool.index]
    pool['current_shares'] = [positions[sid].amount if sid in positions else 0 for sid in pool.index]
    pool['is_current'] = pool.current_shares != 0
    pool['sign'] = pool.long*2-1 # +1 for longs, -1 for shorts

    # Target share count, truncated toward zero by np.fix (stays float so
    # NaNs survive until we explicitly zero them out).
    pool['atr'] = (pool.apr * pool.price)
    pool['atr'].replace(0, np.NaN, inplace=True)
    raw_target = pool.sign * account_value * DailyRangePerStock / pool.atr
    pool['target_shares'] = np.fix(raw_target).fillna(0)
    # BUG FIX: '|' binds tighter than '==', so the original expression
    # `pool.price==0 | pool.price.isnull()` compared price against
    # (0 | isnull) instead of OR-ing the two conditions.
    pool.loc[(pool.price == 0) | pool.price.isnull(), 'target_shares'] = 0

    # Save trading costs by not resizing current holdings if within tolerance
    pool.loc[abs(pool.current_shares - pool.target_shares) <
             abs(pool.target_shares * RebalanceThreshold), 'target_shares'] = pool.current_shares

    # Sort so current holdings come first, then by momentum strength
    longs  = pool[pool.long].copy()
    shorts = pool[pool.short].copy()
    longs.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,True])
    shorts.sort(['is_current','momentum_rank'], inplace=True, ascending=[False,False])

    # Only hold just enough names to use the per-side exposure allowance
    longs.loc[(longs.target_shares*longs.price).cumsum() > Leverage * account_value,
                  'target_shares'] = 0
    shorts.loc[(shorts.target_shares*shorts.price).cumsum() < -Leverage * account_value,
                  'target_shares'] = 0

    record(LongValue=(sum(longs.target_shares*longs.price)))
    record(ShortValue=(sum(shorts.target_shares*shorts.price)))
    record(CashValue=(context.portfolio.cash))

    # Sell/cover positions that are no longer in the pool (keep collateral).
    # Explicit loops instead of side-effecting map(lambda ...): clearer, and
    # still correct if ever run under Python 3 where map is lazy.
    for sid in positions:
        if sid not in pool.index and sid != Collateral:
            order_target(sid, 0)

    # Buy/short positions to target number of shares
    for sid in longs.index:
        order_target(sid, longs.target_shares[sid])
    for sid in shorts.index:
        order_target(sid, shorts.target_shares[sid])
    if Collateral:
        order_target_percent(Collateral, Leverage)
    
                   
# Called at market close, as there shouldn't be any order still active (they are "good till close")
def cancel_all(context, data):
    # Safety net: cancel every remaining open order, logging each so stuck
    # orders are visible in the backtest output.
    for security, orders in get_open_orders().iteritems():
        for oo in orders:
            log.warn("Cancelled %s order" % security.symbol)
            cancel_order(oo)


def handle_data(context, data):
    # Required per-minute hook; all trading is done in scheduled functions,
    # so this is deliberately a no-op.
    pass


There was a runtime error.

Hi Burrito Dan,

Thanks for the "Rebalance Threshold" part of this.

For me as a "python newbie" (and very unskilled with it) this is exactly what I wanted to add to my own code but couldn't figure out how to implement successfully. Many thanks indeed.

Cheers, best wishes,
Tony