long-short mean reversion attempt

Here's something to consider. Needs some refactoring, clean-up, comments, etc. It may be overfit, but recent performance looks pretty good. --Grant

import numpy as np
from scipy import optimize
import pandas as pd
import datetime

def initialize(context):
    
    context.eps = 1.0
    context.leverage = 1.0
    context.bias = 0
    
    schedule_function(trade, date_rules.week_start(days_offset=1), time_rules.market_open(minutes=60))
    
    set_commission(commission.PerTrade(cost=0))
    set_slippage(slippage.FixedSlippage(spread=0.00))
    
def before_trading_start(context,data): 
    
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(50)) 
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]
    
def handle_data(context, data):
    
    leverage = context.account.leverage
    
    if leverage >= 3.0:
        print "Leverage >= 3.0"
    
    record(leverage = leverage)
            
    # Drop leveraged ETFs and any stocks without data. Rebuild the list rather
    # than calling remove() while iterating, which skips elements.
    context.stocks = [stock for stock in context.stocks
                      if stock not in security_lists.leveraged_etf_list
                      and stock in data]
            
    num_secs = 0
    
    for stock in data:
        if context.portfolio.positions[stock].amount != 0:
            num_secs += 1
            
    record(num_secs = num_secs)

def get_allocation(context,data,prices):
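    # Smooth minute prices with an EWMA (span = 195 minutes, roughly half a
    # trading day) before measuring mean reversion.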
      
    prices = pd.ewma(prices,span=195).as_matrix(context.stocks)
    
    b_t = []
    
    for stock in context.stocks:
        b_t.append(abs(context.portfolio.positions[stock].amount*data[stock].price))
         
    m = len(b_t)
    b_0 = np.ones(m)/m
    denom = np.sum(b_t)

    if denom > 0:
        b_t = np.divide(b_t,denom)
    else:     
        b_t = b_0
    
    x_tilde = []
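    # Price relative of the window mean to the latest smoothed price, folded to
    # be >= 1; context.ls accumulates a long/short vote for each stock.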
    
    for i,stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        price_rel = mean_price/prices[-1,i]
        if price_rel < 1:
            price_rel = 1.0/price_rel
            context.ls[stock] += -1
        else:
            context.ls[stock] += 1
        x_tilde.append(price_rel)
        
    bnds = [(0, 1)]*len(context.stocks)  # each weight constrained to [0, 1]
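    # OLMAR-style projection: stay as close as possible to the current weights
    # b_t while requiring the predicted return dot(x, x_tilde) >= eps and the
    # weights to form a long-only portfolio summing to 1.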
     
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps})
    
    res = optimize.minimize(norm_squared, b_0, args=b_t, jac=norm_squared_deriv,
                            method='SLSQP', constraints=cons, bounds=bnds)
        
    allocation = res.x
    allocation[allocation<0.05*b_0[0]] = 0.05*b_0[0] 
    allocation = allocation/np.sum(allocation)
    
    if res.success:
        return (allocation,np.dot(allocation,x_tilde))
    else:
        return (b_t,1)

def trade(context,data):
    
    # find average weighted allocation over range of trailing window lengths
    
    prices = history(5*390,'1m','price')[context.stocks].dropna(axis=1)
    context.stocks = list(prices.columns.values)
    a = np.zeros(len(context.stocks))
    w = 0
    
    context.ls = {}
    
    for stock in context.stocks:
        context.ls[stock] = 0
    
    for n in range(1,11):
        (a_n, w_n) = get_allocation(context,data,prices.tail(n*195))
        a += w_n*a_n  # weight each window's allocation by its predicted return
        w += w_n
    
    allocation = a/w
    
    for stock in context.stocks:
        context.ls[stock] = np.sign(context.ls[stock])
    
    denom = np.sum(allocation)
    if denom > 0:
        allocation = allocation/denom
    
    allocate(context,data,allocation)

def allocate(context, data, desired_port):
    
    # Skip stocks that still have open orders, keeping desired_port aligned
    # with the filtered stock list.
    keep = [not get_open_orders(stock) for stock in context.stocks]
    context.stocks = [stock for stock, k in zip(context.stocks, keep) if k]
    desired_port = np.asarray([p for p, k in zip(desired_port, keep) if k])
    
    a = np.zeros(len(context.stocks))
        
    for i, stock in enumerate(context.stocks):
        a[i] = context.ls[stock]*desired_port[i]
    
    pct_ls = np.sum(a)
    denom = np.sum(np.absolute(a))
    
    if denom > 0:
        a = a/denom
    
    scale = 1.0-0.5*abs(pct_ls)
        
    for i, stock in enumerate(context.stocks):
        order_target_percent(stock, scale*context.leverage*a[i])
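    # Hedge the residual net long/short exposure with SPY (sid 8554).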
        
    order_target_percent(sid(8554), -0.5*context.leverage*pct_ls)
    
    record(pct_ls = pct_ls)
    
    for stock in data:
        if stock not in context.stocks + [sid(8554)]:
            order_target_percent(stock,0)
    
def norm_squared(b,*args):
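    # 0.5*||b - b_t||^2: squared-distance objective for the SLSQP projection.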
    
    b_t = np.asarray(args)
    delta_b = b - b_t
     
    return 0.5*np.dot(delta_b,delta_b.T)

def norm_squared_deriv(b,*args):
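    # Gradient of norm_squared with respect to b.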
    
    b_t = np.asarray(args)
    delta_b = b - b_t
        
    return delta_b
5 responses

Long-term backtest. Not so good.

(The attached algorithm is identical to the code above.)

Thanks for sharing your code! I really like these OLMAR approaches. I noticed that slippage and commission were overridden. I ran with default slippage and commission (by just commenting out lines 14 and 15, the set_commission and set_slippage calls) and the results are different. See below. I didn't see this degradation, or the need to override the cost models, with your other OLMAR posts (e.g. see simple OLMAR w/ optimizer & get_fundamentals).

(Same code as the original post, but with the set_commission and set_slippage lines commented out.)
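If the goal is to see how much of the return survives realistic costs, another option (rather than zeroing the models out or relying on the defaults) is to set them explicitly. A rough sketch of a drop-in initialize() for the algorithm above, assuming Quantopian's commission.PerShare and slippage.VolumeShareSlippage models; the parameter values here are illustrative, not the platform defaults:

def initialize(context):

    context.eps = 1.0
    context.leverage = 1.0

    schedule_function(trade, date_rules.week_start(days_offset=1),
                      time_rules.market_open(minutes=60))

    # Explicit (illustrative) cost models instead of the zero-cost overrides.
    set_commission(commission.PerShare(cost=0.005))
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0.1))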

Thanks Marc,

Not sure what's going on. There may be a lot of turnover combined with the high level of capital, so the slippage model kicks in?
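One way to check would be to record a rough turnover estimate at each rebalance and see how large it gets. Something like the sketch below, called from allocate() with the weights passed to order_target_percent; the record_turnover helper and its target_weights argument are hypothetical, not part of the algorithm above:

def record_turnover(context, target_weights):
    # target_weights: dict mapping each sid to its intended portfolio weight.
    # One-sided turnover estimate: sum of |target weight - current weight|.
    pv = context.portfolio.portfolio_value
    if pv <= 0:
        return
    turnover = 0.0
    for stock, target in target_weights.items():
        pos = context.portfolio.positions[stock]
        current = pos.amount*pos.last_sale_price/pv
        turnover += abs(target - current)
    record(turnover = turnover)

A persistently high reading would support the idea that slippage on the weekly rebalances is eating the returns.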

Here's another version.

Note:

set_commission(commission.PerTrade(cost=0))
set_slippage(slippage.FixedSlippage(spread=0.00))

import numpy as np
from scipy import optimize
import pandas as pd
import datetime

def initialize(context):
    
    context.eps = 1.0
    context.leverage = 1.0
    context.bias = 0
    
    schedule_function(trade, date_rules.week_start(days_offset=1), time_rules.market_open(minutes=60))
    
    set_commission(commission.PerTrade(cost=0))
    set_slippage(slippage.FixedSlippage(spread=0.00))
    
    context.prices = None
    context.first_day = True
    
def before_trading_start(context,data): 
    
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(250)) 
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]
    
def handle_data(context, data):
    
    leverage = context.account.leverage
    
    if leverage >= 3.0:
        print "Leverage >= 3.0"
    
    record(leverage = leverage)
            
    # Drop leveraged ETFs and any stocks without data. Rebuild the list rather
    # than calling remove() while iterating, which skips elements.
    context.stocks = [stock for stock in context.stocks
                      if stock not in security_lists.leveraged_etf_list
                      and stock in data]
            
    num_secs = 0
    
    for stock in data:
        if context.portfolio.positions[stock].amount != 0:
            num_secs += 1
            
    record(num_secs = num_secs)

def get_allocation(context,data,prices):
      
    prices = pd.ewma(prices,span=390).as_matrix(context.stocks)
    
    b_t = []
    
    for stock in context.stocks:
        b_t.append(abs(context.portfolio.positions[stock].amount*data[stock].price))
         
    m = len(b_t)
    b_0 = np.ones(m)/m
    denom = np.sum(b_t)

    if denom > 0:
        b_t = np.divide(b_t,denom)
    else:     
        b_t = b_0
    
    x_tilde = []
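    # Price relatives as before, but with a +/-0.2% dead band around 1.0:
    # moves inside the band do not contribute to the long/short vote in context.ls.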
    
    for i,stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        price_rel = mean_price/prices[-1,i]
        if price_rel < 0.998:
            price_rel = 1.0/price_rel
            context.ls[stock] += -price_rel
        elif price_rel > 1.002:
            context.ls[stock] += price_rel        
        x_tilde.append(price_rel)
    
    bnds = [(0,1)]*len(context.stocks)
     
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps})
    
    res = optimize.minimize(norm_squared, b_0, args=b_t, jac=norm_squared_deriv,
                            method='SLSQP', constraints=cons, bounds=bnds)
        
    a = res.x
    a[a<0.2*b_0[0]] = 0.2*b_0[0]
    a = a/np.sum(a)
    
    if res.success:
        return (a,np.dot(a,x_tilde))
    else:
        return (b_t,1)

def trade(context,data):
    
    # find average weighted allocation over range of trailing window lengths
    
    prices = history(20*390,'1m','price')[context.stocks].dropna(axis=1)
    context.stocks = list(prices.columns.values)
    a = np.zeros(len(context.stocks))
    w = 0
    
    context.ls = {}
    
    for stock in context.stocks:
        context.ls[stock] = 0
    
    for n in range(1,21):
        (a_n, w_n) = get_allocation(context,data,prices.tail(n*390))
        a += w_n*a_n  # weight each window's allocation by its predicted return
        w += w_n
    
    allocation = a/w
    
    # for stock in context.stocks:
    #     context.ls[stock] = np.sign(context.ls[stock])
    
    denom = np.sum(allocation)
    if denom > 0:
        allocation = allocation/denom
    
    allocate(context,data,allocation)

def allocate(context, data, desired_port):
    
    # Skip stocks that still have open orders, keeping desired_port aligned
    # with the filtered stock list.
    keep = [not get_open_orders(stock) for stock in context.stocks]
    context.stocks = [stock for stock, k in zip(context.stocks, keep) if k]
    desired_port = np.asarray([p for p, k in zip(desired_port, keep) if k])
    
    a = np.zeros(len(context.stocks))
        
    for i, stock in enumerate(context.stocks):
        a[i] = context.ls[stock]*desired_port[i]
    
    pct_ls = 0.0
    denom = np.sum(np.absolute(a))
    
    if denom > 0:
        a = a/denom
        pct_ls = np.sum(a)  # net long/short fraction of the normalized book
    
    scale = 1.0-0.5*abs(pct_ls)
        
    for i, stock in enumerate(context.stocks):
        order_target_percent(stock, scale*context.leverage*a[i])
        
    order_target_percent(sid(8554), -0.5*context.leverage*pct_ls)
    
    record(pct_ls = pct_ls)
    
    for stock in data:
        if stock not in context.stocks + [sid(8554)]:
            order_target_percent(stock,0)
    
def norm_squared(b,*args):
    
    b_t = np.asarray(args)
    delta_b = b - b_t
     
    return 0.5*np.dot(delta_b,delta_b.T)

def norm_squared_deriv(b,*args):
    
    b_t = np.asarray(args)
    delta_b = b - b_t
        
    return delta_b

Since this takes over an hour to run in Quantopian 1, it might be a great candidate to test in Quantopian 2. I tried to port it... most of it is straightforward, except that it dies with the cryptic "error: failed in converting 4th argument `xl' of _slsqp.slsqp to C/Fortran array".
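For what it's worth, that message usually points at the bounds: SLSQP builds lower/upper bound arrays (xl, xu) from the bounds argument, and the conversion fails when they cannot be turned into a float array matching the length of the starting point (for example, an empty universe, or a stock list that has drifted out of sync with the price matrix, so the bounds and b_0 have different lengths). A sketch of a guard one might add before calling the optimizer; the safe_projection helper below is hypothetical and inlines the same squared-distance objective rather than reusing norm_squared:

import numpy as np
from scipy import optimize

def safe_projection(b_t, x_tilde, eps):
    # Validate inputs before handing them to SLSQP; return None if they are
    # unusable so the caller can fall back to the current weights.
    b_t = np.asarray(b_t, dtype=float)
    x_tilde = np.asarray(x_tilde, dtype=float)

    if b_t.size == 0 or b_t.size != x_tilde.size or not np.isfinite(x_tilde).all():
        return None

    b_0 = np.ones(b_t.size)/b_t.size
    bnds = [(0.0, 1.0)]*b_t.size  # same length as b_0, plain float bounds
    cons = ({'type': 'eq',   'fun': lambda x: np.sum(x) - 1.0},
            {'type': 'ineq', 'fun': lambda x: np.dot(x, x_tilde) - eps})

    res = optimize.minimize(lambda b: 0.5*np.dot(b - b_t, b - b_t), b_0,
                            method='SLSQP', constraints=cons, bounds=bnds)
    return res.x if res.success else None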