OLMAR 3.0 using new order and history features

Hi,

I updated the Quantopian-classic OLMAR algorithm to demonstrate the power and expressiveness of our new features (read more about them here). For more information on the OLMAR algorithm, look around the forums or read the original paper here.

The most useful new order method, I think, is order_target_percent(), which is perfectly suited for rebalancing a portfolio to a target allocation given a vector of percentages. Previously, OLMAR required an involved function to calculate how many shares had to be bought at what price to achieve the desired portfolio allocation. This is now two lines.
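For example, the entire rebalancing step in the code below is just:

    # rebalance to the target weights -- order_target_percent()
    # works out the share amounts and prices for us
    for stock, percent in zip(context.stocks, b_norm):
        order_target_percent(stock, percent)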

The history() function also obviates the batch_transform decorator, which was mostly used just to return a dataframe -- the new function gives you one directly, rather than requiring you to write a separate, decorated function.
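For example (a sketch -- get_prices is a made-up name; both approaches hand you a pandas DataFrame of daily prices):

    # old style: a separate, decorated function
    @batch_transform(refresh_period=R_P, window_length=W_L)
    def get_prices(datapanel, sids):
        return datapanel['price']

    # new style: a single call from inside handle_data
    prices = history(5, '1d', 'price')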

Clone Algorithm (240) [backtest attached; results table not rendered]
import numpy as np
import math
from pytz import timezone

# globals left over from the old get_data batch transform decorator
# (not used in this version)
R_P = 0 #refresh period in days
W_L = 5 #window length in days

def initialize(context):
    
    context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)]
    
    for stock in context.stocks:
        print stock.symbol
    
    context.m = len(context.stocks)
    context.b_t = np.ones(context.m) / context.m
    context.eps = 1 # change epsilon here
    context.init = False
    context.previous_datetime = None
    
def handle_data(context, data):
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))
    
    cash = context.portfolio.cash
    record(cash=cash)
            
    if not context.init:
        # Equal weighting portfolio
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True
      
    # only execute algorithm once per day
    if context.previous_datetime is not None:
        if current_datetime.day != context.previous_datetime.day:
            new_day = True
            algo_executed = False
        else:
            new_day = False
    else:
        new_day = False
        algo_executed = True
        
    context.previous_datetime = current_datetime
    
    if not new_day or algo_executed:
        return
        
    # skip this tick if any orders are open or any stock did not trade
    for stock in context.stocks:
        if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime():
            return
        
    # compute current portfolio allocations
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price
        
    # Bring portfolio vector to unit length
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    b_norm = olmar(context)
    
    # Rebalance Portfolio
    for stock, percent in zip(context.stocks, b_norm):
        order_target_percent(stock, percent)
    
    algo_executed = True

def olmar(context):
    """Logic of the olmar algorithm.

    :Returns: b_norm : vector for new portfolio
    """
    
    # get history -- prices and volumes of the last 5 days (at close)
    prices = history(5, '1d', 'price')
    volumes = history(5, '1d', 'volume')
    
    # find the relative moving volume-weighted average price for each security
    x_tilde = np.zeros(context.m)
    for i, stock in enumerate(context.stocks):
        vwa_price = np.dot(prices[stock], volumes[stock]) / np.sum(volumes[stock])
        x_tilde[i] = vwa_price/prices[stock].ix[-1]
    
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:     
        lam = max(0, num/denom)
    
    b = context.b_t + lam*(x_tilde-x_bar)

    b_norm = simplex_projection(b)
    
    return b_norm

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w
Disclaimer

The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances.

21 responses

Thanks Thomas,

We could consider a modification to the code that would perform the time-scale optimization described in the paper (the so-called BAH(OLMAR) algorithm). If I follow the paper correctly, this would mean running the algorithm 28 times per call to handle_data (for window lengths of 3 to 30 days).

I'm unclear on how the author performed the computation. He states that one should "combine multiple experts' portfolios weighted by their historical performance," but he may be talking about using the projected daily return. Any idea?
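If it is the cumulative-return reading, I'd guess the combination step looks something like this (a sketch only; b_w would be an m x w matrix with one portfolio per window length, and s_w the running cumulative return of each):

    import numpy as np

    weights = s_w / np.sum(s_w)        # (1 x w) performance weights, sum to 1
    b_combined = np.dot(b_w, weights)  # (m x 1) combined portfolio vector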

Grant

Here's the BAH(OLMAR) code posted by Ben Vibhagool (https://www.quantopian.com/posts/olmar-implementation-fixed-bug).

Clone Algorithm (73) [backtest attached; results table not rendered]
import numpy as np

#globals for the get_data batch transform decorator
R_P = 1 #refresh period in days
W_L = [3,4,5] #Vector of different window size
W_MAX = max(W_L) #Max window size

def initialize(context):
    #['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM']
    context.stocks = [sid(351), sid(1419), sid(1787), sid(25317), sid(3321), sid(3951), sid(4922)]
    
    context.m = len(context.stocks) #Number of stocks in portfolio
    context.w = len(W_L) #Number of window sizes
    
    context.b_t = np.ones(context.m) / context.m #Initial b_t vector
    context.eps = 1.00  #change epsilon here
    
    context.b_w = np.ones((context.m,context.w)) / context.m #Matrix of different b_t for each window size
    context.s_w = np.ones(context.w) #Vector of cum. returns from each window size
    
    context.init = False
    context.comPerTrade = 0
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25,price_impact=0))
    set_commission(commission.PerShare(cost=0.00))
    
def handle_data(context, data):    
    
    # get data
    d = get_data(data,context.stocks)
    if d is None:
        return
    
    prices = d[0]
    #volumes = d[1]
    
    if not context.init:
        rebalance_portfolio(context, data, context.b_t)        
        context.init = True
        return

    m = context.m #Length of b vector
    x_tilde_a = np.zeros(m) #x_tilde vector
    b_optimal = np.zeros(m) #b_vector
    
    #For each window, we calculate the optimal b_norm
    for k, w in enumerate(W_L):
        #Calculate predicted x for that window size
        for i, stock in enumerate(context.stocks):
            #Predicted ratio of price change from t to t+1
            x_tilde_a[i] = np.mean(prices[W_MAX-w:,i])/prices[W_MAX-1,i]
            
        #Update the b_w matrix
        context.b_w[:,k] = find_b_norm(context,x_tilde_a,context.b_w[:,k])
    
    length_p = len(prices[:,0])    #Length of price vector
    p_t = prices[length_p-1,:]    #Price vector today
    p_y = prices[length_p-2,:]    #Price vector yesterday
    
    #Ratio of price change (1 x m) vector from t-1 to t
    x_t = np.true_divide(p_t,p_y)
    
    #w is length of W_L
    #Daily returns (1 x w) vector
    s_d = np.dot(x_t,context.b_w)
    
    #Cumulative returns (1 x w) vector
    context.s_w = np.multiply(context.s_w,s_d)
    
    #Calculate return_weights (1 x w) vector
    return_weights = np.true_divide(context.s_w[:],np.sum(context.s_w)) #Weight according to cum. returns

    #Calculate b_{t+1} (n x 1) vector
    b_optimal = np.dot(context.b_w,return_weights) #Calculate the weighted portfolio
   
    #Rebalance to the combined portfolio
    rebalance_portfolio(context, data, b_optimal)
    
    # update portfolio
    context.b_t = b_optimal
    
#Calculate b_norm for a given b_t and x_tilde
def find_b_norm(context,x_tilde,b_t):    
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()
        
    # Calculate terms for lambda (lam)
    dot_prod = np.dot(b_t, x_tilde)

    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:     
        lam = max(0, num/denom)
    
    b = b_t + lam*(x_tilde-x_bar)
    
    #Finding min distance b_{t} and b_{t+1}
    b_norm = simplex_projection(b)
    
    return b_norm

# set globals R_P & W_L above
@batch_transform(refresh_period=R_P, window_length=W_MAX)
def get_data(datapanel,sids):
    p = datapanel['price'].as_matrix(sids)
    v = datapanel['volume'].as_matrix(sids)
    return [p,v]

def rebalance_portfolio(context, data, desired_port):
    #rebalance portfolio
    current_amount = np.zeros_like(desired_port)
    desired_amount = np.zeros_like(desired_port)
    
    if not context.init:
        positions_value = context.portfolio.starting_cash
    else:
        positions_value = context.portfolio.positions_value + context.portfolio.cash  
    
    for i, stock in enumerate(context.stocks):
        current_amount[i] = context.portfolio.positions[stock].amount
        desired_amount[i] = desired_port[i]*positions_value/data[stock].price

    diff_amount = desired_amount - current_amount

    for i, stock in enumerate(context.stocks):
        order(stock, diff_amount[i]) #order_stock

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w

Yeah, I agree, Grant. Seems like the code should be pretty copy-and-pasteable, no?

Hello Thomas,

When I try to run your code with an alternate set of stocks, I get an error (details below). As you'll see, the code executes fine for a while, and then crashes.

Any ideas?

Grant


    # http://www.investopedia.com/stock-analysis/cotd/bbry20130206.aspx  
    # http://blogs.marketwatch.com/thetell/2013/05/20/12-stocks-to-ride-sp-500-volatility-to-profits/  
    context.stocks = [sid(19831),sid(26892),sid(11224),sid(2673),sid(3384),  
                      sid(3455),sid(6257),sid(5214),sid(14064),sid(21418),  
                      sid(23328),sid(22996),sid(7674),sid(2),sid(10594)]  

There was a runtime error. See the more detailed error below. If you need help, please send us feedback.
IndexError: index -1 is out of bounds for axis 0 with size 0
File test_algorithm_sycheck.py:69, in handle_data
File test_algorithm_sycheck.py:110, in olmar
USER ALGORITHM:145, in simplex_projection
rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]

Thomas,

It seems that to optimize the algorithm, a portfolio of volatile but uncorrelated stocks needs to be identified. Any ideas on how to do this? Perhaps a screen with set_universe could be developed?

Grant

Grant,

Indeed, that seems like a good insight. Did the paper mention that?

One manual way to do this would be to simply iterate over each stock in the universe, sum up its correlation coefficients with all the other stocks, and select the n smallest ones. It's not ideal, though, as you really need to identify a subset that is uncorrelated as a whole, which sounds like a huge combinatorial nightmare. Anyway, that would probably be a good start.
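Something like this, perhaps (a rough sketch on a pandas DataFrame of daily returns; it's greedy, so it won't find the truly least-correlated subset):

    def least_correlated(returns, n):
        # returns: pandas DataFrame of daily returns, one column per stock
        corr = returns.corr().abs()
        # sum each stock's absolute correlation with all the others
        total_corr = corr.sum() - 1.0  # drop the self-correlation of 1
        # pick the n stocks with the smallest summed correlation
        return list(total_corr.sort_values().index[:n])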

One thing I'm not sure about is how to change the stocks in the traded universe. Do we just sell the ones we don't want to trade anymore? That seems like the only reasonable thing.

Thomas

P.S. Not sure about the error you're getting. Maybe it means that one of the stocks stopped trading?

Thanks Thomas,

I'll think about how to articulate a separate post regarding the screening.

Grant

Hello Thomas,

Here's my first attempt at eliminating the window length as a free parameter. If I coded it correctly, the algorithm computes the OLMAR portfolio (using volume weights) for window lengths from 3 to 30 days. The optimal portfolio is then computed by weighting each OLMAR portfolio by its expected return and forming a normalized linear combination of the portfolio vectors. Note that this is not the BAH(OLMAR) approach described in the paper, since I am using expected returns, not cumulative historical ones.
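(In symbols: with s_k = b_k · x_tilde_k the projected return of the window-k OLMAR portfolio, the combined weights are b_opt = (sum_k s_k * b_k) / (sum_k s_k).)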

Cheers,

Grant

Clone Algorithm (86) [backtest attached; results table not rendered]
import numpy as np
import math
from pytz import timezone

def initialize(context):
    
    context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)]
    
    for stock in context.stocks:
        print stock.symbol
    
    context.m = len(context.stocks)
    context.b_t = np.ones(context.m) / context.m
    context.eps = 1 # change epsilon here
    context.init = False
    context.previous_datetime = None
    
def handle_data(context, data):
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))
    
    cash = context.portfolio.cash
    record(cash=cash)
            
    if not context.init:
        # Equal weighting portfolio
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True
      
    # only execute algorithm once per day
    if context.previous_datetime is not None:
        if current_datetime.day != context.previous_datetime.day:
            new_day = True
            algo_executed = False
        else:
            new_day = False
    else:
        new_day = False
        algo_executed = True
        
    context.previous_datetime = current_datetime
    
    if not new_day or algo_executed:
        return
        
    # skip this tick if any orders are open or any stock did not trade
    for stock in context.stocks:
        if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime():
            return
        
    # compute current portfolio allocations
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price
        
    # Bring portfolio vector to unit length
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    
    b_norm = np.zeros((context.m,28))
    x_tilde = np.zeros((context.m,28))
    for k in range(28):
        b_norm[:,k],x_tilde[:,k] = olmar(context,k+3)
        
    s = np.zeros(28)
    b_norm_opt = np.zeros(context.m)
    s_sum = 0
    for k in range(28):
        s[k] = np.dot(b_norm[:,k],x_tilde[:,k])
        b_norm[:,k] = np.multiply(s[k],b_norm[:,k])
        b_norm_opt += b_norm[:,k]
        s_sum += s[k]
        
    b_norm_opt = np.divide(b_norm_opt,s_sum)
    
    print b_norm_opt
    
    # Rebalance Portfolio
    for stock, percent in zip(context.stocks, b_norm_opt):
        order_target_percent(stock, percent)
    
    algo_executed = True

def olmar(context,window):
    """Logic of the olmar algorithm.

    :Returns: b_norm : vector for new portfolio
    """
    
    # get history -- prices and volumes of the last 30 days (at close)
    p = history(30, '1d', 'price')
    v = history(30, '1d', 'volume')
    
    prices = p.ix[30-window:-1]
    volumes = v.ix[30-window:-1]
    
    # find the relative moving volume-weighted average price for each security
    x_tilde = np.zeros(context.m)
    for i, stock in enumerate(context.stocks):
        vwa_price = np.dot(prices[stock], volumes[stock]) / np.sum(volumes[stock])
        x_tilde[i] = vwa_price/prices[stock].ix[-1]
    
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:     
        lam = max(0, num/denom)
    
    b = context.b_t + lam*(x_tilde-x_bar)

    b_norm = simplex_projection(b)
    
    return b_norm,x_tilde

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w

Here's the same algorithm as above, except backtested with the epsilon parameter set to zero (an equal dollar weighting is maintained across the portfolio). The overall return with active trading is 54%, versus 40% with no algorithmic periodic re-allocation.

Clone Algorithm (86) [backtest attached; results table not rendered]
import numpy as np
import math
from pytz import timezone

def initialize(context):
    
    context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)]
    
    for stock in context.stocks:
        print stock.symbol
    
    context.m = len(context.stocks)
    context.b_t = np.ones(context.m) / context.m
    context.eps = 0 # change epsilon here
    context.init = False
    context.previous_datetime = None
    
def handle_data(context, data):
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))
    
    cash = context.portfolio.cash
    record(cash=cash)
            
    if not context.init:
        # Equal weighting portfolio
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True
      
    # only execute algorithm once per day
    if context.previous_datetime is not None:
        if current_datetime.day != context.previous_datetime.day:
            new_day = True
            algo_executed = False
        else:
            new_day = False
    else:
        new_day = False
        algo_executed = True
        
    context.previous_datetime = current_datetime
    
    if not new_day or algo_executed:
        return
        
    # skip this tick if any orders are open or any stock did not trade
    for stock in context.stocks:
        if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime():
            return
        
    # compute current portfolio allocations
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price
        
    # Bring portfolio vector to unit length
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    
    b_norm = np.zeros((context.m,28))
    x_tilde = np.zeros((context.m,28))
    for k in range(28):
        b_norm[:,k],x_tilde[:,k] = olmar(context,k+3)
        
    s = np.zeros(28)
    b_norm_opt = np.zeros(context.m)
    s_sum = 0
    for k in range(28):
        s[k] = np.dot(b_norm[:,k],x_tilde[:,k])
        b_norm[:,k] = np.multiply(s[k],b_norm[:,k])
        b_norm_opt += b_norm[:,k]
        s_sum += s[k]
        
    b_norm_opt = np.divide(b_norm_opt,s_sum)
    
    print b_norm_opt
    
    # Rebalance Portfolio
    for stock, percent in zip(context.stocks, b_norm_opt):
        order_target_percent(stock, percent)
    
    algo_executed = True

def olmar(context,window):
    """Logic of the olmar algorithm.

    :Returns: b_norm : vector for new portfolio
    """
    
    # get history -- prices and volumes of the last 30 days (at close)
    p = history(30, '1d', 'price')
    v = history(30, '1d', 'volume')
    
    prices = p.ix[30-window:-1]
    volumes = v.ix[30-window:-1]
    
    # find the relative moving volume-weighted average price for each security
    x_tilde = np.zeros(context.m)
    for i, stock in enumerate(context.stocks):
        vwa_price = np.dot(prices[stock], volumes[stock]) / np.sum(volumes[stock])
        x_tilde[i] = vwa_price/prices[stock].ix[-1]
    
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:     
        lam = max(0, num/denom)
    
    b = context.b_t + lam*(x_tilde-x_bar)

    b_norm = simplex_projection(b)
    
    return b_norm,x_tilde

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w

Grant: Looks great! Quite sobering results, though; I guess we would need to test this on a larger universe over a longer time period.

That makes me wonder about how to compare algos/parameters. There must be some work on significance testing (e.g., is 54% really better than 40%, or is that within the margin of error?).
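A paired bootstrap on the daily returns might be a start (a sketch; r_algo and r_bench are assumed to be numpy arrays of matched daily returns):

    import numpy as np

    def bootstrap_pvalue(r_algo, r_bench, n_boot=10000):
        # resample trading days with replacement, keeping the pairing,
        # and compare the total returns of the two resampled series
        n = len(r_algo)
        diffs = np.empty(n_boot)
        for i in range(n_boot):
            idx = np.random.randint(0, n, n)
            diffs[i] = np.prod(1 + r_algo[idx]) - np.prod(1 + r_bench[idx])
        # fraction of resamples in which the algo fails to beat the benchmark
        return np.mean(diffs <= 0)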

Hi Thomas,

I've been fiddling with the algorithm, trying to understand the potential. One problem is that NaNs creep into the data (even with filling on). So, I devised some code to run the algo only when there are no NaNs. Please have a look at https://www.quantopian.com/posts/avoid-nans-w-slash-history-api-most-efficient-approach. I'd appreciate your input.
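The basic idea is just to bail out of the daily logic whenever the history window contains NaNs (a simplified sketch; the linked post compares more efficient variants):

    prices = history(30, '1d', 'price')
    if np.isnan(prices.values).any():
        return  # skip this rebalance -- data incomplete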

Grant

Hi,

I am not sure whether this is the latest post? It appears the algo is not interesting enough, since the trading costs kill the results? In my opinion it is not a 'learning' algorithm; it is more a heuristic formula run according to a fixed set of mathematical rules. A neural network which can be (re)trained, or to which datasets can be added or removed, is more of something that learns from its data. I believe this algorithm needs a way to test, in advance or during the run, whether a stock is still mean-reverting. If a stock is not, it won't contribute to a buy/hold strategy. Also, a sell/hold (short) side should be added so that the algo also has returns in a bear market.
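One possible in-advance check is an Augmented Dickey-Fuller test (a sketch only; it assumes the statsmodels package is available in your environment, and treats a low p-value as evidence of mean reversion):

    import numpy as np
    from statsmodels.tsa.stattools import adfuller

    def is_mean_reverting(prices, pvalue=0.05):
        # rejecting the ADF unit-root null suggests the price series
        # is stationary, i.e. mean-reverting
        result = adfuller(np.asarray(prices, dtype=float))
        return result[1] < pvalue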

I did not look into BAH(OLMAR) yet. Are there still people out there exploring this algo, or is it offline due to insufficient returns?

J.

Hello Quant Trader,

It's been a while since I worked on this. I have not been able to get it to the point where it'd make sense in live trading with real money, but perhaps somebody else was successful.

Grant

Hi Grant,
Thank you for your update. I am just reflecting on a couple of things here, and I am wondering if you are interested in getting this algo to run. My findings so far:
1. I have added the OLMAR function to the initialization part as well, since I noticed that half of the portfolio is sold and new positions are bought upon initialization. By running the OLMAR algo over the initial portfolio, this won't happen.
2. I do not believe you should run this algo over a small set of stocks to beat the benchmark. It is really suited to stocks which do show mean-reversion behaviour. If you have a limited set of stocks without this behaviour, or the selected stocks do not beat the benchmark because they are simply the 'bottom' of the market, a simple buy-hold-sell won't help. Basically, all stocks should be added.
3. It does not make sense to include stocks which are not mean-reverting, so stocks must be tested before being added to the portfolio.
4. Adding a whole bunch of stocks is fine as long as they behave similarly to the index and you do not change your portfolio often. If you do, you are making the broker rich, or the market is quite volatile, and then the question is whether this behaviour is acceptable. I was thinking of a set of 100 stocks and only trading with half of them? Then the good performers stay in, and the losers are replaced by new winners.
5. I would also like to experiment with the volume/price history. What if the volume moves faster, i.e., detecting momentum of volume instead of price?
6. Trading costs for stocks are limited to 5 USD per trade? So only trade if a portfolio change can be expected to earn more than 5 USD?
7. I believe the BAH(OLMAR) algorithm should be implemented. If you look at the paper, you see that its behaviour is flat and less dependent on epsilon and on the stock market used. I believe this also makes sense: the windows in which stocks change momentum or mean-revert are not the same, so it makes sense to buy a stock with a half-life of 5 days and sell it after 10 days, and buy another stock with a different 'speed' / half-life of 30 days and sell it after 60 days.

In general, I believe this algo has some potential, since statistically it takes the best buy-and-hold stocks at their best moments. If you bought the complete benchmark, you would also buy the worst buy-and-hold stocks, so this algo should outperform the benchmark (a large spectrum of stocks).

Are you interested in teaming up? GitHub is in my Visual Studio / PTVS, so we can exchange easily if you like. I would also like to run it offline with data from Yahoo or other sources, since I need the IntelliSense and step-by-step debugger to see how the Python works :-).

J.

Hi J./Quant Trader,

Thanks for the offer to collaborate. I've scaled back my efforts on Quantopian, and have some other interests and obligations.

Best wishes,

Grant

Hi,

I encountered the following error:

IndexError: index out of bounds

USER ALGORITHM:253, in simplex_projection

rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]  

Is there a way to find out when it occurs? It would be nice to have more of a data 'dump' -- for instance, the offending input data -- to limit the debugging. I will add a try-except.
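Something along these lines (a sketch of the try-except with a data dump; I'm assuming the built-in log object has an error() method like the debug() used elsewhere in this thread):

    try:
        b_norm = simplex_projection(b)
    except IndexError:
        # dump the offending input so the failing case can be reproduced
        log.error("simplex_projection failed for b = %s" % str(b))
        b_norm = context.b_t  # fall back to the current portfolio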

J.

J, are you still experiencing the IndexError? If so, you can invite me to the algorithm via collab ([email protected]) and I'll help debug. We'll get the code up and running!


I updated the code to use the new schedule_function(), which cleans it up quite a bit.

Clone Algorithm (143) [backtest attached; results table not rendered]
import numpy as np
import math
from pytz import timezone

def initialize(context):
    context.stocks = symbols('CERN', 'DLTR', 'ROST', 'MSFT', 'SBUX')
    
    context.m = len(context.stocks)
    context.b_t = np.ones(context.m) / context.m
    context.eps = 1 # change epsilon here
    context.init = False
    context.previous_datetime = None
    schedule_function(daily, date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open())
    
def handle_data(context, data):
    pass

def daily(context, data):
    cash = context.portfolio.cash
    record(cash=cash)
            
    if not context.init:
        # Equal weighting portfolio
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True
      
    
    # skip this tick if any orders are open or any stock did not trade
    for stock in context.stocks:
        if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime():
            return
        
    # compute current portfolio allocations
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price
        
    # Bring portfolio vector to unit length
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    b_norm = np.zeros((context.m,28))
    x_tilde = np.zeros((context.m,28))
    for k in range(28):
        b_norm[:,k],x_tilde[:,k] = olmar(context,k+3)
        
    s = np.zeros(28)
    b_norm_opt = np.zeros(context.m)
    s_sum = 0
    for k in range(28):
        s[k] = np.dot(b_norm[:,k], x_tilde[:,k])
        b_norm[:,k] = np.multiply(s[k], b_norm[:,k])
        b_norm_opt += b_norm[:,k]
        s_sum += s[k]
        
    b_norm_opt = np.divide(b_norm_opt, s_sum)
    
    print b_norm_opt
    
    # Rebalance Portfolio
    for stock, percent in zip(context.stocks, b_norm_opt):
        order_target_percent(stock, percent)

def olmar(context, window):
    """Logic of the olmar algorithm.

    :Returns: b_norm : vector for new portfolio
    """
    
    # get history -- prices and volumes of the last 30 days (at close)
    p = history(30, '1d', 'price')
    v = history(30, '1d', 'volume')
    
    prices = p.ix[30-window:-1]
    volumes = v.ix[30-window:-1]
    
    # find the relative moving volume-weighted average price for each security
    x_tilde = np.zeros(context.m)
    for i, stock in enumerate(context.stocks):
        vwa_price = np.dot(prices[stock], volumes[stock]) / np.sum(volumes[stock])
        x_tilde[i] = vwa_price/prices[stock].ix[-1]
    
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:     
        lam = max(0, num/denom)
    
    b = context.b_t + lam*(x_tilde-x_bar)

    b_norm = simplex_projection(b)
    
    return b_norm,x_tilde

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w

Hi,
New here :)

Thank you for the algo -- it seems amazing. I've tried to use the universe function and set context.stocks to that universe, but I keep getting the same error someone mentioned before:

IndexError: index out of bounds

USER ALGORITHM:141, in simplex_projection

rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]  

Alisa, I've invited you to collaborate as you suggested above, I hope that's fine.
Once I get the backtest running, I'll share the results here as well.

Thanks,
Z.

Hi Z and welcome to Quantopian,

I also tried changing the algorithm, but it turns out that it's quite tricky. The reason is that as the universe changes (new symbols come in, old ones drop out), we have to update the portfolio vector (context.b_t, I think) in a clever way as well, and have to exit positions in symbols that disappeared. I gave this a fair shot quite a while back but couldn't get it to work, so it probably requires some tinkering. Having said that, I think it would be great if the algorithm could work with a universe.

Thomas

I'm attaching my attempt from back then. At least it seems to run, and you can see the logic. I think the problem is that it's not necessarily the size that changes (which is what the logic here handles) but the symbols, and there is no mechanism I know of which tells you which symbols appeared and disappeared during universe roll-over. Another issue is that this is an older OLMAR algorithm (probably v2).
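One way around that might be to key the weights by sid instead of by position in the vector (a sketch, not wired into the attached code):

    import numpy as np

    def remap_weights(old_weights, old_stocks, new_stocks):
        # carry over the weight of every surviving sid, start new sids
        # at zero (dropped sids simply fall out), then renormalize
        w = dict(zip(old_stocks, old_weights))
        b = np.array([w.get(s, 0.0) for s in new_stocks])
        total = b.sum()
        if total > 0:
            return b / total
        return np.ones(len(new_stocks)) / len(new_stocks)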

Clone Algorithm (231) [backtest attached; results table not rendered]
import numpy as np
import datetime

def initialize(context):
    context.eps = 5  #change epsilon here
    context.init = True
    context.counter = 0
    context.stocks = []
    set_slippage(slippage.FixedSlippage())
    set_commission(commission.PerShare(cost=0))
    set_universe(universe.DollarVolumeUniverse(floor_percentile=98.0, 
                                               ceiling_percentile=100.0))
    
def handle_data(context, data):    
    context.counter += 1
    if context.counter <= 5:
        return
    
    context.stocks = [sid for sid in data]
    m = len(context.stocks)
    
    if context.init:
        context.b_t = np.ones(m) / m
        rebalance_portfolio(context, data, context.b_t)
        context.init = False
        return

    if len(context.b_t) > m:
        # need to decrease portfolio vector
        context.b_t = context.b_t[:m]
    elif len(context.b_t) < m:
        # need to grow portfolio vector
        len_bt = len(context.b_t)
        context.b_t = np.concatenate([context.b_t, np.ones(m-len_bt) / m])
    
    assert len(context.b_t) == m
    
    x_tilde = np.zeros(m)

    b = np.zeros(m)

    # find relative moving average price for each security
    for i, stock in enumerate(context.stocks):
        price = data[stock].price
        x_tilde[i] = data[stock].mavg(5) / price
        
    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()
        
    # market relative deviation
    mark_rel_dev = x_tilde - x_bar

    # Expected return with current portfolio
    exp_return = np.dot(context.b_t, x_tilde)
    log.debug("Expected Return: {exp_return}".format(exp_return=exp_return))
    weight = context.eps - exp_return
    log.debug("Weight: {weight}".format(weight=weight))
    variability = (np.linalg.norm(mark_rel_dev))**2
    log.debug("Variability: {norm}".format(norm=variability))
    # test for divide-by-zero case
    if variability == 0.0:
        step_size = 0 # no portfolio update
    else:
        step_size = max(0, weight/variability)
    log.debug("Step-size: {size}".format(size=step_size))
    log.debug("Market relative deviation:")
    log.debug(mark_rel_dev)
    log.debug("Weighted market relative deviation:")
    log.debug(step_size*mark_rel_dev)
    b = context.b_t + step_size*mark_rel_dev
    b_norm = simplex_projection(b)
    #np.testing.assert_almost_equal(b_norm.sum(), 1)
        
    rebalance_portfolio(context, data, b_norm)
        
    # Predicted return with new portfolio
    pred_return = np.dot(b_norm, x_tilde)
    log.debug("Predicted return: {pred_return}".format(pred_return=pred_return))
    record(pred_return=pred_return,
           step_size=step_size)
    # Make sure that we actually optimized our objective
    #assert exp_return-.001 <= pred_return, "{new} <= {old}".format(new=exp_return, old=pred_return)
    # update portfolio
    context.b_t = b_norm
    
def rebalance_portfolio(context, data, desired_port):
    print 'desired'
    print desired_port
    desired_amount = np.zeros_like(desired_port)
    current_amount = np.zeros_like(desired_port)
    prices = np.zeros_like(desired_port)
        
    if context.init:
        positions_value = context.portfolio.starting_cash
    else:
        positions_value = context.portfolio.positions_value + context.portfolio.cash
        
    
    for i, stock in enumerate(context.stocks):
        current_amount[i] = context.portfolio.positions[stock].amount
        prices[i] = data[stock].price
    
    desired_amount = np.round(desired_port * positions_value / prices)
    diff_amount = desired_amount - current_amount
    for i, stock in enumerate(context.stocks):
        order(stock, diff_amount[i]) #order_stock

def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

    v = np.asarray(v)
    p = len(v)

    # Sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w