Back to Community
ETF rebalance monthly based on momentum

Hello
I am trying to replicate this:

I have done the first step: Select the best performing ETF in a basket based on the returns of the last 60 days and then buy them (for now i haven't done the weight part yet) and rebalance monthly
Do you think that this code is efficient? I have tried to avoid loops by using only pandas features.
Thank you very much for your help

Clone Algorithm
67
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
'''Adaptive asset allocation:
based on http://bpgassociates.com/docs/Adaptive-Asset-Allocation-A-Primer.pdf
'''
import numpy as np

def initialize(context):
    """Set up the monthly momentum-rotation strategy."""
    # Trade once a month, at the open of the first session.
    schedule_function(rebalance,
                      date_rules.month_start(),
                      time_rules.market_open())

    # Trailing window (days) used to rank each asset's performance.
    context.lookback = 60
    # How many of the top-ranked assets to hold.
    context.n_best_stocks = 3

    # Investable universe of broad asset-class ETFs.
    context.etf = [
        sid(8554),   # SPY U.S. stocks
        sid(23921),  # TLT U.S. long-term Treasuries
        sid(26669),  # VNQ REIT
        sid(24705),  # EEM emerging markets
        sid(21769),  # IEV Europe stocks
        sid(14520),  # EWJ Japan stocks
        sid(33146),  # GVI US intermediate Treasuries
    ]

def rebalance(context, data):
    """Monthly rebalance: rank assets by trailing momentum, hold the best.

    Ranks every asset on its percent return over the last
    ``context.lookback`` days and buys the ``context.n_best_stocks``
    top-ranked names, equally weighted; everything else is closed out.
    """
    print('rebalance day')
    historical_prices = history(100, '1d', 'price')
    n_stock_list = len(historical_prices.columns.values)
    # Ranks run 1..n with the best return ranked highest, so a latest
    # rank strictly above this threshold marks a top-n_best_stocks asset
    # (assumes no rank ties).
    trigger_value = n_stock_list - context.n_best_stocks
    momentum_df = historical_prices.pct_change(context.lookback).dropna()
    momentum_ranked = momentum_df.rank(axis=1)
    stock_to_buy = [s for s in momentum_ranked.columns.values.tolist()
                    if momentum_ranked[s].iloc[-1] > trigger_value]
    # BUG FIX: the original used 1.0 / n_stock_list (size of the whole
    # universe), leaving the portfolio under-invested.  Weight by the
    # number of names actually bought, and guard against an empty list.
    weight = 1.0 / len(stock_to_buy) if stock_to_buy else 0.0
    for stock in data:
        if stock not in stock_to_buy and context.portfolio.positions[stock].amount != 0:
            order_target_percent(stock, 0)
            log.info("Exit on %s" % (stock.symbol))
        elif stock in stock_to_buy:
            order_target_percent(stock, weight)
            log.info("Buy  %s" % (stock.symbol))
        else:
            log.info("Stay Flat on  %s" % (stock.symbol))
    
    
def handle_data(context, data):
    # No per-bar logic; all trading happens in the scheduled rebalance.
    pass
There was a runtime error.
21 responses

Thanks for sharing this, I am looking for similar sorts of systems.

Glad to help. There was an error in the previous post because the weight for each stock was not correctly calculated.
In the next posts I am going to weight each stock differently using the minimum variance portfolio.

Clone Algorithm
67
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
'''Adaptive asset allocation:
based on http://bpgassociates.com/docs/Adaptive-Asset-Allocation-A-Primer.pdf
'''
import numpy as np

def initialize(context):
    """Set up the monthly momentum-rotation strategy."""
    # Trade once a month, at the open of the first session.
    schedule_function(rebalance,
                      date_rules.month_start(),
                      time_rules.market_open())

    # Trailing window (days) used to rank each asset's performance.
    context.lookback = 60
    # How many of the top-ranked assets to hold.
    context.n_best_stocks = 5
    # Fraction of the portfolio to split across the selected assets.
    context.pecentage_ofportfolio_used = 1.

    # Investable universe of broad asset-class ETFs.
    context.etf = [
        sid(8554),   # SPY U.S. stocks
        sid(23921),  # TLT U.S. long-term Treasuries
        sid(26669),  # VNQ REIT
        sid(26807),  # GLD gold
        sid(24705),  # EEM emerging markets
        sid(21769),  # IEV Europe stocks
        sid(14520),  # EWJ Japan stocks
        sid(32505),  # OIL
        sid(33146),  # GVI US intermediate Treasuries
    ]

def rebalance(context, data):
    """Monthly rebalance: buy the top-momentum ETFs, equally weighted.

    Selects the ``context.n_best_stocks`` assets with the best return
    over the last ``context.lookback`` days and allocates the weight
    given by ``weights()`` to each; everything else is closed out.
    """
    print('rebalance day')
    historical_prices = history(100, '1d', 'price')
    # total list of stocks
    stock_list = historical_prices.columns.values.tolist()
    n_stock_list = len(stock_list)
    # A latest rank strictly above this threshold marks a top asset
    # (ranks run 1..n, best return ranked highest; assumes no ties).
    trigger_value = n_stock_list - context.n_best_stocks
    # dataframe of performances over the lookback window
    momentum_df = historical_prices.pct_change(context.lookback).dropna()
    momentum_ranked = momentum_df.rank(axis=1)
    stocks_to_buy = [s for s in stock_list
                     if momentum_ranked[s].iloc[-1] > trigger_value]
    # FIX: the weight is identical for every buy, so compute it once here
    # instead of once per asset inside the loop; guard the empty case,
    # which previously raised ZeroDivisionError inside weights().
    target_weight = weights(context, stocks_to_buy) if stocks_to_buy else 0.0
    # order execution
    for stock in stock_list:
        if stock not in stocks_to_buy and context.portfolio.positions[stock].amount != 0:
            order_target_percent(stock, 0)
            log.info("Exit on %s" % (stock.symbol))
        elif stock in stocks_to_buy:
            order_target_percent(stock, target_weight)
            log.info("Buy  %s" % (stock.symbol))
        else:
            log.info("Stay Flat on  %s" % (stock.symbol))
    
    
def handle_data(context, data):
    # No per-bar logic; all trading happens in the scheduled rebalance.
    pass
def weights(context, stocks_to_buy):
    """Return the equal target weight for each selected asset.

    Splits ``context.pecentage_ofportfolio_used`` evenly across the
    assets in ``stocks_to_buy``.  Returns 0.0 for an empty selection
    instead of raising ZeroDivisionError (the original crashed when no
    asset passed the momentum filter).
    """
    if not stocks_to_buy:
        return 0.0
    return context.pecentage_ofportfolio_used / len(stocks_to_buy)
    
There was a runtime error.

I'm having a hard time implementing the Markowitz weights to obtain minimum variance. I am using this — can someone please help me figure out why these weights give such a different result?
Thanks!

Clone Algorithm
5
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
'''Adaptive asset allocation:
based on http://bpgassociates.com/docs/Adaptive-Asset-Allocation-A-Primer.pdf
'''
import numpy as np
import cvxopt as opt
from cvxopt import blas, solvers
import pandas as pd
# Turn off progress printing 
solvers.options['show_progress'] = False

def initialize(context):
    """Set up momentum selection with Markowitz minimum-variance weighting."""
    # Trade once a month, at the open of the first session.
    schedule_function(rebalance,
                      date_rules.month_start(),
                      time_rules.market_open())

    # Trailing window (days) used to rank each asset's performance.
    context.lookback = 60
    # How many of the top-ranked assets to hold.
    context.n_best_stocks = 5
    # Fraction of the portfolio to deploy across the selected assets.
    context.pecentage_ofportfolio_used = 1.

    # Investable universe of broad asset-class ETFs.
    context.etf = [
        sid(8554),   # SPY U.S. stocks
        sid(23921),  # TLT U.S. long-term Treasuries
        sid(26669),  # VNQ REIT
        sid(26807),  # GLD gold
        sid(24705),  # EEM emerging markets
        sid(21769),  # IEV Europe stocks
        sid(14520),  # EWJ Japan stocks
        sid(32505),  # OIL
        sid(33146),  # GVI US intermediate Treasuries
    ]

def rebalance(context, data):
    """Monthly rebalance: pick top-momentum ETFs, weight them via Markowitz.

    Selection uses the best performance over the last ``context.lookback``
    days; weighting delegates to ``markowitz_weights``.
    """
    print('rebalance day')
    # Stored on the context so markowitz_weights() can reuse the prices.
    context.historical_prices = history(100, '1d', 'price')
    universe = context.historical_prices.columns.values.tolist()
    # A latest rank strictly above this cutoff marks a top-n asset
    # (ranks run 1..n, best return ranked highest).
    cutoff_rank = len(universe) - context.n_best_stocks
    latest_rank = (context.historical_prices
                   .pct_change(context.lookback)
                   .dropna()
                   .rank(axis=1)
                   .iloc[-1])
    context.stocks_to_buy = [s for s in universe if latest_rank[s] > cutoff_rank]
    # Optimized weight per selected asset.
    weights_dict = markowitz_weights(context, data)
    # Order execution: buy the selection, flatten dropped holdings.
    for stock in universe:
        if stock in context.stocks_to_buy:
            order_target_percent(stock, weights_dict[stock])
            log.info("Buy  %s" % (stock.symbol))
        elif context.portfolio.positions[stock].amount != 0:
            order_target_percent(stock, 0)
            log.info("Exit on %s" % (stock.symbol))
        else:
            log.info("Stay Flat on  %s" % (stock.symbol))

    
def handle_data(context, data):
    # No per-bar logic; all trading happens in the scheduled rebalance.
    pass

def markowitz_weights(context, data):
    """Return a dict mapping each selected asset to its optimized weight.

    Uses daily returns of the assets in ``context.stocks_to_buy`` (taken
    from ``context.historical_prices``) and delegates the Markowitz-style
    optimization to ``optimal_portfolio``.
    """
    daily_returns = context.historical_prices.pct_change().dropna()
    # Restrict to the momentum-selected assets only.
    selected_returns = daily_returns[context.stocks_to_buy]
    # optimal_portfolio expects one row per asset, hence the transpose.
    opt_weights, _, _ = optimal_portfolio(selected_returns.T)
    return dict(zip(selected_returns.columns, opt_weights))
    
def optimal_portfolio(returns):
    """Compute a long-only minimum-variance portfolio with cvxopt.

    Traces the efficient frontier over a grid of risk-aversion values,
    fits a parabola to (return, risk), and re-solves the QP at the
    implied minimum-variance point.

    :param returns: matrix-like of returns with one row per asset and one
        column per observation (callers pass a transposed DataFrame).
    :returns: tuple ``(weights, frontier_returns, frontier_risks)`` where
        ``weights`` is an (n, 1) numpy array; the QP constraints force the
        weights to be non-negative and sum to 1.
    """
    n = len(returns)
    returns = np.asmatrix(returns)
    
    # Log-spaced grid of risk-aversion parameters spanning several decades.
    N = 100
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
    
    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))        # asset covariance matrix
    pbar = opt.matrix(np.mean(returns, axis=1))  # mean return per asset
    
    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n ,1))  # G*x <= h  =>  weights >= 0 (long only)
    A = opt.matrix(1.0, (1, n))  # A*x == b  =>  weights sum to 1
    b = opt.matrix(1.0)
    
    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] 
                  for mu in mus]
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
    ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    # NOTE(review): if m1[2]/m1[0] is negative (possible on degenerate
    # frontiers), np.sqrt yields nan and the final QP receives a nan
    # coefficient — worth guarding; confirm against real data.
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])
    # CALCULATE THE OPTIMAL PORTFOLIO
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks

    
    
    
    
    
There was a runtime error.

Because it is a totally, wholly different system! You can't go from equal weighted to vol weighted and expect the same results. I am currently working on both myself and will post when I am a little further down the line.

And anyway what are you optimising for? Simply the lowest vol along the EF curve by the looks of it. Equal weighting is far from this. You need to include constraints and then targets. EG for a 15% vol what is the best return available......for a 5% vol what is the best return available and so forth. I don't know if it help but here is a link to a very good paper:

https://quantstrattrader.wordpress.com/2015/06/05/momentum-markowitz-and-solving-rank-deficient-covariance-matrices-the-constrained-critical-line-algorithm/

There is a Critical line implementation here but it seems horrible over complex to me:
https://www.quantopian.com/posts/critical-line-algorithm-for-portfolio-optimization

Ilya Kipnis' work in R looks a lot more straightforward but I need to download R and work through it.

Thank you Anthony.
Actually the goal of my algo is:
1) select n best performance stocks
2) construct a portfolio looking for the lowest portfolio volatility along the Efficient Frontier curve.

It seems to me that this example does exactly the point number 2, there is also a notebook here
Now although i have understood what a minimum variance portfolio is, i have a hard time to understand the mechanism of the cvxopt library used to calculate weights.. so actually the way weights are calculated is a not very clear to me, but when i've looked at the example i tought that it was like a 'one size fits all' method to obtain the minimum variance.
The problem is that i was expecting just a volatility reduction and not such a totally different result.

Momentum is built into the Markowitz CLA solution anyway in the covariance matrix. You don't need/want to code for it separately. You just choose the portfolio further up to the right which will contain a greater weighting to higher momentum / vol stocks anyway.

Thus it becomes a case of simply telling the algo to find you the portfolio on the EF curve which gives you the desired vol/ return.

At least , this is my interpretation!

Unconstrained mean variance optimization is going to give you terrible results. You should first of all consider not using any estimates of mean returns, if you are doing so; try setting those all to zero. Second, you should do a Monte Carlo bootstrapping, maybe 100 iterations and covariance samples, that should give you more stable and reasonable results. I posted a notebook with some bootstrap code a few weeks ago.

Thank you Simon! , it must be this one: Bootstrapping volatility-standardized asset weights
very useful thanks

That is a very contrary view to that contained in the paper I referenced where the back test results were very good indeed. Mean return estimates are merely a forecast that the next month's returns will equal the returns of the lookback period.

So they should not be set to zero. The criticism is usually about using the very long terms returns as an estimate. In Ilya Kipnis' very attractive scheme these returns are recalculated each month based on a three to 12 month look back period.

Anyway, that is the scheme I am working on and that is the only way you will be able to incorporate momentum into the mean variance model.

I am not sure how clear I have been but in any event read the paper on Ilya's website. It is very clearly written. Markowitz sucks if you use long term returns/estimates but rocks if you use the 3 to 12 month return as your estimate for the next period, which is helpful in predicting momentum.

Well, I hope we are all successful! My experience with unconstrained minimum variance with mean return estimates has been that it leads to extreme, degenerate portfolios.

Doesn't have to. Use constraints. No shorts. No position except your bond ETF to take an allocation of more than 10% or whatever of the portfolio. That sort of stuff.

Sure, but after enough of those constraints, you may as well just equal weight. The bootstrapping works well to chop down the noise of the optimizer, worth a look.

Ilya's results suggest this is far from the case. But until I have coded it, I really could not say. To me, it seems very well worth looking at. But each to his own. When I get round to it, I'll post the algo.

Take 8 uncorrelated ETFs
Select best 4 assets based on 6 months momentum
weight them by volatility over the last 60 days
rebalance weekly

well, it does not make money at all, but volatility is low

Clone Algorithm
29
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
'''Adaptive asset allocation:
based on http://bpgassociates.com/docs/Adaptive-Asset-Allocation-A-Primer.pdf
'''
import numpy as np
import pandas as pd
def initialize(context):
    """Weekly adaptive allocation: momentum selection + inverse-vol weights."""
    # Assets whose trading history starts after this date are flagged
    # at rebalance time as having insufficient data.
    context.backtest_start_date = pd.to_datetime('1-1-2005')

    # Trade once a week, 30 minutes before the close of the first session.
    schedule_function(rebalance,
                      date_rules.week_start(),
                      time_rules.market_close(minutes=30))

    # Basket of broadly uncorrelated asset-class ETFs.
    context.etf = [
        sid(8554),   # SPY U.S. stocks
        sid(23921),  # TLT U.S. long-term Treasuries
        sid(26669),  # VNQ REIT
        sid(24705),  # EEM emerging markets
        sid(21769),  # IEV Europe stocks
        sid(14520),  # EWJ Japan stocks
        sid(23870),  # iShares US intermediate Treasuries
        sid(26807),  # gold
        sid(23911),  # SHY short-term Treasuries
    ]

def rebalance(context,data):
    """Weekly rebalance: top-4 six-month-momentum ETFs, inverse-vol weighted."""
    # Warn about assets whose history starts after the backtest start date.
    for s in context.etf:
        if s.start_date.replace(tzinfo=None)  > context.backtest_start_date:
            print 'not enough data for {}'.format(s.symbol)
    print 'rebalance day'
    '''
    MOMENTUM RANKING based on best performance of the last 60 days
    return stock_to_buy
    '''
    px = history(200, '1d', 'price')
    # Resample to business-month-end so pct_change(periods=6) spans ~6 months.
    px_m = px.asfreq('BM')
    momentum = px_m.pct_change(periods = 6).iloc[-1]
    # Inverse volatility of daily log returns over the last 60 days.
    # NOTE(review): a zero std would give an infinite weight — confirm this
    # cannot occur with the chosen universe.
    volatility = 1 / np.log(px.iloc[-60:]).diff().std()
    # Top 4 assets by 6-month momentum.
    # NOTE(review): Series.order() was removed in modern pandas (use
    # sort_values); fine on the legacy platform this ran on.
    momentum_stocks = momentum.order(ascending=False)[:4]
    volatility_stocks = volatility[momentum_stocks.index]
    # Normalize inverse-vol values into weights summing to 1.
    context.weigths = volatility_stocks/volatility_stocks.sum()
    for stock in data:
        # Exit anything held that fell out of the momentum selection.
        if stock not in momentum_stocks and context.portfolio.positions[stock].amount !=0:
            order_target_percent(stock, 0)
            #log.info("Exit on %s" % (stock.symbol))
        elif stock in momentum_stocks:
            order_target_percent(stock, context.weigths.loc[stock])
            #log.info("Buy  %s" % (stock.symbol))
        else:
            #log.info("Stay Flat on  %s" % (stock.symbol))
            pass
    
    
def handle_data(context, data):
    # Chart exposure and leverage every bar for inspection in the backtest UI.
    record(
           exposure=context.account.net_leverage, 
           leverage=context.account.leverage)
There was a runtime error.

Take two negatively correlated ETF: XLP and TLT.
Fixed proportion : 0.55-0.45.
Re balance monthly.
Plain vanilla fixed ratio stock-bond portfolio making money and less volatile.

Clone Algorithm
146
Loading...
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
Returns 1 Month 3 Month 6 Month 12 Month
Alpha 1 Month 3 Month 6 Month 12 Month
Beta 1 Month 3 Month 6 Month 12 Month
Sharpe 1 Month 3 Month 6 Month 12 Month
Sortino 1 Month 3 Month 6 Month 12 Month
Volatility 1 Month 3 Month 6 Month 12 Month
Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Plain vanilla fixed ratio stock-bond portfolio

def initialize(context):
    """Fixed-ratio XLP/TLT stock-bond portfolio, rebalanced monthly."""
    xlp = symbol('XLP')
    tlt = symbol('TLT')
    context.assets = [xlp, tlt]
    # Static 55/45 split between consumer staples and long Treasuries.
    context.weights = {xlp: 0.55, tlt: 0.45}
    # Rebalance five minutes before the close on each month's last session.
    schedule_function(rebalance,
                      date_rules.month_end(),
                      time_rules.market_close(minutes=5))
    set_long_only()

def rebalance(context, data):
    """Reset each asset back to its fixed target weight."""
    for asset in context.assets:
        target = context.weights[asset]
        order_target_percent(asset, target)
        
def handle_data(context, data):   
    # Chart account leverage every bar for inspection in the backtest UI.
    record(leverage = context.account.leverage)        
There was a runtime error.

Thanks Vladimir, i'll give a go to that kind of allocation systems for a while, actually i'd like something that include OIL (or commodities in general) and GOLD since they could have a lot of upside potential for the future