Modern Portfolio Theory - Minimum Variance Portfolio

This algorithm implements the classic Markowitz-style minimum variance portfolio, built on State Street's Select Sector SPDR ETF products. The optimization allocates across these sectors; applying leverage yields stronger results.
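For reference, the weights used throughout this thread come from minimizing w'Cw subject to the weights summing to one, which has the closed-form solution w = inv(C)1 / (1'inv(C)1). A minimal standalone sketch of that formula (the random data here is purely illustrative):

    import numpy as np

    def min_variance_weights(returns):
        # closed-form minimum variance weights: w = inv(C) @ 1 / (1' inv(C) 1)
        C = np.cov(returns, rowvar=False)   # sample covariance, assets in columns
        ones = np.ones(C.shape[0])
        v = np.linalg.solve(C, ones)        # inv(C) @ 1 without forming the inverse
        return v / ones.dot(v)              # normalize so the weights sum to 1

    # toy example: 252 days of fake returns for 9 sectors
    rng = np.random.default_rng(0)
    w = min_variance_weights(rng.normal(0.0005, 0.01, size=(252, 9)))
    print(w, w.sum())                       # sums to 1; individual weights may be negative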

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math

def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 126 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.tickers = ['xlf', 'xle', 'xlu', 'xlk', 'xlb', 'xlp', 'xly','xli', 'xlv']
    context.sids = [ sid(19656), sid(19655), 
                       sid(19660), sid(19658), 
                       sid(19654), sid(19659),
                       sid(19662), sid(19657),
                       sid(19661) ]
    context.data = pd.DataFrame({ k : pd.Series() for k in context.tickers } )
    context.mvp = np.array([0]*len(context.tickers))
    context.temp = False
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.tickers), 1)))
    

def handle_data(context, data):
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data[1:len(context.data.index)]
            
        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')
            
            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }
                
            #open all positions:
            startingCash = context.portfolio.starting_cash*context.leverage
            for i, e in enumerate(context.sids):
                currentPosition = context.portfolio.positions[e].amount
                newPosition = math.floor(startingCash*pimv[context.tickers[i]]/data[e].price)
                order(e, newPosition - currentPosition)
        else:
            context.daysToRecalibration -= 1

    record(c = context.portfolio.positions_value)

The first implementation takes gains out of the portfolio as income; better results may be obtained by reinvesting the gains of each re-calibration period, as seen below.
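The only code change from the first version is the capital base used at each recalibration, which now includes accumulated P&L:

    # before: size positions off the original capital only
    startingCash = context.portfolio.starting_cash*context.leverage
    # after: reinvest accumulated gains at each recalibration
    startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage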

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math

def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 126 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.tickers = ['xlf', 'xle', 'xlu', 'xlk', 'xlb', 'xlp', 'xly','xli', 'xlv']
    context.sids = [ sid(19656), sid(19655), 
                       sid(19660), sid(19658), 
                       sid(19654), sid(19659),
                       sid(19662), sid(19657),
                       sid(19661) ]
    context.data = pd.DataFrame({ k : pd.Series() for k in context.tickers } )
    context.mvp = np.array([0]*len(context.tickers))
    context.temp = False
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.tickers), 1)))
    

def handle_data(context, data):
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data[1:len(context.data.index)]
            
        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')
            
            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }
                
            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i, e in enumerate(context.sids):
                currentPosition = context.portfolio.positions[e].amount
                newPosition = math.floor(startingCash*pimv[context.tickers[i]]/data[e].price)
                order(e, newPosition - currentPosition)
        else:
            context.daysToRecalibration -= 1

    record(c = context.portfolio.positions_value)

Someone pointed out to me that I had a serious head start on the benchmark, which is absolutely true. Once the historical data management system is running well this will not be a problem; for now I fixed it by buying into the benchmark while the data is gathered, so that the benchmark and my returns start from the same ground.
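The fix adds two pieces to handle_data, both visible in the code below: buy the benchmark security (sid 8554) while the return history is being collected, then exit it before placing the first optimized trades:

    # buy into the benchmark while we build the starting data set
    if context.portfolio.starting_cash == context.portfolio.cash:
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price))

    # ...and once enough data has been gathered, get back out of it
    if context.portfolio.positions[sid(8554)].amount != 0:
        order(sid(8554), -1.0*context.portfolio.positions[sid(8554)].amount)
        return  # wait a day for the trades to clear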

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math

def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 126 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.tickers = ['xlf', 'xle', 'xlu', 'xlk', 'xlb', 'xlp', 'xly','xli', 'xlv']
    context.sids = [ sid(19656), sid(19655), 
                       sid(19660), sid(19658), 
                       sid(19654), sid(19659),
                       sid(19662), sid(19657),
                       sid(19661) ]
    context.data = pd.DataFrame({ k : pd.Series() for k in context.tickers } )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.tickers), 1)))
    

def handle_data(context, data):
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price) )
        
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data[1:len(context.data.index)]
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order(sid(8554), -1.0*context.portfolio.positions[sid(8554)].amount)
            #wait a day for the trades to clear before placing the new trades.
            return
            
        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')
            
            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }
                
            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i, e in enumerate(context.sids):
                currentPosition = context.portfolio.positions[e].amount
                newPosition = math.floor(startingCash*pimv[context.tickers[i]]/data[e].price)
                order(e, newPosition - currentPosition)
        else:
            context.daysToRecalibration -= 1

    record(c = context.portfolio.positions_value)

Hi Wayne,

Here's your most recently posted algorithm, starting in 2011. It trades pretty infrequently, and I'm wondering if the return relative to the benchmark might be due more to your mix of securities than to the trading?

Grant


The strategy trades every 126 days (half a trading year) unless you change the parameters. Outperformance is not necessarily the goal of this portfolio; the minimum variance portfolio is a commonly used tool in asset allocation. The securities were chosen because they span the assets within the S&P 500 while allowing diversification across the industry sectors as they are determined by State Street (or possibly by MSCI's GICS, or someone else). This means that when estimating the covariance matrix we have only 9*8/2 + 9 = 45 parameters to estimate, as opposed to doing all 500 constituents of the S&P 500, which would be 500*499/2 + 500 = 125,250 parameters. With 252 observations, estimating 45 parameters gives 5.6 observations per parameter, which is statistically rather shaky; estimating 125,250 parameters with 252 observations is hopeless. Another approach would be to select the assets that compose some high percentage of the S&P 500, say the top N securities, and run the same algorithm on those. I am not sure how this would be accomplished in Quantopian, but it could prove quite powerful with a long enough observation window.
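The parameter counts above follow from the n(n+1)/2 free entries of an n x n covariance matrix; a quick sanity check:

    def cov_params(n):
        # n*(n-1)/2 off-diagonal covariances plus n variances
        return n*(n - 1)//2 + n

    print(cov_params(9))    # 45
    print(cov_params(500))  # 125250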

The short answer to your question is that this algorithm performs the way it does precisely because of the securities I have chosen. The point is that this algorithm is all about selecting the weights that maximize diversification benefits across industry sectors. The algorithm is re-calibrated every six months to account for the fact that multivariate return distributions are not stationary: correlations shift, and covariance matrices vary over time.

Why does the strategy's backtest performance differ when you modify the ticker/sid order sequence?

For example, compare this sequence to the original:


    context.tickers = ['xli','xlk', 'xlb', 'xlp', 'xly', 'xlv','xlf', 'xle', 'xlu']  
    context.sids = [   sid(19657),sid(19658),  
                       sid(19654), sid(19659),  
                       sid(19662), sid(19661),  
                       sid(19656),sid(19655), sid(19660) ]  

I found that context.data.cov() arranges the tickers in alphabetical order, so in the following code the pimv weights no longer match the context.tickers dictionary.
How can we avoid this situation?

pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }  

The log from printing context.data.cov():

2005-07-05 handle_data:39 INFO recalibrating...
2005-07-05 PRINT context.data.cov() =
         xlb      xle      xlf      xli      xlk      xlp      xlu      xlv      xly
xlb 0.000129 0.000096 0.000055 0.000074 0.000058 0.000041 0.000052 0.000039 0.000064
xle 0.000096 0.000212 0.000038 0.000056 0.000042 0.000026 0.000057 0.000023 0.000045
xlf 0.000055 0.000038 0.000059 0.000048 0.000045 0.000031 0.000041 0.000032 0.000047
xli 0.000074 0.000056 0.000048 0.000065 0.000053 0.000036 0.000042 0.000036 0.000055
xlk 0.000058 0.000042 0.000045 0.000053 0.000073 0.000035 0.000037 0.000034 0.000054
xlp 0.000041 0.000026 0.000031 0.000036 0.000035 0.000034 0.000026 0.000029 0.000037
xlu 0.000052 0.000057 0.000041 0.000042 0.000037 0.000026 0.000065 0.000023 0.000036
xlv 0.000039 0.000023 0.000032 0.000036 0.000034 0.000029 0.000023 0.000046 0.000034
xly 0.000064 0.000045 0.000047 0.000055 0.000054 0.000037 0.000036 0.000034 0.000062)

UPDATE: I tried adding the sorted function; not sure whether this is right or not!
pimv = { e:pimv[i,0] for i,e in enumerate(sorted(context.tickers)) }
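One way to sidestep the ordering question entirely (an untested sketch, not from the thread) is to take the ticker order from the covariance matrix's own columns, so the weight dictionary can never drift out of sync:

    cov = context.data.cov()
    precision = np.asmatrix(la.inv(cov))
    pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
    # index by cov's columns rather than a separately maintained ticker list
    pimv = { t: pimv[i,0] for i,t in enumerate(cov.columns) }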

My backtest:

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math

def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 126 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.tickers = ['xli','xlk', 'xlb', 'xlp', 'xly', 'xlv','xlf', 'xle', 'xlu']  
    context.sids = [   sid(19657),sid(19658),  
                       sid(19654), sid(19659),  
                       sid(19662), sid(19661),  
                       sid(19656),sid(19655), sid(19660) ]  
    context.data = pd.DataFrame({ k : pd.Series() for k in context.tickers } )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.tickers), 1)))
    

def handle_data(context, data):
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price) )
        
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.sids) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data[1:len(context.data.index)]
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order(sid(8554), -1.0*context.portfolio.positions[sid(8554)].amount)
            #wait a day for the trades to clear before placing the new trades.
            return
            
        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')
            
            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }
                
            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i, e in enumerate(context.sids):
                currentPosition = context.portfolio.positions[e].amount
                newPosition = math.floor(startingCash*pimv[context.tickers[i]]/data[e].price)
                order(e, newPosition - currentPosition)
        else:
            context.daysToRecalibration -= 1

    record(c = context.portfolio.positions_value)

A couple of suggestions:
- If you use minute mode you will be able to use the history() function; that way your algo will not have a warm-up period while it collects data.
- Get rid of context.tickers altogether to make the indexing simpler. If you want the symbols, you can get them from the sids in context.sids via sid.symbol.

I did not go through and spot check this, but I believe the indexing should be good again. I got rid of context.tickers to simplify indexing. I also log the symbol when orders are placed so you can see how to do that too.
Hope this is helpful. You should probably try to move over to minute mode at some point too.

EDIT:
The indexing is still off in the example backtest. The simplest way to get it to work is to define context.sids as sorted([list of sids]); then they will have the same ordering. I also included a function for the minimum variance calculation with correct indexing.

def minimum_variance_weights(R, nobs=252, leverage=1.0):  
    '''  
    Minimum Variance Portfolio weights.

    params  
      R: DataFrame of historical returns.  
      nobs: Number of observations.  
      leverage: scalar multiplier for target weights

    returns  
      Series of target weights indexed by columns of R  
    '''  
    R = R.tail(nobs)  
    C_inv = np.linalg.inv(R.cov())  
    ones = np.ones(len(C_inv))  
    v = C_inv.dot(ones)  
    w = v / ones.T.dot(v)  
    return pd.Series(w, index=R.columns) * leverage  
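A hypothetical usage sketch, given the function above (the returns frame here is made up; in the algorithm it would be context.data):

    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(1)
    R = pd.DataFrame(rng.normal(0.0, 0.01, size=(300, 3)),
                     columns=['xlb', 'xle', 'xlf'])
    w = minimum_variance_weights(R, nobs=252)
    print(w)        # Series of weights indexed by xlb/xle/xlf
    print(w.sum())  # 1.0 (times leverage)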
import pandas as pd
import numpy as np 
import numpy.linalg as la
import math


def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 126 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.sids = [   sid(19657),sid(19658),  
                       sid(19654), sid(19659),  
                       sid(19662), sid(19661),  
                       sid(19656),sid(19655), sid(19660) ]  
    
    context.data = pd.DataFrame({ k : pd.Series() for k in context.sids} )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.sids), 1)))
    

def handle_data(context, data):
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price))
    
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        # 
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data.tail(context.nobs)
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order_target(sid(8554), 0)
            #wait a day for the trades to clear before placing the new trades.
            return

        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')

            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.sids) }

            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i in context.sids:
                currentPosition = context.portfolio.positions[i].amount
                newPosition = math.floor(startingCash*pimv[i] / data[i].price)
                order(i, newPosition - currentPosition)
                log.info('Ordering %s shares of %s'%(newPosition - currentPosition, i.symbol))
                
        else:
            context.daysToRecalibration -= 1

        record(c = context.portfolio.positions_value)

I took the changes made by everyone above and also added TLT (a bond ETF) to the basket. It really seems to smooth the backtest, and it gives impressive returns with very low drawdown over the time period (2002 - 2014). Now I notice that the algo appears to be going short, which surprised me. Should this be happening? I was under the impression that this was long-only.

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math


def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 63 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.sids = [   sid(19657), sid(19658),  
                       sid(19654), sid(19659),  
                       sid(19662), sid(19661),  
                       sid(19656), sid(19655), 
                       sid(19660), sid(23921) ]  
    
    context.data = pd.DataFrame({ k : pd.Series() for k in context.sids} )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.sids), 1)))
    

def handle_data(context, data):
    
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price) )
    
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        # 
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data.tail(context.nobs)
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order_target(sid(8554), 0)
            #wait a day for the trades to clear before placing the new trades.
            return

        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')

            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.sids) }

            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i in context.sids:
                currentPosition = context.portfolio.positions[i].amount
                newPosition = math.floor(startingCash*pimv[i] / data[i].price)
                order(i, newPosition - currentPosition)
                log.info('Ordering %s shares of %s'%(newPosition - currentPosition, i.symbol))
                
        else:
            context.daysToRecalibration -= 1

        record(c = context.portfolio.positions_value)

Going short is normal for a minimum variance portfolio. The equation returns the weights that minimize total volatility, but it does not distinguish between upside and downside volatility, and since it is unconstrained it will take short positions in order to reduce the total volatility.
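To see why shorts appear, consider a toy two-asset case with high correlation and unequal variances; the closed-form solution shorts the riskier asset (numbers made up for illustration):

    import numpy as np

    # sigma1 = 20%, sigma2 = 10%, correlation = 0.9
    C = np.array([[0.040, 0.018],
                  [0.018, 0.010]])
    ones = np.ones(2)
    v = np.linalg.solve(C, ones)
    print(v / ones.dot(v))   # ~[-0.57, 1.57]: short the high-variance asset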

Could this be modified to constrain it to be long-only?

It can, but there is no single correct way: you could use an optimization routine, or simply shift the results. In this backtest, I use the same weights returned by the minimum variance formula, but if any of them are negative, I add the absolute value of the smallest weight to all of them and normalize the result. To me, this seems like a simple and intuitive way to turn this into a long-only strategy.
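In isolation, the shift-and-normalize step described above looks like this (a sketch with hypothetical weights):

    import pandas as pd

    def shift_to_long_only(weights):
        # if any weight is negative, shift all weights up by |min| and renormalize
        w = pd.Series(weights)
        if w.min() < 0:
            w = w + abs(w.min())
            w = w / w.sum()
        return w

    print(shift_to_long_only({'xle': 0.5, 'tlt': 0.7, 'xlf': -0.2}))
    # xle 0.4375, tlt 0.5625, xlf 0.0 -- the most negative weight becomes zero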

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math


def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 63 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    context.sids = [   sid(19657), sid(19658),  
                       sid(19654), sid(19659),  
                       sid(19662), sid(19661),  
                       sid(19656), sid(19655), 
                       sid(19660), sid(23921) ]  
    
    context.data = pd.DataFrame({ k : pd.Series() for k in context.sids} )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.sids), 1)))
    

def handle_data(context, data):
    
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price) )
    
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        # 
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data.tail(context.nobs)
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order_target(sid(8554), 0)
            #wait a day for the trades to clear before placing the new trades.
            return

        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')

            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = pd.Series({ e:pimv[i,0] for i,e in enumerate(context.sids) })
            
            # Shift the weights by the absolute value of the smallest weight
            # to make the strategy long only. 
            min_weight = min(pimv.values)
            if min_weight < 0:
                pimv += abs(min_weight)
                pimv /= pimv.sum()
            pimv = pimv.to_dict()

            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i in context.sids:
                currentPosition = context.portfolio.positions[i].amount
                newPosition = math.floor(startingCash*pimv[i] / data[i].price)
                order(i, newPosition - currentPosition)
                log.info('Ordering %s shares of %s'%(newPosition - currentPosition, i.symbol))
                
        else:
            context.daysToRecalibration -= 1

        record(c = context.portfolio.positions_value)

Looking at the initial algorithm and the one where you made changes and removed the ticker list: what accounts for the major discrepancy in returns and trades? Is it the issue that was brought up about the ticker/sid order sequence?

Yes, it was actually not fixed in the last example. When using the dict comprehensions to make a Series, the sids get sorted into ascending order, so I made context.sids a sorted list to avoid indexing errors later. I think it's good here, but I have not picked through it.

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math


def initialize(context):
    #parameters
    context.nobs = 252
    context.recalibrate = 63 #re-estimate every so often (in days)
    context.leverage= 1
    
    #setup the identifiers and data storage
    # Sort sids here to avoid indexing errors when using dict comprehensions later 
    context.sids = sorted([sid(19657), sid(19658),  
                           sid(19654), sid(19659),  
                           sid(19662), sid(19661),  
                           sid(19656), sid(19655), 
                           sid(19660), sid(23921)])

    context.data = pd.DataFrame({ k : pd.Series() for k in context.sids} )
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.sids), 1)))
    

def handle_data(context, data):
    
    if context.portfolio.starting_cash == context.portfolio.cash:
        #buy into the benchmark while we build the starting data set
        order(sid(8554), math.floor(context.portfolio.starting_cash/data[sid(8554)].close_price) )
    
    if len(context.data.index) < context.nobs:
        #still recording data
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.Series({k: float(data[k].returns()) for k in context.sids})
        # 
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data.tail(context.nobs)
        
        if context.portfolio.positions[sid(8554)].amount != 0:
            #data gathering time is done, get out of the benchmark
            order_target(sid(8554), 0)
            #wait a day for the trades to clear before placing the new trades.
            return

        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            #recalibrate
            log.info('recalibrating...')

            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = pd.Series({ e:pimv[i,0] for i,e in enumerate(context.sids) })
            
            # Shift the weights by the absolute value of the smallest weight
            # to make the strategy long only. 
            min_weight = min(pimv.values)
            if min_weight < 0:
                pimv += abs(min_weight)
                pimv /= pimv.sum()
            pimv = pimv.to_dict()

            #open all positions:
            startingCash = (context.portfolio.starting_cash+context.portfolio.pnl)*context.leverage
            for i in context.sids:
                currentPosition = context.portfolio.positions[i].amount
                newPosition = math.floor(startingCash*pimv[i] / data[i].price)
                order(i, newPosition - currentPosition)
                log.info('Ordering %s shares of %s'%(newPosition - currentPosition, i.symbol))
                
        else:
            context.daysToRecalibration -= 1

        record(c = context.portfolio.positions_value)

Here's an attempt at a minimum variance long-only algorithm, using scipy.optimize.minimize. Seems to work nicely. --Grant

import numpy as np
from pytz import timezone
import scipy.optimize

trading_freq = 20

def initialize(context):
    
    context.stocks = [ sid(19662),  # XLY Consumer Discretionary SPDR Fund
                       sid(19656),  # XLF Financial SPDR Fund
                       sid(19658),  # XLK Technology SPDR Fund
                       sid(19655),  # XLE Energy SPDR Fund
                       sid(19661),  # XLV Health Care SPDR Fund
                       sid(19657),  # XLI Industrial SPDR Fund
                       sid(19659),  # XLP Consumer Staples SPDR Fund
                       sid(19654),  # XLB Materials SPDR Fund
                       sid(19660)]  # XLU Utilities SPDR Fund
    
    context.x0 = 1.0*np.ones_like(context.stocks)/len(context.stocks)

    set_commission(commission.PerShare(cost=0.013, min_trade_cost=1.3))
    
    context.day_count = -1

def handle_data(context, data):
     
    # Trade only once per day
    loc_dt = get_datetime().astimezone(timezone('US/Eastern'))
    if loc_dt.hour == 16 and loc_dt.minute == 0:
        context.day_count += 1
        pass
    else:
        return
    
    # Limit trading frequency
    if context.day_count % trading_freq != 0.0:
        return
    
    prices = history(21,'1d','price').as_matrix(context.stocks)
    ret = prices[1:,:]-prices[0:-1,:] # daily price changes (differences, not percentage returns)
    
    bnds = ((0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1),(0,1))
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0})
    
    res= scipy.optimize.minimize(variance, context.x0, args=ret,method='SLSQP',constraints=cons,bounds=bnds)
    
    allocation = res.x
    allocation[allocation<0] = 0
    denom = np.sum(allocation)
    if denom != 0:
        allocation = allocation/np.sum(allocation)
    
    for i,stock in enumerate(context.stocks):
        order_target_percent(stock,allocation[i])
        
def variance(x,*args):
    
    p = np.asarray(args)
    Acov = np.cov(p.T)
    
    return np.dot(x,np.dot(Acov,x))
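The same SLSQP minimization can be exercised outside the backtester. A standalone sketch with made-up return data (note the variance helper here takes the return matrix directly, rather than unpacking *args as in the algorithm above):

    import numpy as np
    import scipy.optimize

    rng = np.random.default_rng(2)
    ret = rng.normal(0.0, 0.01, size=(20, 9))   # 20 days x 9 assets, illustrative

    def variance(x, ret):
        return x.dot(np.cov(ret.T)).dot(x)      # portfolio variance x' C x

    x0 = np.ones(9)/9                           # start from equal weights
    bnds = [(0, 1)]*9                           # long-only bounds
    cons = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}  # fully invested
    res = scipy.optimize.minimize(variance, x0, args=(ret,), method='SLSQP',
                                  constraints=cons, bounds=bnds)
    print(res.x, res.x.sum())                   # long-only weights summing to 1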

Over a longer time period. --Grant


Hey guys, I took a shot at a minimum variance portfolio myself, although for now the results are only available as an IPython notebook. Perhaps you will find it interesting: http://nbviewer.ipython.org/github/Marigold/universal-portfolios/blob/master/modern-portfolio-theory.ipynb.

Here is how it looks if rebalanced monthly after adding TLT to the list.

# CAPM   Capital Asset Pricing Model
# https://www.quantopian.com/posts/modern-portfolio-theory-minimum-variance-portfolio

import pandas as pd
import numpy as np 
import numpy.linalg as la
import math

wind =126
lev = 1.0

def initialize(context):
    context.recalibrate = 22 
    set_symbol_lookup_date('2015-01-01')
    context.tickers = ['tlt', 'xlf', 'xle', 'xlu', 'xlk', 'xlb', 'xlp', 'xly', 'xli', 'xlv']
    context.secs = [symbol('tlt'), symbol('xlf'), symbol('xle'), symbol('xlu'), symbol('xlk'), 
                    symbol('xlb'), symbol('xlp'), symbol('xly'), symbol('xli'), symbol('xlv')]    
    set_commission(commission.PerShare(cost=.005, min_trade_cost=1))
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0.02))    
#    schedule_function(trade, date_rules.every_day(), time_rules.market_close( minutes=5))
            
    context.data = pd.DataFrame({ k : pd.Series() for k in context.tickers } )
    context.mvp = np.array([0]*len(context.tickers))
    context.temp = False
    context.daysToRecalibration = 0
    context.onevec = np.asmatrix(np.ones((len(context.tickers), 1)))    

def handle_data(context, data):
    if len(context.data.index) < wind:
        #still recording data
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.secs) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
    else:
        newRow = pd.DataFrame({k:float(data[e].returns()) for k,e in zip(context.tickers, context.secs) },index=[0])
        context.data = context.data.append(newRow, ignore_index = True)
        context.data = context.data[1:len(context.data.index)]
            
        if context.daysToRecalibration == 0:
            context.daysToRecalibration = context.recalibrate
            
            #calculate the minimum variance portfolio weights;
            precision = np.asmatrix(la.inv(context.data.cov()))
            pimv = precision*context.onevec / (context.onevec.T*precision*context.onevec)
            pimv = { e:pimv[i,0] for i,e in enumerate(context.tickers) }

            for i, e in enumerate(context.secs):
                order_target_percent(e, pimv[context.tickers[i]])
        else:
            context.daysToRecalibration -= 1
            
    record(Leverage = context.account.leverage) 

Mojmir Vinkler, very interesting workbook. Could you or somebody else make it shareable on the research platform?