Applying Meucci’s Checklist to Portfolio Construction

This toy example algorithm incorporates some interesting techniques from Attilio Meucci to improve portfolio optimization. The key points are:

  • Use longer time series for estimation, so the law of large numbers works in your favour
  • Apply flexible probabilities to enhance estimation, i.e. weight historical observations differently, e.g. give more recent data a higher weight
  • Shrink your mean and covariance matrix to reduce estimation risk (a standalone sketch of these two estimation steps follows below)
  • Use a 2-step mean-variance optimization approach to choose the optimal portfolio on the efficient frontier according to your preference (satisfaction)

See also the post https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt for some background and some of the issues that arise in portfolio optimization.
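
Here is a minimal standalone sketch of the flexible-probabilities weighting and the simple shrinkage step (points two and three above). It is not part of the attached algorithm: the returns are random placeholders standing in for the data.history() percent changes, while the 504-day half-life and 10% shrinkage weights match the values used in the attached code.

import numpy as np

# Random placeholder returns: ~3 years of daily data for 7 assets
np.random.seed(0)
n_scenario, n_asset = 756, 7
returns = np.random.normal(0.0005, 0.01, size=(n_scenario, n_asset))

# Flexible probabilities via exponential decay with a 2-year (504-day) half-life:
# the most recent observation gets the largest probability
half_life = 504
lam = np.log(2) / half_life
probs = np.exp(-lam * np.arange(n_scenario)[::-1])
probs /= probs.sum()

# Probability-weighted mean and covariance
mu = probs @ returns
demeaned = returns - mu
cov = (demeaned * probs[:, None]).T @ demeaned

# Simple shrinkage: pull the mean towards zero and the covariance towards a
# trace-scaled identity matrix (same idea as rnr_simple_shrinkage below)
mu_shrk_wt, cov_shrk_wt = 0.1, 0.1
mu_shrunk = (1 - mu_shrk_wt) * mu
cov_shrunk = (1 - cov_shrk_wt) * cov + cov_shrk_wt * np.eye(n_asset) * np.trace(cov) / n_asset

print(mu_shrunk.round(5))
print(np.diag(cov_shrunk).round(6))

The shrunk mean and covariance are then fed into the 2-step mean-variance optimization, which you can see in full in the attached algorithm.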

Thanks

Peter

[Attached backtest and algorithm code]
"""
Applying Meucci's Checklist Steps - Toy Example

@author: Peter Chan       www.returnandrisk.com

See also: https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt
"""
import numpy as np
import cvxopt as opt
from cvxopt import solvers, blas
import statsmodels.stats.weightstats as ws

def rnr_simple_shrinkage(mu, cov, mu_shrk_wt=0.1, cov_shrk_wt=0.1):
    """Reference: Attilio Meucci's Matlab file S_MVHorizon.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    """
    n_asset = len(mu)
    
    # Mean shrinkage 
    Shrk_Exp = np.zeros(n_asset)
    Exp_C_Hat = (1 - mu_shrk_wt) * mu + mu_shrk_wt * Shrk_Exp
    
    # Covariance shrinkage
    Shrk_Cov = np.eye(n_asset) * np.trace(cov) / n_asset
    Cov_C_Hat = (1-cov_shrk_wt) * cov + cov_shrk_wt * Shrk_Cov
    return((Exp_C_Hat, Cov_C_Hat))

def rnr_efficient_frontier_qp_rets(n_portfolio, covariance, expected_values):
    """
    Port of Attilio Meucci's Matlab file EfficientFrontierQPRets.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    This function returns the n_portfolio x 1 vector expected returns,
                          the n_portfolio x 1 vector of volatilities and
                          the n_portfolio x n_asset matrix of weights
    of n_portfolio efficient portfolios whose expected returns are equally spaced along the whole range of the efficient frontier
    """
    solvers.options['show_progress'] = False
    n_asset = covariance.shape[0]
    expected_values = opt.matrix(expected_values)
    
    # Determine weights, return and volatility of minimum-risk portfolio
    S = opt.matrix(covariance)
    pbar = opt.matrix(np.zeros(n_asset)) 
    # 1. positive weights
    G = opt.matrix(0.0, (n_asset, n_asset))
    G[::n_asset+1] = -1.0
    h = opt.matrix(0.0, (n_asset, 1))
    # 2. weights sum to one
    A = opt.matrix(1.0, (1, n_asset))
    b = opt.matrix(1.0)
    x0 = opt.matrix(1.0 / n_asset * np.ones(n_asset))
    min_x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']
    min_ret = blas.dot(min_x.T, expected_values)
    min_vol = np.sqrt(blas.dot(min_x, S * min_x))
    
    # Determine weights, return and volatility of maximum-risk portfolio
    max_idx = np.asscalar(np.argmax(expected_values))
    max_x = np.zeros(n_asset)
    max_x[max_idx] = 1
    max_ret = expected_values[max_idx]
    max_vol = np.sqrt(np.dot(max_x, np.dot(covariance, max_x)))
    
    # Slice efficient frontier returns into n_portfolio segments
    target_rets = np.linspace(min_ret, max_ret, n_portfolio).tolist()
    
    # compute the n_portfolio weights and risk-return coordinates of the optimal allocations for each slice
    weights = np.zeros((n_portfolio, n_asset))
    rets = np.zeros(n_portfolio)
    vols = np.zeros(n_portfolio)
    # Start with min vol portfolio
    weights[0,:] = np.asarray(min_x).T
    rets[0] = min_ret
    vols[0] = min_vol
    
    for i in range(1, n_portfolio-1):
        # Determine least risky portfolio for given expected return
        A = opt.matrix(np.vstack([np.ones(n_asset), expected_values.T]))
        b = opt.matrix(np.hstack([1, target_rets[i]]))
        x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']    
        weights[i,:] = np.asarray(x).T
        rets[i] = blas.dot(x.T, expected_values)
        vols[i] = np.sqrt(blas.dot(x, S * x))    
    
    # Add max ret portfolio
    weights[n_portfolio-1,:] = np.asarray(max_x).T
    rets[n_portfolio-1] = max_ret
    vols[n_portfolio-1] = max_vol
    return(weights, rets, vols)

def initialize(context):
    """
    Called once at the start of the algorithm.
    """   
    context.day = 0
    context.min_days_data = 504 # 2 years
    context.assets = [symbol('IBM'), symbol('GLD'), symbol('XOM'), symbol('AAPL'), symbol('MSFT'), symbol('TLT'), symbol('SHY')]
    context.n_asset = len(context.assets)
    context.n_portfolio = 40
    # Turn off the slippage model
    set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers Commission)
    set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    # Rebalance every day
    schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_close())
             
def my_rebalance(context,data):
    # Using expanding data window with minimum of min_days_data days
    context.day += 1 
    if context.day < context.min_days_data:
        return
    if context.day == context.min_days_data:
        log.info('Start trading : %s' % str(get_datetime('US/Eastern')))
    prices = data.history(context.assets, "price", context.day, "1d")
    returns = prices.pct_change().dropna()
    n_scenario = len(returns)
    
    # Set Flexible Probabilities using exponential smoothing
    half_life_prjn = 2 * 252 # in days
    lambda_prjn = np.log(2) / half_life_prjn
    probs = np.exp(-lambda_prjn * (np.arange(0, n_scenario)[::-1]))
    probs = probs / sum(probs)
    
    # Compute weighted mean and covariance matrix using flex probs
    fp_stats = ws.DescrStatsW(returns, probs)
    mu = fp_stats.mean
    sigma2 = fp_stats.cov
    
    # Perform shrinkage to mitigate estimation risk
    mu_shrk, cov_shrk = rnr_simple_shrinkage(mu, sigma2)

    # Step 1: mean-variance quadratic optimization for efficient frontier
    weights, rets, vols = rnr_efficient_frontier_qp_rets(context.n_portfolio, cov_shrk, mu_shrk)

    # Step 2: evaluate satisfaction for all allocations on the frontier
    satisfaction = rets / vols # Sharpe ratio

    # Choose the allocation that maximises satisfaction
    max_sat_idx = np.argmax(satisfaction)
    max_sat = satisfaction[max_sat_idx]
    max_sat_wt = weights[max_sat_idx, :]
    
    # Rebalance portfolio accordingly
    asset_weights = 'Weights: '
    sum_weights = 0.0
    for stock, weight in zip(prices.columns, max_sat_wt):
        order_target_percent(stock, weight) 
        asset_weights += '%s=%s, ' % (stock.symbol, round(weight,2))
        sum_weights += weight
    
    # Log and record some stuff
    asset_weights += 'SUM=%s' % (sum_weights)
    log.info(asset_weights)
    # Can only record max of 5 variables
    # Recording weights of 5 assets
    record(IBM=max_sat_wt[0], GLD=max_sat_wt[1], XOM=max_sat_wt[2], AAPL=max_sat_wt[3], TLT=max_sat_wt[5])
    #record(MSFT=max_sat_wt[4], SHY=max_sat_wt[6], Sum_weights=sum_weights)
    
    



Didn't Markowitz win a Nobel Prize for this stuff, but end up just equally weighting stocks and bonds in his own portfolio? Or something like that...

Here are the results using the same assets, but without any charlatanism (science) added: just equal weights with monthly rebalancing.

Why add mumbo jumbo when you can do better by being "naive"?

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd

def initialize(context):
    # Trade the same seven securities as in the original post.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )
                                    
    # Equal target weight per security: 7 x 0.12 = 84% invested, ~16% in cash
    context.weights = .12


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights)

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

Sorry, I kept too much cash in my other backtest; here is one with less cash...

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd

def initialize(context):
    # Trade the same seven securities as in the original post.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )
                                    
    # Equal target weight per security: 7 x 0.13 = 91% invested, ~9% in cash
    context.weights = .13


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights)

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

Or, since our universe only contains stocks that we know have done well in the past, we could just assign weights randomly.

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd
import numpy as np

def initialize(context):
    # Trade the same seven securities as in the original post; weights are drawn randomly at each rebalance.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    

    # Draw random weights and normalise them so they sum to one
    a = np.random.random(len(context.security_list))
    a /= a.sum()
    
    context.weights = pd.Series(index=context.security_list, data=a)
    
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights[sec])

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

James, so da*n funny... this entire industry is a joke.

Here is the same "algo" with 98% of the cash invested at all times... now give me your 2/20 before I close my fund to new investors.

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd

def initialize(context):
    # Trade the same seven securities as in the original post.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )
                                    
    # Equal target weight per security: 7 x 0.14 = 98% invested, ~2% in cash
    context.weights = .14


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights)

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

In closing, I am adding some leverage to the same "algo"... now turn over your entire life savings PLUS mortgage your house and invest it all in my black-box fund, before I close it to new investors ;)

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd

def initialize(context):
    # Trade the same seven securities as in the original post.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )
                                    
    # Equal target weight per security: 7 x 0.42 = 294% gross exposure, i.e. roughly 3x leverage
    context.weights = .42


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights)

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

Hi James

To be clear, I did not cherry-pick these stocks. They are the exact same ones used in the original post https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt. The authors of that post are Dr. Thomas Starke, David Edwards, and Dr. Thomas Wiecki...

My algo above doesn't actually start trading until 2007; I have corrected the code to make this clearer and will show the results in another reply later. Attached are YOUR random portfolio construction results (2007-2015) that I got when I ran it.

Thanks

Peter

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd
import numpy as np

def initialize(context):
    # Trade the same seven securities as in the original post; weights are drawn randomly at each rebalance.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    

    # Draw random weights and normalise them so they sum to one
    a = np.random.random(len(context.security_list))
    a /= a.sum()
    
    context.weights = pd.Series(index=context.security_list, data=a)
    
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights[sec])

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

Hi Behavioral Trader

Well, where do I start... First off, there is no charlatanism or mumbo jumbo here, and the entire industry is not a joke. It would be a better community, more willing to share and help each other, if members did not jump to conclusions and make such remarks; such behaviour is not conducive to that.

My algo above doesn't actually start trading until 2007; I have corrected the code to make this clearer and will show the results in another reply later. Attached are YOUR equal weight portfolio construction results (2007-2015) that I got when I ran it.

Thanks

Peter

[Attached backtest and algorithm code]
"""
This algorithm defines a long-only equal weight portfolio and 
rebalances it at a user-specified frequency.
"""

# Import the libraries we will use here
import datetime
import pandas as pd

def initialize(context):
    # Trade the same seven securities as in the original post.
    context.security_list = symbols(
                           'ibm','gld','xom','aapl','msft','tlt','shy'
                         )
                                    
    # Equal target weight per security: 7 x 0.14 = 98% invested, ~2% in cash
    context.weights = .14


    # Rebalance on the first trading day of each month,
    # at 11:00 AM ET (1 hour and 30 minutes after market open).
    schedule_function(rebalance, 
                      date_rules.month_start(),
                      time_rules.market_open(hours = 1, minutes = 30))

def rebalance(context, data):

    # Do the rebalance. Loop through each of the stocks and order to
    # the target percentage.  If already at the target, this command 
    # doesn't do anything. A future improvement could be to set rebalance
    # thresholds.
    for sec in context.security_list:
        if data.can_trade(sec):
            order_target_percent(sec, context.weights)

    # Get the current exchange time, in the exchange timezone 
    exchange_time = get_datetime('US/Eastern')
    log.info("Rebalanced to target portfolio weights at %s" % str(exchange_time))

To the rest of the Quantopian community who don't jump to conclusions...

Before I post the results of the toy example, let me clear the air:

  • the intention of this post was to show that it is possible to use Markowitz-style portfolio optimization in Quantopian to build portfolios that suit your return/risk preferences; on the other thread, there were quite a few unanswered questions about how to do it
  • it was never about who has the best algo
  • it's not the only way to construct portfolios; as I said from the get-go, it's a toy example, but I personally think it's worth having a look at and experimenting with, and there are a lot of tweaks you can do to improve it...
  • of course, there's a lot more to know and you need to do your own homework!

Best of skill

Peter

One thing I want to highlight is that your estimates of the covariance matrix and the expected returns are key to better optimization results. The optimizer is particularly sensitive to the expected-return input.

I have just used a lightly adjusted sample mean and sample covariance in the toy example, but this is where your own modelling skill should be applied...
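
As one possible direction (not used in the algo above, and sketched here only under assumptions, with random placeholder returns standing in for the data.history() percent changes), the simple trace-scaled-identity covariance shrinkage could be swapped for scikit-learn's Ledoit-Wolf estimator, which chooses the shrinkage intensity from the data itself:

import numpy as np
from sklearn.covariance import LedoitWolf

# Placeholder returns: ~2 years of daily data for 7 assets
np.random.seed(1)
returns = np.random.normal(0.0005, 0.01, size=(504, 7))

# Ledoit-Wolf shrinks the sample covariance towards a structured target,
# with the shrinkage intensity estimated from the data
lw = LedoitWolf().fit(returns)
cov_lw = lw.covariance_
print("Ledoit-Wolf shrinkage intensity:", round(lw.shrinkage_, 3))

Note that this estimator weights all observations equally, so it would be a starting point to combine with the flexible-probabilities weighting rather than a drop-in replacement, and the expected-return estimate would still need separate attention.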

The amendments I made to the original code are as follows:

  • aligned the trading start date with the algo start date
  • changed commission and slippage to the defaults
  • changed rebalancing to time_rules.market_open(hours = 1, minutes = 30)

Conclusion: it outperforms the random and equal-weight construction techniques shown above.

Also, note how the beta-to-SPY is low enough for the contest...

Thanks

Peter

[Attached backtest and algorithm code]
"""
Applying Meucci's Checklist Steps - Toy Example AMENDED

@author: Peter Chan       www.returnandrisk.com

See also: https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt
"""
import numpy as np
import cvxopt as opt
from cvxopt import solvers, blas
import statsmodels.stats.weightstats as ws

def rnr_simple_shrinkage(mu, cov, mu_shrk_wt=0.1, cov_shrk_wt=0.1):
    """Reference: Attilio Meucci's Matlab file S_MVHorizon.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    """
    n_asset = len(mu)
    
    # Mean shrinkage 
    Shrk_Exp = np.zeros(n_asset)
    Exp_C_Hat = (1 - mu_shrk_wt) * mu + mu_shrk_wt * Shrk_Exp
    
    # Covariance shrinkage
    Shrk_Cov = np.eye(n_asset) * np.trace(cov) / n_asset
    Cov_C_Hat = (1-cov_shrk_wt) * cov + cov_shrk_wt * Shrk_Cov
    return((Exp_C_Hat, Cov_C_Hat))

def rnr_efficient_frontier_qp_rets(n_portfolio, covariance, expected_values):
    """
    Port of Attilio Meucci's Matlab file EfficientFrontierQPRets.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    This function returns the n_portfolio x 1 vector expected returns,
                          the n_portfolio x 1 vector of volatilities and
                          the n_portfolio x n_asset matrix of weights
    of n_portfolio efficient portfolios whose expected returns are equally spaced along the whole range of the efficient frontier
    """
    solvers.options['show_progress'] = False
    n_asset = covariance.shape[0]
    expected_values = opt.matrix(expected_values)
    
    # Determine weights, return and volatility of minimum-risk portfolio
    S = opt.matrix(covariance)
    pbar = opt.matrix(np.zeros(n_asset)) 
    # 1. positive weights
    G = opt.matrix(0.0, (n_asset, n_asset))
    G[::n_asset+1] = -1.0
    h = opt.matrix(0.0, (n_asset, 1))
    # 2. weights sum to one
    A = opt.matrix(1.0, (1, n_asset))
    b = opt.matrix(1.0)
    x0 = opt.matrix(1.0 / n_asset * np.ones(n_asset))
    min_x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']
    min_ret = blas.dot(min_x.T, expected_values)
    min_vol = np.sqrt(blas.dot(min_x, S * min_x))
    
    # Determine weights, return and volatility of maximum-risk portfolio
    max_idx = np.asscalar(np.argmax(expected_values))
    max_x = np.zeros(n_asset)
    max_x[max_idx] = 1
    max_ret = expected_values[max_idx]
    max_vol = np.sqrt(np.dot(max_x, np.dot(covariance, max_x)))
    
    # Slice efficient frontier returns into n_portfolio segments
    target_rets = np.linspace(min_ret, max_ret, n_portfolio).tolist()
    
    # compute the n_portfolio weights and risk-return coordinates of the optimal allocations for each slice
    weights = np.zeros((n_portfolio, n_asset))
    rets = np.zeros(n_portfolio)
    vols = np.zeros(n_portfolio)
    # Start with min vol portfolio
    weights[0,:] = np.asarray(min_x).T
    rets[0] = min_ret
    vols[0] = min_vol
    
    for i in range(1, n_portfolio-1):
        # Determine least risky portfolio for given expected return
        A = opt.matrix(np.vstack([np.ones(n_asset), expected_values.T]))
        b = opt.matrix(np.hstack([1, target_rets[i]]))
        x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']    
        weights[i,:] = np.asarray(x).T
        rets[i] = blas.dot(x.T, expected_values)
        vols[i] = np.sqrt(blas.dot(x, S * x))    
    
    # Add max ret portfolio
    weights[n_portfolio-1,:] = np.asarray(max_x).T
    rets[n_portfolio-1] = max_ret
    vols[n_portfolio-1] = max_vol
    return(weights, rets, vols)

def initialize(context):
    """
    Called once at the start of the algorithm.
    """   
    context.day = 503
    #context.min_days_data = 504 # 2 years
    context.assets = [symbol('IBM'), symbol('GLD'), symbol('XOM'), symbol('AAPL'), symbol('MSFT'), symbol('TLT'), symbol('SHY')]
    context.n_asset = len(context.assets)
    context.n_portfolio = 40
    # Turn off the slippage model
    #set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers Commission)
    #set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    # Rebalance every day
    #schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_close())
    schedule_function(my_rebalance, 
                      date_rules.every_day(),
                      time_rules.market_open(hours = 1, minutes = 30))
             
def my_rebalance(context,data):
    # Using an expanding data window, starting with 504 days (2 years) of history
    context.day += 1 
    # if context.day < context.min_days_data:
    #     return
    # if context.day == context.min_days_data:
    #     log.info('Start trading : %s' % str(get_datetime('US/Eastern')))
    prices = data.history(context.assets, "price", context.day, "1d")
    returns = prices.pct_change().dropna()
    n_scenario = len(returns)
    
    # Set Flexible Probabilities using exponential smoothing
    half_life_prjn = 2 * 252 # in days
    lambda_prjn = np.log(2) / half_life_prjn
    probs = np.exp(-lambda_prjn * (np.arange(0, n_scenario)[::-1]))
    probs = probs / sum(probs)
    
    # Compute weighted mean and covariance matrix using flex probs
    fp_stats = ws.DescrStatsW(returns, probs)
    mu = fp_stats.mean
    sigma2 = fp_stats.cov
    
    # Perform shrinkage to mitigate estimation risk
    mu_shrk, cov_shrk = rnr_simple_shrinkage(mu, sigma2)

    # Step 1: mean-variance quadratic optimization for efficient frontier
    weights, rets, vols = rnr_efficient_frontier_qp_rets(context.n_portfolio, cov_shrk, mu_shrk)

    # Step 2: evaluate satisfaction for all allocations on the frontier
    satisfaction = rets / vols # Sharpe ratio

    # Choose the allocation that maximises satisfaction
    max_sat_idx = np.argmax(satisfaction)
    max_sat = satisfaction[max_sat_idx]
    max_sat_wt = weights[max_sat_idx, :]
    
    # Rebalance portfolio accordingly
    asset_weights = 'Weights: '
    sum_weights = 0.0
    for stock, weight in zip(prices.columns, max_sat_wt):
        order_target_percent(stock, weight) 
        asset_weights += '%s=%s, ' % (stock.symbol, round(weight,2))
        sum_weights += weight
    
    # Log and record some stuff
    asset_weights += 'SUM=%s' % (sum_weights)
    log.info(asset_weights)
    # Can only record max of 5 variables
    # Recording weights of 5 assets
    record(IBM=max_sat_wt[0], GLD=max_sat_wt[1], XOM=max_sat_wt[2], AAPL=max_sat_wt[3], TLT=max_sat_wt[5])
    #record(MSFT=max_sat_wt[4], SHY=max_sat_wt[6], Sum_weights=sum_weights)
    
    



Yes, this industry is full of charlatans who preselect securities for their backtests and play with backtest windows, but do not even check whether their algo trades, or whether it trades all the right securities half of the time. Some of them even shamelessly plug their websites and other spam.

LOL. The internet is full of trolls making unfounded accusations and misrepresenting the facts.

I replaced the stocks with the largest market-cap stocks as of the beginning of the backtest.

Equal weighting has Meucci beat.

[Attached backtest and algorithm code]
"""
Applying Meucci's Checklist Steps - Toy Example AMENDED

@author: Peter Chan       www.returnandrisk.com

See also: https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt
"""
import numpy as np
import cvxopt as opt
from cvxopt import solvers, blas
import statsmodels.stats.weightstats as ws

def rnr_simple_shrinkage(mu, cov, mu_shrk_wt=0.1, cov_shrk_wt=0.1):
    """Reference: Attilio Meucci's Matlab file S_MVHorizon.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    """
    n_asset = len(mu)
    
    # Mean shrinkage 
    Shrk_Exp = np.zeros(n_asset)
    Exp_C_Hat = (1 - mu_shrk_wt) * mu + mu_shrk_wt * Shrk_Exp
    
    # Covariance shrinkage
    Shrk_Cov = np.eye(n_asset) * np.trace(cov) / n_asset
    Cov_C_Hat = (1-cov_shrk_wt) * cov + cov_shrk_wt * Shrk_Cov
    return((Exp_C_Hat, Cov_C_Hat))

def rnr_efficient_frontier_qp_rets(n_portfolio, covariance, expected_values):
    """
    Port of Attilio Meucci's Matlab file EfficientFrontierQPRets.m
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management
    This function returns the n_portfolio x 1 vector expected returns,
                          the n_portfolio x 1 vector of volatilities and
                          the n_portfolio x n_asset matrix of weights
    of n_portfolio efficient portfolios whose expected returns are equally spaced along the whole range of the efficient frontier
    """
    solvers.options['show_progress'] = False
    n_asset = covariance.shape[0]
    expected_values = opt.matrix(expected_values)
    
    # Determine weights, return and volatility of minimum-risk portfolio
    S = opt.matrix(covariance)
    pbar = opt.matrix(np.zeros(n_asset)) 
    # 1. positive weights
    G = opt.matrix(0.0, (n_asset, n_asset))
    G[::n_asset+1] = -1.0
    h = opt.matrix(0.0, (n_asset, 1))
    # 2. weights sum to one
    A = opt.matrix(1.0, (1, n_asset))
    b = opt.matrix(1.0)
    x0 = opt.matrix(1.0 / n_asset * np.ones(n_asset))
    min_x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']
    min_ret = blas.dot(min_x.T, expected_values)
    min_vol = np.sqrt(blas.dot(min_x, S * min_x))
    
    # Determine weights, return and volatility of maximum-risk portfolio
    max_idx = np.asscalar(np.argmax(expected_values))
    max_x = np.zeros(n_asset)
    max_x[max_idx] = 1
    max_ret = expected_values[max_idx]
    max_vol = np.sqrt(np.dot(max_x, np.dot(covariance, max_x)))
    
    # Slice efficient frontier returns into n_portfolio segments
    target_rets = np.linspace(min_ret, max_ret, n_portfolio).tolist()
    
    # compute the n_portfolio weights and risk-return coordinates of the optimal allocations for each slice
    weights = np.zeros((n_portfolio, n_asset))
    rets = np.zeros(n_portfolio)
    vols = np.zeros(n_portfolio)
    # Start with min vol portfolio
    weights[0,:] = np.asarray(min_x).T
    rets[0] = min_ret
    vols[0] = min_vol
    
    for i in range(1, n_portfolio-1):
        # Determine least risky portfolio for given expected return
        A = opt.matrix(np.vstack([np.ones(n_asset), expected_values.T]))
        b = opt.matrix(np.hstack([1, target_rets[i]]))
        x = solvers.qp(S, pbar, G, h, A, b, 'coneqp', x0)['x']    
        weights[i,:] = np.asarray(x).T
        rets[i] = blas.dot(x.T, expected_values)
        vols[i] = np.sqrt(blas.dot(x, S * x))    
    
    # Add max ret portfolio
    weights[n_portfolio-1,:] = np.asarray(max_x).T
    rets[n_portfolio-1] = max_ret
    vols[n_portfolio-1] = max_vol
    return(weights, rets, vols)

def initialize(context):
    """
    Called once at the start of the algorithm.
    """   
    context.day = 503
    #context.min_days_data = 504 # 2 years
    context.assets = [symbol('GE'), symbol('GLD'), symbol('BAC'), symbol('MSFT'), symbol('XOM'), symbol('BRK_B'), symbol('GOOG_L')]
    context.n_asset = len(context.assets)
    context.n_portfolio = 40
    # Turn off the slippage model
    #set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers Commission)
    #set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    # Rebalance every day
    #schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_close())
    schedule_function(my_rebalance, 
                      date_rules.every_day(),
                      time_rules.market_open(hours = 1, minutes = 30))
             
def my_rebalance(context,data):
    # Using an expanding data window, starting with 504 days (2 years) of history
    context.day += 1 
    # if context.day < context.min_days_data:
    #     return
    # if context.day == context.min_days_data:
    #     log.info('Start trading : %s' % str(get_datetime('US/Eastern')))
    prices = data.history(context.assets, "price", context.day, "1d")
    returns = prices.pct_change().dropna()
    n_scenario = len(returns)
    
    # Set Flexible Probabilities using exponential smoothing
    half_life_prjn = 2 * 252 # in days
    lambda_prjn = np.log(2) / half_life_prjn
    probs = np.exp(-lambda_prjn * (np.arange(0, n_scenario)[::-1]))
    probs = probs / sum(probs)
    
    # Compute weighted mean and covariance matrix using flex probs
    fp_stats = ws.DescrStatsW(returns, probs)
    mu = fp_stats.mean
    sigma2 = fp_stats.cov
    
    # Perform shrinkage to mitigate estimation risk
    mu_shrk, cov_shrk = rnr_simple_shrinkage(mu, sigma2)

    # Step 1: mean-variance quadratic optimization for efficient frontier
    weights, rets, vols = rnr_efficient_frontier_qp_rets(context.n_portfolio, cov_shrk, mu_shrk)

    # Step 2: evaluate satisfaction for all allocations on the frontier
    satisfaction = rets / vols # Sharpe ratio

    # Choose the allocation that maximises satisfaction
    max_sat_idx = np.argmax(satisfaction)
    max_sat = satisfaction[max_sat_idx]
    max_sat_wt = weights[max_sat_idx, :]
    
    # Rebalance portfolio accordingly
    asset_weights = 'Weights: '
    sum_weights = 0.0
    for stock, weight in zip(prices.columns, max_sat_wt):
        order_target_percent(stock, weight) 
        asset_weights += '%s=%s, ' % (stock.symbol, round(weight,2))
        sum_weights += weight
    
    # Log and record some stuff
    asset_weights += 'SUM=%s' % (sum_weights)
    log.info(asset_weights)
    # Can only record max of 5 variables
    # Recording weights of 5 assets
    record(GE=max_sat_wt[0], GLD=max_sat_wt[1], BAC=max_sat_wt[2], MSFT=max_sat_wt[3], BRK_B=max_sat_wt[5])
    #record(XOM=max_sat_wt[4], GOOG_L=max_sat_wt[6], Sum_weights=sum_weights)
    
    

