This toy example algorithm incorporates some of Attilio Meucci's techniques to improve portfolio optimization. The key points are:

- Use longer time series for estimation – have the law of large numbers work in your favour
- Apply flexible probabilities to enhance estimation, i.e. weight historical observations differently — e.g. give more recent data a higher weight
- Shrink your mean and covariance matrix to reduce estimation risk
- Use a 2-step mean-variance optimization approach to choose the optimal portfolio on the efficient frontier according to your preference (satisfaction)

See also the post https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt for some background and some of the issues that arise in portfolio optimization.

Thanks

Peter

Clone Algorithm

52

Loading...

There was an error loading this backtest.

Backtest from
to
with
initial capital

Cumulative performance:

Algorithm
Benchmark

Custom data:

Total Returns

--

Alpha

--

Beta

--

Sharpe

--

Sortino

--

Max Drawdown

--

Benchmark Returns

--

Volatility

--

Returns | 1 Month | 3 Month | 6 Month | 12 Month |

Alpha | 1 Month | 3 Month | 6 Month | 12 Month |

Beta | 1 Month | 3 Month | 6 Month | 12 Month |

Sharpe | 1 Month | 3 Month | 6 Month | 12 Month |

Sortino | 1 Month | 3 Month | 6 Month | 12 Month |

Volatility | 1 Month | 3 Month | 6 Month | 12 Month |

Max Drawdown | 1 Month | 3 Month | 6 Month | 12 Month |

"""
Applying Meucci's Checklist Steps - Toy Example
@author: Peter Chan www.returnandrisk.com
See also:
https://www.quantopian.com/posts/the-efficient-frontier-markowitz-portfolio-optimization-in-python-using-cvxopt
"""
import numpy as np
import cvxopt as opt
from cvxopt import solvers, blas
import statsmodels.stats.weightstats as ws


def rnr_simple_shrinkage(mu, cov, mu_shrk_wt=0.1, cov_shrk_wt=0.1):
    """Shrink mean/covariance estimates toward simple targets to reduce
    estimation risk.

    The mean is shrunk toward the zero vector and the covariance toward
    the identity scaled by the average variance (trace / n), following
    Attilio Meucci's Matlab file S_MVHorizon.m:
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management

    Parameters
    ----------
    mu : (n_asset,) array-like
        Sample mean vector of asset returns.
    cov : (n_asset, n_asset) ndarray
        Sample covariance matrix of asset returns.
    mu_shrk_wt : float, optional
        Weight on the zero-vector shrinkage target for the mean
        (0 = no shrinkage, 1 = full shrinkage).
    cov_shrk_wt : float, optional
        Weight on the scaled-identity shrinkage target for the covariance.

    Returns
    -------
    (mu_hat, cov_hat) : tuple
        Shrunk mean vector and covariance matrix.
    """
    n_asset = len(mu)
    # Mean shrinkage: convex combination with the zero vector.
    shrk_exp = np.zeros(n_asset)
    exp_c_hat = (1 - mu_shrk_wt) * mu + mu_shrk_wt * shrk_exp
    # Covariance shrinkage: convex combination with identity * average variance.
    shrk_cov = np.eye(n_asset) * np.trace(cov) / n_asset
    cov_c_hat = (1 - cov_shrk_wt) * cov + cov_shrk_wt * shrk_cov
    return (exp_c_hat, cov_c_hat)


def rnr_efficient_frontier_qp_rets(n_portfolio, covariance, expected_values):
    """Compute long-only efficient portfolios with equally spaced target returns.

    Port of Attilio Meucci's Matlab file EfficientFrontierQPRets.m:
    https://www.mathworks.com/matlabcentral/fileexchange/25010-exercises-in-advanced-risk-and-portfolio-management

    Returns the expected returns, volatilities and weights of n_portfolio
    efficient portfolios whose expected returns are equally spaced along
    the whole range of the efficient frontier (from the minimum-variance
    portfolio to the maximum-return portfolio).

    Parameters
    ----------
    n_portfolio : int
        Number of portfolios along the frontier (>= 2).
    covariance : (n_asset, n_asset) ndarray
        Covariance matrix of asset returns.
    expected_values : (n_asset,) array-like
        Expected asset returns.

    Returns
    -------
    (weights, rets, vols) : tuple
        weights : (n_portfolio, n_asset) ndarray of allocations
        rets    : (n_portfolio,) ndarray of expected returns
        vols    : (n_portfolio,) ndarray of volatilities
    """
    solvers.options['show_progress'] = False
    n_asset = covariance.shape[0]
    expected_values = opt.matrix(expected_values)
    # Minimum-risk portfolio: minimize (1/2) x' S x (pbar = 0 means there is
    # no return term in the objective) subject to the constraints below.
    S = opt.matrix(covariance)
    pbar = opt.matrix(np.zeros(n_asset))
    # 1. long-only: -x <= 0 (G is -I on the diagonal)
    G = opt.matrix(0.0, (n_asset, n_asset))
    G[::n_asset + 1] = -1.0
    h = opt.matrix(0.0, (n_asset, 1))
    # 2. fully invested: sum(x) == 1
    A = opt.matrix(1.0, (1, n_asset))
    b = opt.matrix(1.0)
    # Equal-weight warm start. NOTE: np.ones(n)/n avoids the Python 2
    # integer-division bug in the original (1 / n_asset == 0).
    x0 = opt.matrix(np.ones(n_asset) / n_asset)
    # Pass the warm start by keyword: the original passed it positionally
    # after a bogus 'coneqp' solver string, which binds it to the wrong
    # parameter (kktsolver) in newer cvxopt versions. solver=None already
    # selects the default coneqp path.
    min_x = solvers.qp(S, pbar, G, h, A, b, initvals=x0)['x']
    min_ret = blas.dot(min_x, expected_values)
    min_vol = np.sqrt(blas.dot(min_x, S * min_x))
    # Maximum-return portfolio: 100% in the asset with the highest expected
    # return (the upper corner of the long-only frontier).
    # int(...) replaces np.asscalar, which was removed in NumPy 1.23.
    max_idx = int(np.argmax(expected_values))
    max_x = np.zeros(n_asset)
    max_x[max_idx] = 1.0
    max_ret = expected_values[max_idx]
    max_vol = np.sqrt(np.dot(max_x, np.dot(covariance, max_x)))
    # Slice the frontier's return range into n_portfolio equal segments.
    target_rets = np.linspace(min_ret, max_ret, n_portfolio).tolist()
    weights = np.zeros((n_portfolio, n_asset))
    rets = np.zeros(n_portfolio)
    vols = np.zeros(n_portfolio)
    # Endpoints are already known: start with the min-vol portfolio...
    weights[0, :] = np.asarray(min_x).T
    rets[0] = min_ret
    vols[0] = min_vol
    for i in range(1, n_portfolio - 1):
        # Least-risky portfolio for the i-th target return: stack the
        # expected-return equality on top of the fully-invested constraint.
        A = opt.matrix(np.vstack([np.ones(n_asset), expected_values.T]))
        b = opt.matrix(np.hstack([1, target_rets[i]]))
        x = solvers.qp(S, pbar, G, h, A, b, initvals=x0)['x']
        weights[i, :] = np.asarray(x).T
        rets[i] = blas.dot(x, expected_values)
        vols[i] = np.sqrt(blas.dot(x, S * x))
    # ...and finish with the max-return portfolio.
    weights[n_portfolio - 1, :] = max_x
    rets[n_portfolio - 1] = max_ret
    vols[n_portfolio - 1] = max_vol
    return (weights, rets, vols)


def initialize(context):
    """Called once at the start of the algorithm: define the asset universe,
    cost models and the daily rebalance schedule."""
    context.day = 0
    context.min_days_data = 504  # require 2 years of daily history before trading
    context.assets = [symbol('IBM'), symbol('GLD'), symbol('XOM'),
                      symbol('AAPL'), symbol('MSFT'), symbol('TLT'),
                      symbol('SHY')]
    context.n_asset = len(context.assets)
    context.n_portfolio = 40  # portfolios computed along the frontier
    # Turn off the slippage model.
    set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers-style per-share).
    set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    # Rebalance every day at market close.
    schedule_function(my_rebalance, date_rules.every_day(),
                      time_rules.market_close())


def my_rebalance(context, data):
    """Daily rebalance: estimate moments with flexible probabilities plus
    shrinkage, trace the efficient frontier, and allocate to the portfolio
    that maximizes the satisfaction index (Sharpe ratio)."""
    # Expanding data window with a minimum of min_days_data observations.
    context.day += 1
    if context.day < context.min_days_data:
        return
    if context.day == context.min_days_data:
        log.info('Start trading : %s' % str(get_datetime('US/Eastern')))
    prices = data.history(context.assets, "price", context.day, "1d")
    returns = prices.pct_change().dropna()
    n_scenario = len(returns)
    # Flexible probabilities via exponential smoothing: more recent
    # observations receive exponentially larger weights.
    half_life_prjn = 2 * 252  # half-life in trading days
    lambda_prjn = np.log(2) / half_life_prjn
    probs = np.exp(-lambda_prjn * (np.arange(0, n_scenario)[::-1]))
    probs = probs / sum(probs)
    # Probability-weighted mean and covariance of the return scenarios.
    fp_stats = ws.DescrStatsW(returns, probs)
    mu = fp_stats.mean
    sigma2 = fp_stats.cov
    # Shrinkage to mitigate estimation risk.
    mu_shrk, cov_shrk = rnr_simple_shrinkage(mu, sigma2)
    # Step 1: mean-variance quadratic optimization for the efficient frontier.
    weights, rets, vols = rnr_efficient_frontier_qp_rets(
        context.n_portfolio, cov_shrk, mu_shrk)
    # Step 2: evaluate satisfaction (Sharpe ratio) for every allocation on
    # the frontier and choose its maximizer.
    satisfaction = rets / vols
    max_sat_idx = np.argmax(satisfaction)
    max_sat_wt = weights[max_sat_idx, :]
    # Rebalance the portfolio accordingly.
    asset_weights = 'Weights: '
    sum_weights = 0.0
    for stock, weight in zip(prices.columns, max_sat_wt):
        order_target_percent(stock, weight)
        asset_weights += '%s=%s, ' % (stock.symbol, round(weight, 2))
        sum_weights += weight
    # Log and record some stuff.
    asset_weights += 'SUM=%s' % (sum_weights)
    log.info(asset_weights)
    # Can only record max of 5 variables.
    # NOTE(review): these labels assume prices.columns preserves the
    # context.assets order (IBM, GLD, XOM, AAPL, MSFT, TLT, SHY) -- confirm
    # against the platform's data.history behavior.
    record(IBM=max_sat_wt[0], GLD=max_sat_wt[1], XOM=max_sat_wt[2],
           AAPL=max_sat_wt[3], TLT=max_sat_wt[5])
    #record(MSFT=max_sat_wt[4], SHY=max_sat_wt[6], Sum_weights=sum_weights)