OLMAR w/ NASDAQ 100 & dollar-volume

Here's the OLMAR algorithm with the NASDAQ 100 and dollar-volume data (instead of prices). --Grant

1476
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
import numpy as np

# Globals used by the get_data batch_transform decorator below.
R_P = 1 #refresh period in days
W_L = 5 #window length in days (trailing lookback for the moving average)

def initialize(context):
    """Set up the OLMAR universe, initial uniform weights, and trade costs."""

    # NASDAQ 100 constituents, taken from
    # https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo
    nasdaq_100_ids = [
        24,    114,   122,   630,   67,
        20680, 328,   14328, 368,   16841,
        9883,  337,   38650, 739,   27533,
        3806,  18529, 1209,  40207, 1419,
        15101, 17632, 39095, 1637,  1900,
        32301, 18870, 14014, 25317, 36930,
        12652, 26111, 24819, 24482, 2618,
        2663,  27543, 1787,  2696,  42950,
        20208, 2853,  8816,  12213, 3212,
        9736,  23906, 26578, 22316, 13862,
        3951,  8655,  25339, 4246,  43405,
        27357, 32046, 4485,  43919, 4668,
        8677,  22802, 3450,  5061,  5121,
        5149,  5166,  23709, 13905, 19926,
        19725, 8857,  5767,  5787,  19917,
        6295,  6413,  6546,  20281, 6683,
        26169, 6872,  11901, 13940, 7061,
        15581, 24518, 7272,  39840, 7671,
        27872, 8017,  38817, 8045,  8132,
        8158,  24124, 8344,  8352,  14848,
    ]
    context.stocks = [sid(i) for i in nasdaq_100_ids]

    context.m = len(context.stocks)               # universe size
    context.b_t = np.ones(context.m) / context.m  # start fully invested, equal weight
    context.eps = 1.1                             # OLMAR epsilon (reversion threshold)
    context.init = False                          # flipped after the first rebalance

    # Idealized execution: fixed slippage model, zero commission.
    set_slippage(slippage.FixedSlippage())
    set_commission(commission.PerShare(cost=0))

def handle_data(context, data):
    """Daily OLMAR (On-Line Moving Average Reversion) update.

    Uses a trailing window of dollar-volume (price * volume) in place of
    raw prices: predicts each security's relative change as window-mean
    over latest value, applies the OLMAR algorithm-2 weight update, projects
    the weights back onto the simplex, and rebalances the portfolio.
    """
    # On the very first bar, invest with the initial (uniform) weights.
    if not context.init:
        rebalance_portfolio(context, data, context.b_t)
        context.init = True

    # Trailing W_L-day window of dollar-volume; None until the window fills.
    d = get_data(data, context.stocks)
    # BUGFIX: was `d == None`, which compares elementwise against an ndarray
    # and yields an array with an ambiguous truth value. Identity test instead.
    if d is None:
        return

    prices = d  # NOTE: despite the name, this is dollar-volume, not price

    m = context.m
    x_tilde = np.zeros(m)

    # Predicted relative reversion: window mean over the most recent value.
    for i, stock in enumerate(context.stocks):
        x_tilde[i] = np.mean(prices[:, i]) / prices[W_L - 1, i]

    ###########################
    # Inside of OLMAR (algo 2)

    x_bar = x_tilde.mean()

    # Terms for the Lagrange multiplier (lam) of the constrained update.
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm(x_tilde - x_bar)) ** 2

    # Guard the divide-by-zero case (all predictions identical).
    if denom == 0.0:
        lam = 0  # no portfolio update
    else:
        lam = max(0, num / denom)

    b = context.b_t + lam * (x_tilde - x_bar)

    # Project onto the probability simplex: long-only, fully invested.
    b_norm = simplex_projection(b)

    rebalance_portfolio(context, data, b_norm)

    # Remember the new weights for the next update.
    context.b_t = b_norm

    log.debug(b_norm)

@batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above
def get_data(datapanel, sids):
    """Return a (window x security) matrix of dollar-volume (price * volume).

    Returns None (via the batch_transform machinery) until the trailing
    window has accumulated W_L days of data.
    """
    price_mat = datapanel['price'].as_matrix(sids)
    volume_mat = datapanel['volume'].as_matrix(sids)
    return price_mat * volume_mat

def rebalance_portfolio(context, data, desired_port):
    """Submit orders that move current holdings toward the target weights.

    desired_port is a weight vector aligned with context.stocks; each
    security is ordered for the share difference between its target and
    current position.
    """
    # Before the first trade, size against starting cash; afterwards,
    # against total portfolio value (positions + cash).
    if context.init:
        positions_value = context.portfolio.positions_value + context.portfolio.cash
    else:
        positions_value = context.portfolio.starting_cash

    current_amount = np.zeros_like(desired_port)
    desired_amount = np.zeros_like(desired_port)
    for i, stock in enumerate(context.stocks):
        current_amount[i] = context.portfolio.positions[stock].amount
        desired_amount[i] = desired_port[i] * positions_value / data[stock].price

    # Order the share delta for each security.
    diff_amount = desired_amount - current_amount
    for i, stock in enumerate(context.stocks):
        order(stock, diff_amount[i])

def simplex_projection(v, b=1):
    """Project vector v onto the simplex {w : sum_i w_i = b, w_i >= 0}.

    Implements the O(m log m) sort-based algorithm from: "Efficient
    Projections onto the l1-Ball for Learning in High Dimensions",
    John Duchi et al., ICML 2008.

    Note: negative components are zeroed first and the shift theta is
    clamped at 0, so when sum(max(v, 0)) < b the result sums to *less*
    than b rather than being scaled up to the simplex. This matches the
    original port's behavior (the portfolio is never levered up).

    Parameters
    ----------
    v : array_like
        Vector in R^m to project.
    b : float, optional
        Simplex scale (target sum of the projection), default 1.

    Returns
    -------
    numpy.ndarray
        The projection w of v; empty input returns an empty array.

    Example
    -------
    >>> proj = simplex_projection([.4, .3, -.4, .5])
    >>> proj
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> proj.sum()
    1.0

    Original MATLAB implementation: John Duchi ([email protected])
    Python port: Copyright 2012 by Thomas Wiecki ([email protected]).
    """
    v = np.asarray(v, dtype=float)  # float path even for integer input
    p = len(v)
    if p == 0:
        return v  # nothing to project; original raised IndexError here

    # Zero negative entries, then sort descending.
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    # rho: last index k (0-based) with u_k > (sum_{i<=k} u_i - b) / (k+1).
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    # Uniform shift removed from the surviving components, clamped at 0.
    theta = max(0.0, (sv[rho] - b) / (rho + 1))
    w = v - theta
    w[w < 0] = 0
    return w
This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes.
There was a runtime error.
33 responses

I reduced the initial capital to $25K, and added:  open_orders = [] for stock in context.stocks: open_orders.append(bool(get_open_orders(stock))) if True in open_orders: return  I use the built-in slippage and commission models, and plot the cash balance. Grant 1476 Loading... Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np #globals for get_avg batch transform decorator R_P = 1 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo context.stocks = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(40207), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), 
sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1.1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return open_orders = [] for stock in context.stocks: open_orders.append(bool(get_open_orders(stock))) if True in open_orders: return prices = d m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each security for i, stock in enumerate(context.stocks): x_tilde[i] = np.mean(prices[:,i])/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) # update portfolio context.b_t = b_norm # log.debug(b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids): p = datapanel['price'].as_matrix(sids) v = datapanel['volume'].as_matrix(sids) return np.multiply(p,v) def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = 
desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, diff_amount[i]) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Hello Grant, I wish I knew what it's doing! But you might as well 'return' as soon as you find the first pending order:  #open_orders = [] for stock in context.stocks: if bool(get_open_orders(stock)): return #if True in open_orders: # return  P. Thanks Peter, The link to the paper describing the original algorithm is http://icml.cc/2012/papers/168.pdf. It uses prices rather than dollar-volumes. Grant Hi Grant, Nice idea to use the dollar-volume data! How does the performance compare to using OLMAR with price data over the same period? Aidan Hello Aidan, Here you go. 
It's a simple code change: @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids): p = datapanel['price'].as_matrix(sids) # v = datapanel['volume'].as_matrix(sids) # return np.multiply(p,v) return p  Grant 1476 Loading... Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np #globals for get_avg batch transform decorator R_P = 1 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo context.stocks = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(40207), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), 
sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1.1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return for stock in context.stocks: if bool(get_open_orders(stock)): return prices = d m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each security for i, stock in enumerate(context.stocks): x_tilde[i] = np.mean(prices[:,i])/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) # update portfolio context.b_t = b_norm # log.debug(b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids): p = datapanel['price'].as_matrix(sids) # v = datapanel['volume'].as_matrix(sids) # return np.multiply(p,v) return p def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = 
desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, diff_amount[i]) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Here's an attempt at minute-level trading. Seems to improve the return, but then it completely tanks at the end...not sure why. --Grant 1476 Loading... 
Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np #globals for get_avg batch transform decorator R_P = 0 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo context.stocks = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(40207), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 0.5 # change epsilon here context.init = False def handle_data(context, data): cash = 
context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return for stock in context.stocks: if bool(get_open_orders(stock)): return prices = d m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each security for i, stock in enumerate(context.stocks): x_tilde[i] = np.mean(prices[:,i])/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) # update portfolio context.b_t = b_norm # log.debug(b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids): p = datapanel['price'].as_matrix(sids) v = datapanel['volume'].as_matrix(sids) return np.multiply(p,v) def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, diff_amount[i]) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high 
dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Hi Grant, I think it would be really cool if we could add a penalization term to the optimization problem when we readjust the portfolio. So that you don't have to buy or sell too many stocks. The transaction costs probably wipes out profit if we try to match b to b* exactly every period. Note that I don't think that the dollar-volume computation in this algorithm is quite right, since the batch transform forward-fills both price and volume for missing bars (not yet confirmed). If a bar is missing, the volume should be zero in the dollar-volume code. I'm considering ways of fixing the problem; if you have a solution, please post it. --Grant @Grant - studying this one... I like the potential of OLMAR... is anyone running this in live trading yet? when u say " If a bar is missing, the volume should be zero in the dollar-volume code." so what happens now? 
it's using a previous dollar-volume from a previous get_data or the as_matrix call "datapanel['volume'].as_matrix(sids) " is not returning a 0? Hello Miguel, Grant will expand but Grant and myself have just realised that Quantopian data forward fills empty bars with price and volume information. This surprised me and perhaps I should have considered this before. Some discussion from the last few days in these threads: and some examples here: https://www.quantopian.com/posts/feature-request-display-hh-mm-in-log-entries-in-a-minutely-backtest I think, although he can confirm or deny it, that Grant would like either no forward fill of price and volume or a forward fill of price with volume set to '0'. Related, Simon asked some questions here about the forthcoming 'history' feature: https://www.quantopian.com/posts/draft-proposal-of-new-data-history-api i.e. Also re forward filling, are you forward filling a nil tick to produce a degenerate bar with 0 volume? Or are you actually forward filling whatever the last bar was, causing any code that has exponential decay, volume accumulation or really any calculation at all to change...? EDIT: I just re-read the reply from Eddie to Simon re: 'history': 'ffill=True' - fill the pricing information forward with the last known value (with the exception of inserting a "0" for volume) P. Hello Miguel and Peter, I don't know if anyone is running the OLMAR algorithm in paper trading. I tried and there was an error. I haven't gotten back to it. Regarding the forward filling, there are a couple issues at play here. I need to check if the forward filling used by the batch transform sets the volume to zero. If not, I think that I can turn off the forward filling and then just ignore the missing data (and with the upcoming history method, there will be a way to deal with missing data). 
The issue described on https://www.quantopian.com/posts/thinly-traded-stocks-why-no-gaps is different in that I think that it impacts order submission/fulfillment/slippage, since I think the filling is applied before the backtest is started...we'll have to see what the Quantopian response is to my questions. Grant Hi Grant, Glad to see our OLMAR is having a revival, the results are also very promising. Have you played around with eps and window_length at all? In my optimization experiments I found that those have a huge effect, especially epsilon. This also leads to @Ben's question: you can clone the algo and play around with eps which essentially is a parameter of how aggressively the portfolio should be rebalanced; the lower the more conservative. Thomas Disclaimer The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. 
Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances. Hello Thomas, I've played around a bit with both eps and window_length...I may do a more systematic study once I sort out a few things. I got the algo to run on minute data, with slippage by adding: for stock in context.stocks: if bool(get_open_orders(stock)): return  This way, the portfolio can't be reallocated until all orders are filled. Particularly in minute mode, I'm concerned how missing bars (thinly traded securities) impact the algorithm. I can fix up the batch transform, but I'm not sure how to interpret my finding described on https://www.quantopian.com/posts/thinly-traded-stocks-why-no-gaps. My work-around is:  for stock in data: if data[stock].datetime < get_datetime(): return  The portfolio won't get reallocated if any of the securities did not trade (historically). However, I think that orders still get filled, right? Or not? Also, how does the backtester handle orders for fractional shares? I suspect as the algo is written now, there can be a difference between the actual portfolio state and the one that I record with: # update portfolio context.b_t = b_norm  Grant Hi Grant, Yeah, those work-arounds make sense. Orders should still get filled if you just return, yes. I also share you concern regarding the internal portfolio allocation vector b_t to get out of sync with the actual portfolio. Instead we should probably just recalculate b_t at the beginning of each handle_data(). Thomas good Another minute-level run. Still crashes at the end, though...hmm? --Grant 1476 Loading... 
Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np #globals for get_avg batch transform decorator R_P = 0 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo context.stocks = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(40207), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 0.5 # change epsilon here context.init = False def handle_data(context, data): cash = 
context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open for stock in context.stocks: if bool(get_open_orders(stock)): return # skip tic if any stocks did not trade for stock in data: if data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each security for i, stock in enumerate(context.stocks): x_tilde[i] = np.mean(prices[:,i])/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return np.multiply(p,v) def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = 
desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, diff_amount[i]) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Here's an update with the sid fix (https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo). Note that this algorithm isn't as great as it might appear, since it seems that most of the return can be obtained by just buying the entire Nasdaq 100, with equal dollar weights (set context.eps = 0). --Grant 1476 Loading... 
Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np #globals for get_avg batch transform decorator R_P = 1 #refresh period in days W_L = 15 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo context.stocks = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(1406), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1 # change epsilon here context.init = False def handle_data(context, data): cash = 
context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open for stock in context.stocks: if bool(get_open_orders(stock)): return # skip tic if any stocks did not trade for stock in data: if data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving average price for each security for i, stock in enumerate(context.stocks): x_tilde[i] = np.mean(prices[:,i])/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return np.multiply(p,v) def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = 
desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, diff_amount[i]) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Here's an update (switched to using VWAP for the moving average, per Ben V. on https://www.quantopian.com/posts/olmar-implementation-fixed-bug). I also only allow shares to be transacted in even lots (multiples of 100). There's likely some bias in this approach, since I ran the algorithm a number of times to find 5 random stocks out of the Nasdaq 100 that juiced up the return. --Grant 1476 Loading... 
Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np from random import sample #globals for get_avg batch transform decorator R_P = 1 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo # context.Nasdaq100 = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(1406), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] # context.stocks = sample(context.Nasdaq100,5) context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)] for stock in context.stocks: print 
stock.sid context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open for stock in context.stocks: if bool(get_open_orders(stock)): return # skip tic if any stocks did not trade for stock in data: if data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d[0] volumes = d[1] m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving volume weighted average price for each secuirty for i, stock in enumerate(context.stocks): vwa_price = np.dot(prices[:,i],volumes[:,i])/np.sum(volumes[:,i]) x_tilde[i] = vwa_price/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return [p,v] def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = 
context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, int(round(diff_amount[i]/100.0)*100.0)) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Here's basically the same algo as immediately above, except run over a longer time period. --Grant 1476 Loading... 
Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np import math from random import sample #globals for get_avg batch transform decorator R_P = 1 #refresh period in days W_L = 5 #window length in days def initialize(context): # Nasdaq 100 from https://www.quantopian.com/posts/list-of-nasdaq-100-sids-to-use-in-your-algo # context.Nasdaq100 = [sid(24), sid(114), sid(122), sid(630) , sid(67), sid(20680), sid(328), sid(14328), sid(368), sid(16841), sid(9883), sid(337), sid(38650), sid(739), sid(27533), sid(3806), sid(18529), sid(1209), sid(1406), sid(1419), sid(15101), sid(17632), sid(39095), sid(1637), sid(1900), sid(32301), sid(18870), sid(14014), sid(25317), sid(36930), sid(12652), sid(26111), sid(24819), sid(24482), sid(2618), sid(2663), sid(27543), sid(1787) , sid(2696), sid(42950), sid(20208), sid(2853), sid(8816), sid(12213), sid(3212), sid(9736), sid(23906), sid(26578), sid(22316), sid(13862), sid(3951), sid(8655), sid(25339), sid(4246), sid(43405), sid(27357), sid(32046), sid(4485), sid(43919), sid(4668), sid(8677), sid(22802), sid(3450), sid(5061), sid(5121), sid(5149), sid(5166), sid(23709), sid(13905), sid(19926), sid(19725), sid(8857), sid(5767), sid(5787), sid(19917), sid(6295), sid(6413), sid(6546), sid(20281), sid(6683), sid(26169), sid(6872), sid(11901), sid(13940), sid(7061), sid(15581), sid(24518), sid(7272), sid(39840), sid(7671), sid(27872), sid(8017), sid(38817), sid(8045), sid(8132), sid(8158), sid(24124), sid(8344), sid(8352), sid(14848)] # context.stocks = sample(context.Nasdaq100,5) context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)] for stock in context.stocks: 
print stock.sid context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open for stock in context.stocks: if bool(get_open_orders(stock)): return # skip tic if any stocks did not trade for stock in data: if data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d[0] volumes = d[1] m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving volume weighted average price for each secuirty for i, stock in enumerate(context.stocks): vwa_price = np.dot(prices[:,i],volumes[:,i])/np.sum(volumes[:,i]) x_tilde[i] = vwa_price/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return [p,v] def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = 
context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): order(stock, int(math.floor(diff_amount[i]/100.0))*100) #order_stock def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. @Grant: I tried to paper trade the last version you posted but get: IndexError: index -1 is out of bounds for axis 0 with size 0 USER ALGORITHM:168, in simplex_projection rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] Seems like some issue with missing sids? Anyway, just wondering if you perhaps encountered this as well and know more. 
Thomas Hello Thomas, The problem might be due to missing sids, but I'm not sure. I'll tinker around with it when I get the chance. Grant Thomas, Here's basically the same algo. I'll try live trading with it to see if I get an error, too. Grant 110 Loading... Total Returns -- Alpha -- Beta -- Sharpe -- Sortino -- Max Drawdown -- Benchmark Returns -- Volatility --  Returns 1 Month 3 Month 6 Month 12 Month  Alpha 1 Month 3 Month 6 Month 12 Month  Beta 1 Month 3 Month 6 Month 12 Month  Sharpe 1 Month 3 Month 6 Month 12 Month  Sortino 1 Month 3 Month 6 Month 12 Month  Volatility 1 Month 3 Month 6 Month 12 Month  Max Drawdown 1 Month 3 Month 6 Month 12 Month import numpy as np import math # globals for get_data batch transform decorator R_P = 0 #refresh period in days W_L = 5 #window length in days def initialize(context): context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)] for stock in context.stocks: print stock.symbol context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open or any stocks did not trade for stock in context.stocks: if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d[0] volumes = d[1] m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving volume weighted average price for each secuirty for i, stock in enumerate(context.stocks): vwa_price = np.dot(prices[:,i],volumes[:,i])/np.sum(volumes[:,i]) x_tilde[i] = vwa_price/prices[-1,i] 
########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return [p,v] def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): # order(stock, int(math.floor(diff_amount[i]/100.0))*100) #order_stock order(stock, int(math.floor(diff_amount[i]))) def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. 
, 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes. There was a runtime error. Hello Thomas, I got through my first day of paper trading with no error. Here's the code I'm running: import numpy as np import math # globals for get_data batch transform decorator R_P = 0 #refresh period in days W_L = 5 #window length in days def initialize(context): context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)] for stock in context.stocks: print stock.symbol context.m = len(context.stocks) context.b_t = np.ones(context.m) / context.m context.eps = 1 # change epsilon here context.init = False def handle_data(context, data): cash = context.portfolio.cash record(cash=cash) if not context.init: rebalance_portfolio(context, data, context.b_t) context.init = True # get data d = get_data(data,context.stocks) if d == None: return # skip tic if any orders are open or any stocks did not trade for stock in context.stocks: if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime(): return # update portfolio for i, stock in enumerate(context.stocks): context.b_t[i] = context.portfolio.positions[stock].amount*data[stock].price context.b_t = np.divide(context.b_t,np.sum(context.b_t)) prices = d[0] volumes = d[1] m = context.m x_tilde = np.zeros(m) b = np.zeros(m) # find relative moving volume weighted average price for each secuirty for i, stock in enumerate(context.stocks): vwa_price = 
np.dot(prices[:,i],volumes[:,i])/np.sum(volumes[:,i]) x_tilde[i] = vwa_price/prices[-1,i] ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # Calculate terms for lambda (lam) dot_prod = np.dot(context.b_t, x_tilde) num = context.eps - dot_prod denom = (np.linalg.norm((x_tilde-x_bar)))**2 # test for divide-by-zero case if denom == 0.0: lam = 0 # no portolio update else: lam = max(0, num/denom) b = context.b_t + lam*(x_tilde-x_bar) b_norm = simplex_projection(b) rebalance_portfolio(context, data, b_norm) @batch_transform(refresh_period=R_P, window_length=W_L) # set globals R_P & W_L above def get_data(datapanel,sids,clean_nans=False): p = np.nan_to_num(datapanel['price'].as_matrix(sids)) v = np.nan_to_num(datapanel['volume'].as_matrix(sids)) return [p,v] def rebalance_portfolio(context, data, desired_port): #rebalance portfolio current_amount = np.zeros_like(desired_port) desired_amount = np.zeros_like(desired_port) if not context.init: positions_value = context.portfolio.starting_cash else: positions_value = context.portfolio.positions_value + context.portfolio.cash for i, stock in enumerate(context.stocks): current_amount[i] = context.portfolio.positions[stock].amount desired_amount[i] = desired_port[i]*positions_value/data[stock].price diff_amount = desired_amount - current_amount for i, stock in enumerate(context.stocks): # order(stock, int(math.floor(diff_amount[i]/100.0))*100) #order_stock order(stock, int(math.floor(diff_amount[i]))) def simplex_projection(v, b=1): """Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. 
sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> print proj array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print proj.sum() 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2012 by Thomas Wiecki ([email protected]). """ v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho+1)]) w = (v - theta) w[w<0] = 0 return w  I'm confused what happened, though, since I should have approximately the same dollar amount in each security. Instead, I have: CERN$32,915.62

SBUX
$28,902.06 DLTR$24,547.25

MSFT
$8,029.78 ROST$4,988.75

For some reason, the algo both bought and sold shares. Due to the warm-up? I'd expected, after the first trading day to have about the same dollar amount in each security, with only buys.

Grant

Hi Grant,

Thanks, that code runs fine now. I did notice the same behavior as you did though. Do we have a bug in the portfolio filling?

Btw. we have some new order methods in zipline that allow better control over how many shares to order that should make it to quantopian soon.

Thomas

Thomas,

I think that the start-up behavior under paper trading is due to the warm up. I had coded to do an initial buy, with equal dollar amounts in each security, before the batch transform window was full. Apparently, paper trading went back in time, applied the initial buy, then the warm-up period ensued, followed by the first day of trading, when the re-balancing was reported. So, I don't think that there is a bug in portfolio filling.

Grant

Grant,

I have used flags like this in many of my own algos and shares, and they always cause problems. Using a boolean flag for the init is brittle in simulated trading, precisely because of the warmup sequence for the algorithms you mention. However, we've found that simple switches also tend to break in real trading, because the switch assumes a clean slate start in the account. This assumption breaks down when you have pre-existing positions in your account, or you make transfers/transactions from outside the algorithm, or your broker updates your account, etc.

Instead of the boolean flag, we've been writing snippets to look at the current portfolio and determine if we want our algorithm to return the positions to some baseline. Sometimes that means liquidating, sometimes it means rebalancing to equal weights, etc. In this case, it might be as simple as checking if any of your stocks are missing from the portfolio. In other algorithms, we've looked at the total percent invested as a trigger to return to baseline.

thanks,
fawce

Disclaimer

The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances.

Thanks Fawce,

In this case, the algo is initialized by buying equal dollar amounts across all of the securities in the portfolio. Then, the portfolio is re-balanced. I could have just as well initialized after the warm-up period, so that on the first day of paper trading, I would have seen equal dollar amounts in each security. There is no provision to return to a baseline (e.g. equal dollar amounts), although it would be straightforward.

Grant

Grant, I'm very pleased to see this implementation on Quantopian. I have run many experiments in the past using many of the predecessor algorithms cited in the paper (Cover's alg, EG, and the Hungarians' KNN).

One thing I can't tell by looking at the code: Is this a long only portfolio? No negative allocations?

Hello Ken,

The algorithm is long only (no negative allocations). The question has come up before, but to my knowledge, no one has posted a solution.

Grant

As Fawce informed me here, "All live trading, paper or real money, is minutely." So, I added some code to execute the algorithm at market open under minutely trading. Here's the result, which basically replicates the one above.

In the end, it might be better to make a trading decision every minute, but based on my testing, it seems that there may need to be some sort of smoothing/filtering of the data to replicate the results I obtain with daily re-balancing.

--Grant

110
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
import numpy as np
import math
from pytz import timezone

# globals for get_data batch transform decorator
R_P = 0 #refresh period in days
W_L = 5 #window length in days

def initialize(context):

context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)]

for stock in context.stocks:
print stock.symbol

context.m = len(context.stocks)
context.b_t = np.ones(context.m) / context.m
context.eps = 1 # change epsilon here
context.init = False
context.previous_datetime = None

def handle_data(context, data):
    """Minutely entry point for the OLMAR strategy.

    Gates execution to the first bar of each calendar day, updates the
    portfolio weight vector b_t from current positions, runs one OLMAR
    (algo 2) update step, and rebalances toward the projected weights.

    Fixes vs. the original: ``d is None`` instead of ``d == None``; the
    local ``algo_executed`` flag is removed (it was re-created on every
    call, so it could never carry state between bars — the trailing
    ``algo_executed = True`` was a dead store); the unused
    ``b = np.zeros(m)`` initializer is dropped.
    """
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))

    cash = context.portfolio.cash
    record(cash=cash)

    # Bootstrap: on the very first bar, buy the equal-weight portfolio.
    if not context.init:
        rebalance_portfolio(context, data, context.b_t)
        context.init = True

    # get data (returns None until the batch-transform window is full)
    d = get_data(data, context.stocks)
    if d is None:
        return

    # Only execute the algorithm once per day: proceed solely on the first
    # bar whose calendar day differs from the previous bar's.
    if context.previous_datetime is not None:
        new_day = current_datetime.day != context.previous_datetime.day
    else:
        new_day = False

    context.previous_datetime = current_datetime

    if not new_day:
        return

    # skip tic if any orders are open or any stocks did not trade
    for stock in context.stocks:
        if bool(get_open_orders(stock)) or data[stock].datetime < get_datetime():
            return

    # update portfolio vector from current dollar positions
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount * data[stock].price

    context.b_t = np.divide(context.b_t, np.sum(context.b_t))

    prices = d[0]
    volumes = d[1]

    m = context.m
    x_tilde = np.zeros(m)

    # find relative moving volume-weighted average price for each security
    for i, stock in enumerate(context.stocks):
        vwa_price = np.dot(prices[:, i], volumes[:, i]) / np.sum(volumes[:, i])
        x_tilde[i] = vwa_price / prices[-1, i]

    ###########################
    # Inside of OLMAR (algo 2)

    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm(x_tilde - x_bar)) ** 2

    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0  # no portfolio update
    else:
        lam = max(0, num / denom)

    b = context.b_t + lam * (x_tilde - x_bar)

    b_norm = simplex_projection(b)

    rebalance_portfolio(context, data, b_norm)

@batch_transform(refresh_period=R_P, window_length=W_L)  # set globals R_P & W_L above
def get_data(datapanel, sids, clean_nans=False):
    """Return [price_matrix, volume_matrix] for the rolling window,
    with NaNs replaced by zeros."""
    price_matrix = np.nan_to_num(datapanel['price'].as_matrix(sids))
    volume_matrix = np.nan_to_num(datapanel['volume'].as_matrix(sids))
    return [price_matrix, volume_matrix]

def rebalance_portfolio(context, data, desired_port):
    """Order whole shares to move the current positions toward the
    weight vector ``desired_port``."""

    # Before the first fill only starting cash is available; afterwards
    # use total equity (positions + cash).
    if context.init:
        positions_value = context.portfolio.positions_value + context.portfolio.cash
    else:
        positions_value = context.portfolio.starting_cash

    held_shares = np.zeros_like(desired_port)
    target_shares = np.zeros_like(desired_port)
    for i, stock in enumerate(context.stocks):
        held_shares[i] = context.portfolio.positions[stock].amount
        target_shares[i] = desired_port[i] * positions_value / data[stock].price

    share_deltas = target_shares - held_shares

    for i, stock in enumerate(context.stocks):
        # Whole shares only; floor avoids overshooting the target dollar amount.
        order(stock, int(math.floor(share_deltas[i])))

def simplex_projection(v, b=1):
    """Project vector v onto the simplex {w : sum(w) = b, w >= 0}.

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
    Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
    Optimization Problem: min_{w}\| w - v \|_{2}^{2}
    s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

    Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> print proj
    array([ 0.33333333, 0.23333333, 0. , 0.43333333])
    >>> print proj.sum()
    1.0

    Original matlab implementation: John Duchi ([email protected])
    Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
    """

    v = np.asarray(v)
    p = len(v)

    # Guard: an empty vector has nothing to project.  Without this,
    # np.where(...)[0][-1] below raises "IndexError: index -1 is out of
    # bounds for axis 0 with size 0" (seen in live trading when no price
    # data is available for the sids).
    if p == 0:
        return v

    # Clamp negatives to zero, then sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    # rho: largest index where the sorted values still exceed the running
    # threshold — determines how many components stay positive.
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w
This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes.
There was a runtime error.

To demonstrate the usefulness of the new features (history() and new order methods; see here for more description), I updated the OLMAR algorithm. As you can see the code is much simpler now, especially order_target_percent() allows us to get rid of the rebalance_portfolio() logic.

247
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
import numpy as np
import math
from pytz import timezone

# globals for get_data batch transform decorator
R_P = 0 #refresh period in days
W_L = 5 #window length in days

def initialize(context):
    """Set up the trading universe, uniform starting weights, and OLMAR
    parameters on algorithm start."""

    context.stocks = [sid(1419),sid(12652),sid(6546),sid(5061),sid(6683)]

    for stock in context.stocks:
        # parenthesized print works under both Python 2 and Python 3
        # (the bare `print stock.symbol` statement is Python-2-only)
        print(stock.symbol)

    context.m = len(context.stocks)               # number of assets
    context.b_t = np.ones(context.m) / context.m  # current weights, uniform start
    context.eps = 1 # change epsilon here (reversion aggressiveness threshold)
    context.init = False              # flag: initial equal-weight orders not yet placed
    context.previous_datetime = None  # last seen bar time (for once-per-day gating)

def handle_data(context, data):
    """Per-bar driver: record cash, seed an equal-weight portfolio on the
    first call, then run one OLMAR rebalance on the first bar of each day.

    NOTE(review): the original kept an `algo_executed` flag, but it was a
    plain local -- set at the end of the function and discarded, and never
    assigned on the same-day branch (only short-circuit evaluation avoided
    an UnboundLocalError). The day-change test alone is the effective
    once-per-day gate, so that is all that is kept here.
    """
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))

    # plot available cash
    record(cash=context.portfolio.cash)

    if not context.init:
        # Equal weighting portfolio on the very first bar
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True

    # only execute algorithm once per day: proceed only when the calendar
    # day has changed since the previous bar
    previous_datetime = context.previous_datetime
    context.previous_datetime = current_datetime
    if previous_datetime is None or current_datetime.day == previous_datetime.day:
        return

    # skip tic if any orders are open or any stocks did not trade this bar
    for stock in context.stocks:
        if get_open_orders(stock) or data[stock].datetime < get_datetime():
            return

    # compute current portfolio allocations (dollar value per position)
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount * data[stock].price

    # Bring portfolio vector to unit length (assumes at least one nonzero
    # position; an all-cash book would divide by zero -- TODO confirm)
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    b_norm = olmar(context)

    # Rebalance Portfolio toward the new target weights
    for stock, percent in zip(context.stocks, b_norm):
        order_target_percent(stock, percent)

def olmar(context):
    """Logic of the olmar algorithm.

    One On-Line Moving Average Reversion step: each asset's predicted price
    relative is its 5-day volume-weighted average price divided by its
    latest close; the current weight vector is stepped toward that
    prediction and projected back onto the simplex.

    :Returns: b_norm : vector for new portfolio
    """

    # get history -- prices and volumes of the last 5 days (at close)
    prices = history(5, '1d', 'price')
    volumes = history(5, '1d', 'volume')

    # find relative moving volume-weighted average price for each security
    x_tilde = np.zeros(context.m)
    for i, stock in enumerate(context.stocks):
        # VWAP over the 5-day window; assumes nonzero total volume -- TODO confirm
        vwa_price = np.dot(prices[stock], volumes[stock]) / np.sum(volumes[stock])
        # predicted price relative: window VWAP over the most recent close
        x_tilde[i] = vwa_price/prices[stock].ix[-1]

    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam), the update step size
    dot_prod = np.dot(context.b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm((x_tilde-x_bar)))**2

    # test for divide-by-zero case (all price relatives equal -> no signal)
    if denom == 0.0:
        lam = 0 # no portfolio update
    else:
        lam = max(0, num/denom)

    # gradient step away from the mean relative
    b = context.b_t + lam*(x_tilde-x_bar)

    # project back onto the probability simplex so weights sum to 1
    b_norm = simplex_projection(b)

    return b_norm

def simplex_projection(v, b=1):
    """Project vector v onto the simplex {w : sum_i w_i = b, w_i >= 0}.

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
    Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
    Optimization Problem: min_{w} ||w - v||_2^2
    s.t. sum_{i=1}^{m} w_i = b, w_i >= 0

    Input: A vector v in R^m, and a scalar b > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4, .3, -.4, .5])
    >>> proj
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> float(proj.sum())
    1.0

    Original matlab implementation: John Duchi ([email protected])
    Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
    """

    v = np.asarray(v)
    p = len(v)

    # Zero out negatives (they cannot survive the projection), then
    # sort v into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    # rho: last index k where u_k > (cumsum_k - b)/k; theta: uniform shift
    rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho+1)])
    w = (v - theta)
    w[w<0] = 0
    return w
This backtest was created using an older version of the backtester. Please re-run this backtest to see results using the latest backtester. Learn more about the recent changes.
There was a runtime error.

So what happens here? :S

1
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
import numpy as np
import math
from pytz import timezone

# globals for get_data batch transform decorator
R_P = 0 #refresh period in days
W_L = 5 #window length in days

def initialize(context):
    """Set up the trading universe, uniform starting weights, and OLMAR
    parameters on algorithm start."""

    context.stocks = [sid(32267),sid(32265),sid(32268),sid(39215),sid(38533),sid(39214)]

    for stock in context.stocks:
        # parenthesized print works under both Python 2 and Python 3
        # (the bare `print stock.symbol` statement is Python-2-only)
        print(stock.symbol)

    context.m = len(context.stocks)               # number of assets
    context.b_t = np.ones(context.m) / context.m  # current weights, uniform start
    context.eps = 1 # change epsilon here (reversion aggressiveness threshold)
    context.init = False              # flag: initial equal-weight orders not yet placed
    context.previous_datetime = None  # last seen bar time (for once-per-day gating)

def handle_data(context, data):
    """Per-bar driver: record cash, seed an equal-weight portfolio on the
    first call, then run one OLMAR rebalance on the first bar of each day.

    NOTE(review): the original kept an `algo_executed` flag, but it was a
    plain local -- set at the end of the function and discarded, and never
    assigned on the same-day branch (only short-circuit evaluation avoided
    an UnboundLocalError). The day-change test alone is the effective
    once-per-day gate, so that is all that is kept here.
    """
    current_datetime = get_datetime().astimezone(timezone('US/Eastern'))

    # plot available cash
    record(cash=context.portfolio.cash)

    if not context.init:
        # Equal weighting portfolio on the very first bar
        for stock, percent in zip(context.stocks, context.b_t):
            order_target_percent(stock, percent)
        context.init = True

    # only execute algorithm once per day: proceed only when the calendar
    # day has changed since the previous bar
    previous_datetime = context.previous_datetime
    context.previous_datetime = current_datetime
    if previous_datetime is None or current_datetime.day == previous_datetime.day:
        return

    # skip tic if any orders are open or any stocks did not trade this bar
    for stock in context.stocks:
        if get_open_orders(stock) or data[stock].datetime < get_datetime():
            return

    # compute current portfolio allocations (dollar value per position)
    for i, stock in enumerate(context.stocks):
        context.b_t[i] = context.portfolio.positions[stock].amount * data[stock].price

    # Bring portfolio vector to unit length (assumes at least one nonzero
    # position; an all-cash book would divide by zero -- TODO confirm)
    context.b_t = context.b_t / np.sum(context.b_t)

    # Compute new portfolio weights according to OLMAR algo.
    b_norm = olmar(context)

    # Rebalance Portfolio toward the new target weights
    for stock, percent in zip(context.stocks, b_norm):
        order_target_percent(stock, percent)

def olmar(context):
    """One OLMAR (On-Line Moving Average Reversion) update step.

    Predicts each asset's next price relative as its 5-day VWAP over the
    latest close, steps the current weights toward that prediction, and
    projects the result back onto the simplex.

    :Returns: b_norm : vector for new portfolio
    """
    # trailing 5 daily closes and volumes
    prices = history(5, '1d', 'price')
    volumes = history(5, '1d', 'volume')

    # x_tilde[k]: volume-weighted average price relative to the latest close
    x_tilde = np.zeros(context.m)
    for k, security in enumerate(context.stocks):
        px = prices[security]
        vol = volumes[security]
        vwap = np.dot(px, vol) / np.sum(vol)
        x_tilde[k] = vwap / px.ix[-1]

    ###########################
    # Inside of OLMAR (algo 2): step size lam from epsilon-insensitive loss
    x_bar = x_tilde.mean()
    deviation = x_tilde - x_bar
    shortfall = context.eps - np.dot(context.b_t, x_tilde)
    norm_sq = (np.linalg.norm(deviation)) ** 2

    # guard the divide-by-zero case (no dispersion -> no update)
    lam = 0 if norm_sq == 0.0 else max(0, shortfall / norm_sq)

    # gradient step, then projection back onto the probability simplex
    b_norm = simplex_projection(context.b_t + lam * deviation)

    return b_norm

def simplex_projection(v, b=1):
"""Projection vectors to the simplex domain

Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
Optimization Problem: min_{w}\| w - v \|_{2}^{2}
s.t. sum_{i=1}^{m}=z, w_{i}\geq 0

Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w

:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> print proj
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print proj.sum()
1.0

Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
"""

v = np.asarray(v)
p = len(v)

# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)

rho = np.where(u > (sv - b) / np.arange(1, p+1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho+1)])
w = (v - theta)
w[w<0] = 0
return w
There was a runtime error.

The returns are fantastic, but they seem correlated with the benchmark. Is there evidence of alpha (say, bear-market performance)? Also, a big question is how we select the universe of stocks. I would think limiting it to blue chips may help ensure reversion to the moving average in the long run.