Quantopian Open low-beta algo

Thought I'd tinker around to see if I could get the beta down of this algo. Just under the wire at 0.29. If you don't have an algo for the Open, feel free to submit this one (or improve it and submit it). It doesn't beat SPY, but the guidance I'm seeing is that so long as the return doesn't completely stink, it could be a keeper. --Grant

766
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Adapted from:
# Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average Reversion." The 29th International Conference on Machine Learning (ICML2012), 2012.
# http://icml.cc/2012/papers/168.pdf

import numpy as np
from scipy import optimize
import pandas as pd

def initialize(context):
    """One-time setup: algorithm parameters and universe selection.

    Universe = the 20 largest NASDAQ stocks by market cap, plus PSQ (an
    inverse QQQ ETF) appended as the hedge leg of the long-only portfolio.
    """

    context.eps = 1.005  # OLMAR reversion threshold (epsilon in the paper)
    context.pct_index = 0.37 # max percentage of inverse ETF
    context.leverage = 1.0  # gross leverage multiplier applied in allocate()

    # Log the parameter choices (Python 2 print statements; Quantopian ran py2).
    print 'context.eps = ' + str(context.eps)
    print 'context.pct_index = ' + str(context.pct_index)
    print 'context.leverage = ' + str(context.leverage)

    # Latest bar data object; refreshed every minute by handle_data.
    context.data = []

    # Query the 20 largest NASDAQ names with a non-null market cap.
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(20))
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]

    context.stocks.append(symbols('PSQ')[0]) # add inverse ETF to universe

    # check if data exists
    # NOTE(review): context.data is [] at this point, so the membership test is
    # always True, and removing from a list while iterating over it skips the
    # element that follows each removal -- so this drops roughly every other
    # stock rather than all of them. Looks unintentional; confirm intent before
    # changing, since position sizing depends on the surviving universe.
    for stock in context.stocks:
        if stock not in context.data:
            context.stocks.remove(stock)

def handle_data(context, data):
    """Minute handler: cache the latest data object and chart account leverage."""
    context.data = data
    record(leverage=context.account.leverage)

def get_allocation(context,data,n,prices):
    """One OLMAR optimization step over a trailing minute-price window.

    Args:
        context: algorithm context (stocks, eps, pct_index).
        data: current bar data, used to price existing positions.
        n: window index from the caller's loop. NOTE(review): unused here;
           the caller pre-trims `prices` with .tail(n*390) instead.
        prices: minute-price DataFrame whose columns are context.stocks.

    Returns:
        (allocation, predicted_gain) on success, or (b_t, 1) -- i.e. keep the
        current weights -- if the solver fails or the clipped solution no
        longer clears the eps threshold.
    """

    # Smooth with an EWMA (span = 390 minutes = one trading day), then take the
    # ndarray with columns ordered as context.stocks. pd.ewma / as_matrix are
    # long-deprecated pandas APIs kept for the legacy Quantopian runtime.
    prices = pd.ewma(prices,span=390).as_matrix(context.stocks)

    # b_t: current dollar value held in each stock.
    b_t = []

    for stock in context.stocks:
        b_t.append(context.portfolio.positions[stock].amount*data[stock].price)

    m = len(b_t)
    b_0 = np.ones(m) / m  # equal-weight portfolio
    denom = np.sum(b_t)

    # Normalize holdings into weights; fall back to equal weight when flat.
    if denom == 0.0:
        b_t = np.copy(b_0)
    else:
        b_t = np.divide(b_t,denom)

    # x_tilde: predicted price relatives -- window mean over latest smoothed
    # price. Values > 1 mean reversion upward is expected.
    x_tilde = []

    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        x_tilde.append(mean_price/prices[-1,i])

    # Long-only box bounds: each weight confined to [0, 1].
    bnds = []
    limits = [0,1]

    for stock in context.stocks:
        bnds.append(limits)

    bnds = tuple(tuple(x) for x in bnds)

    # Constraints: weights sum to 1; predicted portfolio return >= eps
    # (OLMAR, Sec. 4.2); the last asset (PSQ inverse ETF) pinned to pct_index.
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps},
            {'type': 'eq', 'fun': lambda x: x[-1] - context.pct_index})

    # Minimize 0.5*||b - b_t||^2 (stay close to current weights) subject to the
    # constraints. Note args=b_t: scipy unpacks the array element-wise into
    # *args, which norm_squared reassembles via np.asarray.
    res= optimize.minimize(norm_squared, b_0, args=b_t,jac=norm_squared_deriv,method='SLSQP',constraints=cons,bounds=bnds, options={'disp': False,  'maxiter': 100, 'iprint': 1, 'ftol': 1e-6})

    # Clip any tiny negatives from the solver and renormalize to sum to 1.
    allocation = res.x
    allocation[allocation<0] = 0
    allocation = allocation/np.sum(allocation)

    # Accept only a successful solve that still beats eps after clipping.
    if res.success and (np.dot(allocation,x_tilde)-context.eps > 0):
        return (allocation,np.dot(allocation,x_tilde))
    else:
        return (b_t,1)

# check if data exists
for stock in context.stocks:
if stock not in data:
context.stocks.remove(stock)

# check for de-listed stocks & leveraged ETFs
for stock in context.stocks:
if stock.security_end_date < get_datetime():  # de-listed ?
context.stocks.remove(stock)
if stock in security_lists.leveraged_etf_list: # leveraged ETF?
context.stocks.remove(stock)

# check for open orders
if get_open_orders():
return

# find average weighted allocation over range of trailing window lengths
a = np.zeros(len(context.stocks))
w = 0
prices = history(8*390,'1m','price')
for n in range(1,9):
(a,w) = get_allocation(context,data,n,prices.tail(n*390))
a += w*a
w += w

allocation = a/w
allocation = allocation/np.sum(allocation)

allocate(context,data,allocation)

def allocate(context, data, desired_port):
    """Push the portfolio toward desired_port.

    The final entry of desired_port is the inverse-ETF (PSQ) weight; the
    rest are long stock weights. Holdings outside the universe are closed.
    """
    record(long=sum(desired_port[:-1]))
    record(inverse=desired_port[-1])

    # Scale every target weight by the configured gross leverage.
    for stock, target in zip(context.stocks, desired_port):
        order_target_percent(stock, context.leverage * target)

    # Flatten anything held that is no longer part of the universe.
    for stock in data:
        if stock not in context.stocks:
            order_target_percent(stock, 0)

def norm_squared(b, *args):
    """Objective for SLSQP: 0.5 * ||b - b_t||^2.

    The reference portfolio b_t arrives as individually unpacked scalars in
    *args (scipy unpacks the array passed via ``args=``), so it is
    reassembled with np.asarray before differencing.
    """
    reference = np.asarray(args)
    diff = b - reference
    return 0.5 * np.dot(diff, diff.T)

def norm_squared_deriv(b, *args):
    """Gradient of norm_squared with respect to b, i.e. (b - b_t)."""
    return b - np.asarray(args)
We have migrated this algorithm to work with a new version of the Quantopian API. The code is different than the original version, but the investment rationale of the algorithm has not changed. We've put everything you need to know here on one page.
There was a runtime error.
12 responses

Grant,
This is a pretty cool algo. Really like how it integrates fundamentals for screening, quantitative mean-reversion, as well as scipy.optimize!

One thing I'd recommend though, is to replace your inverse QQQ ETF by just shorting the QQQ. It [should] slightly improve the performance over the long term because of the consistent drag incurred by inverse ETF's. As well, since QQQ is a much more liquid stock, transactions costs will improve as well as make the algo more scaleable.

-Justin

Disclaimer

The material on this website is provided for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation or endorsement for any security or strategy, nor does it constitute an offer to provide investment advisory services by Quantopian. In addition, the material offers no opinion with respect to the suitability of any security or specific investment. No information contained herein should be regarded as a suggestion to engage in or refrain from any investment-related course of action as none of Quantopian nor any of its affiliates is undertaking to provide investment advice, act as an adviser to any plan or entity subject to the Employee Retirement Income Security Act of 1974, as amended, individual retirement account or individual retirement annuity, or give advice in a fiduciary capacity with respect to the materials presented herein. If you are an individual retirement or other investor, contact your financial advisor or other fiduciary unrelated to Quantopian about whether any given investment idea, strategy, product or service described herein may be appropriate for your circumstances. All investments involve risk, including loss of principal. Quantopian makes no guarantees as to the accuracy or completeness of the views expressed in the website. The views are subject to change, and may have become unreliable for various reasons, including changes in market conditions or economic circumstances.

Thanks Justin,

It is on my to-do list to figure out how to cajole the algo into handling shorting. It was convenient to use the inverse ETF, which is consistent with the long-only portfolio that the algo can optimize.

By the way, if you have any influence and like the use of optimizers, get CVXOPT fully integrated, and CVXPY, too (which is more user-friendly)! My understanding is that it would be better to use an optimizer that is designed specifically for convex problems, over the scipy.optimize one, which I gather is more generic.

Also "pretty cool algo" because it has some interesting technical elements? Or "pretty cool algo" because you'd like to evaluate it in the contest for the Q hedge fund?

As a side comment, my prediction is that for the contest backtests, you'll get a lot of invalid algos, since folks will bias the long-only side of their strategy like crazy, and then balance it out with shorting SPY, or some other relevant index. At least that's what I would consider, to make sure I ended up in the running for the month-long live trading. Heck with the hedge fund, which still seems to be a twinkle in Q's eye. Try to win the contest and make some cash!

Grant

I second Grant on request for CVXPY. There are so many ideas/papers on market neutral strategies that I want to try and all of them require optimization. Both CVXOPT and scipy optimize are so difficult to use. I think that Q wants to make it easier for algorithm writers to focus on their trading algos rather than spend hours trying to specify a convex optimization in scipy optimize or CVXOPT.

@Grant,
My "pretty cool" statement was around how your algo sort of merges the "old school" with the "new school."

"Old School" investors were typically brought up purely looking at only fundamental data from financial statements when making their investment decisions; "New School" because lots of the newer traders/investors out there are adhering solely to quantitative criteria like a mean-reversion signal. Then additionally in the "New School" category you're using a canned open-source python optimizer to arrive at your portfolio allocations. It's so nice how these days all of these complex tools like optimizers are effectively a single line of code, and as long as the user understands how to pass data and parameters into it, you can get results -- without having to write the optimizer code yourself!

Even just several years ago, the ease of which to integrate all 3 of these types of investment data/tools was very difficult for most, and surely nearly impossible for anyone not working at an investment firm with a large budget for both data + tools that interface across the data sources and analytical toolkit all within a single platform. That's what I feel is "pretty cool."

It's strategies such as these, that use various data sources and creates a signal from the merging of them into something actionable, and then applying additional analytical rigor through the intelligent use of tools like optimizers is where I might suspect a significant amount of alpha may be generated over the next N years in the markets, since as I mentioned it just wasn't all that easy to bring all the pieces together even just a few years ago so not a lot of time has been spent looking for strategies that might be profitable across all the new datasets available to us in the world today, coupled with the computing power and open source analytical libraries at our disposal.

With regards to shorting SPY: A quick and dirty way to use your existing code that requires only longs as inputs to the optimizers is to simply keep using the inverse ETF for the optimization, and then once you know how much to allocate to the inverse ETF, convert that % allocation of the inverse ETF and then just short that amount of SPY. E.g. Whether you are long $10,000 of the inverse ETF or short $10,000 of SPY is effectively the same thing.

As well, I definitely think it's quite alright for those with good long-only strategies to just take a crack at shorting SPY against the longs as a first take at submitting contest algos that are more market neutral in the search for "alpha" (and hopefully a high contest score!) The simplest way to do this is to just short SPY in the same $ or % amount as your longs sum up to. More sophisticated ways to hedge with SPY might be to compute an N-day beta of each stock you are long versus the SPY and use that beta (e.g. the coefficient from that regression of stock to SPY) as the ratio of how much SPY to short for each stock. E.g. if you run the regression of AAPL.pct_change() = beta*SPY.pct_change() + constant and the regression returns a value of 1.2 for beta then for every $1 of AAPL you are long you would also go short $1.20 of SPY. As you can imagine there is a bit of an art (or another optimization problem!) in choosing how many days of data with which to compute that regression to determine the betas for each long stock.

@ Justin,

Well, unfortunately we are dealing with a commodity accessible to everyone in the hedge fund space. Big honkin' computers, data galore, canned, sophisticated software packages, 24/7 global, high-speed networks, and well-educated, experienced people. It is not obvious that Q will win over market share by offering a free slice of the commodity technology available to its competitors. There's a fundamental assumption about crowd-sourcing being advantageous, but I'm not so sure. Any good algos yet?

@Grant: as you check whether the stocks are in context.data, and before_trading_start happens before the market opens, you always check whether the stock was in yesterday's data at close. I don't think it will fail often as a test, but it can.

For the rest, I see I have to get better at using constraints in the optimizer. How do you come up with the constraints? Does one constrain everything except the thing you want to optimize for?

    bnds = []
limits = [0,1]
for stock in context.stocks:
bnds.append(limits)
bnds = tuple(tuple(x) for x in bnds)
cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
{'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps},
{'type': 'eq', 'fun': lambda x: x[-1] - context.pct_index})


@ Peter,

The first equality constraint is simply setting the sum of the portfolio weights to 1.0. The second inequality constraint comes right out of the paper referenced in the code, Section 4.2:

Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average Reversion." The 29th International Conference on Machine Learning (ICML2012), 2012.
http://icml.cc/2012/papers/168.pdf

The final equality constraint sets the weight of the inverse ETF (it should really come after the first constraint, for clarity, since it is simply adding an additional constraint on the weights).

I'm no expert, but I think that the bounds limit the search space of the optimizer, so they may be treated differently than the constraints. You can try removing the final constraint, and adding:

bnds[-1] = [context.pct_index,context.pct_index]


This just says that the weight of the inverse ETF will be context.pct_index.

It is a bit of a mind-bender in that what's being minimized is the difference between the old portfolio vector (weights) and the new one (the squared Euclidean distance is the objective function, see http://en.wikipedia.org/wiki/Euclidean_distance). So, without the constraints, the old and new vectors would be the same.

Again, I claim no expertise, but I think that either CVXOPT or CVXPY would be more appropriate here, since they are formulated for strictly convex objective functions with linear equality and inequality constraints.

Hope this helps.

Grant

Here's the code updated to incorporate Justin's suggestion of shorting QQQ instead of going long the inverse ETF, PSQ. I don't think it's the kind of thing the Q team is wanting to include in their fund, but it appears it might pass the beta sniff test with decent marks in other categories. So, maybe it would do o.k. in the contest? I can't participate, so give it a whirl for me. I'm curious. --Grant

766
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Adapted from:
# Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average Reversion." The 29th International Conference on Machine Learning (ICML2012), 2012.
# http://icml.cc/2012/papers/168.pdf

import numpy as np
from scipy import optimize
import pandas as pd

def initialize(context):
    """One-time setup: parameters and universe selection (top-20 NASDAQ by
    market cap). In this version the hedge is a QQQ short placed in allocate().
    """

    context.eps = 1.005  # OLMAR reversion threshold (epsilon in the paper)
    context.pct_index = 0.37 # percentage short QQQ
    context.leverage = 1.0  # gross leverage applied to the long basket

    # Log the parameter choices (Python 2 print statements; Quantopian ran py2).
    print 'context.eps = ' + str(context.eps)
    print 'context.pct_index = ' + str(context.pct_index)
    print 'context.leverage = ' + str(context.leverage)

    # Latest bar data object; refreshed every minute by handle_data.
    context.data = []

    # Query the 20 largest NASDAQ names with a non-null market cap.
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(20))
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]

    # check if data exists
    # NOTE(review): context.data is [] here, so the test is always True, and
    # removing while iterating skips the next element after each removal --
    # this drops roughly every other stock. Looks unintentional; confirm
    # before changing, as the surviving universe drives sizing.
    for stock in context.stocks:
        if stock not in context.data:
            context.stocks.remove(stock)

def handle_data(context, data):
    """Minute handler: cache the latest data object and chart account leverage."""
    context.data = data
    record(leverage=context.account.leverage)

def get_allocation(context,data,n,prices):
    """One OLMAR optimization step over a trailing minute-price window.

    `n` is the caller's window index -- NOTE(review): unused here; the caller
    pre-trims `prices` with .tail(n*390). Returns (allocation, predicted_gain),
    or (b_t, 1) -- keep current weights -- on solver failure or when the
    clipped solution no longer clears the eps threshold.
    """

    # Smooth with a one-day (390-minute) EWMA, then take the ndarray with
    # columns ordered as context.stocks (deprecated pandas APIs, legacy runtime).
    prices = pd.ewma(prices,span=390).as_matrix(context.stocks)

    # b_t: current dollar value held in each stock.
    b_t = []

    for stock in context.stocks:
        b_t.append(context.portfolio.positions[stock].amount*data[stock].price)

    m = len(b_t)
    b_0 = np.ones(m) / m  # equal-weight portfolio
    denom = np.sum(b_t)

    # Normalize holdings into weights; fall back to equal weight when flat.
    if denom == 0.0:
        b_t = np.copy(b_0)
    else:
        b_t = np.divide(b_t,denom)

    # x_tilde: predicted price relatives (window mean / latest smoothed price).
    x_tilde = []

    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        x_tilde.append(mean_price/prices[-1,i])

    # Long-only box bounds: each weight confined to [0, 1].
    bnds = []
    limits = [0,1]

    for stock in context.stocks:
        bnds.append(limits)

    bnds = tuple(tuple(x) for x in bnds)

    # Constraints: weights sum to 1; predicted return >= eps (OLMAR, Sec. 4.2).
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps})

    # Minimize 0.5*||b - b_t||^2 subject to the constraints. args=b_t is
    # unpacked element-wise by scipy; norm_squared reassembles it.
    res= optimize.minimize(norm_squared, b_0, args=b_t,jac=norm_squared_deriv,method='SLSQP',constraints=cons,bounds=bnds, options={'disp': False,  'maxiter': 100, 'iprint': 1, 'ftol': 1e-6})

    # Clip tiny negatives from the solver and renormalize to sum to 1.
    allocation = res.x
    allocation[allocation<0] = 0
    allocation = allocation/np.sum(allocation)

    # Accept only a successful solve that still beats eps after clipping.
    if res.success and (np.dot(allocation,x_tilde)-context.eps > 0):
        return (allocation,np.dot(allocation,x_tilde))
    else:
        return (b_t,1)

# check if data exists
for stock in context.stocks:
if stock not in data:
context.stocks.remove(stock)

# check for de-listed stocks & leveraged ETFs
for stock in context.stocks:
if stock.security_end_date < get_datetime():  # de-listed ?
context.stocks.remove(stock)
if stock in security_lists.leveraged_etf_list: # leveraged ETF?
context.stocks.remove(stock)

# check for open orders
if get_open_orders():
return

# find average weighted allocation over range of trailing window lengths
a = np.zeros(len(context.stocks))
w = 0
prices = history(8*390,'1m','price')
for n in range(1,9):
(a,w) = get_allocation(context,data,n,prices.tail(n*390))
a += w*a
w += w

allocation = a/w
allocation = allocation/np.sum(allocation)

allocate(context,data,allocation)

def allocate(context, data, desired_port):
    """Order the long basket and the fixed QQQ short hedge.

    desired_port holds the long weights; pct_index of the account is shorted
    in QQQ, and the longs are scaled down by (1 - pct_index) to compensate.
    """
    # order long stocks
    long_pct = 1.0 - context.pct_index
    for stock, target in zip(context.stocks, desired_port):
        order_target_percent(stock, long_pct * context.leverage * target)

    qqq = sid(19920) # QQQ

    # short index
    order_target_percent(qqq, -context.pct_index)

    # Close anything held that is neither a universe stock nor the hedge.
    for stock in data:
        if stock not in context.stocks and stock != qqq:
            order_target_percent(stock, 0)

def norm_squared(b, *args):
    """Objective for SLSQP: 0.5 * ||b - b_t||^2.

    The reference portfolio b_t arrives as individually unpacked scalars in
    *args (scipy unpacks the array passed via ``args=``), so it is
    reassembled with np.asarray before differencing.
    """
    reference = np.asarray(args)
    diff = b - reference
    return 0.5 * np.dot(diff, diff.T)

def norm_squared_deriv(b, *args):
    """Gradient of norm_squared with respect to b, i.e. (b - b_t)."""
    return b - np.asarray(args)
We have migrated this algorithm to work with a new version of the Quantopian API. The code is different than the original version, but the investment rationale of the algorithm has not changed. We've put everything you need to know here on one page.
There was a runtime error.

Thought I would add a longer backtest.
This algorithm shows tremendous promise!
Grant, I was wondering if you could comment on the maybe weaknesses of the algorithm
so that it might be improved.

Cheers and thanks for the share!
Andrew

20
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Adapted from:
# Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average Reversion." The 29th International Conference on Machine Learning (ICML2012), 2012.
# http://icml.cc/2012/papers/168.pdf

import numpy as np
from scipy import optimize
import pandas as pd

def initialize(context):
    """One-time setup: parameters and universe selection (top-20 NASDAQ by
    market cap). This variant runs at 2x leverage with a 30% QQQ short.
    """

    context.eps = 1.005  # OLMAR reversion threshold (epsilon in the paper)
    context.pct_index = 0.3 # percentage short QQQ
    context.leverage = 2.0  # gross leverage applied to the long basket

    # Log the parameter choices (Python 2 print statements; Quantopian ran py2).
    print 'context.eps = ' + str(context.eps)
    print 'context.pct_index = ' + str(context.pct_index)
    print 'context.leverage = ' + str(context.leverage)

    # Latest bar data object; refreshed every minute by handle_data.
    context.data = []

    # Query the 20 largest NASDAQ names with a non-null market cap.
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(20))
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]

    # check if data exists
    # NOTE(review): context.data is [] here, so the test is always True, and
    # removing while iterating skips the next element after each removal --
    # this drops roughly every other stock. Looks unintentional; confirm
    # before changing, as the surviving universe drives sizing.
    for stock in context.stocks:
        if stock not in context.data:
            context.stocks.remove(stock)

def handle_data(context, data):
    """Minute handler: cache the latest data object and chart account leverage."""
    context.data = data
    record(leverage=context.account.leverage)

def get_allocation(context,data,n,prices):
    """One OLMAR optimization step over a trailing minute-price window.

    `n` is the caller's window index -- NOTE(review): unused here; the caller
    pre-trims `prices` with .tail(n*390). Returns (allocation, predicted_gain),
    or (b_t, 1) -- keep current weights -- on solver failure or when the
    clipped solution no longer clears the eps threshold.
    """

    # Smooth with a one-day (390-minute) EWMA, then take the ndarray with
    # columns ordered as context.stocks (deprecated pandas APIs, legacy runtime).
    prices = pd.ewma(prices,span=390).as_matrix(context.stocks)

    # b_t: current dollar value held in each stock.
    b_t = []

    for stock in context.stocks:
        b_t.append(context.portfolio.positions[stock].amount*data[stock].price)

    m = len(b_t)
    b_0 = np.ones(m) / m  # equal-weight portfolio
    denom = np.sum(b_t)

    # Normalize holdings into weights; fall back to equal weight when flat.
    if denom == 0.0:
        b_t = np.copy(b_0)
    else:
        b_t = np.divide(b_t,denom)

    # x_tilde: predicted price relatives (window mean / latest smoothed price).
    x_tilde = []

    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        x_tilde.append(mean_price/prices[-1,i])

    # Long-only box bounds: each weight confined to [0, 1].
    bnds = []
    limits = [0,1]

    for stock in context.stocks:
        bnds.append(limits)

    bnds = tuple(tuple(x) for x in bnds)

    # Constraints: weights sum to 1; predicted return >= eps (OLMAR, Sec. 4.2).
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps})

    # Minimize 0.5*||b - b_t||^2 subject to the constraints. args=b_t is
    # unpacked element-wise by scipy; norm_squared reassembles it.
    res= optimize.minimize(norm_squared, b_0, args=b_t,jac=norm_squared_deriv,method='SLSQP',constraints=cons,bounds=bnds, options={'disp': False,  'maxiter': 100, 'iprint': 1, 'ftol': 1e-6})

    # Clip tiny negatives from the solver and renormalize to sum to 1.
    allocation = res.x
    allocation[allocation<0] = 0
    allocation = allocation/np.sum(allocation)

    # Accept only a successful solve that still beats eps after clipping.
    if res.success and (np.dot(allocation,x_tilde)-context.eps > 0):
        return (allocation,np.dot(allocation,x_tilde))
    else:
        return (b_t,1)

# check if data exists
for stock in context.stocks:
if stock not in data:
context.stocks.remove(stock)

# check for de-listed stocks & leveraged ETFs
for stock in context.stocks:
if stock.security_end_date < get_datetime():  # de-listed ?
context.stocks.remove(stock)
if stock in security_lists.leveraged_etf_list: # leveraged ETF?
context.stocks.remove(stock)

# check for open orders
if get_open_orders():
return

# find average weighted allocation over range of trailing window lengths
a = np.zeros(len(context.stocks))
w = 0
prices = history(8*390,'1m','price')
for n in range(1,9):
(a,w) = get_allocation(context,data,n,prices.tail(n*390))
a += w*a
w += w

allocation = a/w
allocation = allocation/np.sum(allocation)

allocate(context,data,allocation)

def allocate(context, data, desired_port):
    """Order the long basket and the fixed QQQ short hedge.

    desired_port holds the long weights; pct_index of the account is shorted
    in QQQ, and the longs are scaled down by (1 - pct_index) to compensate.
    """
    # order long stocks
    long_pct = 1.0 - context.pct_index
    for stock, target in zip(context.stocks, desired_port):
        order_target_percent(stock, long_pct * context.leverage * target)

    qqq = sid(19920) # QQQ

    # short index
    order_target_percent(qqq, -context.pct_index)

    # Close anything held that is neither a universe stock nor the hedge.
    for stock in data:
        if stock not in context.stocks and stock != qqq:
            order_target_percent(stock, 0)

def norm_squared(b, *args):
    """Objective for SLSQP: 0.5 * ||b - b_t||^2.

    The reference portfolio b_t arrives as individually unpacked scalars in
    *args (scipy unpacks the array passed via ``args=``), so it is
    reassembled with np.asarray before differencing.
    """
    reference = np.asarray(args)
    diff = b - reference
    return 0.5 * np.dot(diff, diff.T)

def norm_squared_deriv(b, *args):
    """Gradient of norm_squared with respect to b, i.e. (b - b_t)."""
    return b - np.asarray(args)
There was a runtime error.

Hello Andrew,

One weakness is that the algo doesn't treat the long and short allocations in the same fashion. I just tacked on the fixed QQQ short. In my mind, it should really be a basket of stocks long and a basket short, with the objective of always being sorta market neutral. The basic mean reversion concept should work in both directions, I think. So, there would be an optimum allocation to maximize return for the longs and similarly for the shorts. The hard part, it seems, would be dynamically selecting the baskets of securities, and having enough of them for diversification.

Grant

@Grant: Thanks so much for putting this and all the other versions of this out. I think your idea of figuring out the short side of this is a great one. I've been trying to do the same but I haven't been successful yet. I'll let you know when I am. Conceptually, I think there are all kinds of directions you could take this idea if you figure out the short.

Good luck and thanks for all you contribute to this community.

I noticed that somewhere around Memorial Day the set of stocks can be empty and the algorithm errors out in that case. So I included an if statement in trade that returns if there are no stocks in the set.

    if len(context.stocks) == 0:
return

10
Backtest from to with initial capital
Total Returns
--
Alpha
--
Beta
--
Sharpe
--
Sortino
--
Max Drawdown
--
Benchmark Returns
--
Volatility
--
 Returns 1 Month 3 Month 6 Month 12 Month
 Alpha 1 Month 3 Month 6 Month 12 Month
 Beta 1 Month 3 Month 6 Month 12 Month
 Sharpe 1 Month 3 Month 6 Month 12 Month
 Sortino 1 Month 3 Month 6 Month 12 Month
 Volatility 1 Month 3 Month 6 Month 12 Month
 Max Drawdown 1 Month 3 Month 6 Month 12 Month
# Adapted from:
# Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average Reversion." The 29th International Conference on Machine Learning (ICML2012), 2012.
# http://icml.cc/2012/papers/168.pdf

import numpy as np
from scipy import optimize
import pandas as pd

def initialize(context):
    """One-time setup: parameters and universe selection (top-20 NASDAQ by
    market cap). The hedge is a QQQ short placed later in allocate().
    """

    context.eps = 1.005  # OLMAR reversion threshold (epsilon in the paper)
    context.pct_index = 0.37 # percentage short QQQ
    context.leverage = 1.0  # gross leverage applied to the long basket

    # Log the parameter choices (Python 2 print statements; Quantopian ran py2).
    print 'context.eps = ' + str(context.eps)
    print 'context.pct_index = ' + str(context.pct_index)
    print 'context.leverage = ' + str(context.leverage)

    # Latest bar data object; refreshed every minute by handle_data.
    context.data = []

    # Query the 20 largest NASDAQ names with a non-null market cap.
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc()).limit(20))
    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]

    # check if data exists
    # NOTE(review): context.data is [] here, so the test is always True, and
    # removing while iterating skips the next element after each removal --
    # this drops roughly every other stock. Looks unintentional; confirm
    # before changing, as the surviving universe drives sizing.
    for stock in context.stocks:
        if stock not in context.data:
            context.stocks.remove(stock)

def handle_data(context, data):
    """Minute handler: cache the latest data object and chart account leverage."""
    context.data = data
    record(leverage=context.account.leverage)

def get_allocation(context,data,n,prices):
    """One OLMAR optimization step over a trailing minute-price window.

    `n` is the caller's window index -- NOTE(review): unused here; the caller
    pre-trims `prices` with .tail(n*390). Returns (allocation, predicted_gain),
    or (b_t, 1) -- keep current weights -- on solver failure or when the
    clipped solution no longer clears the eps threshold.
    """

    # Smooth with a one-day (390-minute) EWMA, then take the ndarray with
    # columns ordered as context.stocks (deprecated pandas APIs, legacy runtime).
    prices = pd.ewma(prices,span=390).as_matrix(context.stocks)

    # b_t: current dollar value held in each stock.
    b_t = []

    for stock in context.stocks:
        b_t.append(context.portfolio.positions[stock].amount*data[stock].price)

    m = len(b_t)
    b_0 = np.ones(m) / m  # equal-weight portfolio
    denom = np.sum(b_t)

    # Normalize holdings into weights; fall back to equal weight when flat.
    if denom == 0.0:
        b_t = np.copy(b_0)
    else:
        b_t = np.divide(b_t,denom)

    # x_tilde: predicted price relatives (window mean / latest smoothed price).
    x_tilde = []

    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:,i])
        x_tilde.append(mean_price/prices[-1,i])

    # Long-only box bounds: each weight confined to [0, 1].
    bnds = []
    limits = [0,1]

    for stock in context.stocks:
        bnds.append(limits)

    bnds = tuple(tuple(x) for x in bnds)

    # Constraints: weights sum to 1; predicted return >= eps (OLMAR, Sec. 4.2).
    cons = ({'type': 'eq', 'fun': lambda x:  np.sum(x)-1.0},
            {'type': 'ineq', 'fun': lambda x:  np.dot(x,x_tilde) - context.eps})

    # Minimize 0.5*||b - b_t||^2 subject to the constraints. args=b_t is
    # unpacked element-wise by scipy; norm_squared reassembles it.
    res= optimize.minimize(norm_squared, b_0, args=b_t,jac=norm_squared_deriv,method='SLSQP',constraints=cons,bounds=bnds, options={'disp': False,  'maxiter': 100, 'iprint': 1, 'ftol': 1e-6})

    allocation = res.x
    # Debug output of the raw solver solution (left in by the poster).
    print(allocation)
    # Clip tiny negatives from the solver and renormalize to sum to 1.
    allocation[allocation<0] = 0
    allocation = allocation/np.sum(allocation)

    # Accept only a successful solve that still beats eps after clipping.
    if res.success and (np.dot(allocation,x_tilde)-context.eps > 0):
        return (allocation,np.dot(allocation,x_tilde))
    else:
        return (b_t,1)

# check if data exists
for stock in context.stocks:
if stock not in data:
context.stocks.remove(stock)

# check for de-listed stocks & leveraged ETFs
for stock in context.stocks:
if stock.security_end_date < get_datetime():  # de-listed ?
context.stocks.remove(stock)
if stock in security_lists.leveraged_etf_list: # leveraged ETF?
context.stocks.remove(stock)

# check for open orders
if get_open_orders():
return

if len(context.stocks) == 0:
return

# find average weighted allocation over range of trailing window lengths
a = np.zeros(len(context.stocks))
w = 0
prices = history(8*390,'1m','price')
for n in range(1,9):
(a,w) = get_allocation(context,data,n,prices.tail(n*390))
a += w*a
w += w

allocation = a/w
allocation = allocation/np.sum(allocation)

allocate(context,data,allocation)

def allocate(context, data, desired_port):
    """Order the long basket and the fixed QQQ short hedge.

    desired_port holds the long weights; pct_index of the account is shorted
    in QQQ, and the longs are scaled down by (1 - pct_index) to compensate.
    """
    # order long stocks
    long_pct = 1.0 - context.pct_index
    for stock, target in zip(context.stocks, desired_port):
        print('stock = ' + stock.symbol + ' allocation ' + str(target))
        order_target_percent(stock, long_pct * context.leverage * target)

    qqq = sid(19920) # QQQ

    # short index
    order_target_percent(qqq, -context.pct_index)

    # Close anything held that is neither a universe stock nor the hedge.
    for stock in data:
        if stock not in context.stocks and stock != qqq:
            order_target_percent(stock, 0)

def norm_squared(b, *args):
    """Objective for SLSQP: 0.5 * ||b - b_t||^2.

    The reference portfolio b_t arrives as individually unpacked scalars in
    *args (scipy unpacks the array passed via ``args=``), so it is
    reassembled with np.asarray before differencing.
    """
    reference = np.asarray(args)
    diff = b - reference
    return 0.5 * np.dot(diff, diff.T)

def norm_squared_deriv(b, *args):
    """Gradient of norm_squared with respect to b, i.e. (b - b_t)."""
    return b - np.asarray(args)
There was a runtime error.