I was just goofing around with this algo and set the trading time to mid-day; the results were relatively poor. Then I switched to one hour after the open and got this result. There seems to be a time-of-day dependence, although it warrants more investigation.

Thought I'd share in case someone has insights, or would like to do some testing.

Grant

Clone Algorithm

765

Loading...

There was an error loading this backtest.
Retry

Backtest from
to
with
initial capital

Cumulative performance:

Algorithm
Benchmark

Custom data:

Total Returns

--

Alpha

--

Beta

--

Sharpe

--

Sortino

--

Max Drawdown

--

Benchmark Returns

--

Volatility

--

Returns | 1 Month | 3 Month | 6 Month | 12 Month |

Alpha | 1 Month | 3 Month | 6 Month | 12 Month |

Beta | 1 Month | 3 Month | 6 Month | 12 Month |

Sharpe | 1 Month | 3 Month | 6 Month | 12 Month |

Sortino | 1 Month | 3 Month | 6 Month | 12 Month |

Volatility | 1 Month | 3 Month | 6 Month | 12 Month |

Max Drawdown | 1 Month | 3 Month | 6 Month | 12 Month |

# Adapted from:
# Li, Bin, and Steven HOI. "On-Line Portfolio Selection with Moving Average
# Reversion." The 29th International Conference on Machine Learning
# (ICML2012), 2012.  http://icml.cc/2012/papers/168.pdf
#
# Quantopian (zipline) algorithm: OLMAR over the 20 largest-cap NASDAQ
# stocks, rebalanced daily one hour after the open.

import numpy as np
from scipy import optimize
import pandas as pd


def initialize(context):
    """Set algorithm parameters and schedule the daily rebalance."""
    context.eps = 1.005        # OLMAR reversion threshold (epsilon)
    # context.pct_index = 1    # max percentage of inverse ETF
    context.leverage = 1.0

    # Single-argument print() is valid in both Python 2 and Python 3
    # (the original used Python 2 print statements).
    print('context.eps = ' + str(context.eps))
    # print('context.pct_index = ' + str(context.pct_index))
    print('context.leverage = ' + str(context.leverage))

    # Trade once per day, 60 minutes after the open — the time of day the
    # author found to perform best.
    schedule_function(trade,
                      date_rules.every_day(),
                      time_rules.market_open(minutes=60))

    context.data = []
    set_benchmark(sid(19920))  # QQQ


def before_trading_start(context):
    """Refresh the universe: 20 largest-cap NASDAQ names with known caps."""
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(20))

    update_universe(fundamental_df.columns.values)
    context.stocks = [stock for stock in fundamental_df]
    # context.stocks.append(symbols('PSQ')[0])  # add inverse ETF to universe

    # Keep only stocks that have pricing data.  (BUG FIX: the original
    # called context.stocks.remove() while iterating context.stocks, which
    # silently skips the element after each removal; rebuild the list.)
    context.stocks = [stock for stock in context.stocks
                      if stock in context.data]


def handle_data(context, data):
    """Record account leverage each bar and stash the latest data object."""
    record(leverage=context.account.leverage)
    context.data = data


def get_allocation(context, data, n, prices):
    """Solve one OLMAR projection step for the given trailing price window.

    Returns (allocation, weight): the new portfolio weight vector and its
    predicted price-relative return; falls back to (current weights, 1)
    when the optimizer fails or the epsilon threshold is unattainable.
    `n` is the window length in days (unused here; the caller already
    trims `prices` to n days).
    """
    # Smooth prices with an EWMA spanning one trading day (390 minutes).
    # NOTE(review): pd.ewma/as_matrix are the legacy pandas API that the
    # Quantopian platform shipped; modern pandas spells this
    # prices.ewm(span=390).mean()[context.stocks].values.
    prices = pd.ewma(prices, span=390).as_matrix(context.stocks)

    # Current portfolio weights b_t: dollar value per stock, normalized.
    b_t = []
    for stock in context.stocks:
        b_t.append(context.portfolio.positions[stock].amount
                   * data[stock].price)

    m = len(b_t)
    b_0 = np.ones(m) / m  # equal-weight portfolio
    denom = np.sum(b_t)
    if denom == 0.0:      # no open positions -> start from equal weight
        b_t = np.copy(b_0)
    else:
        b_t = np.divide(b_t, denom)

    # Price-relative predictions: window mean price / latest price.
    # x_tilde > 1 predicts upward mean reversion.
    x_tilde = []
    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:, i])
        x_tilde.append(mean_price / prices[-1, i])

    # Long-only bounds on each weight.
    bnds = []
    limits = [0, 1]
    for stock in context.stocks:
        bnds.append(limits)
    # bnds[-1] = [0, context.pct_index]  # limit exposure to index
    bnds = tuple(tuple(x) for x in bnds)

    # Fully invested, and predicted return must clear epsilon.
    cons = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0},
            {'type': 'ineq',
             'fun': lambda x: np.dot(x, x_tilde) - context.eps})

    # OLMAR projection: minimize distance to current weights subject to
    # the constraints above.
    res = optimize.minimize(
        norm_squared, b_0, args=b_t,
        jac=norm_squared_deriv, method='SLSQP',
        constraints=cons, bounds=bnds,
        options={'disp': False, 'maxiter': 100, 'iprint': 1, 'ftol': 1e-6})

    allocation = res.x
    allocation[allocation < 0] = 0           # clip tiny negative weights
    allocation = allocation / np.sum(allocation)

    if res.success and (np.dot(allocation, x_tilde) - context.eps > 0):
        return (allocation, np.dot(allocation, x_tilde))
    else:
        # Solver failed or threshold unattainable: keep current weights.
        return (b_t, 1)


def trade(context, data):
    """Daily rebalance: average OLMAR allocations over 1..8-day windows."""
    # Drop stocks with no data this bar (rebuild the list rather than
    # remove-while-iterating, which skips elements — BUG FIX).
    context.stocks = [stock for stock in context.stocks if stock in data]

    # Drop de-listed stocks and leveraged ETFs (same remove-while-iterating
    # fix as above).
    now = get_datetime()
    context.stocks = [
        stock for stock in context.stocks
        if stock.security_end_date >= now
        and stock not in security_lists.leveraged_etf_list]

    # Wait for outstanding orders to fill before placing new ones.
    if get_open_orders():
        return

    # Average the allocations over trailing windows of 1..8 days, each
    # weighted by its predicted return.
    # BUG FIX: the original wrote "(a, w) = get_allocation(...)" inside the
    # loop, clobbering the running sums so only the last window survived;
    # keep the accumulators separate from the per-window results.
    a = np.zeros(len(context.stocks))
    w = 0
    prices = history(8 * 390, '1m', 'price')
    for n in range(1, 9):
        (alloc, weight) = get_allocation(context, data, n,
                                         prices.tail(n * 390))
        a += weight * alloc
        w += weight

    allocation = a / w
    allocation = allocation / np.sum(allocation)
    allocate(context, data, allocation)


def allocate(context, data, desired_port):
    """Order toward desired_port; flatten anything not in the universe."""
    record(long=sum(desired_port))
    # record(inverse=desired_port[-1])
    for i, stock in enumerate(context.stocks):
        order_target_percent(stock, context.leverage * desired_port[i])

    # Exit positions in stocks that dropped out of the universe.
    for stock in data:
        if stock not in context.stocks:
            order_target_percent(stock, 0)


def norm_squared(b, *args):
    """0.5 * ||b - b_t||^2, the objective of the OLMAR projection.

    scipy.optimize.minimize unpacks args=b_t element-wise into the call,
    so *args re-collects the current weights; np.asarray turns them back
    into a vector.
    """
    b_t = np.asarray(args)
    delta_b = b - b_t
    return 0.5 * np.dot(delta_b, delta_b.T)


def norm_squared_deriv(b, *args):
    """Gradient of norm_squared with respect to b: (b - b_t)."""
    b_t = np.asarray(args)
    delta_b = b - b_t
    return delta_b

We have migrated this algorithm to work with a new version of the Quantopian API. The code is different than the original version, but the investment rationale of the algorithm has not changed. We've put everything you need to know here on one page.