Here's an algo I ran in the Quantopian contest for 6 months. It was started on 2016-06-01, and is still running under live trading (SR = 0.49 & 2.48%). No contest money, no fund allocation, no nothing...but I suppose I gained some confidence in what may turn out to be a profitless hobby (hey, some hobbies are very expensive, so it's a win). It seems o.k., but I guess not good enough relative to the Q fund competition.

If anyone has a take on the Q fund thing, and the nebulosity of it all, I'm all ears. It seems like a lot of work and a lot of waiting for zilch (well, I could have claimed my T-shirt, but I have plenty of those).

Edit: Note that in the contest, I used a higher gross leverage (`context.leverage = 2.5`). Here I'm using 1.0 gross leverage.

Returns | 1 Month | 3 Month | 6 Month | 12 Month |

Alpha | 1 Month | 3 Month | 6 Month | 12 Month |

Beta | 1 Month | 3 Month | 6 Month | 12 Month |

Sharpe | 1 Month | 3 Month | 6 Month | 12 Month |

Sortino | 1 Month | 3 Month | 6 Month | 12 Month |

Volatility | 1 Month | 3 Month | 6 Month | 12 Month |

Max Drawdown | 1 Month | 3 Month | 6 Month | 12 Month |

import numpy as np
from scipy import optimize  # kept: file-level import, may be used by other revisions
import pandas as pd


def initialize(context):
    """Set model parameters and the weekly rebalance schedule.

    Quantopian entry point; runs once when the algorithm starts.
    """
    # parameters
    # --------------------------
    context.n_stocks = 50   # number of stocks in the universe
    context.N = 5           # trailing window size, days
    context.eps = 1.0       # OLMAR optimization model parameter
    context.leverage = 1.0  # gross leverage
    context.pct_etf = 0.5   # ETF hedge percent, 0 to 0.5
    # --------------------------
    # Week start + 1 day at the open: clean the universe;
    # one hour later: compute weights and rebalance.
    schedule_function(housekeep,
                      date_rules.week_start(days_offset=1),
                      time_rules.market_open())
    schedule_function(get_weights,
                      date_rules.week_start(days_offset=1),
                      time_rules.market_open(minutes=60))
    context.first_day = True
    context.n_weeks = 0


def before_trading_start(context, data):
    """Refresh the candidate universe: the n_stocks largest NASDAQ names.

    Only context.stocks_current is refreshed daily; the traded list
    (context.stocks) is swapped in by update_stocks() every 12 weeks
    (and seeded here on the very first day).
    """
    fundamental_df = get_fundamentals(
        query(
            fundamentals.valuation.market_cap,
        )
        .filter(fundamentals.company_reference.primary_exchange_id == 'NAS')
        .filter(fundamentals.valuation.market_cap != None)
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(context.n_stocks))
    context.stocks_current = [stock for stock in fundamental_df]
    if context.first_day:
        context.stocks = context.stocks_current
        context.first_day = False


def update_stocks(context, data):
    """Adopt the latest universe snapshot as the traded list."""
    context.stocks = context.stocks_current


def housekeep(context, data):
    """Weekly cleanup: record leverage, drop leveraged-ETF/untradable names."""
    leverage = context.account.leverage
    if leverage >= 3.0:
        print("Leverage >= 3.0")
    record(leverage=leverage)
    # BUG FIX: the original removed elements from context.stocks while
    # iterating over that same list (once for leveraged ETFs, once for
    # untradable assets), which silently skips the element following each
    # removal.  Build a filtered copy instead.
    context.stocks = [
        stock for stock in context.stocks
        if stock not in security_lists.leveraged_etf_list  # leveraged ETF?
        and data.can_trade(stock)                          # check data exists
    ]
    # Count and record currently open positions.
    num_secs = 0
    for stock in context.portfolio.positions.keys():
        if context.portfolio.positions[stock].amount != 0:
            num_secs += 1
    record(num_secs=num_secs)


def get_allocation(context, data, prices):
    """One OLMAR step on a single trailing price window.

    prices : minute-bar price DataFrame whose columns cover context.stocks.
    Returns (b_norm, weight): the simplex-projected portfolio vector and
    its predicted growth weight dot(b_norm, x_tilde).
    Side effect: accumulates per-stock long/short votes in context.ls.
    """
    m = len(context.stocks)
    b_t = np.zeros(m)      # current portfolio weights (filled below)
    x_tilde = np.zeros(m)  # predicted price relatives
    # Smooth minute prices (com=390 ~ one trading day of minutes) and take
    # the column matrix in universe order.
    prices = pd.ewma(prices, com=390).as_matrix(context.stocks)

    # Current absolute dollar exposure per stock -> normalized weights.
    for i, stock in enumerate(context.stocks):
        b_t[i] = abs(context.portfolio.positions[stock].amount * prices[-1, i])
    denom = np.sum(b_t)
    # test for divide-by-zero case: no positions -> start equal-weighted
    if denom > 0:
        b_t = np.divide(b_t, denom)
    else:
        b_t = np.ones(m) / m

    # Mean-reversion predictor: window mean over last price, folded to be
    # >= 1; the fold direction is the long/short vote for the stock.
    for i, stock in enumerate(context.stocks):
        mean_price = np.mean(prices[:, i])
        price_rel = mean_price / prices[-1, i]
        if price_rel < 1:
            price_rel = 1.0 / price_rel
            context.ls[stock] += -price_rel  # vote short
        else:
            context.ls[stock] += price_rel   # vote long
        x_tilde[i] = price_rel

    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()
    # Calculate terms for lambda (lam)
    dot_prod = np.dot(b_t, x_tilde)
    num = context.eps - dot_prod
    denom = (np.linalg.norm(x_tilde - x_bar)) ** 2
    # test for divide-by-zero case
    if denom == 0.0:
        lam = 0  # no portfolio update
    else:
        lam = max(0, num / denom)
    b = b_t + lam * (x_tilde - x_bar)
    b_norm = simplex_projection(b)
    weight = np.dot(b_norm, x_tilde)
    return (b_norm, weight)


def get_weights(context, data):
    """Weekly rebalance: weight-averaged OLMAR ensemble over N windows."""
    prices = data.history(context.stocks, 'price',
                          390 * context.N, '1m').dropna(axis=1)
    context.stocks = list(prices.columns.values)
    sum_weighted_port = np.zeros(len(context.stocks))
    sum_weights = 0.0
    context.ls = {}
    for stock in context.stocks:
        context.ls[stock] = 0
    for n in range(1, context.N + 1):
        # BUG FIX: the original reassigned its accumulators from the
        # returned tuple ((a, w) = get_allocation(...)), clobbering the
        # running sums each pass, so only the last (longest) window ever
        # contributed.  Accumulate the weighted ensemble instead.
        (b_norm, weight) = get_allocation(context, data, prices.tail(n * 390))
        sum_weighted_port += weight * b_norm
        sum_weights += weight
    if sum_weights > 0:
        allocation = sum_weighted_port / sum_weights
    else:
        # Degenerate case (all ensemble weights zero): equal weight.
        allocation = np.ones(len(context.stocks)) / len(context.stocks)
    # Collapse accumulated votes to a -1/0/+1 long-short flag per stock.
    for stock in context.stocks:
        context.ls[stock] = np.sign(context.ls[stock])
    if np.sum(allocation) > 0:
        # Floor tiny weights, then renormalize so the vector sums to 1.
        floor = 0.2 / len(context.stocks)
        allocation[allocation < floor] = floor
        allocation = allocation / np.sum(allocation)
    allocate(context, data, allocation)


def allocate(context, data, desired_port):
    """Place orders for the long/short book plus the ETF hedge."""
    # Net long/short tilt of the desired portfolio, in [-1, 1].
    pct_ls = 0.0
    for i, stock in enumerate(context.stocks):
        pct_ls += context.ls[stock] * desired_port[i]
    # Scale gross exposure down as the book gets more one-sided, and by
    # the ETF hedge fraction.
    scale = 1.0 - 0.5 * abs(pct_ls)
    scale *= (2.0 / 3) * (2.0 - context.pct_etf)
    for i, stock in enumerate(context.stocks):
        if data.can_trade(stock):
            order_target_percent(
                stock,
                scale * context.leverage * context.ls[stock] * desired_port[i])
    # Hedge the net tilt with the ETF at sid(19920), opposite sign.
    # NOTE(review): presumably a broad NASDAQ ETF -- confirm the sid.
    order_target_percent(sid(19920), -context.pct_etf * context.leverage * pct_ls)
    record(pct_ls=pct_ls)
    # Close any leftover position that is no longer in the universe.
    for stock in context.portfolio.positions.keys():
        if stock not in context.stocks + [sid(19920)]:
            if data.can_trade(stock):
                order_target_percent(stock, 0)
    # Refresh the traded universe every 12 weeks.
    context.n_weeks += 1
    if context.n_weeks % 12 == 0:
        update_stocks(context, data)


def simplex_projection(v, b=1):
    """Projection vectors to the simplex domain

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.

    Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w}\\| w - v \\|_{2}^{2}
    s.t. sum_{i=1}^{m}=z, w_{i}\\geq 0

    Input: A vector v \\in R^{m}, and a scalar z > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> print(proj)
    array([ 0.33333333, 0.23333333, 0. , 0.43333333])
    >>> print(proj.sum())
    1.0

    Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
    Python-port: Copyright 2012 by Thomas Wiecki (thomas.wiecki@gmail.com).
    """
    v = np.asarray(v)
    p = len(v)
    # Clamp negatives to zero, then sort v into u in descending order.
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)
    # Largest index rho satisfying the KKT support condition.
    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w