Looking for feedback on suitability for the new contest.

Clone Algorithm

49

Loading...

There was an error loading this backtest.

Backtest from
to
with
initial capital

Cumulative performance:

Algorithm
Benchmark

Custom data:

Total Returns

--

Alpha

--

Beta

--

Sharpe

--

Sortino

--

Max Drawdown

--

Benchmark Returns

--

Volatility

--

Returns | 1 Month | 3 Month | 6 Month | 12 Month |

Alpha | 1 Month | 3 Month | 6 Month | 12 Month |

Beta | 1 Month | 3 Month | 6 Month | 12 Month |

Sharpe | 1 Month | 3 Month | 6 Month | 12 Month |

Sortino | 1 Month | 3 Month | 6 Month | 12 Month |

Volatility | 1 Month | 3 Month | 6 Month | 12 Month |

Max Drawdown | 1 Month | 3 Month | 6 Month | 12 Month |

from quantopian.algorithm import attach_pipeline, pipeline_output, order_optimal_portfolio
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import Latest, SimpleBeta, AnnualizedVolatility
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.data.builtin import USEquityPricing
import quantopian.optimize as opt
from sklearn import preprocessing
from quantopian.pipeline.experimental import QTradableStocksUS, risk_loading_pipeline
from scipy.stats.mstats import winsorize
import numpy as np
import pandas as pd

# Algo parameters
NUM_TOTAL_POSITIONS = 300
N_LEADING = 10    # days
N_TRAILING = 30   # days
EPSILON = 1.0     # OLMAR return threshold used in get_opt
WIN_LIMIT = 0     # NOTE(review): limits of (0, 0) make winsorize a no-op — confirm intended

# Optimize API parameters
MAX_GROSS_EXPOSURE = 1.0
MAX_POSITION_SIZE = 0.05  # absolute value per position
# NOTE(review): min == max pins portfolio beta at exactly -0.3 (a deliberate
# anti-beta tilt?), even though the constraint below is named `beta_neutral`.
# Confirm this is intentional and not a typo for +0.3 / a (min, max) band.
MIN_BETA_EXPOSURE = -0.3
MAX_BETA_EXPOSURE = -0.3


def initialize(context):
    """Set slippage, schedule daily jobs, and attach the two pipelines.

    Scheduled (all daily): record_out at market close; get_weights and
    rebalance 60 minutes after the open (get_weights is scheduled first,
    so rebalance sees fresh context.weights).
    """
    # set_commission(commission.PerShare(cost=0, min_trade_cost=0))
    # set_slippage(slippage.FixedSlippage(spread=0))
    set_slippage(slippage.FixedBasisPointsSlippage())

    schedule_function(record_out, date_rules.every_day(), time_rules.market_close())
    schedule_function(get_weights, date_rules.every_day(), time_rules.market_open(minutes=60))
    schedule_function(rebalance, date_rules.every_day(), time_rules.market_open(minutes=60))

    attach_pipeline(make_pipeline(context), 'my_pipe')
    attach_pipeline(risk_loading_pipeline(), 'risk_loading_pipeline')


def make_pipeline(context):
    """Build the stock-selection pipeline.

    Universe: QTradableStocksUS filtered to the 80th-95th percentile of
    annualized volatility (high- but not extreme-volatility names).
    Columns: 260-day SimpleBeta vs. SPY (sid 8554), used by the beta
    constraint in rebalance().
    """
    universe = (
        AnnualizedVolatility(mask=QTradableStocksUS())
        .percentile_between(80, 95)
    )
    beta = SimpleBeta(
        target=sid(8554),
        regression_length=260,
    )
    return Pipeline(
        columns={'beta': beta},
        screen=universe,
    )


def before_trading_start(context, data):
    """Cache today's pipeline outputs on context for the scheduled jobs."""
    context.pipeline_data = pipeline_output('my_pipe')
    # Reuse the already-computed frame instead of pulling the pipeline twice.
    context.stocks = list(context.pipeline_data.index.values)
    context.risk_loading_pipeline = pipeline_output('risk_loading_pipeline')


def record_out(context, data):
    """Record daily diagnostics: count of long positions and leverage.

    NOTE(review): only positions with amount > 0 are counted, so shorts are
    excluded from num_secs — confirm that is the intended diagnostic.
    """
    num_secs = sum(
        1 for pos in context.portfolio.positions.values() if pos.amount > 0
    )
    record(num_secs=num_secs)
    record(leverage=context.account.leverage)


def get_weights(context, data):
    """Compute OLMAR-style mean-reversion target weights into context.weights.

    Pulls N_TRAILING days of minutely prices, smooths with an EWM (com=78,
    i.e. ~2 trading hours), then for each lookback window n (in half-day
    steps of 78 minutes) accumulates two OLMAR signals:
      a: "price below trailing mean" (expected to revert up)  -> long score
      b: "price above trailing mean" (expected to revert down) -> short score
    The demeaned, L1-normalized difference a - b becomes the target weights.
    Stocks currently held but no longer in the universe get a 0 weight so
    the optimizer closes them.
    """
    prices = data.history(context.stocks, 'price', 390 * N_TRAILING, '1m').dropna(axis=1)
    context.stocks = list(prices.columns.values)
    prices = prices.ewm(ignore_na=False, min_periods=0, adjust=True, com=78).mean()

    m = len(context.stocks)

    # Current portfolio as an L1-normalized magnitude vector (OLMAR's b_t).
    b_t = np.zeros(m)
    for i, stock in enumerate(context.stocks):
        b_t[i] = abs(context.portfolio.positions[stock].amount * data.current(stock, 'price'))

    denom = np.sum(np.absolute(b_t))
    if denom > 0:  # guard divide-by-zero when flat
        b_t = b_t / denom
    else:
        b_t = np.ones(m) / m  # equal-weight prior when no positions

    a = np.zeros(m)
    b = np.zeros(m)
    for n in range(5 * N_LEADING, 5 * N_TRAILING + 1):
        # FIX: DataFrame.as_matrix() was deprecated (0.23) and removed
        # (1.0) from pandas; selecting columns then .values is equivalent
        # and works on both old and new pandas.
        p = prices[context.stocks].tail(n * 78).values
        p_mean = np.mean(p, axis=0)

        # Mean-to-last ratio: > 1 where price sits below its trailing mean.
        p_rel = p_mean / p[-1, :]
        p_rel[p_rel < 1] = 0
        a += preprocess(get_opt(p_rel, np.sign(p_rel) * b_t))

        # Last-to-mean ratio: > 1 where price sits above its trailing mean.
        p_rel = p[-1, :] / p_mean
        p_rel[p_rel < 1] = 0
        b += preprocess(get_opt(p_rel, np.sign(p_rel) * b_t))

    a = preprocess(a)
    a[a < 0] = 0
    b = preprocess(b)
    b[b < 0] = 0

    # Long-minus-short score, demeaned (dollar neutral) and L1-normalized
    # (unit gross exposure).
    context.weights = pd.Series(preprocess(a - b), index=context.stocks)
    context.weights = context.weights - context.weights.mean()
    context.weights = context.weights / context.weights.abs().sum()

    # Zero-weight any held name that dropped out of today's universe.
    close_list = {
        stock: 0
        for stock in context.portfolio.positions
        if stock not in context.stocks
    }
    # FIX: Series.append() was deprecated (1.4) and removed (2.0) from
    # pandas; pd.concat is the supported equivalent on all versions.
    context.weights = pd.concat([context.weights, pd.Series(close_list)])


def get_opt(x_tilde, b_t):
    """One OLMAR update step: move b_t toward predicted relatives x_tilde.

    lam is the passive-aggressive step size (0 when the portfolio already
    meets the EPSILON return threshold); the updated vector is projected
    back onto the simplex, then scaled by its predicted return.
    """
    x_bar = x_tilde.mean()

    # Calculate terms for lambda (lam)
    dot_prod = np.dot(b_t, x_tilde)
    num = EPSILON - dot_prod
    denom = (np.linalg.norm(x_tilde - x_bar)) ** 2

    if denom == 0.0:  # guard divide-by-zero: no portfolio update
        lam = 0
    else:
        lam = max(0, num / denom)

    b = b_t + lam * (x_tilde - x_bar)
    b_norm = simplex_projection(b)
    return b_norm * np.dot(b_norm, x_tilde)


def rebalance(context, data):
    """Send context.weights to the optimizer under risk constraints.

    Constraints: unit max gross exposure, dollar neutrality, beta exposure
    pinned to [MIN_BETA_EXPOSURE, MAX_BETA_EXPOSURE], per-position size cap,
    and the default Quantopian risk-model exposure bounds.
    """
    pipeline_data = context.pipeline_data

    objective = opt.TargetWeights(context.weights)

    constraints = []
    constraints.append(opt.MaxGrossExposure(MAX_GROSS_EXPOSURE))
    constraints.append(opt.DollarNeutral())

    # NOTE(review): named "neutral" but MIN == MAX == -0.3 fixes beta at
    # -0.3 rather than 0 — see the module-level note on the constants.
    beta_neutral = opt.FactorExposure(
        loadings=pipeline_data[['beta']],
        min_exposures={'beta': MIN_BETA_EXPOSURE},
        max_exposures={'beta': MAX_BETA_EXPOSURE},
    )
    constraints.append(beta_neutral)

    constraints.append(
        opt.PositionConcentration.with_equal_bounds(
            min=-MAX_POSITION_SIZE,
            max=MAX_POSITION_SIZE,
        ))

    risk_model_exposure = opt.experimental.RiskModelExposure(
        context.risk_loading_pipeline,
        version=opt.Newest,
    )
    constraints.append(risk_model_exposure)

    order_optimal_portfolio(
        objective=objective,
        constraints=constraints,
    )


def simplex_projection(v, b=1):
    """Project a vector onto the simplex domain.

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
    Implementation Time: 2011 June 17 by [email protected] AT pmail.ntu.edu.sg
    Optimization Problem: min_{w}\\| w - v \\|_{2}^{2}
    s.t. sum_{i=1}^{m}=z, w_{i}\\geq 0

    Input: A vector v \\in R^{m}, and a scalar z > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> proj
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> proj.sum()
    1.0

    Original matlab implementation: John Duchi ([email protected])
    Python-port: Copyright 2012 by Thomas Wiecki ([email protected]).
    """
    v = np.asarray(v)
    p = len(v)

    # Clip negatives, then sort descending to find the support of the
    # projection (Duchi et al., Fig. 1).
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w


def preprocess(a):
    """Demean, winsorize, L1-normalize, then standardize a signal vector.

    With WIN_LIMIT = 0 the winsorize call is a no-op; the final
    preprocessing.scale gives the vector zero mean and unit variance.
    """
    a = np.nan_to_num(a - np.nanmean(a))
    a = winsorize(a, limits=(WIN_LIMIT, WIN_LIMIT))
    # NOTE(review): divides by sum(|a|), which is 0 for an all-zero input
    # (produces NaNs) — upstream signals are assumed non-degenerate.
    a = a / np.sum(np.absolute(a))
    return preprocessing.scale(a)